-rw-r--r--Documentation/devicetree/bindings/arm/coresight.txt5
-rw-r--r--Documentation/devicetree/bindings/arm/marvell/cp110-system-controller.txt4
-rw-r--r--Documentation/devicetree/bindings/edac/marvell-sdei-ghes.txt110
-rw-r--r--Documentation/devicetree/bindings/mmc/cavium-mmc.txt58
-rw-r--r--Documentation/devicetree/bindings/net/dsa/mvmdio.txt67
-rw-r--r--Documentation/devicetree/bindings/perf/marvell-cn10k-tad-pmu.txt20
-rw-r--r--Documentation/devicetree/bindings/spi/cdns,xspi.yaml77
-rw-r--r--Documentation/devicetree/bindings/thermal/armada-thermal.txt1
-rw-r--r--Documentation/networking/devlink-params-octeontx2.txt66
-rw-r--r--Documentation/trace/coresight/coresight-trbe.rst38
-rw-r--r--Documentation/trace/coresight/coresight.rst32
-rw-r--r--MAINTAINERS11
-rw-r--r--arch/arm64/Kconfig48
-rw-r--r--arch/arm64/boot/dts/marvell/armada-ap807.dtsi7
-rw-r--r--arch/arm64/boot/dts/marvell/armada-cp11x.dtsi6
-rw-r--r--arch/arm64/include/asm/barrier.h1
-rw-r--r--arch/arm64/include/asm/cpucaps.h8
-rw-r--r--arch/arm64/include/asm/cputype.h13
-rw-r--r--arch/arm64/include/asm/mmu_context.h6
-rw-r--r--arch/arm64/include/asm/sysreg.h61
-rw-r--r--arch/arm64/kernel/cpu_errata.c93
-rw-r--r--arch/arm64/kernel/entry.S86
-rw-r--r--arch/arm64/kvm/arch_timer.c42
-rw-r--r--arch/arm64/lib/copy_from_user.S13
-rw-r--r--arch/arm64/lib/copy_template_nops.S234
-rw-r--r--arch/arm64/lib/copy_to_user.S14
-rw-r--r--arch/arm64/mm/context.c79
-rw-r--r--drivers/acpi/apei/bert.c65
-rw-r--r--drivers/acpi/apei/ghes.c64
-rw-r--r--drivers/acpi/apei/hest.c13
-rw-r--r--drivers/char/hw_random/Kconfig17
-rw-r--r--drivers/char/hw_random/Makefile1
-rw-r--r--drivers/char/hw_random/cavium-rng-vf.c11
-rw-r--r--drivers/char/hw_random/cn10k-rng.c184
-rw-r--r--drivers/char/hw_random/nomadik-rng.c2
-rw-r--r--drivers/clk/mvebu/ap-cpu-clk.c175
-rw-r--r--drivers/clocksource/Kconfig9
-rw-r--r--drivers/clocksource/arm_arch_timer.c54
-rw-r--r--drivers/cpufreq/armada-8k-cpufreq.c3
-rw-r--r--drivers/crypto/marvell/Kconfig17
-rw-r--r--drivers/crypto/marvell/Makefile1
-rw-r--r--drivers/crypto/marvell/octeontx2/Makefile11
-rw-r--r--drivers/crypto/marvell/octeontx2/cn10k_cpt.c104
-rw-r--r--drivers/crypto/marvell/octeontx2/cn10k_cpt.h36
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cpt_common.h175
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c118
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.h20
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cpt_hw_types.h476
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c221
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cpt_reqmgr.h197
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptlf.c434
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptlf.h410
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptpf.h77
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c881
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c600
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c1885
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h169
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptvf.h33
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c1771
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.h178
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c446
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c213
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c544
-rw-r--r--drivers/firmware/arm_scmi/mailbox.c4
-rw-r--r--drivers/firmware/arm_scmi/perf.c25
-rw-r--r--drivers/firmware/arm_sdei.c46
-rw-r--r--drivers/gpio/gpio-thunderx.c374
-rw-r--r--drivers/hwtracing/coresight/Kconfig32
-rw-r--r--drivers/hwtracing/coresight/Makefile5
-rw-r--r--drivers/hwtracing/coresight/coresight-catu.c12
-rw-r--r--drivers/hwtracing/coresight/coresight-core.c168
-rw-r--r--drivers/hwtracing/coresight/coresight-cti-core.c18
-rw-r--r--drivers/hwtracing/coresight/coresight-cti-platform.c6
-rw-r--r--drivers/hwtracing/coresight/coresight-etb10.c10
-rw-r--r--drivers/hwtracing/coresight/coresight-etm-perf.c161
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x-core.c9
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-core.c1125
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-sysfs.c467
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.h583
-rw-r--r--drivers/hwtracing/coresight/coresight-funnel.c9
-rw-r--r--drivers/hwtracing/coresight/coresight-priv.h9
-rw-r--r--drivers/hwtracing/coresight/coresight-quirks.c155
-rw-r--r--drivers/hwtracing/coresight/coresight-quirks.h64
-rw-r--r--drivers/hwtracing/coresight/coresight-replicator.c15
-rw-r--r--drivers/hwtracing/coresight/coresight-stm.c6
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-core.c120
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etf.c10
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etr.c66
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-secure-etr.c417
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-secure-etr.h115
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.h52
-rw-r--r--drivers/hwtracing/coresight/coresight-tpiu.c33
-rw-r--r--drivers/hwtracing/coresight/coresight-trbe.c1157
-rw-r--r--drivers/hwtracing/coresight/coresight-trbe.h152
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c29
-rw-r--r--drivers/i2c/busses/i2c-octeon-core.c73
-rw-r--r--drivers/i2c/busses/i2c-octeon-core.h18
-rw-r--r--drivers/i2c/busses/i2c-thunderx-pcidrv.c8
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c115
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h8
-rw-r--r--drivers/irqchip/irq-gic-v3.c17
-rw-r--r--drivers/mailbox/Kconfig9
-rw-r--r--drivers/mailbox/Makefile2
-rw-r--r--drivers/mailbox/mvl_mhu.c411
-rw-r--r--drivers/memory/pl353-smc.c2
-rw-r--r--drivers/mfd/Kconfig15
-rw-r--r--drivers/mfd/Makefile1
-rw-r--r--drivers/mfd/rsmu_i2c.c344
-rw-r--r--drivers/mfd/rsmu_private.h32
-rw-r--r--drivers/misc/Kconfig25
-rw-r--r--drivers/misc/Makefile4
-rw-r--r--drivers/misc/mrvl-loki.c223
-rw-r--r--drivers/misc/otx_bphy_ctr.c328
-rw-r--r--drivers/misc/rsmu_cdev.c336
-rw-r--r--drivers/misc/rsmu_cdev.h72
-rw-r--r--drivers/misc/rsmu_cm.c166
-rw-r--r--drivers/misc/rsmu_sabre.c128
-rw-r--r--drivers/mmc/core/Kconfig14
-rw-r--r--drivers/mmc/core/Makefile1
-rw-r--r--drivers/mmc/core/block.c26
-rw-r--r--drivers/mmc/core/block.h10
-rw-r--r--drivers/mmc/core/core.c44
-rw-r--r--drivers/mmc/core/mmcpstore.c232
-rw-r--r--drivers/mmc/host/Kconfig1
-rw-r--r--drivers/mmc/host/cavium-octeon.c11
-rw-r--r--drivers/mmc/host/cavium-thunderx.c197
-rw-r--r--drivers/mmc/host/cavium.c1577
-rw-r--r--drivers/mmc/host/cavium.h158
-rw-r--r--drivers/mmc/host/sdhci-cadence.c1555
-rw-r--r--drivers/mmc/host/sdhci-xenon-phy.c26
-rw-r--r--drivers/mmc/host/sdhci-xenon.c1
-rw-r--r--drivers/mmc/host/sdhci.c61
-rw-r--r--drivers/mtd/spi-nor/core.c54
-rw-r--r--drivers/mtd/spi-nor/macronix.c2
-rw-r--r--drivers/mtd/spi-nor/micron-st.c6
-rw-r--r--drivers/mtd/spi-nor/spansion.c2
-rw-r--r--drivers/mtd/spi-nor/winbond.c2
-rw-r--r--drivers/net/dsa/Kconfig9
-rw-r--r--drivers/net/dsa/Makefile1
-rw-r--r--drivers/net/dsa/dsa_mvmdio.c528
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic_main.c5
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c36
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c5
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c92
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.h1
-rw-r--r--drivers/net/ethernet/marvell/Kconfig1
-rw-r--r--drivers/net/ethernet/marvell/mvmdio.c22
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2.h586
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c1174
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h182
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c309
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c4219
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c368
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h10
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/Kconfig11
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/Makefile1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/Makefile16
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c1479
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.h90
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h105
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/common.h48
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h145
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.c68
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h1223
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/npc.h265
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h3775
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/ptp.c346
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/ptp.h6
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rpm.c464
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rpm.h99
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c1252
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h499
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c727
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c568
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c1110
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c2226
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c1817
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h82
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_fixes.c1009
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_fixes.h18
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c2858
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c27
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c1655
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c1434
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_ree.c1242
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c13
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h561
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c108
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_sso.c1661
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h835
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c259
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_tim.c461
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c5
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h15
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_validation.c984
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_validation.h69
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/bphy/Makefile12
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/bphy/bphy_common.h39
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/bphy/bphy_netdev_comm_if.h45
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_bphy_hw.h482
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_bphy_netdev_comm_if.h296
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_rfoe.c1427
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_rfoe.h142
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_rfoe_ethtool.c149
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_rfoe_ptp.c79
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy.h74
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy_debugfs.c165
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy_debugfs.h31
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy_hw.h381
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy_main.c887
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/bphy/otx2_cpri.c755
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/bphy/otx2_cpri.h150
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/bphy/otx2_cpri_ethtool.c102
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/bphy/otx2_rfoe.c1697
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/bphy/otx2_rfoe.h227
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/bphy/otx2_rfoe_ethtool.c152
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/bphy/otx2_rfoe_ptp.c268
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/bphy/rfoe_bphy_netdev_comm_if.h190
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/bphy/rfoe_common.h132
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/Makefile16
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c482
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h42
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c511
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h385
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c170
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c300
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.h20
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c174
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c958
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c1478
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c826
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c218
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h21
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h13
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_smqvf.c282
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h26
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c1065
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c503
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h26
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c267
-rw-r--r--drivers/net/mdio/mdio-cavium.h1
-rw-r--r--drivers/net/mdio/mdio-thunder.c66
-rw-r--r--drivers/net/phy/marvell10g.c183
-rw-r--r--drivers/net/phy/phylink.c22
-rw-r--r--drivers/of/dynamic.c10
-rw-r--r--drivers/pci/controller/Kconfig16
-rw-r--r--drivers/pci/controller/Makefile2
-rw-r--r--drivers/pci/controller/pci-octeon-pem.c204
-rw-r--r--drivers/pci/controller/pci-octeontx2-pem.c481
-rw-r--r--drivers/pci/endpoint/Kconfig7
-rw-r--r--drivers/pci/endpoint/Makefile1
-rw-r--r--drivers/pci/endpoint/pcie-armada-dw-ep.c403
-rw-r--r--drivers/pci/pcie/portdrv_core.c9
-rw-r--r--drivers/pci/probe.c15
-rw-r--r--drivers/pci/quirks.c41
-rw-r--r--drivers/perf/Kconfig15
-rw-r--r--drivers/perf/Makefile2
-rw-r--r--drivers/perf/arm_smmuv3_pmu.c10
-rw-r--r--drivers/perf/marvell_cn10k_ddr_pmu.c766
-rw-r--r--drivers/perf/marvell_cn10k_tad_pmu.c428
-rw-r--r--drivers/phy/marvell/Kconfig8
-rw-r--r--drivers/phy/marvell/Makefile1
-rw-r--r--drivers/phy/marvell/phy-mvebu-cp110-comphy.c25
-rw-r--r--drivers/phy/marvell/phy-mvebu-cp110-utmi.c384
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-cp110.c4
-rw-r--r--drivers/soc/Kconfig1
-rw-r--r--drivers/soc/Makefile1
-rw-r--r--drivers/soc/marvell/Kconfig308
-rw-r--r--drivers/soc/marvell/Makefile22
-rw-r--r--drivers/soc/marvell/cn10k-einj.c92
-rw-r--r--drivers/soc/marvell/cn10k-fwlog.c207
-rw-r--r--drivers/soc/marvell/cn10k-rpram.c223
-rw-r--r--drivers/soc/marvell/cn10k_serdes_diag.c1490
-rw-r--r--drivers/soc/marvell/cn10ka-swup/Makefile7
-rw-r--r--drivers/soc/marvell/cn10ka-swup/mrvl_swup.c512
-rw-r--r--drivers/soc/marvell/cn10ka-swup/mrvl_swup.h307
-rw-r--r--drivers/soc/marvell/gti/Makefile8
-rw-r--r--drivers/soc/marvell/gti/gti.c64
-rw-r--r--drivers/soc/marvell/gti/gti.h29
-rw-r--r--drivers/soc/marvell/gti/gti_watchdog.c268
-rw-r--r--drivers/soc/marvell/hw-access/Makefile10
-rw-r--r--drivers/soc/marvell/hw-access/hw_rw_access.c391
-rw-r--r--drivers/soc/marvell/marvell_mac_mgmt.c251
-rw-r--r--drivers/soc/marvell/mvmdio_uio.c314
-rw-r--r--drivers/soc/marvell/octeontx2-ccu/Makefile8
-rw-r--r--drivers/soc/marvell/octeontx2-ccu/README97
-rw-r--r--drivers/soc/marvell/octeontx2-ccu/ccu.c257
-rw-r--r--drivers/soc/marvell/octeontx2-dpi/Makefile8
-rw-r--r--drivers/soc/marvell/octeontx2-dpi/dpi.c621
-rw-r--r--drivers/soc/marvell/octeontx2-dpi/dpi.h335
-rw-r--r--drivers/soc/marvell/octeontx2-ghes/Makefile10
-rw-r--r--drivers/soc/marvell/octeontx2-ghes/otx2-einj.c166
-rw-r--r--drivers/soc/marvell/octeontx2-ghes/otx2-ghes-bert.c406
-rw-r--r--drivers/soc/marvell/octeontx2-ghes/otx2-ghes-bert.h35
-rw-r--r--drivers/soc/marvell/octeontx2-ghes/otx2-sdei-ghes.c900
-rw-r--r--drivers/soc/marvell/octeontx2-ghes/otx2-sdei-ghes.h107
-rw-r--r--drivers/soc/marvell/octeontx2-llc/Makefile8
-rw-r--r--drivers/soc/marvell/octeontx2-llc/llc.c117
-rw-r--r--drivers/soc/marvell/octeontx2-npa/Makefile8
-rw-r--r--drivers/soc/marvell/octeontx2-npa/npa.c1774
-rw-r--r--drivers/soc/marvell/octeontx2-npa/npa.h167
-rw-r--r--drivers/soc/marvell/octeontx2-npa/npa_api.h19
-rw-r--r--drivers/soc/marvell/octeontx2-pcicons/Makefile8
-rw-r--r--drivers/soc/marvell/octeontx2-pcicons/otx2-pci-console.c1350
-rw-r--r--drivers/soc/marvell/octeontx2-pcicons/otx2-pci-console.h194
-rw-r--r--drivers/soc/marvell/octeontx2-rm/Makefile11
-rw-r--r--drivers/soc/marvell/octeontx2-rm/domain_sysfs.c830
-rw-r--r--drivers/soc/marvell/octeontx2-rm/domain_sysfs.h18
-rw-r--r--drivers/soc/marvell/octeontx2-rm/otx2_rm.c1841
-rw-r--r--drivers/soc/marvell/octeontx2-rm/otx2_rm.h95
-rw-r--r--drivers/soc/marvell/octeontx2-rm/otxrmcmd.h42
-rw-r--r--drivers/soc/marvell/octeontx2-rm/quota.c190
-rw-r--r--drivers/soc/marvell/octeontx2-rm/quota.h90
-rw-r--r--drivers/soc/marvell/octeontx2-sdp/Makefile9
-rw-r--r--drivers/soc/marvell/octeontx2-sdp/sdp.c1789
-rw-r--r--drivers/soc/marvell/octeontx2-sdp/sdp.h150
-rw-r--r--drivers/soc/marvell/octeontx2-serdes/Makefile6
-rw-r--r--drivers/soc/marvell/octeontx2-serdes/serdes_debugfs.c1055
-rw-r--r--drivers/soc/marvell/octeontx_info.c513
-rw-r--r--drivers/soc/marvell/phy_diag.c1022
-rw-r--r--drivers/spi/Kconfig21
-rw-r--r--drivers/spi/Makefile2
-rw-r--r--drivers/spi/spi-cadence-xspi.c936
-rw-r--r--drivers/spi/spi-cavium-thunderx.c18
-rw-r--r--drivers/spi/spi-octeontx2.c392
-rw-r--r--drivers/spi/spi-octeontx2.h156
-rw-r--r--drivers/spi/spi-orion.c66
-rw-r--r--drivers/thermal/armada_thermal.c161
-rw-r--r--drivers/tty/serial/mvebu-uart.c11
-rw-r--r--drivers/uio/Kconfig9
-rw-r--r--drivers/uio/Makefile1
-rw-r--r--drivers/uio/uio_pci_ep.c216
-rw-r--r--drivers/vfio/platform/vfio_amba.c10
-rw-r--r--fs/pstore/Kconfig4
-rw-r--r--include/acpi/apei.h4
-rw-r--r--include/linux/armada-pcie-ep.h36
-rw-r--r--include/linux/coresight-pmu.h20
-rw-r--r--include/linux/coresight.h231
-rw-r--r--include/linux/cpuhotplug.h1
-rw-r--r--include/linux/gfp.h12
-rw-r--r--include/linux/kernel.h6
-rw-r--r--include/linux/mfd/idt82p33_reg.h116
-rw-r--r--include/linux/mfd/idt8a340_reg.h846
-rw-r--r--include/linux/mfd/rsmu.h42
-rw-r--r--include/linux/mmc/core.h5
-rw-r--r--include/linux/mmc/host.h12
-rw-r--r--include/linux/mtd/spi-nor.h1
-rw-r--r--include/linux/pci-acpi.h1
-rw-r--r--include/linux/phy.h7
-rw-r--r--include/linux/skbuff.h36
-rw-r--r--include/linux/soc/marvell/llc.h17
-rw-r--r--include/linux/soc/marvell/octeontx2/asm.h57
-rw-r--r--include/net/flow_offload.h2
-rw-r--r--include/net/sch_generic.h14
-rw-r--r--include/net/tc_act/tc_police.h50
-rw-r--r--include/soc/marvell/armada8k/fw.h22
-rw-r--r--include/soc/marvell/octeontx/octeontx_smc.h60
-rw-r--r--include/uapi/linux/perf_event.h13
-rw-r--r--include/uapi/linux/pkt_cls.h2
-rw-r--r--include/uapi/linux/rsmu.h64
-rw-r--r--kernel/exit.c66
-rw-r--r--mm/page_alloc.c8
-rw-r--r--net/core/skbuff.c26
-rw-r--r--net/sched/act_police.c59
-rw-r--r--net/sched/cls_api.c2
-rw-r--r--net/sched/sch_generic.c75
-rw-r--r--tools/include/linux/coresight-pmu.h20
-rw-r--r--tools/perf/arch/arm/util/cs-etm.c116
-rw-r--r--tools/perf/util/cs-etm-decoder/cs-etm-decoder.c53
-rw-r--r--tools/perf/util/cs-etm.c286
-rw-r--r--tools/perf/util/cs-etm.h32
371 files changed, 94438 insertions, 7285 deletions
diff --git a/Documentation/devicetree/bindings/arm/coresight.txt b/Documentation/devicetree/bindings/arm/coresight.txt
index d711676b4a51..66641d89c72b 100644
--- a/Documentation/devicetree/bindings/arm/coresight.txt
+++ b/Documentation/devicetree/bindings/arm/coresight.txt
@@ -124,6 +124,11 @@ its hardware characteristcs.
* arm,scatter-gather: boolean. Indicates that the TMC-ETR can safely
use the SG mode on this system.
+ * arm,max-burst-size: The maximum burst size initiated by TMC on the
+ AXI master interface. The burst size can be in the range [0..15];
+ the setting selects from one data transfer per burst (value 0) up to
+ a maximum of 16 data transfers per burst (value 15).
+
* Optional property for CATU :
* interrupts : Exactly one SPI may be listed for reporting the address
error
diff --git a/Documentation/devicetree/bindings/arm/marvell/cp110-system-controller.txt b/Documentation/devicetree/bindings/arm/marvell/cp110-system-controller.txt
index a21f7709596c..0705e765f432 100644
--- a/Documentation/devicetree/bindings/arm/marvell/cp110-system-controller.txt
+++ b/Documentation/devicetree/bindings/arm/marvell/cp110-system-controller.txt
@@ -142,8 +142,8 @@ mpp50 50 gpio, ge1(rxclk), mss_i2c(sda), spi1(csn0), uart2(txd), uart0(rxd), xg(
mpp51 51 gpio, ge1(rxd0), mss_i2c(sck), spi1(csn1), uart2(rxd), uart0(cts), sdio(pwr10)
mpp52 52 gpio, ge1(rxd1), synce1(clk), synce2(clk), spi1(csn2), uart1(cts), led(clk), pcie(rstoutn), pcie0(clkreq)
mpp53 53 gpio, ge1(rxd2), ptp(clk), spi1(csn3), uart1(rxd), led(stb), sdio(led)
-mpp54 54 gpio, ge1(rxd3), synce2(clk), ptp(pclk_out), synce1(clk), led(data), sdio(hw_rst), sdio(wr_protect)
-mpp55 55 gpio, ge1(rxctl_rxdv), ptp(pulse), sdio(led), sdio(card_detect)
+mpp54 54 gpio, ge1(rxd3), synce2(clk), ptp(pclk_out), synce1(clk), led(data), sdio(hw_rst), sdio_wp(wr_protect)
+mpp55 55 gpio, ge1(rxctl_rxdv), ptp(pulse), sdio(led), sdio_cd(card_detect)
mpp56 56 gpio, tdm(drx), au(i2sdo_spdifo), spi0(clk), uart1(rxd), sata1(present_act), sdio(clk)
mpp57 57 gpio, mss_i2c(sda), ptp(pclk_out), tdm(intn), au(i2sbclk), spi0(mosi), uart1(txd), sata0(present_act), sdio(cmd)
mpp58 58 gpio, mss_i2c(sck), ptp(clk), tdm(rstn), au(i2sdi), spi0(miso), uart1(cts), led(clk), sdio(d0)
diff --git a/Documentation/devicetree/bindings/edac/marvell-sdei-ghes.txt b/Documentation/devicetree/bindings/edac/marvell-sdei-ghes.txt
new file mode 100644
index 000000000000..b25034c8a263
--- /dev/null
+++ b/Documentation/devicetree/bindings/edac/marvell-sdei-ghes.txt
@@ -0,0 +1,110 @@
+* Marvell OcteonTX SOC sdei-ghes, bed-bert nodes
+
+The sdei-ghes and bed-bert nodes are defined to describe resources to the
+sdei-ghes module.
+
+Abstract
+--------
+
+The Generic Hardware Error Source (GHES) allows for non-standard errors to be
+reported to the system (please refer to the ACPI specification).
+
+The standard GHES driver requires the presence of ACPI tables, and accompanying
+kernel ACPI support, which is not available to systems utilizing Device Tree.
+
+The OcteonTX2 sdei-ghes module provides the required Hardware Error Source Table
+(HEST) to the kernel, allowing the GHES driver to load. This module also
+provides the Boot Error Record Table (BERT) to the kernel, which allows for
+reporting of fatal RAS errors to Linux on a subsequent boot.
+
+Additionally, this module presents GHES devices to the system, which allows
+the firmware (ATF) to report RAS errors.
+
+The following error sources are supported:
+
+ MDC - OcteonTX Memory Diagnostic Controller
+ MCC - OcteonTX Memory Common Controller
+ LMC - OcteonTX Local Memory DDR4 Controller
+ BERT - A virtual device which records fatal errors from any of the above
+ into system preserved memory.
+ NOTE: this is located in the 'bed-bert' node.
+
+Device Tree sdei-ghes binding
+-----------------------------
+
+Required properties:
+- compatible : Shall be "marvell,sdei-ghes".
+
+Required properties for mdc subnode:
+- reg : Shall contain three entries, one for each of:
+ - GHES Error Status Address (ACPI 4.0a, sec 17.3.2.6)
+ - GHES Error Status Block (ACPI 4.0a, sec 17.3.2.6.1)
+ - ring buffer for communication with firmware
+
+- event-id : SDEI event ID for receiving firmware notifications
+
+Example
+-------
+
+sdei-ghes {
+ compatible = "marvell,sdei-ghes";
+ #address-cells = <2>;
+ #size-cells = <1>;
+ memory-region = <&ghes_hest_reserved>;
+ ranges = <0x0 0x00000000 0x0 0x7f020000 0x00100>,
+ <0x0 0x00000100 0x0 0x7f020100 0x00700>,
+ <0x0 0x00000800 0x0 0x7f020800 0x08000>;
+ mdc@0 {
+ reg = <0x0 0x00000000 0x008>,
+ <0x0 0x00000100 0x100>,
+ <0x0 0x00000800 0x800>;
+ event-id = <0x40000000>;
+ };
+ mcc@8 {
+ reg = <0x0 0x00000008 0x008>,
+ <0x0 0x00000200 0x100>,
+ <0x0 0x00001000 0x800>;
+ event-id = <0x40000001>;
+ };
+ lmc@10 {
+ reg = <0x0 0x00000010 0x008>,
+ <0x0 0x00000300 0x100>,
+ <0x0 0x00001800 0x800>;
+ event-id = <0x40000002>;
+ };
+};
+
+Device Tree bed-bert binding
+-----------------------------
+
+Required properties:
+- compatible : Shall be "marvell,bed-bert".
+
+Required properties for bert subnode:
+- reg : Shall contain three entries, one for each of:
+ - GHES Error Status Address (ACPI 4.0a, sec 17.3.2.6)
+ - GHES Error Status Block (ACPI 4.0a, sec 17.3.2.6.1)
+ - ring buffer for communication with firmware
+
+Example
+-------
+
+/* Boot Error Data / BERT support */
+bed-bert {
+ compatible = "marvell,bed-bert";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ memory-region = <&ghes_bert_reserved>;
+ /*
+ * Range placeholders; these are set dynamically by
+ * firmware to match 'reg' in 'ghes_bert_reserved'.
+ */
+ ranges = <0x0 0x0 0x0 0x0 0x0 0x0>,
+ <0x0 0x0 0x0 0x0 0x0 0x0>,
+ <0x0 0x0 0x0 0x0 0x0 0x0>;
+ bert {
+ reg = <0x0 0x00000000 0x0 0x008>,
+ <0x0 0x00000100 0x0 0x100>,
+ <0x0 0x00000200 0x0 0x800>;
+ };
+};
\ No newline at end of file
diff --git a/Documentation/devicetree/bindings/mmc/cavium-mmc.txt b/Documentation/devicetree/bindings/mmc/cavium-mmc.txt
index 1433e6201dff..21ed6d4fedcc 100644
--- a/Documentation/devicetree/bindings/mmc/cavium-mmc.txt
+++ b/Documentation/devicetree/bindings/mmc/cavium-mmc.txt
@@ -17,16 +17,56 @@ Required properties:
- clocks : phandle
Optional properties:
- - for cd, bus-width and additional generic mmc parameters
- please refer to mmc.txt within this directory
+ - for cd, bus-width, vmmc-supply, vqmmc-supply, and additional generic
+ mmc parameters please refer to mmc.txt within this directory
- cavium,cmd-clk-skew : number of coprocessor clocks before sampling command
- cavium,dat-clk-skew : number of coprocessor clocks before sampling data
Deprecated properties:
-- spi-max-frequency : use max-frequency instead
-- cavium,bus-max-width : use bus-width instead
-- power-gpios : use vmmc-supply instead
-- cavium,octeon-6130-mmc-slot : use mmc-slot instead
+ - spi-max-frequency : use max-frequency instead
+ - cavium,bus-max-width : use bus-width instead
+ - power-gpios : use vmmc-supply instead
+ - cavium,octeon-6130-mmc-slot : use mmc-slot instead
+
+GPIO control via vmmc-supply & vqmmc-supply:
+ Two types of regulator object can be specified as mmc properties,
+ typically regulator-fixed controlled by GPIO pins.
+
+ Octeon/OcteonTX chips commonly use GPIO8 as an MMC-reset pin.
+ In systems which may boot from MMC, it starts as input, and is gently
+ pulled up/down by board logic to indicate the active sense of the
+ signal. Chip reset then drives the signal in the opposite direction
+ to effect a reset of target devices.
+ Device tree should model this with a vmmc-supply regulator, gated by
+ GPIO8, so GPIO8 is driven in the non-reset direction when MMC devices
+ are probed, and held there until rmmod/shutdown/suspend.
+ This allows a warm reboot to reset the MMC devices.
+
+ Octeon/OcteonTX MMC supports up to 3 mmc slots, but any
+ level-shifting to accommodate different signal voltages is
+ done by external hardware, under control of an optional
+ vqmmc regulator object, typically controlled by GPIO.
+
+ If any mmc-slots have a vqmmc-supply property, it is taken as a warning
+ that we must switch carefully between slots (unless they have the same
+ vqmmc object), tri-stating MMC signals to avoid any transient states
+ as level-shifters are enabled/disabled.
+
+ Even when so-called bi-directional level shifters are used,
+ this technique should be employed when using different bus-widths
+ on different slots, disabling level shifters to avoid presenting
+ non-uniform impedance across DATA0-7 & CMD when non-selected
+ 4-wide slots are left enabled, while accessing 8-wide targets.
+
+ Note that it's not possible to specify multiple regulators
+ controlled by the same GPIO pin but with different active states.
+ If one GPIO line is required to switch voltage/routing between
+ different mmc-slots, specify a vqmmc-supply on one slot, but
+ not the other. The regulator_disable call on leaving that slot
+ will implicitly switch the state to support the unmarked slot.
+
+ There's no need to list vqmmc-supply if all the mmc-slots on
+ a board run at the same voltage and have the same bus width.
Examples:
mmc_1_4: mmc@1,4 {
@@ -40,7 +80,8 @@ Examples:
compatible = "mmc-slot";
reg = <0>;
vmmc-supply = <&mmc_supply_3v3>;
- max-frequency = <42000000>;
+ vqmmc-supply = <&vqmmc_3v3>;
+ max-frequency = <52000000>;
bus-width = <4>;
cap-sd-highspeed;
};
@@ -49,7 +90,8 @@ Examples:
compatible = "mmc-slot";
reg = <1>;
vmmc-supply = <&mmc_supply_3v3>;
- max-frequency = <42000000>;
+ vqmmc-supply = <&vqmmc_1v8>;
+ max-frequency = <100000000>;
bus-width = <8>;
cap-mmc-highspeed;
non-removable;
diff --git a/Documentation/devicetree/bindings/net/dsa/mvmdio.txt b/Documentation/devicetree/bindings/net/dsa/mvmdio.txt
new file mode 100644
index 000000000000..c5a64e24f98a
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/dsa/mvmdio.txt
@@ -0,0 +1,67 @@
+Marvell SOHO Switch sysfs Device Tree Bindings
+----------------------------------------------
+
+The Sysfs driver (also referred to as the dsa_mvmdio driver) is designed to provide access to the SMI/XSMI bus from user space. This driver is available from the LSP 17.08 release onwards.
+
+Sysfs Interface
+Sysfs is available for accessing SOHO switch registers from user space. The following are the sysfs paths:
+Read register - /sys/devices/platform/dsa_mvmdio/read
+Write register - /sys/devices/platform/dsa_mvmdio/write
+Dump registers - /sys/devices/platform/dsa_mvmdio/dump
+Print help - /sys/devices/platform/dsa_mvmdio/help
+Updating values in these files from user space reads or modifies the switch's internal register values. The purpose of this is to debug switch functionality by reading/writing registers.
+
+Compilation
+To enable this driver, set the compilation flag CONFIG_MV_DSA_MVMVDIO
+
+DTS Configuration Example
+This is an example configuration for a Marvell board.
+The customer should remove one of the "mii-bus" or "xmii-bus" parameters as needed.
+{
+ cpn-110-master {
+ config-space {
+ mdio: mdio@12a200 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ xmdio: mdio@15b000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ dsa_mvmdio {
+ compatible = "marvell,dsa-mvmdio";
+ status = "okay";
+ mii-bus = <&mdio>; /* remove if using only xmii */
+ xmii-bus = <&xmdio>; /* remove if using only mii */
+ sw-smi-addr = <3>;
+ };
+ };
+ };
+}
+
+Usage
+help - Displays all available switch commands and their syntax.
+    # cd /sys/devices/platform/dsa_mvmdio; cat help
+read - Reads a register
+ echo [type] [port] [xdev] [reg] > read
+write - Writes to a register
+ echo [type] [port] [xdev] [reg] [val] > write
+dump - Dumps 0 to 32 registers
+ echo [type] [port] [xdev] > dump
+
+The following parameters are in hexadecimal:
+type
+ 0 - Switch registers
+ 1 - Switch internal PHY registers
+ 2 - Switch external PHY registers
+ 3 - Regular Gigabit PHY
+ 4 - Extended PHY i.e. 100G phy
+port
+ PHY ID for PHYs.
+ Port address for switches. The port address equals the port number for the Peridot family; otherwise the port address is 0x10 + port number.
+ For global 1 and global 2 registers of the switch, this parameter is the device address, i.e. 0x1b and 0x1c respectively.
+xdev
+ Device address for extended PHY, i.e. type = 4.
+ 0 otherwise.
+reg - Register address
+val - Value
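
Since the read/write/dump attributes above are plain sysfs text files, a debug
tool can drive them programmatically as well as from the shell. Below is a
minimal C sketch of such usage, assuming only the file path and the parameter
order documented above; the helper name and the example type/port/register
values are illustrative, and where the driver reports the read-back value
(e.g. the kernel log) is not specified by this binding text.

#include <stdio.h>

/* Hypothetical helper: issue one read request via the dsa_mvmdio sysfs file. */
static int mvmdio_sysfs_read(unsigned int type, unsigned int port,
			     unsigned int xdev, unsigned int reg)
{
	FILE *f = fopen("/sys/devices/platform/dsa_mvmdio/read", "w");

	if (!f)
		return -1;
	/* All parameters are hexadecimal, as documented above. */
	fprintf(f, "%x %x %x %x\n", type, port, xdev, reg);
	return fclose(f);
}

int main(void)
{
	/* Example: switch register 0x3 of port 2 (type 0, xdev unused). */
	if (mvmdio_sysfs_read(0x0, 0x2, 0x0, 0x3))
		perror("dsa_mvmdio read");
	return 0;
}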
diff --git a/Documentation/devicetree/bindings/perf/marvell-cn10k-tad-pmu.txt b/Documentation/devicetree/bindings/perf/marvell-cn10k-tad-pmu.txt
new file mode 100644
index 000000000000..8b1f753303e2
--- /dev/null
+++ b/Documentation/devicetree/bindings/perf/marvell-cn10k-tad-pmu.txt
@@ -0,0 +1,20 @@
+* Marvell CN10K LLC-TAD performance monitor unit
+
+Required properties:
+- compatible: must be:
+ "marvell,cn10k-tad-pmu"
+- tad-cnt: number of tad pmu regions
+- tad-page-size: size of entire tad block
+- tad-pmu-page-size: size of one tad pmu region
+- reg: physical address and size
+
+Example:
+
+/* Actual values updated by firmware at boot time */
+tad_pmu {
+ compatible = "marvell,cn10k-tad-pmu";
+ tad-cnt = <1>;
+ tad-page-size = <0x1000>;
+ tad-pmu-page-size = <0x1000>;
+ reg = <0x87e2 0x80000000 0x0 0x1000>;
+};
diff --git a/Documentation/devicetree/bindings/spi/cdns,xspi.yaml b/Documentation/devicetree/bindings/spi/cdns,xspi.yaml
new file mode 100644
index 000000000000..b8bb8a3dbf54
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/cdns,xspi.yaml
@@ -0,0 +1,77 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright 2020-21 Cadence
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/spi/cdns,xspi.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Cadence XSPI Controller
+
+maintainers:
+ - Parshuram Thombare <pthombar@cadence.com>
+
+description: |
+ The XSPI controller allows SPI protocol communication in
+ single, dual, quad or octal wire transmission modes for
+ read/write access to slaves such as SPI-NOR flash.
+
+allOf:
+ - $ref: "spi-controller.yaml#"
+
+properties:
+ compatible:
+ const: cdns,xspi-nor
+
+ reg:
+ items:
+ - description: address and length of the controller register set
+ - description: address and length of the Slave DMA data port
+ - description: address and length of the auxiliary registers
+
+ reg-names:
+ items:
+ - const: io
+ - const: sdma
+ - const: aux
+
+ interrupts:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ bus {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ xspi: spi@a0010000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "cdns,xspi-nor";
+ reg = <0x0 0xa0010000 0x0 0x1040>,
+ <0x0 0xb0000000 0x0 0x1000>,
+ <0x0 0xa0020000 0x0 0x100>;
+ reg-names = "io", "sdma", "aux";
+ interrupts = <0 90 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&gic>;
+
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ spi-max-frequency = <75000000>;
+ reg = <0>;
+ };
+
+ flash@1 {
+ compatible = "jedec,spi-nor";
+ spi-max-frequency = <75000000>;
+ reg = <1>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/thermal/armada-thermal.txt b/Documentation/devicetree/bindings/thermal/armada-thermal.txt
index b0bee7e42038..ab8b8fccc7af 100644
--- a/Documentation/devicetree/bindings/thermal/armada-thermal.txt
+++ b/Documentation/devicetree/bindings/thermal/armada-thermal.txt
@@ -8,6 +8,7 @@ Required properties:
* marvell,armada380-thermal
* marvell,armadaxp-thermal
* marvell,armada-ap806-thermal
+ * marvell,armada-ap807-thermal
* marvell,armada-cp110-thermal
Note: these bindings are deprecated for AP806/CP110 and should instead
diff --git a/Documentation/networking/devlink-params-octeontx2.txt b/Documentation/networking/devlink-params-octeontx2.txt
new file mode 100644
index 000000000000..7339c8de7f92
--- /dev/null
+++ b/Documentation/networking/devlink-params-octeontx2.txt
@@ -0,0 +1,66 @@
+tim_capture_timers [DEVICE, DRIVER-SPECIFIC]
+ Trigger capture of the cycle count of TIM clock sources.
+ Valid values:
+ * 0 - capture free running cycle count.
+ * 1 - capture at the software trigger.
+ * 2 - capture at the next rising edge of GPIO.
+ Type: u8
+ Configuration mode: runtime
+
+tim_capture_tenns [DEVICE, DRIVER-SPECIFIC]
+ Capture cycle count of tenns clock.
+ Type: String
+
+tim_capture_gpios [DEVICE, DRIVER-SPECIFIC]
+ Capture cycle count of gpios clock.
+ Type: String
+
+tim_capture_gti [DEVICE, DRIVER-SPECIFIC]
+ Capture cycle count of gti clock.
+ Type: String
+
+tim_capture_ptp [DEVICE, DRIVER-SPECIFIC]
+ Capture cycle count of ptp clock.
+ Type: String
+
+tim_capture_sync [DEVICE, DRIVER-SPECIFIC]
+ Capture cycle count of sync clock.
+ Type: String
+
+tim_capture_bts [DEVICE, DRIVER-SPECIFIC]
+ Capture cycle count of bts clock.
+ Type: String
+
+tim_capture_ext_gti [DEVICE, DRIVER-SPECIFIC]
+ Capture cycle count of external gti clock.
+ Type: String
+
+tim_adjust_timers [DEVICE, DRIVER-SPECIFIC]
+ Trigger adjustment of all TIM clock sources.
+ Type: Boolean
+ Configuration mode: runtime
+
+tim_adjust_tens [DEVICE, DRIVER-SPECIFIC]
+ Adjustment required in number of cycles for tenns clock.
+ Type: String
+ Configuration mode: runtime
+
+tim_adjust_gpios [DEVICE, DRIVER-SPECIFIC]
+ Adjustment required in number of cycles for gpios clock.
+ Type: String
+ Configuration mode: runtime
+
+tim_adjust_gti [DEVICE, DRIVER-SPECIFIC]
+ Adjustment required in number of cycles for gti clock.
+ Type: String
+ Configuration mode: runtime
+
+tim_adjust_ptp [DEVICE, DRIVER-SPECIFIC]
+ Adjustment required in number of cycles for ptp clock.
+ Type: String
+ Configuration mode: runtime
+
+tim_adjust_bts [DEVICE, DRIVER-SPECIFIC]
+ Adjustment required in number of cycles for bts clock.
+ Type: String
+ Configuration mode: runtime
diff --git a/Documentation/trace/coresight/coresight-trbe.rst b/Documentation/trace/coresight/coresight-trbe.rst
new file mode 100644
index 000000000000..b9928ef148da
--- /dev/null
+++ b/Documentation/trace/coresight/coresight-trbe.rst
@@ -0,0 +1,38 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==============================
+Trace Buffer Extension (TRBE).
+==============================
+
+ :Author: Anshuman Khandual <anshuman.khandual@arm.com>
+ :Date: November 2020
+
+Hardware Description
+--------------------
+
+Trace Buffer Extension (TRBE) is a per-CPU hardware block which captures, in
+system memory, CPU traces generated by the corresponding per-CPU tracing unit.
+It is plugged in as a coresight sink device because the corresponding trace
+generators (ETE) are plugged in as source devices.
+
+The TRBE is not compliant with the CoreSight architecture specification, but
+is driven via the CoreSight driver framework to support integration with the
+ETE (which is CoreSight compliant).
+
+Sysfs files and directories
+---------------------------
+
+The TRBE devices appear on the existing coresight bus alongside the other
+coresight devices::
+
+ >$ ls /sys/bus/coresight/devices
+ trbe0 trbe1 trbe2 trbe3
+
+The ``trbe<N>`` named TRBEs are associated with a CPU.::
+
+ >$ ls /sys/bus/coresight/devices/trbe0/
+ align flag
+
+*Key file items are:-*
+ * ``align``: TRBE write pointer alignment
+ * ``flag``: TRBE updates memory with access and dirty flags
diff --git a/Documentation/trace/coresight/coresight.rst b/Documentation/trace/coresight/coresight.rst
index 0b73acb44efa..169749efd8d1 100644
--- a/Documentation/trace/coresight/coresight.rst
+++ b/Documentation/trace/coresight/coresight.rst
@@ -512,6 +512,38 @@ The --itrace option controls the type and frequency of synthesized events
Note that only 64-bit programs are currently supported - further work is
required to support instruction decode of 32-bit Arm programs.
+2.2) Tracing PID
+
+The kernel can be built to write the PID value into the PE ContextID registers.
+For a kernel running at EL1, the PID is stored in CONTEXTIDR_EL1. A PE may
+implement the Arm Virtualization Host Extensions (VHE), with which the kernel
+can run at EL2 as a virtualisation host; in this case, the PID value is stored in
+CONTEXTIDR_EL2.
+
+perf provides PMU formats that program the ETM to insert these values into the
+trace data; the PMU formats are defined as below:
+
+ "contextid1": Available on both EL1 kernel and EL2 kernel. When the
+ kernel is running at EL1, "contextid1" enables the PID
+ tracing; when the kernel is running at EL2, this enables
+ tracing the PID of guest applications.
+
+ "contextid2": Only usable when the kernel is running at EL2. When
+ selected, enables PID tracing on EL2 kernel.
+
+ "contextid": Will be an alias for the option that enables PID
+ tracing. I.e,
+ contextid == contextid1, on EL1 kernel.
+ contextid == contextid2, on EL2 kernel.
+
+perf will always enable PID tracing at the relevant EL; this is accomplished by
+automatically enabling the "contextid" config - but for EL2 it is possible to
+make specific adjustments using the "contextid1" and "contextid2" configs. For
+example, if a user wants to trace PIDs for both host and guest, the two configs
+"contextid1" and "contextid2" can be set at the same time:
+
+ perf record -e cs_etm/contextid1,contextid2/u -- vm
+
Generating coverage files for Feedback Directed Optimization: AutoFDO
---------------------------------------------------------------------
diff --git a/MAINTAINERS b/MAINTAINERS
index 436bf762a66e..f06c5bba7fa7 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -10446,6 +10446,11 @@ F: drivers/mailbox/
F: include/linux/mailbox_client.h
F: include/linux/mailbox_controller.h
+MAILBOX MARVELL MHU DRIVER
+M: Wojciech Bartczak <wbartczak@marvell.com>
+S: Maintained
+F: drivers/mailbox/mvl_mhu.c
+
MAN-PAGES: MANUAL PAGES FOR LINUX -- Sections 2, 3, 4, 5, and 7
M: Michael Kerrisk <mtk.manpages@gmail.com>
L: linux-man@vger.kernel.org
@@ -10492,6 +10497,7 @@ M: Srujana Challa <schalla@marvell.com>
L: linux-crypto@vger.kernel.org
S: Maintained
F: drivers/crypto/marvell/
+F: include/linux/soc/marvell/octeontx2/
MARVELL GIGABIT ETHERNET DRIVERS (skge/sky2)
M: Mirko Lindner <mlindner@marvell.com>
@@ -10511,6 +10517,11 @@ L: linux-arm-kernel@lists.infradead.org
S: Maintained
F: arch/arm64/boot/dts/marvell/armada-8040-mcbin.dts
+MARVELL MDIO UIO DRIVER
+M: Damian Eppel <deppel@marvell.com>
+S: Maintained
+F: drivers/soc/marvell/mvmdio_uio.c
+
MARVELL MV643XX ETHERNET DRIVER
M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
L: netdev@vger.kernel.org
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 861a8aad9a17..f02465ce8e25 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -761,6 +761,45 @@ config HISILICON_ERRATUM_161600802
If unsure, say Y.
+config CAVIUM_ERRATUM_36890
+ bool "Cavium erratum 36890"
+ default y
+ help
+ Enable workaround for erratum 36890. On all ThunderX T88xx and
+ OcteonTX1 T81/T83 and some OcteonTX2 chips, the "dc zva" instruction
+ does not always work. This happens when two VAs map to the same PA,
+ including when the two VAs are identical but have different ASIDs.
+ The fix is to disable "dc zva" in userspace.
+
+ If unsure, say Y.
+
+config MRVL_ERRATUM_38500
+ bool "Marvell erratum 38500"
+ default y
+ help
+ Enable workaround for erratum 38500. The T8x ARM CPU can incorrectly
+ forward data from an older store to a younger load. When this happens,
+ an L1 Dcache parity error occurs in hardware and a synchronous parity
+ error abort is raised to software. To work around this erratum, NOPs
+ are inserted between loads and stores.
+
+ If unsure, say Y.
+
+config MRVL_ERRATUM_38545
+ bool "Marvell erratum 38545"
+ default y
+ help
+ On some OcteonTX2 chips, when an interrupt is pending and a second
+ interrupt arrives at the core under certain stall conditions, a read
+ of the interrupt acknowledge register (IAR) returns the first
+ interrupt but does not acknowledge it. A second read of IAR returns
+ the first interrupt again, not the second one. To work around this,
+ software checks the active interrupt priorities before and after the
+ acknowledge; the interrupt is ignored if there was no change in
+ active priorities.
+
+ If unsure, say Y.
+
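The erratum 38545 help text above describes the workaround only in words. The
fragment below is a conceptual C sketch of that check using the generic GICv3
sysreg accessors; it is not the implementation carried by this patch (which,
per the diffstat, touches drivers/irqchip/irq-gic-v3.c). Treating
ICC_AP1R0_EL1 as the whole "active priorities" state and returning the
spurious INTID are simplifying assumptions for illustration.

#include <linux/types.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <asm/sysreg.h>

/*
 * Conceptual sketch only: sample the group-1 active-priority register
 * around the IAR read and treat the acknowledge as not having taken
 * effect when the active priorities did not change.
 */
static u64 gic_read_iar_erratum_38545(void)
{
	u64 apr_before, apr_after, irqnr;

	apr_before = read_sysreg_s(SYS_ICC_AP1R0_EL1);
	irqnr = read_sysreg_s(SYS_ICC_IAR1_EL1);
	apr_after = read_sysreg_s(SYS_ICC_AP1R0_EL1);

	/* No new active priority: the stale INTID is treated as spurious. */
	if (apr_before == apr_after)
		return ICC_IAR1_EL1_SPURIOUS;

	return irqnr;
}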
config QCOM_FALKOR_ERRATUM_1003
bool "Falkor E1003: Incorrect translation due to ASID change"
default y
@@ -814,6 +853,14 @@ config SOCIONEXT_SYNQUACER_PREITS
endmenu
+config MRVL_OCTEONTX_EL0_INTR
+ bool "Handle interrupts in EL0 via EL3"
+ default y
+ help
+ Handle certain interrupts in EL0 with the help of EL3 firmware to
+ achieve low latency without breaking task isolation.
+ This is implemented and tested on OcteonTX and its successor
+ generations of CPUs.
choice
prompt "Page size"
@@ -1175,6 +1222,7 @@ config FORCE_MAX_ZONEORDER
config UNMAP_KERNEL_AT_EL0
bool "Unmap kernel when running in userspace (aka \"KAISER\")" if EXPERT
default y
+ depends on !GTI_WATCHDOG
help
Speculation attacks against some high-performance processors can
be used to bypass MMU permission checks and leak kernel data to
diff --git a/arch/arm64/boot/dts/marvell/armada-ap807.dtsi b/arch/arm64/boot/dts/marvell/armada-ap807.dtsi
index 623010f3ca89..6f5a38a75a02 100644
--- a/arch/arm64/boot/dts/marvell/armada-ap807.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-ap807.dtsi
@@ -26,4 +26,11 @@
clocks = <&ap_clk 0>, <&ap_clk 1>;
#clock-cells = <1>;
};
+ ap_thermal: thermal-sensor@80 {
+ compatible = "marvell,armada-ap807-thermal";
+ };
+};
+
+&ap_sdhci0 {
+ compatible = "marvell,armada-ap807-sdhci";
};
diff --git a/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi b/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi
index 9dcf16beabf5..69b72ffc02c9 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi
@@ -11,6 +11,7 @@
#include "armada-common.dtsi"
#define CP11X_PCIEx_CONF_BASE(iface) (CP11X_PCIEx_MEM_BASE(iface) + CP11X_PCIEx_MEM_SIZE(iface))
+#define CP11X_EIP197_INDEX CP11X_NUM
/ {
/*
@@ -341,7 +342,7 @@
};
CP11X_LABEL(spi0): spi@700600 {
- compatible = "marvell,armada-380-spi";
+ compatible = "marvell,armada-cp110-spi";
reg = <0x700600 0x50>;
#address-cells = <0x1>;
#size-cells = <0x0>;
@@ -352,7 +353,7 @@
};
CP11X_LABEL(spi1): spi@700680 {
- compatible = "marvell,armada-380-spi";
+ compatible = "marvell,armada-cp110-spi";
reg = <0x700680 0x50>;
#address-cells = <1>;
#size-cells = <0>;
@@ -488,6 +489,7 @@
clock-names = "core", "reg";
clocks = <&CP11X_LABEL(clk) 1 26>,
<&CP11X_LABEL(clk) 1 17>;
+ cell-index = <CP11X_EIP197_INDEX>;
dma-coherent;
};
};
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 37d891af8ea5..bffa8860af1c 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -23,6 +23,7 @@
#define dsb(opt) asm volatile("dsb " #opt : : : "memory")
#define psb_csync() asm volatile("hint #17" : : : "memory")
+#define tsb_csync() asm volatile("hint #18" : : : "memory")
#define csdb() asm volatile("hint #20" : : : "memory")
#define spec_bar() asm volatile(ALTERNATIVE("dsb nsh\nisb\n", \
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index f42fd0a2e81c..c30ec18a714f 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -66,8 +66,12 @@
#define ARM64_HAS_TLB_RANGE 56
#define ARM64_MTE 57
#define ARM64_WORKAROUND_1508412 58
-#define ARM64_SPECTRE_BHB 59
+#define ARM64_WORKAROUND_CAVIUM_36890 59
+#define ARM64_WORKAROUND_MRVL_38500 60
+#define ARM64_WORKAROUND_MRVL_38545 61
+#define ARM64_WORKAROUND_MRVL_38627 62
+#define ARM64_SPECTRE_BHB 63
-#define ARM64_NCAPS 60
+#define ARM64_NCAPS 64
#endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 39f5c1672f48..5efc259dd89e 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -90,6 +90,13 @@
#define CAVIUM_CPU_PART_THUNDERX_83XX 0x0A3
#define CAVIUM_CPU_PART_THUNDERX2 0x0AF
+#define MRVL_CPU_PART_OCTEONTX2_98XX 0x0B1
+#define MRVL_CPU_PART_OCTEONTX2_96XX 0x0B2
+#define MRVL_CPU_PART_OCTEONTX2_95XX 0x0B3
+#define MRVL_CPU_PART_OCTEONTX2_LOKI 0x0B4
+#define MRVL_CPU_PART_OCTEONTX2_95MM 0x0B5
+#define MRVL_CPU_PART_OCTEONTX2_95O 0x0B6
+
#define BRCM_CPU_PART_BRAHMA_B53 0x100
#define BRCM_CPU_PART_VULCAN 0x516
@@ -135,6 +142,12 @@
#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
#define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
#define MIDR_CAVIUM_THUNDERX2 MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX2)
+#define MIDR_MRVL_OCTEONTX2_98XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, MRVL_CPU_PART_OCTEONTX2_98XX)
+#define MIDR_MRVL_OCTEONTX2_96XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, MRVL_CPU_PART_OCTEONTX2_96XX)
+#define MIDR_MRVL_OCTEONTX2_95XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, MRVL_CPU_PART_OCTEONTX2_95XX)
+#define MIDR_MRVL_OCTEONTX2_LOKI MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, MRVL_CPU_PART_OCTEONTX2_LOKI)
+#define MIDR_MRVL_OCTEONTX2_95MM MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, MRVL_CPU_PART_OCTEONTX2_95MM)
+#define MIDR_MRVL_OCTEONTX2_95O MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, MRVL_CPU_PART_OCTEONTX2_95O)
#define MIDR_BRAHMA_B53 MIDR_CPU_MODEL(ARM_CPU_IMP_BRCM, BRCM_CPU_PART_BRAHMA_B53)
#define MIDR_BRCM_VULCAN MIDR_CPU_MODEL(ARM_CPU_IMP_BRCM, BRCM_CPU_PART_VULCAN)
#define MIDR_QCOM_FALKOR_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR_V1)
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 5a54a5ab5f92..e96ce33a6518 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -254,6 +254,12 @@ void post_ttbr_update_workaround(void);
unsigned long arm64_mm_context_get(struct mm_struct *mm);
void arm64_mm_context_put(struct mm_struct *mm);
+#ifdef CONFIG_MRVL_OCTEONTX_EL0_INTR
+int lock_context(struct mm_struct *mm, int index);
+int unlock_context_by_index(int index);
+bool unlock_context_by_mm(struct mm_struct *mm);
+#endif
+
#endif /* !__ASSEMBLY__ */
#endif /* !__ASM_MMU_CONTEXT_H */
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 1f2209ad2cca..3bddea1f9357 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -188,6 +188,7 @@
#define SYS_GCR_EL1 sys_reg(3, 0, 1, 0, 6)
#define SYS_ZCR_EL1 sys_reg(3, 0, 1, 2, 0)
+#define SYS_TRFCR_EL1 sys_reg(3, 0, 1, 2, 1)
#define SYS_TTBR0_EL1 sys_reg(3, 0, 2, 0, 0)
#define SYS_TTBR1_EL1 sys_reg(3, 0, 2, 0, 1)
@@ -325,6 +326,55 @@
/*** End of Statistical Profiling Extension ***/
+/*
+ * TRBE Registers
+ */
+#define SYS_TRBLIMITR_EL1 sys_reg(3, 0, 9, 11, 0)
+#define SYS_TRBPTR_EL1 sys_reg(3, 0, 9, 11, 1)
+#define SYS_TRBBASER_EL1 sys_reg(3, 0, 9, 11, 2)
+#define SYS_TRBSR_EL1 sys_reg(3, 0, 9, 11, 3)
+#define SYS_TRBMAR_EL1 sys_reg(3, 0, 9, 11, 4)
+#define SYS_TRBTRG_EL1 sys_reg(3, 0, 9, 11, 6)
+#define SYS_TRBIDR_EL1 sys_reg(3, 0, 9, 11, 7)
+
+#define TRBLIMITR_LIMIT_MASK GENMASK_ULL(51, 0)
+#define TRBLIMITR_LIMIT_SHIFT 12
+#define TRBLIMITR_NVM BIT(5)
+#define TRBLIMITR_TRIG_MODE_MASK GENMASK(1, 0)
+#define TRBLIMITR_TRIG_MODE_SHIFT 3
+#define TRBLIMITR_FILL_MODE_MASK GENMASK(1, 0)
+#define TRBLIMITR_FILL_MODE_SHIFT 1
+#define TRBLIMITR_ENABLE BIT(0)
+#define TRBPTR_PTR_MASK GENMASK_ULL(63, 0)
+#define TRBPTR_PTR_SHIFT 0
+#define TRBBASER_BASE_MASK GENMASK_ULL(51, 0)
+#define TRBBASER_BASE_SHIFT 12
+#define TRBSR_EC_MASK GENMASK(5, 0)
+#define TRBSR_EC_SHIFT 26
+#define TRBSR_IRQ BIT(22)
+#define TRBSR_TRG BIT(21)
+#define TRBSR_WRAP BIT(20)
+#define TRBSR_ABORT BIT(18)
+#define TRBSR_STOP BIT(17)
+#define TRBSR_MSS_MASK GENMASK(15, 0)
+#define TRBSR_MSS_SHIFT 0
+#define TRBSR_BSC_MASK GENMASK(5, 0)
+#define TRBSR_BSC_SHIFT 0
+#define TRBSR_FSC_MASK GENMASK(5, 0)
+#define TRBSR_FSC_SHIFT 0
+#define TRBMAR_SHARE_MASK GENMASK(1, 0)
+#define TRBMAR_SHARE_SHIFT 8
+#define TRBMAR_OUTER_MASK GENMASK(3, 0)
+#define TRBMAR_OUTER_SHIFT 4
+#define TRBMAR_INNER_MASK GENMASK(3, 0)
+#define TRBMAR_INNER_SHIFT 0
+#define TRBTRG_TRG_MASK GENMASK(31, 0)
+#define TRBTRG_TRG_SHIFT 0
+#define TRBIDR_FLAG BIT(5)
+#define TRBIDR_PROG BIT(4)
+#define TRBIDR_ALIGN_MASK GENMASK(3, 0)
+#define TRBIDR_ALIGN_SHIFT 0
+
#define SYS_PMINTENSET_EL1 sys_reg(3, 0, 9, 14, 1)
#define SYS_PMINTENCLR_EL1 sys_reg(3, 0, 9, 14, 2)
@@ -467,6 +517,7 @@
#define SYS_PMCCFILTR_EL0 sys_reg(3, 3, 14, 15, 7)
#define SYS_ZCR_EL2 sys_reg(3, 4, 1, 2, 0)
+#define SYS_TRFCR_EL2 sys_reg(3, 4, 1, 2, 1)
#define SYS_DACR32_EL2 sys_reg(3, 4, 3, 0, 0)
#define SYS_SPSR_EL2 sys_reg(3, 4, 4, 0, 0)
#define SYS_ELR_EL2 sys_reg(3, 4, 4, 0, 1)
@@ -836,6 +887,8 @@
#define ID_AA64MMFR2_CNP_SHIFT 0
/* id_aa64dfr0 */
+#define ID_AA64DFR0_TRBE_SHIFT 44
+#define ID_AA64DFR0_TRACE_FILT_SHIFT 40
#define ID_AA64DFR0_DOUBLELOCK_SHIFT 36
#define ID_AA64DFR0_PMSVER_SHIFT 32
#define ID_AA64DFR0_CTX_CMPS_SHIFT 28
@@ -1010,6 +1063,14 @@
/* Safe value for MPIDR_EL1: Bit31:RES1, Bit30:U:0, Bit24:MT:0 */
#define SYS_MPIDR_SAFE_VAL (BIT(31))
+#define TRFCR_ELx_TS_SHIFT 5
+#define TRFCR_ELx_TS_VIRTUAL ((0x1UL) << TRFCR_ELx_TS_SHIFT)
+#define TRFCR_ELx_TS_GUEST_PHYSICAL ((0x2UL) << TRFCR_ELx_TS_SHIFT)
+#define TRFCR_ELx_TS_PHYSICAL ((0x3UL) << TRFCR_ELx_TS_SHIFT)
+#define TRFCR_EL2_CX BIT(3)
+#define TRFCR_ELx_ExTRE BIT(1)
+#define TRFCR_ELx_E0TRE BIT(0)
+
#ifdef __ASSEMBLY__
.irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
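
The TRBE macros added to sysreg.h above are plain shift/mask definitions, so
composing a register value is done by hand in the driver. The following is a
small illustrative C sketch of that composition under stated assumptions: the
function name is made up, fill and trigger modes are left at zero, and the
real programming sequence in the TRBE driver is more involved (TRBSR status
checks, additional barriers, and so on).

#include <asm/barrier.h>
#include <asm/sysreg.h>

/*
 * Illustrative sketch only: enable the trace buffer over [base, limit)
 * with the write pointer at base + head, using the TRBE encodings added
 * above. base and limit are assumed to be page aligned.
 */
static void trbe_program_buffer(unsigned long base, unsigned long limit,
				unsigned long head)
{
	unsigned long trblimitr;

	write_sysreg_s(base, SYS_TRBBASER_EL1);		/* BASE: bits [63:12] */
	write_sysreg_s(base + head, SYS_TRBPTR_EL1);	/* current write pointer */

	/* LIMIT also occupies bits [63:12]; keep the low 12 bits clear. */
	trblimitr = limit & (TRBLIMITR_LIMIT_MASK << TRBLIMITR_LIMIT_SHIFT);
	trblimitr |= TRBLIMITR_ENABLE;
	write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1);
	isb();
}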
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 533559c7d2b3..e3b0c099a55b 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -351,6 +351,70 @@ static const struct midr_range erratum_1463225[] = {
{},
};
#endif
+#ifdef CONFIG_MARVELL_ERRATUM_38627
+static const struct midr_range marvell_erratum_38627_cpus[] = {
+ /* Marvell OcteonTX 2, 95xx all passes */
+ MIDR_ALL_VERSIONS(MIDR_MRVL_OCTEONTX2_95XX),
+ /* Marvell OcteonTX 2, 95MM all passes */
+ MIDR_ALL_VERSIONS(MIDR_MRVL_OCTEONTX2_95MM),
+ /* Marvell OcteonTX 2, LOKI all passes */
+ MIDR_ALL_VERSIONS(MIDR_MRVL_OCTEONTX2_LOKI),
+ /* Marvell OcteonTX 2, 96xx all passes */
+ MIDR_ALL_VERSIONS(MIDR_MRVL_OCTEONTX2_96XX),
+ /* Marvell OcteonTX 2, 98xx pass 1.0 */
+ MIDR_REV(MIDR_MRVL_OCTEONTX2_98XX, 0, 0),
+ /* Marvell OcteonTX 2, 95O pass 1.0 */
+ MIDR_REV(MIDR_MRVL_OCTEONTX2_95O, 0, 0),
+ {},
+};
+#endif
+
+static void __maybe_unused
+cpu_enable_trap_zva_access(const struct arm64_cpu_capabilities *__unused)
+{
+ /*
+ * Clear SCTLR_EL2.DZE or SCTLR_EL1.DZE depending
+ * on if we are in EL2.
+ */
+ if (!is_kernel_in_hyp_mode())
+ sysreg_clear_set(sctlr_el1, SCTLR_EL1_DZE, 0);
+ else
+ sysreg_clear_set(sctlr_el2, SCTLR_EL1_DZE, 0);
+}
+
+static const struct midr_range cavium_erratum_36890_cpus[] = {
+ MIDR_ALL_VERSIONS(MIDR_THUNDERX),
+ /* Cavium ThunderX, T81 all passes */
+ MIDR_ALL_VERSIONS(MIDR_THUNDERX_81XX),
+ /* Cavium ThunderX, T83 all passes */
+ MIDR_ALL_VERSIONS(MIDR_THUNDERX_83XX),
+ /* Marvell OcteonTX 2, 96xx pass A0, A1, and B0 */
+ MIDR_RANGE(MIDR_MRVL_OCTEONTX2_96XX, 0, 0, 1, 0),
+ /* Marvell OcteonTX 2, 95 pass A0/A1 */
+ MIDR_RANGE(MIDR_MRVL_OCTEONTX2_95XX, 0, 0, 0, 1),
+};
+
+static const struct midr_range marvell_erratum_38500_cpus[] = {
+ /* ThunderX, T83 all passes */
+ MIDR_ALL_VERSIONS(MIDR_THUNDERX_83XX),
+};
+
+static const struct midr_range marvell_erratum_38545_cpus[] = {
+ /* Cavium ThunderX, T81 all passes */
+ MIDR_ALL_VERSIONS(MIDR_THUNDERX_81XX),
+ /* Cavium ThunderX, T83 all passes */
+ MIDR_ALL_VERSIONS(MIDR_THUNDERX_83XX),
+ /* Marvell OcteonTX 2, 95xx all passes */
+ MIDR_ALL_VERSIONS(MIDR_MRVL_OCTEONTX2_95XX),
+ /* Marvell OcteonTX 2, 95MM all passes */
+ MIDR_ALL_VERSIONS(MIDR_MRVL_OCTEONTX2_95MM),
+ /* Marvell OcteonTX 2, LOKI all passes */
+ MIDR_ALL_VERSIONS(MIDR_MRVL_OCTEONTX2_LOKI),
+ /* Marvell OcteonTX 2, 96xx all passes */
+ MIDR_ALL_VERSIONS(MIDR_MRVL_OCTEONTX2_96XX),
+ /* Marvell OcteonTX 2, 98xx all passes */
+ MIDR_ALL_VERSIONS(MIDR_MRVL_OCTEONTX2_98XX),
+};
const struct arm64_cpu_capabilities arm64_errata[] = {
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
@@ -542,6 +606,35 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
1, 0),
},
#endif
+#ifdef CONFIG_CAVIUM_ERRATUM_36890
+ {
+ .desc = "Cavium erratum 36890",
+ .capability = ARM64_WORKAROUND_CAVIUM_36890,
+ ERRATA_MIDR_RANGE_LIST(cavium_erratum_36890_cpus),
+ .cpu_enable = cpu_enable_trap_zva_access,
+ },
+#endif
+#ifdef CONFIG_MRVL_ERRATUM_38500
+ {
+ .desc = "Marvell erratum 38500",
+ .capability = ARM64_WORKAROUND_MRVL_38500,
+ ERRATA_MIDR_RANGE_LIST(marvell_erratum_38500_cpus),
+ },
+#endif
+#ifdef CONFIG_MRVL_ERRATUM_38545
+ {
+ .desc = "Marvell erratum 38545",
+ .capability = ARM64_WORKAROUND_MRVL_38545,
+ ERRATA_MIDR_RANGE_LIST(marvell_erratum_38545_cpus),
+ },
+#endif
+#ifdef CONFIG_MARVELL_ERRATUM_38627
+ {
+ .desc = "MARVELL erratum 38627",
+ .capability = ARM64_WORKAROUND_MRVL_38627,
+ ERRATA_MIDR_RANGE_LIST(marvell_erratum_38627_cpus),
+ },
+#endif
{
}
};
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index d5bc1dbdd2fd..362ead616c9c 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -182,7 +182,11 @@ alternative_else_nop_endif
#endif
.endm
+#ifdef CONFIG_GTI_WATCHDOG
+ .macro kernel_entry, el, regsize = 64, exc_el3 = 0
+#else
.macro kernel_entry, el, regsize = 64
+#endif
.if \regsize == 32
mov w0, w0 // zero upper 32 bits of x0
.endif
@@ -232,8 +236,31 @@ alternative_else_nop_endif
str x20, [tsk, #TSK_TI_ADDR_LIMIT]
/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
.endif /* \el == 0 */
+
+#ifdef CONFIG_GTI_WATCHDOG
+ .if \exc_el3 == 0
mrs x22, elr_el1
mrs x23, spsr_el1
+ .else
+ /*
+ * For a simulated exception return, load the ELR and SPSR from the
+ * per-cpu gti_elr and gti_spsr variables, which are shared with EL3.
+ * The EL3 GTI watchdog handler stores the interrupted register
+ * context in this shared per-cpu location, and ELR_EL1/2 and
+ * SPSR_EL1/2 are then used for the simulated return to EL1/EL2 set up
+ * by the EL3 NMI handler.
+ */
+ ldr_this_cpu x22, gti_elr, x29
+ ldr_this_cpu x23, gti_spsr, x29
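+ /*
+ * Re-establish the frame pointer: zero it for entries from EL0,
+ * otherwise reload x29 from its saved slot in the pt_regs frame
+ * (S_X28 + 8).
+ */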
+ .if \el == 0
+ mov x29, xzr
+ .else
+ ldr x29, [sp, #S_X28+8]
+ .endif
+ .endif /* \exc_el3 == 0 */
+#else
+ mrs x22, elr_el1
+ mrs x23, spsr_el1
+#endif
stp lr, x21, [sp, #S_LR]
/*
@@ -287,7 +314,11 @@ alternative_else_nop_endif
*/
.endm
+#ifdef CONFIG_GTI_WATCHDOG
+ .macro kernel_exit, el, exc_el3 = 0
+#else
.macro kernel_exit, el
+#endif
.if \el != 0
disable_daif
@@ -359,6 +390,15 @@ alternative_else_nop_endif
ldp x26, x27, [sp, #16 * 13]
ldp x28, x29, [sp, #16 * 14]
+#ifdef CONFIG_GTI_WATCHDOG
+ /*
+ * Cannot do an eret here as we have not
+ * entered from a real exception.
+ */
+ .if \exc_el3 == 1
+ b 6f
+ .endif
+#endif
.if \el == 0
alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
ldr lr, [sp, #S_LR]
@@ -383,6 +423,9 @@ alternative_else_nop_endif
eret
.endif
+#ifdef CONFIG_GTI_WATCHDOG
+6:
+#endif
sb
.endm
@@ -719,6 +762,49 @@ el0_irq_naked:
b ret_to_user
SYM_CODE_END(el0_irq)
+#ifdef CONFIG_GTI_WATCHDOG
+
+/*
+ * Simulate an exception return at the same ELx: the return address and
+ * PSTATE are loaded from ELR_ELx and SPSR_ELx.
+ */
+.globl el0_nmi_callback
+el0_nmi_callback:
+ sub sp, sp, #S_FRAME_SIZE
+ kernel_entry 0, 64, 1
+ mov x0, sp
+ bl nmi_kernel_callback
+ kernel_exit 0, 1
+ b ret_back_to_el3
+
+.globl el1_nmi_callback
+el1_nmi_callback:
+ sub sp, sp, #S_FRAME_SIZE
+ kernel_entry 1, 64, 1
+ mov x0, sp
+ bl nmi_kernel_callback
+ kernel_exit 1, 1
+
+ret_back_to_el3:
+ /*
+ * Return to the interrupted context via EL3: ATF must clean up
+ * first (drop its lock, complete the interrupt, etc.) before the
+ * interrupted context is restored.
+ * Issue the OCTEONTX_RESTORE_WDOG_CTXT (0xc2000c04) SMC call.
+ */
+ mov x0, #0xc04 // OCTEONTX_RESTORE_WDOG_CTXT
+ mov x7, #0x0 // #0
+ movk x0, #0xc200, lsl #16
+ mov x6, #0x0 // #0
+ mov x5, #0x0 // #0
+ mov x4, #0x0 // #0
+ mov x3, #0x0 // #0
+ mov x1, #0x0 // #0
+ smc #0x0
+#endif
+
SYM_CODE_START_LOCAL(el1_error)
kernel_entry 1
mrs x1, esr_el1
diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c
index 32ba6fbc3814..96c7cd39d9c0 100644
--- a/arch/arm64/kvm/arch_timer.c
+++ b/arch/arm64/kvm/arch_timer.c
@@ -10,6 +10,7 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/uaccess.h>
+#include <linux/delay.h>
#include <clocksource/arm_arch_timer.h>
#include <asm/arch_timer.h>
@@ -440,6 +441,10 @@ static void timer_save_state(struct arch_timer_context *ctx)
timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTV_CTL));
timer_set_cval(ctx, read_sysreg_el0(SYS_CNTV_CVAL));
+#ifdef CONFIG_MARVELL_ERRATUM_38627
+ if (static_branch_likely(&timer_errata_38627))
+ apply_mrvl_erratum_38627(index);
+#endif
/* Disable the timer */
write_sysreg_el0(0, SYS_CNTV_CTL);
isb();
@@ -449,6 +454,10 @@ static void timer_save_state(struct arch_timer_context *ctx)
timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTP_CTL));
timer_set_cval(ctx, read_sysreg_el0(SYS_CNTP_CVAL));
+#ifdef CONFIG_MARVELL_ERRATUM_38627
+ if (static_branch_likely(&timer_errata_38627))
+ apply_mrvl_erratum_38627(index);
+#endif
/* Disable the timer */
write_sysreg_el0(0, SYS_CNTP_CTL);
isb();
@@ -907,6 +916,34 @@ static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
return val;
}
+#ifdef CONFIG_MARVELL_ERRATUM_38627
+static DEFINE_STATIC_KEY_FALSE(timer_errata_38627);
+
+/*
+ * The workaround ensures at least a 2us gap between timer expiry and any
+ * timer programming, which could otherwise de-assert the timer interrupt.
+ * The time calculation below assumes a 100MHz counter, as the timer
+ * frequency is fixed at 100MHz on all affected parts.
+ */
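+/*
+ * For example, tval = -150 (expired 1.5us ago) takes the first branch
+ * and waits udelay(2 + (-150)/100) = udelay(1), i.e. ~2.5us after
+ * expiry; tval = 150 (expires in 1.5us) takes the second branch and
+ * waits udelay(3 + 150/100) = udelay(4), again ~2.5us after expiry.
+ */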
+static void apply_mrvl_erratum_38627(enum kvm_arch_timers index)
+{
+ int32_t tval;
+
+ if (index == TIMER_VTIMER)
+ tval = read_sysreg(cntv_tval_el0);
+ else
+ tval = read_sysreg(cntp_tval_el0);
+
+ /* Timer already expired, wait for (2 - expired time)us */
+ if ((tval > -200) && (tval < 0))
+ udelay(2 + tval/100);
+
+ /* Timer is about to expire, wait for 2us + time to expire */
+ if (tval >= 0 && tval < 200)
+ udelay(3 + tval/100);
+}
+#endif
+
u64 kvm_arm_timer_read_sysreg(struct kvm_vcpu *vcpu,
enum kvm_arch_timers tmr,
enum kvm_arch_timer_regs treg)
@@ -1061,6 +1098,11 @@ int kvm_timer_hyp_init(bool has_gic)
goto out_free_irq;
}
+#ifdef CONFIG_MARVELL_ERRATUM_38627
+ if (cpus_have_const_cap(ARM64_WORKAROUND_MRVL_38627))
+ static_branch_enable(&timer_errata_38627);
+#endif
+
cpuhp_setup_state(CPUHP_AP_KVM_ARM_TIMER_STARTING,
"kvm/arm/timer:starting", kvm_timer_starting_cpu,
kvm_timer_dying_cpu);
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
index 957a6d092d7a..942996e82603 100644
--- a/arch/arm64/lib/copy_from_user.S
+++ b/arch/arm64/lib/copy_from_user.S
@@ -19,6 +19,9 @@
* Returns:
* x0 - bytes not copied
*/
+ .macro ins_nops
+ nops 7
+ .endm
.macro ldrb1 reg, ptr, val
uao_user_alternative 9998f, ldrb, ldtrb, \reg, \ptr, \val
@@ -57,7 +60,17 @@ srcin .req x15
SYM_FUNC_START(__arch_copy_from_user)
add end, x0, x2
mov srcin, x1
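+/*
+ * Marvell erratum 38500 workaround: when the capability is set, use the
+ * copy variant from copy_template_nops.S, which interleaves NOPs with
+ * the memory accesses.
+ */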
+alternative_if_not ARM64_WORKAROUND_MRVL_38500
+ nop
+alternative_else
+ b .Lcopy_with_nops
+alternative_endif
+
#include "copy_template.S"
+ b .Lgetout
+.Lcopy_with_nops:
+#include "copy_template_nops.S"
+.Lgetout:
mov x0, #0 // Nothing to copy
ret
SYM_FUNC_END(__arch_copy_from_user)
diff --git a/arch/arm64/lib/copy_template_nops.S b/arch/arm64/lib/copy_template_nops.S
new file mode 100644
index 000000000000..a28aa34ff1ee
--- /dev/null
+++ b/arch/arm64/lib/copy_template_nops.S
@@ -0,0 +1,234 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2013 ARM Ltd.
+ * Copyright (C) 2013 Linaro.
+ */
+
+/*
+ * Copy a buffer from src to dest (alignment handled by the hardware)
+ *
+ * Parameters:
+ * x0 - dest
+ * x1 - src
+ * x2 - n
+ * Returns:
+ * x0 - dest
+ */
+dstin .req x0
+src .req x1
+count .req x2
+tmp1 .req x3
+tmp1w .req w3
+tmp2 .req x4
+tmp2w .req w4
+dst .req x6
+
+A_l .req x7
+A_h .req x8
+B_l .req x9
+B_h .req x10
+C_l .req x11
+C_h .req x12
+D_l .req x13
+D_h .req x14
+
+cvmtmp .req x15
+cvmctl .req x16
+cvmmemctl .req x17
+
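+ /*
+ * Save AP_CVMCTL_EL1 and AP_CVMMEMCTL0_EL1, then clear CVMCTL[43:40]
+ * and the CVMMEMCTL0 prefetch bit (35) for the duration of the copy;
+ * the saved values are restored at .Lexitfunc_nops.
+ */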
+ mrs cvmctl, S3_0_C11_C0_0 // AP_CVMCTL_EL1
+ mov cvmtmp, cvmctl
+ bfi cvmtmp, xzr, #40, #4 // clear [43:40]
+ msr S3_0_C11_C0_0, cvmtmp
+
+ mrs cvmmemctl, S3_0_C11_C0_4 // AP_CVMMEMCTL0_EL1
+ mov cvmtmp, cvmmemctl
+ bfi cvmtmp, xzr, #35, #1 // Bit 35 - prefetch disable/enable
+ msr S3_0_C11_C0_4, cvmtmp
+ dmb ld
+
+ mov dst, dstin
+ cmp count, #16
+ /* When the length is less than 16 bytes, the accesses are not aligned. */
+ b.lo .Ltiny15_nops
+
+ neg tmp2, src
+ ands tmp2, tmp2, #15/* Bytes to reach alignment. */
+ b.eq .LSrcAligned_nops
+ sub count, count, tmp2
+ /*
+ * Copy the leading data from src to dst in increasing address
+ * order. This eliminates the risk of overwriting source data
+ * when the distance between src and dst is less than 16.
+ * The memory accesses here are aligned.
+ */
+ tbz tmp2, #0, 1f
+ ldrb1 tmp1w, src, #1
+ ins_nops
+ strb1 tmp1w, dst, #1
+ ins_nops
+1:
+ tbz tmp2, #1, 2f
+ ldrh1 tmp1w, src, #2
+ ins_nops
+ strh1 tmp1w, dst, #2
+ ins_nops
+2:
+ tbz tmp2, #2, 3f
+ ldr1 tmp1w, src, #4
+ ins_nops
+ str1 tmp1w, dst, #4
+ ins_nops
+3:
+ tbz tmp2, #3, .LSrcAligned_nops
+ ldr1 tmp1, src, #8
+ ins_nops
+ str1 tmp1, dst, #8
+ ins_nops
+
+.LSrcAligned_nops:
+ cmp count, #64
+ b.ge .Lcpy_over64_nops
+ /*
+ * Deal with small copies quickly by dropping straight into the
+ * exit block.
+ */
+.Ltail63_nops:
+ /*
+ * Copy up to 48 bytes of data. At this point we only need the
+ * bottom 6 bits of count to be accurate.
+ */
+ ands tmp1, count, #0x30
+ b.eq .Ltiny15_nops
+ cmp tmp1w, #0x20
+ b.eq 1f
+ b.lt 2f
+ ldp1 A_l, A_h, src, #16
+ ins_nops
+ stp1 A_l, A_h, dst, #16
+ ins_nops
+1:
+ ldp1 A_l, A_h, src, #16
+ ins_nops
+ stp1 A_l, A_h, dst, #16
+ ins_nops
+2:
+ ldp1 A_l, A_h, src, #16
+ ins_nops
+ stp1 A_l, A_h, dst, #16
+ ins_nops
+.Ltiny15_nops:
+ /*
+ * Prefer to break one ldp/stp into several loads/stores that access
+ * memory in increasing address order, rather than loading/storing 16
+ * bytes from (src-16) to (dst-16) and walking src back to an aligned
+ * address as the original Cortex memcpy does. Keeping the original
+ * scheme would require memmove to guarantee that src is at least 16
+ * bytes above dst, otherwise source data could be overwritten when
+ * memmove calls memcpy directly. Dropping that scheme keeps memmove
+ * simple and decouples memcpy from memmove.
+ */
+ tbz count, #3, 1f
+ ldr1 tmp1, src, #8
+ ins_nops
+ str1 tmp1, dst, #8
+ ins_nops
+1:
+ tbz count, #2, 2f
+ ldr1 tmp1w, src, #4
+ ins_nops
+ str1 tmp1w, dst, #4
+ ins_nops
+2:
+ tbz count, #1, 3f
+ ldrh1 tmp1w, src, #2
+ ins_nops
+ strh1 tmp1w, dst, #2
+ ins_nops
+3:
+ tbz count, #0, .Lexitfunc_nops
+ ldrb1 tmp1w, src, #1
+ ins_nops
+ strb1 tmp1w, dst, #1
+ ins_nops
+
+ b .Lexitfunc_nops
+
+.Lcpy_over64_nops:
+ subs count, count, #128
+ b.ge .Lcpy_body_large_nops
+ /*
+ * Less than 128 bytes to copy, so handle 64 here and then jump
+ * to the tail.
+ */
+ ldp1 A_l, A_h, src, #16
+ ins_nops
+ stp1 A_l, A_h, dst, #16
+ ins_nops
+ ldp1 B_l, B_h, src, #16
+ ins_nops
+ ldp1 C_l, C_h, src, #16
+ ins_nops
+ stp1 B_l, B_h, dst, #16
+ ins_nops
+ stp1 C_l, C_h, dst, #16
+ ins_nops
+ ldp1 D_l, D_h, src, #16
+ ins_nops
+ stp1 D_l, D_h, dst, #16
+ ins_nops
+
+ tst count, #0x3f
+ b.ne .Ltail63_nops
+ b .Lexitfunc_nops
+
+ /*
+ * Critical loop. Start at a new cache line boundary. Assuming
+ * 64 bytes per line this ensures the entire loop is in one line.
+ */
+ .p2align L1_CACHE_SHIFT
+.Lcpy_body_large_nops:
+ /* pre-get 64 bytes data. */
+ ldp1 A_l, A_h, src, #16
+ ldp1 B_l, B_h, src, #16
+ ldp1 C_l, C_h, src, #16
+ ldp1 D_l, D_h, src, #16
+ ins_nops
+1:
+ /*
+ * Interleave the load of the next 64-byte block with the store of
+ * the previously loaded 64 bytes.
+ */
+ stp1 A_l, A_h, dst, #16
+ ins_nops
+ ldp1 A_l, A_h, src, #16
+ ins_nops
+ stp1 B_l, B_h, dst, #16
+ ins_nops
+ ldp1 B_l, B_h, src, #16
+ ins_nops
+ stp1 C_l, C_h, dst, #16
+ ins_nops
+ ldp1 C_l, C_h, src, #16
+ ins_nops
+ stp1 D_l, D_h, dst, #16
+ ins_nops
+ ldp1 D_l, D_h, src, #16
+ ins_nops
+ subs count, count, #64
+ b.ge 1b
+ stp1 A_l, A_h, dst, #16
+ ins_nops
+ stp1 B_l, B_h, dst, #16
+ ins_nops
+ stp1 C_l, C_h, dst, #16
+ ins_nops
+ stp1 D_l, D_h, dst, #16
+ ins_nops
+
+ tst count, #0x3f
+ b.ne .Ltail63_nops
+.Lexitfunc_nops:
+ msr S3_0_C11_C0_0, cvmctl
+ msr S3_0_C11_C0_4, cvmmemctl
diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
index 85705350ff35..d141c6c71fa3 100644
--- a/arch/arm64/lib/copy_to_user.S
+++ b/arch/arm64/lib/copy_to_user.S
@@ -19,6 +19,10 @@
* Returns:
* x0 - bytes not copied
*/
+ .macro ins_nops
+ nops 7
+ .endm
+
.macro ldrb1 reg, ptr, val
ldrb \reg, [\ptr], \val
.endm
@@ -56,7 +60,17 @@ srcin .req x15
SYM_FUNC_START(__arch_copy_to_user)
add end, x0, x2
mov srcin, x1
+alternative_if_not ARM64_WORKAROUND_MRVL_38500
+ nop
+alternative_else
+ b .Lcopy_with_nops
+alternative_endif
+
#include "copy_template.S"
+ b .Lgetout
+.Lcopy_with_nops:
+#include "copy_template_nops.S"
+.Lgetout:
mov x0, #0
ret
SYM_FUNC_END(__arch_copy_to_user)
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index 001737a8f309..4858bbf7f861 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -25,6 +25,13 @@ static unsigned long *asid_map;
static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
+
+#ifdef CONFIG_MRVL_OCTEONTX_EL0_INTR
+#define LOCKED_ASIDS_COUNT 128
+
+static u64 locked_asids[LOCKED_ASIDS_COUNT];
+#endif
+
static cpumask_t tlb_flush_pending;
static unsigned long max_pinned_asids;
@@ -124,6 +131,14 @@ static void flush_context(void)
per_cpu(reserved_asids, i) = asid;
}
+#ifdef CONFIG_MRVL_OCTEONTX_EL0_INTR
+ /* Set bits for locked ASIDs. */
+ for (i = 0; i < LOCKED_ASIDS_COUNT; i++) {
+ asid = locked_asids[i];
+ if (asid != 0)
+ __set_bit(asid & ~ASID_MASK, asid_map);
+ }
+#endif
/*
* Queue a TLB invalidation for each CPU to perform on next
* context-switch
@@ -131,9 +146,61 @@ static void flush_context(void)
cpumask_setall(&tlb_flush_pending);
}
+#ifdef CONFIG_MRVL_OCTEONTX_EL0_INTR
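+/*
+ * Record the current ASID of @mm in locked_asids[@index] so that it is
+ * re-marked as allocated across an ASID generation rollover (see the
+ * locked-ASID loop in flush_context()).
+ */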
+int lock_context(struct mm_struct *mm, int index)
+{
+ unsigned long flags;
+ u64 asid;
+
+ if ((index < 0) || (index >= LOCKED_ASIDS_COUNT))
+ return -1;
+ raw_spin_lock_irqsave(&cpu_asid_lock, flags);
+ asid = atomic64_read(&mm->context.id);
+ locked_asids[index] = asid;
+ raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
+ return 0;
+}
+EXPORT_SYMBOL(lock_context);
+
+int unlock_context_by_index(int index)
+{
+ unsigned long flags;
+
+ if ((index < 0) || (index >= LOCKED_ASIDS_COUNT))
+ return -1;
+ raw_spin_lock_irqsave(&cpu_asid_lock, flags);
+ locked_asids[index] = 0;
+ raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
+ return 0;
+}
+EXPORT_SYMBOL(unlock_context_by_index);
+
+bool unlock_context_by_mm(struct mm_struct *mm)
+{
+ int i;
+ unsigned long flags;
+ bool hit = false;
+ u64 asid;
+
+ raw_spin_lock_irqsave(&cpu_asid_lock, flags);
+ asid = atomic64_read(&mm->context.id);
+
+ for (i = 0; i < LOCKED_ASIDS_COUNT; i++) {
+ if (locked_asids[i] == asid) {
+ hit = true;
+ locked_asids[i] = 0;
+ }
+ }
+
+ raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
+
+ return hit;
+}
+#endif
+
static bool check_update_reserved_asid(u64 asid, u64 newasid)
{
- int cpu;
+ int i, cpu;
bool hit = false;
/*
@@ -152,6 +219,16 @@ static bool check_update_reserved_asid(u64 asid, u64 newasid)
}
}
+#ifdef CONFIG_MRVL_OCTEONTX_EL0_INTR
+ /* Same mechanism for locked ASIDs */
+ for (i = 0; i < LOCKED_ASIDS_COUNT; i++) {
+ if (locked_asids[i] == asid) {
+ hit = true;
+ locked_asids[i] = newasid;
+ }
+ }
+#endif
+
return hit;
}
diff --git a/drivers/acpi/apei/bert.c b/drivers/acpi/apei/bert.c
index 598fd19b65fa..2c5c8d2b30f1 100644
--- a/drivers/acpi/apei/bert.c
+++ b/drivers/acpi/apei/bert.c
@@ -24,6 +24,7 @@
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/io.h>
+#include <linux/of.h>
#include "apei-internal.h"
@@ -33,6 +34,26 @@
static int bert_disable;
+static struct acpi_table_bert *__read_mostly bert_tab;
+
+/*
+ * Check the device tree for the bed-bert driver, which provides BERT
+ * support in the absence of ACPI.
+ *
+ * Returns true if a bed-bert node is found in the device tree, false
+ * otherwise.
+ */
+static bool bed_bert_present_dt(void)
+{
+ struct device_node *np;
+
+ np = of_find_node_by_name(NULL, "bed-bert");
+ of_node_put(np);
+
+ return !!np;
+}
+
static void __init bert_print_all(struct acpi_bert_region *region,
unsigned int region_len)
{
@@ -85,10 +106,10 @@ static int __init setup_bert_disable(char *str)
}
__setup("bert_disable", setup_bert_disable);
-static int __init bert_check_table(struct acpi_table_bert *bert_tab)
+static int __init bert_check_table(struct acpi_table_bert *bert)
{
- if (bert_tab->header.length < sizeof(struct acpi_table_bert) ||
- bert_tab->region_length < sizeof(struct acpi_bert_region))
+ if (bert->header.length < sizeof(struct acpi_table_bert) ||
+ bert->region_length < sizeof(struct acpi_bert_region))
return -EINVAL;
return 0;
@@ -98,12 +119,12 @@ static int __init bert_init(void)
{
struct apei_resources bert_resources;
struct acpi_bert_region *boot_error_region;
- struct acpi_table_bert *bert_tab;
unsigned int region_len;
acpi_status status;
int rc = 0;
- if (acpi_disabled)
+ /* Permit BERT initialization if either ACPI or a DT bed-bert node is present */
+ if (acpi_disabled && !bed_bert_present_dt())
return 0;
if (bert_disable) {
@@ -111,7 +132,12 @@ static int __init bert_init(void)
return 0;
}
- status = acpi_get_table(ACPI_SIG_BERT, 0, (struct acpi_table_header **)&bert_tab);
+ /* BERT table may have been initialized by bert_table_set() */
+ if (bert_tab)
+ status = AE_OK;
+ else
+ status = acpi_get_table(ACPI_SIG_BERT, 0,
+ (struct acpi_table_header **)&bert_tab);
if (status == AE_NOT_FOUND)
return 0;
@@ -128,13 +154,15 @@ static int __init bert_init(void)
region_len = bert_tab->region_length;
apei_resources_init(&bert_resources);
- rc = apei_resources_add(&bert_resources, bert_tab->address,
- region_len, true);
- if (rc)
- goto out_put_bert_tab;
- rc = apei_resources_request(&bert_resources, "APEI BERT");
- if (rc)
- goto out_fini;
+ if (!acpi_disabled) {
+ rc = apei_resources_add(&bert_resources, bert_tab->address,
+ region_len, true);
+ if (rc)
+ goto out_put_bert_tab;
+ rc = apei_resources_request(&bert_resources, "APEI BERT");
+ if (rc)
+ goto out_fini;
+ }
boot_error_region = ioremap_cache(bert_tab->address, region_len);
if (boot_error_region) {
bert_print_all(boot_error_region, region_len);
@@ -143,7 +171,8 @@ static int __init bert_init(void)
rc = -ENOMEM;
}
- apei_resources_release(&bert_resources);
+ if (!acpi_disabled)
+ apei_resources_release(&bert_resources);
out_fini:
apei_resources_fini(&bert_resources);
out_put_bert_tab:
@@ -152,4 +181,12 @@ out_put_bert_tab:
return rc;
}
+/*
+ * This allows the BERT to be initialized externally, in the absence of ACPI.
+ */
+void __init bert_table_set(struct acpi_table_bert *table)
+{
+ bert_tab = table;
+}
+
late_initcall(bert_init);
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 0c8330ed1ffd..8fa0fe36f050 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -41,6 +41,7 @@
#include <linux/uuid.h>
#include <linux/ras.h>
#include <linux/task_work.h>
+#include <linux/of.h>
#include <acpi/actbl1.h>
#include <acpi/ghes.h>
@@ -143,6 +144,24 @@ static atomic_t ghes_estatus_cache_alloced;
static int ghes_panic_timeout __read_mostly = 30;
+
+/*
+ * Check the device tree for the sdei-ghes driver, which provides GHES
+ * support in the absence of ACPI.
+ *
+ * Returns true if an sdei-ghes node is found in the device tree, false
+ * otherwise.
+ */
+static bool sdei_ghes_present_dt(void)
+{
+ struct device_node *np;
+
+ np = of_find_node_by_name(NULL, "sdei-ghes");
+ of_node_put(np);
+ return !!np;
+}
+
static void __iomem *ghes_map(u64 pfn, enum fixed_addresses fixmap_idx)
{
phys_addr_t paddr;
@@ -231,6 +250,20 @@ static struct ghes *ghes_new(struct acpi_hest_generic *generic)
return ERR_PTR(-ENOMEM);
ghes->generic = generic;
+
+ /*
+ * If sdei-ghes is present (via device tree), ACPI mappings are not
+ * available and would fall back to early_memremap(). Any such
+ * outstanding 'early' mapping would later be flagged as a leak during
+ * late kernel initialization - see check_early_ioremap_leak().
+ * Since this mapping is only a sanity check (it is never used),
+ * skip it to avoid the spurious leak report.
+ * Notes:
+ * * the presence of the Device Tree disables ACPI
+ * * the status register is actually mapped at run time, when accessed
+ */
+ if (sdei_ghes_present_dt())
+ goto skip_map_status;
+
if (is_hest_type_generic_v2(ghes)) {
rc = map_gen_v2(ghes);
if (rc)
@@ -240,6 +273,8 @@ static struct ghes *ghes_new(struct acpi_hest_generic *generic)
rc = apei_map_generic_address(&generic->error_status_address);
if (rc)
goto err_unmap_read_ack_addr;
+
+skip_map_status:
error_block_length = generic->error_block_length;
if (error_block_length > GHES_ESTATUS_MAX_SIZE) {
pr_warn(FW_WARN GHES_PFX
@@ -257,6 +292,9 @@ static struct ghes *ghes_new(struct acpi_hest_generic *generic)
return ghes;
err_unmap_status_addr:
+ /* if sdei-ghes is present, status was not mapped - skip the UNmap */
+ if (sdei_ghes_present_dt())
+ goto err_free;
apei_unmap_generic_address(&generic->error_status_address);
err_unmap_read_ack_addr:
if (is_hest_type_generic_v2(ghes))
@@ -269,6 +307,9 @@ err_free:
static void ghes_fini(struct ghes *ghes)
{
kfree(ghes->estatus);
+ /* if sdei-ghes is present, status was not mapped - skip the UNmap */
+ if (sdei_ghes_present_dt())
+ return;
apei_unmap_generic_address(&ghes->generic->error_status_address);
if (is_hest_type_generic_v2(ghes))
unmap_gen_v2(ghes);
@@ -1461,7 +1502,8 @@ static int __init ghes_init(void)
{
int rc;
- if (acpi_disabled)
+ /* Permit GHES initialization if either ACPI or a DT sdei-ghes node is present */
+ if (acpi_disabled && !sdei_ghes_present_dt())
return -ENODEV;
switch (hest_disable) {
@@ -1485,15 +1527,17 @@ static int __init ghes_init(void)
if (rc)
goto err;
- rc = apei_osc_setup();
- if (rc == 0 && osc_sb_apei_support_acked)
- pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit and WHEA _OSC.\n");
- else if (rc == 0 && !osc_sb_apei_support_acked)
- pr_info(GHES_PFX "APEI firmware first mode is enabled by WHEA _OSC.\n");
- else if (rc && osc_sb_apei_support_acked)
- pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit.\n");
- else
- pr_info(GHES_PFX "Failed to enable APEI firmware first mode.\n");
+ if (!acpi_disabled) {
+ rc = apei_osc_setup();
+ if (rc == 0 && osc_sb_apei_support_acked)
+ pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit and WHEA _OSC.\n");
+ else if (rc == 0 && !osc_sb_apei_support_acked)
+ pr_info(GHES_PFX "APEI firmware first mode is enabled by WHEA _OSC.\n");
+ else if (rc && osc_sb_apei_support_acked)
+ pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit.\n");
+ else
+ pr_info(GHES_PFX "Failed to enable APEI firmware first mode.\n");
+ }
return 0;
err:
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
index 7bf48c2776fb..6eafbff29018 100644
--- a/drivers/acpi/apei/hest.c
+++ b/drivers/acpi/apei/hest.c
@@ -235,7 +235,10 @@ void __init acpi_hest_init(void)
return;
}
- status = acpi_get_table(ACPI_SIG_HEST, 0,
+ if (hest_tab)
+ status = AE_OK;
+ else
+ status = acpi_get_table(ACPI_SIG_HEST, 0,
(struct acpi_table_header **)&hest_tab);
if (status == AE_NOT_FOUND) {
hest_disable = HEST_NOT_FOUND;
@@ -268,3 +271,11 @@ err:
hest_disable = HEST_DISABLED;
acpi_put_table((struct acpi_table_header *)hest_tab);
}
+
+/*
+ * This allows the HEST to be initialized externally, in the absence of ACPI.
+ */
+void __init hest_table_set(struct acpi_table_hest *table)
+{
+ hest_tab = table;
+}
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index a7d9e4600d40..e5bcdaa23cd3 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -426,8 +426,8 @@ config HW_RANDOM_MESON
If unsure, say Y.
config HW_RANDOM_CAVIUM
- tristate "Cavium ThunderX Random Number Generator support"
- depends on HW_RANDOM && PCI && ARCH_THUNDER
+ tristate "Cavium ThunderX/OcteonTx Random Number Generator support"
+ depends on HW_RANDOM && PCI && (ARM64 || (COMPILE_TEST && 64BIT))
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
@@ -536,6 +536,19 @@ config HW_RANDOM_XIPHERA
To compile this driver as a module, choose M here: the
module will be called xiphera-trng.
+config HW_RANDOM_CN10K
+ tristate "Marvell CN10K Random Number Generator support"
+ depends on HW_RANDOM && PCI && ARM64
+ default HW_RANDOM
+ help
+ This driver provides support for the True Random Number
+ Generator available in Marvell CN10K SoCs.
+
+ To compile this driver as a module, choose M here.
+ The module will be called cn10k_rng.
+
+ If unsure, say Y.
+
endif # HW_RANDOM
config UML_RANDOM
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index 5da344509a4d..348b9f3efa2a 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -46,3 +46,4 @@ obj-$(CONFIG_HW_RANDOM_OPTEE) += optee-rng.o
obj-$(CONFIG_HW_RANDOM_NPCM) += npcm-rng.o
obj-$(CONFIG_HW_RANDOM_CCTRNG) += cctrng.o
obj-$(CONFIG_HW_RANDOM_XIPHERA) += xiphera-trng.o
+obj-$(CONFIG_HW_RANDOM_CN10K) += cn10k-rng.o
diff --git a/drivers/char/hw_random/cavium-rng-vf.c b/drivers/char/hw_random/cavium-rng-vf.c
index 6f66919652bf..168ed8c7a548 100644
--- a/drivers/char/hw_random/cavium-rng-vf.c
+++ b/drivers/char/hw_random/cavium-rng-vf.c
@@ -1,7 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Hardware Random Number Generator support.
- * Cavium Thunder, Marvell OcteonTx/Tx2 processor families.
+ * Hardware Random Number Generator support for Cavium, Inc.
+ * Thunder, OcteonTx/Tx2 processor families.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
*
* Copyright (C) 2016 Cavium, Inc.
*/
@@ -43,9 +47,6 @@ static inline bool is_octeontx(struct pci_dev *pdev)
MIDR_CPU_VAR_REV(3, 0)) ||
midr_is_cpu_model_range(read_cpuid_id(), MIDR_THUNDERX_81XX,
MIDR_CPU_VAR_REV(0, 0),
- MIDR_CPU_VAR_REV(3, 0)) ||
- midr_is_cpu_model_range(read_cpuid_id(), MIDR_THUNDERX,
- MIDR_CPU_VAR_REV(0, 0),
MIDR_CPU_VAR_REV(3, 0)))
return true;
diff --git a/drivers/char/hw_random/cn10k-rng.c b/drivers/char/hw_random/cn10k-rng.c
new file mode 100644
index 000000000000..07c7640dd0ce
--- /dev/null
+++ b/drivers/char/hw_random/cn10k-rng.c
@@ -0,0 +1,184 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell CN10K RVU Hardware Random Number Generator.
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/delay.h>
+
+#include <linux/arm-smccc.h>
+
+/* CSRs */
+#define RNM_CTL_STATUS 0x000
+#define RNM_ENTROPY_STATUS 0x008
+#define RNM_CONST 0x030
+#define RNM_EBG_ENT 0x048
+#define RNM_PF_EBG_HEALTH 0x050
+#define RNM_PF_RANDOM 0x400
+#define RNM_TRNG_RESULT 0x408
+
+struct cn10k_rng {
+ void __iomem *reg_base;
+ struct hwrng ops;
+ struct pci_dev *pdev;
+};
+
+#define PLAT_OCTEONTX_RESET_RNG_EBG_HEALTH_STATE 0xc2000b0f
+
+static int reset_rng_health_state(struct cn10k_rng *rng)
+{
+ struct arm_smccc_res res;
+
+ /* Send an SMC service call to reset the EBG health state */
+ arm_smccc_smc(PLAT_OCTEONTX_RESET_RNG_EBG_HEALTH_STATE, 0, 0, 0, 0, 0, 0, 0, &res);
+ if (res.a0 != 0UL)
+ return -EIO;
+
+ return 0;
+}
+
+static int check_rng_health(struct cn10k_rng *rng)
+{
+ u64 status;
+ int err;
+
+ /* Skip checking health */
+ if (!rng->reg_base)
+ return 0;
+
+ status = readq(rng->reg_base + RNM_PF_EBG_HEALTH);
+ if (status & BIT_ULL(20)) {
+ err = reset_rng_health_state(rng);
+ if (err) {
+ dev_err(&rng->pdev->dev, "HWRNG: Health test failed (status=%llx)\n",
+ status);
+ dev_err(&rng->pdev->dev, "HWRNG: error during reset\n");
+ }
+ }
+ return 0;
+}
+
+static void cn10k_read_trng(struct cn10k_rng *rng, u64 *value)
+{
+ u64 upper, lower;
+
+ *value = readq(rng->reg_base + RNM_PF_RANDOM);
+
+ /*
+ * The HW can run out of entropy if a large amount of random data is
+ * read in quick succession. A value of zero may therefore not be
+ * real random data from the HW.
+ */
+ if (!*value) {
+ upper = readq(rng->reg_base + RNM_PF_RANDOM);
+ lower = readq(rng->reg_base + RNM_PF_RANDOM);
+ while (!(upper & 0x00000000FFFFFFFFULL))
+ upper = readq(rng->reg_base + RNM_PF_RANDOM);
+ while (!(lower & 0xFFFFFFFF00000000ULL))
+ lower = readq(rng->reg_base + RNM_PF_RANDOM);
+
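+ /*
+ * Rebuild the 64-bit sample from the upper 32 bits of 'upper'
+ * and the lower 32 bits of 'lower'.
+ */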
+ *value = (upper & 0xFFFFFFFF00000000) | (lower & 0xFFFFFFFF);
+ }
+}
+
+static int cn10k_rng_read(struct hwrng *hwrng, void *data,
+ size_t max, bool wait)
+{
+ struct cn10k_rng *rng = (struct cn10k_rng *)hwrng->priv;
+ unsigned int size;
+ int err = 0;
+ u64 value;
+
+ err = check_rng_health(rng);
+ if (err)
+ return err;
+
+ size = max;
+
+ while (size >= 8) {
+ cn10k_read_trng(rng, &value);
+
+ *((u64 *)data) = (u64)value;
+ size -= 8;
+ data += 8;
+ }
+
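+ /* Copy any remaining tail bytes; a fresh TRNG word is read for each byte. */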
+ while (size > 0) {
+ cn10k_read_trng(rng, &value);
+
+ *((u8 *)data) = (u8)value;
+ size--;
+ data++;
+ }
+
+ return max - size;
+}
+
+static int cn10k_rng_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct cn10k_rng *rng;
+ int err;
+
+ rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
+ if (!rng)
+ return -ENOMEM;
+
+ rng->pdev = pdev;
+ pci_set_drvdata(pdev, rng);
+
+ rng->reg_base = pcim_iomap(pdev, 0, 0);
+ if (!rng->reg_base) {
+ dev_err(&pdev->dev, "Error while mapping CSRs, exiting\n");
+ return -ENOMEM;
+ }
+
+ rng->ops.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+ "cn10k-rng-%s", dev_name(&pdev->dev));
+ if (!rng->ops.name)
+ return -ENOMEM;
+
+ rng->ops.read = cn10k_rng_read;
+ rng->ops.quality = 1000;
+ rng->ops.priv = (unsigned long) rng;
+
+ reset_rng_health_state(rng);
+
+ err = devm_hwrng_register(&pdev->dev, &rng->ops);
+ if (err) {
+ dev_err(&pdev->dev, "Could not register hwrng device.\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static void cn10k_rng_remove(struct pci_dev *pdev)
+{
+ /* Nothing to do */
+}
+
+static const struct pci_device_id cn10k_rng_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xA098) }, /* RNG PF */
+ {0,},
+};
+
+MODULE_DEVICE_TABLE(pci, cn10k_rng_id_table);
+
+static struct pci_driver cn10k_rng_driver = {
+ .name = "cn10k_rng",
+ .id_table = cn10k_rng_id_table,
+ .probe = cn10k_rng_probe,
+ .remove = cn10k_rng_remove,
+};
+
+module_pci_driver(cn10k_rng_driver);
+MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
+MODULE_DESCRIPTION("Marvell CN10K HW RNG Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c
index e8f9621e7954..6a194b022da3 100644
--- a/drivers/char/hw_random/nomadik-rng.c
+++ b/drivers/char/hw_random/nomadik-rng.c
@@ -72,7 +72,7 @@ out_clk:
static void nmk_rng_remove(struct amba_device *dev)
{
amba_release_regions(dev);
- clk_disable_unprepare(rng_clk);
+ clk_disable(rng_clk);
}
static const struct amba_id nmk_rng_ids[] = {
diff --git a/drivers/clk/mvebu/ap-cpu-clk.c b/drivers/clk/mvebu/ap-cpu-clk.c
index 25de4b6da776..9791874eeb61 100644
--- a/drivers/clk/mvebu/ap-cpu-clk.c
+++ b/drivers/clk/mvebu/ap-cpu-clk.c
@@ -10,6 +10,7 @@
#define pr_fmt(fmt) "ap-cpu-clk: " fmt
+#include <linux/arm-smccc.h>
#include <linux/clk-provider.h>
#include <linux/clk.h>
#include <linux/mfd/syscon.h>
@@ -19,6 +20,7 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include "armada_ap_cp_helper.h"
+#include "soc/marvell/armada8k/fw.h"
#define AP806_CPU_CLUSTER0 0
#define AP806_CPU_CLUSTER1 1
@@ -139,8 +141,122 @@ struct ap_cpu_clk {
struct clk_hw hw;
struct regmap *pll_cr_base;
const struct cpu_dfs_regs *pll_regs;
+ phys_addr_t phys;
+ int (*clk_regmap_read)(struct ap_cpu_clk *clk, unsigned int reg,
+ unsigned int *val);
+ int (*clk_regmap_write)(struct ap_cpu_clk *clk, unsigned int reg,
+ unsigned int val);
+ int (*clk_regmap_update_bits)(struct ap_cpu_clk *clk, unsigned int reg,
+ unsigned int mask, unsigned int val);
+ int (*clk_regmap_read_poll_timeout)(struct ap_cpu_clk *clk, unsigned int reg,
+ unsigned int stable_bit);
+
};
+static int dfx_sread_smc(unsigned long addr, unsigned int *reg)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(MV_SIP_DFX, MV_SIP_DFX_SREAD, addr, 0, 0, 0, 0, 0, &res);
+
+ if (res.a0 == 0 && reg != NULL)
+ *reg = res.a1;
+
+ return res.a0;
+}
+
+static int dfx_swrite_smc(unsigned long addr, unsigned long val)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(MV_SIP_DFX, MV_SIP_DFX_SWRITE, addr, val,
+ 0, 0, 0, 0, &res);
+
+ return res.a0;
+}
+
+static int smc_regmap_read(struct ap_cpu_clk *clk, unsigned int reg,
+ unsigned int *val)
+{
+ return dfx_sread_smc(clk->phys + reg, val);
+}
+
+static int legacy_regmap_read(struct ap_cpu_clk *clk, unsigned int reg,
+ unsigned int *val)
+{
+ return regmap_read(clk->pll_cr_base, reg, val);
+}
+
+static int smc_regmap_write(struct ap_cpu_clk *clk, unsigned int reg,
+ unsigned int val)
+{
+ return dfx_swrite_smc(clk->phys + reg, val);
+}
+
+static int legacy_regmap_write(struct ap_cpu_clk *clk, unsigned int reg,
+ unsigned int val)
+{
+ return regmap_write(clk->pll_cr_base, reg, val);
+}
+
+static int smc_regmap_update_bits(struct ap_cpu_clk *clk, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ int ret;
+ unsigned int tmp;
+
+ ret = dfx_sread_smc(clk->phys + reg, &tmp);
+ if (ret != SMCCC_RET_SUCCESS)
+ return ret;
+
+ tmp &= ~mask;
+ tmp |= val & mask;
+
+ return dfx_swrite_smc(clk->phys + reg, tmp);
+}
+
+static int legacy_regmap_update_bits(struct ap_cpu_clk *clk, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ return regmap_update_bits(clk->pll_cr_base, reg, mask, val);
+}
+
+static int smc_regmap_read_poll_timeout(struct ap_cpu_clk *clk,
+ unsigned int reg,
+ unsigned int stable_bit)
+{
+ int ret;
+ u32 val;
+ ktime_t timeout;
+
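+ /*
+ * Poll the register via SMC until stable_bit is set, an SMC error
+ * occurs, or the timeout expires.
+ */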
+ timeout = ktime_add_us(ktime_get(), STATUS_POLL_TIMEOUT_US);
+ do {
+ ret = dfx_sread_smc(clk->phys + reg, &val);
+ if (ret || (val & stable_bit))
+ break;
+
+ usleep_range((STATUS_POLL_PERIOD_US >> 2) + 1,
+ STATUS_POLL_PERIOD_US);
+
+ } while (ktime_before(ktime_get(), timeout));
+
+ if (ret == SMCCC_RET_SUCCESS)
+ return (val & stable_bit) ? 0 : -ETIMEDOUT;
+
+ return ret;
+}
+
+static int legacy_regmap_read_poll_timeout(struct ap_cpu_clk *clk,
+ unsigned int reg,
+ unsigned int stable_bit)
+{
+ u32 val;
+
+ return regmap_read_poll_timeout(clk->pll_cr_base,
+ reg, val,
+ (val & stable_bit), STATUS_POLL_PERIOD_US,
+ STATUS_POLL_TIMEOUT_US);
+}
static unsigned long ap_cpu_clk_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
@@ -150,7 +266,7 @@ static unsigned long ap_cpu_clk_recalc_rate(struct clk_hw *hw,
cpu_clkdiv_reg = clk->pll_regs->divider_reg +
(clk->cluster * clk->pll_regs->cluster_offset);
- regmap_read(clk->pll_cr_base, cpu_clkdiv_reg, &cpu_clkdiv_ratio);
+ clk->clk_regmap_read(clk, cpu_clkdiv_reg, &cpu_clkdiv_ratio);
cpu_clkdiv_ratio &= clk->pll_regs->divider_mask;
cpu_clkdiv_ratio >>= clk->pll_regs->divider_offset;
@@ -171,7 +287,7 @@ static int ap_cpu_clk_set_rate(struct clk_hw *hw, unsigned long rate,
cpu_ratio_reg = clk->pll_regs->ratio_reg +
(clk->cluster * clk->pll_regs->cluster_offset);
- regmap_read(clk->pll_cr_base, cpu_clkdiv_reg, &reg);
+ clk->clk_regmap_read(clk, cpu_clkdiv_reg, &reg);
reg &= ~(clk->pll_regs->divider_mask);
reg |= (divider << clk->pll_regs->divider_offset);
@@ -184,29 +300,26 @@ static int ap_cpu_clk_set_rate(struct clk_hw *hw, unsigned long rate,
reg |= ((divider * clk->pll_regs->divider_ratio) <<
AP807_PLL_CR_1_CPU_CLK_DIV_RATIO_OFFSET);
}
- regmap_write(clk->pll_cr_base, cpu_clkdiv_reg, reg);
-
+ clk->clk_regmap_write(clk, cpu_clkdiv_reg, reg);
- regmap_update_bits(clk->pll_cr_base, cpu_force_reg,
- clk->pll_regs->force_mask,
- clk->pll_regs->force_mask);
-
- regmap_update_bits(clk->pll_cr_base, cpu_ratio_reg,
- BIT(clk->pll_regs->ratio_offset),
- BIT(clk->pll_regs->ratio_offset));
+ clk->clk_regmap_update_bits(clk, cpu_force_reg,
+ clk->pll_regs->force_mask,
+ clk->pll_regs->force_mask);
+ clk->clk_regmap_update_bits(clk, cpu_ratio_reg,
+ BIT(clk->pll_regs->ratio_offset),
+ BIT(clk->pll_regs->ratio_offset));
stable_bit = BIT(clk->pll_regs->ratio_state_offset +
clk->cluster *
clk->pll_regs->ratio_state_cluster_offset);
- ret = regmap_read_poll_timeout(clk->pll_cr_base,
- clk->pll_regs->ratio_state_reg, reg,
- reg & stable_bit, STATUS_POLL_PERIOD_US,
- STATUS_POLL_TIMEOUT_US);
+ ret = clk->clk_regmap_read_poll_timeout(clk,
+ clk->pll_regs->ratio_state_reg,
+ stable_bit);
if (ret)
return ret;
- regmap_update_bits(clk->pll_cr_base, cpu_ratio_reg,
- BIT(clk->pll_regs->ratio_offset), 0);
+ clk->clk_regmap_update_bits(clk, cpu_ratio_reg,
+ BIT(clk->pll_regs->ratio_offset), 0);
return 0;
}
@@ -235,6 +348,11 @@ static int ap_cpu_clock_probe(struct platform_device *pdev)
struct clk_hw_onecell_data *ap_cpu_data;
struct ap_cpu_clk *ap_cpu_clk;
struct regmap *regmap;
+ struct resource res;
+
+ ret = of_address_to_resource(np->parent, 0, &res);
+ if (ret)
+ return ret;
regmap = syscon_node_to_regmap(np->parent);
if (IS_ERR(regmap)) {
@@ -289,6 +407,7 @@ static int ap_cpu_clock_probe(struct platform_device *pdev)
const char *parent_name;
struct clk *parent;
int cpu, err;
+ unsigned int tmp;
err = of_property_read_u32(dn, "reg", &cpu);
if (WARN_ON(err)) {
@@ -319,6 +438,28 @@ static int ap_cpu_clock_probe(struct platform_device *pdev)
ap_cpu_clk[cluster_index].dev = dev;
ap_cpu_clk[cluster_index].pll_regs = of_device_get_match_data(&pdev->dev);
+ /* Get the physical address to hand to the firmware. */
+ ap_cpu_clk[cluster_index].phys = res.start;
+
+ /* Try to read a register via SMC and set up the DFX access helpers accordingly */
+ ret = smc_regmap_read(&ap_cpu_clk[cluster_index],
+ ap_cpu_clk[cluster_index].pll_regs->divider_reg,
+ &tmp);
+ if (ret == SMCCC_RET_SUCCESS) {
+ ap_cpu_clk[cluster_index].clk_regmap_read = smc_regmap_read;
+ ap_cpu_clk[cluster_index].clk_regmap_write = smc_regmap_write;
+ ap_cpu_clk[cluster_index].clk_regmap_update_bits = smc_regmap_update_bits;
+ ap_cpu_clk[cluster_index].clk_regmap_read_poll_timeout =
+ smc_regmap_read_poll_timeout;
+ } else {
+ ap_cpu_clk[cluster_index].clk_regmap_read = legacy_regmap_read;
+ ap_cpu_clk[cluster_index].clk_regmap_write = legacy_regmap_write;
+ ap_cpu_clk[cluster_index].clk_regmap_update_bits =
+ legacy_regmap_update_bits;
+ ap_cpu_clk[cluster_index].clk_regmap_read_poll_timeout =
+ legacy_regmap_read_poll_timeout;
+ }
+
init.name = ap_cpu_clk[cluster_index].clk_name;
init.ops = &ap_cpu_clk_ops;
init.num_parents = 1;
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index a0c6e88bebe0..b889e0d14892 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -348,6 +348,15 @@ config ARM_ARCH_TIMER_EVTSTREAM
config ARM_ARCH_TIMER_OOL_WORKAROUND
bool
+config MARVELL_ERRATUM_38627
+ bool "Workaround for Marvell Erratum 38627"
+ default y
+ depends on ARM_ARCH_TIMER && ARM64
+ select ARM_ARCH_TIMER_OOL_WORKAROUND
+ help
+ This option enables a workaround for Marvell erratum 38627,
+ which affects the generic timer on some OcteonTX2 parts:
+ programming the timer too close to its expiry can de-assert
+ the timer interrupt.
+
config FSL_ERRATUM_A008585
bool "Workaround for Freescale/NXP Erratum A-008585"
default y
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index f4881764bf8f..1bbd70e73c1a 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -24,6 +24,7 @@
#include <linux/sched/clock.h>
#include <linux/sched_clock.h>
#include <linux/acpi.h>
+#include <linux/delay.h>
#include <asm/arch_timer.h>
#include <asm/virt.h>
@@ -77,6 +78,8 @@ static enum vdso_clock_mode vdso_default = VDSO_CLOCKMODE_NONE;
static cpumask_t evtstrm_available = CPU_MASK_NONE;
static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);
+static __always_inline void set_next_event(const int access, unsigned long evt,
+ struct clock_event_device *clk);
static int __init early_evtstrm_cfg(char *buf)
{
@@ -420,6 +423,48 @@ static __maybe_unused int erratum_set_next_event_tval_phys(unsigned long evt,
return 0;
}
+#ifdef CONFIG_MARVELL_ERRATUM_38627
+/*
+ * The workaround ensures at least a 2us gap between timer expiry and any
+ * timer programming, which could otherwise de-assert the timer interrupt.
+ * The time calculation below assumes a 100MHz counter, as the timer
+ * frequency is fixed at 100MHz on all affected parts.
+ */
+static __always_inline
+void erratum_38627_set_next_event(const int access, unsigned long evt,
+ struct clock_event_device *clk)
+{
+ int32_t tval;
+
+ tval = arch_timer_reg_read(access, ARCH_TIMER_REG_TVAL, clk);
+
+ /* Timer already expired, wait for (2 - expired time)us */
+ if ((tval > -200) && (tval < 0))
+ udelay(2 + tval/100);
+
+ /* Timer is about to expire, wait for 2us + time to expire */
+ if (tval >= 0 && tval < 200)
+ udelay(3 + tval/100);
+
+ set_next_event(access, evt, clk);
+}
+
+static __maybe_unused
+int erratum_38627_set_next_event_tval_virt(unsigned long evt,
+ struct clock_event_device *clk)
+{
+ erratum_38627_set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk);
+ return 0;
+}
+
+static __maybe_unused
+int erratum_38627_set_next_event_tval_phys(unsigned long evt,
+ struct clock_event_device *clk)
+{
+ erratum_38627_set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk);
+ return 0;
+}
+#endif
+
static const struct arch_timer_erratum_workaround ool_workarounds[] = {
#ifdef CONFIG_FSL_ERRATUM_A008585
{
@@ -467,6 +512,15 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = {
.read_cntvct_el0 = arm64_858921_read_cntvct_el0,
},
#endif
+#ifdef CONFIG_MARVELL_ERRATUM_38627
+ {
+ .match_type = ate_match_local_cap_id,
+ .id = (void *)ARM64_WORKAROUND_MRVL_38627,
+ .desc = "Marvell erratum 38627",
+ .set_next_event_phys = erratum_38627_set_next_event_tval_phys,
+ .set_next_event_virt = erratum_38627_set_next_event_tval_virt,
+ },
+#endif
#ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1
{
.match_type = ate_match_dt,
diff --git a/drivers/cpufreq/armada-8k-cpufreq.c b/drivers/cpufreq/armada-8k-cpufreq.c
index b0fc5e84f857..36f85f0ed073 100644
--- a/drivers/cpufreq/armada-8k-cpufreq.c
+++ b/drivers/cpufreq/armada-8k-cpufreq.c
@@ -128,6 +128,9 @@ static int __init armada_8k_cpufreq_init(void)
struct cpumask cpus;
node = of_find_compatible_node(NULL, NULL, "marvell,ap806-cpu-clock");
+ if (!node)
+ node = of_find_compatible_node(NULL, NULL,
+ "marvell,ap807-cpu-clock");
if (!node || !of_device_is_available(node)) {
of_node_put(node);
return -ENODEV;
diff --git a/drivers/crypto/marvell/Kconfig b/drivers/crypto/marvell/Kconfig
index 13063384f958..e249aba23a9f 100644
--- a/drivers/crypto/marvell/Kconfig
+++ b/drivers/crypto/marvell/Kconfig
@@ -35,3 +35,20 @@ config CRYPTO_DEV_OCTEONTX_CPT
To compile this driver as module, choose M here:
the modules will be called octeontx-cpt and octeontx-cptvf
+
+config CRYPTO_DEV_OCTEONTX2_CPT
+ tristate "Support for Marvell OcteonTX2 CPT driver"
+ depends on ARCH_THUNDER || COMPILE_TEST
+ depends on PCI_MSI && 64BIT
+ depends on CRYPTO_LIB_AES
+ select OCTEONTX2_MBOX
+ select CRYPTO_SKCIPHER
+ select CRYPTO_HASH
+ select CRYPTO_AEAD
+ select CRYPTO_DEV_MARVELL
+ help
+ This driver allows you to utilize the Marvell Cryptographic
+ Accelerator Unit (CPT) found in the OcteonTX2 series of processors.
+
+ To compile this driver as module, choose M here:
+ the modules will be called rvu_cptpf and rvu_cptvf
diff --git a/drivers/crypto/marvell/Makefile b/drivers/crypto/marvell/Makefile
index 6c6a1519b0f1..39db6d9c0aaf 100644
--- a/drivers/crypto/marvell/Makefile
+++ b/drivers/crypto/marvell/Makefile
@@ -2,3 +2,4 @@
obj-$(CONFIG_CRYPTO_DEV_MARVELL_CESA) += cesa/
obj-$(CONFIG_CRYPTO_DEV_OCTEONTX_CPT) += octeontx/
+obj-$(CONFIG_CRYPTO_DEV_OCTEONTX2_CPT) += octeontx2/
diff --git a/drivers/crypto/marvell/octeontx2/Makefile b/drivers/crypto/marvell/octeontx2/Makefile
new file mode 100644
index 000000000000..965297e96954
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_CRYPTO_DEV_OCTEONTX2_CPT) += rvu_cptpf.o rvu_cptvf.o
+
+rvu_cptpf-objs := otx2_cptpf_main.o otx2_cptpf_mbox.o \
+ otx2_cpt_mbox_common.o otx2_cptpf_ucode.o otx2_cptlf.o \
+ cn10k_cpt.o otx2_cpt_devlink.o
+rvu_cptvf-objs := otx2_cptvf_main.o otx2_cptvf_mbox.o otx2_cptlf.o \
+ otx2_cpt_mbox_common.o otx2_cptvf_reqmgr.o \
+ otx2_cptvf_algs.o cn10k_cpt.o
+
+ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
diff --git a/drivers/crypto/marvell/octeontx2/cn10k_cpt.c b/drivers/crypto/marvell/octeontx2/cn10k_cpt.c
new file mode 100644
index 000000000000..7cdc6cbe678f
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/cn10k_cpt.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2021 Marvell. */
+
+#include "otx2_cptpf.h"
+#include "otx2_cptvf.h"
+#include "otx2_cptlf.h"
+#include "cn10k_cpt.h"
+
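+/*
+ * cn10k_lmt_flush() issues an LSE atomic store (STEOR) of 'val' to the LMT
+ * target address to flush the command previously copied into the LMTLINE;
+ * on non-arm64 builds (COMPILE_TEST) it degrades to a plain assignment.
+ */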
+#if defined(CONFIG_ARM64)
+#define cn10k_lmt_flush(val, addr) \
+({ \
+ __asm__ volatile(".cpu generic+lse\n" \
+ "steor %x[rf],[%[rs]]" \
+ : [rf]"+r"(val) \
+ : [rs]"r"(addr)); \
+})
+#else
+#define cn10k_lmt_flush(val, addr) ({ addr = val; })
+#endif
+
+static struct cpt_hw_ops otx2_hw_ops = {
+ .send_cmd = otx2_cpt_send_cmd,
+ .cpt_get_compcode = otx2_cpt_get_compcode,
+ .cpt_get_uc_compcode = otx2_cpt_get_uc_compcode,
+};
+
+static struct cpt_hw_ops cn10k_hw_ops = {
+ .send_cmd = cn10k_cpt_send_cmd,
+ .cpt_get_compcode = cn10k_cpt_get_compcode,
+ .cpt_get_uc_compcode = cn10k_cpt_get_uc_compcode,
+};
+
+void cn10k_cpt_send_cmd(union otx2_cpt_inst_s *cptinst, u32 insts_num,
+ struct otx2_cptlf_info *lf)
+{
+ void __iomem *lmtline = lf->lmtline;
+ u64 val = (lf->slot & 0x7FF);
+ u64 tar_addr = 0;
+
+ /* tar_addr<6:4> = Size of first LMTST - 1 in units of 128b. */
+ tar_addr |= (__force u64)lf->ioreg |
+ (((OTX2_CPT_INST_SIZE/16) - 1) & 0x7) << 4;
+ /*
+ * Make sure the memory areas pointed to by CPT_INST_S
+ * are flushed before the instruction is sent to CPT.
+ */
+ dma_wmb();
+
+ /* Copy CPT command to LMTLINE */
+ memcpy_toio(lmtline, cptinst, insts_num * OTX2_CPT_INST_SIZE);
+ cn10k_lmt_flush(val, tar_addr);
+}
+
+int cn10k_cptpf_lmtst_init(struct otx2_cptpf_dev *cptpf)
+{
+ struct pci_dev *pdev = cptpf->pdev;
+ resource_size_t size;
+ u64 lmt_base;
+
+ if (!test_bit(CN10K_LMTST, &cptpf->cap_flag)) {
+ cptpf->lfs.ops = &otx2_hw_ops;
+ return 0;
+ }
+
+ cptpf->lfs.ops = &cn10k_hw_ops;
+ lmt_base = readq(cptpf->reg_base + RVU_PF_LMTLINE_ADDR);
+ if (!lmt_base) {
+ dev_err(&pdev->dev, "PF LMTLINE address not configured\n");
+ return -ENOMEM;
+ }
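+ /*
+ * Exclude the (1 + max_vfs) mailbox regions from the mailbox BAR
+ * length when sizing the PF LMTLINE mapping.
+ */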
+ size = pci_resource_len(pdev, PCI_MBOX_BAR_NUM);
+ size -= ((1 + cptpf->max_vfs) * MBOX_SIZE);
+ cptpf->lfs.lmt_base = devm_ioremap_wc(&pdev->dev, lmt_base, size);
+ if (!cptpf->lfs.lmt_base) {
+ dev_err(&pdev->dev,
+ "Mapping of PF LMTLINE address failed\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+int cn10k_cptvf_lmtst_init(struct otx2_cptvf_dev *cptvf)
+{
+ struct pci_dev *pdev = cptvf->pdev;
+ resource_size_t offset, size;
+
+ if (!test_bit(CN10K_LMTST, &cptvf->cap_flag)) {
+ cptvf->lfs.ops = &otx2_hw_ops;
+ return 0;
+ }
+
+ cptvf->lfs.ops = &cn10k_hw_ops;
+ offset = pci_resource_start(pdev, PCI_MBOX_BAR_NUM);
+ size = pci_resource_len(pdev, PCI_MBOX_BAR_NUM);
+ /* Map VF LMTLINE region */
+ cptvf->lfs.lmt_base = devm_ioremap_wc(&pdev->dev, offset, size);
+ if (!cptvf->lfs.lmt_base) {
+ dev_err(&pdev->dev, "Unable to map BAR4\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
diff --git a/drivers/crypto/marvell/octeontx2/cn10k_cpt.h b/drivers/crypto/marvell/octeontx2/cn10k_cpt.h
new file mode 100644
index 000000000000..c091392b47e0
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/cn10k_cpt.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (C) 2021 Marvell.
+ */
+#ifndef __CN10K_CPT_H
+#define __CN10K_CPT_H
+
+#include "otx2_cpt_common.h"
+#include "otx2_cptpf.h"
+#include "otx2_cptvf.h"
+
+static inline u8 cn10k_cpt_get_compcode(union otx2_cpt_res_s *result)
+{
+ return ((struct cn10k_cpt_res_s *)result)->compcode;
+}
+
+static inline u8 cn10k_cpt_get_uc_compcode(union otx2_cpt_res_s *result)
+{
+ return ((struct cn10k_cpt_res_s *)result)->uc_compcode;
+}
+
+static inline u8 otx2_cpt_get_compcode(union otx2_cpt_res_s *result)
+{
+ return ((struct cn9k_cpt_res_s *)result)->compcode;
+}
+
+static inline u8 otx2_cpt_get_uc_compcode(union otx2_cpt_res_s *result)
+{
+ return ((struct cn9k_cpt_res_s *)result)->uc_compcode;
+}
+
+void cn10k_cpt_send_cmd(union otx2_cpt_inst_s *cptinst, u32 insts_num,
+ struct otx2_cptlf_info *lf);
+int cn10k_cptpf_lmtst_init(struct otx2_cptpf_dev *cptpf);
+int cn10k_cptvf_lmtst_init(struct otx2_cptvf_dev *cptvf);
+
+#endif /* __CN10K_CPT_H */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
new file mode 100644
index 000000000000..02676dc928fa
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (C) 2018 Marvell.
+ */
+
+#ifndef __OTX2_CPT_COMMON_H
+#define __OTX2_CPT_COMMON_H
+
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/crypto.h>
+#include <net/devlink.h>
+#include "otx2_cpt_hw_types.h"
+#include "rvu.h"
+#include "mbox.h"
+
+#define OTX2_CPT_MAX_VFS_NUM 128
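+/* Compose a per-slot RVU register address: (blk << 20) | (slot << 12) | offs */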
+#define OTX2_CPT_RVU_FUNC_ADDR_S(blk, slot, offs) \
+ (((blk) << 20) | ((slot) << 12) | (offs))
+#define OTX2_CPT_RVU_PFFUNC(pf, func) \
+ ((((pf) & RVU_PFVF_PF_MASK) << RVU_PFVF_PF_SHIFT) | \
+ (((func) & RVU_PFVF_FUNC_MASK) << RVU_PFVF_FUNC_SHIFT))
+
+#define OTX2_CPT_INVALID_CRYPTO_ENG_GRP 0xFF
+#define OTX2_CPT_NAME_LENGTH 64
+#define OTX2_CPT_DMA_MINALIGN 128
+
+/* HW capability flags */
+#define CN10K_MBOX 0
+#define CN10K_LMTST 1
+
+#define BAD_OTX2_CPT_ENG_TYPE OTX2_CPT_MAX_ENG_TYPES
+
+enum otx2_cpt_eng_type {
+ OTX2_CPT_AE_TYPES = 1,
+ OTX2_CPT_SE_TYPES = 2,
+ OTX2_CPT_IE_TYPES = 3,
+ OTX2_CPT_MAX_ENG_TYPES,
+};
+
+/* Take mbox id from end of CPT mbox range in AF (range 0xA00 - 0xBFF) */
+#define MBOX_MSG_RX_INLINE_IPSEC_LF_CFG 0xBFE
+#define MBOX_MSG_GET_ENG_GRP_NUM 0xBFF
+#define MBOX_MSG_GET_CAPS 0xBFD
+#define MBOX_MSG_GET_KVF_LIMITS 0xBFC
+
+/*
+ * Message request to configure a CPT LF for inline inbound IPsec.
+ * This message is only used between CPT PF <-> CPT VF.
+ */
+struct otx2_cpt_rx_inline_lf_cfg {
+ struct mbox_msghdr hdr;
+ u16 sso_pf_func;
+ u16 param1;
+ u16 param2;
+ u16 reserved;
+};
+
+/*
+ * Message request and response to get the engine group number
+ * to which a given type of engine (SE, AE, IE) is attached.
+ * These messages are only used between CPT PF <=> CPT VF.
+ */
+struct otx2_cpt_egrp_num_msg {
+ struct mbox_msghdr hdr;
+ u8 eng_type;
+};
+
+struct otx2_cpt_egrp_num_rsp {
+ struct mbox_msghdr hdr;
+ u8 eng_type;
+ u8 eng_grp_num;
+};
+
+/*
+ * Message request and response to get kernel crypto limits
+ * These messages are only used between CPT PF <-> CPT VF.
+ */
+struct otx2_cpt_kvf_limits_msg {
+ struct mbox_msghdr hdr;
+};
+
+struct otx2_cpt_kvf_limits_rsp {
+ struct mbox_msghdr hdr;
+ u8 kvf_limits;
+};
+
+/* CPT HW capabilities */
+union otx2_cpt_eng_caps {
+ u64 u;
+ struct {
+ u64 reserved_0_4:5;
+ u64 mul:1;
+ u64 sha1_sha2:1;
+ u64 chacha20:1;
+ u64 zuc_snow3g:1;
+ u64 sha3:1;
+ u64 aes:1;
+ u64 kasumi:1;
+ u64 des:1;
+ u64 crc:1;
+ u64 reserved_14_63:50;
+ };
+};
+
+/*
+ * Message request and response to get HW capabilities for each
+ * engine type (SE, IE, AE).
+ * These messages are only used between CPT PF <=> CPT VF.
+ */
+struct otx2_cpt_caps_msg {
+ struct mbox_msghdr hdr;
+};
+
+struct otx2_cpt_caps_rsp {
+ struct mbox_msghdr hdr;
+ u16 cpt_pf_drv_version;
+ u8 cpt_revision;
+ union otx2_cpt_eng_caps eng_caps[OTX2_CPT_MAX_ENG_TYPES];
+};
+
+static inline void otx2_cpt_write64(void __iomem *reg_base, u64 blk, u64 slot,
+ u64 offs, u64 val)
+{
+ writeq_relaxed(val, reg_base +
+ OTX2_CPT_RVU_FUNC_ADDR_S(blk, slot, offs));
+}
+
+static inline u64 otx2_cpt_read64(void __iomem *reg_base, u64 blk, u64 slot,
+ u64 offs)
+{
+ return readq_relaxed(reg_base +
+ OTX2_CPT_RVU_FUNC_ADDR_S(blk, slot, offs));
+}
+
+static inline bool is_dev_otx2(struct pci_dev *pdev)
+{
+ if (pdev->device == OTX2_CPT_PCI_PF_DEVICE_ID ||
+ pdev->device == OTX2_CPT_PCI_VF_DEVICE_ID)
+ return true;
+
+ return false;
+}
+
+static inline void otx2_cpt_set_hw_caps(struct pci_dev *pdev,
+ unsigned long *cap_flag)
+{
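+ /* Anything that is not an OcteonTX2 device is a CN10K part. */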
+ if (!is_dev_otx2(pdev)) {
+ __set_bit(CN10K_MBOX, cap_flag);
+ __set_bit(CN10K_LMTST, cap_flag);
+ }
+}
+
+
+int otx2_cpt_send_ready_msg(struct otx2_mbox *mbox, struct pci_dev *pdev);
+int otx2_cpt_send_mbox_msg(struct otx2_mbox *mbox, struct pci_dev *pdev);
+
+int otx2_cpt_send_af_reg_requests(struct otx2_mbox *mbox,
+ struct pci_dev *pdev);
+int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
+ u64 reg, u64 *val, int blkaddr);
+int otx2_cpt_add_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
+ u64 reg, u64 val, int blkaddr);
+int otx2_cpt_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
+ u64 reg, u64 *val, int blkaddr);
+int otx2_cpt_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
+ u64 reg, u64 val, int blkaddr);
+struct otx2_cptlfs_info;
+int otx2_cpt_attach_rscrs_msg(struct otx2_cptlfs_info *lfs);
+int otx2_cpt_detach_rsrcs_msg(struct otx2_cptlfs_info *lfs);
+int otx2_cpt_msix_offset_msg(struct otx2_cptlfs_info *lfs);
+int otx2_cpt_sync_mbox_msg(struct otx2_mbox *mbox);
+
+#endif /* __OTX2_CPT_COMMON_H */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c b/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c
new file mode 100644
index 000000000000..56b7b119f5d8
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2021 Marvell. */
+
+#include "otx2_cpt_devlink.h"
+
+static int otx2_cpt_dl_egrp_create(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct otx2_cpt_devlink *cpt_dl = devlink_priv(dl);
+ struct otx2_cptpf_dev *cptpf = cpt_dl->cptpf;
+
+ return otx2_cpt_dl_custom_egrp_create(cptpf, ctx);
+}
+
+static int otx2_cpt_dl_egrp_delete(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct otx2_cpt_devlink *cpt_dl = devlink_priv(dl);
+ struct otx2_cptpf_dev *cptpf = cpt_dl->cptpf;
+
+ return otx2_cpt_dl_custom_egrp_delete(cptpf, ctx);
+}
+
+static int otx2_cpt_dl_uc_info(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct otx2_cpt_devlink *cpt_dl = devlink_priv(dl);
+ struct otx2_cptpf_dev *cptpf = cpt_dl->cptpf;
+
+ otx2_cpt_print_uc_dbg_info(cptpf);
+
+ return 0;
+}
+
+enum otx2_cpt_dl_param_id {
+ OTX2_CPT_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ OTX2_CPT_DEVLINK_PARAM_ID_EGRP_CREATE,
+ OTX2_CPT_DEVLINK_PARAM_ID_EGRP_DELETE,
+};
+
+static const struct devlink_param otx2_cpt_dl_params[] = {
+ DEVLINK_PARAM_DRIVER(OTX2_CPT_DEVLINK_PARAM_ID_EGRP_CREATE,
+ "egrp_create", DEVLINK_PARAM_TYPE_STRING,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ otx2_cpt_dl_uc_info, otx2_cpt_dl_egrp_create,
+ NULL),
+ DEVLINK_PARAM_DRIVER(OTX2_CPT_DEVLINK_PARAM_ID_EGRP_DELETE,
+ "egrp_delete", DEVLINK_PARAM_TYPE_STRING,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ otx2_cpt_dl_uc_info, otx2_cpt_dl_egrp_delete,
+ NULL),
+};
+
+static int otx2_cpt_devlink_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ return devlink_info_driver_name_put(req, "rvu_cptpf");
+}
+
+static const struct devlink_ops otx2_cpt_devlink_ops = {
+ .info_get = otx2_cpt_devlink_info_get,
+};
+
+int otx2_cpt_register_dl(struct otx2_cptpf_dev *cptpf)
+{
+ struct device *dev = &cptpf->pdev->dev;
+ struct otx2_cpt_devlink *cpt_dl;
+ struct devlink *dl;
+ int err;
+
+ dl = devlink_alloc(&otx2_cpt_devlink_ops,
+ sizeof(struct otx2_cpt_devlink));
+ if (!dl) {
+ dev_warn(dev, "devlink_alloc failed\n");
+ return -ENOMEM;
+ }
+
+ err = devlink_register(dl, dev);
+ if (err) {
+ dev_err(dev, "devlink register failed with error %d\n", err);
+ goto dl_free;
+ }
+ cpt_dl = devlink_priv(dl);
+ cpt_dl->dl = dl;
+ cpt_dl->cptpf = cptpf;
+ cptpf->dl = dl;
+
+ err = devlink_params_register(dl, otx2_cpt_dl_params,
+ ARRAY_SIZE(otx2_cpt_dl_params));
+ if (err) {
+ dev_err(dev,
+ "devlink params register failed with error %d", err);
+ goto dl_unreg;
+ }
+ devlink_params_publish(dl);
+
+ return 0;
+
+dl_unreg:
+ devlink_unregister(dl);
+dl_free:
+ devlink_free(dl);
+ return err;
+}
+
+void otx2_cpt_unregister_dl(struct otx2_cptpf_dev *cptpf)
+{
+ struct devlink *dl = cptpf->dl;
+
+ if (!dl)
+ return;
+
+ devlink_params_unregister(dl, otx2_cpt_dl_params,
+ ARRAY_SIZE(otx2_cpt_dl_params));
+ devlink_unregister(dl);
+ devlink_free(dl);
+}
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.h
new file mode 100644
index 000000000000..082df4e159fb
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (C) 2018 Marvell.
+ */
+
+#ifndef __OTX2_CPT_DEVLINK_H
+#define __OTX2_CPT_DEVLINK_H
+
+#include "otx2_cpt_common.h"
+#include "otx2_cptpf.h"
+
+struct otx2_cpt_devlink {
+ struct devlink *dl;
+ struct otx2_cptpf_dev *cptpf;
+};
+
+/* Devlink APIs */
+int otx2_cpt_register_dl(struct otx2_cptpf_dev *cptpf);
+void otx2_cpt_unregister_dl(struct otx2_cptpf_dev *cptpf);
+
+#endif /* __OTX2_CPT_DEVLINK_H */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_hw_types.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_hw_types.h
new file mode 100644
index 000000000000..3e71eb9bc023
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_hw_types.h
@@ -0,0 +1,476 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (C) 2018 Marvell.
+ */
+
+#ifndef __OTX2_CPT_HW_TYPES_H
+#define __OTX2_CPT_HW_TYPES_H
+
+#include <linux/types.h>
+
+/* Device IDs */
+#define OTX2_CPT_PCI_PF_DEVICE_ID 0xA0FD
+#define OTX2_CPT_PCI_VF_DEVICE_ID 0xA0FE
+#define CN10K_CPT_PCI_PF_DEVICE_ID 0xA0F2
+#define CN10K_CPT_PCI_VF_DEVICE_ID 0xA0F3
+
+/* Mailbox interrupts offset */
+#define OTX2_CPT_PF_MBOX_INT 6
+#define OTX2_CPT_PF_INT_VEC_E_MBOXX(x, a) ((x) + (a))
+
+/* Maximum supported microcode groups */
+#define OTX2_CPT_MAX_ENGINE_GROUPS 8
+
+/* CPT instruction size in bytes */
+#define OTX2_CPT_INST_SIZE 64
+/*
+ * CPT VF MSIX vectors and their offsets
+ */
+#define OTX2_CPT_VF_MSIX_VECTORS 1
+#define OTX2_CPT_VF_INTR_MBOX_MASK BIT(0)
+#define CN10K_CPT_VF_MBOX_REGION (0xC0000)
+
+/* CPT LF MSIX vectors */
+#define OTX2_CPT_LF_MSIX_VECTORS 2
+
+/* OcteonTX2 CPT PF registers */
+#define OTX2_CPT_PF_CONSTANTS (0x0)
+#define OTX2_CPT_PF_RESET (0x100)
+#define OTX2_CPT_PF_DIAG (0x120)
+#define OTX2_CPT_PF_BIST_STATUS (0x160)
+#define OTX2_CPT_PF_ECC0_CTL (0x200)
+#define OTX2_CPT_PF_ECC0_FLIP (0x210)
+#define OTX2_CPT_PF_ECC0_INT (0x220)
+#define OTX2_CPT_PF_ECC0_INT_W1S (0x230)
+#define OTX2_CPT_PF_ECC0_ENA_W1S (0x240)
+#define OTX2_CPT_PF_ECC0_ENA_W1C (0x250)
+#define OTX2_CPT_PF_MBOX_INTX(b) (0x400 | (b) << 3)
+#define OTX2_CPT_PF_MBOX_INT_W1SX(b) (0x420 | (b) << 3)
+#define OTX2_CPT_PF_MBOX_ENA_W1CX(b) (0x440 | (b) << 3)
+#define OTX2_CPT_PF_MBOX_ENA_W1SX(b) (0x460 | (b) << 3)
+#define OTX2_CPT_PF_EXEC_INT (0x500)
+#define OTX2_CPT_PF_EXEC_INT_W1S (0x520)
+#define OTX2_CPT_PF_EXEC_ENA_W1C (0x540)
+#define OTX2_CPT_PF_EXEC_ENA_W1S (0x560)
+#define OTX2_CPT_PF_GX_EN(b) (0x600 | (b) << 3)
+#define OTX2_CPT_PF_EXEC_INFO (0x700)
+#define OTX2_CPT_PF_EXEC_BUSY (0x800)
+#define OTX2_CPT_PF_EXEC_INFO0 (0x900)
+#define OTX2_CPT_PF_EXEC_INFO1 (0x910)
+#define OTX2_CPT_PF_INST_REQ_PC (0x10000)
+#define OTX2_CPT_PF_INST_LATENCY_PC (0x10020)
+#define OTX2_CPT_PF_RD_REQ_PC (0x10040)
+#define OTX2_CPT_PF_RD_LATENCY_PC (0x10060)
+#define OTX2_CPT_PF_RD_UC_PC (0x10080)
+#define OTX2_CPT_PF_ACTIVE_CYCLES_PC (0x10100)
+#define OTX2_CPT_PF_EXE_CTL (0x4000000)
+#define OTX2_CPT_PF_EXE_STATUS (0x4000008)
+#define OTX2_CPT_PF_EXE_CLK (0x4000010)
+#define OTX2_CPT_PF_EXE_DBG_CTL (0x4000018)
+#define OTX2_CPT_PF_EXE_DBG_DATA (0x4000020)
+#define OTX2_CPT_PF_EXE_BIST_STATUS (0x4000028)
+#define OTX2_CPT_PF_EXE_REQ_TIMER (0x4000030)
+#define OTX2_CPT_PF_EXE_MEM_CTL (0x4000038)
+#define OTX2_CPT_PF_EXE_PERF_CTL (0x4001000)
+#define OTX2_CPT_PF_EXE_DBG_CNTX(b) (0x4001100 | (b) << 3)
+#define OTX2_CPT_PF_EXE_PERF_EVENT_CNT (0x4001180)
+#define OTX2_CPT_PF_EXE_EPCI_INBX_CNT(b) (0x4001200 | (b) << 3)
+#define OTX2_CPT_PF_EXE_EPCI_OUTBX_CNT(b) (0x4001240 | (b) << 3)
+#define OTX2_CPT_PF_ENGX_UCODE_BASE(b) (0x4002000 | (b) << 3)
+#define OTX2_CPT_PF_QX_CTL(b) (0x8000000 | (b) << 20)
+#define OTX2_CPT_PF_QX_GMCTL(b) (0x8000020 | (b) << 20)
+#define OTX2_CPT_PF_QX_CTL2(b) (0x8000100 | (b) << 20)
+#define OTX2_CPT_PF_VFX_MBOXX(b, c) (0x8001000 | (b) << 20 | \
+ (c) << 8)
+
+/* OcteonTX2 CPT LF registers */
+#define OTX2_CPT_LF_CTL (0x10)
+#define OTX2_CPT_LF_DONE_WAIT (0x30)
+#define OTX2_CPT_LF_INPROG (0x40)
+#define OTX2_CPT_LF_DONE (0x50)
+#define OTX2_CPT_LF_DONE_ACK (0x60)
+#define OTX2_CPT_LF_DONE_INT_ENA_W1S (0x90)
+#define OTX2_CPT_LF_DONE_INT_ENA_W1C (0xa0)
+#define OTX2_CPT_LF_MISC_INT (0xb0)
+#define OTX2_CPT_LF_MISC_INT_W1S (0xc0)
+#define OTX2_CPT_LF_MISC_INT_ENA_W1S (0xd0)
+#define OTX2_CPT_LF_MISC_INT_ENA_W1C (0xe0)
+#define OTX2_CPT_LF_Q_BASE (0xf0)
+#define OTX2_CPT_LF_Q_SIZE (0x100)
+#define OTX2_CPT_LF_Q_INST_PTR (0x110)
+#define OTX2_CPT_LF_Q_GRP_PTR (0x120)
+#define OTX2_CPT_LF_NQX(a) (0x400 | (a) << 3)
+#define OTX2_CPT_RVU_FUNC_BLKADDR_SHIFT 20
+/* LMT LF registers */
+#define OTX2_CPT_LMT_LFBASE BIT_ULL(OTX2_CPT_RVU_FUNC_BLKADDR_SHIFT)
+#define OTX2_CPT_LMT_LF_LMTLINEX(a) (OTX2_CPT_LMT_LFBASE | 0x000 | \
+ (a) << 12)
+/* RVU VF registers */
+#define OTX2_RVU_VF_INT (0x20)
+#define OTX2_RVU_VF_INT_W1S (0x28)
+#define OTX2_RVU_VF_INT_ENA_W1S (0x30)
+#define OTX2_RVU_VF_INT_ENA_W1C (0x38)
+
+/*
+ * Enumeration otx2_cpt_ucode_comp_code_e
+ *
+ * Enumerates the microcode completion/error codes.
+ */
+enum otx2_cpt_ucode_comp_code_e {
+ OTX2_CPT_UCC_SUCCESS = 0x00,
+ OTX2_CPT_UCC_INVALID_OPCODE = 0x01,
+
+ /* Scatter gather */
+ OTX2_CPT_UCC_SG_WRITE_LENGTH = 0x02,
+ OTX2_CPT_UCC_SG_LIST = 0x03,
+ OTX2_CPT_UCC_SG_NOT_SUPPORTED = 0x04,
+};
+
+/*
+ * Enumeration otx2_cpt_comp_e
+ *
+ * OcteonTX2 CPT Completion Enumeration
+ * Enumerates the values of CPT_RES_S[COMPCODE].
+ */
+enum otx2_cpt_comp_e {
+ OTX2_CPT_COMP_E_NOTDONE = 0x00,
+ OTX2_CPT_COMP_E_GOOD = 0x01,
+ OTX2_CPT_COMP_E_FAULT = 0x02,
+ OTX2_CPT_COMP_E_HWERR = 0x04,
+ OTX2_CPT_COMP_E_INSTERR = 0x05,
+ OTX2_CPT_COMP_E_WARN = 0x06
+};
+
+/*
+ * Enumeration otx2_cpt_vf_int_vec_e
+ *
+ * OcteonTX2 CPT VF MSI-X Vector Enumeration
+ * Enumerates the MSI-X interrupt vectors.
+ */
+enum otx2_cpt_vf_int_vec_e {
+ OTX2_CPT_VF_INT_VEC_E_MBOX = 0x00
+};
+
+/*
+ * Enumeration otx2_cpt_lf_int_vec_e
+ *
+ * OcteonTX2 CPT LF MSI-X Vector Enumeration
+ * Enumerates the MSI-X interrupt vectors.
+ */
+enum otx2_cpt_lf_int_vec_e {
+ OTX2_CPT_LF_INT_VEC_E_MISC = 0x00,
+ OTX2_CPT_LF_INT_VEC_E_DONE = 0x01
+};
+
+/*
+ * Structure otx2_cpt_inst_s
+ *
+ * CPT Instruction Structure
+ * This structure specifies the instruction layout. Instructions are
+ * stored in memory as little-endian unless CPT()_PF_Q()_CTL[INST_BE] is set.
+ * cpt_inst_s_s
+ * Word 0
+ * doneint:1 Done interrupt.
+ * 0 = No interrupts related to this instruction.
+ * 1 = When the instruction completes, CPT()_VQ()_DONE[DONE] will be
+ * incremented, and based on the rules described there an interrupt may
+ * occur.
+ * Word 1
+ * res_addr [127: 64] Result IOVA.
+ * If nonzero, specifies where to write CPT_RES_S.
+ * If zero, no result structure will be written.
+ * Address must be 16-byte aligned.
+ * Bits <63:49> are ignored by hardware; software should use a
+ * sign-extended bit <48> for forward compatibility.
+ * Word 2
+ * grp:10 [171:162] If [WQ_PTR] is nonzero, the SSO guest-group to use when
+ * CPT submits work to SSO.
+ * For the SSO to not discard the add-work request, FPA_PF_MAP() must map
+ * [GRP] and CPT()_PF_Q()_GMCTL[GMID] as valid.
+ * tt:2 [161:160] If [WQ_PTR] is nonzero, the SSO tag type to use when CPT
+ * submits work to SSO.
+ * tag:32 [159:128] If [WQ_PTR] is nonzero, the SSO tag to use when CPT
+ * submits work to SSO.
+ * Word 3
+ * wq_ptr [255:192] If [WQ_PTR] is nonzero, it is a pointer to a
+ * work-queue entry that CPT submits to the SSO after all context,
+ * output data, and result write operations are visible to other
+ * CNXXXX units and the cores. Bits <2:0> must be zero.
+ * Bits <63:49> are ignored by hardware; software should
+ * use a sign-extended bit <48> for forward compatibility.
+ * Internal:
+ * Bits <63:49>, <2:0> are ignored by hardware, treated as always 0x0.
+ * Word 4
+ * ei0; [319:256] Engine instruction word 0. Passed to the AE/SE.
+ * Word 5
+ * ei1; [383:320] Engine instruction word 1. Passed to the AE/SE.
+ * Word 6
+ * ei2; [447:384] Engine instruction word 2. Passed to the AE/SE.
+ * Word 7
+ * ei3; [511:448] Engine instruction word 3. Passed to the AE/SE.
+ *
+ */
+union otx2_cpt_inst_s {
+ u64 u[8];
+
+ struct {
+ /* Word 0 */
+ u64 nixtxl:3;
+ u64 doneint:1;
+ u64 nixtx_addr:60;
+ /* Word 1 */
+ u64 res_addr;
+ /* Word 2 */
+ u64 tag:32;
+ u64 tt:2;
+ u64 grp:10;
+ u64 reserved_172_175:4;
+ u64 rvu_pf_func:16;
+ /* Word 3 */
+ u64 qord:1;
+ u64 reserved_194_193:2;
+ u64 wq_ptr:61;
+ /* Word 4 */
+ u64 ei0;
+ /* Word 5 */
+ u64 ei1;
+ /* Word 6 */
+ u64 ei2;
+ /* Word 7 */
+ u64 ei3;
+ } s;
+};
+
+/*
+ * Structure otx2_cpt_res_s
+ *
+ * CPT Result Structure
+ * The CPT coprocessor writes the result structure after it completes a
+ * CPT_INST_S instruction. The result structure is exactly 16 bytes, and
+ * each instruction completion produces exactly one result structure.
+ *
+ * This structure is stored in memory as little-endian unless
+ * CPT()_PF_Q()_CTL[INST_BE] is set.
+ * cpt_res_s_s
+ * Word 0
+ * doneint:1 [16:16] Done interrupt. This bit is copied from the
+ * corresponding instruction's CPT_INST_S[DONEINT].
+ * compcode:8 [7:0] Indicates completion/error status of the CPT coprocessor
+ * for the associated instruction, as enumerated by CPT_COMP_E.
+ * Core software may write the memory location containing [COMPCODE] to
+ * 0x0 before ringing the doorbell, and then poll for completion by
+ * checking for a nonzero value.
+ * Once the core observes a nonzero [COMPCODE] value in this case, the CPT
+ * coprocessor will have also completed L2/DRAM write operations.
+ * Word 1
+ * reserved
+ *
+ */
+union otx2_cpt_res_s {
+ u64 u[2];
+
+ struct cn9k_cpt_res_s {
+ u64 compcode:8;
+ u64 uc_compcode:8;
+ u64 doneint:1;
+ u64 reserved_17_63:47;
+ u64 reserved_64_127;
+ } s;
+
+ struct cn10k_cpt_res_s {
+ u64 compcode:7;
+ u64 doneint:1;
+ u64 uc_compcode:8;
+ u64 rlen:16;
+ u64 spi:32;
+ u64 esn;
+ } cn10k;
+};
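A minimal polling sketch of the [COMPCODE] convention described above (illustrative only; the request manager in this patch relies on the DONE interrupt instead, and the hypothetical helper below assumes the CN9XX word-0 layout):

static int example_wait_for_result(union otx2_cpt_res_s *result)
{
	u64 word0;

	/* Software zeroes the result word before ringing the doorbell ... */
	WRITE_ONCE(result->u[0], 0x0);

	/* ... then polls until hardware writes a nonzero completion code
	 * (COMPCODE lives in bits <7:0> of word 0 on CN9XX).
	 */
	do {
		cpu_relax();
		word0 = READ_ONCE(result->u[0]);
	} while ((word0 & 0xff) == OTX2_CPT_COMP_E_NOTDONE);

	return (word0 & 0xff) == OTX2_CPT_COMP_E_GOOD ? 0 : -EIO;
}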
+
+/*
+ * Register (RVU_PF_BAR0) cpt#_af_constants1
+ *
+ * CPT AF Constants Register
+ * This register contains implementation-related parameters of CPT.
+ */
+union otx2_cptx_af_constants1 {
+ u64 u;
+ struct otx2_cptx_af_constants1_s {
+ u64 se:16;
+ u64 ie:16;
+ u64 ae:16;
+ u64 reserved_48_63:16;
+ } s;
+};
+
+/*
+ * RVU_PFVF_BAR2 - cpt_lf_misc_int
+ *
+ * This register contains the per-queue miscellaneous interrupts.
+ *
+ */
+union otx2_cptx_lf_misc_int {
+ u64 u;
+ struct otx2_cptx_lf_misc_int_s {
+ u64 reserved_0:1;
+ u64 nqerr:1;
+ u64 irde:1;
+ u64 nwrp:1;
+ u64 reserved_4:1;
+ u64 hwerr:1;
+ u64 fault:1;
+ u64 reserved_7_63:57;
+ } s;
+};
+
+/*
+ * RVU_PFVF_BAR2 - cpt_lf_misc_int_ena_w1s
+ *
+ * This register sets interrupt enable bits.
+ *
+ */
+union otx2_cptx_lf_misc_int_ena_w1s {
+ u64 u;
+ struct otx2_cptx_lf_misc_int_ena_w1s_s {
+ u64 reserved_0:1;
+ u64 nqerr:1;
+ u64 irde:1;
+ u64 nwrp:1;
+ u64 reserved_4:1;
+ u64 hwerr:1;
+ u64 fault:1;
+ u64 reserved_7_63:57;
+ } s;
+};
+
+/*
+ * RVU_PFVF_BAR2 - cpt_lf_ctl
+ *
+ * This register configures the queue.
+ *
+ * When the queue is not execution-quiescent (see CPT_LF_INPROG[EENA,INFLIGHT]),
+ * software must only write this register with [ENA]=0.
+ */
+union otx2_cptx_lf_ctl {
+ u64 u;
+ struct otx2_cptx_lf_ctl_s {
+ u64 ena:1;
+ u64 fc_ena:1;
+ u64 fc_up_crossing:1;
+ u64 reserved_3:1;
+ u64 fc_hyst_bits:4;
+ u64 reserved_8_63:56;
+ } s;
+};
+
+/*
+ * RVU_PFVF_BAR2 - cpt_lf_done_wait
+ *
+ * This register specifies the per-queue interrupt coalescing settings.
+ */
+union otx2_cptx_lf_done_wait {
+ u64 u;
+ struct otx2_cptx_lf_done_wait_s {
+ u64 num_wait:20;
+ u64 reserved_20_31:12;
+ u64 time_wait:16;
+ u64 reserved_48_63:16;
+ } s;
+};
+
+/*
+ * RVU_PFVF_BAR2 - cpt_lf_done
+ *
+ * This register contains the per-queue instruction done count.
+ */
+union otx2_cptx_lf_done {
+ u64 u;
+ struct otx2_cptx_lf_done_s {
+ u64 done:20;
+ u64 reserved_20_63:44;
+ } s;
+};
+
+/*
+ * RVU_PFVF_BAR2 - cpt_lf_inprog
+ *
+ * This register contains the per-queue count of instructions in flight.
+ *
+ */
+union otx2_cptx_lf_inprog {
+ u64 u;
+ struct otx2_cptx_lf_inprog_s {
+ u64 inflight:9;
+ u64 reserved_9_15:7;
+ u64 eena:1;
+ u64 grp_drp:1;
+ u64 reserved_18_30:13;
+ u64 grb_partial:1;
+ u64 grb_cnt:8;
+ u64 gwb_cnt:8;
+ u64 reserved_48_63:16;
+ } s;
+};
+
+/*
+ * RVU_PFVF_BAR2 - cpt_lf_q_base
+ *
+ * CPT initializes these CSR fields to these values on any CPT_LF_Q_BASE write:
+ * _ CPT_LF_Q_INST_PTR[XQ_XOR]=0.
+ * _ CPT_LF_Q_INST_PTR[NQ_PTR]=2.
+ * _ CPT_LF_Q_INST_PTR[DQ_PTR]=2.
+ * _ CPT_LF_Q_GRP_PTR[XQ_XOR]=0.
+ * _ CPT_LF_Q_GRP_PTR[NQ_PTR]=1.
+ * _ CPT_LF_Q_GRP_PTR[DQ_PTR]=1.
+ */
+union otx2_cptx_lf_q_base {
+ u64 u;
+ struct otx2_cptx_lf_q_base_s {
+ u64 fault:1;
+ u64 reserved_1_6:6;
+ u64 addr:46;
+ u64 reserved_53_63:11;
+ } s;
+};
+
+/*
+ * RVU_PFVF_BAR2 - cpt_lf_q_size
+ *
+ * CPT initializes these CSR fields to these values on any CPT_LF_Q_SIZE write:
+ * _ CPT_LF_Q_INST_PTR[XQ_XOR]=0.
+ * _ CPT_LF_Q_INST_PTR[NQ_PTR]=2.
+ * _ CPT_LF_Q_INST_PTR[DQ_PTR]=2.
+ * _ CPT_LF_Q_GRP_PTR[XQ_XOR]=0.
+ * _ CPT_LF_Q_GRP_PTR[NQ_PTR]=1.
+ * _ CPT_LF_Q_GRP_PTR[DQ_PTR]=1.
+ */
+union otx2_cptx_lf_q_size {
+ u64 u;
+ struct otx2_cptx_lf_q_size_s {
+ u64 size_div40:15;
+ u64 reserved_15_63:49;
+ } s;
+};
+
+/*
+ * RVU_PF_BAR0 - cpt_af_lf_ctl
+ *
+ * This register configures queues. This register should be written only
+ * when the queue is execution-quiescent (see CPT_LF_INPROG[INFLIGHT]).
+ */
+union otx2_cptx_af_lf_ctrl {
+ u64 u;
+ struct otx2_cptx_af_lf_ctrl_s {
+ u64 pri:1;
+ u64 reserved_1_8:8;
+ u64 pf_func_inst:1;
+ u64 cont_err:1;
+ u64 reserved_11_15:5;
+ u64 nixtx_en:1;
+ u64 reserved_17_47:31;
+ u64 grp:8;
+ u64 reserved_56_63:8;
+ } s;
+};
+
+#endif /* __OTX2_CPT_HW_TYPES_H */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
new file mode 100644
index 000000000000..a219ddf03eef
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2018 Marvell. */
+
+#include "otx2_cpt_common.h"
+#include "otx2_cptlf.h"
+
+int otx2_cpt_send_mbox_msg(struct otx2_mbox *mbox, struct pci_dev *pdev)
+{
+ int ret;
+
+ otx2_mbox_msg_send(mbox, 0);
+ ret = otx2_mbox_wait_for_rsp(mbox, 0);
+ if (ret == -EIO) {
+ dev_err(&pdev->dev, "RVU MBOX timeout.\n");
+ return ret;
+ } else if (ret) {
+ dev_err(&pdev->dev, "RVU MBOX error: %d.\n", ret);
+ return -EFAULT;
+ }
+ return ret;
+}
+
+int otx2_cpt_send_ready_msg(struct otx2_mbox *mbox, struct pci_dev *pdev)
+{
+ struct mbox_msghdr *req;
+
+ req = otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
+ sizeof(struct ready_msg_rsp));
+ if (req == NULL) {
+ dev_err(&pdev->dev, "RVU MBOX failed to get message.\n");
+ return -EFAULT;
+ }
+ req->id = MBOX_MSG_READY;
+ req->sig = OTX2_MBOX_REQ_SIG;
+ req->pcifunc = 0;
+
+ return otx2_cpt_send_mbox_msg(mbox, pdev);
+}
+
+int otx2_cpt_send_af_reg_requests(struct otx2_mbox *mbox, struct pci_dev *pdev)
+{
+ return otx2_cpt_send_mbox_msg(mbox, pdev);
+}
+
+int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
+ u64 reg, u64 *val, int blkaddr)
+{
+ struct cpt_rd_wr_reg_msg *reg_msg;
+
+ reg_msg = (struct cpt_rd_wr_reg_msg *)
+ otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*reg_msg),
+ sizeof(*reg_msg));
+ if (reg_msg == NULL) {
+ dev_err(&pdev->dev, "RVU MBOX failed to get message.\n");
+ return -EFAULT;
+ }
+
+ reg_msg->hdr.id = MBOX_MSG_CPT_RD_WR_REGISTER;
+ reg_msg->hdr.sig = OTX2_MBOX_REQ_SIG;
+ reg_msg->hdr.pcifunc = 0;
+
+ reg_msg->is_write = 0;
+ reg_msg->reg_offset = reg;
+ reg_msg->ret_val = val;
+ reg_msg->blkaddr = blkaddr;
+
+ return 0;
+}
+
+int otx2_cpt_add_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
+ u64 reg, u64 val, int blkaddr)
+{
+ struct cpt_rd_wr_reg_msg *reg_msg;
+
+ reg_msg = (struct cpt_rd_wr_reg_msg *)
+ otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*reg_msg),
+ sizeof(*reg_msg));
+ if (reg_msg == NULL) {
+ dev_err(&pdev->dev, "RVU MBOX failed to get message.\n");
+ return -EFAULT;
+ }
+
+ reg_msg->hdr.id = MBOX_MSG_CPT_RD_WR_REGISTER;
+ reg_msg->hdr.sig = OTX2_MBOX_REQ_SIG;
+ reg_msg->hdr.pcifunc = 0;
+
+ reg_msg->is_write = 1;
+ reg_msg->reg_offset = reg;
+ reg_msg->val = val;
+ reg_msg->blkaddr = blkaddr;
+
+ return 0;
+}
+
+int otx2_cpt_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
+ u64 reg, u64 *val, int blkaddr)
+{
+ int ret;
+
+ ret = otx2_cpt_add_read_af_reg(mbox, pdev, reg, val, blkaddr);
+ if (ret)
+ return ret;
+
+ return otx2_cpt_send_mbox_msg(mbox, pdev);
+}
+
+int otx2_cpt_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
+ u64 reg, u64 val, int blkaddr)
+{
+ int ret;
+
+ ret = otx2_cpt_add_write_af_reg(mbox, pdev, reg, val, blkaddr);
+ if (ret)
+ return ret;
+
+ return otx2_cpt_send_mbox_msg(mbox, pdev);
+}
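Note that otx2_cpt_add_read_af_reg()/otx2_cpt_add_write_af_reg() only queue CPT_RD_WR_REGISTER messages; nothing goes on the wire until otx2_cpt_send_af_reg_requests() (or one of the read/write wrappers above) is called. A hedged sketch of batching two writes into one mailbox exchange, with placeholder register offsets:

static int example_batch_af_writes(struct otx2_mbox *mbox,
				   struct pci_dev *pdev, int blkaddr)
{
	int ret;

	/* EXAMPLE_AF_REG0/1 are placeholders for real CPT AF offsets. */
	ret = otx2_cpt_add_write_af_reg(mbox, pdev, EXAMPLE_AF_REG0, 0x1,
					blkaddr);
	if (ret)
		return ret;

	ret = otx2_cpt_add_write_af_reg(mbox, pdev, EXAMPLE_AF_REG1, 0x0,
					blkaddr);
	if (ret)
		return ret;

	/* A single send/wait covers both queued messages. */
	return otx2_cpt_send_af_reg_requests(mbox, pdev);
}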
+
+int otx2_cpt_attach_rscrs_msg(struct otx2_cptlfs_info *lfs)
+{
+ struct otx2_mbox *mbox = lfs->mbox;
+ struct rsrc_attach *req;
+ int ret;
+
+ req = (struct rsrc_attach *)
+ otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
+ sizeof(struct msg_rsp));
+ if (req == NULL) {
+ dev_err(&lfs->pdev->dev, "RVU MBOX failed to get message.\n");
+ return -EFAULT;
+ }
+
+ req->hdr.id = MBOX_MSG_ATTACH_RESOURCES;
+ req->hdr.sig = OTX2_MBOX_REQ_SIG;
+ req->hdr.pcifunc = 0;
+ req->cptlfs = lfs->lfs_num;
+ req->cpt_blkaddr = lfs->blkaddr;
+ req->modify = 1;
+ ret = otx2_cpt_send_mbox_msg(mbox, lfs->pdev);
+ if (ret)
+ return ret;
+
+ if (!lfs->are_lfs_attached)
+ ret = -EINVAL;
+
+ return ret;
+}
+
+int otx2_cpt_detach_rsrcs_msg(struct otx2_cptlfs_info *lfs)
+{
+ struct otx2_mbox *mbox = lfs->mbox;
+ struct rsrc_detach *req;
+ int ret;
+
+ req = (struct rsrc_detach *)
+ otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
+ sizeof(struct msg_rsp));
+ if (req == NULL) {
+ dev_err(&lfs->pdev->dev, "RVU MBOX failed to get message.\n");
+ return -EFAULT;
+ }
+
+ req->hdr.id = MBOX_MSG_DETACH_RESOURCES;
+ req->hdr.sig = OTX2_MBOX_REQ_SIG;
+ req->hdr.pcifunc = 0;
+ req->cptlfs = 1;
+ ret = otx2_cpt_send_mbox_msg(mbox, lfs->pdev);
+ if (ret)
+ return ret;
+
+ if (lfs->are_lfs_attached)
+ ret = -EINVAL;
+
+ return ret;
+}
+
+int otx2_cpt_msix_offset_msg(struct otx2_cptlfs_info *lfs)
+{
+ struct otx2_mbox *mbox = lfs->mbox;
+ struct pci_dev *pdev = lfs->pdev;
+ struct mbox_msghdr *req;
+ int ret, i;
+
+ req = otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
+ sizeof(struct msix_offset_rsp));
+ if (req == NULL) {
+ dev_err(&pdev->dev, "RVU MBOX failed to get message.\n");
+ return -EFAULT;
+ }
+
+ req->id = MBOX_MSG_MSIX_OFFSET;
+ req->sig = OTX2_MBOX_REQ_SIG;
+ req->pcifunc = 0;
+ ret = otx2_cpt_send_mbox_msg(mbox, pdev);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < lfs->lfs_num; i++) {
+ if (lfs->lf[i].msix_offset == MSIX_VECTOR_INVALID) {
+ dev_err(&pdev->dev,
+ "Invalid msix offset %d for LF %d\n",
+ lfs->lf[i].msix_offset, i);
+ return -EINVAL;
+ }
+ }
+ return ret;
+}
+
+int otx2_cpt_sync_mbox_msg(struct otx2_mbox *mbox)
+{
+ int err;
+
+ if (!otx2_mbox_nonempty(mbox, 0))
+ return 0;
+ otx2_mbox_msg_send(mbox, 0);
+ err = otx2_mbox_wait_for_rsp(mbox, 0);
+ if (err)
+ return err;
+
+ return otx2_mbox_check_rsp_msgs(mbox, 0);
+}
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_reqmgr.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_reqmgr.h
new file mode 100644
index 000000000000..2e4532c382cc
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_reqmgr.h
@@ -0,0 +1,197 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (C) 2018 Marvell.
+ */
+
+#ifndef __OTX2_CPT_REQMGR_H
+#define __OTX2_CPT_REQMGR_H
+
+#include "otx2_cpt_common.h"
+
+/* Completion code size and initial value */
+#define OTX2_CPT_COMPLETION_CODE_SIZE 8
+#define OTX2_CPT_COMPLETION_CODE_INIT OTX2_CPT_COMP_E_NOTDONE
+/*
+ * The maximum total number of SG buffers is 100; they are divided equally
+ * between input and output.
+ */
+#define OTX2_CPT_MAX_SG_IN_CNT 50
+#define OTX2_CPT_MAX_SG_OUT_CNT 50
+
+/* DMA mode direct or SG */
+#define OTX2_CPT_DMA_MODE_DIRECT 0
+#define OTX2_CPT_DMA_MODE_SG 1
+
+/* Context source CPTR or DPTR */
+#define OTX2_CPT_FROM_CPTR 0
+#define OTX2_CPT_FROM_DPTR 1
+
+#define OTX2_CPT_MAX_REQ_SIZE 65535
+
+union otx2_cpt_opcode {
+ u16 flags;
+ struct {
+ u8 major;
+ u8 minor;
+ } s;
+};
+
+struct otx2_cptvf_request {
+ u32 param1;
+ u32 param2;
+ u16 dlen;
+ union otx2_cpt_opcode opcode;
+};
+
+/*
+ * CPT_INST_S software command definitions
+ * Words EI (0-3)
+ */
+union otx2_cpt_iq_cmd_word0 {
+ u64 u;
+ struct {
+ __be16 opcode;
+ __be16 param1;
+ __be16 param2;
+ __be16 dlen;
+ } s;
+};
+
+union otx2_cpt_iq_cmd_word3 {
+ u64 u;
+ struct {
+ u64 cptr:61;
+ u64 grp:3;
+ } s;
+};
+
+struct otx2_cpt_iq_command {
+ union otx2_cpt_iq_cmd_word0 cmd;
+ u64 dptr;
+ u64 rptr;
+ union otx2_cpt_iq_cmd_word3 cptr;
+};
+
+struct otx2_cpt_pending_entry {
+ void *completion_addr; /* Completion address */
+ void *info;
+ /* Kernel async request callback */
+ void (*callback)(int status, void *arg1, void *arg2);
+ struct crypto_async_request *areq; /* Async request callback arg */
+ u8 resume_sender; /* Notify sender to resume sending requests */
+ u8 busy; /* Entry status (free/busy) */
+};
+
+struct otx2_cpt_pending_queue {
+ struct otx2_cpt_pending_entry *head; /* Head of the queue */
+ u32 front; /* Process work from here */
+ u32 rear; /* Append new work here */
+ u32 pending_count; /* Pending requests count */
+ u32 qlen; /* Queue length */
+ spinlock_t lock; /* Queue lock */
+};
+
+struct otx2_cpt_buf_ptr {
+ u8 *vptr;
+ dma_addr_t dma_addr;
+ u16 size;
+};
+
+union otx2_cpt_ctrl_info {
+ u32 flags;
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u32 reserved_6_31:26;
+ u32 grp:3; /* Group bits */
+ u32 dma_mode:2; /* DMA mode */
+ u32 se_req:1; /* To SE core */
+#else
+ u32 se_req:1; /* To SE core */
+ u32 dma_mode:2; /* DMA mode */
+ u32 grp:3; /* Group bits */
+ u32 reserved_6_31:26;
+#endif
+ } s;
+};
+
+struct otx2_cpt_req_info {
+ /* Kernel async request callback */
+ void (*callback)(int status, void *arg1, void *arg2);
+ struct crypto_async_request *areq; /* Async request callback arg */
+ struct otx2_cptvf_request req;/* Request information (core specific) */
+ union otx2_cpt_ctrl_info ctrl;/* User control information */
+ struct otx2_cpt_buf_ptr in[OTX2_CPT_MAX_SG_IN_CNT];
+ struct otx2_cpt_buf_ptr out[OTX2_CPT_MAX_SG_OUT_CNT];
+ u8 *iv_out; /* IV to send back */
+ u16 rlen; /* Output length */
+ u8 in_cnt; /* Number of input buffers */
+ u8 out_cnt; /* Number of output buffers */
+ u8 req_type; /* Type of request */
+ u8 is_enc; /* Is a request an encryption request */
+ u8 is_trunc_hmac;/* Is truncated hmac used */
+};
+
+struct otx2_cpt_inst_info {
+ struct otx2_cpt_pending_entry *pentry;
+ struct otx2_cpt_req_info *req;
+ struct pci_dev *pdev;
+ void *completion_addr;
+ u8 *out_buffer;
+ u8 *in_buffer;
+ dma_addr_t dptr_baddr;
+ dma_addr_t rptr_baddr;
+ dma_addr_t comp_baddr;
+ unsigned long time_in;
+ u32 dlen;
+ u32 dma_len;
+ u8 extra_time;
+};
+
+struct otx2_cpt_sglist_component {
+ __be16 len0;
+ __be16 len1;
+ __be16 len2;
+ __be16 len3;
+ __be64 ptr0;
+ __be64 ptr1;
+ __be64 ptr2;
+ __be64 ptr3;
+};
+
+static inline void otx2_cpt_info_destroy(struct pci_dev *pdev,
+ struct otx2_cpt_inst_info *info)
+{
+ struct otx2_cpt_req_info *req;
+ int i;
+
+ if (info->dptr_baddr)
+ dma_unmap_single(&pdev->dev, info->dptr_baddr,
+ info->dma_len, DMA_BIDIRECTIONAL);
+
+ if (info->req) {
+ req = info->req;
+ for (i = 0; i < req->out_cnt; i++) {
+ if (req->out[i].dma_addr)
+ dma_unmap_single(&pdev->dev,
+ req->out[i].dma_addr,
+ req->out[i].size,
+ DMA_BIDIRECTIONAL);
+ }
+
+ for (i = 0; i < req->in_cnt; i++) {
+ if (req->in[i].dma_addr)
+ dma_unmap_single(&pdev->dev,
+ req->in[i].dma_addr,
+ req->in[i].size,
+ DMA_BIDIRECTIONAL);
+ }
+ }
+ kfree(info);
+}
+
+struct otx2_cptlf_wqe;
+int otx2_cpt_do_request(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
+ int cpu_num);
+void otx2_cpt_post_process(struct otx2_cptlf_wqe *wqe);
+int otx2_cpt_get_kcrypto_eng_grp_num(struct pci_dev *pdev);
+
+#endif /* __OTX2_CPT_REQMGR_H */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
new file mode 100644
index 000000000000..1d6b1b505ca7
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
@@ -0,0 +1,434 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2018 Marvell. */
+
+#include "otx2_cpt_common.h"
+#include "otx2_cptlf.h"
+#include "rvu_reg.h"
+
+#define CPT_TIMER_HOLD 0x03F
+#define CPT_COUNT_HOLD 32
+
+static void cptlf_do_set_done_time_wait(struct otx2_cptlf_info *lf,
+ int time_wait)
+{
+ union otx2_cptx_lf_done_wait done_wait;
+
+ done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, lf->lfs->blkaddr,
+ lf->slot, OTX2_CPT_LF_DONE_WAIT);
+ done_wait.s.time_wait = time_wait;
+ otx2_cpt_write64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
+ OTX2_CPT_LF_DONE_WAIT, done_wait.u);
+}
+
+static void cptlf_do_set_done_num_wait(struct otx2_cptlf_info *lf, int num_wait)
+{
+ union otx2_cptx_lf_done_wait done_wait;
+
+ done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, lf->lfs->blkaddr,
+ lf->slot, OTX2_CPT_LF_DONE_WAIT);
+ done_wait.s.num_wait = num_wait;
+ otx2_cpt_write64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
+ OTX2_CPT_LF_DONE_WAIT, done_wait.u);
+}
+
+static void cptlf_set_done_time_wait(struct otx2_cptlfs_info *lfs,
+ int time_wait)
+{
+ int slot;
+
+ for (slot = 0; slot < lfs->lfs_num; slot++)
+ cptlf_do_set_done_time_wait(&lfs->lf[slot], time_wait);
+}
+
+static void cptlf_set_done_num_wait(struct otx2_cptlfs_info *lfs, int num_wait)
+{
+ int slot;
+
+ for (slot = 0; slot < lfs->lfs_num; slot++)
+ cptlf_do_set_done_num_wait(&lfs->lf[slot], num_wait);
+}
+
+static int cptlf_set_pri(struct otx2_cptlf_info *lf, int pri)
+{
+ struct otx2_cptlfs_info *lfs = lf->lfs;
+ union otx2_cptx_af_lf_ctrl lf_ctrl;
+ int ret;
+
+ ret = otx2_cpt_read_af_reg(lfs->mbox, lfs->pdev,
+ CPT_AF_LFX_CTL(lf->slot),
+ &lf_ctrl.u, lfs->blkaddr);
+ if (ret)
+ return ret;
+
+ lf_ctrl.s.pri = pri ? 1 : 0;
+
+ ret = otx2_cpt_write_af_reg(lfs->mbox, lfs->pdev,
+ CPT_AF_LFX_CTL(lf->slot),
+ lf_ctrl.u, lfs->blkaddr);
+ return ret;
+}
+
+static int cptlf_set_eng_grps_mask(struct otx2_cptlf_info *lf,
+ int eng_grps_mask)
+{
+ struct otx2_cptlfs_info *lfs = lf->lfs;
+ union otx2_cptx_af_lf_ctrl lf_ctrl;
+ int ret;
+
+ ret = otx2_cpt_read_af_reg(lfs->mbox, lfs->pdev,
+ CPT_AF_LFX_CTL(lf->slot),
+ &lf_ctrl.u, lfs->blkaddr);
+ if (ret)
+ return ret;
+
+ lf_ctrl.s.grp = eng_grps_mask;
+
+ ret = otx2_cpt_write_af_reg(lfs->mbox, lfs->pdev,
+ CPT_AF_LFX_CTL(lf->slot),
+ lf_ctrl.u, lfs->blkaddr);
+ return ret;
+}
+
+static int cptlf_set_grp_and_pri(struct otx2_cptlfs_info *lfs,
+ int eng_grp_mask, int pri)
+{
+ int slot, ret = 0;
+
+ for (slot = 0; slot < lfs->lfs_num; slot++) {
+ ret = cptlf_set_pri(&lfs->lf[slot], pri);
+ if (ret)
+ return ret;
+
+ ret = cptlf_set_eng_grps_mask(&lfs->lf[slot], eng_grp_mask);
+ if (ret)
+ return ret;
+ }
+ return ret;
+}
+
+static void cptlf_hw_init(struct otx2_cptlfs_info *lfs)
+{
+ /* Disable instruction queues */
+ otx2_cptlf_disable_iqueues(lfs);
+
+ /* Set instruction queues base addresses */
+ otx2_cptlf_set_iqueues_base_addr(lfs);
+
+ /* Set instruction queues sizes */
+ otx2_cptlf_set_iqueues_size(lfs);
+
+ /* Set done interrupts time wait */
+ cptlf_set_done_time_wait(lfs, CPT_TIMER_HOLD);
+
+ /* Set done interrupts num wait */
+ cptlf_set_done_num_wait(lfs, CPT_COUNT_HOLD);
+
+ /* Enable instruction queues */
+ otx2_cptlf_enable_iqueues(lfs);
+}
+
+static void cptlf_hw_cleanup(struct otx2_cptlfs_info *lfs)
+{
+ /* Disable instruction queues */
+ otx2_cptlf_disable_iqueues(lfs);
+}
+
+static void cptlf_set_misc_intrs(struct otx2_cptlfs_info *lfs, u8 enable)
+{
+ union otx2_cptx_lf_misc_int_ena_w1s irq_misc = { .u = 0x0 };
+ u64 reg = enable ? OTX2_CPT_LF_MISC_INT_ENA_W1S :
+ OTX2_CPT_LF_MISC_INT_ENA_W1C;
+ int slot;
+
+ irq_misc.s.fault = 0x1;
+ irq_misc.s.hwerr = 0x1;
+ irq_misc.s.irde = 0x1;
+ irq_misc.s.nqerr = 0x1;
+ irq_misc.s.nwrp = 0x1;
+
+ for (slot = 0; slot < lfs->lfs_num; slot++)
+ otx2_cpt_write64(lfs->reg_base, lfs->blkaddr, slot, reg,
+ irq_misc.u);
+}
+
+static void cptlf_enable_intrs(struct otx2_cptlfs_info *lfs)
+{
+ int slot;
+
+ /* Enable done interrupts */
+ for (slot = 0; slot < lfs->lfs_num; slot++)
+ otx2_cpt_write64(lfs->reg_base, lfs->blkaddr, slot,
+ OTX2_CPT_LF_DONE_INT_ENA_W1S, 0x1);
+ /* Enable Misc interrupts */
+ cptlf_set_misc_intrs(lfs, true);
+}
+
+static void cptlf_disable_intrs(struct otx2_cptlfs_info *lfs)
+{
+ int slot;
+
+ for (slot = 0; slot < lfs->lfs_num; slot++)
+ otx2_cpt_write64(lfs->reg_base, lfs->blkaddr, slot,
+ OTX2_CPT_LF_DONE_INT_ENA_W1C, 0x1);
+ cptlf_set_misc_intrs(lfs, false);
+}
+
+static inline int cptlf_read_done_cnt(struct otx2_cptlf_info *lf)
+{
+ union otx2_cptx_lf_done irq_cnt;
+
+ irq_cnt.u = otx2_cpt_read64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
+ OTX2_CPT_LF_DONE);
+ return irq_cnt.s.done;
+}
+
+static irqreturn_t cptlf_misc_intr_handler(int __always_unused irq, void *arg)
+{
+ union otx2_cptx_lf_misc_int irq_misc, irq_misc_ack;
+ struct otx2_cptlf_info *lf = arg;
+ struct device *dev;
+
+ dev = &lf->lfs->pdev->dev;
+ irq_misc.u = otx2_cpt_read64(lf->lfs->reg_base, lf->lfs->blkaddr,
+ lf->slot, OTX2_CPT_LF_MISC_INT);
+ irq_misc_ack.u = 0x0;
+
+ if (irq_misc.s.fault) {
+ dev_err(dev, "Memory error detected while executing CPT_INST_S, LF %d.\n",
+ lf->slot);
+ irq_misc_ack.s.fault = 0x1;
+
+ } else if (irq_misc.s.hwerr) {
+ dev_err(dev, "HW error from an engine executing CPT_INST_S, LF %d.",
+ lf->slot);
+ irq_misc_ack.s.hwerr = 0x1;
+
+ } else if (irq_misc.s.nwrp) {
+ dev_err(dev, "SMMU fault while writing CPT_RES_S to CPT_INST_S[RES_ADDR], LF %d.\n",
+ lf->slot);
+ irq_misc_ack.s.nwrp = 0x1;
+
+ } else if (irq_misc.s.irde) {
+ dev_err(dev, "Memory error when accessing instruction memory queue CPT_LF_Q_BASE[ADDR].\n");
+ irq_misc_ack.s.irde = 0x1;
+
+ } else if (irq_misc.s.nqerr) {
+ dev_err(dev, "Error enqueuing an instruction received at CPT_LF_NQ.\n");
+ irq_misc_ack.s.nqerr = 0x1;
+
+ } else {
+ dev_err(dev, "Unhandled interrupt in CPT LF %d\n", lf->slot);
+ return IRQ_NONE;
+ }
+
+ /* Acknowledge interrupts */
+ otx2_cpt_write64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
+ OTX2_CPT_LF_MISC_INT, irq_misc_ack.u);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t cptlf_done_intr_handler(int irq, void *arg)
+{
+ union otx2_cptx_lf_done_wait done_wait;
+ struct otx2_cptlf_info *lf = arg;
+ int irq_cnt;
+
+ /* Read the number of completed requests */
+ irq_cnt = cptlf_read_done_cnt(lf);
+ if (irq_cnt) {
+ done_wait.u = otx2_cpt_read64(lf->lfs->reg_base,
+ lf->lfs->blkaddr,
+ lf->slot, OTX2_CPT_LF_DONE_WAIT);
+ /* Acknowledge the number of completed requests */
+ otx2_cpt_write64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
+ OTX2_CPT_LF_DONE_ACK, irq_cnt);
+
+ otx2_cpt_write64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
+ OTX2_CPT_LF_DONE_WAIT, done_wait.u);
+ if (unlikely(!lf->wqe)) {
+ dev_err(&lf->lfs->pdev->dev, "No work for LF %d\n",
+ lf->slot);
+ return IRQ_NONE;
+ }
+
+ /* Schedule processing of completed requests */
+ tasklet_hi_schedule(&lf->wqe->work);
+ }
+ return IRQ_HANDLED;
+}
+
+void otx2_cptlf_unregister_interrupts(struct otx2_cptlfs_info *lfs)
+{
+ int i, offs, vector;
+
+ for (i = 0; i < lfs->lfs_num; i++) {
+ for (offs = 0; offs < OTX2_CPT_LF_MSIX_VECTORS; offs++) {
+ if (!lfs->lf[i].is_irq_reg[offs])
+ continue;
+
+ vector = pci_irq_vector(lfs->pdev,
+ lfs->lf[i].msix_offset + offs);
+ free_irq(vector, &lfs->lf[i]);
+ lfs->lf[i].is_irq_reg[offs] = false;
+ }
+ }
+ cptlf_disable_intrs(lfs);
+}
+
+static int cptlf_do_register_interrrupts(struct otx2_cptlfs_info *lfs,
+ int lf_num, int irq_offset,
+ irq_handler_t handler)
+{
+ int ret, vector;
+
+ vector = pci_irq_vector(lfs->pdev, lfs->lf[lf_num].msix_offset +
+ irq_offset);
+ ret = request_irq(vector, handler, 0,
+ lfs->lf[lf_num].irq_name[irq_offset],
+ &lfs->lf[lf_num]);
+ if (ret)
+ return ret;
+
+ lfs->lf[lf_num].is_irq_reg[irq_offset] = true;
+
+ return ret;
+}
+
+int otx2_cptlf_register_interrupts(struct otx2_cptlfs_info *lfs)
+{
+ int irq_offs, ret, i;
+
+ for (i = 0; i < lfs->lfs_num; i++) {
+ irq_offs = OTX2_CPT_LF_INT_VEC_E_MISC;
+ snprintf(lfs->lf[i].irq_name[irq_offs], 32, "CPTLF Misc%d", i);
+ ret = cptlf_do_register_interrrupts(lfs, i, irq_offs,
+ cptlf_misc_intr_handler);
+ if (ret)
+ goto free_irq;
+
+ irq_offs = OTX2_CPT_LF_INT_VEC_E_DONE;
+ snprintf(lfs->lf[i].irq_name[irq_offs], 32, "OTX2_CPTLF Done%d",
+ i);
+ ret = cptlf_do_register_interrrupts(lfs, i, irq_offs,
+ cptlf_done_intr_handler);
+ if (ret)
+ goto free_irq;
+ }
+ cptlf_enable_intrs(lfs);
+ return 0;
+
+free_irq:
+ otx2_cptlf_unregister_interrupts(lfs);
+ return ret;
+}
+
+void otx2_cptlf_free_irqs_affinity(struct otx2_cptlfs_info *lfs)
+{
+ int slot, offs;
+
+ for (slot = 0; slot < lfs->lfs_num; slot++) {
+ for (offs = 0; offs < OTX2_CPT_LF_MSIX_VECTORS; offs++)
+ irq_set_affinity_hint(pci_irq_vector(lfs->pdev,
+ lfs->lf[slot].msix_offset +
+ offs), NULL);
+ free_cpumask_var(lfs->lf[slot].affinity_mask);
+ }
+}
+
+int otx2_cptlf_set_irqs_affinity(struct otx2_cptlfs_info *lfs)
+{
+ struct otx2_cptlf_info *lf = lfs->lf;
+ int slot, offs, ret;
+
+ for (slot = 0; slot < lfs->lfs_num; slot++) {
+ if (!zalloc_cpumask_var(&lf[slot].affinity_mask, GFP_KERNEL)) {
+ dev_err(&lfs->pdev->dev,
+ "cpumask allocation failed for LF %d", slot);
+ ret = -ENOMEM;
+ goto free_affinity_mask;
+ }
+
+ cpumask_set_cpu(cpumask_local_spread(slot,
+ dev_to_node(&lfs->pdev->dev)),
+ lf[slot].affinity_mask);
+
+ for (offs = 0; offs < OTX2_CPT_LF_MSIX_VECTORS; offs++) {
+ ret = irq_set_affinity_hint(pci_irq_vector(lfs->pdev,
+ lf[slot].msix_offset + offs),
+ lf[slot].affinity_mask);
+ if (ret)
+ goto free_affinity_mask;
+ }
+ }
+ return 0;
+
+free_affinity_mask:
+ otx2_cptlf_free_irqs_affinity(lfs);
+ return ret;
+}
+
+int otx2_cptlf_init(struct otx2_cptlfs_info *lfs, u8 eng_grp_mask, int pri,
+ int lfs_num)
+{
+ int slot, ret;
+
+ if (!lfs->pdev || !lfs->reg_base)
+ return -EINVAL;
+
+ lfs->lfs_num = lfs_num;
+ for (slot = 0; slot < lfs->lfs_num; slot++) {
+ lfs->lf[slot].lfs = lfs;
+ lfs->lf[slot].slot = slot;
+ if (lfs->lmt_base)
+ lfs->lf[slot].lmtline = lfs->lmt_base +
+ (slot * LMTLINE_SIZE);
+ else
+ lfs->lf[slot].lmtline = lfs->reg_base +
+ OTX2_CPT_RVU_FUNC_ADDR_S(BLKADDR_LMT, slot,
+ OTX2_CPT_LMT_LF_LMTLINEX(0));
+
+ lfs->lf[slot].ioreg = lfs->reg_base +
+ OTX2_CPT_RVU_FUNC_ADDR_S(lfs->blkaddr, slot,
+ OTX2_CPT_LF_NQX(0));
+ }
+ /* Send request to attach LFs */
+ ret = otx2_cpt_attach_rscrs_msg(lfs);
+ if (ret)
+ goto clear_lfs_num;
+
+ ret = otx2_cpt_alloc_instruction_queues(lfs);
+ if (ret) {
+ dev_err(&lfs->pdev->dev,
+ "Allocating instruction queues failed\n");
+ goto detach_rsrcs;
+ }
+ cptlf_hw_init(lfs);
+ /*
+ * Allow each LF to execute requests destined for any of the 8 engine
+ * groups and set queue priority of each LF to high
+ */
+ ret = cptlf_set_grp_and_pri(lfs, eng_grp_mask, pri);
+ if (ret)
+ goto free_iq;
+
+ return 0;
+
+free_iq:
+ otx2_cpt_free_instruction_queues(lfs);
+ cptlf_hw_cleanup(lfs);
+detach_rsrcs:
+ otx2_cpt_detach_rsrcs_msg(lfs);
+clear_lfs_num:
+ lfs->lfs_num = 0;
+ return ret;
+}
+
+void otx2_cptlf_shutdown(struct otx2_cptlfs_info *lfs)
+{
+ lfs->lfs_num = 0;
+ /* Cleanup LFs hardware side */
+ cptlf_hw_cleanup(lfs);
+ /* Send request to detach LFs */
+ otx2_cpt_detach_rsrcs_msg(lfs);
+}
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.h b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
new file mode 100644
index 000000000000..48f95fe8b438
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
@@ -0,0 +1,410 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (C) 2018 Marvell.
+ */
+#ifndef __OTX2_CPTLF_H
+#define __OTX2_CPTLF_H
+
+#include <mbox.h>
+#include <rvu.h>
+#include "otx2_cpt_common.h"
+#include "otx2_cpt_reqmgr.h"
+
+/*
+ * User-requested length of the CPT instruction and pending queues, in CPT_INST_S messages
+ */
+#define OTX2_CPT_USER_REQUESTED_QLEN_MSGS 8200
+
+/*
+ * CPT instruction queue size passed to HW is in units of 40*CPT_INST_S
+ * messages.
+ */
+#define OTX2_CPT_SIZE_DIV40 (OTX2_CPT_USER_REQUESTED_QLEN_MSGS/40)
+
+/*
+ * CPT instruction and pending queues length in CPT_INST_S messages
+ */
+#define OTX2_CPT_INST_QLEN_MSGS ((OTX2_CPT_SIZE_DIV40 - 1) * 40)
+
+/*
+ * LDWB is used incorrectly when IQB_LDWB = 1 and the CPT instruction queue
+ * has fewer than 320 free entries. As a workaround, increase the HW
+ * instruction queue size by 320 entries and expose 320 fewer entries to
+ * SW/NIX RX.
+ */
+#define OTX2_CPT_INST_QLEN_EXTRA_BYTES (320 * OTX2_CPT_INST_SIZE)
+#define OTX2_CPT_EXTRA_SIZE_DIV40 (320/40)
+
+/* CPT instruction queue length in bytes */
+#define OTX2_CPT_INST_QLEN_BYTES \
+ ((OTX2_CPT_SIZE_DIV40 * 40 * OTX2_CPT_INST_SIZE) + \
+ OTX2_CPT_INST_QLEN_EXTRA_BYTES)
+
+/* CPT instruction group queue length in bytes */
+#define OTX2_CPT_INST_GRP_QLEN_BYTES \
+ ((OTX2_CPT_SIZE_DIV40 + OTX2_CPT_EXTRA_SIZE_DIV40) * 16)
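For reference, with the default OTX2_CPT_USER_REQUESTED_QLEN_MSGS of 8200 the sizes above work out to the following (not part of the patch, just a quick check):

/*
 * OTX2_CPT_SIZE_DIV40          = 8200 / 40                = 205
 * OTX2_CPT_INST_QLEN_MSGS      = (205 - 1) * 40           = 8160 messages
 * OTX2_CPT_INST_QLEN_BYTES     = 205 * 40 * 64 + 320 * 64 = 545280 bytes
 * OTX2_CPT_INST_GRP_QLEN_BYTES = (205 + 8) * 16           = 3408 bytes
 */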
+
+/* CPT FC length in bytes */
+#define OTX2_CPT_Q_FC_LEN 128
+
+/* CPT instruction queue alignment */
+#define OTX2_CPT_INST_Q_ALIGNMENT 128
+
+/* Mask which selects all engine groups */
+#define OTX2_CPT_ALL_ENG_GRPS_MASK 0xFF
+
+/* Maximum LFs supported in OcteonTX2 for CPT */
+#define OTX2_CPT_MAX_LFS_NUM 64
+
+/* Queue priority */
+#define OTX2_CPT_QUEUE_HI_PRIO 0x1
+#define OTX2_CPT_QUEUE_LOW_PRIO 0x0
+
+#if defined(CONFIG_ARM64)
+/*
+ * otx2_lmt_flush is used for the LMT store operation.
+ * On the OcteonTX2 platform, CPT instruction enqueue and
+ * NIX packet send are only possible via LMTST
+ * operations, which use the LDEOR instruction targeting
+ * the coprocessor address.
+ */
+#define otx2_lmt_flush(ioaddr) \
+({ \
+ u64 result = 0; \
+ __asm__ volatile(".cpu generic+lse\n" \
+ "ldeor xzr, %x[rf], [%[rs]]" \
+ : [rf]"=r" (result) \
+ : [rs]"r" (ioaddr)); \
+ (result); \
+})
+#else
+#define otx2_lmt_flush(ioaddr) ({ 0; })
+#endif
+
+enum otx2_cptlf_state {
+ OTX2_CPTLF_IN_RESET,
+ OTX2_CPTLF_STARTED,
+};
+
+struct otx2_cpt_inst_queue {
+ u8 *vaddr;
+ u8 *real_vaddr;
+ dma_addr_t dma_addr;
+ dma_addr_t real_dma_addr;
+ u32 size;
+};
+
+struct otx2_cptlfs_info;
+struct otx2_cptlf_wqe {
+ struct tasklet_struct work;
+ struct otx2_cptlfs_info *lfs;
+ u8 lf_num;
+};
+
+struct otx2_cptlf_info {
+ struct otx2_cptlfs_info *lfs; /* Ptr to cptlfs_info struct */
+ void __iomem *lmtline; /* Address of LMTLINE */
+ void __iomem *ioreg; /* LMTLINE send register */
+ int msix_offset; /* MSI-X interrupts offset */
+ cpumask_var_t affinity_mask; /* IRQs affinity mask */
+ u8 irq_name[OTX2_CPT_LF_MSIX_VECTORS][32];/* Interrupts name */
+ u8 is_irq_reg[OTX2_CPT_LF_MSIX_VECTORS]; /* Is interrupt registered */
+ u8 slot; /* Slot number of this LF */
+
+ struct otx2_cpt_inst_queue iqueue;/* Instruction queue */
+ struct otx2_cpt_pending_queue pqueue; /* Pending queue */
+ struct otx2_cptlf_wqe *wqe; /* Tasklet work info */
+};
+
+struct cpt_hw_ops {
+ void (*send_cmd)(union otx2_cpt_inst_s *cptinst, u32 insts_num,
+ struct otx2_cptlf_info *lf);
+ u8 (*cpt_get_compcode)(union otx2_cpt_res_s *result);
+ u8 (*cpt_get_uc_compcode)(union otx2_cpt_res_s *result);
+};
+
+struct otx2_cptlfs_info {
+ /* Register base address of the VF/PF that the LFs are attached to */
+ void __iomem *reg_base;
+#define LMTLINE_SIZE 128
+ void __iomem *lmt_base;
+ struct pci_dev *pdev; /* Device LFs are attached to */
+ struct otx2_cptlf_info lf[OTX2_CPT_MAX_LFS_NUM];
+ struct otx2_mbox *mbox;
+ struct cpt_hw_ops *ops;
+ u8 are_lfs_attached; /* Whether CPT LFs are attached */
+ u8 lfs_num; /* Number of CPT LFs */
+ u8 kcrypto_eng_grp_num; /* Kernel crypto engine group number */
+ u8 kvf_limits; /* Kernel crypto limits */
+ atomic_t state; /* LF's state. started/reset */
+ int blkaddr; /* CPT blkaddr: BLKADDR_CPT0/BLKADDR_CPT1 */
+};
+
+static inline void otx2_cpt_free_instruction_queues(
+ struct otx2_cptlfs_info *lfs)
+{
+ struct otx2_cpt_inst_queue *iq;
+ int i;
+
+ for (i = 0; i < lfs->lfs_num; i++) {
+ iq = &lfs->lf[i].iqueue;
+ if (iq->real_vaddr)
+ dma_free_coherent(&lfs->pdev->dev,
+ iq->size,
+ iq->real_vaddr,
+ iq->real_dma_addr);
+ iq->real_vaddr = NULL;
+ iq->vaddr = NULL;
+ }
+}
+
+static inline int otx2_cpt_alloc_instruction_queues(
+ struct otx2_cptlfs_info *lfs)
+{
+ struct otx2_cpt_inst_queue *iq;
+ int ret = 0, i;
+
+ if (!lfs->lfs_num)
+ return -EINVAL;
+
+ for (i = 0; i < lfs->lfs_num; i++) {
+ iq = &lfs->lf[i].iqueue;
+ iq->size = OTX2_CPT_INST_QLEN_BYTES +
+ OTX2_CPT_Q_FC_LEN +
+ OTX2_CPT_INST_GRP_QLEN_BYTES +
+ OTX2_CPT_INST_Q_ALIGNMENT;
+ iq->real_vaddr = dma_alloc_coherent(&lfs->pdev->dev, iq->size,
+ &iq->real_dma_addr, GFP_KERNEL);
+ if (!iq->real_vaddr) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ iq->vaddr = iq->real_vaddr + OTX2_CPT_INST_GRP_QLEN_BYTES;
+ iq->dma_addr = iq->real_dma_addr + OTX2_CPT_INST_GRP_QLEN_BYTES;
+
+ /* Align pointers */
+ iq->vaddr = PTR_ALIGN(iq->vaddr, OTX2_CPT_INST_Q_ALIGNMENT);
+ iq->dma_addr = PTR_ALIGN(iq->dma_addr,
+ OTX2_CPT_INST_Q_ALIGNMENT);
+ }
+ return 0;
+
+error:
+ otx2_cpt_free_instruction_queues(lfs);
+ return ret;
+}
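The resulting carving of each LF's coherent allocation is roughly as follows (illustrative diagram only):

/*
 *  real_vaddr                                  vaddr (128-byte aligned)
 *  |<- GRP queue space + alignment pad ->|<- instruction queue + FC space ->|
 *
 * dma_addr, the aligned counterpart of vaddr, is what gets programmed into
 * OTX2_CPT_LF_Q_BASE below.
 */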
+
+static inline void otx2_cptlf_set_iqueues_base_addr(
+ struct otx2_cptlfs_info *lfs)
+{
+ union otx2_cptx_lf_q_base lf_q_base;
+ int slot;
+
+ for (slot = 0; slot < lfs->lfs_num; slot++) {
+ lf_q_base.u = lfs->lf[slot].iqueue.dma_addr;
+ otx2_cpt_write64(lfs->reg_base, lfs->blkaddr, slot,
+ OTX2_CPT_LF_Q_BASE, lf_q_base.u);
+ }
+}
+
+static inline void otx2_cptlf_do_set_iqueue_size(struct otx2_cptlf_info *lf)
+{
+ union otx2_cptx_lf_q_size lf_q_size = { .u = 0x0 };
+
+ lf_q_size.s.size_div40 = OTX2_CPT_SIZE_DIV40 +
+ OTX2_CPT_EXTRA_SIZE_DIV40;
+ otx2_cpt_write64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
+ OTX2_CPT_LF_Q_SIZE, lf_q_size.u);
+}
+
+static inline void otx2_cptlf_set_iqueues_size(struct otx2_cptlfs_info *lfs)
+{
+ int slot;
+
+ for (slot = 0; slot < lfs->lfs_num; slot++)
+ otx2_cptlf_do_set_iqueue_size(&lfs->lf[slot]);
+}
+
+static inline void otx2_cptlf_do_disable_iqueue(struct otx2_cptlf_info *lf)
+{
+ union otx2_cptx_lf_ctl lf_ctl = { .u = 0x0 };
+ union otx2_cptx_lf_inprog lf_inprog;
+ u8 blkaddr = lf->lfs->blkaddr;
+ int timeout = 20;
+
+ /* Disable instructions enqueuing */
+ otx2_cpt_write64(lf->lfs->reg_base, blkaddr, lf->slot,
+ OTX2_CPT_LF_CTL, lf_ctl.u);
+
+ /* Wait for instruction queue to become empty */
+ do {
+ lf_inprog.u = otx2_cpt_read64(lf->lfs->reg_base, blkaddr,
+ lf->slot, OTX2_CPT_LF_INPROG);
+ if (!lf_inprog.s.inflight)
+ break;
+
+ usleep_range(10000, 20000);
+ if (timeout-- < 0) {
+ dev_err(&lf->lfs->pdev->dev,
+ "Error LF %d is still busy.\n", lf->slot);
+ break;
+ }
+
+ } while (1);
+
+ /*
+ * Disable executions in the LF's queue;
+ * the queue should be empty at this point.
+ */
+ lf_inprog.s.eena = 0x0;
+ otx2_cpt_write64(lf->lfs->reg_base, blkaddr, lf->slot,
+ OTX2_CPT_LF_INPROG, lf_inprog.u);
+}
+
+static inline void otx2_cptlf_disable_iqueues(struct otx2_cptlfs_info *lfs)
+{
+ int slot;
+
+ for (slot = 0; slot < lfs->lfs_num; slot++)
+ otx2_cptlf_do_disable_iqueue(&lfs->lf[slot]);
+}
+
+static inline void otx2_cptlf_set_iqueue_enq(struct otx2_cptlf_info *lf,
+ bool enable)
+{
+ u8 blkaddr = lf->lfs->blkaddr;
+ union otx2_cptx_lf_ctl lf_ctl;
+
+ lf_ctl.u = otx2_cpt_read64(lf->lfs->reg_base, blkaddr, lf->slot,
+ OTX2_CPT_LF_CTL);
+
+ /* Set iqueue's enqueuing */
+ lf_ctl.s.ena = enable ? 0x1 : 0x0;
+ otx2_cpt_write64(lf->lfs->reg_base, blkaddr, lf->slot,
+ OTX2_CPT_LF_CTL, lf_ctl.u);
+}
+
+static inline void otx2_cptlf_enable_iqueue_enq(struct otx2_cptlf_info *lf)
+{
+ otx2_cptlf_set_iqueue_enq(lf, true);
+}
+
+static inline void otx2_cptlf_set_iqueue_exec(struct otx2_cptlf_info *lf,
+ bool enable)
+{
+ union otx2_cptx_lf_inprog lf_inprog;
+ u8 blkaddr = lf->lfs->blkaddr;
+
+ lf_inprog.u = otx2_cpt_read64(lf->lfs->reg_base, blkaddr, lf->slot,
+ OTX2_CPT_LF_INPROG);
+
+ /* Set iqueue's execution */
+ lf_inprog.s.eena = enable ? 0x1 : 0x0;
+ otx2_cpt_write64(lf->lfs->reg_base, blkaddr, lf->slot,
+ OTX2_CPT_LF_INPROG, lf_inprog.u);
+}
+
+static inline void otx2_cptlf_enable_iqueue_exec(struct otx2_cptlf_info *lf)
+{
+ otx2_cptlf_set_iqueue_exec(lf, true);
+}
+
+static inline void otx2_cptlf_disable_iqueue_exec(struct otx2_cptlf_info *lf)
+{
+ otx2_cptlf_set_iqueue_exec(lf, false);
+}
+
+static inline void otx2_cptlf_enable_iqueues(struct otx2_cptlfs_info *lfs)
+{
+ int slot;
+
+ for (slot = 0; slot < lfs->lfs_num; slot++) {
+ otx2_cptlf_enable_iqueue_exec(&lfs->lf[slot]);
+ otx2_cptlf_enable_iqueue_enq(&lfs->lf[slot]);
+ }
+}
+
+static inline void otx2_cpt_fill_inst(union otx2_cpt_inst_s *cptinst,
+ struct otx2_cpt_iq_command *iq_cmd,
+ u64 comp_baddr)
+{
+ cptinst->u[0] = 0x0;
+ cptinst->s.doneint = true;
+ cptinst->s.res_addr = comp_baddr;
+ cptinst->u[2] = 0x0;
+ cptinst->u[3] = 0x0;
+ cptinst->s.ei0 = iq_cmd->cmd.u;
+ cptinst->s.ei1 = iq_cmd->dptr;
+ cptinst->s.ei2 = iq_cmd->rptr;
+ cptinst->s.ei3 = iq_cmd->cptr.u;
+}
+
+/*
+ * On the OcteonTX2 platform, the parameter insts_num is used as a count of
+ * instructions to be enqueued. The valid values for insts_num are:
+ * 1 - 1 CPT instruction will be enqueued during LMTST operation
+ * 2 - 2 CPT instructions will be enqueued during LMTST operation
+ */
+static inline void otx2_cpt_send_cmd(union otx2_cpt_inst_s *cptinst,
+ u32 insts_num, struct otx2_cptlf_info *lf)
+{
+ void __iomem *lmtline = lf->lmtline;
+ long ret;
+
+ /*
+ * Make sure the memory areas pointed to by CPT_INST_S
+ * are flushed before the instruction is sent to CPT
+ */
+ dma_wmb();
+
+ do {
+ /* Copy CPT command to LMTLINE */
+ memcpy_toio(lmtline, cptinst, insts_num * OTX2_CPT_INST_SIZE);
+
+ /*
+ * LDEOR initiates atomic transfer to I/O device
+ * The following will cause the LMTST to fail (the LDEOR
+ * returns zero):
+ * - No stores have been performed to the LMTLINE since it was
+ * last invalidated.
+ * - The bytes which have been stored to LMTLINE since it was
+ * last invalidated form a pattern that is non-contiguous, does
+ * not start at byte 0, or does not end on an 8-byte boundary.
+ * (i.e. comprises a formation of other than 1–16 8-byte
+ * words.)
+ *
+ * These rules are designed such that an operating system
+ * context switch or hypervisor guest switch need have no
+ * knowledge of the LMTST operations; the switch code does not
+ * need to store to LMTCANCEL. Also note as LMTLINE data cannot
+ * be read, there is no information leakage between processes.
+ */
+ ret = otx2_lmt_flush(lf->ioreg);
+
+ } while (!ret);
+}
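otx2_cpt_fill_inst() and otx2_cpt_send_cmd() are intended to be used back to back when enqueuing one request; a minimal sketch, assuming the IQ command and the CPT_RES_S DMA address are already prepared:

static void example_enqueue_one(struct otx2_cptlf_info *lf,
				struct otx2_cpt_iq_command *iq_cmd,
				dma_addr_t comp_baddr)
{
	union otx2_cpt_inst_s cptinst;

	otx2_cpt_fill_inst(&cptinst, iq_cmd, comp_baddr);
	otx2_cpt_send_cmd(&cptinst, 1, lf);	/* one LMTST, one instruction */
}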
+
+static inline bool otx2_cptlf_started(struct otx2_cptlfs_info *lfs)
+{
+ return atomic_read(&lfs->state) == OTX2_CPTLF_STARTED;
+}
+
+static inline void otx2_cptlf_set_dev_info(struct otx2_cptlfs_info *lfs,
+ struct pci_dev *pdev,
+ void __iomem *reg_base,
+ struct otx2_mbox *mbox,
+ int blkaddr)
+{
+ lfs->pdev = pdev;
+ lfs->reg_base = reg_base;
+ lfs->mbox = mbox;
+ lfs->blkaddr = blkaddr;
+}
+
+int otx2_cptlf_init(struct otx2_cptlfs_info *lfs, u8 eng_grp_msk, int pri,
+ int lfs_num);
+void otx2_cptlf_shutdown(struct otx2_cptlfs_info *lfs);
+int otx2_cptlf_register_interrupts(struct otx2_cptlfs_info *lfs);
+void otx2_cptlf_unregister_interrupts(struct otx2_cptlfs_info *lfs);
+void otx2_cptlf_free_irqs_affinity(struct otx2_cptlfs_info *lfs);
+int otx2_cptlf_set_irqs_affinity(struct otx2_cptlfs_info *lfs);
+
+#endif /* __OTX2_CPTLF_H */
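Putting the LF API together, a caller is expected to bring CPT LFs up roughly in the following order. A hedged sketch (hypothetical example_* helper), assuming otx2_cptlf_set_dev_info() has already populated the pdev, register base, mailbox and block address:

static int example_cptlf_bringup(struct otx2_cptlfs_info *lfs, int lfs_num)
{
	int ret;

	/* Attach LFs, allocate instruction queues, set group mask/priority. */
	ret = otx2_cptlf_init(lfs, OTX2_CPT_ALL_ENG_GRPS_MASK,
			      OTX2_CPT_QUEUE_HI_PRIO, lfs_num);
	if (ret)
		return ret;

	ret = otx2_cptlf_register_interrupts(lfs);
	if (ret)
		goto shutdown;

	ret = otx2_cptlf_set_irqs_affinity(lfs);
	if (ret)
		goto unregister;

	atomic_set(&lfs->state, OTX2_CPTLF_STARTED);
	return 0;

unregister:
	otx2_cptlf_unregister_interrupts(lfs);
shutdown:
	otx2_cptlf_shutdown(lfs);
	return ret;
}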
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf.h b/drivers/crypto/marvell/octeontx2/otx2_cptpf.h
new file mode 100644
index 000000000000..b5201295162b
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (C) 2018 Marvell.
+ */
+
+#ifndef __OTX2_CPTPF_H
+#define __OTX2_CPTPF_H
+
+#include "otx2_cpt_common.h"
+#include "otx2_cptpf_ucode.h"
+#include "otx2_cptlf.h"
+
+struct otx2_cptpf_dev;
+struct otx2_cptvf_info {
+ struct otx2_cptpf_dev *cptpf; /* PF pointer this VF belongs to */
+ struct work_struct vfpf_mbox_work;
+ struct pci_dev *vf_dev;
+ int vf_id;
+ int intr_idx;
+};
+
+struct cptpf_flr_work {
+ struct work_struct work;
+ struct otx2_cptpf_dev *pf;
+};
+
+struct otx2_cptpf_dev {
+ void __iomem *reg_base; /* CPT PF registers start address */
+ void __iomem *afpf_mbox_base; /* PF-AF mbox start address */
+ void __iomem *vfpf_mbox_base; /* VF-PF mbox start address */
+ struct pci_dev *pdev; /* PCI device handle */
+ struct otx2_cptvf_info vf[OTX2_CPT_MAX_VFS_NUM];
+ struct otx2_cpt_eng_grps eng_grps;/* Engine groups information */
+ struct otx2_cptlfs_info lfs; /* CPT LFs attached to this PF */
+ struct otx2_cptlfs_info cpt1_lfs; /* CPT1 LFs attached to this PF */
+ /* HW capabilities for each engine type */
+ union otx2_cpt_eng_caps eng_caps[OTX2_CPT_MAX_ENG_TYPES];
+ bool is_eng_caps_discovered;
+
+ /* AF <=> PF mbox */
+ struct otx2_mbox afpf_mbox;
+ struct work_struct afpf_mbox_work;
+ struct workqueue_struct *afpf_mbox_wq;
+
+ struct otx2_mbox afpf_mbox_up;
+ struct work_struct afpf_mbox_up_work;
+
+ /* VF <=> PF mbox */
+ struct otx2_mbox vfpf_mbox;
+ struct workqueue_struct *vfpf_mbox_wq;
+
+ struct workqueue_struct *flr_wq;
+ struct cptpf_flr_work *flr_work;
+ struct mutex lock; /* serialize mailbox access */
+
+ unsigned long cap_flag;
+ u8 pf_id; /* RVU PF number */
+ u8 max_vfs; /* Maximum number of VFs supported by CPT */
+ u8 enabled_vfs; /* Number of enabled VFs */
+ u8 sso_pf_func_ovrd; /* SSO PF_FUNC override bit */
+ u8 kvf_limits; /* Kernel VF limits */
+ bool has_cpt1;
+ u8 rsrc_req_blkaddr;
+
+ /* Devlink */
+ struct devlink *dl;
+};
+
+irqreturn_t otx2_cptpf_afpf_mbox_intr(int irq, void *arg);
+void otx2_cptpf_afpf_mbox_handler(struct work_struct *work);
+void otx2_cptpf_afpf_mbox_up_handler(struct work_struct *work);
+irqreturn_t otx2_cptpf_vfpf_mbox_intr(int irq, void *arg);
+void otx2_cptpf_vfpf_mbox_handler(struct work_struct *work);
+int otx2_cptpf_lf_init(struct otx2_cptpf_dev *cptpf, u8 eng_grp_mask,
+ int pri, int lfs_num);
+void otx2_cptpf_lf_cleanup(struct otx2_cptlfs_info *lfs);
+
+#endif /* __OTX2_CPTPF_H */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
new file mode 100644
index 000000000000..b274a2cddeae
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
@@ -0,0 +1,881 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2018 Marvell. */
+
+#include <linux/firmware.h>
+#include "otx2_cpt_hw_types.h"
+#include "otx2_cpt_common.h"
+#include "otx2_cpt_devlink.h"
+#include "otx2_cptpf_ucode.h"
+#include "otx2_cptpf.h"
+#include "cn10k_cpt.h"
+#include "rvu_reg.h"
+
+#define OTX2_CPT_DRV_NAME "rvu_cptpf"
+#define OTX2_CPT_DRV_STRING "Marvell RVU CPT Physical Function Driver"
+
+static void cptpf_enable_vf_flr_me_intrs(struct otx2_cptpf_dev *cptpf,
+ int numvfs)
+{
+ /* Clear FLR interrupt if any */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(0),
+ INTR_MASK(numvfs));
+
+ /* Enable VF FLR interrupts */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(numvfs));
+ /* Clear ME interrupt if any */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFME_INTX(0),
+ INTR_MASK(numvfs));
+ /* Enable VF ME interrupts */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(numvfs));
+
+ if (numvfs <= 64)
+ return;
+
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(1),
+ INTR_MASK(numvfs - 64));
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(numvfs - 64));
+
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFME_INTX(1),
+ INTR_MASK(numvfs - 64));
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(numvfs - 64));
+}
+
+static void cptpf_disable_vf_flr_me_intrs(struct otx2_cptpf_dev *cptpf,
+ int numvfs)
+{
+ int vector;
+
+ /* Disable VF FLR interrupts */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(numvfs));
+ vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFFLR0);
+ free_irq(vector, cptpf);
+
+ /* Disable VF ME interrupts */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(numvfs));
+ vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFME0);
+ free_irq(vector, cptpf);
+
+ if (numvfs <= 64)
+ return;
+
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(numvfs - 64));
+ vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFFLR1);
+ free_irq(vector, cptpf);
+
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(numvfs - 64));
+ vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFME1);
+ free_irq(vector, cptpf);
+}
+
+static void cptpf_enable_vfpf_mbox_intr(struct otx2_cptpf_dev *cptpf,
+ int num_vfs)
+{
+ int ena_bits;
+
+ /* Clear any pending interrupts */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFPF_MBOX_INTX(0), ~0x0ULL);
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFPF_MBOX_INTX(1), ~0x0ULL);
+
+ /* Enable VF interrupts for VFs from 0 to 63 */
+ ena_bits = ((num_vfs - 1) % 64);
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0),
+ GENMASK_ULL(ena_bits, 0));
+
+ if (num_vfs > 64) {
+ /* Enable VF interrupts for VFs from 64 to 127 */
+ ena_bits = num_vfs - 64 - 1;
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
+ GENMASK_ULL(ena_bits, 0));
+ }
+}
+
+static void cptpf_disable_vfpf_mbox_intr(struct otx2_cptpf_dev *cptpf,
+ int num_vfs)
+{
+ int vector;
+
+ /* Disable VF-PF interrupts */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), ~0x0ULL);
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1), ~0x0ULL);
+
+ /* Clear any pending interrupts */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFPF_MBOX_INTX(0), ~0x0ULL);
+
+ vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
+ free_irq(vector, cptpf);
+
+ if (num_vfs > 64) {
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFPF_MBOX_INTX(1), ~0ULL);
+ vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
+ free_irq(vector, cptpf);
+ }
+}
+
+static void cptpf_flr_wq_handler(struct work_struct *work)
+{
+ struct cptpf_flr_work *flr_work;
+ struct otx2_cptpf_dev *pf;
+ struct mbox_msghdr *req;
+ struct otx2_mbox *mbox;
+ int vf, reg = 0;
+
+ flr_work = container_of(work, struct cptpf_flr_work, work);
+ pf = flr_work->pf;
+ mbox = &pf->afpf_mbox;
+
+ vf = flr_work - pf->flr_work;
+
+ mutex_lock(&pf->lock);
+ req = otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
+ sizeof(struct msg_rsp));
+ if (!req) {
+ mutex_unlock(&pf->lock);
+ return;
+ }
+
+ req->sig = OTX2_MBOX_REQ_SIG;
+ req->id = MBOX_MSG_VF_FLR;
+ req->pcifunc &= RVU_PFVF_FUNC_MASK;
+ req->pcifunc |= (vf + 1) & RVU_PFVF_FUNC_MASK;
+
+ otx2_cpt_send_mbox_msg(mbox, pf->pdev);
+ if (!otx2_cpt_sync_mbox_msg(&pf->afpf_mbox)) {
+
+ if (vf >= 64) {
+ reg = 1;
+ vf = vf - 64;
+ }
+ /* Clear transaction pending register */
+ otx2_cpt_write64(pf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
+ otx2_cpt_write64(pf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
+ }
+ mutex_unlock(&pf->lock);
+}
+
+static irqreturn_t cptpf_vf_flr_intr(int __always_unused irq, void *arg)
+{
+ int reg, dev, vf, start_vf, num_reg = 1;
+ struct otx2_cptpf_dev *cptpf = arg;
+ u64 intr;
+
+ if (cptpf->max_vfs > 64)
+ num_reg = 2;
+
+ for (reg = 0; reg < num_reg; reg++) {
+ intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFFLR_INTX(reg));
+ if (!intr)
+ continue;
+ start_vf = 64 * reg;
+ for (vf = 0; vf < 64; vf++) {
+ if (!(intr & BIT_ULL(vf)))
+ continue;
+ dev = vf + start_vf;
+ queue_work(cptpf->flr_wq, &cptpf->flr_work[dev].work);
+ /* Clear interrupt */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
+ /* Disable the interrupt */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFFLR_INT_ENA_W1CX(reg),
+ BIT_ULL(vf));
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t cptpf_vf_me_intr(int __always_unused irq, void *arg)
+{
+ struct otx2_cptpf_dev *cptpf = arg;
+ int reg, vf, num_reg = 1;
+ u64 intr;
+
+ if (cptpf->max_vfs > 64)
+ num_reg = 2;
+
+ for (reg = 0; reg < num_reg; reg++) {
+ intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFME_INTX(reg));
+ if (!intr)
+ continue;
+ for (vf = 0; vf < 64; vf++) {
+ if (!(intr & BIT_ULL(vf)))
+ continue;
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
+ /* Clear interrupt */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFME_INTX(reg), BIT_ULL(vf));
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+static void cptpf_unregister_vfpf_intr(struct otx2_cptpf_dev *cptpf,
+ int num_vfs)
+{
+ cptpf_disable_vfpf_mbox_intr(cptpf, num_vfs);
+ cptpf_disable_vf_flr_me_intrs(cptpf, num_vfs);
+}
+
+static int cptpf_register_vfpf_intr(struct otx2_cptpf_dev *cptpf, int num_vfs)
+{
+ struct pci_dev *pdev = cptpf->pdev;
+ struct device *dev = &pdev->dev;
+ int ret, vector;
+
+ vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
+ /* Register VF-PF mailbox interrupt handler */
+ ret = request_irq(vector, otx2_cptpf_vfpf_mbox_intr, 0, "CPTVFPF Mbox0",
+ cptpf);
+ if (ret) {
+ dev_err(dev,
+ "IRQ registration failed for PFVF mbox0 irq\n");
+ return ret;
+ }
+ vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR0);
+ /* Register VF FLR interrupt handler */
+ ret = request_irq(vector, cptpf_vf_flr_intr, 0, "CPTPF FLR0", cptpf);
+ if (ret) {
+ dev_err(dev,
+ "IRQ registration failed for VFFLR0 irq\n");
+ goto free_mbox0_irq;
+ }
+ vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFME0);
+ /* Register VF ME interrupt handler */
+ ret = request_irq(vector, cptpf_vf_me_intr, 0, "CPTPF ME0", cptpf);
+ if (ret) {
+ dev_err(dev,
+ "IRQ registration failed for VFME0 irq\n");
+ goto free_flr0_irq;
+ }
+
+ if (num_vfs > 64) {
+ vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
+ ret = request_irq(vector, otx2_cptpf_vfpf_mbox_intr, 0,
+ "CPTVFPF Mbox1", cptpf);
+ if (ret) {
+ dev_err(dev,
+ "IRQ registration failed for PFVF mbox1 irq\n");
+ goto free_me0_irq;
+ }
+ vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR1);
+ /* Register VF FLR interrupt handler */
+ ret = request_irq(vector, cptpf_vf_flr_intr, 0, "CPTPF FLR1",
+ cptpf);
+ if (ret) {
+ dev_err(dev,
+ "IRQ registration failed for VFFLR1 irq\n");
+ goto free_mbox1_irq;
+ }
+ vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFME1);
+ /* Register VF ME interrupt handler */
+ ret = request_irq(vector, cptpf_vf_me_intr, 0, "CPTPF ME1",
+ cptpf);
+ if (ret) {
+ dev_err(dev,
+ "IRQ registration failed for VFME1 irq\n");
+ goto free_flr1_irq;
+ }
+ }
+ cptpf_enable_vfpf_mbox_intr(cptpf, num_vfs);
+ cptpf_enable_vf_flr_me_intrs(cptpf, num_vfs);
+
+ return 0;
+
+free_flr1_irq:
+ vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR1);
+ free_irq(vector, cptpf);
+free_mbox1_irq:
+ vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
+ free_irq(vector, cptpf);
+free_me0_irq:
+ vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFME0);
+ free_irq(vector, cptpf);
+free_flr0_irq:
+ vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR0);
+ free_irq(vector, cptpf);
+free_mbox0_irq:
+ vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
+ free_irq(vector, cptpf);
+ return ret;
+}
+
+static void cptpf_flr_wq_destroy(struct otx2_cptpf_dev *pf)
+{
+ if (!pf->flr_wq)
+ return;
+ destroy_workqueue(pf->flr_wq);
+ pf->flr_wq = NULL;
+ kfree(pf->flr_work);
+}
+
+static int cptpf_flr_wq_init(struct otx2_cptpf_dev *cptpf, int num_vfs)
+{
+ int vf;
+
+ cptpf->flr_wq = alloc_ordered_workqueue("cptpf_flr_wq", 0);
+ if (!cptpf->flr_wq)
+ return -ENOMEM;
+
+ cptpf->flr_work = kcalloc(num_vfs, sizeof(struct cptpf_flr_work),
+ GFP_KERNEL);
+ if (!cptpf->flr_work)
+ goto destroy_wq;
+
+ for (vf = 0; vf < num_vfs; vf++) {
+ cptpf->flr_work[vf].pf = cptpf;
+ INIT_WORK(&cptpf->flr_work[vf].work, cptpf_flr_wq_handler);
+ }
+
+ return 0;
+
+destroy_wq:
+ destroy_workqueue(cptpf->flr_wq);
+ return -ENOMEM;
+}
+
+static void cptpf_disable_afpf_mbox_intr(struct otx2_cptpf_dev *cptpf)
+{
+ /* Disable AF-PF interrupt */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT_ENA_W1C,
+ 0x1ULL);
+ /* Clear interrupt if any */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT, 0x1ULL);
+}
+
+static int cptpf_register_afpf_mbox_intr(struct otx2_cptpf_dev *cptpf)
+{
+ struct pci_dev *pdev = cptpf->pdev;
+ struct device *dev = &pdev->dev;
+ int ret, irq;
+
+ irq = pci_irq_vector(pdev, RVU_PF_INT_VEC_AFPF_MBOX);
+ /* Register AF-PF mailbox interrupt handler */
+ ret = devm_request_irq(dev, irq, otx2_cptpf_afpf_mbox_intr, 0,
+ "CPTAFPF Mbox", cptpf);
+ if (ret) {
+ dev_err(dev,
+ "IRQ registration failed for PFAF mbox irq\n");
+ return ret;
+ }
+ /* Clear interrupt if any, to avoid spurious interrupts */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT, 0x1ULL);
+ /* Enable AF-PF interrupt */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT_ENA_W1S,
+ 0x1ULL);
+
+ ret = otx2_cpt_send_ready_msg(&cptpf->afpf_mbox, cptpf->pdev);
+ if (ret) {
+ dev_warn(dev,
+ "AF not responding to mailbox, deferring probe\n");
+ cptpf_disable_afpf_mbox_intr(cptpf);
+ return -EPROBE_DEFER;
+ }
+ return 0;
+}
+
+static int cptpf_afpf_mbox_init(struct otx2_cptpf_dev *cptpf)
+{
+ struct pci_dev *pdev = cptpf->pdev;
+ resource_size_t offset;
+ int err;
+
+ cptpf->afpf_mbox_wq = alloc_workqueue("cpt_afpf_mailbox",
+ WQ_UNBOUND | WQ_HIGHPRI |
+ WQ_MEM_RECLAIM, 1);
+ if (!cptpf->afpf_mbox_wq)
+ return -ENOMEM;
+
+ offset = pci_resource_start(pdev, PCI_MBOX_BAR_NUM);
+ /* Map AF-PF mailbox memory */
+ cptpf->afpf_mbox_base = devm_ioremap_wc(&pdev->dev, offset, MBOX_SIZE);
+ if (!cptpf->afpf_mbox_base) {
+ dev_err(&pdev->dev, "Unable to map BAR4\n");
+ err = -ENOMEM;
+ goto error;
+ }
+
+ err = otx2_mbox_init(&cptpf->afpf_mbox, cptpf->afpf_mbox_base,
+ pdev, cptpf->reg_base, MBOX_DIR_PFAF, 1);
+ if (err)
+ goto error;
+
+ err = otx2_mbox_init(&cptpf->afpf_mbox_up, cptpf->afpf_mbox_base,
+ pdev, cptpf->reg_base, MBOX_DIR_PFAF_UP, 1);
+ if (err)
+ goto mbox_cleanup;
+
+ INIT_WORK(&cptpf->afpf_mbox_work, otx2_cptpf_afpf_mbox_handler);
+ INIT_WORK(&cptpf->afpf_mbox_up_work, otx2_cptpf_afpf_mbox_up_handler);
+ mutex_init(&cptpf->lock);
+
+ return 0;
+
+mbox_cleanup:
+ otx2_mbox_destroy(&cptpf->afpf_mbox);
+error:
+ destroy_workqueue(cptpf->afpf_mbox_wq);
+ return err;
+}
+
+static void cptpf_afpf_mbox_destroy(struct otx2_cptpf_dev *cptpf)
+{
+ destroy_workqueue(cptpf->afpf_mbox_wq);
+ otx2_mbox_destroy(&cptpf->afpf_mbox);
+ otx2_mbox_destroy(&cptpf->afpf_mbox_up);
+}
+
+static int cptpf_vfpf_mbox_init(struct otx2_cptpf_dev *cptpf, int numvfs)
+{
+ struct device *dev = &cptpf->pdev->dev;
+ u64 vfpf_mbox_base;
+ int err, i;
+
+ cptpf->vfpf_mbox_wq = alloc_workqueue("cpt_vfpf_mailbox",
+ WQ_UNBOUND | WQ_HIGHPRI |
+ WQ_MEM_RECLAIM, 1);
+ if (!cptpf->vfpf_mbox_wq)
+ return -ENOMEM;
+
+ /* Map VF-PF mailbox memory */
+ if (test_bit(CN10K_MBOX, &cptpf->cap_flag))
+ vfpf_mbox_base = readq(cptpf->reg_base + RVU_PF_VF_MBOX_ADDR);
+ else
+ vfpf_mbox_base = readq(cptpf->reg_base + RVU_PF_VF_BAR4_ADDR);
+
+ if (!vfpf_mbox_base) {
+ dev_err(dev, "VF-PF mailbox address not configured\n");
+ err = -ENOMEM;
+ goto free_wqe;
+ }
+ cptpf->vfpf_mbox_base = devm_ioremap_wc(dev, vfpf_mbox_base,
+ MBOX_SIZE * cptpf->max_vfs);
+ if (!cptpf->vfpf_mbox_base) {
+ dev_err(dev, "Mapping of VF-PF mailbox address failed\n");
+ err = -ENOMEM;
+ goto free_wqe;
+ }
+ err = otx2_mbox_init(&cptpf->vfpf_mbox, cptpf->vfpf_mbox_base,
+ cptpf->pdev, cptpf->reg_base, MBOX_DIR_PFVF,
+ numvfs);
+ if (err)
+ goto free_wqe;
+
+ for (i = 0; i < numvfs; i++) {
+ cptpf->vf[i].vf_id = i;
+ cptpf->vf[i].cptpf = cptpf;
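+ /* Bit position of this VF within its 64-bit VF-PF mailbox interrupt register */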
+ cptpf->vf[i].intr_idx = i % 64;
+ INIT_WORK(&cptpf->vf[i].vfpf_mbox_work,
+ otx2_cptpf_vfpf_mbox_handler);
+ }
+ return 0;
+
+free_wqe:
+ destroy_workqueue(cptpf->vfpf_mbox_wq);
+ return err;
+}
+
+static void cptpf_vfpf_mbox_destroy(struct otx2_cptpf_dev *cptpf)
+{
+ destroy_workqueue(cptpf->vfpf_mbox_wq);
+ otx2_mbox_destroy(&cptpf->vfpf_mbox);
+}
+
+static int cptx_device_reset(struct otx2_cptpf_dev *cptpf, int blkaddr)
+{
+ int timeout = 10, ret;
+ u64 reg = 0;
+
+ ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
+ CPT_AF_BLK_RST, 0x1, blkaddr);
+ if (ret)
+ return ret;
+
+ do {
+ ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
+ CPT_AF_BLK_RST, &reg, blkaddr);
+ if (ret)
+ return ret;
+
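+ /* Reset completes once bit 63 of CPT_AF_BLK_RST clears */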
+ if (!((reg >> 63) & 0x1))
+ break;
+
+ usleep_range(10000, 20000);
+ if (timeout-- < 0)
+ return -EBUSY;
+ } while (1);
+
+ return ret;
+}
+
+static int cptpf_device_reset(struct otx2_cptpf_dev *cptpf)
+{
+ int ret = 0;
+
+ if (cptpf->has_cpt1) {
+ ret = cptx_device_reset(cptpf, BLKADDR_CPT1);
+ if (ret)
+ return ret;
+ }
+ return cptx_device_reset(cptpf, BLKADDR_CPT0);
+}
+
+static void cptpf_check_block_implemented(struct otx2_cptpf_dev *cptpf)
+{
+ u64 cfg;
+
+ cfg = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_CPT1));
+ if (cfg & BIT_ULL(11))
+ cptpf->has_cpt1 = true;
+}
+
+static int cptpf_device_init(struct otx2_cptpf_dev *cptpf)
+{
+ union otx2_cptx_af_constants1 af_cnsts1 = {0};
+ int ret = 0;
+
+ /* check if 'implemented' bit is set for block BLKADDR_CPT1 */
+ cptpf_check_block_implemented(cptpf);
+ /* Reset the CPT PF device */
+ ret = cptpf_device_reset(cptpf);
+ if (ret)
+ return ret;
+
+ /* Get number of SE, IE and AE engines */
+ ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
+ CPT_AF_CONSTANTS1, &af_cnsts1.u,
+ BLKADDR_CPT0);
+ if (ret)
+ return ret;
+
+ cptpf->eng_grps.avail.max_se_cnt = af_cnsts1.s.se;
+ cptpf->eng_grps.avail.max_ie_cnt = af_cnsts1.s.ie;
+ cptpf->eng_grps.avail.max_ae_cnt = af_cnsts1.s.ae;
+
+ /* Disable all cores */
+ ret = otx2_cpt_disable_all_cores(cptpf);
+
+ return ret;
+}
+
+static ssize_t sso_pf_func_ovrd_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", cptpf->sso_pf_func_ovrd);
+}
+
+static ssize_t sso_pf_func_ovrd_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
+ u8 sso_pf_func_ovrd;
+
+ if (kstrtou8(buf, 0, &sso_pf_func_ovrd))
+ return -EINVAL;
+
+ cptpf->sso_pf_func_ovrd = sso_pf_func_ovrd;
+
+ return count;
+}
+
+static ssize_t kvf_limits_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", cptpf->kvf_limits);
+}
+
+static ssize_t kvf_limits_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
+ int lfs_num;
+
+ if (kstrtoint(buf, 0, &lfs_num)) {
+ dev_err(dev, "lfs count must be an integer in range [1 - %d]\n",
+ num_online_cpus());
+ return -EINVAL;
+ }
+ if (lfs_num < 1 || lfs_num > num_online_cpus()) {
+ dev_err(dev, "lfs count %d must be in range [1 - %d]\n",
+ lfs_num, num_online_cpus());
+ return -EINVAL;
+ }
+ cptpf->kvf_limits = lfs_num;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(kvf_limits);
+static DEVICE_ATTR_RW(sso_pf_func_ovrd);
+
+static struct attribute *cptpf_attrs[] = {
+ &dev_attr_kvf_limits.attr,
+ &dev_attr_sso_pf_func_ovrd.attr,
+ NULL
+};
+
+static const struct attribute_group cptpf_sysfs_group = {
+ .attrs = cptpf_attrs,
+};
+
+static int cpt_is_pf_usable(struct otx2_cptpf_dev *cptpf)
+{
+ u64 rev;
+
+ rev = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_RVUM));
+ rev = (rev >> 12) & 0xFF;
+ /*
+ * Check if AF has setup revision for RVUM block, otherwise
+ * driver probe should be deferred until AF driver comes up
+ */
+ if (!rev) {
+ dev_warn(&cptpf->pdev->dev,
+ "AF is not initialized, deferring probe\n");
+ return -EPROBE_DEFER;
+ }
+ return 0;
+}
+
+static int cptpf_sriov_disable(struct pci_dev *pdev)
+{
+ struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);
+ int num_vfs = pci_num_vf(pdev);
+
+ if (!num_vfs)
+ return 0;
+
+ pci_disable_sriov(pdev);
+ cptpf_unregister_vfpf_intr(cptpf, num_vfs);
+ cptpf_flr_wq_destroy(cptpf);
+ cptpf_vfpf_mbox_destroy(cptpf);
+ module_put(THIS_MODULE);
+ cptpf->enabled_vfs = 0;
+
+ return 0;
+}
+
+static int cptpf_sriov_enable(struct pci_dev *pdev, int num_vfs)
+{
+ struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);
+ int ret;
+
+ /* Initialize VF<=>PF mailbox */
+ ret = cptpf_vfpf_mbox_init(cptpf, num_vfs);
+ if (ret)
+ return ret;
+
+ ret = cptpf_flr_wq_init(cptpf, num_vfs);
+ if (ret)
+ goto destroy_mbox;
+ /* Register VF<=>PF mailbox interrupt */
+ ret = cptpf_register_vfpf_intr(cptpf, num_vfs);
+ if (ret)
+ goto destroy_flr;
+
+ /* Get CPT HW capabilities using LOAD_FVC operation. */
+ ret = otx2_cpt_discover_eng_capabilities(cptpf);
+ if (ret)
+ goto disable_intr;
+
+ ret = otx2_cpt_create_eng_grps(cptpf, &cptpf->eng_grps);
+ if (ret)
+ goto disable_intr;
+
+ cptpf->enabled_vfs = num_vfs;
+ ret = pci_enable_sriov(pdev, num_vfs);
+ if (ret)
+ goto disable_intr;
+
+ dev_notice(&cptpf->pdev->dev, "VFs enabled: %d\n", num_vfs);
+
+ try_module_get(THIS_MODULE);
+ return num_vfs;
+
+disable_intr:
+ cptpf_unregister_vfpf_intr(cptpf, num_vfs);
+ cptpf->enabled_vfs = 0;
+destroy_flr:
+ cptpf_flr_wq_destroy(cptpf);
+destroy_mbox:
+ cptpf_vfpf_mbox_destroy(cptpf);
+ return ret;
+}
+
+static int otx2_cptpf_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ if (num_vfs > 0) {
+ return cptpf_sriov_enable(pdev, num_vfs);
+ } else {
+ return cptpf_sriov_disable(pdev);
+ }
+}
+
+static int otx2_cptpf_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ struct otx2_cptpf_dev *cptpf;
+ void __iomem * const *iomap;
+ int err;
+
+ cptpf = devm_kzalloc(dev, sizeof(*cptpf), GFP_KERNEL);
+ if (!cptpf)
+ return -ENOMEM;
+
+ err = pcim_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ goto clear_drvdata;
+ }
+
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "Unable to get usable DMA configuration\n");
+ goto clear_drvdata;
+ }
+ /* Map PF's configuration registers */
+ err = pcim_iomap_regions_request_all(pdev, 1 << PCI_PF_REG_BAR_NUM,
+ OTX2_CPT_DRV_NAME);
+ if (err) {
+ dev_err(dev, "Couldn't get PCI resources 0x%x\n", err);
+ goto clear_drvdata;
+ }
+ pci_set_master(pdev);
+ pci_set_drvdata(pdev, cptpf);
+ cptpf->pdev = pdev;
+
+ iomap = pcim_iomap_table(pdev);
+ if (!iomap) {
+ dev_err(dev, "Failed to get iomap table\n");
+ err = -ENODEV;
+ goto clear_drvdata;
+ }
+ cptpf->reg_base = iomap[PCI_PF_REG_BAR_NUM];
+
+ /* Check if AF driver is up, otherwise defer probe */
+ err = cpt_is_pf_usable(cptpf);
+ if (err)
+ goto clear_drvdata;
+
+ err = pci_alloc_irq_vectors(pdev, RVU_PF_INT_VEC_CNT,
+ RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX);
+ if (err < 0) {
+ dev_err(dev, "Request for %d msix vectors failed\n",
+ RVU_PF_INT_VEC_CNT);
+ goto clear_drvdata;
+ }
+ otx2_cpt_set_hw_caps(pdev, &cptpf->cap_flag);
+ /* Initialize AF-PF mailbox */
+ err = cptpf_afpf_mbox_init(cptpf);
+ if (err)
+ goto clear_drvdata;
+ /* Register mailbox interrupt */
+ err = cptpf_register_afpf_mbox_intr(cptpf);
+ if (err)
+ goto destroy_afpf_mbox;
+
+ cptpf->max_vfs = pci_sriov_get_totalvfs(pdev);
+
+ err = cn10k_cptpf_lmtst_init(cptpf);
+ if (err)
+ goto unregister_intr;
+
+ /* Initialize CPT PF device */
+ err = cptpf_device_init(cptpf);
+ if (err)
+ goto unregister_intr;
+
+ /* Initialize engine groups */
+ err = otx2_cpt_init_eng_grps(pdev, &cptpf->eng_grps);
+ if (err)
+ goto unregister_intr;
+
+ err = sysfs_create_group(&dev->kobj, &cptpf_sysfs_group);
+ if (err)
+ goto cleanup_eng_grps;
+
+ err = otx2_cpt_register_dl(cptpf);
+ if (err)
+ goto sysfs_grp_del;
+
+ return 0;
+
+sysfs_grp_del:
+ sysfs_remove_group(&dev->kobj, &cptpf_sysfs_group);
+cleanup_eng_grps:
+ otx2_cpt_cleanup_eng_grps(pdev, &cptpf->eng_grps);
+unregister_intr:
+ cptpf_disable_afpf_mbox_intr(cptpf);
+destroy_afpf_mbox:
+ cptpf_afpf_mbox_destroy(cptpf);
+clear_drvdata:
+ pci_set_drvdata(pdev, NULL);
+ return err;
+}
+
+static void otx2_cptpf_remove(struct pci_dev *pdev)
+{
+ struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);
+
+ if (!cptpf)
+ return;
+
+ cptpf_sriov_disable(pdev);
+ otx2_cpt_unregister_dl(cptpf);
+ /* Delete sysfs entry created for kernel VF limits */
+ sysfs_remove_group(&pdev->dev.kobj, &cptpf_sysfs_group);
+ /* Cleanup engine groups */
+ otx2_cpt_cleanup_eng_grps(pdev, &cptpf->eng_grps);
+ /* Disable AF-PF mailbox interrupt */
+ cptpf_disable_afpf_mbox_intr(cptpf);
+ /* Destroy AF-PF mbox */
+ cptpf_afpf_mbox_destroy(cptpf);
+ pci_set_drvdata(pdev, NULL);
+}
+
+/* Supported devices */
+static const struct pci_device_id otx2_cpt_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OTX2_CPT_PCI_PF_DEVICE_ID) },
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, CN10K_CPT_PCI_PF_DEVICE_ID) },
+ { 0, } /* end of table */
+};
+
+static struct pci_driver otx2_cpt_pci_driver = {
+ .name = OTX2_CPT_DRV_NAME,
+ .id_table = otx2_cpt_id_table,
+ .probe = otx2_cptpf_probe,
+ .remove = otx2_cptpf_remove,
+ .sriov_configure = otx2_cptpf_sriov_configure
+};
+
+module_pci_driver(otx2_cpt_pci_driver);
+
+MODULE_AUTHOR("Marvell");
+MODULE_DESCRIPTION(OTX2_CPT_DRV_STRING);
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(pci, otx2_cpt_id_table);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
new file mode 100644
index 000000000000..0f45d612a37a
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
@@ -0,0 +1,600 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2018 Marvell. */
+
+#include "otx2_cpt_common.h"
+#include "otx2_cptpf.h"
+#include "rvu_reg.h"
+
+/* Fastpath ipsec opcode with inplace processing */
+#define CPT_INLINE_RX_OPCODE (0x26 | (1 << 6))
+#define CN10K_CPT_INLINE_RX_OPCODE (0x29 | (1 << 6))
+
+#define cpt_inline_rx_opcode(pdev) \
+({ \
+ u8 opcode; \
+ if (is_dev_otx2(pdev)) \
+ opcode = CPT_INLINE_RX_OPCODE; \
+ else \
+ opcode = CN10K_CPT_INLINE_RX_OPCODE; \
+ (opcode); \
+})
+
+/*
+ * CPT PF driver version. It will be incremented by 1 for every feature
+ * addition in CPT mailbox messages.
+ */
+#define OTX2_CPT_PF_DRV_VERSION 0x3
+
+static int forward_to_af(struct otx2_cptpf_dev *cptpf,
+ struct otx2_cptvf_info *vf,
+ struct mbox_msghdr *req, int size)
+{
+ struct mbox_msghdr *msg;
+ int ret;
+
+ mutex_lock(&cptpf->lock);
+ msg = otx2_mbox_alloc_msg(&cptpf->afpf_mbox, 0, size);
+ if (msg == NULL) {
+ mutex_unlock(&cptpf->lock);
+ return -ENOMEM;
+ }
+
+ memcpy((uint8_t *)msg + sizeof(struct mbox_msghdr),
+ (uint8_t *)req + sizeof(struct mbox_msghdr), size);
+ msg->id = req->id;
+ msg->pcifunc = req->pcifunc;
+ msg->sig = req->sig;
+ msg->ver = req->ver;
+
+ ret = otx2_cpt_sync_mbox_msg(&cptpf->afpf_mbox);
+ /* Error code -EIO indicates a communication failure with the AF.
+ * All other error codes mean the AF processed the VF message and
+ * set the error code in the response message (if any), so simply
+ * forward the response to the VF.
+ */
+ if (ret == -EIO) {
+ dev_warn(&cptpf->pdev->dev,
+ "AF not responding to VF%d messages\n", vf->vf_id);
+ mutex_unlock(&cptpf->lock);
+ return ret;
+ }
+ mutex_unlock(&cptpf->lock);
+ return 0;
+}
+
+static int handle_msg_get_caps(struct otx2_cptpf_dev *cptpf,
+ struct otx2_cptvf_info *vf,
+ struct mbox_msghdr *req)
+{
+ struct otx2_cpt_caps_rsp *rsp;
+
+ rsp = (struct otx2_cpt_caps_rsp *)
+ otx2_mbox_alloc_msg(&cptpf->vfpf_mbox, vf->vf_id,
+ sizeof(*rsp));
+ if (!rsp)
+ return -ENOMEM;
+
+ rsp->hdr.id = MBOX_MSG_GET_CAPS;
+ rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
+ rsp->hdr.pcifunc = req->pcifunc;
+ rsp->cpt_pf_drv_version = OTX2_CPT_PF_DRV_VERSION;
+ rsp->cpt_revision = cptpf->pdev->revision;
+ memcpy(&rsp->eng_caps, &cptpf->eng_caps, sizeof(rsp->eng_caps));
+
+ return 0;
+}
+
+static int handle_msg_get_eng_grp_num(struct otx2_cptpf_dev *cptpf,
+ struct otx2_cptvf_info *vf,
+ struct mbox_msghdr *req)
+{
+ struct otx2_cpt_egrp_num_msg *grp_req;
+ struct otx2_cpt_egrp_num_rsp *rsp;
+
+ grp_req = (struct otx2_cpt_egrp_num_msg *)req;
+ rsp = (struct otx2_cpt_egrp_num_rsp *)
+ otx2_mbox_alloc_msg(&cptpf->vfpf_mbox, vf->vf_id, sizeof(*rsp));
+ if (!rsp)
+ return -ENOMEM;
+
+ rsp->hdr.id = MBOX_MSG_GET_ENG_GRP_NUM;
+ rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
+ rsp->hdr.pcifunc = req->pcifunc;
+ rsp->eng_type = grp_req->eng_type;
+ rsp->eng_grp_num = otx2_cpt_get_eng_grp(&cptpf->eng_grps,
+ grp_req->eng_type);
+
+ return 0;
+}
+
+static int handle_msg_kvf_limits(struct otx2_cptpf_dev *cptpf,
+ struct otx2_cptvf_info *vf,
+ struct mbox_msghdr *req)
+{
+ struct otx2_cpt_kvf_limits_rsp *rsp;
+
+ rsp = (struct otx2_cpt_kvf_limits_rsp *)
+ otx2_mbox_alloc_msg(&cptpf->vfpf_mbox, vf->vf_id, sizeof(*rsp));
+ if (!rsp)
+ return -ENOMEM;
+
+ rsp->hdr.id = MBOX_MSG_GET_KVF_LIMITS;
+ rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
+ rsp->hdr.pcifunc = req->pcifunc;
+ rsp->kvf_limits = cptpf->kvf_limits;
+
+ return 0;
+}
+
+static int send_inline_ipsec_inbound_msg(struct otx2_cptpf_dev *cptpf,
+ int sso_pf_func, u8 slot)
+{
+ struct cpt_inline_ipsec_cfg_msg *req;
+ struct pci_dev *pdev = cptpf->pdev;
+
+ req = (struct cpt_inline_ipsec_cfg_msg *)
+ otx2_mbox_alloc_msg_rsp(&cptpf->afpf_mbox, 0,
+ sizeof(*req), sizeof(struct msg_rsp));
+ if (req == NULL) {
+ dev_err(&pdev->dev, "RVU MBOX failed to get message.\n");
+ return -EFAULT;
+ }
+ memset(req, 0, sizeof(*req));
+ req->hdr.id = MBOX_MSG_CPT_INLINE_IPSEC_CFG;
+ req->hdr.sig = OTX2_MBOX_REQ_SIG;
+ req->hdr.pcifunc = OTX2_CPT_RVU_PFFUNC(cptpf->pf_id, 0);
+ req->dir = CPT_INLINE_INBOUND;
+ req->slot = slot;
+ req->sso_pf_func_ovrd = cptpf->sso_pf_func_ovrd;
+ req->sso_pf_func = sso_pf_func;
+ req->enable = 1;
+
+ return otx2_cpt_send_mbox_msg(&cptpf->afpf_mbox, pdev);
+}
+
+static int rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev *cptpf, u8 egrp,
+ struct otx2_cpt_rx_inline_lf_cfg *req)
+{
+ struct nix_inline_ipsec_cfg *nix_req;
+ struct pci_dev *pdev = cptpf->pdev;
+ int ret;
+
+ nix_req = (struct nix_inline_ipsec_cfg *)
+ otx2_mbox_alloc_msg_rsp(&cptpf->afpf_mbox, 0,
+ sizeof(*nix_req),
+ sizeof(struct msg_rsp));
+ if (nix_req == NULL) {
+ dev_err(&pdev->dev, "RVU MBOX failed to get message.\n");
+ return -EFAULT;
+ }
+ memset(nix_req, 0, sizeof(*nix_req));
+ nix_req->hdr.id = MBOX_MSG_NIX_INLINE_IPSEC_CFG;
+ nix_req->hdr.sig = OTX2_MBOX_REQ_SIG;
+ nix_req->enable = 1;
+ nix_req->cpt_credit = OTX2_CPT_INST_QLEN_MSGS - 1;
+ nix_req->gen_cfg.egrp = egrp;
+ nix_req->gen_cfg.opcode = cpt_inline_rx_opcode(pdev);
+ nix_req->gen_cfg.param1 = req->param1;
+ nix_req->gen_cfg.param2 = req->param2;
+ nix_req->inst_qsel.cpt_pf_func = OTX2_CPT_RVU_PFFUNC(cptpf->pf_id, 0);
+ nix_req->inst_qsel.cpt_slot = 0;
+ ret = otx2_cpt_send_mbox_msg(&cptpf->afpf_mbox, pdev);
+ if (ret)
+ return ret;
+
+ if (cptpf->has_cpt1) {
+ ret = send_inline_ipsec_inbound_msg(cptpf, req->sso_pf_func, 1);
+ if (ret)
+ return ret;
+ }
+
+ return send_inline_ipsec_inbound_msg(cptpf, req->sso_pf_func, 0);
+}
+
+static int handle_msg_rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev *cptpf,
+ struct mbox_msghdr *req)
+{
+ struct otx2_cpt_rx_inline_lf_cfg *cfg_req;
+ u8 egrp;
+ int ret;
+
+ cfg_req = (struct otx2_cpt_rx_inline_lf_cfg *)req;
+ if (cptpf->lfs.lfs_num) {
+ dev_err(&cptpf->pdev->dev,
+ "LF is already configured for RX inline ipsec.\n");
+ return -EEXIST;
+ }
+ /*
+ * Allow LFs to execute requests destined only for the IE_TYPES engine
+ * group and set the queue priority of each LF to high
+ */
+ egrp = otx2_cpt_get_eng_grp(&cptpf->eng_grps, OTX2_CPT_IE_TYPES);
+ if (egrp == OTX2_CPT_INVALID_CRYPTO_ENG_GRP) {
+ dev_err(&cptpf->pdev->dev,
+ "Engine group for inline ipsec is not available\n");
+ return -ENOENT;
+ }
+
+ otx2_cptlf_set_dev_info(&cptpf->lfs, cptpf->pdev, cptpf->reg_base,
+ &cptpf->afpf_mbox, BLKADDR_CPT0);
+ ret = otx2_cptlf_init(&cptpf->lfs, 1 << egrp, OTX2_CPT_QUEUE_HI_PRIO,
+ 1);
+ if (ret) {
+ dev_err(&cptpf->pdev->dev,
+ "LF configuration failed for RX inline ipsec.\n");
+ return ret;
+ }
+
+ if (cptpf->has_cpt1) {
+ cptpf->rsrc_req_blkaddr = BLKADDR_CPT1;
+ otx2_cptlf_set_dev_info(&cptpf->cpt1_lfs, cptpf->pdev,
+ cptpf->reg_base, &cptpf->afpf_mbox,
+ BLKADDR_CPT1);
+ ret = otx2_cptlf_init(&cptpf->cpt1_lfs, 1 << egrp,
+ OTX2_CPT_QUEUE_HI_PRIO, 1);
+ if (ret) {
+ dev_err(&cptpf->pdev->dev,
+ "LF configuration failed for RX inline ipsec.\n");
+ goto lf_cleanup;
+ }
+ cptpf->rsrc_req_blkaddr = 0;
+ }
+
+ ret = rx_inline_ipsec_lf_cfg(cptpf, egrp, cfg_req);
+ if (ret)
+ goto lf1_cleanup;
+
+ return 0;
+
+lf1_cleanup:
+ otx2_cptlf_shutdown(&cptpf->cpt1_lfs);
+lf_cleanup:
+ otx2_cptlf_shutdown(&cptpf->lfs);
+ return ret;
+}
+
+static int cptpf_handle_vf_req(struct otx2_cptpf_dev *cptpf,
+ struct otx2_cptvf_info *vf,
+ struct mbox_msghdr *req, int size)
+{
+ int err = 0;
+
+ /* Check if msg is valid, if not reply with an invalid msg */
+ if (req->sig != OTX2_MBOX_REQ_SIG)
+ goto inval_msg;
+
+ switch (req->id) {
+ case MBOX_MSG_GET_ENG_GRP_NUM:
+ err = handle_msg_get_eng_grp_num(cptpf, vf, req);
+ break;
+ case MBOX_MSG_GET_CAPS:
+ err = handle_msg_get_caps(cptpf, vf, req);
+ break;
+ case MBOX_MSG_GET_KVF_LIMITS:
+ err = handle_msg_kvf_limits(cptpf, vf, req);
+ break;
+ case MBOX_MSG_RX_INLINE_IPSEC_LF_CFG:
+ err = handle_msg_rx_inline_ipsec_lf_cfg(cptpf, req);
+ break;
+
+ default:
+ err = forward_to_af(cptpf, vf, req, size);
+ break;
+ }
+ return err;
+
+inval_msg:
+ otx2_reply_invalid_msg(&cptpf->vfpf_mbox, vf->vf_id, 0, req->id);
+ otx2_mbox_msg_send(&cptpf->vfpf_mbox, vf->vf_id);
+ return err;
+}
+
+irqreturn_t otx2_cptpf_vfpf_mbox_intr(int __always_unused irq, void *arg)
+{
+ struct otx2_cptpf_dev *cptpf = arg;
+ struct otx2_cptvf_info *vf;
+ int i, vf_idx;
+ u64 intr;
+
+ /*
+ * Check which VF has raised an interrupt and schedule
+ * corresponding work queue to process the messages
+ */
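+ /* Each VF-PF mailbox interrupt register covers 64 VFs, so scan both registers */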
+ for (i = 0; i < 2; i++) {
+ /* Read the interrupt bits */
+ intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFPF_MBOX_INTX(i));
+
+ for (vf_idx = i * 64; vf_idx < cptpf->enabled_vfs; vf_idx++) {
+ vf = &cptpf->vf[vf_idx];
+ if (intr & (1ULL << vf->intr_idx)) {
+ queue_work(cptpf->vfpf_mbox_wq,
+ &vf->vfpf_mbox_work);
+ /* Clear the interrupt */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM,
+ 0, RVU_PF_VFPF_MBOX_INTX(i),
+ BIT_ULL(vf->intr_idx));
+ }
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+void otx2_cptpf_vfpf_mbox_handler(struct work_struct *work)
+{
+ struct otx2_cptpf_dev *cptpf;
+ struct otx2_cptvf_info *vf;
+ struct otx2_mbox_dev *mdev;
+ struct mbox_hdr *req_hdr;
+ struct mbox_msghdr *msg;
+ struct otx2_mbox *mbox;
+ int offset, i, err;
+
+ vf = container_of(work, struct otx2_cptvf_info, vfpf_mbox_work);
+ cptpf = vf->cptpf;
+ mbox = &cptpf->vfpf_mbox;
+ /* sync with mbox memory region */
+ smp_rmb();
+ mdev = &mbox->dev[vf->vf_id];
+ /* Process received mbox messages */
+ req_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
+
+ for (i = 0; i < req_hdr->num_msgs; i++) {
+ msg = (struct mbox_msghdr *)(mdev->mbase + offset);
+
+ /* Set which VF sent this message based on mbox IRQ */
+ msg->pcifunc = ((u16)cptpf->pf_id << RVU_PFVF_PF_SHIFT) |
+ ((vf->vf_id + 1) & RVU_PFVF_FUNC_MASK);
+
+ err = cptpf_handle_vf_req(cptpf, vf, msg,
+ msg->next_msgoff - offset);
+ /*
+ * Behave like the AF: drop the message if there is no memory;
+ * timeout handling also ends up here
+ */
+ if (err == -ENOMEM || err == -EIO)
+ break;
+ offset = msg->next_msgoff;
+ /* Write barrier required for VF responses which are handled by
+ * PF driver and not forwarded to AF.
+ */
+ smp_wmb();
+ }
+ /* Send mbox responses to VF */
+ if (mdev->num_msgs)
+ otx2_mbox_msg_send(mbox, vf->vf_id);
+}
+
+irqreturn_t otx2_cptpf_afpf_mbox_intr(int __always_unused irq, void *arg)
+{
+ struct otx2_cptpf_dev *cptpf = arg;
+ struct otx2_mbox_dev *mdev;
+ struct otx2_mbox *mbox;
+ struct mbox_hdr *hdr;
+ u64 intr;
+
+ /* Read the interrupt bits */
+ intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT);
+
+ if (intr & 0x1ULL) {
+ mbox = &cptpf->afpf_mbox;
+ mdev = &mbox->dev[0];
+ hdr = mdev->mbase + mbox->rx_start;
+ if (hdr->num_msgs)
+ /* Schedule work queue function to process the MBOX request */
+ queue_work(cptpf->afpf_mbox_wq, &cptpf->afpf_mbox_work);
+
+ mbox = &cptpf->afpf_mbox_up;
+ mdev = &mbox->dev[0];
+ hdr = mdev->mbase + mbox->rx_start;
+ if (hdr->num_msgs)
+ /* Schedule work queue function to process the MBOX request */
+ queue_work(cptpf->afpf_mbox_wq, &cptpf->afpf_mbox_up_work);
+ /* Clear and ack the interrupt */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT,
+ 0x1ULL);
+ }
+ return IRQ_HANDLED;
+}
+
+static void process_afpf_mbox_msg(struct otx2_cptpf_dev *cptpf,
+ struct mbox_msghdr *msg)
+{
+ struct otx2_cptlfs_info *lfs = &cptpf->lfs;
+ struct device *dev = &cptpf->pdev->dev;
+ struct cpt_rd_wr_reg_msg *rsp_rd_wr;
+
+ if (msg->id >= MBOX_MSG_MAX) {
+ dev_err(dev, "MBOX msg with unknown ID %d\n", msg->id);
+ return;
+ }
+ if (msg->sig != OTX2_MBOX_RSP_SIG) {
+ dev_err(dev, "MBOX msg with wrong signature %x, ID %d\n",
+ msg->sig, msg->id);
+ return;
+ }
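+ /* Responses to requests issued for CPT1 update the CPT1 LF state */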
+ if (cptpf->rsrc_req_blkaddr == BLKADDR_CPT1)
+ lfs = &cptpf->cpt1_lfs;
+
+ switch (msg->id) {
+ case MBOX_MSG_READY:
+ cptpf->pf_id = (msg->pcifunc >> RVU_PFVF_PF_SHIFT) &
+ RVU_PFVF_PF_MASK;
+ break;
+ case MBOX_MSG_CPT_RD_WR_REGISTER:
+ rsp_rd_wr = (struct cpt_rd_wr_reg_msg *)msg;
+ if (msg->rc) {
+ dev_err(dev, "Reg %llx rd/wr(%d) failed %d\n",
+ rsp_rd_wr->reg_offset, rsp_rd_wr->is_write,
+ msg->rc);
+ return;
+ }
+ if (!rsp_rd_wr->is_write)
+ *rsp_rd_wr->ret_val = rsp_rd_wr->val;
+ break;
+ case MBOX_MSG_ATTACH_RESOURCES:
+ if (!msg->rc)
+ lfs->are_lfs_attached = 1;
+ break;
+ case MBOX_MSG_DETACH_RESOURCES:
+ if (!msg->rc)
+ lfs->are_lfs_attached = 0;
+ break;
+ case MBOX_MSG_CPT_INLINE_IPSEC_CFG:
+ case MBOX_MSG_NIX_INLINE_IPSEC_CFG:
+ break;
+
+ default:
+ dev_err(dev,
+ "Unsupported msg %d received.\n", msg->id);
+ break;
+ }
+}
+
+static void forward_to_vf(struct otx2_cptpf_dev *cptpf, struct mbox_msghdr *msg,
+ int vf_id, int size)
+{
+ struct otx2_mbox *vfpf_mbox;
+ struct mbox_msghdr *fwd;
+
+ if (msg->id >= MBOX_MSG_MAX) {
+ dev_err(&cptpf->pdev->dev,
+ "MBOX msg with unknown ID %d\n", msg->id);
+ return;
+ }
+ if (msg->sig != OTX2_MBOX_RSP_SIG) {
+ dev_err(&cptpf->pdev->dev,
+ "MBOX msg with wrong signature %x, ID %d\n",
+ msg->sig, msg->id);
+ return;
+ }
+ vfpf_mbox = &cptpf->vfpf_mbox;
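+ /* VF numbers in pcifunc start at 1; mailbox channels are zero-based */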
+ vf_id--;
+ if (vf_id >= cptpf->enabled_vfs) {
+ dev_err(&cptpf->pdev->dev,
+ "MBOX msg to unknown VF: %d >= %d\n",
+ vf_id, cptpf->enabled_vfs);
+ return;
+ }
+ if (msg->id == MBOX_MSG_VF_FLR)
+ return;
+
+ fwd = otx2_mbox_alloc_msg(vfpf_mbox, vf_id, size);
+ if (!fwd) {
+ dev_err(&cptpf->pdev->dev,
+ "Forwarding to VF%d failed.\n", vf_id);
+ return;
+ }
+ memcpy((uint8_t *)fwd + sizeof(struct mbox_msghdr),
+ (uint8_t *)msg + sizeof(struct mbox_msghdr), size);
+ fwd->id = msg->id;
+ fwd->pcifunc = msg->pcifunc;
+ fwd->sig = msg->sig;
+ fwd->ver = msg->ver;
+ fwd->rc = msg->rc;
+}
+
+/* Handle mailbox messages received from AF */
+void otx2_cptpf_afpf_mbox_handler(struct work_struct *work)
+{
+ struct otx2_cptpf_dev *cptpf;
+ struct otx2_mbox *afpf_mbox;
+ struct otx2_mbox_dev *mdev;
+ struct mbox_hdr *rsp_hdr;
+ struct mbox_msghdr *msg;
+ int offset, vf_id, i;
+
+ cptpf = container_of(work, struct otx2_cptpf_dev, afpf_mbox_work);
+ afpf_mbox = &cptpf->afpf_mbox;
+ mdev = &afpf_mbox->dev[0];
+ /* Sync mbox data into memory */
+ smp_wmb();
+
+ rsp_hdr = (struct mbox_hdr *)(mdev->mbase + afpf_mbox->rx_start);
+ offset = ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
+
+ for (i = 0; i < rsp_hdr->num_msgs; i++) {
+ msg = (struct mbox_msghdr *)(mdev->mbase + afpf_mbox->rx_start +
+ offset);
+ vf_id = (msg->pcifunc >> RVU_PFVF_FUNC_SHIFT) &
+ RVU_PFVF_FUNC_MASK;
+ if (vf_id > 0)
+ forward_to_vf(cptpf, msg, vf_id,
+ msg->next_msgoff - offset);
+ else
+ process_afpf_mbox_msg(cptpf, msg);
+
+ offset = msg->next_msgoff;
+ /* Sync VF response ready to be sent */
+ smp_wmb();
+ mdev->msgs_acked++;
+ }
+ otx2_mbox_reset(afpf_mbox, 0);
+}
+
+static void handle_msg_cpt_inst_lmtst(struct otx2_cptpf_dev *cptpf,
+ struct mbox_msghdr *msg)
+{
+ struct cpt_inst_lmtst_req *req = (struct cpt_inst_lmtst_req *)msg;
+ struct otx2_cptlfs_info *lfs = &cptpf->lfs;
+ struct msg_rsp *rsp;
+
+ if (cptpf->lfs.lfs_num)
+ lfs->ops->send_cmd((union otx2_cpt_inst_s *)req->inst, 1,
+ &lfs->lf[0]);
+
+ rsp = (struct msg_rsp *)otx2_mbox_alloc_msg(&cptpf->afpf_mbox_up, 0,
+ sizeof(*rsp));
+ if (!rsp)
+ return;
+
+ rsp->hdr.id = msg->id;
+ rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
+ rsp->hdr.pcifunc = 0;
+ rsp->hdr.rc = 0;
+}
+
+static void process_afpf_mbox_up_msg(struct otx2_cptpf_dev *cptpf,
+ struct mbox_msghdr *msg)
+{
+ if (msg->id >= MBOX_MSG_MAX) {
+ dev_err(&cptpf->pdev->dev,
+ "MBOX msg with unknown ID %d\n", msg->id);
+ return;
+ }
+
+ switch (msg->id) {
+ case MBOX_MSG_CPT_INST_LMTST:
+ handle_msg_cpt_inst_lmtst(cptpf, msg);
+ break;
+ default:
+ otx2_reply_invalid_msg(&cptpf->afpf_mbox_up, 0, 0, msg->id);
+ }
+}
+
+void otx2_cptpf_afpf_mbox_up_handler(struct work_struct *work)
+{
+ struct otx2_cptpf_dev *cptpf;
+ struct otx2_mbox_dev *mdev;
+ struct mbox_hdr *rsp_hdr;
+ struct mbox_msghdr *msg;
+ struct otx2_mbox *mbox;
+ int offset, i;
+
+ cptpf = container_of(work, struct otx2_cptpf_dev, afpf_mbox_up_work);
+ mbox = &cptpf->afpf_mbox_up;
+ mdev = &mbox->dev[0];
+ /* Sync mbox data into memory */
+ smp_wmb();
+
+ rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
+
+ for (i = 0; i < rsp_hdr->num_msgs; i++) {
+ msg = (struct mbox_msghdr *)(mdev->mbase + offset);
+
+ process_afpf_mbox_up_msg(cptpf, msg);
+
+ offset = mbox->rx_start + msg->next_msgoff;
+ }
+ otx2_mbox_msg_send(mbox, 0);
+}
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
new file mode 100644
index 000000000000..f2985a26267b
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
@@ -0,0 +1,1885 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2018 Marvell. */
+
+#include <linux/ctype.h>
+#include <linux/firmware.h>
+#include "otx2_cptpf_ucode.h"
+#include "otx2_cpt_common.h"
+#include "otx2_cptpf.h"
+#include "otx2_cptlf.h"
+#include "otx2_cpt_reqmgr.h"
+#include "rvu_reg.h"
+
+#define CSR_DELAY 30
+
+#define LOADFVC_RLEN 8
+#define LOADFVC_MAJOR_OP 0x01
+#define LOADFVC_MINOR_OP 0x08
+
+/*
+ * Interval to flush dirty data for the next CTX entry. The interval is
+ * measured in increments of 10ns (interval time = CTX_FLUSH_TIMER_CNT * 10ns).
+ */
+#define CTX_FLUSH_TIMER_CNT 0xFFFFFF
+
+struct fw_info_t {
+ struct list_head ucodes;
+};
+
+static struct otx2_cpt_bitmap get_cores_bmap(struct device *dev,
+ struct otx2_cpt_eng_grp_info *eng_grp)
+{
+ struct otx2_cpt_bitmap bmap = { {0} };
+ bool found = false;
+ int i;
+
+ if (eng_grp->g->engs_num > OTX2_CPT_MAX_ENGINES) {
+ dev_err(dev, "unsupported number of engines %d on octeontx2\n",
+ eng_grp->g->engs_num);
+ return bmap;
+ }
+
+ for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
+ if (eng_grp->engs[i].type) {
+ bitmap_or(bmap.bits, bmap.bits,
+ eng_grp->engs[i].bmap,
+ eng_grp->g->engs_num);
+ bmap.size = eng_grp->g->engs_num;
+ found = true;
+ }
+ }
+
+ if (!found)
+ dev_err(dev, "No engines reserved for engine group %d\n",
+ eng_grp->idx);
+ return bmap;
+}
+
+static int is_eng_type(int val, int eng_type)
+{
+ return val & (1 << eng_type);
+}
+
+static int is_2nd_ucode_used(struct otx2_cpt_eng_grp_info *eng_grp)
+{
+ if (eng_grp->ucode[1].type)
+ return true;
+ else
+ return false;
+}
+
+static void set_ucode_filename(struct otx2_cpt_ucode *ucode,
+ const char *filename)
+{
+ strlcpy(ucode->filename, filename, OTX2_CPT_NAME_LENGTH);
+}
+
+static char *get_eng_type_str(int eng_type)
+{
+ char *str = "unknown";
+
+ switch (eng_type) {
+ case OTX2_CPT_SE_TYPES:
+ str = "SE";
+ break;
+
+ case OTX2_CPT_IE_TYPES:
+ str = "IE";
+ break;
+
+ case OTX2_CPT_AE_TYPES:
+ str = "AE";
+ break;
+ }
+ return str;
+}
+
+static char *get_ucode_type_str(int ucode_type)
+{
+ char *str = "unknown";
+
+ switch (ucode_type) {
+ case (1 << OTX2_CPT_SE_TYPES):
+ str = "SE";
+ break;
+
+ case (1 << OTX2_CPT_IE_TYPES):
+ str = "IE";
+ break;
+
+ case (1 << OTX2_CPT_AE_TYPES):
+ str = "AE";
+ break;
+
+ case (1 << OTX2_CPT_SE_TYPES | 1 << OTX2_CPT_IE_TYPES):
+ str = "SE+IPSEC";
+ break;
+ }
+ return str;
+}
+
+static int get_ucode_type(struct device *dev,
+ struct otx2_cpt_ucode_hdr *ucode_hdr,
+ int *ucode_type)
+{
+ struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
+ char ver_str_prefix[OTX2_CPT_UCODE_VER_STR_SZ];
+ char tmp_ver_str[OTX2_CPT_UCODE_VER_STR_SZ];
+ struct pci_dev *pdev = cptpf->pdev;
+ int i, val = 0;
+ u8 nn;
+
+ strlcpy(tmp_ver_str, ucode_hdr->ver_str, OTX2_CPT_UCODE_VER_STR_SZ);
+ for (i = 0; i < strlen(tmp_ver_str); i++)
+ tmp_ver_str[i] = tolower(tmp_ver_str[i]);
+
+ sprintf(ver_str_prefix, "ocpt-%02d", pdev->revision);
+ if (!strnstr(tmp_ver_str, ver_str_prefix, OTX2_CPT_UCODE_VER_STR_SZ))
+ return -EINVAL;
+
+ nn = ucode_hdr->ver_num.nn;
+ if (strnstr(tmp_ver_str, "se-", OTX2_CPT_UCODE_VER_STR_SZ) &&
+ (nn == OTX2_CPT_SE_UC_TYPE1 || nn == OTX2_CPT_SE_UC_TYPE2 ||
+ nn == OTX2_CPT_SE_UC_TYPE3))
+ val |= 1 << OTX2_CPT_SE_TYPES;
+ if (strnstr(tmp_ver_str, "ie-", OTX2_CPT_UCODE_VER_STR_SZ) &&
+ (nn == OTX2_CPT_IE_UC_TYPE1 || nn == OTX2_CPT_IE_UC_TYPE2 ||
+ nn == OTX2_CPT_IE_UC_TYPE3))
+ val |= 1 << OTX2_CPT_IE_TYPES;
+ if (strnstr(tmp_ver_str, "ae", OTX2_CPT_UCODE_VER_STR_SZ) &&
+ nn == OTX2_CPT_AE_UC_TYPE)
+ val |= 1 << OTX2_CPT_AE_TYPES;
+
+ *ucode_type = val;
+
+ if (!val)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int __write_ucode_base(struct otx2_cptpf_dev *cptpf, int eng,
+ dma_addr_t dma_addr, int blkaddr)
+{
+ return otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
+ CPT_AF_EXEX_UCODE_BASE(eng),
+ (u64)dma_addr, blkaddr);
+}
+
+static int cptx_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp,
+ struct otx2_cptpf_dev *cptpf, int blkaddr)
+{
+ struct otx2_cpt_engs_rsvd *engs;
+ dma_addr_t dma_addr;
+ int i, bit, ret;
+
+ /* Set PF number for microcode fetches */
+ ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
+ CPT_AF_PF_FUNC,
+ cptpf->pf_id << RVU_PFVF_PF_SHIFT, blkaddr);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
+ engs = &eng_grp->engs[i];
+ if (!engs->type)
+ continue;
+
+ dma_addr = engs->ucode->dma;
+
+ /*
+ * Set UCODE_BASE only for the cores which are not used,
+ * other cores should already have a valid UCODE_BASE set
+ */
+ for_each_set_bit(bit, engs->bmap, eng_grp->g->engs_num)
+ if (!eng_grp->g->eng_ref_cnt[bit]) {
+ ret = __write_ucode_base(cptpf, bit, dma_addr,
+ blkaddr);
+ if (ret)
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static int cpt_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp, void *obj)
+{
+ struct otx2_cptpf_dev *cptpf = obj;
+ int ret;
+
+ if (cptpf->has_cpt1) {
+ ret = cptx_set_ucode_base(eng_grp, cptpf, BLKADDR_CPT1);
+ if (ret)
+ return ret;
+ }
+ return cptx_set_ucode_base(eng_grp, cptpf, BLKADDR_CPT0);
+}
+
+static int cptx_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
+ struct otx2_cptpf_dev *cptpf,
+ struct otx2_cpt_bitmap bmap,
+ int blkaddr)
+{
+ int i, timeout = 10;
+ int busy, ret;
+ u64 reg = 0;
+
+ /* Detach the cores from group */
+ for_each_set_bit(i, bmap.bits, bmap.size) {
+ ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
+ CPT_AF_EXEX_CTL2(i), &reg, blkaddr);
+ if (ret)
+ return ret;
+
+ if (reg & (1ull << eng_grp->idx)) {
+ eng_grp->g->eng_ref_cnt[i]--;
+ reg &= ~(1ull << eng_grp->idx);
+
+ ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
+ cptpf->pdev,
+ CPT_AF_EXEX_CTL2(i), reg,
+ blkaddr);
+ if (ret)
+ return ret;
+ }
+ }
+
+ /* Wait for cores to become idle */
+ do {
+ busy = 0;
+ usleep_range(10000, 20000);
+ if (timeout-- < 0)
+ return -EBUSY;
+
+ for_each_set_bit(i, bmap.bits, bmap.size) {
+ ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox,
+ cptpf->pdev,
+ CPT_AF_EXEX_STS(i), &reg,
+ blkaddr);
+ if (ret)
+ return ret;
+
+ if (reg & 0x1) {
+ busy = 1;
+ break;
+ }
+ }
+ } while (busy);
+
+ /* Disable the cores only if they are not used anymore */
+ for_each_set_bit(i, bmap.bits, bmap.size) {
+ if (!eng_grp->g->eng_ref_cnt[i]) {
+ ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
+ cptpf->pdev,
+ CPT_AF_EXEX_CTL(i), 0x0,
+ blkaddr);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
+ void *obj)
+{
+ struct otx2_cptpf_dev *cptpf = obj;
+ struct otx2_cpt_bitmap bmap;
+ int ret;
+
+ bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp);
+ if (!bmap.size)
+ return -EINVAL;
+
+ if (cptpf->has_cpt1) {
+ ret = cptx_detach_and_disable_cores(eng_grp, cptpf, bmap,
+ BLKADDR_CPT1);
+ if (ret)
+ return ret;
+ }
+ return cptx_detach_and_disable_cores(eng_grp, cptpf, bmap,
+ BLKADDR_CPT0);
+}
+
+static int cptx_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
+ struct otx2_cptpf_dev *cptpf,
+ struct otx2_cpt_bitmap bmap,
+ int blkaddr)
+{
+ u64 reg = 0;
+ int i, ret;
+
+ /* Attach the cores to the group */
+ for_each_set_bit(i, bmap.bits, bmap.size) {
+ ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
+ CPT_AF_EXEX_CTL2(i), &reg, blkaddr);
+ if (ret)
+ return ret;
+
+ if (!(reg & (1ull << eng_grp->idx))) {
+ eng_grp->g->eng_ref_cnt[i]++;
+ reg |= 1ull << eng_grp->idx;
+
+ ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
+ cptpf->pdev,
+ CPT_AF_EXEX_CTL2(i), reg,
+ blkaddr);
+ if (ret)
+ return ret;
+ }
+ }
+
+ /* Enable the cores */
+ for_each_set_bit(i, bmap.bits, bmap.size) {
+ ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
+ CPT_AF_EXEX_CTL(i), 0x1,
+ blkaddr);
+ if (ret)
+ return ret;
+ }
+ return otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
+}
+
+static int cpt_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
+ void *obj)
+{
+ struct otx2_cptpf_dev *cptpf = obj;
+ struct otx2_cpt_bitmap bmap;
+ int ret;
+
+ bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp);
+ if (!bmap.size)
+ return -EINVAL;
+
+ if (cptpf->has_cpt1) {
+ ret = cptx_attach_and_enable_cores(eng_grp, cptpf, bmap,
+ BLKADDR_CPT1);
+ if (ret)
+ return ret;
+ }
+ return cptx_attach_and_enable_cores(eng_grp, cptpf, bmap, BLKADDR_CPT0);
+}
+
+static int load_fw(struct device *dev, struct fw_info_t *fw_info,
+ char *filename)
+{
+ struct otx2_cpt_ucode_hdr *ucode_hdr;
+ struct otx2_cpt_uc_info_t *uc_info;
+ int ucode_type, ucode_size;
+ int ret;
+
+ uc_info = kzalloc(sizeof(*uc_info), GFP_KERNEL);
+ if (!uc_info)
+ return -ENOMEM;
+
+ ret = request_firmware(&uc_info->fw, filename, dev);
+ if (ret)
+ goto free_uc_info;
+
+ ucode_hdr = (struct otx2_cpt_ucode_hdr *)uc_info->fw->data;
+ ret = get_ucode_type(dev, ucode_hdr, &ucode_type);
+ if (ret)
+ goto release_fw;
+
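+ /* code_length is a count of 2-byte words; convert it to bytes */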
+ ucode_size = ntohl(ucode_hdr->code_length) * 2;
+ if (!ucode_size) {
+ dev_err(dev, "Ucode %s invalid size\n", filename);
+ ret = -EINVAL;
+ goto release_fw;
+ }
+
+ set_ucode_filename(&uc_info->ucode, filename);
+ memcpy(uc_info->ucode.ver_str, ucode_hdr->ver_str,
+ OTX2_CPT_UCODE_VER_STR_SZ);
+ uc_info->ucode.ver_num = ucode_hdr->ver_num;
+ uc_info->ucode.type = ucode_type;
+ uc_info->ucode.size = ucode_size;
+ list_add_tail(&uc_info->list, &fw_info->ucodes);
+
+ return 0;
+
+release_fw:
+ release_firmware(uc_info->fw);
+free_uc_info:
+ kfree(uc_info);
+ return ret;
+}
+
+static void cpt_ucode_release_fw(struct fw_info_t *fw_info)
+{
+ struct otx2_cpt_uc_info_t *curr, *temp;
+
+ if (!fw_info)
+ return;
+
+ list_for_each_entry_safe(curr, temp, &fw_info->ucodes, list) {
+ list_del(&curr->list);
+ release_firmware(curr->fw);
+ kfree(curr);
+ }
+}
+
+static struct otx2_cpt_uc_info_t *get_ucode(struct fw_info_t *fw_info,
+ int ucode_type)
+{
+ struct otx2_cpt_uc_info_t *curr;
+
+ list_for_each_entry(curr, &fw_info->ucodes, list) {
+ if (!is_eng_type(curr->ucode.type, ucode_type))
+ continue;
+
+ return curr;
+ }
+ return NULL;
+}
+
+static void print_uc_info(struct fw_info_t *fw_info)
+{
+ struct otx2_cpt_uc_info_t *curr;
+
+ list_for_each_entry(curr, &fw_info->ucodes, list) {
+ pr_debug("Ucode filename %s\n", curr->ucode.filename);
+ pr_debug("Ucode version string %s\n", curr->ucode.ver_str);
+ pr_debug("Ucode version %d.%d.%d.%d\n",
+ curr->ucode.ver_num.nn, curr->ucode.ver_num.xx,
+ curr->ucode.ver_num.yy, curr->ucode.ver_num.zz);
+ pr_debug("Ucode type (%d) %s\n", curr->ucode.type,
+ get_ucode_type_str(curr->ucode.type));
+ pr_debug("Ucode size %d\n", curr->ucode.size);
+ pr_debug("Ucode ptr %p\n", curr->fw->data);
+ }
+}
+
+static int cpt_ucode_load_fw(struct pci_dev *pdev, struct fw_info_t *fw_info)
+{
+ char filename[OTX2_CPT_NAME_LENGTH];
+ char eng_type[8] = {0};
+ int ret, e, i;
+
+ INIT_LIST_HEAD(&fw_info->ucodes);
+
+ for (e = 1; e < OTX2_CPT_MAX_ENG_TYPES; e++) {
+ strcpy(eng_type, get_eng_type_str(e));
+ for (i = 0; i < strlen(eng_type); i++)
+ eng_type[i] = tolower(eng_type[i]);
+
+ snprintf(filename, sizeof(filename), "mrvl/cpt%02d/%s.out",
+ pdev->revision, eng_type);
+ /* Request firmware for each engine type */
+ ret = load_fw(&pdev->dev, fw_info, filename);
+ if (ret)
+ goto release_fw;
+ }
+ print_uc_info(fw_info);
+ return 0;
+
+release_fw:
+ cpt_ucode_release_fw(fw_info);
+ return ret;
+}
+
+static struct otx2_cpt_engs_rsvd *find_engines_by_type(
+ struct otx2_cpt_eng_grp_info *eng_grp,
+ int eng_type)
+{
+ int i;
+
+ for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
+ if (!eng_grp->engs[i].type)
+ continue;
+
+ if (eng_grp->engs[i].type == eng_type)
+ return &eng_grp->engs[i];
+ }
+ return NULL;
+}
+
+static int eng_grp_has_eng_type(struct otx2_cpt_eng_grp_info *eng_grp,
+ int eng_type)
+{
+ struct otx2_cpt_engs_rsvd *engs;
+
+ engs = find_engines_by_type(eng_grp, eng_type);
+
+ return (engs != NULL ? 1 : 0);
+}
+
+static int update_engines_avail_count(struct device *dev,
+ struct otx2_cpt_engs_available *avail,
+ struct otx2_cpt_engs_rsvd *engs, int val)
+{
+ switch (engs->type) {
+ case OTX2_CPT_SE_TYPES:
+ avail->se_cnt += val;
+ break;
+
+ case OTX2_CPT_IE_TYPES:
+ avail->ie_cnt += val;
+ break;
+
+ case OTX2_CPT_AE_TYPES:
+ avail->ae_cnt += val;
+ break;
+
+ default:
+ dev_err(dev, "Invalid engine type %d\n", engs->type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int update_engines_offset(struct device *dev,
+ struct otx2_cpt_engs_available *avail,
+ struct otx2_cpt_engs_rsvd *engs)
+{
+ switch (engs->type) {
+ case OTX2_CPT_SE_TYPES:
+ engs->offset = 0;
+ break;
+
+ case OTX2_CPT_IE_TYPES:
+ engs->offset = avail->max_se_cnt;
+ break;
+
+ case OTX2_CPT_AE_TYPES:
+ engs->offset = avail->max_se_cnt + avail->max_ie_cnt;
+ break;
+
+ default:
+ dev_err(dev, "Invalid engine type %d\n", engs->type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int release_engines(struct device *dev,
+ struct otx2_cpt_eng_grp_info *grp)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
+ if (!grp->engs[i].type)
+ continue;
+
+ if (grp->engs[i].count > 0) {
+ ret = update_engines_avail_count(dev, &grp->g->avail,
+ &grp->engs[i],
+ grp->engs[i].count);
+ if (ret)
+ return ret;
+ }
+
+ grp->engs[i].type = 0;
+ grp->engs[i].count = 0;
+ grp->engs[i].offset = 0;
+ grp->engs[i].ucode = NULL;
+ bitmap_zero(grp->engs[i].bmap, grp->g->engs_num);
+ }
+ return 0;
+}
+
+static int do_reserve_engines(struct device *dev,
+ struct otx2_cpt_eng_grp_info *grp,
+ struct otx2_cpt_engines *req_engs)
+{
+ struct otx2_cpt_engs_rsvd *engs = NULL;
+ int i, ret;
+
+ for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
+ if (!grp->engs[i].type) {
+ engs = &grp->engs[i];
+ break;
+ }
+ }
+
+ if (!engs)
+ return -ENOMEM;
+
+ engs->type = req_engs->type;
+ engs->count = req_engs->count;
+
+ ret = update_engines_offset(dev, &grp->g->avail, engs);
+ if (ret)
+ return ret;
+
+ if (engs->count > 0) {
+ ret = update_engines_avail_count(dev, &grp->g->avail, engs,
+ -engs->count);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int check_engines_availability(struct device *dev,
+ struct otx2_cpt_eng_grp_info *grp,
+ struct otx2_cpt_engines *req_eng)
+{
+ int avail_cnt = 0;
+
+ switch (req_eng->type) {
+ case OTX2_CPT_SE_TYPES:
+ avail_cnt = grp->g->avail.se_cnt;
+ break;
+
+ case OTX2_CPT_IE_TYPES:
+ avail_cnt = grp->g->avail.ie_cnt;
+ break;
+
+ case OTX2_CPT_AE_TYPES:
+ avail_cnt = grp->g->avail.ae_cnt;
+ break;
+
+ default:
+ dev_err(dev, "Invalid engine type %d\n", req_eng->type);
+ return -EINVAL;
+ }
+
+ if (avail_cnt < req_eng->count) {
+ dev_err(dev,
+ "Error available %s engines %d < than requested %d\n",
+ get_eng_type_str(req_eng->type),
+ avail_cnt, req_eng->count);
+ return -EBUSY;
+ }
+ return 0;
+}
+
+static int reserve_engines(struct device *dev,
+ struct otx2_cpt_eng_grp_info *grp,
+ struct otx2_cpt_engines *req_engs, int ucodes_cnt)
+{
+ int i, ret = 0;
+
+ /* Validate that the requested number of engines is available */
+ for (i = 0; i < ucodes_cnt; i++) {
+ ret = check_engines_availability(dev, grp, &req_engs[i]);
+ if (ret)
+ return ret;
+ }
+
+ /* Reserve requested engines for this engine group */
+ for (i = 0; i < ucodes_cnt; i++) {
+ ret = do_reserve_engines(dev, grp, &req_engs[i]);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static void ucode_unload(struct device *dev, struct otx2_cpt_ucode *ucode)
+{
+ if (ucode->va) {
+ dma_free_coherent(dev, OTX2_CPT_UCODE_SZ, ucode->va,
+ ucode->dma);
+ ucode->va = NULL;
+ ucode->dma = 0;
+ ucode->size = 0;
+ }
+
+ memset(&ucode->ver_str, 0, OTX2_CPT_UCODE_VER_STR_SZ);
+ memset(&ucode->ver_num, 0, sizeof(struct otx2_cpt_ucode_ver_num));
+ set_ucode_filename(ucode, "");
+ ucode->type = 0;
+}
+
+static int copy_ucode_to_dma_mem(struct device *dev,
+ struct otx2_cpt_ucode *ucode,
+ const u8 *ucode_data)
+{
+ u32 i;
+
+ /* Allocate DMAable space */
+ ucode->va = dma_alloc_coherent(dev, OTX2_CPT_UCODE_SZ, &ucode->dma,
+ GFP_KERNEL);
+ if (!ucode->va)
+ return -ENOMEM;
+
+ memcpy(ucode->va, ucode_data + sizeof(struct otx2_cpt_ucode_hdr),
+ ucode->size);
+
+ /* Byte swap 64-bit */
+ for (i = 0; i < (ucode->size / 8); i++)
+ cpu_to_be64s(&((u64 *)ucode->va)[i]);
+ /* Ucode needs 16-bit swap */
+ for (i = 0; i < (ucode->size / 2); i++)
+ cpu_to_be16s(&((u16 *)ucode->va)[i]);
+ return 0;
+}
+
+static int enable_eng_grp(struct otx2_cpt_eng_grp_info *eng_grp,
+ void *obj)
+{
+ int ret;
+
+ /* Point microcode to each core of the group */
+ ret = cpt_set_ucode_base(eng_grp, obj);
+ if (ret)
+ return ret;
+
+ /* Attach the cores to the group and enable them */
+ ret = cpt_attach_and_enable_cores(eng_grp, obj);
+
+ return ret;
+}
+
+static int disable_eng_grp(struct device *dev,
+ struct otx2_cpt_eng_grp_info *eng_grp,
+ void *obj)
+{
+ int i, ret;
+
+ /* Disable all engines used by this group */
+ ret = cpt_detach_and_disable_cores(eng_grp, obj);
+ if (ret)
+ return ret;
+
+ /* Unload ucode used by this engine group */
+ ucode_unload(dev, &eng_grp->ucode[0]);
+ ucode_unload(dev, &eng_grp->ucode[1]);
+
+ for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
+ if (!eng_grp->engs[i].type)
+ continue;
+
+ eng_grp->engs[i].ucode = &eng_grp->ucode[0];
+ }
+
+ /* Clear UCODE_BASE register for each engine used by this group */
+ ret = cpt_set_ucode_base(eng_grp, obj);
+
+ return ret;
+}
+
+static void setup_eng_grp_mirroring(struct otx2_cpt_eng_grp_info *dst_grp,
+ struct otx2_cpt_eng_grp_info *src_grp)
+{
+ /* Setup fields for engine group which is mirrored */
+ src_grp->mirror.is_ena = false;
+ src_grp->mirror.idx = 0;
+ src_grp->mirror.ref_count++;
+
+ /* Setup fields for mirroring engine group */
+ dst_grp->mirror.is_ena = true;
+ dst_grp->mirror.idx = src_grp->idx;
+ dst_grp->mirror.ref_count = 0;
+}
+
+static void remove_eng_grp_mirroring(struct otx2_cpt_eng_grp_info *dst_grp)
+{
+ struct otx2_cpt_eng_grp_info *src_grp;
+
+ if (!dst_grp->mirror.is_ena)
+ return;
+
+ src_grp = &dst_grp->g->grp[dst_grp->mirror.idx];
+
+ src_grp->mirror.ref_count--;
+ dst_grp->mirror.is_ena = false;
+ dst_grp->mirror.idx = 0;
+ dst_grp->mirror.ref_count = 0;
+}
+
+static void update_requested_engs(struct otx2_cpt_eng_grp_info *mirror_eng_grp,
+ struct otx2_cpt_engines *engs, int engs_cnt)
+{
+ struct otx2_cpt_engs_rsvd *mirrored_engs;
+ int i;
+
+ for (i = 0; i < engs_cnt; i++) {
+ mirrored_engs = find_engines_by_type(mirror_eng_grp,
+ engs[i].type);
+ if (!mirrored_engs)
+ continue;
+
+ /*
+ * If mirrored group has this type of engines attached then
+ * there are 3 scenarios possible:
+ * 1) mirrored_engs.count == engs[i].count then all engines
+ * from mirrored engine group will be shared with this engine
+ * group
+ * 2) mirrored_engs.count > engs[i].count then only a subset of
+ * engines from mirrored engine group will be shared with this
+ * engine group
+ * 3) mirrored_engs.count < engs[i].count then all engines
+ * from mirrored engine group will be shared with this group
+ * and additional engines will be reserved for exclusive use
+ * by this engine group
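+ * For example, requesting 6 engines when the mirrored group provides 4
+ * leaves engs[i].count = 2 engines to reserve exclusively below.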
+ */
+ engs[i].count -= mirrored_engs->count;
+ }
+}
+
+static struct otx2_cpt_eng_grp_info *find_mirrored_eng_grp(
+ struct otx2_cpt_eng_grp_info *grp)
+{
+ struct otx2_cpt_eng_grps *eng_grps = grp->g;
+ int i;
+
+ for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
+ if (!eng_grps->grp[i].is_enabled)
+ continue;
+ if (eng_grps->grp[i].ucode[0].type &&
+ eng_grps->grp[i].ucode[1].type)
+ continue;
+ if (grp->idx == i)
+ continue;
+ if (!strncasecmp(eng_grps->grp[i].ucode[0].ver_str,
+ grp->ucode[0].ver_str,
+ OTX2_CPT_UCODE_VER_STR_SZ))
+ return &eng_grps->grp[i];
+ }
+
+ return NULL;
+}
+
+static struct otx2_cpt_eng_grp_info *find_unused_eng_grp(
+ struct otx2_cpt_eng_grps *eng_grps)
+{
+ int i;
+
+ for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
+ if (!eng_grps->grp[i].is_enabled)
+ return &eng_grps->grp[i];
+ }
+ return NULL;
+}
+
+static int eng_grp_update_masks(struct device *dev,
+ struct otx2_cpt_eng_grp_info *eng_grp)
+{
+ struct otx2_cpt_engs_rsvd *engs, *mirrored_engs;
+ struct otx2_cpt_bitmap tmp_bmap = { {0} };
+ int i, j, cnt, max_cnt;
+ int bit;
+
+ for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
+ engs = &eng_grp->engs[i];
+ if (!engs->type)
+ continue;
+ if (engs->count <= 0)
+ continue;
+
+ switch (engs->type) {
+ case OTX2_CPT_SE_TYPES:
+ max_cnt = eng_grp->g->avail.max_se_cnt;
+ break;
+
+ case OTX2_CPT_IE_TYPES:
+ max_cnt = eng_grp->g->avail.max_ie_cnt;
+ break;
+
+ case OTX2_CPT_AE_TYPES:
+ max_cnt = eng_grp->g->avail.max_ae_cnt;
+ break;
+
+ default:
+ dev_err(dev, "Invalid engine type %d\n", engs->type);
+ return -EINVAL;
+ }
+
+ cnt = engs->count;
+ WARN_ON(engs->offset + max_cnt > OTX2_CPT_MAX_ENGINES);
+ bitmap_zero(tmp_bmap.bits, eng_grp->g->engs_num);
+ for (j = engs->offset; j < engs->offset + max_cnt; j++) {
+ if (!eng_grp->g->eng_ref_cnt[j]) {
+ bitmap_set(tmp_bmap.bits, j, 1);
+ cnt--;
+ if (!cnt)
+ break;
+ }
+ }
+
+ if (cnt)
+ return -ENOSPC;
+
+ bitmap_copy(engs->bmap, tmp_bmap.bits, eng_grp->g->engs_num);
+ }
+
+ if (!eng_grp->mirror.is_ena)
+ return 0;
+
+ for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
+ engs = &eng_grp->engs[i];
+ if (!engs->type)
+ continue;
+
+ mirrored_engs = find_engines_by_type(
+ &eng_grp->g->grp[eng_grp->mirror.idx],
+ engs->type);
+ WARN_ON(!mirrored_engs && engs->count <= 0);
+ if (!mirrored_engs)
+ continue;
+
+ bitmap_copy(tmp_bmap.bits, mirrored_engs->bmap,
+ eng_grp->g->engs_num);
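+ /*
+ * A negative count means the mirrored group provides more engines than
+ * needed, so trim the surplus bits from the shared bitmap.
+ */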
+ if (engs->count < 0) {
+ bit = find_first_bit(mirrored_engs->bmap,
+ eng_grp->g->engs_num);
+ bitmap_clear(tmp_bmap.bits, bit, -engs->count);
+ }
+ bitmap_or(engs->bmap, engs->bmap, tmp_bmap.bits,
+ eng_grp->g->engs_num);
+ }
+ return 0;
+}
+
+static int delete_engine_group(struct device *dev,
+ struct otx2_cpt_eng_grp_info *eng_grp)
+{
+ int ret;
+
+ if (!eng_grp->is_enabled)
+ return 0;
+
+ if (eng_grp->mirror.ref_count)
+ return -EINVAL;
+
+ /* Removing engine group mirroring if enabled */
+ remove_eng_grp_mirroring(eng_grp);
+
+ /* Disable engine group */
+ ret = disable_eng_grp(dev, eng_grp, eng_grp->g->obj);
+ if (ret)
+ return ret;
+
+ /* Release all engines held by this engine group */
+ ret = release_engines(dev, eng_grp);
+ if (ret)
+ return ret;
+
+ eng_grp->is_enabled = false;
+
+ return 0;
+}
+
+static void update_ucode_ptrs(struct otx2_cpt_eng_grp_info *eng_grp)
+{
+ struct otx2_cpt_ucode *ucode;
+
+ if (eng_grp->mirror.is_ena)
+ ucode = &eng_grp->g->grp[eng_grp->mirror.idx].ucode[0];
+ else
+ ucode = &eng_grp->ucode[0];
+ WARN_ON(!eng_grp->engs[0].type);
+ eng_grp->engs[0].ucode = ucode;
+
+ if (eng_grp->engs[1].type) {
+ if (is_2nd_ucode_used(eng_grp))
+ eng_grp->engs[1].ucode = &eng_grp->ucode[1];
+ else
+ eng_grp->engs[1].ucode = ucode;
+ }
+}
+
+static int create_engine_group(struct device *dev,
+ struct otx2_cpt_eng_grps *eng_grps,
+ struct otx2_cpt_engines *engs, int ucodes_cnt,
+ void *ucode_data[], int is_print)
+{
+ struct otx2_cpt_eng_grp_info *mirrored_eng_grp;
+ struct otx2_cpt_eng_grp_info *eng_grp;
+ struct otx2_cpt_uc_info_t *uc_info;
+ int i, ret = 0;
+
+ /* Find engine group which is not used */
+ eng_grp = find_unused_eng_grp(eng_grps);
+ if (!eng_grp) {
+ dev_err(dev, "Error all engine groups are being used\n");
+ return -ENOSPC;
+ }
+ /* Load ucode */
+ for (i = 0; i < ucodes_cnt; i++) {
+ uc_info = (struct otx2_cpt_uc_info_t *) ucode_data[i];
+ eng_grp->ucode[i] = uc_info->ucode;
+ ret = copy_ucode_to_dma_mem(dev, &eng_grp->ucode[i],
+ uc_info->fw->data);
+ if (ret)
+ goto unload_ucode;
+ }
+
+ /* Check if this group mirrors another existing engine group */
+ mirrored_eng_grp = find_mirrored_eng_grp(eng_grp);
+ if (mirrored_eng_grp) {
+ /* Setup mirroring */
+ setup_eng_grp_mirroring(eng_grp, mirrored_eng_grp);
+
+ /*
+ * Update count of requested engines because some
+ * of them might be shared with mirrored group
+ */
+ update_requested_engs(mirrored_eng_grp, engs, ucodes_cnt);
+ }
+ ret = reserve_engines(dev, eng_grp, engs, ucodes_cnt);
+ if (ret)
+ goto unload_ucode;
+
+ /* Update ucode pointers used by engines */
+ update_ucode_ptrs(eng_grp);
+
+ /* Update engine masks used by this group */
+ ret = eng_grp_update_masks(dev, eng_grp);
+ if (ret)
+ goto release_engs;
+
+ /* Enable engine group */
+ ret = enable_eng_grp(eng_grp, eng_grps->obj);
+ if (ret)
+ goto release_engs;
+
+ /*
+ * If this engine group mirrors another engine group
+ * then we need to unload ucode as we will use ucode
+ * from mirrored engine group
+ */
+ if (eng_grp->mirror.is_ena)
+ ucode_unload(dev, &eng_grp->ucode[0]);
+
+ eng_grp->is_enabled = true;
+
+ if (!is_print)
+ return 0;
+
+ if (mirrored_eng_grp)
+ dev_info(dev,
+ "Engine_group%d: reuse microcode %s from group %d\n",
+ eng_grp->idx, mirrored_eng_grp->ucode[0].ver_str,
+ mirrored_eng_grp->idx);
+ else
+ dev_info(dev, "Engine_group%d: microcode loaded %s\n",
+ eng_grp->idx, eng_grp->ucode[0].ver_str);
+ if (is_2nd_ucode_used(eng_grp))
+ dev_info(dev, "Engine_group%d: microcode loaded %s\n",
+ eng_grp->idx, eng_grp->ucode[1].ver_str);
+
+ return 0;
+
+release_engs:
+ release_engines(dev, eng_grp);
+unload_ucode:
+ ucode_unload(dev, &eng_grp->ucode[0]);
+ ucode_unload(dev, &eng_grp->ucode[1]);
+ return ret;
+}
+
+static void delete_engine_grps(struct pci_dev *pdev,
+ struct otx2_cpt_eng_grps *eng_grps)
+{
+ int i;
+
+ /* First delete all mirroring engine groups */
+ for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++)
+ if (eng_grps->grp[i].mirror.is_ena)
+ delete_engine_group(&pdev->dev, &eng_grps->grp[i]);
+
+ /* Delete remaining engine groups */
+ for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++)
+ delete_engine_group(&pdev->dev, &eng_grps->grp[i]);
+}
+
+#define PCI_DEVID_CN10K_RNM 0xA098
+#define RNM_ENTROPY_STATUS 0x8
+
+static void rnm_to_cpt_errata_fixup(struct device *dev)
+{
+ struct pci_dev *pdev;
+ void __iomem *base;
+ int timeout = 5000;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RNM, NULL);
+ if (!pdev)
+ return;
+
+ base = pci_ioremap_bar(pdev, 0);
+ if (!base)
+ goto put_pdev;
+
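+	/*
+	 * Poll RNM_ENTROPY_STATUS[6:0] until it reads 0x40, waiting at most
+	 * ~5ms, before CPT random number requests get enabled.
+	 */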
+ while ((readq(base + RNM_ENTROPY_STATUS) & 0x7F) != 0x40) {
+ cpu_relax();
+ udelay(1);
+ timeout--;
+ if (!timeout) {
+ dev_warn(dev, "RNM is not producing entropy\n");
+ break;
+ }
+ }
+
+ iounmap(base);
+
+put_pdev:
+ pci_dev_put(pdev);
+}
+
+int otx2_cpt_get_eng_grp(struct otx2_cpt_eng_grps *eng_grps, int eng_type)
+{
+ int eng_grp_num = OTX2_CPT_INVALID_CRYPTO_ENG_GRP;
+ struct otx2_cpt_eng_grp_info *grp;
+ int i;
+
+ for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
+ grp = &eng_grps->grp[i];
+ if (!grp->is_enabled)
+ continue;
+
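+		/*
+		 * For symmetric (SE) requests prefer a group that contains
+		 * only SE engines, i.e. skip the shared SE+IE group.
+		 */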
+ if (eng_type == OTX2_CPT_SE_TYPES) {
+ if (eng_grp_has_eng_type(grp, eng_type) &&
+ !eng_grp_has_eng_type(grp, OTX2_CPT_IE_TYPES)) {
+ eng_grp_num = i;
+ break;
+ }
+ } else {
+ if (eng_grp_has_eng_type(grp, eng_type)) {
+ eng_grp_num = i;
+ break;
+ }
+ }
+ }
+ return eng_grp_num;
+}
+
+int otx2_cpt_create_eng_grps(struct otx2_cptpf_dev *cptpf,
+ struct otx2_cpt_eng_grps *eng_grps)
+{
+ struct otx2_cpt_uc_info_t *uc_info[OTX2_CPT_MAX_ETYPES_PER_GRP] = { };
+ struct otx2_cpt_engines engs[OTX2_CPT_MAX_ETYPES_PER_GRP] = { {0} };
+ struct pci_dev *pdev = cptpf->pdev;
+ struct fw_info_t fw_info;
+ u64 reg_val;
+ int ret = 0;
+
+ mutex_lock(&eng_grps->lock);
+ /*
+	 * Don't create the engine groups if they were already
+	 * created (when the user enabled VFs for the first time).
+ */
+ if (eng_grps->is_grps_created)
+ goto unlock;
+
+ ret = cpt_ucode_load_fw(pdev, &fw_info);
+ if (ret)
+ goto unlock;
+
+ /*
+ * Create engine group with SE engines for kernel
+ * crypto functionality (symmetric crypto)
+ */
+ uc_info[0] = get_ucode(&fw_info, OTX2_CPT_SE_TYPES);
+ if (uc_info[0] == NULL) {
+ dev_err(&pdev->dev, "Unable to find firmware for SE\n");
+ ret = -EINVAL;
+ goto release_fw;
+ }
+ engs[0].type = OTX2_CPT_SE_TYPES;
+ engs[0].count = eng_grps->avail.max_se_cnt;
+
+ ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
+ (void **) uc_info, 1);
+ if (ret)
+ goto release_fw;
+
+ /*
+ * Create engine group with SE+IE engines for IPSec.
+ * All SE engines will be shared with engine group 0.
+ */
+ uc_info[0] = get_ucode(&fw_info, OTX2_CPT_SE_TYPES);
+ uc_info[1] = get_ucode(&fw_info, OTX2_CPT_IE_TYPES);
+
+ if (uc_info[1] == NULL) {
+		dev_err(&pdev->dev, "Unable to find firmware for IE\n");
+ ret = -EINVAL;
+ goto delete_eng_grp;
+ }
+ engs[0].type = OTX2_CPT_SE_TYPES;
+ engs[0].count = eng_grps->avail.max_se_cnt;
+ engs[1].type = OTX2_CPT_IE_TYPES;
+ engs[1].count = eng_grps->avail.max_ie_cnt;
+
+ ret = create_engine_group(&pdev->dev, eng_grps, engs, 2,
+ (void **) uc_info, 1);
+ if (ret)
+ goto delete_eng_grp;
+
+ /*
+ * Create engine group with AE engines for asymmetric
+ * crypto functionality.
+ */
+ uc_info[0] = get_ucode(&fw_info, OTX2_CPT_AE_TYPES);
+ if (uc_info[0] == NULL) {
+		dev_err(&pdev->dev, "Unable to find firmware for AE\n");
+ ret = -EINVAL;
+ goto delete_eng_grp;
+ }
+ engs[0].type = OTX2_CPT_AE_TYPES;
+ engs[0].count = eng_grps->avail.max_ae_cnt;
+
+ ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
+ (void **) uc_info, 1);
+ if (ret)
+ goto delete_eng_grp;
+
+ eng_grps->is_grps_created = true;
+
+ cpt_ucode_release_fw(&fw_info);
+
+ if (is_dev_otx2(pdev))
+ goto unlock;
+
+ /*
+ * Ensure RNM_ENTROPY_STATUS[NORMAL_CNT] = 0x40 before writing
+ * CPT_AF_CTL[RNM_REQ_EN] = 1 as a workaround for HW errata.
+ */
+ rnm_to_cpt_errata_fixup(&pdev->dev);
+
+ /*
+ * Configure engine group mask to allow context prefetching
+ * for the groups and enable random number request, to enable
+ * CPT to request random numbers from RNM.
+ */
+ otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTL,
+ OTX2_CPT_ALL_ENG_GRPS_MASK << 3 | BIT_ULL(16),
+ BLKADDR_CPT0);
+ /*
+ * Set interval to periodically flush dirty data for the next
+ * CTX cache entry. Set the interval count to maximum supported
+ * value.
+ */
+ otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTX_FLUSH_TIMER,
+ CTX_FLUSH_TIMER_CNT, BLKADDR_CPT0);
+
+ /*
+ * Set CPT_AF_DIAG[FLT_DIS], as a workaround for HW errata, when
+ * CPT_AF_DIAG[FLT_DIS] = 0 and a CPT engine access to LLC/DRAM
+ * encounters a fault/poison, a rare case may result in
+ * unpredictable data being delivered to a CPT engine.
+ */
+ otx2_cpt_read_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_DIAG, &reg_val,
+ BLKADDR_CPT0);
+ otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_DIAG,
+ reg_val | BIT_ULL(24), BLKADDR_CPT0);
+
+ mutex_unlock(&eng_grps->lock);
+ return 0;
+
+delete_eng_grp:
+ delete_engine_grps(pdev, eng_grps);
+release_fw:
+ cpt_ucode_release_fw(&fw_info);
+unlock:
+ mutex_unlock(&eng_grps->lock);
+ return ret;
+}
+
+static int cptx_disable_all_cores(struct otx2_cptpf_dev *cptpf, int total_cores,
+ int blkaddr)
+{
+ int timeout = 10, ret;
+ int i, busy;
+ u64 reg;
+
+ /* Disengage the cores from groups */
+ for (i = 0; i < total_cores; i++) {
+ ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
+ CPT_AF_EXEX_CTL2(i), 0x0,
+ blkaddr);
+ if (ret)
+ return ret;
+
+ cptpf->eng_grps.eng_ref_cnt[i] = 0;
+ }
+ ret = otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
+ if (ret)
+ return ret;
+
+ /* Wait for cores to become idle */
+ do {
+ busy = 0;
+ usleep_range(10000, 20000);
+ if (timeout-- < 0)
+ return -EBUSY;
+
+ for (i = 0; i < total_cores; i++) {
+ ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox,
+ cptpf->pdev,
+ CPT_AF_EXEX_STS(i), &reg,
+ blkaddr);
+ if (ret)
+ return ret;
+
+ if (reg & 0x1) {
+ busy = 1;
+ break;
+ }
+ }
+ } while (busy);
+
+ /* Disable the cores */
+ for (i = 0; i < total_cores; i++) {
+ ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
+ CPT_AF_EXEX_CTL(i), 0x0,
+ blkaddr);
+ if (ret)
+ return ret;
+ }
+ return otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
+}
+
+int otx2_cpt_disable_all_cores(struct otx2_cptpf_dev *cptpf)
+{
+ int total_cores, ret;
+
+ total_cores = cptpf->eng_grps.avail.max_se_cnt +
+ cptpf->eng_grps.avail.max_ie_cnt +
+ cptpf->eng_grps.avail.max_ae_cnt;
+
+ if (cptpf->has_cpt1) {
+ ret = cptx_disable_all_cores(cptpf, total_cores, BLKADDR_CPT1);
+ if (ret)
+ return ret;
+ }
+ return cptx_disable_all_cores(cptpf, total_cores, BLKADDR_CPT0);
+}
+
+void otx2_cpt_cleanup_eng_grps(struct pci_dev *pdev,
+ struct otx2_cpt_eng_grps *eng_grps)
+{
+ struct otx2_cpt_eng_grp_info *grp;
+ int i, j;
+
+ mutex_lock(&eng_grps->lock);
+ delete_engine_grps(pdev, eng_grps);
+ /* Release memory */
+ for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
+ grp = &eng_grps->grp[i];
+ for (j = 0; j < OTX2_CPT_MAX_ETYPES_PER_GRP; j++) {
+ kfree(grp->engs[j].bmap);
+ grp->engs[j].bmap = NULL;
+ }
+ }
+ mutex_unlock(&eng_grps->lock);
+}
+
+int otx2_cpt_init_eng_grps(struct pci_dev *pdev,
+ struct otx2_cpt_eng_grps *eng_grps)
+{
+ struct otx2_cpt_eng_grp_info *grp;
+ int i, j, ret;
+
+ mutex_init(&eng_grps->lock);
+ eng_grps->obj = pci_get_drvdata(pdev);
+ eng_grps->avail.se_cnt = eng_grps->avail.max_se_cnt;
+ eng_grps->avail.ie_cnt = eng_grps->avail.max_ie_cnt;
+ eng_grps->avail.ae_cnt = eng_grps->avail.max_ae_cnt;
+
+ eng_grps->engs_num = eng_grps->avail.max_se_cnt +
+ eng_grps->avail.max_ie_cnt +
+ eng_grps->avail.max_ae_cnt;
+ if (eng_grps->engs_num > OTX2_CPT_MAX_ENGINES) {
+ dev_err(&pdev->dev,
+			"Number of engines %d exceeds max supported %d\n",
+ eng_grps->engs_num, OTX2_CPT_MAX_ENGINES);
+ ret = -EINVAL;
+ goto cleanup_eng_grps;
+ }
+
+ for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
+ grp = &eng_grps->grp[i];
+ grp->g = eng_grps;
+ grp->idx = i;
+
+ for (j = 0; j < OTX2_CPT_MAX_ETYPES_PER_GRP; j++) {
+ grp->engs[j].bmap =
+ kcalloc(BITS_TO_LONGS(eng_grps->engs_num),
+ sizeof(long), GFP_KERNEL);
+ if (!grp->engs[j].bmap) {
+ ret = -ENOMEM;
+ goto cleanup_eng_grps;
+ }
+ }
+ }
+ return 0;
+
+cleanup_eng_grps:
+ otx2_cpt_cleanup_eng_grps(pdev, eng_grps);
+ return ret;
+}
+
+static int create_eng_caps_discovery_grps(struct pci_dev *pdev,
+ struct otx2_cpt_eng_grps *eng_grps)
+{
+ struct otx2_cpt_uc_info_t *uc_info[OTX2_CPT_MAX_ETYPES_PER_GRP] = { };
+ struct otx2_cpt_engines engs[OTX2_CPT_MAX_ETYPES_PER_GRP] = { {0} };
+ struct fw_info_t fw_info;
+ int ret;
+
+ mutex_lock(&eng_grps->lock);
+ ret = cpt_ucode_load_fw(pdev, &fw_info);
+ if (ret) {
+ mutex_unlock(&eng_grps->lock);
+ return ret;
+ }
+
+ uc_info[0] = get_ucode(&fw_info, OTX2_CPT_AE_TYPES);
+ if (uc_info[0] == NULL) {
+ dev_err(&pdev->dev, "Unable to find firmware for AE\n");
+ ret = -EINVAL;
+ goto release_fw;
+ }
+ engs[0].type = OTX2_CPT_AE_TYPES;
+ engs[0].count = 2;
+
+ ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
+ (void **) uc_info, 0);
+ if (ret)
+ goto release_fw;
+
+ uc_info[0] = get_ucode(&fw_info, OTX2_CPT_SE_TYPES);
+ if (uc_info[0] == NULL) {
+ dev_err(&pdev->dev, "Unable to find firmware for SE\n");
+ ret = -EINVAL;
+ goto delete_eng_grp;
+ }
+ engs[0].type = OTX2_CPT_SE_TYPES;
+ engs[0].count = 2;
+
+ ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
+ (void **) uc_info, 0);
+ if (ret)
+ goto delete_eng_grp;
+
+ uc_info[0] = get_ucode(&fw_info, OTX2_CPT_IE_TYPES);
+ if (uc_info[0] == NULL) {
+ dev_err(&pdev->dev, "Unable to find firmware for IE\n");
+ ret = -EINVAL;
+ goto delete_eng_grp;
+ }
+ engs[0].type = OTX2_CPT_IE_TYPES;
+ engs[0].count = 2;
+
+ ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
+ (void **) uc_info, 0);
+ if (ret)
+ goto delete_eng_grp;
+
+ cpt_ucode_release_fw(&fw_info);
+ mutex_unlock(&eng_grps->lock);
+ return 0;
+
+delete_eng_grp:
+ delete_engine_grps(pdev, eng_grps);
+release_fw:
+ cpt_ucode_release_fw(&fw_info);
+ mutex_unlock(&eng_grps->lock);
+ return ret;
+}
+
+/*
+ * Get CPT HW capabilities using LOAD_FVC operation.
+ */
+int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf)
+{
+ struct otx2_cptlfs_info *lfs = &cptpf->lfs;
+ struct otx2_cpt_iq_command iq_cmd;
+ union otx2_cpt_opcode opcode;
+ union otx2_cpt_res_s *result;
+ union otx2_cpt_inst_s inst;
+ dma_addr_t rptr_baddr;
+ struct pci_dev *pdev;
+ u32 len, compl_rlen;
+ int ret, etype;
+ void *rptr;
+
+ /*
+	 * Don't rediscover the engine capabilities if this was already
+	 * done (when the user enabled VFs for the first time).
+ */
+ if (cptpf->is_eng_caps_discovered)
+ return 0;
+
+ pdev = cptpf->pdev;
+ /*
+ * Create engine groups for each type to submit LOAD_FVC op and
+ * get engine's capabilities.
+ */
+ ret = create_eng_caps_discovery_grps(pdev, &cptpf->eng_grps);
+ if (ret)
+ goto delete_grps;
+
+ otx2_cptlf_set_dev_info(&cptpf->lfs, cptpf->pdev, cptpf->reg_base,
+ &cptpf->afpf_mbox, BLKADDR_CPT0);
+ ret = otx2_cptlf_init(&cptpf->lfs, OTX2_CPT_ALL_ENG_GRPS_MASK,
+ OTX2_CPT_QUEUE_HI_PRIO, 1);
+ if (ret)
+ goto delete_grps;
+
+ compl_rlen = ALIGN(sizeof(union otx2_cpt_res_s), OTX2_CPT_DMA_MINALIGN);
+ len = compl_rlen + LOADFVC_RLEN;
+
+ result = kzalloc(len, GFP_KERNEL);
+ if (!result) {
+ ret = -ENOMEM;
+ goto lf_cleanup;
+ }
+ rptr_baddr = dma_map_single(&pdev->dev, (void *)result, len,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&pdev->dev, rptr_baddr)) {
+ dev_err(&pdev->dev, "DMA mapping failed\n");
+ ret = -EFAULT;
+ goto free_result;
+ }
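+	/*
+	 * The DMA buffer holds the aligned completion record first,
+	 * followed by the LOADFVC response area that rptr points to;
+	 * the engine capability word is read back from there.
+	 */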
+ rptr = (u8 *)result + compl_rlen;
+
+ /* Fill in the command */
+ opcode.s.major = LOADFVC_MAJOR_OP;
+ opcode.s.minor = LOADFVC_MINOR_OP;
+
+ iq_cmd.cmd.u = 0;
+ iq_cmd.cmd.s.opcode = cpu_to_be16(opcode.flags);
+
+ /* 64-bit swap for microcode data reads, not needed for addresses */
+ cpu_to_be64s(&iq_cmd.cmd.u);
+ iq_cmd.dptr = 0;
+ iq_cmd.rptr = rptr_baddr + compl_rlen;
+ iq_cmd.cptr.u = 0;
+
+ for (etype = 1; etype < OTX2_CPT_MAX_ENG_TYPES; etype++) {
+ result->s.compcode = OTX2_CPT_COMPLETION_CODE_INIT;
+ iq_cmd.cptr.s.grp = otx2_cpt_get_eng_grp(&cptpf->eng_grps,
+ etype);
+ otx2_cpt_fill_inst(&inst, &iq_cmd, rptr_baddr);
+ lfs->ops->send_cmd(&inst, 1, &cptpf->lfs.lf[0]);
+
+ while (lfs->ops->cpt_get_compcode(result) ==
+ OTX2_CPT_COMPLETION_CODE_INIT)
+ cpu_relax();
+
+ cptpf->eng_caps[etype].u = be64_to_cpup(rptr);
+ }
+ dma_unmap_single(&pdev->dev, rptr_baddr, len, DMA_BIDIRECTIONAL);
+ cptpf->is_eng_caps_discovered = true;
+
+free_result:
+ kfree(result);
+lf_cleanup:
+ otx2_cptlf_shutdown(lfs);
+delete_grps:
+ delete_engine_grps(pdev, &cptpf->eng_grps);
+
+ return ret;
+}
+
+static void swap_engines(struct otx2_cpt_engines *engsl,
+ struct otx2_cpt_engines *engsr)
+{
+ struct otx2_cpt_engines engs;
+
+ engs = *engsl;
+ *engsl = *engsr;
+ *engsr = engs;
+}
+
+int otx2_cpt_dl_custom_egrp_create(struct otx2_cptpf_dev *cptpf,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct otx2_cpt_uc_info_t *uc_info[OTX2_CPT_MAX_ETYPES_PER_GRP] = { };
+ struct otx2_cpt_engines engs[OTX2_CPT_MAX_ETYPES_PER_GRP] = { {0} };
+ struct otx2_cpt_eng_grps *eng_grps = &cptpf->eng_grps;
+ char *ucode_filename[OTX2_CPT_MAX_ETYPES_PER_GRP];
+ char tmp_buf[OTX2_CPT_NAME_LENGTH] = { 0 };
+ struct device *dev = &cptpf->pdev->dev;
+ char *start, *val, *err_msg, *tmp;
+ int grp_idx = 0, ret = -EINVAL;
+ bool has_se, has_ie, has_ae;
+ struct fw_info_t fw_info;
+ int ucode_idx = 0;
+
+ if (!eng_grps->is_grps_created) {
+ dev_err(dev, "Not allowed before creating the default groups\n");
+ return -EINVAL;
+ }
+ err_msg = "Invalid engine group format";
+	strlcpy(tmp_buf, ctx->val.vstr, sizeof(tmp_buf));
+ start = tmp_buf;
+
+ has_se = has_ie = has_ae = false;
+
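+	/*
+	 * The value is a ';' separated list of up to two engine type
+	 * entries ("se:<count>", "ie:<count>" or "ae:<count>") and up to
+	 * two microcode file names (any token without such a prefix).
+	 */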
+ for (;;) {
+ val = strsep(&start, ";");
+ if (!val)
+ break;
+ val = strim(val);
+ if (!*val)
+ continue;
+
+ if (!strncasecmp(val, "se", 2) && strchr(val, ':')) {
+ if (has_se || ucode_idx)
+ goto err_print;
+ tmp = strsep(&val, ":");
+ if (tmp != NULL)
+ tmp = strim(tmp);
+ else
+ goto err_print;
+ if (!val)
+ goto err_print;
+ if (strlen(tmp) != 2)
+ goto err_print;
+ if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
+ goto err_print;
+ engs[grp_idx++].type = OTX2_CPT_SE_TYPES;
+ has_se = true;
+ } else if (!strncasecmp(val, "ae", 2) && strchr(val, ':')) {
+ if (has_ae || ucode_idx)
+ goto err_print;
+ tmp = strsep(&val, ":");
+ if (tmp != NULL)
+ tmp = strim(tmp);
+ else
+ goto err_print;
+ if (!val)
+ goto err_print;
+ if (strlen(tmp) != 2)
+ goto err_print;
+ if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
+ goto err_print;
+ engs[grp_idx++].type = OTX2_CPT_AE_TYPES;
+ has_ae = true;
+ } else if (!strncasecmp(val, "ie", 2) && strchr(val, ':')) {
+ if (has_ie || ucode_idx)
+ goto err_print;
+ tmp = strsep(&val, ":");
+ if (tmp != NULL)
+ tmp = strim(tmp);
+ else
+ goto err_print;
+ if (!val)
+ goto err_print;
+ if (strlen(tmp) != 2)
+ goto err_print;
+ if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
+ goto err_print;
+ engs[grp_idx++].type = OTX2_CPT_IE_TYPES;
+ has_ie = true;
+ } else {
+ if (ucode_idx > 1)
+ goto err_print;
+ if (!strlen(val))
+ goto err_print;
+ if (strnstr(val, " ", strlen(val)))
+ goto err_print;
+ ucode_filename[ucode_idx++] = val;
+ }
+ }
+
+ /* Validate input parameters */
+ if (!(grp_idx && ucode_idx))
+ goto err_print;
+
+ if (ucode_idx > 1 && grp_idx < 2)
+ goto err_print;
+
+ if (grp_idx > OTX2_CPT_MAX_ETYPES_PER_GRP) {
+ err_msg = "Error max 2 engine types can be attached";
+ goto err_print;
+ }
+
+ if (grp_idx > 1) {
+ if ((engs[0].type + engs[1].type) !=
+ (OTX2_CPT_SE_TYPES + OTX2_CPT_IE_TYPES)) {
+ err_msg =
+ "Only combination of SE+IE engines is allowed";
+ goto err_print;
+ }
+ /* Keep SE engines at zero index */
+ if (engs[1].type == OTX2_CPT_SE_TYPES)
+ swap_engines(&engs[0], &engs[1]);
+ }
+ mutex_lock(&eng_grps->lock);
+
+ if (cptpf->enabled_vfs) {
+ dev_err(dev, "Disable VFs before modifying engine groups\n");
+ ret = -EACCES;
+ goto err_unlock;
+ }
+ INIT_LIST_HEAD(&fw_info.ucodes);
+ ret = load_fw(dev, &fw_info, ucode_filename[0]);
+ if (ret) {
+ dev_err(dev, "Unable to load firmware %s\n",
+ ucode_filename[0]);
+ goto err_unlock;
+ }
+ if (ucode_idx > 1) {
+ ret = load_fw(dev, &fw_info, ucode_filename[1]);
+ if (ret) {
+ dev_err(dev, "Unable to load firmware %s\n",
+ ucode_filename[1]);
+ goto release_fw;
+ }
+ }
+ uc_info[0] = get_ucode(&fw_info, engs[0].type);
+ if (uc_info[0] == NULL) {
+ dev_err(dev, "Unable to find firmware for %s\n",
+ get_eng_type_str(engs[0].type));
+ ret = -EINVAL;
+ goto release_fw;
+ }
+ if (ucode_idx > 1) {
+ uc_info[1] = get_ucode(&fw_info, engs[1].type);
+ if (uc_info[1] == NULL) {
+ dev_err(dev, "Unable to find firmware for %s\n",
+ get_eng_type_str(engs[1].type));
+ ret = -EINVAL;
+ goto release_fw;
+ }
+ }
+ ret = create_engine_group(dev, eng_grps, engs, grp_idx,
+ (void **) uc_info, 1);
+
+release_fw:
+ cpt_ucode_release_fw(&fw_info);
+err_unlock:
+ mutex_unlock(&eng_grps->lock);
+ return ret;
+err_print:
+ dev_err(dev, "%s\n", err_msg);
+ return ret;
+}
+
+int otx2_cpt_dl_custom_egrp_delete(struct otx2_cptpf_dev *cptpf,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct otx2_cpt_eng_grps *eng_grps = &cptpf->eng_grps;
+ struct device *dev = &cptpf->pdev->dev;
+ char *tmp, *err_msg;
+ int egrp;
+ int ret;
+
+	err_msg = "Invalid input string format (ex: egrp:0)";
+ if (strncasecmp(ctx->val.vstr, "egrp", 4))
+ goto err_print;
+ tmp = ctx->val.vstr;
+ strsep(&tmp, ":");
+ if (!tmp)
+ goto err_print;
+ if (kstrtoint(tmp, 10, &egrp))
+ goto err_print;
+
+ if (egrp >= OTX2_CPT_MAX_ENGINE_GROUPS) {
+		dev_err(dev, "Invalid engine group %d\n", egrp);
+ return -EINVAL;
+ }
+ if (!eng_grps->grp[egrp].is_enabled) {
+		dev_err(dev, "Error engine_group%d is not configured\n",
+ egrp);
+ return -EINVAL;
+ }
+ mutex_lock(&eng_grps->lock);
+ ret = delete_engine_group(dev, &eng_grps->grp[egrp]);
+ mutex_unlock(&eng_grps->lock);
+
+ return ret;
+
+err_print:
+ dev_err(dev, "%s\n", err_msg);
+ return -EINVAL;
+}
+
+static void get_engs_info(struct otx2_cpt_eng_grp_info *eng_grp,
+ char *buf, int size, int idx)
+{
+ struct otx2_cpt_engs_rsvd *mirrored_engs = NULL;
+ struct otx2_cpt_engs_rsvd *engs;
+ int len, i;
+
+ buf[0] = '\0';
+ for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
+ engs = &eng_grp->engs[i];
+ if (!engs->type)
+ continue;
+		if (idx != -1 && idx != i)
+			continue;
+
+ if (eng_grp->mirror.is_ena)
+ mirrored_engs = find_engines_by_type(
+ &eng_grp->g->grp[eng_grp->mirror.idx],
+ engs->type);
+		if (i > 0 && idx == -1) {
+ len = strlen(buf);
+ scnprintf(buf+len, size-len, ", ");
+ }
+
+ len = strlen(buf);
+ scnprintf(buf+len, size-len, "%d %s ", mirrored_engs ?
+ engs->count + mirrored_engs->count : engs->count,
+ get_eng_type_str(engs->type));
+ if (mirrored_engs) {
+ len = strlen(buf);
+ scnprintf(buf+len, size-len,
+ "(%d shared with engine_group%d) ",
+ engs->count <= 0 ? engs->count +
+ mirrored_engs->count : mirrored_engs->count,
+ eng_grp->mirror.idx);
+ }
+ }
+}
+
+void otx2_cpt_print_uc_dbg_info(struct otx2_cptpf_dev *cptpf)
+{
+ struct otx2_cpt_eng_grps *eng_grps = &cptpf->eng_grps;
+ struct otx2_cpt_eng_grp_info *mirrored_grp;
+ char engs_info[2 * OTX2_CPT_NAME_LENGTH];
+ struct otx2_cpt_eng_grp_info *grp;
+ struct otx2_cpt_engs_rsvd *engs;
+	u32 mask[5];
+ int i, j;
+
+ pr_debug("Engine groups global info");
+ pr_debug("max SE %d, max IE %d, max AE %d",
+ eng_grps->avail.max_se_cnt, eng_grps->avail.max_ie_cnt,
+ eng_grps->avail.max_ae_cnt);
+ pr_debug("free SE %d", eng_grps->avail.se_cnt);
+ pr_debug("free IE %d", eng_grps->avail.ie_cnt);
+ pr_debug("free AE %d", eng_grps->avail.ae_cnt);
+
+ for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
+ grp = &eng_grps->grp[i];
+ pr_debug("engine_group%d, state %s", i, grp->is_enabled ?
+ "enabled" : "disabled");
+ if (grp->is_enabled) {
+ mirrored_grp = &eng_grps->grp[grp->mirror.idx];
+ pr_debug("Ucode0 filename %s, version %s",
+ grp->mirror.is_ena ?
+ mirrored_grp->ucode[0].filename :
+ grp->ucode[0].filename,
+ grp->mirror.is_ena ?
+ mirrored_grp->ucode[0].ver_str :
+ grp->ucode[0].ver_str);
+ if (is_2nd_ucode_used(grp))
+ pr_debug("Ucode1 filename %s, version %s",
+ grp->ucode[1].filename,
+ grp->ucode[1].ver_str);
+ }
+
+ for (j = 0; j < OTX2_CPT_MAX_ETYPES_PER_GRP; j++) {
+ engs = &grp->engs[j];
+ if (engs->type) {
+ get_engs_info(grp, engs_info,
+ 2 * OTX2_CPT_NAME_LENGTH, j);
+ pr_debug("Slot%d: %s", j, engs_info);
+ bitmap_to_arr32(mask, engs->bmap,
+ eng_grps->engs_num);
+ if (is_dev_otx2(cptpf->pdev))
+ pr_debug("Mask: %8.8x %8.8x %8.8x %8.8x",
+ mask[3], mask[2], mask[1],
+ mask[0]);
+ else
+ pr_debug("Mask: %8.8x %8.8x %8.8x %8.8x %8.8x",
+ mask[4], mask[3], mask[2], mask[1],
+ mask[0]);
+ }
+ }
+ }
+}
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h
new file mode 100644
index 000000000000..8a90ca2b95c3
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h
@@ -0,0 +1,169 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (C) 2018 Marvell.
+ */
+
+#ifndef __OTX2_CPTPF_UCODE_H
+#define __OTX2_CPTPF_UCODE_H
+
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include "otx2_cpt_hw_types.h"
+#include "otx2_cpt_common.h"
+
+/*
+ * On the OcteonTX2 platform, IPSec ucode can use both IE and SE engines,
+ * therefore IE and SE engines can be attached to the same engine group.
+ */
+#define OTX2_CPT_MAX_ETYPES_PER_GRP 2
+
+/* CPT ucode signature size */
+#define OTX2_CPT_UCODE_SIGN_LEN 256
+
+/* Microcode version string length */
+#define OTX2_CPT_UCODE_VER_STR_SZ 44
+
+/* Maximum number of supported engines/cores on OcteonTX2/CN10K platform */
+#define OTX2_CPT_MAX_ENGINES 144
+
+#define OTX2_CPT_ENGS_BITMASK_LEN BITS_TO_LONGS(OTX2_CPT_MAX_ENGINES)
+
+#define OTX2_CPT_UCODE_SZ (64 * 1024)
+
+/* Microcode types */
+enum otx2_cpt_ucode_type {
+ OTX2_CPT_AE_UC_TYPE = 1, /* AE-MAIN */
+ OTX2_CPT_SE_UC_TYPE1 = 20,/* SE-MAIN - combination of 21 and 22 */
+ OTX2_CPT_SE_UC_TYPE2 = 21,/* Fast Path IPSec + AirCrypto */
+ OTX2_CPT_SE_UC_TYPE3 = 22,/*
+ * Hash + HMAC + FlexiCrypto + RNG +
+ * Full Feature IPSec + AirCrypto + Kasumi
+ */
+ OTX2_CPT_IE_UC_TYPE1 = 30, /* IE-MAIN - combination of 31 and 32 */
+ OTX2_CPT_IE_UC_TYPE2 = 31, /* Fast Path IPSec */
+	OTX2_CPT_IE_UC_TYPE1 = 30, /* IE-MAIN - combination of 31 and 32 */
+	OTX2_CPT_IE_UC_TYPE2 = 31, /* Fast Path IPSec */
+	OTX2_CPT_IE_UC_TYPE3 = 32, /*
+				    * Hash + HMAC + FlexiCrypto + RNG +
+				    * Full Feature IPSec
+				    */
+};
+
+struct otx2_cpt_bitmap {
+ unsigned long bits[OTX2_CPT_ENGS_BITMASK_LEN];
+ int size;
+};
+
+struct otx2_cpt_engines {
+ int type;
+ int count;
+};
+
+/* Microcode version number */
+struct otx2_cpt_ucode_ver_num {
+ u8 nn;
+ u8 xx;
+ u8 yy;
+ u8 zz;
+};
+
+struct otx2_cpt_ucode_hdr {
+ struct otx2_cpt_ucode_ver_num ver_num;
+ u8 ver_str[OTX2_CPT_UCODE_VER_STR_SZ];
+ __be32 code_length;
+ u32 padding[3];
+};
+
+struct otx2_cpt_ucode {
+ u8 ver_str[OTX2_CPT_UCODE_VER_STR_SZ];/*
+ * ucode version in readable
+ * format
+ */
+ struct otx2_cpt_ucode_ver_num ver_num;/* ucode version number */
+ char filename[OTX2_CPT_NAME_LENGTH];/* ucode filename */
+ dma_addr_t dma; /* phys address of ucode image */
+ void *va; /* virt address of ucode image */
+ u32 size; /* ucode image size */
+ int type; /* ucode image type SE, IE, AE or SE+IE */
+};
+
+struct otx2_cpt_uc_info_t {
+ struct list_head list;
+ struct otx2_cpt_ucode ucode;/* microcode information */
+ const struct firmware *fw;
+};
+
+/* Maximum and current number of engines available for all engine groups */
+struct otx2_cpt_engs_available {
+ int max_se_cnt;
+ int max_ie_cnt;
+ int max_ae_cnt;
+ int se_cnt;
+ int ie_cnt;
+ int ae_cnt;
+};
+
+/* Engines reserved to an engine group */
+struct otx2_cpt_engs_rsvd {
+ int type; /* engine type */
+ int count; /* number of engines attached */
+ int offset; /* constant offset of engine type in the bitmap */
+ unsigned long *bmap; /* attached engines bitmap */
+ struct otx2_cpt_ucode *ucode; /* ucode used by these engines */
+};
+
+struct otx2_cpt_mirror_info {
+ int is_ena; /*
+ * is mirroring enabled, it is set only for engine
+ * group which mirrors another engine group
+ */
+ int idx; /*
+ * index of engine group which is mirrored by this
+ * group, set only for engine group which mirrors
+ * another group
+ */
+ int ref_count; /*
+ * number of times this engine group is mirrored by
+ * other groups, this is set only for engine group
+ * which is mirrored by other group(s)
+ */
+};
+
+struct otx2_cpt_eng_grp_info {
+ struct otx2_cpt_eng_grps *g; /* pointer to engine_groups structure */
+ /* engines attached */
+ struct otx2_cpt_engs_rsvd engs[OTX2_CPT_MAX_ETYPES_PER_GRP];
+ /* ucodes information */
+ struct otx2_cpt_ucode ucode[OTX2_CPT_MAX_ETYPES_PER_GRP];
+ /* engine group mirroring information */
+ struct otx2_cpt_mirror_info mirror;
+ int idx; /* engine group index */
+ bool is_enabled; /*
+ * is engine group enabled, engine group is enabled
+ * when it has engines attached and ucode loaded
+ */
+};
+
+struct otx2_cpt_eng_grps {
+ struct mutex lock;
+ struct otx2_cpt_eng_grp_info grp[OTX2_CPT_MAX_ENGINE_GROUPS];
+ struct otx2_cpt_engs_available avail;
+ void *obj; /* device specific data */
+ int engs_num; /* total number of engines supported */
+ u8 eng_ref_cnt[OTX2_CPT_MAX_ENGINES];/* engines reference count */
+	bool is_grps_created; /* True if engine groups have already been created */
+};
+
+struct otx2_cptpf_dev;
+int otx2_cpt_init_eng_grps(struct pci_dev *pdev,
+ struct otx2_cpt_eng_grps *eng_grps);
+void otx2_cpt_cleanup_eng_grps(struct pci_dev *pdev,
+ struct otx2_cpt_eng_grps *eng_grps);
+int otx2_cpt_create_eng_grps(struct otx2_cptpf_dev *cptpf,
+ struct otx2_cpt_eng_grps *eng_grps);
+int otx2_cpt_disable_all_cores(struct otx2_cptpf_dev *cptpf);
+int otx2_cpt_get_eng_grp(struct otx2_cpt_eng_grps *eng_grps, int eng_type);
+int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf);
+int otx2_cpt_dl_custom_egrp_create(struct otx2_cptpf_dev *cptpf,
+ struct devlink_param_gset_ctx *ctx);
+int otx2_cpt_dl_custom_egrp_delete(struct otx2_cptpf_dev *cptpf,
+ struct devlink_param_gset_ctx *ctx);
+void otx2_cpt_print_uc_dbg_info(struct otx2_cptpf_dev *cptpf);
+#endif /* __OTX2_CPTPF_UCODE_H */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf.h b/drivers/crypto/marvell/octeontx2/otx2_cptvf.h
new file mode 100644
index 000000000000..cb4528df2f48
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (C) 2018 Marvell.
+ */
+
+#ifndef __OTX2_CPTVF_H
+#define __OTX2_CPTVF_H
+
+#include "mbox.h"
+#include "otx2_cptlf.h"
+
+struct otx2_cptvf_dev {
+ void __iomem *reg_base; /* Register start address */
+ void __iomem *pfvf_mbox_base; /* PF-VF mbox start address */
+ struct pci_dev *pdev; /* PCI device handle */
+ struct otx2_cptlfs_info lfs; /* CPT LFs attached to this VF */
+ u8 vf_id; /* Virtual function index */
+
+ /* PF <=> VF mbox */
+ struct otx2_mbox pfvf_mbox;
+ struct work_struct pfvf_mbox_work;
+ struct workqueue_struct *pfvf_mbox_wq;
+ int blkaddr;
+ void *bbuf_base;
+ unsigned long cap_flag;
+};
+
+irqreturn_t otx2_cptvf_pfvf_mbox_intr(int irq, void *arg);
+void otx2_cptvf_pfvf_mbox_handler(struct work_struct *work);
+int otx2_cptvf_send_eng_grp_num_msg(struct otx2_cptvf_dev *cptvf, int eng_type);
+int otx2_cptvf_send_kvf_limits_msg(struct otx2_cptvf_dev *cptvf);
+int otx2_cpt_mbox_bbuf_init(struct otx2_cptvf_dev *cptvf, struct pci_dev *pdev);
+
+#endif /* __OTX2_CPTVF_H */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
new file mode 100644
index 000000000000..ddfa82566bfc
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
@@ -0,0 +1,1771 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2019 Marvell. */
+
+#include <crypto/aes.h>
+#include <crypto/authenc.h>
+#include <crypto/cryptd.h>
+#include <crypto/des.h>
+#include <crypto/internal/aead.h>
+#include <crypto/sha.h>
+#include <crypto/xts.h>
+#include <crypto/gcm.h>
+#include <crypto/scatterwalk.h>
+#include <linux/rtnetlink.h>
+#include <linux/sort.h>
+#include <linux/module.h>
+#include "otx2_cptvf.h"
+#include "otx2_cptvf_algs.h"
+#include "otx2_cpt_reqmgr.h"
+
+/* Size of salt in AES GCM mode */
+#define AES_GCM_SALT_SIZE 4
+/* Size of IV in AES GCM mode */
+#define AES_GCM_IV_SIZE 8
+/* Size of ICV (Integrity Check Value) in AES GCM mode */
+#define AES_GCM_ICV_SIZE 16
+/* Offset of IV in AES GCM mode */
+#define AES_GCM_IV_OFFSET 8
+#define CONTROL_WORD_LEN 8
+#define KEY2_OFFSET 48
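+/* DMA_MODE_FLAG() sets bit 7 of the opcode major field for SG DMA mode */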
+#define DMA_MODE_FLAG(dma_mode) \
+ (((dma_mode) == OTX2_CPT_DMA_MODE_SG) ? (1 << 7) : 0)
+
+/* Truncated SHA digest size */
+#define SHA1_TRUNC_DIGEST_SIZE 12
+#define SHA256_TRUNC_DIGEST_SIZE 16
+#define SHA384_TRUNC_DIGEST_SIZE 24
+#define SHA512_TRUNC_DIGEST_SIZE 32
+
+static DEFINE_MUTEX(mutex);
+static int is_crypto_registered;
+
+struct cpt_device_desc {
+ struct pci_dev *dev;
+ int num_queues;
+};
+
+struct cpt_device_table {
+ atomic_t count;
+ struct cpt_device_desc desc[OTX2_CPT_MAX_LFS_NUM];
+};
+
+static struct cpt_device_table se_devices = {
+ .count = ATOMIC_INIT(0)
+};
+
+static inline int get_se_device(struct pci_dev **pdev, int *cpu_num)
+{
+ int count;
+
+ count = atomic_read(&se_devices.count);
+ if (count < 1)
+ return -ENODEV;
+
+ *cpu_num = get_cpu();
+ /*
+	 * On the OcteonTX2 platform a CPT instruction queue is bound to
+	 * each local function (LF); LFs in turn can be attached to a PF
+	 * or a VF, therefore we always use the first device. Maximum
+	 * performance is achieved when one CPT queue is available per
+	 * CPU, otherwise CPT queues have to be shared between CPUs.
+ */
+ if (*cpu_num >= se_devices.desc[0].num_queues)
+ *cpu_num %= se_devices.desc[0].num_queues;
+ *pdev = se_devices.desc[0].dev;
+
+ put_cpu();
+
+ return 0;
+}
+
+static inline int validate_hmac_cipher_null(struct otx2_cpt_req_info *cpt_req)
+{
+ struct otx2_cpt_req_ctx *rctx;
+ struct aead_request *req;
+ struct crypto_aead *tfm;
+
+ req = container_of(cpt_req->areq, struct aead_request, base);
+ tfm = crypto_aead_reqtfm(req);
+ rctx = aead_request_ctx(req);
+ if (memcmp(rctx->fctx.hmac.s.hmac_calc,
+ rctx->fctx.hmac.s.hmac_recv,
+ crypto_aead_authsize(tfm)) != 0)
+ return -EBADMSG;
+
+ return 0;
+}
+
+static void otx2_cpt_aead_callback(int status, void *arg1, void *arg2)
+{
+ struct otx2_cpt_inst_info *inst_info = arg2;
+ struct crypto_async_request *areq = arg1;
+ struct otx2_cpt_req_info *cpt_req;
+ struct pci_dev *pdev;
+
+ if (inst_info) {
+ cpt_req = inst_info->req;
+ if (!status) {
+ /*
+ * When selected cipher is NULL we need to manually
+ * verify whether calculated hmac value matches
+ * received hmac value
+ */
+ if (cpt_req->req_type ==
+ OTX2_CPT_AEAD_ENC_DEC_NULL_REQ &&
+ !cpt_req->is_enc)
+ status = validate_hmac_cipher_null(cpt_req);
+ }
+ pdev = inst_info->pdev;
+ otx2_cpt_info_destroy(pdev, inst_info);
+ }
+ if (areq)
+ areq->complete(areq, status);
+}
+
+static void output_iv_copyback(struct crypto_async_request *areq)
+{
+ struct otx2_cpt_req_info *req_info;
+ struct otx2_cpt_req_ctx *rctx;
+ struct skcipher_request *sreq;
+ struct crypto_skcipher *stfm;
+ struct otx2_cpt_enc_ctx *ctx;
+ u32 start, ivsize;
+
+ sreq = container_of(areq, struct skcipher_request, base);
+ stfm = crypto_skcipher_reqtfm(sreq);
+ ctx = crypto_skcipher_ctx(stfm);
+ if (ctx->cipher_type == OTX2_CPT_AES_CBC ||
+ ctx->cipher_type == OTX2_CPT_DES3_CBC) {
+ rctx = skcipher_request_ctx(sreq);
+ req_info = &rctx->cpt_req;
+ ivsize = crypto_skcipher_ivsize(stfm);
+ start = sreq->cryptlen - ivsize;
+
+ if (req_info->is_enc) {
+ scatterwalk_map_and_copy(sreq->iv, sreq->dst, start,
+ ivsize, 0);
+ } else {
+ if (sreq->src != sreq->dst) {
+ scatterwalk_map_and_copy(sreq->iv, sreq->src,
+ start, ivsize, 0);
+ } else {
+ memcpy(sreq->iv, req_info->iv_out, ivsize);
+ kfree(req_info->iv_out);
+ }
+ }
+ }
+}
+
+static void otx2_cpt_skcipher_callback(int status, void *arg1, void *arg2)
+{
+ struct otx2_cpt_inst_info *inst_info = arg2;
+ struct crypto_async_request *areq = arg1;
+ struct pci_dev *pdev;
+
+ if (areq) {
+ if (!status)
+ output_iv_copyback(areq);
+ if (inst_info) {
+ pdev = inst_info->pdev;
+ otx2_cpt_info_destroy(pdev, inst_info);
+ }
+ areq->complete(areq, status);
+ }
+}
+
+static inline void update_input_data(struct otx2_cpt_req_info *req_info,
+ struct scatterlist *inp_sg,
+ u32 nbytes, u32 *argcnt)
+{
+ req_info->req.dlen += nbytes;
+
+ while (nbytes) {
+ u32 len = (nbytes < inp_sg->length) ? nbytes : inp_sg->length;
+ u8 *ptr = sg_virt(inp_sg);
+
+ req_info->in[*argcnt].vptr = (void *)ptr;
+ req_info->in[*argcnt].size = len;
+ nbytes -= len;
+ ++(*argcnt);
+ inp_sg = sg_next(inp_sg);
+ }
+}
+
+static inline void update_output_data(struct otx2_cpt_req_info *req_info,
+ struct scatterlist *outp_sg,
+ u32 offset, u32 nbytes, u32 *argcnt)
+{
+ u32 len, sg_len;
+ u8 *ptr;
+
+ req_info->rlen += nbytes;
+
+ while (nbytes) {
+ sg_len = outp_sg->length - offset;
+ len = (nbytes < sg_len) ? nbytes : sg_len;
+ ptr = sg_virt(outp_sg);
+
+ req_info->out[*argcnt].vptr = (void *) (ptr + offset);
+ req_info->out[*argcnt].size = len;
+ nbytes -= len;
+ ++(*argcnt);
+ offset = 0;
+ outp_sg = sg_next(outp_sg);
+ }
+}
+
+static inline int create_ctx_hdr(struct skcipher_request *req, u32 enc,
+ u32 *argcnt)
+{
+ struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
+ struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx(req);
+ struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
+ struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
+ struct otx2_cpt_fc_ctx *fctx = &rctx->fctx;
+ int ivsize = crypto_skcipher_ivsize(stfm);
+ u32 start = req->cryptlen - ivsize;
+ gfp_t flags;
+
+ flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ req_info->ctrl.s.dma_mode = OTX2_CPT_DMA_MODE_SG;
+ req_info->ctrl.s.se_req = 1;
+
+ req_info->req.opcode.s.major = OTX2_CPT_MAJOR_OP_FC |
+ DMA_MODE_FLAG(OTX2_CPT_DMA_MODE_SG);
+ if (enc) {
+ req_info->req.opcode.s.minor = 2;
+ } else {
+ req_info->req.opcode.s.minor = 3;
+ if ((ctx->cipher_type == OTX2_CPT_AES_CBC ||
+ ctx->cipher_type == OTX2_CPT_DES3_CBC) &&
+ req->src == req->dst) {
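+			/*
+			 * For in-place CBC decryption save the last
+			 * ciphertext block now: it is the output IV and
+			 * would otherwise be overwritten by the result.
+			 */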
+ req_info->iv_out = kmalloc(ivsize, flags);
+ if (!req_info->iv_out)
+ return -ENOMEM;
+
+ scatterwalk_map_and_copy(req_info->iv_out, req->src,
+ start, ivsize, 0);
+ }
+ }
+ /* Encryption data length */
+ req_info->req.param1 = req->cryptlen;
+ /* Authentication data length */
+ req_info->req.param2 = 0;
+
+ fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
+ fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
+ fctx->enc.enc_ctrl.e.iv_source = OTX2_CPT_FROM_CPTR;
+
+ if (ctx->cipher_type == OTX2_CPT_AES_XTS)
+ memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len * 2);
+ else
+ memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len);
+
+ memcpy(fctx->enc.encr_iv, req->iv, crypto_skcipher_ivsize(stfm));
+
+ cpu_to_be64s(&fctx->enc.enc_ctrl.u);
+
+ /*
+	 * The first 8 bytes of the input carry the offset control word
+	 * (packet data information).
+ */
+ req_info->in[*argcnt].vptr = (u8 *)&rctx->ctrl_word;
+ req_info->in[*argcnt].size = CONTROL_WORD_LEN;
+ req_info->req.dlen += CONTROL_WORD_LEN;
+ ++(*argcnt);
+
+ req_info->in[*argcnt].vptr = (u8 *)fctx;
+ req_info->in[*argcnt].size = sizeof(struct otx2_cpt_fc_ctx);
+ req_info->req.dlen += sizeof(struct otx2_cpt_fc_ctx);
+
+ ++(*argcnt);
+
+ return 0;
+}
+
+static inline int create_input_list(struct skcipher_request *req, u32 enc,
+ u32 enc_iv_len)
+{
+ struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx(req);
+ struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
+ u32 argcnt = 0;
+ int ret;
+
+ ret = create_ctx_hdr(req, enc, &argcnt);
+ if (ret)
+ return ret;
+
+ update_input_data(req_info, req->src, req->cryptlen, &argcnt);
+ req_info->in_cnt = argcnt;
+
+ return 0;
+}
+
+static inline void create_output_list(struct skcipher_request *req,
+ u32 enc_iv_len)
+{
+ struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx(req);
+ struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
+ u32 argcnt = 0;
+
+ /*
+	 * Output buffer processing: the AES encryption/decryption
+	 * output is received in the following format:
+	 *
+	 * |------- IV -------|---- encrypted/decrypted data ----|
+	 * | 16 bytes (CBC)   |    request enc/dec data length   |
+ */
+ update_output_data(req_info, req->dst, 0, req->cryptlen, &argcnt);
+ req_info->out_cnt = argcnt;
+}
+
+static int skcipher_do_fallback(struct skcipher_request *req, bool is_enc)
+{
+ struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
+ struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx(req);
+ struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
+ int ret;
+
+ if (ctx->fbk_cipher) {
+ skcipher_request_set_tfm(&rctx->sk_fbk_req, ctx->fbk_cipher);
+ skcipher_request_set_callback(&rctx->sk_fbk_req,
+ req->base.flags,
+ req->base.complete,
+ req->base.data);
+ skcipher_request_set_crypt(&rctx->sk_fbk_req, req->src,
+ req->dst, req->cryptlen, req->iv);
+ ret = is_enc ? crypto_skcipher_encrypt(&rctx->sk_fbk_req) :
+ crypto_skcipher_decrypt(&rctx->sk_fbk_req);
+ } else {
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+static inline int cpt_enc_dec(struct skcipher_request *req, u32 enc)
+{
+ struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
+ struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx(req);
+ struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
+ struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
+ u32 enc_iv_len = crypto_skcipher_ivsize(stfm);
+ struct pci_dev *pdev;
+ int status, cpu_num;
+
+ if (req->cryptlen == 0)
+ return 0;
+
+ if (!IS_ALIGNED(req->cryptlen, ctx->enc_align_len))
+ return -EINVAL;
+
+ if (req->cryptlen > OTX2_CPT_MAX_REQ_SIZE)
+ return skcipher_do_fallback(req, enc);
+
+ /* Clear control words */
+ rctx->ctrl_word.flags = 0;
+ rctx->fctx.enc.enc_ctrl.u = 0;
+
+ status = create_input_list(req, enc, enc_iv_len);
+ if (status)
+ return status;
+ create_output_list(req, enc_iv_len);
+
+ status = get_se_device(&pdev, &cpu_num);
+ if (status)
+ return status;
+
+ req_info->callback = otx2_cpt_skcipher_callback;
+ req_info->areq = &req->base;
+ req_info->req_type = OTX2_CPT_ENC_DEC_REQ;
+ req_info->is_enc = enc;
+ req_info->is_trunc_hmac = false;
+ req_info->ctrl.s.grp = otx2_cpt_get_kcrypto_eng_grp_num(pdev);
+
+ /*
+	 * The send is asynchronous; once the request completes, the
+	 * driver notifies the caller through the registered callback.
+ */
+ status = otx2_cpt_do_request(pdev, req_info, cpu_num);
+
+ return status;
+}
+
+static int otx2_cpt_skcipher_encrypt(struct skcipher_request *req)
+{
+ return cpt_enc_dec(req, true);
+}
+
+static int otx2_cpt_skcipher_decrypt(struct skcipher_request *req)
+{
+ return cpt_enc_dec(req, false);
+}
+
+static int otx2_cpt_skcipher_xts_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, u32 keylen)
+{
+ struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
+ const u8 *key2 = key + (keylen / 2);
+ const u8 *key1 = key;
+ int ret;
+
+ ret = xts_check_key(crypto_skcipher_tfm(tfm), key, keylen);
+ if (ret)
+ return ret;
+ ctx->key_len = keylen;
+ ctx->enc_align_len = 1;
+ memcpy(ctx->enc_key, key1, keylen / 2);
+ memcpy(ctx->enc_key + KEY2_OFFSET, key2, keylen / 2);
+ ctx->cipher_type = OTX2_CPT_AES_XTS;
+ switch (ctx->key_len) {
+ case 2 * AES_KEYSIZE_128:
+ ctx->key_type = OTX2_CPT_AES_128_BIT;
+ break;
+ case 2 * AES_KEYSIZE_192:
+ ctx->key_type = OTX2_CPT_AES_192_BIT;
+ break;
+ case 2 * AES_KEYSIZE_256:
+ ctx->key_type = OTX2_CPT_AES_256_BIT;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return crypto_skcipher_setkey(ctx->fbk_cipher, key, keylen);
+}
+
+static int cpt_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ u32 keylen, u8 cipher_type)
+{
+ struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ if (keylen != DES3_EDE_KEY_SIZE)
+ return -EINVAL;
+
+ ctx->key_len = keylen;
+ ctx->cipher_type = cipher_type;
+ ctx->enc_align_len = 8;
+
+ memcpy(ctx->enc_key, key, keylen);
+
+ return crypto_skcipher_setkey(ctx->fbk_cipher, key, keylen);
+}
+
+static int cpt_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ u32 keylen, u8 cipher_type)
+{
+ struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ switch (keylen) {
+ case AES_KEYSIZE_128:
+ ctx->key_type = OTX2_CPT_AES_128_BIT;
+ break;
+ case AES_KEYSIZE_192:
+ ctx->key_type = OTX2_CPT_AES_192_BIT;
+ break;
+ case AES_KEYSIZE_256:
+ ctx->key_type = OTX2_CPT_AES_256_BIT;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (cipher_type == OTX2_CPT_AES_CBC || cipher_type == OTX2_CPT_AES_ECB)
+ ctx->enc_align_len = 16;
+ else
+ ctx->enc_align_len = 1;
+
+ ctx->key_len = keylen;
+ ctx->cipher_type = cipher_type;
+
+ memcpy(ctx->enc_key, key, keylen);
+
+ return crypto_skcipher_setkey(ctx->fbk_cipher, key, keylen);
+}
+
+static int otx2_cpt_skcipher_cbc_aes_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, u32 keylen)
+{
+ return cpt_aes_setkey(tfm, key, keylen, OTX2_CPT_AES_CBC);
+}
+
+static int otx2_cpt_skcipher_ecb_aes_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, u32 keylen)
+{
+ return cpt_aes_setkey(tfm, key, keylen, OTX2_CPT_AES_ECB);
+}
+
+static int otx2_cpt_skcipher_cbc_des3_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, u32 keylen)
+{
+ return cpt_des_setkey(tfm, key, keylen, OTX2_CPT_DES3_CBC);
+}
+
+static int otx2_cpt_skcipher_ecb_des3_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, u32 keylen)
+{
+ return cpt_des_setkey(tfm, key, keylen, OTX2_CPT_DES3_ECB);
+}
+
+static int cpt_skcipher_fallback_init(struct otx2_cpt_enc_ctx *ctx,
+ struct crypto_alg *alg)
+{
+ if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
+ ctx->fbk_cipher =
+ crypto_alloc_skcipher(alg->cra_name, 0,
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->fbk_cipher)) {
+ pr_err("%s() failed to allocate fallback for %s\n",
+ __func__, alg->cra_name);
+ return PTR_ERR(ctx->fbk_cipher);
+ }
+ }
+ return 0;
+}
+
+static int otx2_cpt_enc_dec_init(struct crypto_skcipher *stfm)
+{
+ struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
+ struct crypto_alg *alg = tfm->__crt_alg;
+
+ memset(ctx, 0, sizeof(*ctx));
+ /*
+ * Additional memory for skcipher_request is
+ * allocated since the cryptd daemon uses
+ * this memory for request_ctx information
+ */
+ crypto_skcipher_set_reqsize(stfm, sizeof(struct otx2_cpt_req_ctx) +
+ sizeof(struct skcipher_request));
+
+ return cpt_skcipher_fallback_init(ctx, alg);
+}
+
+static void otx2_cpt_skcipher_exit(struct crypto_skcipher *tfm)
+{
+ struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ if (ctx->fbk_cipher) {
+ crypto_free_skcipher(ctx->fbk_cipher);
+ ctx->fbk_cipher = NULL;
+ }
+}
+
+static int cpt_aead_fallback_init(struct otx2_cpt_aead_ctx *ctx,
+ struct crypto_alg *alg)
+{
+ if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
+ ctx->fbk_cipher =
+ crypto_alloc_aead(alg->cra_name, 0,
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->fbk_cipher)) {
+ pr_err("%s() failed to allocate fallback for %s\n",
+ __func__, alg->cra_name);
+ return PTR_ERR(ctx->fbk_cipher);
+ }
+ }
+ return 0;
+}
+
+static int cpt_aead_init(struct crypto_aead *atfm, u8 cipher_type, u8 mac_type)
+{
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(atfm);
+ struct crypto_tfm *tfm = crypto_aead_tfm(atfm);
+ struct crypto_alg *alg = tfm->__crt_alg;
+
+ ctx->cipher_type = cipher_type;
+ ctx->mac_type = mac_type;
+
+ /*
+ * When selected cipher is NULL we use HMAC opcode instead of
+ * FLEXICRYPTO opcode therefore we don't need to use HASH algorithms
+ * for calculating ipad and opad
+ */
+ if (ctx->cipher_type != OTX2_CPT_CIPHER_NULL) {
+ switch (ctx->mac_type) {
+ case OTX2_CPT_SHA1:
+ ctx->hashalg = crypto_alloc_shash("sha1", 0,
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(ctx->hashalg))
+ return PTR_ERR(ctx->hashalg);
+ break;
+
+ case OTX2_CPT_SHA256:
+ ctx->hashalg = crypto_alloc_shash("sha256", 0,
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(ctx->hashalg))
+ return PTR_ERR(ctx->hashalg);
+ break;
+
+ case OTX2_CPT_SHA384:
+ ctx->hashalg = crypto_alloc_shash("sha384", 0,
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(ctx->hashalg))
+ return PTR_ERR(ctx->hashalg);
+ break;
+
+ case OTX2_CPT_SHA512:
+ ctx->hashalg = crypto_alloc_shash("sha512", 0,
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(ctx->hashalg))
+ return PTR_ERR(ctx->hashalg);
+ break;
+ }
+ }
+ switch (ctx->cipher_type) {
+ case OTX2_CPT_AES_CBC:
+ case OTX2_CPT_AES_ECB:
+ ctx->enc_align_len = 16;
+ break;
+ case OTX2_CPT_DES3_CBC:
+ case OTX2_CPT_DES3_ECB:
+ ctx->enc_align_len = 8;
+ break;
+ case OTX2_CPT_AES_GCM:
+ case OTX2_CPT_CIPHER_NULL:
+ ctx->enc_align_len = 1;
+ break;
+ }
+ crypto_aead_set_reqsize(atfm, sizeof(struct otx2_cpt_req_ctx));
+
+ return cpt_aead_fallback_init(ctx, alg);
+}
+
+static int otx2_cpt_aead_cbc_aes_sha1_init(struct crypto_aead *tfm)
+{
+ return cpt_aead_init(tfm, OTX2_CPT_AES_CBC, OTX2_CPT_SHA1);
+}
+
+static int otx2_cpt_aead_cbc_aes_sha256_init(struct crypto_aead *tfm)
+{
+ return cpt_aead_init(tfm, OTX2_CPT_AES_CBC, OTX2_CPT_SHA256);
+}
+
+static int otx2_cpt_aead_cbc_aes_sha384_init(struct crypto_aead *tfm)
+{
+ return cpt_aead_init(tfm, OTX2_CPT_AES_CBC, OTX2_CPT_SHA384);
+}
+
+static int otx2_cpt_aead_cbc_aes_sha512_init(struct crypto_aead *tfm)
+{
+ return cpt_aead_init(tfm, OTX2_CPT_AES_CBC, OTX2_CPT_SHA512);
+}
+
+static int otx2_cpt_aead_ecb_null_sha1_init(struct crypto_aead *tfm)
+{
+ return cpt_aead_init(tfm, OTX2_CPT_CIPHER_NULL, OTX2_CPT_SHA1);
+}
+
+static int otx2_cpt_aead_ecb_null_sha256_init(struct crypto_aead *tfm)
+{
+ return cpt_aead_init(tfm, OTX2_CPT_CIPHER_NULL, OTX2_CPT_SHA256);
+}
+
+static int otx2_cpt_aead_ecb_null_sha384_init(struct crypto_aead *tfm)
+{
+ return cpt_aead_init(tfm, OTX2_CPT_CIPHER_NULL, OTX2_CPT_SHA384);
+}
+
+static int otx2_cpt_aead_ecb_null_sha512_init(struct crypto_aead *tfm)
+{
+ return cpt_aead_init(tfm, OTX2_CPT_CIPHER_NULL, OTX2_CPT_SHA512);
+}
+
+static int otx2_cpt_aead_gcm_aes_init(struct crypto_aead *tfm)
+{
+ return cpt_aead_init(tfm, OTX2_CPT_AES_GCM, OTX2_CPT_MAC_NULL);
+}
+
+static void otx2_cpt_aead_exit(struct crypto_aead *tfm)
+{
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+ kfree(ctx->ipad);
+ kfree(ctx->opad);
+ if (ctx->hashalg)
+ crypto_free_shash(ctx->hashalg);
+ kfree(ctx->sdesc);
+
+ if (ctx->fbk_cipher) {
+ crypto_free_aead(ctx->fbk_cipher);
+ ctx->fbk_cipher = NULL;
+ }
+}
+
+static int otx2_cpt_aead_gcm_set_authsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+ if (crypto_rfc4106_check_authsize(authsize))
+ return -EINVAL;
+
+ tfm->authsize = authsize;
+ /* Set authsize for fallback case */
+ if (ctx->fbk_cipher)
+ ctx->fbk_cipher->authsize = authsize;
+
+ return 0;
+}
+
+static int otx2_cpt_aead_set_authsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ tfm->authsize = authsize;
+
+ return 0;
+}
+
+static int otx2_cpt_aead_null_set_authsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+ ctx->is_trunc_hmac = true;
+ tfm->authsize = authsize;
+
+ return 0;
+}
+
+static struct otx2_cpt_sdesc *alloc_sdesc(struct crypto_shash *alg)
+{
+ struct otx2_cpt_sdesc *sdesc;
+ int size;
+
+ size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
+ sdesc = kmalloc(size, GFP_KERNEL);
+ if (!sdesc)
+ return NULL;
+
+ sdesc->shash.tfm = alg;
+
+ return sdesc;
+}
+
+static inline void swap_data32(void *buf, u32 len)
+{
+ cpu_to_be32_array(buf, buf, len / 4);
+}
+
+static inline void swap_data64(void *buf, u32 len)
+{
+ u64 *src = buf;
+ int i = 0;
+
+ for (i = 0 ; i < len / 8; i++, src++)
+ cpu_to_be64s(src);
+}
+
+static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad)
+{
+ struct sha512_state *sha512;
+ struct sha256_state *sha256;
+ struct sha1_state *sha1;
+
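+	/*
+	 * Convert the exported partial hash state words to big-endian
+	 * before copying them into the pad buffers handed to the engine.
+	 */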
+ switch (mac_type) {
+ case OTX2_CPT_SHA1:
+ sha1 = (struct sha1_state *) in_pad;
+ swap_data32(sha1->state, SHA1_DIGEST_SIZE);
+ memcpy(out_pad, &sha1->state, SHA1_DIGEST_SIZE);
+ break;
+
+ case OTX2_CPT_SHA256:
+ sha256 = (struct sha256_state *) in_pad;
+ swap_data32(sha256->state, SHA256_DIGEST_SIZE);
+ memcpy(out_pad, &sha256->state, SHA256_DIGEST_SIZE);
+ break;
+
+ case OTX2_CPT_SHA384:
+ case OTX2_CPT_SHA512:
+ sha512 = (struct sha512_state *) in_pad;
+ swap_data64(sha512->state, SHA512_DIGEST_SIZE);
+ memcpy(out_pad, &sha512->state, SHA512_DIGEST_SIZE);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int aead_hmac_init(struct crypto_aead *cipher)
+{
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);
+ int state_size = crypto_shash_statesize(ctx->hashalg);
+ int ds = crypto_shash_digestsize(ctx->hashalg);
+ int bs = crypto_shash_blocksize(ctx->hashalg);
+ int authkeylen = ctx->auth_key_len;
+ u8 *ipad = NULL, *opad = NULL;
+ int ret = 0, icount = 0;
+
+ ctx->sdesc = alloc_sdesc(ctx->hashalg);
+ if (!ctx->sdesc)
+ return -ENOMEM;
+
+ ctx->ipad = kzalloc(bs, GFP_KERNEL);
+ if (!ctx->ipad) {
+ ret = -ENOMEM;
+ goto calc_fail;
+ }
+
+ ctx->opad = kzalloc(bs, GFP_KERNEL);
+ if (!ctx->opad) {
+ ret = -ENOMEM;
+ goto calc_fail;
+ }
+
+ ipad = kzalloc(state_size, GFP_KERNEL);
+ if (!ipad) {
+ ret = -ENOMEM;
+ goto calc_fail;
+ }
+
+ opad = kzalloc(state_size, GFP_KERNEL);
+ if (!opad) {
+ ret = -ENOMEM;
+ goto calc_fail;
+ }
+
+ if (authkeylen > bs) {
+ ret = crypto_shash_digest(&ctx->sdesc->shash, ctx->key,
+ authkeylen, ipad);
+ if (ret)
+ goto calc_fail;
+
+ authkeylen = ds;
+ } else {
+ memcpy(ipad, ctx->key, authkeylen);
+ }
+
+ memset(ipad + authkeylen, 0, bs - authkeylen);
+ memcpy(opad, ipad, bs);
+
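+	/* XOR in the standard HMAC ipad/opad constants (RFC 2104) */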
+ for (icount = 0; icount < bs; icount++) {
+ ipad[icount] ^= 0x36;
+ opad[icount] ^= 0x5c;
+ }
+
+ /*
+	 * The partial hash state computed by the software algorithm is
+	 * exported for both IPAD and OPAD.
+ */
+
+ /* IPAD Calculation */
+ crypto_shash_init(&ctx->sdesc->shash);
+ crypto_shash_update(&ctx->sdesc->shash, ipad, bs);
+ crypto_shash_export(&ctx->sdesc->shash, ipad);
+ ret = copy_pad(ctx->mac_type, ctx->ipad, ipad);
+ if (ret)
+ goto calc_fail;
+
+ /* OPAD Calculation */
+ crypto_shash_init(&ctx->sdesc->shash);
+ crypto_shash_update(&ctx->sdesc->shash, opad, bs);
+ crypto_shash_export(&ctx->sdesc->shash, opad);
+ ret = copy_pad(ctx->mac_type, ctx->opad, opad);
+ if (ret)
+ goto calc_fail;
+
+ kfree(ipad);
+ kfree(opad);
+
+ return 0;
+
+calc_fail:
+ kfree(ctx->ipad);
+ ctx->ipad = NULL;
+ kfree(ctx->opad);
+ ctx->opad = NULL;
+ kfree(ipad);
+ kfree(opad);
+ kfree(ctx->sdesc);
+ ctx->sdesc = NULL;
+
+ return ret;
+}
+
+static int otx2_cpt_aead_cbc_aes_sha_setkey(struct crypto_aead *cipher,
+ const unsigned char *key,
+ unsigned int keylen)
+{
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);
+ struct crypto_authenc_key_param *param;
+ int enckeylen = 0, authkeylen = 0;
+ struct rtattr *rta = (void *)key;
+ int status;
+
+ if (!RTA_OK(rta, keylen))
+ return -EINVAL;
+
+ if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+ return -EINVAL;
+
+ if (RTA_PAYLOAD(rta) < sizeof(*param))
+ return -EINVAL;
+
+ param = RTA_DATA(rta);
+ enckeylen = be32_to_cpu(param->enckeylen);
+ key += RTA_ALIGN(rta->rta_len);
+ keylen -= RTA_ALIGN(rta->rta_len);
+ if (keylen < enckeylen)
+ return -EINVAL;
+
+ if (keylen > OTX2_CPT_MAX_KEY_SIZE)
+ return -EINVAL;
+
+ authkeylen = keylen - enckeylen;
+ memcpy(ctx->key, key, keylen);
+
+ switch (enckeylen) {
+ case AES_KEYSIZE_128:
+ ctx->key_type = OTX2_CPT_AES_128_BIT;
+ break;
+ case AES_KEYSIZE_192:
+ ctx->key_type = OTX2_CPT_AES_192_BIT;
+ break;
+ case AES_KEYSIZE_256:
+ ctx->key_type = OTX2_CPT_AES_256_BIT;
+ break;
+ default:
+ /* Invalid key length */
+ return -EINVAL;
+ }
+
+ ctx->enc_key_len = enckeylen;
+ ctx->auth_key_len = authkeylen;
+
+ status = aead_hmac_init(cipher);
+ if (status)
+ return status;
+
+ return 0;
+}
+
+static int otx2_cpt_aead_ecb_null_sha_setkey(struct crypto_aead *cipher,
+ const unsigned char *key,
+ unsigned int keylen)
+{
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);
+ struct crypto_authenc_key_param *param;
+ struct rtattr *rta = (void *)key;
+ int enckeylen = 0;
+
+ if (!RTA_OK(rta, keylen))
+ return -EINVAL;
+
+ if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+ return -EINVAL;
+
+ if (RTA_PAYLOAD(rta) < sizeof(*param))
+ return -EINVAL;
+
+ param = RTA_DATA(rta);
+ enckeylen = be32_to_cpu(param->enckeylen);
+ key += RTA_ALIGN(rta->rta_len);
+ keylen -= RTA_ALIGN(rta->rta_len);
+ if (enckeylen != 0)
+ return -EINVAL;
+
+ if (keylen > OTX2_CPT_MAX_KEY_SIZE)
+ return -EINVAL;
+
+ memcpy(ctx->key, key, keylen);
+ ctx->enc_key_len = enckeylen;
+ ctx->auth_key_len = keylen;
+
+ return 0;
+}
+
+static int otx2_cpt_aead_gcm_aes_setkey(struct crypto_aead *cipher,
+ const unsigned char *key,
+ unsigned int keylen)
+{
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);
+
+ /*
+	 * For AES GCM we expect the encryption key (16, 24 or 32 bytes)
+	 * followed by the salt (4 bytes).
+ */
+ switch (keylen) {
+ case AES_KEYSIZE_128 + AES_GCM_SALT_SIZE:
+ ctx->key_type = OTX2_CPT_AES_128_BIT;
+ ctx->enc_key_len = AES_KEYSIZE_128;
+ break;
+ case AES_KEYSIZE_192 + AES_GCM_SALT_SIZE:
+ ctx->key_type = OTX2_CPT_AES_192_BIT;
+ ctx->enc_key_len = AES_KEYSIZE_192;
+ break;
+ case AES_KEYSIZE_256 + AES_GCM_SALT_SIZE:
+ ctx->key_type = OTX2_CPT_AES_256_BIT;
+ ctx->enc_key_len = AES_KEYSIZE_256;
+ break;
+ default:
+ /* Invalid key and salt length */
+ return -EINVAL;
+ }
+
+ /* Store encryption key and salt */
+ memcpy(ctx->key, key, keylen);
+
+ return crypto_aead_setkey(ctx->fbk_cipher, key, keylen);
+}
+
+static inline int create_aead_ctx_hdr(struct aead_request *req, u32 enc,
+ u32 *argcnt)
+{
+ struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
+ struct otx2_cpt_fc_ctx *fctx = &rctx->fctx;
+ int mac_len = crypto_aead_authsize(tfm);
+ int ds;
+
+ rctx->ctrl_word.e.enc_data_offset = req->assoclen;
+
+ switch (ctx->cipher_type) {
+ case OTX2_CPT_AES_CBC:
+ if (req->assoclen > 248 || !IS_ALIGNED(req->assoclen, 8))
+ return -EINVAL;
+
+ fctx->enc.enc_ctrl.e.iv_source = OTX2_CPT_FROM_CPTR;
+ /* Copy encryption key to context */
+ memcpy(fctx->enc.encr_key, ctx->key + ctx->auth_key_len,
+ ctx->enc_key_len);
+ /* Copy IV to context */
+ memcpy(fctx->enc.encr_iv, req->iv, crypto_aead_ivsize(tfm));
+
+ ds = crypto_shash_digestsize(ctx->hashalg);
+ if (ctx->mac_type == OTX2_CPT_SHA384)
+ ds = SHA512_DIGEST_SIZE;
+ if (ctx->ipad)
+ memcpy(fctx->hmac.e.ipad, ctx->ipad, ds);
+ if (ctx->opad)
+ memcpy(fctx->hmac.e.opad, ctx->opad, ds);
+ break;
+
+ case OTX2_CPT_AES_GCM:
+ if (crypto_ipsec_check_assoclen(req->assoclen))
+ return -EINVAL;
+
+ fctx->enc.enc_ctrl.e.iv_source = OTX2_CPT_FROM_DPTR;
+ /* Copy encryption key to context */
+ memcpy(fctx->enc.encr_key, ctx->key, ctx->enc_key_len);
+ /* Copy salt to context */
+ memcpy(fctx->enc.encr_iv, ctx->key + ctx->enc_key_len,
+ AES_GCM_SALT_SIZE);
+
+ rctx->ctrl_word.e.iv_offset = req->assoclen - AES_GCM_IV_OFFSET;
+ break;
+
+ default:
+ /* Unknown cipher type */
+ return -EINVAL;
+ }
+ cpu_to_be64s(&rctx->ctrl_word.flags);
+
+ req_info->ctrl.s.dma_mode = OTX2_CPT_DMA_MODE_SG;
+ req_info->ctrl.s.se_req = 1;
+ req_info->req.opcode.s.major = OTX2_CPT_MAJOR_OP_FC |
+ DMA_MODE_FLAG(OTX2_CPT_DMA_MODE_SG);
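+	/*
+	 * param1 is the encryption data length and param2 the
+	 * authentication data length (assoclen included); the trailing
+	 * MAC is excluded from both for decryption.
+	 */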
+ if (enc) {
+ req_info->req.opcode.s.minor = 2;
+ req_info->req.param1 = req->cryptlen;
+ req_info->req.param2 = req->cryptlen + req->assoclen;
+ } else {
+ req_info->req.opcode.s.minor = 3;
+ req_info->req.param1 = req->cryptlen - mac_len;
+ req_info->req.param2 = req->cryptlen + req->assoclen - mac_len;
+ }
+
+ fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
+ fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
+ fctx->enc.enc_ctrl.e.mac_type = ctx->mac_type;
+ fctx->enc.enc_ctrl.e.mac_len = mac_len;
+ cpu_to_be64s(&fctx->enc.enc_ctrl.u);
+
+ /*
+	 * Store packet data information in the offset control word
+	 * (the first 8 bytes of the input)
+ */
+ req_info->in[*argcnt].vptr = (u8 *)&rctx->ctrl_word;
+ req_info->in[*argcnt].size = CONTROL_WORD_LEN;
+ req_info->req.dlen += CONTROL_WORD_LEN;
+ ++(*argcnt);
+
+ req_info->in[*argcnt].vptr = (u8 *)fctx;
+ req_info->in[*argcnt].size = sizeof(struct otx2_cpt_fc_ctx);
+ req_info->req.dlen += sizeof(struct otx2_cpt_fc_ctx);
+ ++(*argcnt);
+
+ return 0;
+}
+
+static inline void create_hmac_ctx_hdr(struct aead_request *req, u32 *argcnt,
+ u32 enc)
+{
+ struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
+
+ req_info->ctrl.s.dma_mode = OTX2_CPT_DMA_MODE_SG;
+ req_info->ctrl.s.se_req = 1;
+ req_info->req.opcode.s.major = OTX2_CPT_MAJOR_OP_HMAC |
+ DMA_MODE_FLAG(OTX2_CPT_DMA_MODE_SG);
+ req_info->is_trunc_hmac = ctx->is_trunc_hmac;
+
+ req_info->req.opcode.s.minor = 0;
+ req_info->req.param1 = ctx->auth_key_len;
+ req_info->req.param2 = ctx->mac_type << 8;
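+	/* param1 carries the HMAC key length; param2 encodes the hash type in bits 15:8 */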
+
+ /* Add authentication key */
+ req_info->in[*argcnt].vptr = ctx->key;
+ req_info->in[*argcnt].size = round_up(ctx->auth_key_len, 8);
+ req_info->req.dlen += round_up(ctx->auth_key_len, 8);
+ ++(*argcnt);
+}
+
+static inline int create_aead_input_list(struct aead_request *req, u32 enc)
+{
+ struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
+ u32 inputlen = req->cryptlen + req->assoclen;
+ u32 status, argcnt = 0;
+
+ status = create_aead_ctx_hdr(req, enc, &argcnt);
+ if (status)
+ return status;
+ update_input_data(req_info, req->src, inputlen, &argcnt);
+ req_info->in_cnt = argcnt;
+
+ return 0;
+}
+
+static inline void create_aead_output_list(struct aead_request *req, u32 enc,
+ u32 mac_len)
+{
+ struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
+ u32 argcnt = 0, outputlen = 0;
+
+ if (enc)
+ outputlen = req->cryptlen + req->assoclen + mac_len;
+ else
+ outputlen = req->cryptlen + req->assoclen - mac_len;
+
+ update_output_data(req_info, req->dst, 0, outputlen, &argcnt);
+ req_info->out_cnt = argcnt;
+}
+
+static inline void create_aead_null_input_list(struct aead_request *req,
+ u32 enc, u32 mac_len)
+{
+ struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
+ u32 inputlen, argcnt = 0;
+
+ if (enc)
+ inputlen = req->cryptlen + req->assoclen;
+ else
+ inputlen = req->cryptlen + req->assoclen - mac_len;
+
+ create_hmac_ctx_hdr(req, &argcnt, enc);
+ update_input_data(req_info, req->src, inputlen, &argcnt);
+ req_info->in_cnt = argcnt;
+}
+
+static inline int create_aead_null_output_list(struct aead_request *req,
+ u32 enc, u32 mac_len)
+{
+ struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
+ struct scatterlist *dst;
+ u8 *ptr = NULL;
+ int argcnt = 0, status, offset;
+ u32 inputlen;
+
+ if (enc)
+ inputlen = req->cryptlen + req->assoclen;
+ else
+ inputlen = req->cryptlen + req->assoclen - mac_len;
+
+ /*
+ * If source and destination are different
+ * then copy payload to destination
+ */
+ if (req->src != req->dst) {
+
+ ptr = kmalloc(inputlen, (req_info->areq->flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC);
+ if (!ptr)
+ return -ENOMEM;
+
+ status = sg_copy_to_buffer(req->src, sg_nents(req->src), ptr,
+ inputlen);
+ if (status != inputlen) {
+ status = -EINVAL;
+ goto error_free;
+ }
+ status = sg_copy_from_buffer(req->dst, sg_nents(req->dst), ptr,
+ inputlen);
+ if (status != inputlen) {
+ status = -EINVAL;
+ goto error_free;
+ }
+ kfree(ptr);
+ }
+
+ if (enc) {
+ /*
+		 * In an encryption scenario the HMAC needs
+		 * to be appended after the payload
+ */
+ dst = req->dst;
+ offset = inputlen;
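+		/* Walk the destination SG list to the end of the payload so the HMAC lands right after it */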
+ while (offset >= dst->length) {
+ offset -= dst->length;
+ dst = sg_next(dst);
+ if (!dst)
+ return -ENOENT;
+ }
+
+ update_output_data(req_info, dst, offset, mac_len, &argcnt);
+ } else {
+ /*
+		 * In a decryption scenario the HMAC calculated over the
+		 * received payload needs to be compared with the received HMAC
+ */
+ status = sg_copy_buffer(req->src, sg_nents(req->src),
+ rctx->fctx.hmac.s.hmac_recv, mac_len,
+ inputlen, true);
+ if (status != mac_len)
+ return -EINVAL;
+
+ req_info->out[argcnt].vptr = rctx->fctx.hmac.s.hmac_calc;
+ req_info->out[argcnt].size = mac_len;
+ argcnt++;
+ }
+
+ req_info->out_cnt = argcnt;
+ return 0;
+
+error_free:
+ kfree(ptr);
+ return status;
+}
+
+static int aead_do_fallback(struct aead_request *req, bool is_enc)
+{
+ struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(aead);
+ int ret;
+
+ if (ctx->fbk_cipher) {
+ /* Store the cipher tfm and then use the fallback tfm */
+ aead_request_set_tfm(&rctx->fbk_req, ctx->fbk_cipher);
+ aead_request_set_callback(&rctx->fbk_req, req->base.flags,
+ req->base.complete, req->base.data);
+ aead_request_set_crypt(&rctx->fbk_req, req->src,
+ req->dst, req->cryptlen, req->iv);
+ ret = is_enc ? crypto_aead_encrypt(&rctx->fbk_req) :
+ crypto_aead_decrypt(&rctx->fbk_req);
+ } else {
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int cpt_aead_enc_dec(struct aead_request *req, u8 reg_type, u8 enc)
+{
+ struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct pci_dev *pdev;
+ int status, cpu_num;
+
+ /* Clear control words */
+ rctx->ctrl_word.flags = 0;
+ rctx->fctx.enc.enc_ctrl.u = 0;
+
+ req_info->callback = otx2_cpt_aead_callback;
+ req_info->areq = &req->base;
+ req_info->req_type = reg_type;
+ req_info->is_enc = enc;
+ req_info->is_trunc_hmac = false;
+
+ switch (reg_type) {
+ case OTX2_CPT_AEAD_ENC_DEC_REQ:
+ status = create_aead_input_list(req, enc);
+ if (status)
+ return status;
+ create_aead_output_list(req, enc, crypto_aead_authsize(tfm));
+ break;
+
+ case OTX2_CPT_AEAD_ENC_DEC_NULL_REQ:
+ create_aead_null_input_list(req, enc,
+ crypto_aead_authsize(tfm));
+ status = create_aead_null_output_list(req, enc,
+ crypto_aead_authsize(tfm));
+ if (status)
+ return status;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ if (!IS_ALIGNED(req_info->req.param1, ctx->enc_align_len))
+ return -EINVAL;
+
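+	/* Hand zero-length and oversized requests to the software fallback cipher */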
+ if (!req_info->req.param2 ||
+ (req_info->req.param1 > OTX2_CPT_MAX_REQ_SIZE) ||
+ (req_info->req.param2 > OTX2_CPT_MAX_REQ_SIZE))
+ return aead_do_fallback(req, enc);
+
+ status = get_se_device(&pdev, &cpu_num);
+ if (status)
+ return status;
+
+ req_info->ctrl.s.grp = otx2_cpt_get_kcrypto_eng_grp_num(pdev);
+
+ /*
+	 * The send is asynchronous; once the request completes, the
+	 * driver notifies the caller through the registered callback
+	 * functions
+ */
+ return otx2_cpt_do_request(pdev, req_info, cpu_num);
+}
+
+static int otx2_cpt_aead_encrypt(struct aead_request *req)
+{
+ return cpt_aead_enc_dec(req, OTX2_CPT_AEAD_ENC_DEC_REQ, true);
+}
+
+static int otx2_cpt_aead_decrypt(struct aead_request *req)
+{
+ return cpt_aead_enc_dec(req, OTX2_CPT_AEAD_ENC_DEC_REQ, false);
+}
+
+static int otx2_cpt_aead_null_encrypt(struct aead_request *req)
+{
+ return cpt_aead_enc_dec(req, OTX2_CPT_AEAD_ENC_DEC_NULL_REQ, true);
+}
+
+static int otx2_cpt_aead_null_decrypt(struct aead_request *req)
+{
+ return cpt_aead_enc_dec(req, OTX2_CPT_AEAD_ENC_DEC_NULL_REQ, false);
+}
+
+static struct skcipher_alg otx2_cpt_skciphers[] = { {
+ .base.cra_name = "xts(aes)",
+ .base.cra_driver_name = "cpt_xts_aes",
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_priority = 4001,
+ .base.cra_module = THIS_MODULE,
+
+ .init = otx2_cpt_enc_dec_init,
+ .exit = otx2_cpt_skcipher_exit,
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .setkey = otx2_cpt_skcipher_xts_setkey,
+ .encrypt = otx2_cpt_skcipher_encrypt,
+ .decrypt = otx2_cpt_skcipher_decrypt,
+}, {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cpt_cbc_aes",
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_priority = 4001,
+ .base.cra_module = THIS_MODULE,
+
+ .init = otx2_cpt_enc_dec_init,
+ .exit = otx2_cpt_skcipher_exit,
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = otx2_cpt_skcipher_cbc_aes_setkey,
+ .encrypt = otx2_cpt_skcipher_encrypt,
+ .decrypt = otx2_cpt_skcipher_decrypt,
+}, {
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "cpt_ecb_aes",
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_priority = 4001,
+ .base.cra_module = THIS_MODULE,
+
+ .init = otx2_cpt_enc_dec_init,
+ .exit = otx2_cpt_skcipher_exit,
+ .ivsize = 0,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = otx2_cpt_skcipher_ecb_aes_setkey,
+ .encrypt = otx2_cpt_skcipher_encrypt,
+ .decrypt = otx2_cpt_skcipher_decrypt,
+}, {
+ .base.cra_name = "cbc(des3_ede)",
+ .base.cra_driver_name = "cpt_cbc_des3_ede",
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_priority = 4001,
+ .base.cra_module = THIS_MODULE,
+
+ .init = otx2_cpt_enc_dec_init,
+ .exit = otx2_cpt_skcipher_exit,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = otx2_cpt_skcipher_cbc_des3_setkey,
+ .encrypt = otx2_cpt_skcipher_encrypt,
+ .decrypt = otx2_cpt_skcipher_decrypt,
+}, {
+ .base.cra_name = "ecb(des3_ede)",
+ .base.cra_driver_name = "cpt_ecb_des3_ede",
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_priority = 4001,
+ .base.cra_module = THIS_MODULE,
+
+ .init = otx2_cpt_enc_dec_init,
+ .exit = otx2_cpt_skcipher_exit,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = 0,
+ .setkey = otx2_cpt_skcipher_ecb_des3_setkey,
+ .encrypt = otx2_cpt_skcipher_encrypt,
+ .decrypt = otx2_cpt_skcipher_decrypt,
+} };
+
+static struct aead_alg otx2_cpt_aeads[] = { {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(aes))",
+ .cra_driver_name = "cpt_hmac_sha1_cbc_aes",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
+ .cra_priority = 4001,
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = otx2_cpt_aead_cbc_aes_sha1_init,
+ .exit = otx2_cpt_aead_exit,
+ .setkey = otx2_cpt_aead_cbc_aes_sha_setkey,
+ .setauthsize = otx2_cpt_aead_set_authsize,
+ .encrypt = otx2_cpt_aead_encrypt,
+ .decrypt = otx2_cpt_aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+}, {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),cbc(aes))",
+ .cra_driver_name = "cpt_hmac_sha256_cbc_aes",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
+ .cra_priority = 4001,
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = otx2_cpt_aead_cbc_aes_sha256_init,
+ .exit = otx2_cpt_aead_exit,
+ .setkey = otx2_cpt_aead_cbc_aes_sha_setkey,
+ .setauthsize = otx2_cpt_aead_set_authsize,
+ .encrypt = otx2_cpt_aead_encrypt,
+ .decrypt = otx2_cpt_aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+}, {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),cbc(aes))",
+ .cra_driver_name = "cpt_hmac_sha384_cbc_aes",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
+ .cra_priority = 4001,
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = otx2_cpt_aead_cbc_aes_sha384_init,
+ .exit = otx2_cpt_aead_exit,
+ .setkey = otx2_cpt_aead_cbc_aes_sha_setkey,
+ .setauthsize = otx2_cpt_aead_set_authsize,
+ .encrypt = otx2_cpt_aead_encrypt,
+ .decrypt = otx2_cpt_aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+}, {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),cbc(aes))",
+ .cra_driver_name = "cpt_hmac_sha512_cbc_aes",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
+ .cra_priority = 4001,
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = otx2_cpt_aead_cbc_aes_sha512_init,
+ .exit = otx2_cpt_aead_exit,
+ .setkey = otx2_cpt_aead_cbc_aes_sha_setkey,
+ .setauthsize = otx2_cpt_aead_set_authsize,
+ .encrypt = otx2_cpt_aead_encrypt,
+ .decrypt = otx2_cpt_aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+}, {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),ecb(cipher_null))",
+ .cra_driver_name = "cpt_hmac_sha1_ecb_null",
+ .cra_blocksize = 1,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
+ .cra_priority = 4001,
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = otx2_cpt_aead_ecb_null_sha1_init,
+ .exit = otx2_cpt_aead_exit,
+ .setkey = otx2_cpt_aead_ecb_null_sha_setkey,
+ .setauthsize = otx2_cpt_aead_null_set_authsize,
+ .encrypt = otx2_cpt_aead_null_encrypt,
+ .decrypt = otx2_cpt_aead_null_decrypt,
+ .ivsize = 0,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+}, {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),ecb(cipher_null))",
+ .cra_driver_name = "cpt_hmac_sha256_ecb_null",
+ .cra_blocksize = 1,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
+ .cra_priority = 4001,
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = otx2_cpt_aead_ecb_null_sha256_init,
+ .exit = otx2_cpt_aead_exit,
+ .setkey = otx2_cpt_aead_ecb_null_sha_setkey,
+ .setauthsize = otx2_cpt_aead_null_set_authsize,
+ .encrypt = otx2_cpt_aead_null_encrypt,
+ .decrypt = otx2_cpt_aead_null_decrypt,
+ .ivsize = 0,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+}, {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),ecb(cipher_null))",
+ .cra_driver_name = "cpt_hmac_sha384_ecb_null",
+ .cra_blocksize = 1,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
+ .cra_priority = 4001,
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = otx2_cpt_aead_ecb_null_sha384_init,
+ .exit = otx2_cpt_aead_exit,
+ .setkey = otx2_cpt_aead_ecb_null_sha_setkey,
+ .setauthsize = otx2_cpt_aead_null_set_authsize,
+ .encrypt = otx2_cpt_aead_null_encrypt,
+ .decrypt = otx2_cpt_aead_null_decrypt,
+ .ivsize = 0,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+}, {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),ecb(cipher_null))",
+ .cra_driver_name = "cpt_hmac_sha512_ecb_null",
+ .cra_blocksize = 1,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
+ .cra_priority = 4001,
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = otx2_cpt_aead_ecb_null_sha512_init,
+ .exit = otx2_cpt_aead_exit,
+ .setkey = otx2_cpt_aead_ecb_null_sha_setkey,
+ .setauthsize = otx2_cpt_aead_null_set_authsize,
+ .encrypt = otx2_cpt_aead_null_encrypt,
+ .decrypt = otx2_cpt_aead_null_decrypt,
+ .ivsize = 0,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+}, {
+ .base = {
+ .cra_name = "rfc4106(gcm(aes))",
+ .cra_driver_name = "cpt_rfc4106_gcm_aes",
+ .cra_blocksize = 1,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
+ .cra_priority = 4001,
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = otx2_cpt_aead_gcm_aes_init,
+ .exit = otx2_cpt_aead_exit,
+ .setkey = otx2_cpt_aead_gcm_aes_setkey,
+ .setauthsize = otx2_cpt_aead_gcm_set_authsize,
+ .encrypt = otx2_cpt_aead_encrypt,
+ .decrypt = otx2_cpt_aead_decrypt,
+ .ivsize = AES_GCM_IV_SIZE,
+ .maxauthsize = AES_GCM_ICV_SIZE,
+} };
+
+static inline int is_any_alg_used(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(otx2_cpt_skciphers); i++)
+ if (refcount_read(&otx2_cpt_skciphers[i].base.cra_refcnt) != 1)
+ return true;
+ for (i = 0; i < ARRAY_SIZE(otx2_cpt_aeads); i++)
+ if (refcount_read(&otx2_cpt_aeads[i].base.cra_refcnt) != 1)
+ return true;
+ return false;
+}
+
+static inline int cpt_register_algs(void)
+{
+ int i, err = 0;
+
+ if (!IS_ENABLED(CONFIG_DM_CRYPT)) {
+ for (i = 0; i < ARRAY_SIZE(otx2_cpt_skciphers); i++)
+ otx2_cpt_skciphers[i].base.cra_flags &=
+ ~CRYPTO_ALG_DEAD;
+
+ err = crypto_register_skciphers(otx2_cpt_skciphers,
+ ARRAY_SIZE(otx2_cpt_skciphers));
+ if (err)
+ return err;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(otx2_cpt_aeads); i++)
+ otx2_cpt_aeads[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;
+
+ err = crypto_register_aeads(otx2_cpt_aeads,
+ ARRAY_SIZE(otx2_cpt_aeads));
+ if (err) {
+ crypto_unregister_skciphers(otx2_cpt_skciphers,
+ ARRAY_SIZE(otx2_cpt_skciphers));
+ return err;
+ }
+
+ return 0;
+}
+
+static inline void cpt_unregister_algs(void)
+{
+ crypto_unregister_skciphers(otx2_cpt_skciphers,
+ ARRAY_SIZE(otx2_cpt_skciphers));
+ crypto_unregister_aeads(otx2_cpt_aeads, ARRAY_SIZE(otx2_cpt_aeads));
+}
+
+static int compare_func(const void *lptr, const void *rptr)
+{
+ const struct cpt_device_desc *ldesc = (struct cpt_device_desc *) lptr;
+ const struct cpt_device_desc *rdesc = (struct cpt_device_desc *) rptr;
+
+ if (ldesc->dev->devfn < rdesc->dev->devfn)
+ return -1;
+ if (ldesc->dev->devfn > rdesc->dev->devfn)
+ return 1;
+ return 0;
+}
+
+static void swap_func(void *lptr, void *rptr, int size)
+{
+ struct cpt_device_desc *ldesc = lptr;
+ struct cpt_device_desc *rdesc = rptr;
+ struct cpt_device_desc desc;
+
+ desc = *ldesc;
+ *ldesc = *rdesc;
+ *rdesc = desc;
+}
+
+int otx2_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
+ int num_queues, int num_devices)
+{
+ int ret = 0;
+ int count;
+
+ mutex_lock(&mutex);
+ count = atomic_read(&se_devices.count);
+ if (count >= OTX2_CPT_MAX_LFS_NUM) {
+ dev_err(&pdev->dev, "No space to add a new device\n");
+ ret = -ENOSPC;
+ goto unlock;
+ }
+ se_devices.desc[count].num_queues = num_queues;
+ se_devices.desc[count++].dev = pdev;
+ atomic_inc(&se_devices.count);
+
+ if (atomic_read(&se_devices.count) == num_devices &&
+	    !is_crypto_registered) {
+ if (cpt_register_algs()) {
+ dev_err(&pdev->dev,
+ "Error in registering crypto algorithms\n");
+ ret = -EINVAL;
+ goto unlock;
+ }
+ try_module_get(mod);
+ is_crypto_registered = true;
+ }
+ sort(se_devices.desc, count, sizeof(struct cpt_device_desc),
+ compare_func, swap_func);
+
+unlock:
+ mutex_unlock(&mutex);
+ return ret;
+}
+
+void otx2_cpt_crypto_exit(struct pci_dev *pdev, struct module *mod)
+{
+ struct cpt_device_table *dev_tbl;
+ bool dev_found = false;
+ int i, j, count;
+
+ mutex_lock(&mutex);
+
+ dev_tbl = &se_devices;
+ count = atomic_read(&dev_tbl->count);
+ for (i = 0; i < count; i++) {
+ if (pdev == dev_tbl->desc[i].dev) {
+			for (j = i; j < count - 1; j++)
+				dev_tbl->desc[j] = dev_tbl->desc[j + 1];
+ dev_found = true;
+ break;
+ }
+ }
+
+ if (!dev_found) {
+ dev_err(&pdev->dev, "%s device not found\n", __func__);
+ goto unlock;
+ }
+ if (atomic_dec_and_test(&se_devices.count) &&
+ !is_any_alg_used()) {
+ cpt_unregister_algs();
+ module_put(mod);
+ is_crypto_registered = false;
+ }
+
+unlock:
+ mutex_unlock(&mutex);
+}
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.h b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.h
new file mode 100644
index 000000000000..cfd6e2c50424
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.h
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (C) 2019 Marvell.
+ */
+
+#ifndef __OTX2_CPT_ALGS_H
+#define __OTX2_CPT_ALGS_H
+
+#include <crypto/hash.h>
+#include <crypto/skcipher.h>
+#include <crypto/aead.h>
+#include "otx2_cpt_common.h"
+
+#define OTX2_CPT_MAX_ENC_KEY_SIZE 32
+#define OTX2_CPT_MAX_HASH_KEY_SIZE 64
+#define OTX2_CPT_MAX_KEY_SIZE (OTX2_CPT_MAX_ENC_KEY_SIZE + \
+ OTX2_CPT_MAX_HASH_KEY_SIZE)
+enum otx2_cpt_request_type {
+ OTX2_CPT_ENC_DEC_REQ = 0x1,
+ OTX2_CPT_AEAD_ENC_DEC_REQ = 0x2,
+ OTX2_CPT_AEAD_ENC_DEC_NULL_REQ = 0x3,
+ OTX2_CPT_PASSTHROUGH_REQ = 0x4
+};
+
+enum otx2_cpt_major_opcodes {
+ OTX2_CPT_MAJOR_OP_MISC = 0x01,
+ OTX2_CPT_MAJOR_OP_FC = 0x33,
+ OTX2_CPT_MAJOR_OP_HMAC = 0x35,
+};
+
+enum otx2_cpt_cipher_type {
+ OTX2_CPT_CIPHER_NULL = 0x0,
+ OTX2_CPT_DES3_CBC = 0x1,
+ OTX2_CPT_DES3_ECB = 0x2,
+ OTX2_CPT_AES_CBC = 0x3,
+ OTX2_CPT_AES_ECB = 0x4,
+ OTX2_CPT_AES_CFB = 0x5,
+ OTX2_CPT_AES_CTR = 0x6,
+ OTX2_CPT_AES_GCM = 0x7,
+ OTX2_CPT_AES_XTS = 0x8
+};
+
+enum otx2_cpt_mac_type {
+ OTX2_CPT_MAC_NULL = 0x0,
+ OTX2_CPT_MD5 = 0x1,
+ OTX2_CPT_SHA1 = 0x2,
+ OTX2_CPT_SHA224 = 0x3,
+ OTX2_CPT_SHA256 = 0x4,
+ OTX2_CPT_SHA384 = 0x5,
+ OTX2_CPT_SHA512 = 0x6,
+ OTX2_CPT_GMAC = 0x7
+};
+
+enum otx2_cpt_aes_key_len {
+ OTX2_CPT_AES_128_BIT = 0x1,
+ OTX2_CPT_AES_192_BIT = 0x2,
+ OTX2_CPT_AES_256_BIT = 0x3
+};
+
+union otx2_cpt_encr_ctrl {
+ u64 u;
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 enc_cipher:4;
+ u64 reserved_59:1;
+ u64 aes_key:2;
+ u64 iv_source:1;
+ u64 mac_type:4;
+ u64 reserved_49_51:3;
+ u64 auth_input_type:1;
+ u64 mac_len:8;
+ u64 reserved_32_39:8;
+ u64 encr_offset:16;
+ u64 iv_offset:8;
+ u64 auth_offset:8;
+#else
+ u64 auth_offset:8;
+ u64 iv_offset:8;
+ u64 encr_offset:16;
+ u64 reserved_32_39:8;
+ u64 mac_len:8;
+ u64 auth_input_type:1;
+ u64 reserved_49_51:3;
+ u64 mac_type:4;
+ u64 iv_source:1;
+ u64 aes_key:2;
+ u64 reserved_59:1;
+ u64 enc_cipher:4;
+#endif
+ } e;
+};
+
+struct otx2_cpt_cipher {
+ const char *name;
+ u8 value;
+};
+
+struct otx2_cpt_fc_enc_ctx {
+ union otx2_cpt_encr_ctrl enc_ctrl;
+ u8 encr_key[32];
+ u8 encr_iv[16];
+};
+
+union otx2_cpt_fc_hmac_ctx {
+ struct {
+ u8 ipad[64];
+ u8 opad[64];
+ } e;
+ struct {
+ u8 hmac_calc[64]; /* HMAC calculated */
+ u8 hmac_recv[64]; /* HMAC received */
+ } s;
+};
+
+struct otx2_cpt_fc_ctx {
+ struct otx2_cpt_fc_enc_ctx enc;
+ union otx2_cpt_fc_hmac_ctx hmac;
+};
+
+struct otx2_cpt_enc_ctx {
+ u32 key_len;
+ u8 enc_key[OTX2_CPT_MAX_KEY_SIZE];
+ u8 cipher_type;
+ u8 key_type;
+ u8 enc_align_len;
+ struct crypto_skcipher *fbk_cipher;
+};
+
+union otx2_cpt_offset_ctrl {
+ u64 flags;
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved:32;
+ u64 enc_data_offset:16;
+ u64 iv_offset:8;
+ u64 auth_offset:8;
+#else
+ u64 auth_offset:8;
+ u64 iv_offset:8;
+ u64 enc_data_offset:16;
+ u64 reserved:32;
+#endif
+ } e;
+};
+
+struct otx2_cpt_req_ctx {
+ struct otx2_cpt_req_info cpt_req;
+ union otx2_cpt_offset_ctrl ctrl_word;
+ struct otx2_cpt_fc_ctx fctx;
+ union {
+ struct skcipher_request sk_fbk_req;
+ struct aead_request fbk_req;
+ };
+};
+
+struct otx2_cpt_sdesc {
+ struct shash_desc shash;
+};
+
+struct otx2_cpt_aead_ctx {
+ u8 key[OTX2_CPT_MAX_KEY_SIZE];
+ struct crypto_shash *hashalg;
+ struct otx2_cpt_sdesc *sdesc;
+ struct crypto_aead *fbk_cipher;
+ u8 *ipad;
+ u8 *opad;
+ u32 enc_key_len;
+ u32 auth_key_len;
+ u8 cipher_type;
+ u8 mac_type;
+ u8 key_type;
+ u8 is_trunc_hmac;
+ u8 enc_align_len;
+};
+int otx2_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
+ int num_queues, int num_devices);
+void otx2_cpt_crypto_exit(struct pci_dev *pdev, struct module *mod);
+
+#endif /* __OTX2_CPT_ALGS_H */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
new file mode 100644
index 000000000000..690be93ad566
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
@@ -0,0 +1,446 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2018 Marvell. */
+
+#include "otx2_cpt_common.h"
+#include "otx2_cptvf.h"
+#include "otx2_cptlf.h"
+#include "otx2_cptvf_algs.h"
+#include "cn10k_cpt.h"
+#include <rvu_reg.h>
+
+#define OTX2_CPTVF_DRV_NAME "rvu_cptvf"
+
+static unsigned int cpt_block_num;
+module_param(cpt_block_num, uint, 0644);
+MODULE_PARM_DESC(cpt_block_num, "CPT block number (0=CPT0, 1=CPT1; default 0)");
+
+static void cptvf_enable_pfvf_mbox_intrs(struct otx2_cptvf_dev *cptvf)
+{
+ /* Clear interrupt if any */
+ otx2_cpt_write64(cptvf->reg_base, BLKADDR_RVUM, 0, OTX2_RVU_VF_INT,
+ 0x1ULL);
+
+ /* Enable PF-VF interrupt */
+ otx2_cpt_write64(cptvf->reg_base, BLKADDR_RVUM, 0,
+ OTX2_RVU_VF_INT_ENA_W1S, 0x1ULL);
+}
+
+static void cptvf_disable_pfvf_mbox_intrs(struct otx2_cptvf_dev *cptvf)
+{
+ /* Disable PF-VF interrupt */
+ otx2_cpt_write64(cptvf->reg_base, BLKADDR_RVUM, 0,
+ OTX2_RVU_VF_INT_ENA_W1C, 0x1ULL);
+
+ /* Clear interrupt if any */
+ otx2_cpt_write64(cptvf->reg_base, BLKADDR_RVUM, 0, OTX2_RVU_VF_INT,
+ 0x1ULL);
+}
+
+static int cptvf_register_interrupts(struct otx2_cptvf_dev *cptvf)
+{
+ int ret, irq;
+ int num_vec;
+
+ num_vec = pci_msix_vec_count(cptvf->pdev);
+ if (num_vec <= 0)
+ return -EINVAL;
+
+ /* Enable MSI-X */
+ ret = pci_alloc_irq_vectors(cptvf->pdev, num_vec, num_vec,
+ PCI_IRQ_MSIX);
+ if (ret < 0) {
+ dev_err(&cptvf->pdev->dev,
+ "Request for %d msix vectors failed\n", num_vec);
+ return ret;
+ }
+ irq = pci_irq_vector(cptvf->pdev, OTX2_CPT_VF_INT_VEC_E_MBOX);
+ /* Register VF<=>PF mailbox interrupt handler */
+ ret = devm_request_irq(&cptvf->pdev->dev, irq,
+ otx2_cptvf_pfvf_mbox_intr, 0,
+ "CPTPFVF Mbox", cptvf);
+ if (ret)
+ return ret;
+ /* Enable PF-VF mailbox interrupts */
+ cptvf_enable_pfvf_mbox_intrs(cptvf);
+
+ ret = otx2_cpt_send_ready_msg(&cptvf->pfvf_mbox, cptvf->pdev);
+ if (ret) {
+ dev_warn(&cptvf->pdev->dev,
+ "PF not responding to mailbox, deferring probe\n");
+ cptvf_disable_pfvf_mbox_intrs(cptvf);
+ return -EPROBE_DEFER;
+ }
+ return 0;
+}
+
+static int cptvf_pfvf_mbox_init(struct otx2_cptvf_dev *cptvf)
+{
+ struct pci_dev *pdev = cptvf->pdev;
+ resource_size_t offset, size;
+ int ret;
+
+ cptvf->pfvf_mbox_wq = alloc_workqueue("cpt_pfvf_mailbox",
+ WQ_UNBOUND | WQ_HIGHPRI |
+ WQ_MEM_RECLAIM, 1);
+ if (!cptvf->pfvf_mbox_wq)
+ return -ENOMEM;
+
+ if (test_bit(CN10K_MBOX, &cptvf->cap_flag)) {
+ /* For cn10k platform, VF mailbox region is in its BAR2
+ * register space
+ */
+ cptvf->pfvf_mbox_base = cptvf->reg_base +
+ CN10K_CPT_VF_MBOX_REGION;
+ } else {
+ offset = pci_resource_start(pdev, PCI_MBOX_BAR_NUM);
+ size = pci_resource_len(pdev, PCI_MBOX_BAR_NUM);
+ /* Map PF-VF mailbox memory */
+ cptvf->pfvf_mbox_base = devm_ioremap_wc(&pdev->dev, offset,
+ size);
+ if (!cptvf->pfvf_mbox_base) {
+ dev_err(&pdev->dev, "Unable to map BAR4\n");
+ ret = -ENOMEM;
+ goto free_wqe;
+ }
+ }
+
+ ret = otx2_mbox_init(&cptvf->pfvf_mbox, cptvf->pfvf_mbox_base,
+ pdev, cptvf->reg_base, MBOX_DIR_VFPF, 1);
+ if (ret)
+ goto free_wqe;
+
+ ret = otx2_cpt_mbox_bbuf_init(cptvf, pdev);
+ if (ret)
+ goto destroy_mbox;
+
+ INIT_WORK(&cptvf->pfvf_mbox_work, otx2_cptvf_pfvf_mbox_handler);
+ return 0;
+
+destroy_mbox:
+ otx2_mbox_destroy(&cptvf->pfvf_mbox);
+free_wqe:
+ destroy_workqueue(cptvf->pfvf_mbox_wq);
+ return ret;
+}
+
+static void cptvf_pfvf_mbox_destroy(struct otx2_cptvf_dev *cptvf)
+{
+ destroy_workqueue(cptvf->pfvf_mbox_wq);
+ otx2_mbox_destroy(&cptvf->pfvf_mbox);
+}
+
+static void cptlf_work_handler(unsigned long data)
+{
+ otx2_cpt_post_process((struct otx2_cptlf_wqe *) data);
+}
+
+static void cleanup_tasklet_work(struct otx2_cptlfs_info *lfs)
+{
+ int i;
+
+ for (i = 0; i < lfs->lfs_num; i++) {
+ if (!lfs->lf[i].wqe)
+ continue;
+
+ tasklet_kill(&lfs->lf[i].wqe->work);
+ kfree(lfs->lf[i].wqe);
+ lfs->lf[i].wqe = NULL;
+ }
+}
+
+static int init_tasklet_work(struct otx2_cptlfs_info *lfs)
+{
+ struct otx2_cptlf_wqe *wqe;
+ int i, ret = 0;
+
+ for (i = 0; i < lfs->lfs_num; i++) {
+ wqe = kzalloc(sizeof(struct otx2_cptlf_wqe), GFP_KERNEL);
+ if (!wqe) {
+ ret = -ENOMEM;
+ goto cleanup_tasklet;
+ }
+
+ tasklet_init(&wqe->work, cptlf_work_handler, (u64) wqe);
+ wqe->lfs = lfs;
+ wqe->lf_num = i;
+ lfs->lf[i].wqe = wqe;
+ }
+ return 0;
+
+cleanup_tasklet:
+ cleanup_tasklet_work(lfs);
+ return ret;
+}
+
+static void free_pending_queues(struct otx2_cptlfs_info *lfs)
+{
+ int i;
+
+ for (i = 0; i < lfs->lfs_num; i++) {
+ kfree(lfs->lf[i].pqueue.head);
+ lfs->lf[i].pqueue.head = NULL;
+ }
+}
+
+static int alloc_pending_queues(struct otx2_cptlfs_info *lfs)
+{
+ int size, ret, i;
+
+ if (!lfs->lfs_num)
+ return -EINVAL;
+
+ for (i = 0; i < lfs->lfs_num; i++) {
+ lfs->lf[i].pqueue.qlen = OTX2_CPT_INST_QLEN_MSGS;
+ size = lfs->lf[i].pqueue.qlen *
+ sizeof(struct otx2_cpt_pending_entry);
+
+ lfs->lf[i].pqueue.head = kzalloc(size, GFP_KERNEL);
+ if (!lfs->lf[i].pqueue.head) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ /* Initialize spin lock */
+ spin_lock_init(&lfs->lf[i].pqueue.lock);
+ }
+ return 0;
+
+error:
+ free_pending_queues(lfs);
+ return ret;
+}
+
+static void lf_sw_cleanup(struct otx2_cptlfs_info *lfs)
+{
+ cleanup_tasklet_work(lfs);
+ free_pending_queues(lfs);
+}
+
+static int lf_sw_init(struct otx2_cptlfs_info *lfs)
+{
+ int ret;
+
+ ret = alloc_pending_queues(lfs);
+ if (ret) {
+ dev_err(&lfs->pdev->dev,
+ "Allocating pending queues failed\n");
+ return ret;
+ }
+ ret = init_tasklet_work(lfs);
+ if (ret) {
+ dev_err(&lfs->pdev->dev,
+ "Tasklet work init failed\n");
+ goto pending_queues_free;
+ }
+ return 0;
+
+pending_queues_free:
+ free_pending_queues(lfs);
+ return ret;
+}
+
+static void cptvf_lf_shutdown(struct otx2_cptlfs_info *lfs)
+{
+ atomic_set(&lfs->state, OTX2_CPTLF_IN_RESET);
+
+ /* Remove interrupts affinity */
+ otx2_cptlf_free_irqs_affinity(lfs);
+ /* Disable instruction queue */
+ otx2_cptlf_disable_iqueues(lfs);
+ /* Unregister crypto algorithms */
+ otx2_cpt_crypto_exit(lfs->pdev, THIS_MODULE);
+ /* Unregister LFs interrupts */
+ otx2_cptlf_unregister_interrupts(lfs);
+ /* Cleanup LFs software side */
+ lf_sw_cleanup(lfs);
+ /* Send request to detach LFs */
+ otx2_cpt_detach_rsrcs_msg(lfs);
+}
+
+static int cptvf_lf_init(struct otx2_cptvf_dev *cptvf)
+{
+ struct otx2_cptlfs_info *lfs = &cptvf->lfs;
+ struct device *dev = &cptvf->pdev->dev;
+ int ret, lfs_num;
+ u8 eng_grp_msk;
+
+ /* Get engine group number for symmetric crypto */
+ cptvf->lfs.kcrypto_eng_grp_num = OTX2_CPT_INVALID_CRYPTO_ENG_GRP;
+ ret = otx2_cptvf_send_eng_grp_num_msg(cptvf, OTX2_CPT_SE_TYPES);
+ if (ret)
+ return ret;
+
+ if (cptvf->lfs.kcrypto_eng_grp_num == OTX2_CPT_INVALID_CRYPTO_ENG_GRP) {
+ dev_err(dev, "Engine group for kernel crypto not available\n");
+ ret = -ENOENT;
+ return ret;
+ }
+ eng_grp_msk = 1 << cptvf->lfs.kcrypto_eng_grp_num;
+
+ ret = otx2_cptvf_send_kvf_limits_msg(cptvf);
+ if (ret)
+ return ret;
+
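+	/* Use one LF per online CPU unless the PF enforces a kernel VF limit */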
+ lfs_num = cptvf->lfs.kvf_limits ? cptvf->lfs.kvf_limits :
+ num_online_cpus();
+
+ otx2_cptlf_set_dev_info(lfs, cptvf->pdev, cptvf->reg_base,
+ &cptvf->pfvf_mbox, cptvf->blkaddr);
+ ret = otx2_cptlf_init(lfs, eng_grp_msk, OTX2_CPT_QUEUE_HI_PRIO,
+ lfs_num);
+ if (ret)
+ return ret;
+
+ /* Get msix offsets for attached LFs */
+ ret = otx2_cpt_msix_offset_msg(lfs);
+ if (ret)
+ goto cleanup_lf;
+
+ /* Initialize LFs software side */
+ ret = lf_sw_init(lfs);
+ if (ret)
+ goto cleanup_lf;
+
+ /* Register LFs interrupts */
+ ret = otx2_cptlf_register_interrupts(lfs);
+ if (ret)
+ goto cleanup_lf_sw;
+
+ /* Set interrupts affinity */
+ ret = otx2_cptlf_set_irqs_affinity(lfs);
+ if (ret)
+ goto unregister_intr;
+
+ atomic_set(&lfs->state, OTX2_CPTLF_STARTED);
+ /* Register crypto algorithms */
+ ret = otx2_cpt_crypto_init(lfs->pdev, THIS_MODULE, lfs_num, 1);
+ if (ret) {
+ dev_err(&lfs->pdev->dev, "algorithms registration failed\n");
+ goto disable_irqs;
+ }
+ return 0;
+
+disable_irqs:
+ otx2_cptlf_free_irqs_affinity(lfs);
+unregister_intr:
+ otx2_cptlf_unregister_interrupts(lfs);
+cleanup_lf_sw:
+ lf_sw_cleanup(lfs);
+cleanup_lf:
+ otx2_cptlf_shutdown(lfs);
+
+ return ret;
+}
+
+static int otx2_cptvf_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ struct otx2_cptvf_dev *cptvf;
+ void __iomem * const *iomap;
+ int ret;
+
+ cptvf = devm_kzalloc(dev, sizeof(*cptvf), GFP_KERNEL);
+ if (!cptvf)
+ return -ENOMEM;
+
+ ret = pcim_enable_device(pdev);
+ if (ret) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ goto clear_drvdata;
+ }
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+ if (ret) {
+ dev_err(dev, "Unable to get usable DMA configuration\n");
+ goto clear_drvdata;
+ }
+ /* Map VF's configuration registers */
+ ret = pcim_iomap_regions_request_all(pdev, 1 << PCI_PF_REG_BAR_NUM,
+ OTX2_CPTVF_DRV_NAME);
+ if (ret) {
+ dev_err(dev, "Couldn't get PCI resources 0x%x\n", ret);
+ goto clear_drvdata;
+ }
+ pci_set_master(pdev);
+ pci_set_drvdata(pdev, cptvf);
+ cptvf->pdev = pdev;
+
+ iomap = pcim_iomap_table(pdev);
+ if (!iomap) {
+ dev_err(dev, "Failed to get iomap table\n");
+ ret = -ENODEV;
+ goto clear_drvdata;
+ }
+ cptvf->reg_base = iomap[PCI_PF_REG_BAR_NUM];
+
+ otx2_cpt_set_hw_caps(pdev, &cptvf->cap_flag);
+
+ ret = cn10k_cptvf_lmtst_init(cptvf);
+ if (ret)
+ goto clear_drvdata;
+
+ /* Initialize PF<=>VF mailbox */
+ ret = cptvf_pfvf_mbox_init(cptvf);
+ if (ret)
+ goto clear_drvdata;
+
+ /* Register interrupts */
+ ret = cptvf_register_interrupts(cptvf);
+ if (ret)
+ goto destroy_pfvf_mbox;
+
+ cptvf->blkaddr = (cpt_block_num == 0) ? BLKADDR_CPT0 : BLKADDR_CPT1;
+ /* Initialize CPT LFs */
+ ret = cptvf_lf_init(cptvf);
+ if (ret)
+ goto unregister_interrupts;
+
+ return 0;
+
+unregister_interrupts:
+ cptvf_disable_pfvf_mbox_intrs(cptvf);
+destroy_pfvf_mbox:
+ cptvf_pfvf_mbox_destroy(cptvf);
+clear_drvdata:
+ pci_set_drvdata(pdev, NULL);
+
+ return ret;
+}
+
+static void otx2_cptvf_remove(struct pci_dev *pdev)
+{
+ struct otx2_cptvf_dev *cptvf = pci_get_drvdata(pdev);
+
+ if (!cptvf) {
+ dev_err(&pdev->dev, "Invalid CPT VF device.\n");
+ return;
+ }
+ cptvf_lf_shutdown(&cptvf->lfs);
+ /* Disable PF-VF mailbox interrupt */
+ cptvf_disable_pfvf_mbox_intrs(cptvf);
+ /* Destroy PF-VF mbox */
+ cptvf_pfvf_mbox_destroy(cptvf);
+ pci_set_drvdata(pdev, NULL);
+}
+
+/* Supported devices */
+static const struct pci_device_id otx2_cptvf_id_table[] = {
+ {PCI_VDEVICE(CAVIUM, OTX2_CPT_PCI_VF_DEVICE_ID), 0},
+ {PCI_VDEVICE(CAVIUM, CN10K_CPT_PCI_VF_DEVICE_ID), 0},
+ { 0, } /* end of table */
+};
+
+static struct pci_driver otx2_cptvf_pci_driver = {
+ .name = OTX2_CPTVF_DRV_NAME,
+ .id_table = otx2_cptvf_id_table,
+ .probe = otx2_cptvf_probe,
+ .remove = otx2_cptvf_remove,
+};
+
+module_pci_driver(otx2_cptvf_pci_driver);
+
+MODULE_AUTHOR("Marvell");
+MODULE_DESCRIPTION("Marvell RVU CPT Virtual Function Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(pci, otx2_cptvf_id_table);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
new file mode 100644
index 000000000000..2f90ebbbc5cb
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2018 Marvell. */
+
+#include "otx2_cpt_common.h"
+#include "otx2_cptvf.h"
+#include <rvu_reg.h>
+
+int otx2_cpt_mbox_bbuf_init(struct otx2_cptvf_dev *cptvf, struct pci_dev *pdev)
+{
+ struct otx2_mbox_dev *mdev;
+ struct otx2_mbox *otx2_mbox;
+
+ cptvf->bbuf_base = devm_kmalloc(&pdev->dev, MBOX_SIZE, GFP_KERNEL);
+ if (!cptvf->bbuf_base)
+ return -ENOMEM;
+ /*
+	 * Overwrite the mbox mbase to point at the bounce buffer so that the
+	 * PF/VF prepares all mbox messages in the bounce buffer instead of
+	 * directly in hardware mbox memory.
+ */
+ otx2_mbox = &cptvf->pfvf_mbox;
+ mdev = &otx2_mbox->dev[0];
+ mdev->mbase = cptvf->bbuf_base;
+
+ return 0;
+}
+
+static void otx2_cpt_sync_mbox_bbuf(struct otx2_mbox *mbox, int devid)
+{
+ u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+ void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE);
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ struct mbox_hdr *hdr;
+ u64 msg_size;
+
+ if (mdev->mbase == hw_mbase)
+ return;
+
+ hdr = hw_mbase + mbox->rx_start;
+ msg_size = hdr->msg_size;
+
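+	/* Never copy more than the RX region can hold */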
+ if (msg_size > mbox->rx_size - msgs_offset)
+ msg_size = mbox->rx_size - msgs_offset;
+
+ /* Copy mbox messages from mbox memory to bounce buffer */
+ memcpy(mdev->mbase + mbox->rx_start,
+ hw_mbase + mbox->rx_start, msg_size + msgs_offset);
+}
+
+irqreturn_t otx2_cptvf_pfvf_mbox_intr(int __always_unused irq, void *arg)
+{
+ struct otx2_cptvf_dev *cptvf = arg;
+ u64 intr;
+
+ /* Read the interrupt bits */
+ intr = otx2_cpt_read64(cptvf->reg_base, BLKADDR_RVUM, 0,
+ OTX2_RVU_VF_INT);
+
+ if (intr & 0x1ULL) {
+ /* Schedule work queue function to process the MBOX request */
+ queue_work(cptvf->pfvf_mbox_wq, &cptvf->pfvf_mbox_work);
+ /* Clear and ack the interrupt */
+ otx2_cpt_write64(cptvf->reg_base, BLKADDR_RVUM, 0,
+ OTX2_RVU_VF_INT, 0x1ULL);
+ }
+ return IRQ_HANDLED;
+}
+
+static void process_pfvf_mbox_mbox_msg(struct otx2_cptvf_dev *cptvf,
+ struct mbox_msghdr *msg)
+{
+ struct otx2_cptlfs_info *lfs = &cptvf->lfs;
+ struct otx2_cpt_kvf_limits_rsp *rsp_limits;
+ struct otx2_cpt_egrp_num_rsp *rsp_grp;
+ struct cpt_rd_wr_reg_msg *rsp_reg;
+ struct msix_offset_rsp *rsp_msix;
+ int i;
+
+ if (msg->id >= MBOX_MSG_MAX) {
+ dev_err(&cptvf->pdev->dev,
+ "MBOX msg with unknown ID %d\n", msg->id);
+ return;
+ }
+ if (msg->sig != OTX2_MBOX_RSP_SIG) {
+ dev_err(&cptvf->pdev->dev,
+ "MBOX msg with wrong signature %x, ID %d\n",
+ msg->sig, msg->id);
+ return;
+ }
+ switch (msg->id) {
+ case MBOX_MSG_READY:
+ cptvf->vf_id = ((msg->pcifunc >> RVU_PFVF_FUNC_SHIFT)
+ & RVU_PFVF_FUNC_MASK) - 1;
+ break;
+ case MBOX_MSG_ATTACH_RESOURCES:
+ /* Check if resources were successfully attached */
+ if (!msg->rc)
+ lfs->are_lfs_attached = 1;
+ break;
+ case MBOX_MSG_DETACH_RESOURCES:
+ /* Check if resources were successfully detached */
+ if (!msg->rc)
+ lfs->are_lfs_attached = 0;
+ break;
+ case MBOX_MSG_MSIX_OFFSET:
+ rsp_msix = (struct msix_offset_rsp *) msg;
+ for (i = 0; i < rsp_msix->cptlfs; i++)
+ lfs->lf[i].msix_offset = rsp_msix->cptlf_msixoff[i];
+
+ for (i = 0; i < rsp_msix->cpt1_lfs; i++)
+ lfs->lf[i].msix_offset = rsp_msix->cpt1_lf_msixoff[i];
+ break;
+ case MBOX_MSG_CPT_RD_WR_REGISTER:
+ rsp_reg = (struct cpt_rd_wr_reg_msg *) msg;
+ if (msg->rc) {
+ dev_err(&cptvf->pdev->dev,
+ "Reg %llx rd/wr(%d) failed %d\n",
+ rsp_reg->reg_offset, rsp_reg->is_write,
+ msg->rc);
+ return;
+ }
+ if (!rsp_reg->is_write)
+ *rsp_reg->ret_val = rsp_reg->val;
+ break;
+ case MBOX_MSG_GET_ENG_GRP_NUM:
+ rsp_grp = (struct otx2_cpt_egrp_num_rsp *) msg;
+ cptvf->lfs.kcrypto_eng_grp_num = rsp_grp->eng_grp_num;
+ break;
+ case MBOX_MSG_GET_KVF_LIMITS:
+ rsp_limits = (struct otx2_cpt_kvf_limits_rsp *) msg;
+ cptvf->lfs.kvf_limits = rsp_limits->kvf_limits;
+ break;
+ default:
+ dev_err(&cptvf->pdev->dev, "Unsupported msg %d received.\n",
+ msg->id);
+ break;
+ }
+}
+
+void otx2_cptvf_pfvf_mbox_handler(struct work_struct *work)
+{
+ struct otx2_cptvf_dev *cptvf;
+ struct otx2_mbox *pfvf_mbox;
+ struct otx2_mbox_dev *mdev;
+ struct mbox_hdr *rsp_hdr;
+ struct mbox_msghdr *msg;
+ int offset, i;
+
+ /* sync with mbox memory region */
+ smp_rmb();
+
+ cptvf = container_of(work, struct otx2_cptvf_dev, pfvf_mbox_work);
+ pfvf_mbox = &cptvf->pfvf_mbox;
+ otx2_cpt_sync_mbox_bbuf(pfvf_mbox, 0);
+ mdev = &pfvf_mbox->dev[0];
+ rsp_hdr = (struct mbox_hdr *)(mdev->mbase + pfvf_mbox->rx_start);
+ if (rsp_hdr->num_msgs == 0)
+ return;
+ offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+
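+	/* Walk each response message; next_msgoff gives the offset of the following message */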
+ for (i = 0; i < rsp_hdr->num_msgs; i++) {
+ msg = (struct mbox_msghdr *)(mdev->mbase + pfvf_mbox->rx_start +
+ offset);
+ process_pfvf_mbox_mbox_msg(cptvf, msg);
+ offset = msg->next_msgoff;
+ mdev->msgs_acked++;
+ }
+ otx2_mbox_reset(pfvf_mbox, 0);
+}
+
+int otx2_cptvf_send_eng_grp_num_msg(struct otx2_cptvf_dev *cptvf, int eng_type)
+{
+ struct otx2_mbox *mbox = &cptvf->pfvf_mbox;
+ struct pci_dev *pdev = cptvf->pdev;
+ struct otx2_cpt_egrp_num_msg *req;
+
+ req = (struct otx2_cpt_egrp_num_msg *)
+ otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
+ sizeof(struct otx2_cpt_egrp_num_rsp));
+ if (req == NULL) {
+		dev_err(&pdev->dev, "Failed to allocate RVU mailbox message\n");
+ return -EFAULT;
+ }
+ req->hdr.id = MBOX_MSG_GET_ENG_GRP_NUM;
+ req->hdr.sig = OTX2_MBOX_REQ_SIG;
+ req->hdr.pcifunc = OTX2_CPT_RVU_PFFUNC(cptvf->vf_id, 0);
+ req->eng_type = eng_type;
+
+ return otx2_cpt_send_mbox_msg(mbox, pdev);
+}
+
+int otx2_cptvf_send_kvf_limits_msg(struct otx2_cptvf_dev *cptvf)
+{
+ struct otx2_mbox *mbox = &cptvf->pfvf_mbox;
+ struct pci_dev *pdev = cptvf->pdev;
+ struct mbox_msghdr *req;
+ int ret;
+
+ req = (struct mbox_msghdr *)
+ otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
+ sizeof(struct otx2_cpt_kvf_limits_rsp));
+ if (req == NULL) {
+		dev_err(&pdev->dev, "Failed to allocate RVU mailbox message\n");
+ return -EFAULT;
+ }
+ req->id = MBOX_MSG_GET_KVF_LIMITS;
+ req->sig = OTX2_MBOX_REQ_SIG;
+ req->pcifunc = OTX2_CPT_RVU_PFFUNC(cptvf->vf_id, 0);
+
+ ret = otx2_cpt_send_mbox_msg(mbox, pdev);
+
+ return ret;
+}
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c
new file mode 100644
index 000000000000..ad0e751bc151
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c
@@ -0,0 +1,544 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2019 Marvell. */
+
+#include "otx2_cptvf.h"
+#include "otx2_cpt_common.h"
+
+/* SG list header size in bytes */
+#define SG_LIST_HDR_SIZE 8
+
+/* Default timeout when waiting for free pending entry in us */
+#define CPT_PENTRY_TIMEOUT 1000
+#define CPT_PENTRY_STEP 50
+
+/* Default threshold for stopping and resuming sender requests */
+#define CPT_IQ_STOP_MARGIN 128
+#define CPT_IQ_RESUME_MARGIN 512
+
+/* Default command timeout in seconds */
+#define CPT_COMMAND_TIMEOUT 4
+#define CPT_TIME_IN_RESET_COUNT 5
+
+static void otx2_cpt_dump_sg_list(struct pci_dev *pdev,
+ struct otx2_cpt_req_info *req)
+{
+ int i;
+
+ pr_debug("Gather list size %d\n", req->in_cnt);
+ for (i = 0; i < req->in_cnt; i++) {
+ pr_debug("Buffer %d size %d, vptr 0x%p, dmaptr 0x%p\n", i,
+ req->in[i].size, req->in[i].vptr,
+ (void *) req->in[i].dma_addr);
+ pr_debug("Buffer hexdump (%d bytes)\n",
+ req->in[i].size);
+ print_hex_dump_debug("", DUMP_PREFIX_NONE, 16, 1,
+ req->in[i].vptr, req->in[i].size, false);
+ }
+ pr_debug("Scatter list size %d\n", req->out_cnt);
+ for (i = 0; i < req->out_cnt; i++) {
+ pr_debug("Buffer %d size %d, vptr 0x%p, dmaptr 0x%p\n", i,
+ req->out[i].size, req->out[i].vptr,
+ (void *) req->out[i].dma_addr);
+ pr_debug("Buffer hexdump (%d bytes)\n", req->out[i].size);
+ print_hex_dump_debug("", DUMP_PREFIX_NONE, 16, 1,
+ req->out[i].vptr, req->out[i].size, false);
+ }
+}
+
+static inline struct otx2_cpt_pending_entry *get_free_pending_entry(
+ struct otx2_cpt_pending_queue *q,
+ int qlen)
+{
+ struct otx2_cpt_pending_entry *ent = NULL;
+
+ ent = &q->head[q->rear];
+ if (unlikely(ent->busy))
+ return NULL;
+
+ q->rear++;
+ if (unlikely(q->rear == qlen))
+ q->rear = 0;
+
+ return ent;
+}
+
+static inline u32 modulo_inc(u32 index, u32 length, u32 inc)
+{
+ if (WARN_ON(inc > length))
+ inc = length;
+
+ index += inc;
+ if (unlikely(index >= length))
+ index -= length;
+
+ return index;
+}
+
+static inline void free_pentry(struct otx2_cpt_pending_entry *pentry)
+{
+ pentry->completion_addr = NULL;
+ pentry->info = NULL;
+ pentry->callback = NULL;
+ pentry->areq = NULL;
+ pentry->resume_sender = false;
+ pentry->busy = false;
+}
+
+static inline int setup_sgio_components(struct pci_dev *pdev,
+ struct otx2_cpt_buf_ptr *list,
+ int buf_count, u8 *buffer)
+{
+ struct otx2_cpt_sglist_component *sg_ptr = NULL;
+ int ret = 0, i, j;
+ int components;
+
+ if (unlikely(!list)) {
+ dev_err(&pdev->dev, "Input list pointer is NULL\n");
+ return -EFAULT;
+ }
+
+ for (i = 0; i < buf_count; i++) {
+ if (unlikely(!list[i].vptr))
+ continue;
+ list[i].dma_addr = dma_map_single(&pdev->dev, list[i].vptr,
+ list[i].size,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(&pdev->dev, list[i].dma_addr))) {
+ dev_err(&pdev->dev, "Dma mapping failed\n");
+ ret = -EIO;
+ goto sg_cleanup;
+ }
+ }
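+	/*
+	 * Each SG list component describes up to four buffers; fill the
+	 * complete components first, then handle the remainder below.
+	 */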
+ components = buf_count / 4;
+ sg_ptr = (struct otx2_cpt_sglist_component *)buffer;
+ for (i = 0; i < components; i++) {
+ sg_ptr->len0 = cpu_to_be16(list[i * 4 + 0].size);
+ sg_ptr->len1 = cpu_to_be16(list[i * 4 + 1].size);
+ sg_ptr->len2 = cpu_to_be16(list[i * 4 + 2].size);
+ sg_ptr->len3 = cpu_to_be16(list[i * 4 + 3].size);
+ sg_ptr->ptr0 = cpu_to_be64(list[i * 4 + 0].dma_addr);
+ sg_ptr->ptr1 = cpu_to_be64(list[i * 4 + 1].dma_addr);
+ sg_ptr->ptr2 = cpu_to_be64(list[i * 4 + 2].dma_addr);
+ sg_ptr->ptr3 = cpu_to_be64(list[i * 4 + 3].dma_addr);
+ sg_ptr++;
+ }
+ components = buf_count % 4;
+
+ switch (components) {
+ case 3:
+ sg_ptr->len2 = cpu_to_be16(list[i * 4 + 2].size);
+ sg_ptr->ptr2 = cpu_to_be64(list[i * 4 + 2].dma_addr);
+ fallthrough;
+ case 2:
+ sg_ptr->len1 = cpu_to_be16(list[i * 4 + 1].size);
+ sg_ptr->ptr1 = cpu_to_be64(list[i * 4 + 1].dma_addr);
+ fallthrough;
+ case 1:
+ sg_ptr->len0 = cpu_to_be16(list[i * 4 + 0].size);
+ sg_ptr->ptr0 = cpu_to_be64(list[i * 4 + 0].dma_addr);
+ break;
+ default:
+ break;
+ }
+ return ret;
+
+sg_cleanup:
+ for (j = 0; j < i; j++) {
+ if (list[j].dma_addr) {
+ dma_unmap_single(&pdev->dev, list[j].dma_addr,
+ list[j].size, DMA_BIDIRECTIONAL);
+ }
+
+ list[j].dma_addr = 0;
+ }
+ return ret;
+}
+
+static inline struct otx2_cpt_inst_info *info_create(struct pci_dev *pdev,
+ struct otx2_cpt_req_info *req,
+ gfp_t gfp)
+{
+ int align = OTX2_CPT_DMA_MINALIGN;
+ struct otx2_cpt_inst_info *info;
+ u32 dlen, align_dlen, info_len;
+ u16 g_sz_bytes, s_sz_bytes;
+ u32 total_mem_len;
+
+ if (unlikely(req->in_cnt > OTX2_CPT_MAX_SG_IN_CNT ||
+ req->out_cnt > OTX2_CPT_MAX_SG_OUT_CNT)) {
+		dev_err(&pdev->dev, "Error: too many SG components\n");
+ return NULL;
+ }
+
+ g_sz_bytes = ((req->in_cnt + 3) / 4) *
+ sizeof(struct otx2_cpt_sglist_component);
+ s_sz_bytes = ((req->out_cnt + 3) / 4) *
+ sizeof(struct otx2_cpt_sglist_component);
+
+ dlen = g_sz_bytes + s_sz_bytes + SG_LIST_HDR_SIZE;
+ align_dlen = ALIGN(dlen, align);
+ info_len = ALIGN(sizeof(*info), align);
+ total_mem_len = align_dlen + info_len + sizeof(union otx2_cpt_res_s);
+
+ info = kzalloc(total_mem_len, gfp);
+ if (unlikely(!info))
+ return NULL;
+
+ info->dlen = dlen;
+ info->in_buffer = (u8 *)info + info_len;
+
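+	/*
+	 * The first 8 bytes form the SG list header: scatter count, gather
+	 * count and two reserved halfwords, stored big-endian.
+	 */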
+ ((u16 *)info->in_buffer)[0] = req->out_cnt;
+ ((u16 *)info->in_buffer)[1] = req->in_cnt;
+ ((u16 *)info->in_buffer)[2] = 0;
+ ((u16 *)info->in_buffer)[3] = 0;
+ cpu_to_be64s((u64 *)info->in_buffer);
+
+ /* Setup gather (input) components */
+ if (setup_sgio_components(pdev, req->in, req->in_cnt,
+ &info->in_buffer[8])) {
+ dev_err(&pdev->dev, "Failed to setup gather list\n");
+ goto destroy_info;
+ }
+
+ if (setup_sgio_components(pdev, req->out, req->out_cnt,
+ &info->in_buffer[8 + g_sz_bytes])) {
+ dev_err(&pdev->dev, "Failed to setup scatter list\n");
+ goto destroy_info;
+ }
+
+ info->dma_len = total_mem_len - info_len;
+ info->dptr_baddr = dma_map_single(&pdev->dev, info->in_buffer,
+ info->dma_len, DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(&pdev->dev, info->dptr_baddr))) {
+ dev_err(&pdev->dev, "DMA Mapping failed for cpt req\n");
+ goto destroy_info;
+ }
+ /*
+ * Get buffer for union otx2_cpt_res_s response
+ * structure and its physical address
+ */
+ info->completion_addr = info->in_buffer + align_dlen;
+ info->comp_baddr = info->dptr_baddr + align_dlen;
+
+ return info;
+
+destroy_info:
+ otx2_cpt_info_destroy(pdev, info);
+ return NULL;
+}
+
+static int process_request(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
+ struct otx2_cpt_pending_queue *pqueue,
+ struct otx2_cptlf_info *lf)
+{
+ struct otx2_cptvf_request *cpt_req = &req->req;
+ struct otx2_cpt_pending_entry *pentry = NULL;
+ union otx2_cpt_ctrl_info *ctrl = &req->ctrl;
+ struct otx2_cpt_inst_info *info = NULL;
+ union otx2_cpt_res_s *result = NULL;
+ struct otx2_cpt_iq_command iq_cmd;
+ union otx2_cpt_inst_s cptinst;
+ int retry, ret = 0;
+ u8 resume_sender;
+ gfp_t gfp;
+
+ gfp = (req->areq->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL :
+ GFP_ATOMIC;
+ if (unlikely(!otx2_cptlf_started(lf->lfs)))
+ return -ENODEV;
+
+ info = info_create(pdev, req, gfp);
+ if (unlikely(!info)) {
+		dev_err(&pdev->dev, "Setting up cpt inst info failed\n");
+ return -ENOMEM;
+ }
+ cpt_req->dlen = info->dlen;
+
+ result = info->completion_addr;
+ result->s.compcode = OTX2_CPT_COMPLETION_CODE_INIT;
+
+ spin_lock_bh(&pqueue->lock);
+ pentry = get_free_pending_entry(pqueue, pqueue->qlen);
+ retry = CPT_PENTRY_TIMEOUT / CPT_PENTRY_STEP;
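+	/*
+	 * Poll for a free pending entry, dropping the lock between
+	 * attempts, for up to CPT_PENTRY_TIMEOUT microseconds.
+	 */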
+ while (unlikely(!pentry) && retry--) {
+ spin_unlock_bh(&pqueue->lock);
+ udelay(CPT_PENTRY_STEP);
+ spin_lock_bh(&pqueue->lock);
+ pentry = get_free_pending_entry(pqueue, pqueue->qlen);
+ }
+
+ if (unlikely(!pentry)) {
+ ret = -ENOSPC;
+ goto destroy_info;
+ }
+
+ /*
+	 * If the pending queue is close to full, tell the sender to
+	 * stop/sleep by returning -EBUSY. This is done only for contexts
+	 * that can sleep (GFP_KERNEL).
+ */
+ if (gfp == GFP_KERNEL &&
+ pqueue->pending_count > (pqueue->qlen - CPT_IQ_STOP_MARGIN)) {
+ pentry->resume_sender = true;
+ } else
+ pentry->resume_sender = false;
+ resume_sender = pentry->resume_sender;
+ pqueue->pending_count++;
+
+ pentry->completion_addr = info->completion_addr;
+ pentry->info = info;
+ pentry->callback = req->callback;
+ pentry->areq = req->areq;
+ pentry->busy = true;
+ info->pentry = pentry;
+ info->time_in = jiffies;
+ info->req = req;
+
+ /* Fill in the command */
+ iq_cmd.cmd.u = 0;
+ iq_cmd.cmd.s.opcode = cpu_to_be16(cpt_req->opcode.flags);
+ iq_cmd.cmd.s.param1 = cpu_to_be16(cpt_req->param1);
+ iq_cmd.cmd.s.param2 = cpu_to_be16(cpt_req->param2);
+ iq_cmd.cmd.s.dlen = cpu_to_be16(cpt_req->dlen);
+
+	/* 64-bit swap for microcode data reads, not needed for addresses */
+ cpu_to_be64s(&iq_cmd.cmd.u);
+ iq_cmd.dptr = info->dptr_baddr;
+ iq_cmd.rptr = 0;
+ iq_cmd.cptr.u = 0;
+ iq_cmd.cptr.s.grp = ctrl->s.grp;
+
+ /* Fill in the CPT_INST_S type command for HW interpretation */
+ otx2_cpt_fill_inst(&cptinst, &iq_cmd, info->comp_baddr);
+
+ /* Print debug info if enabled */
+ otx2_cpt_dump_sg_list(pdev, req);
+ pr_debug("Cpt_inst_s hexdump (%d bytes)\n", OTX2_CPT_INST_SIZE);
+ print_hex_dump_debug("", 0, 16, 1, &cptinst, OTX2_CPT_INST_SIZE, false);
+ pr_debug("Dptr hexdump (%d bytes)\n", cpt_req->dlen);
+ print_hex_dump_debug("", 0, 16, 1, info->in_buffer,
+ cpt_req->dlen, false);
+
+ /* Send CPT command */
+ lf->lfs->ops->send_cmd(&cptinst, 1, lf);
+
+ /*
+	 * The pending-queue entry is allocated and filled in the same
+	 * critical section in which the CPT instruction is submitted, so
+	 * that requests appear in the same order in the pending queue and
+	 * the instruction queue.
+ */
+ spin_unlock_bh(&pqueue->lock);
+
+ ret = resume_sender ? -EBUSY : -EINPROGRESS;
+ return ret;
+
+destroy_info:
+ spin_unlock_bh(&pqueue->lock);
+ otx2_cpt_info_destroy(pdev, info);
+ return ret;
+}
+
+int otx2_cpt_do_request(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
+ int cpu_num)
+{
+ struct otx2_cptvf_dev *cptvf = pci_get_drvdata(pdev);
+ struct otx2_cptlfs_info *lfs = &cptvf->lfs;
+
+ return process_request(lfs->pdev, req, &lfs->lf[cpu_num].pqueue,
+ &lfs->lf[cpu_num]);
+}
+
+static int cpt_process_ccode(struct otx2_cptlfs_info *lfs,
+ union otx2_cpt_res_s *cpt_status,
+ struct otx2_cpt_inst_info *info,
+ u32 *res_code)
+{
+ u8 uc_ccode = lfs->ops->cpt_get_uc_compcode(cpt_status);
+ u8 ccode = lfs->ops->cpt_get_compcode(cpt_status);
+ struct pci_dev *pdev = lfs->pdev;
+
+ switch (ccode) {
+ case OTX2_CPT_COMP_E_FAULT:
+ dev_err(&pdev->dev,
+ "Request failed with DMA fault\n");
+ otx2_cpt_dump_sg_list(pdev, info->req);
+ break;
+
+ case OTX2_CPT_COMP_E_HWERR:
+ dev_err(&pdev->dev,
+ "Request failed with hardware error\n");
+ otx2_cpt_dump_sg_list(pdev, info->req);
+ break;
+
+ case OTX2_CPT_COMP_E_INSTERR:
+ dev_err(&pdev->dev,
+ "Request failed with instruction error\n");
+ otx2_cpt_dump_sg_list(pdev, info->req);
+ break;
+
+ case OTX2_CPT_COMP_E_NOTDONE:
+ /* check for timeout */
+ if (time_after_eq(jiffies, info->time_in +
+ CPT_COMMAND_TIMEOUT * HZ))
+ dev_warn(&pdev->dev,
+				 "Request timed out 0x%p\n", info->req);
+ else if (info->extra_time < CPT_TIME_IN_RESET_COUNT) {
+ info->time_in = jiffies;
+ info->extra_time++;
+ }
+ return 1;
+
+ case OTX2_CPT_COMP_E_GOOD:
+ case OTX2_CPT_COMP_E_WARN:
+ /*
+		 * Check the microcode completion code; it is only valid
+		 * when the completion code is CPT_COMP_E::GOOD
+ */
+ if (uc_ccode != OTX2_CPT_UCC_SUCCESS) {
+ /*
+			 * If the requested HMAC is truncated and the ucode
+			 * returns an s/g write-length error, report success:
+			 * the ucode writes as many bytes of the calculated
+			 * HMAC as fit in the gather buffer and flags a
+			 * write-length error whenever the buffer is smaller
+			 * than the full HMAC size.
+ */
+ if (info->req->is_trunc_hmac &&
+ uc_ccode == OTX2_CPT_UCC_SG_WRITE_LENGTH) {
+ *res_code = 0;
+ break;
+ }
+
+ dev_err(&pdev->dev,
+ "Request failed with software error code 0x%x\n",
+ cpt_status->s.uc_compcode);
+ otx2_cpt_dump_sg_list(pdev, info->req);
+ break;
+ }
+ /* Request has been processed with success */
+ *res_code = 0;
+ break;
+
+ default:
+ dev_err(&pdev->dev,
+ "Request returned invalid status %d\n", ccode);
+ break;
+ }
+ return 0;
+}
+
+static inline void process_pending_queue(struct otx2_cptlfs_info *lfs,
+ struct otx2_cpt_pending_queue *pqueue)
+{
+ struct otx2_cpt_pending_entry *resume_pentry = NULL;
+ void (*callback)(int status, void *arg, void *req);
+ struct otx2_cpt_pending_entry *pentry = NULL;
+ union otx2_cpt_res_s *cpt_status = NULL;
+ struct otx2_cpt_inst_info *info = NULL;
+ struct otx2_cpt_req_info *req = NULL;
+ struct crypto_async_request *areq;
+ struct pci_dev *pdev = lfs->pdev;
+ u32 res_code, resume_index;
+
+ while (1) {
+ spin_lock_bh(&pqueue->lock);
+ pentry = &pqueue->head[pqueue->front];
+
+ if (WARN_ON(!pentry)) {
+ spin_unlock_bh(&pqueue->lock);
+ break;
+ }
+
+ res_code = -EINVAL;
+ if (unlikely(!pentry->busy)) {
+ spin_unlock_bh(&pqueue->lock);
+ break;
+ }
+
+ if (unlikely(!pentry->callback)) {
+ dev_err(&pdev->dev, "Callback NULL\n");
+ goto process_pentry;
+ }
+
+ info = pentry->info;
+ if (unlikely(!info)) {
+ dev_err(&pdev->dev, "Pending entry post arg NULL\n");
+ goto process_pentry;
+ }
+
+ req = info->req;
+ if (unlikely(!req)) {
+ dev_err(&pdev->dev, "Request NULL\n");
+ goto process_pentry;
+ }
+
+ cpt_status = pentry->completion_addr;
+ if (unlikely(!cpt_status)) {
+ dev_err(&pdev->dev, "Completion address NULL\n");
+ goto process_pentry;
+ }
+
+ if (cpt_process_ccode(lfs, cpt_status, info, &res_code)) {
+ spin_unlock_bh(&pqueue->lock);
+ return;
+ }
+ info->pdev = pdev;
+
+process_pentry:
+ /*
+		 * Check whether the sender should be told to resume. This is
+		 * done CPT_IQ_RESUME_MARGIN entries before the pending queue
+		 * becomes empty.
+ */
+ resume_index = modulo_inc(pqueue->front, pqueue->qlen,
+ CPT_IQ_RESUME_MARGIN);
+ resume_pentry = &pqueue->head[resume_index];
+ if (resume_pentry &&
+ resume_pentry->resume_sender) {
+ resume_pentry->resume_sender = false;
+ callback = resume_pentry->callback;
+ areq = resume_pentry->areq;
+
+ if (callback) {
+ spin_unlock_bh(&pqueue->lock);
+
+ /*
+ * EINPROGRESS is an indication for sending
+ * side that it can resume sending requests
+ */
+ callback(-EINPROGRESS, areq, info);
+ spin_lock_bh(&pqueue->lock);
+ }
+ }
+
+ callback = pentry->callback;
+ areq = pentry->areq;
+ free_pentry(pentry);
+
+ pqueue->pending_count--;
+ pqueue->front = modulo_inc(pqueue->front, pqueue->qlen, 1);
+ spin_unlock_bh(&pqueue->lock);
+
+ /*
+		 * Call the callback after the current pending entry has been
+		 * processed; skip it if the callback pointer is NULL.
+ */
+ if (callback)
+ callback(res_code, areq, info);
+ }
+}
+
+void otx2_cpt_post_process(struct otx2_cptlf_wqe *wqe)
+{
+ process_pending_queue(wqe->lfs,
+ &wqe->lfs->lf[wqe->lf_num].pqueue);
+}
+
+int otx2_cpt_get_kcrypto_eng_grp_num(struct pci_dev *pdev)
+{
+ struct otx2_cptvf_dev *cptvf = pci_get_drvdata(pdev);
+
+ return cptvf->lfs.kcrypto_eng_grp_num;
+}
diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/mailbox.c
index 4626404be541..5e04010bc851 100644
--- a/drivers/firmware/arm_scmi/mailbox.c
+++ b/drivers/firmware/arm_scmi/mailbox.c
@@ -195,7 +195,7 @@ static const struct scmi_transport_ops scmi_mailbox_ops = {
const struct scmi_desc scmi_mailbox_desc = {
.ops = &scmi_mailbox_ops,
- .max_rx_timeout_ms = 30, /* We may increase this if required */
+ .max_rx_timeout_ms = 300, /* We may increase this if required */
.max_msg = 20, /* Limited by MBOX_TX_QUEUE_LEN */
- .max_msg_size = 128,
+ .max_msg_size = 256,
};
diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
index 82fb3babff72..14fd2c1f179b 100644
--- a/drivers/firmware/arm_scmi/perf.c
+++ b/drivers/firmware/arm_scmi/perf.c
@@ -640,6 +640,23 @@ static int scmi_dev_domain_id(struct device *dev)
return clkspec.args[0];
}
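+/*
+ * Round a frequency given in Hz to the nearest multiple of 100 MHz.
+ * For example 2494000000 Hz (2494 MHz) becomes 2500000000 Hz, while
+ * 2240000000 Hz (2240 MHz) becomes 2200000000 Hz.
+ */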
+static uint32_t roundoff_to_nearest_100(uint32_t freq)
+{
+ uint32_t quotient, remainder;
+ uint32_t freq_mhz;
+
+ freq_mhz = (freq / 1000000);
+ quotient = freq_mhz / 100;
+ remainder = freq_mhz % 100;
+
+ if (remainder >= 50)
+ freq_mhz = quotient * 100 + 100;
+ else
+ freq_mhz = quotient * 100;
+
+ return freq_mhz * 1000000;
+}
+
static int scmi_dvfs_device_opps_add(const struct scmi_handle *handle,
struct device *dev)
{
@@ -658,6 +675,12 @@ static int scmi_dvfs_device_opps_add(const struct scmi_handle *handle,
for (opp = dom->opp, idx = 0; idx < dom->opp_count; idx++, opp++) {
freq = opp->perf * dom->mult_factor;
+ /*
+		 * Marvell specific: round off to the nearest hundred MHz
+		 * if the calculated frequency is not a multiple of 100 MHz.
+ */
+ freq = roundoff_to_nearest_100(freq);
+
ret = dev_pm_opp_add(dev, freq, 0);
if (ret) {
dev_warn(dev, "failed to add opp %luHz\n", freq);
@@ -707,7 +730,7 @@ static int scmi_dvfs_freq_get(const struct scmi_handle *handle, u32 domain,
ret = scmi_perf_level_get(handle, domain, &level, poll);
if (!ret)
- *freq = level * dom->mult_factor;
+ *freq = roundoff_to_nearest_100(level * dom->mult_factor);
return ret;
}
diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c
index 840754dcc6ca..57204c03d386 100644
--- a/drivers/firmware/arm_sdei.c
+++ b/drivers/firmware/arm_sdei.c
@@ -68,7 +68,7 @@ struct sdei_event {
static DEFINE_MUTEX(sdei_events_lock);
/* and then hold this when modifying the list */
-static DEFINE_SPINLOCK(sdei_list_lock);
+static DEFINE_RAW_SPINLOCK(sdei_list_lock);
static LIST_HEAD(sdei_list);
/* Private events are registered/enabled via IPI passing one of these */
@@ -168,14 +168,14 @@ static struct sdei_event *sdei_event_find(u32 event_num)
lockdep_assert_held(&sdei_events_lock);
- spin_lock(&sdei_list_lock);
+ raw_spin_lock(&sdei_list_lock);
list_for_each_entry(e, &sdei_list, list) {
if (e->event_num == event_num) {
found = e;
break;
}
}
- spin_unlock(&sdei_list_lock);
+ raw_spin_unlock(&sdei_list_lock);
return found;
}
@@ -260,9 +260,9 @@ static struct sdei_event *sdei_event_create(u32 event_num,
event->private_registered = regs;
}
- spin_lock(&sdei_list_lock);
+ raw_spin_lock(&sdei_list_lock);
list_add(&event->list, &sdei_list);
- spin_unlock(&sdei_list_lock);
+ raw_spin_unlock(&sdei_list_lock);
return event;
@@ -288,9 +288,9 @@ static void sdei_event_destroy_llocked(struct sdei_event *event)
static void sdei_event_destroy(struct sdei_event *event)
{
- spin_lock(&sdei_list_lock);
+ raw_spin_lock(&sdei_list_lock);
sdei_event_destroy_llocked(event);
- spin_unlock(&sdei_list_lock);
+ raw_spin_unlock(&sdei_list_lock);
}
static int sdei_api_get_version(u64 *version)
@@ -417,9 +417,9 @@ int sdei_event_enable(u32 event_num)
err = sdei_do_cross_call(_local_event_enable, event);
if (!err) {
- spin_lock(&sdei_list_lock);
+ raw_spin_lock(&sdei_list_lock);
event->reenable = true;
- spin_unlock(&sdei_list_lock);
+ raw_spin_unlock(&sdei_list_lock);
}
cpus_read_unlock();
mutex_unlock(&sdei_events_lock);
@@ -455,9 +455,9 @@ int sdei_event_disable(u32 event_num)
return -ENOENT;
}
- spin_lock(&sdei_list_lock);
+ raw_spin_lock(&sdei_list_lock);
event->reenable = false;
- spin_unlock(&sdei_list_lock);
+ raw_spin_unlock(&sdei_list_lock);
if (event->type == SDEI_EVENT_TYPE_SHARED)
err = sdei_api_event_disable(event->event_num);
@@ -502,10 +502,10 @@ int sdei_event_unregister(u32 event_num)
goto unlock;
}
- spin_lock(&sdei_list_lock);
+ raw_spin_lock(&sdei_list_lock);
event->reregister = false;
event->reenable = false;
- spin_unlock(&sdei_list_lock);
+ raw_spin_unlock(&sdei_list_lock);
if (event->type == SDEI_EVENT_TYPE_SHARED)
err = sdei_api_event_unregister(event->event_num);
@@ -532,7 +532,7 @@ static int sdei_unregister_shared(void)
struct sdei_event *event;
mutex_lock(&sdei_events_lock);
- spin_lock(&sdei_list_lock);
+ raw_spin_lock(&sdei_list_lock);
list_for_each_entry(event, &sdei_list, list) {
if (event->type != SDEI_EVENT_TYPE_SHARED)
continue;
@@ -541,7 +541,7 @@ static int sdei_unregister_shared(void)
if (err)
break;
}
- spin_unlock(&sdei_list_lock);
+ raw_spin_unlock(&sdei_list_lock);
mutex_unlock(&sdei_events_lock);
return err;
@@ -610,9 +610,9 @@ int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg)
goto cpu_unlock;
}
- spin_lock(&sdei_list_lock);
+ raw_spin_lock(&sdei_list_lock);
event->reregister = true;
- spin_unlock(&sdei_list_lock);
+ raw_spin_unlock(&sdei_list_lock);
cpu_unlock:
cpus_read_unlock();
unlock:
@@ -626,7 +626,7 @@ static int sdei_reregister_shared(void)
struct sdei_event *event;
mutex_lock(&sdei_events_lock);
- spin_lock(&sdei_list_lock);
+ raw_spin_lock(&sdei_list_lock);
list_for_each_entry(event, &sdei_list, list) {
if (event->type != SDEI_EVENT_TYPE_SHARED)
continue;
@@ -652,7 +652,7 @@ static int sdei_reregister_shared(void)
}
}
}
- spin_unlock(&sdei_list_lock);
+ raw_spin_unlock(&sdei_list_lock);
mutex_unlock(&sdei_events_lock);
return err;
@@ -664,7 +664,7 @@ static int sdei_cpuhp_down(unsigned int cpu)
int err;
/* un-register private events */
- spin_lock(&sdei_list_lock);
+ raw_spin_lock(&sdei_list_lock);
list_for_each_entry(event, &sdei_list, list) {
if (event->type == SDEI_EVENT_TYPE_SHARED)
continue;
@@ -675,7 +675,7 @@ static int sdei_cpuhp_down(unsigned int cpu)
event->event_num, err);
}
}
- spin_unlock(&sdei_list_lock);
+ raw_spin_unlock(&sdei_list_lock);
return sdei_mask_local_cpu();
}
@@ -686,7 +686,7 @@ static int sdei_cpuhp_up(unsigned int cpu)
int err;
/* re-register/enable private events */
- spin_lock(&sdei_list_lock);
+ raw_spin_lock(&sdei_list_lock);
list_for_each_entry(event, &sdei_list, list) {
if (event->type == SDEI_EVENT_TYPE_SHARED)
continue;
@@ -707,7 +707,7 @@ static int sdei_cpuhp_up(unsigned int cpu)
}
}
}
- spin_unlock(&sdei_list_lock);
+ raw_spin_unlock(&sdei_list_lock);
return sdei_unmask_local_cpu();
}
diff --git a/drivers/gpio/gpio-thunderx.c b/drivers/gpio/gpio-thunderx.c
index 9f66deab46ea..29964c168996 100644
--- a/drivers/gpio/gpio-thunderx.c
+++ b/drivers/gpio/gpio-thunderx.c
@@ -16,7 +16,16 @@
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <asm-generic/msi.h>
-
+#ifdef CONFIG_MRVL_OCTEONTX_EL0_INTR
+#include <linux/arm-smccc.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/moduleparam.h>
+#include <linux/uaccess.h>
+#include <linux/mmu_context.h>
+#include <linux/ioctl.h>
+#include <linux/fs.h>
+#endif
#define GPIO_RX_DAT 0x0
#define GPIO_TX_SET 0x8
@@ -32,17 +41,66 @@
#define GPIO_BIT_CFG_FIL_CNT_SHIFT 4
#define GPIO_BIT_CFG_FIL_SEL_SHIFT 8
#define GPIO_BIT_CFG_TX_OD BIT(12)
-#define GPIO_BIT_CFG_PIN_SEL_MASK GENMASK(25, 16)
+#define GPIO_BIT_CFG_PIN_SEL_MASK GENMASK(26, 16)
#define GPIO_INTR 0x800
#define GPIO_INTR_INTR BIT(0)
#define GPIO_INTR_INTR_W1S BIT(1)
#define GPIO_INTR_ENA_W1C BIT(2)
#define GPIO_INTR_ENA_W1S BIT(3)
#define GPIO_2ND_BANK 0x1400
+#define MRVL_OCTEONTX2_96XX_PARTNUM 0xB2
+
#define GLITCH_FILTER_400NS ((4u << GPIO_BIT_CFG_FIL_SEL_SHIFT) | \
(9u << GPIO_BIT_CFG_FIL_CNT_SHIFT))
+#ifdef CONFIG_MRVL_OCTEONTX_EL0_INTR
+#define DEVICE_NAME "otx-gpio-ctr"
+#define OTX_IOC_MAGIC 0xF2
+#define MAX_GPIO 80
+
+static struct device *otx_device;
+static struct class *otx_class;
+static struct cdev *otx_cdev;
+static dev_t otx_dev;
+static DEFINE_SPINLOCK(el3_inthandler_lock);
+static int gpio_in_use;
+static int gpio_installed[MAX_GPIO];
+static struct thread_info *gpio_installed_threads[MAX_GPIO];
+static struct task_struct *gpio_installed_tasks[MAX_GPIO];
+
+/* THUNDERX SMC definitions */
+/* X1 - gpio_num, X2 - sp, X3 - cpu, X4 - ttbr0 */
+#define THUNDERX_INSTALL_GPIO_INT 0xC2000801
+/* X1 - gpio_num */
+#define THUNDERX_REMOVE_GPIO_INT 0xC2000802
+
+struct intr_hand {
+ u64 mask;
+ char name[50];
+ u64 coffset;
+ u64 soffset;
+ irqreturn_t (*handler)(int msix, void *data);
+};
+
+struct otx_gpio_usr_data {
+ u64 isr_base;
+ u64 sp;
+ u64 cpu;
+ u64 gpio_num;
+};
+
+
+#define OTX_IOC_SET_GPIO_HANDLER \
+ _IOW(OTX_IOC_MAGIC, 1, struct otx_gpio_usr_data)
+
+#define OTX_IOC_CLR_GPIO_HANDLER \
+ _IO(OTX_IOC_MAGIC, 2)
+
+#define OTX_IOC_TRIGGER_GPIO_HANDLER \
+ _IO(OTX_IOC_MAGIC, 3)
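+
+/*
+ * Typical user-space sequence (illustrative sketch only; the /dev node
+ * name comes from the "otx-gpio-ctr" device created in probe and may
+ * differ depending on udev policy, and the variable names below are
+ * placeholders):
+ *
+ *	int fd = open("/dev/otx-gpio-ctr", O_RDWR);
+ *	struct otx_gpio_usr_data d = {
+ *		.isr_base = isr_entry_va,
+ *		.sp = isr_stack_top_va,
+ *		.cpu = target_cpu,
+ *		.gpio_num = gpio,
+ *	};
+ *	ioctl(fd, OTX_IOC_SET_GPIO_HANDLER, &d);	// install EL0 handler
+ *	ioctl(fd, OTX_IOC_TRIGGER_GPIO_HANDLER, gpio);	// optional test fire
+ *	ioctl(fd, OTX_IOC_CLR_GPIO_HANDLER, gpio);	// remove handler
+ */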
+#endif
+
struct thunderx_gpio;
struct thunderx_line {
@@ -62,14 +120,192 @@ struct thunderx_gpio {
int base_msi;
};
-static unsigned int bit_cfg_reg(unsigned int line)
+static unsigned int intr_reg(unsigned int line)
{
- return 8 * line + GPIO_BIT_CFG;
+ return 8 * line + GPIO_INTR;
}
-static unsigned int intr_reg(unsigned int line)
+#ifdef CONFIG_MRVL_OCTEONTX_EL0_INTR
+static inline int __install_el3_inthandler(unsigned long gpio_num,
+ unsigned long sp,
+ unsigned long cpu,
+ unsigned long ttbr0)
{
- return 8 * line + GPIO_INTR;
+ struct arm_smccc_res res;
+ unsigned long flags;
+ int retval = -1;
+
+ spin_lock_irqsave(&el3_inthandler_lock, flags);
+ if (!gpio_installed[gpio_num]) {
+ lock_context(current->group_leader->mm, gpio_num);
+ arm_smccc_smc(THUNDERX_INSTALL_GPIO_INT, gpio_num,
+ sp, cpu, ttbr0, 0, 0, 0, &res);
+ if (res.a0 == 0) {
+ gpio_installed[gpio_num] = 1;
+ gpio_installed_threads[gpio_num]
+ = current_thread_info();
+ gpio_installed_tasks[gpio_num]
+ = current->group_leader;
+ retval = 0;
+ } else {
+ unlock_context_by_index(gpio_num);
+ }
+ }
+ spin_unlock_irqrestore(&el3_inthandler_lock, flags);
+ return retval;
+}
+
+static inline int __remove_el3_inthandler(unsigned long gpio_num)
+{
+ struct arm_smccc_res res;
+ unsigned long flags;
+	int retval;
+
+ spin_lock_irqsave(&el3_inthandler_lock, flags);
+ if (gpio_installed[gpio_num]) {
+ arm_smccc_smc(THUNDERX_REMOVE_GPIO_INT, gpio_num,
+ 0, 0, 0, 0, 0, 0, &res);
+ gpio_installed[gpio_num] = 0;
+ gpio_installed_threads[gpio_num] = NULL;
+ gpio_installed_tasks[gpio_num] = NULL;
+ unlock_context_by_index(gpio_num);
+ retval = 0;
+ } else {
+ retval = -1;
+ }
+ spin_unlock_irqrestore(&el3_inthandler_lock, flags);
+ return retval;
+}
+
+static inline int __trigger_el3_inthandler(unsigned long gpio_num)
+{
+ struct thunderx_gpio *txgpio = dev_get_drvdata(otx_device);
+	int retval = -1;
+ unsigned long flags;
+
+ spin_lock_irqsave(&el3_inthandler_lock, flags);
+ if (gpio_installed[gpio_num]) {
+ writeq(GPIO_INTR_INTR_W1S,
+ txgpio->register_base + intr_reg(gpio_num));
+ retval = 0;
+ }
+ spin_unlock_irqrestore(&el3_inthandler_lock, flags);
+
+ return retval;
+}
+
+static long otx_dev_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+{
+ int err = 0;
+ struct otx_gpio_usr_data gpio_usr;
+ u64 gpio_ttbr, gpio_isr_base, gpio_sp, gpio_cpu, gpio_num;
+ int ret;
+ //struct task_struct *task = current;
+
+ if (!gpio_in_use)
+ return -EINVAL;
+
+ if (_IOC_TYPE(cmd) != OTX_IOC_MAGIC)
+ return -ENOTTY;
+
+ if (_IOC_DIR(cmd) & _IOC_READ)
+ err = !access_ok((void __user *)arg, _IOC_SIZE(cmd));
+	else if (_IOC_DIR(cmd) & _IOC_WRITE)
+ err = !access_ok((void __user *)arg, _IOC_SIZE(cmd));
+
+ if (err)
+ return -EFAULT;
+
+ switch (cmd) {
+ case OTX_IOC_SET_GPIO_HANDLER: /*Install GPIO ISR handler*/
+		ret = copy_from_user(&gpio_usr, (void __user *)arg,
+				     _IOC_SIZE(cmd));
+		if (ret)
+			return -EFAULT;
+		if (gpio_usr.gpio_num >= MAX_GPIO)
+			return -EINVAL;
+ gpio_ttbr = 0;
+		//TODO: reserve an ASID to avoid ASID rollovers
+ asm volatile("mrs %0, ttbr0_el1\n\t" : "=r"(gpio_ttbr));
+ gpio_isr_base = gpio_usr.isr_base;
+ gpio_sp = gpio_usr.sp;
+ gpio_cpu = gpio_usr.cpu;
+ gpio_num = gpio_usr.gpio_num;
+ ret = __install_el3_inthandler(gpio_num, gpio_sp,
+ gpio_cpu, gpio_isr_base);
+ if (ret != 0)
+ return -EEXIST;
+ break;
+ case OTX_IOC_CLR_GPIO_HANDLER: /*Clear GPIO ISR handler*/
+ gpio_usr.gpio_num = arg;
+ if (gpio_usr.gpio_num >= MAX_GPIO)
+ return -EINVAL;
+ ret = __remove_el3_inthandler(gpio_usr.gpio_num);
+ if (ret != 0)
+ return -ENOENT;
+ break;
+ case OTX_IOC_TRIGGER_GPIO_HANDLER: /* Trigger GPIO ISR handler */
+ gpio_usr.gpio_num = arg;
+ if (gpio_usr.gpio_num >= MAX_GPIO)
+ return -EINVAL;
+ ret = __trigger_el3_inthandler(gpio_usr.gpio_num);
+ if (ret != 0)
+ return -ENODEV;
+ break;
+ default:
+ return -ENOTTY;
+ }
+ return 0;
+}
+
+static void cleanup_el3_irqs(struct task_struct *task)
+{
+ int i;
+
+ for (i = 0; i < MAX_GPIO; i++) {
+ if (gpio_installed[i] &&
+ gpio_installed_tasks[i] &&
+ (gpio_installed_tasks[i] == task)) {
+ pr_alert("Exiting, removing handler for GPIO %d\n",
+ i);
+ __remove_el3_inthandler(i);
+ pr_alert("Exited, removed handler for GPIO %d\n",
+ i);
+ } else {
+ if (gpio_installed[i] &&
+ (gpio_installed_threads[i]
+ == current_thread_info()))
+ pr_alert(
+ "Exiting, thread info matches, not removing handler for GPIO %d\n",
+ i);
+ }
+ }
+}
+
+static int otx_dev_open(struct inode *inode, struct file *fp)
+{
+ gpio_in_use = 1;
+ return 0;
+}
+
+static int otx_dev_release(struct inode *inode, struct file *fp)
+{
+ if (gpio_in_use == 0)
+ return -EINVAL;
+ gpio_in_use = 0;
+ return 0;
+}
+
+static const struct file_operations fops = {
+ .owner = THIS_MODULE,
+ .open = otx_dev_open,
+ .release = otx_dev_release,
+ .unlocked_ioctl = otx_dev_ioctl
+};
+#endif
+
+static unsigned int bit_cfg_reg(unsigned int line)
+{
+ return 8 * line + GPIO_BIT_CFG;
}
static bool thunderx_gpio_is_gpio_nowarn(struct thunderx_gpio *txgpio,
@@ -104,16 +340,17 @@ static int thunderx_gpio_request(struct gpio_chip *chip, unsigned int line)
static int thunderx_gpio_dir_in(struct gpio_chip *chip, unsigned int line)
{
struct thunderx_gpio *txgpio = gpiochip_get_data(chip);
+ unsigned long flags;
if (!thunderx_gpio_is_gpio(txgpio, line))
return -EIO;
- raw_spin_lock(&txgpio->lock);
+ raw_spin_lock_irqsave(&txgpio->lock, flags);
clear_bit(line, txgpio->invert_mask);
clear_bit(line, txgpio->od_mask);
writeq(txgpio->line_entries[line].fil_bits,
txgpio->register_base + bit_cfg_reg(line));
- raw_spin_unlock(&txgpio->lock);
+ raw_spin_unlock_irqrestore(&txgpio->lock, flags);
return 0;
}
@@ -135,11 +372,12 @@ static int thunderx_gpio_dir_out(struct gpio_chip *chip, unsigned int line,
{
struct thunderx_gpio *txgpio = gpiochip_get_data(chip);
u64 bit_cfg = txgpio->line_entries[line].fil_bits | GPIO_BIT_CFG_TX_OE;
+ unsigned long flags;
if (!thunderx_gpio_is_gpio(txgpio, line))
return -EIO;
- raw_spin_lock(&txgpio->lock);
+ raw_spin_lock_irqsave(&txgpio->lock, flags);
thunderx_gpio_set(chip, line, value);
@@ -151,7 +389,7 @@ static int thunderx_gpio_dir_out(struct gpio_chip *chip, unsigned int line,
writeq(bit_cfg, txgpio->register_base + bit_cfg_reg(line));
- raw_spin_unlock(&txgpio->lock);
+ raw_spin_unlock_irqrestore(&txgpio->lock, flags);
return 0;
}
@@ -188,11 +426,12 @@ static int thunderx_gpio_set_config(struct gpio_chip *chip,
int ret = -ENOTSUPP;
struct thunderx_gpio *txgpio = gpiochip_get_data(chip);
void __iomem *reg = txgpio->register_base + (bank * GPIO_2ND_BANK) + GPIO_TX_SET;
+ unsigned long flags;
if (!thunderx_gpio_is_gpio(txgpio, line))
return -EIO;
- raw_spin_lock(&txgpio->lock);
+ raw_spin_lock_irqsave(&txgpio->lock, flags);
orig_invert = test_bit(line, txgpio->invert_mask);
new_invert = orig_invert;
orig_od = test_bit(line, txgpio->od_mask);
@@ -243,7 +482,7 @@ static int thunderx_gpio_set_config(struct gpio_chip *chip,
default:
break;
}
- raw_spin_unlock(&txgpio->lock);
+ raw_spin_unlock_irqrestore(&txgpio->lock, flags);
/*
* If currently output and OPEN_DRAIN changed, install the new
@@ -330,19 +569,20 @@ static int thunderx_gpio_irq_set_type(struct irq_data *d,
struct thunderx_line *txline =
&txgpio->line_entries[irqd_to_hwirq(d)];
u64 bit_cfg;
+ unsigned long flags;
irqd_set_trigger_type(d, flow_type);
bit_cfg = txline->fil_bits | GPIO_BIT_CFG_INT_EN;
+ raw_spin_lock_irqsave(&txgpio->lock, flags);
if (flow_type & IRQ_TYPE_EDGE_BOTH) {
irq_set_handler_locked(d, handle_fasteoi_ack_irq);
bit_cfg |= GPIO_BIT_CFG_INT_TYPE;
} else {
- irq_set_handler_locked(d, handle_fasteoi_mask_irq);
+ irq_set_handler_locked(d, handle_level_irq);
}
- raw_spin_lock(&txgpio->lock);
if (flow_type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW)) {
bit_cfg |= GPIO_BIT_CFG_PIN_XOR;
set_bit(txline->line, txgpio->invert_mask);
@@ -351,7 +591,7 @@ static int thunderx_gpio_irq_set_type(struct irq_data *d,
}
clear_bit(txline->line, txgpio->od_mask);
writeq(bit_cfg, txgpio->register_base + bit_cfg_reg(txline->line));
- raw_spin_unlock(&txgpio->lock);
+ raw_spin_unlock_irqrestore(&txgpio->lock, flags);
return IRQ_SET_MASK_OK;
}
@@ -422,6 +662,31 @@ static void *thunderx_gpio_populate_parent_alloc_info(struct gpio_chip *chip,
return info;
}
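+/*
+ * Apply the optional "pin-cfg" device-tree property: a list of <pin sel>
+ * pairs where each "sel" value is written verbatim to the pin's
+ * GPIO_BIT_CFG register.  Illustrative fragment (node name and values
+ * are examples only):
+ *
+ *	gpio {
+ *		pin-cfg = <8 0x400>, <9 0x400>;
+ *	};
+ */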
+static void thunderx_gpio_pinsel(struct device *dev,
+ struct thunderx_gpio *txgpio)
+{
+ struct device_node *node;
+ const __be32 *pinsel;
+ int npins, rlen, i;
+ uint32_t pin, sel;
+
+ node = dev_of_node(dev);
+ if (!node)
+ return;
+
+ pinsel = of_get_property(node, "pin-cfg", &rlen);
+	if (!pinsel || rlen % (2 * sizeof(__be32)))
+ return;
+ npins = rlen / sizeof(__be32) / 2;
+
+ for (i = 0; i < npins; i++) {
+ pin = of_read_number(pinsel++, 1);
+ sel = of_read_number(pinsel++, 1);
+ dev_info(dev, "Set GPIO pin %d CFG register to %x\n", pin, sel);
+ writeq(sel, txgpio->register_base + bit_cfg_reg(pin));
+ }
+}
+
static int thunderx_gpio_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
@@ -470,7 +735,14 @@ static int thunderx_gpio_probe(struct pci_dev *pdev,
u64 c = readq(txgpio->register_base + GPIO_CONST);
ngpio = c & GPIO_CONST_GPIOS_MASK;
- txgpio->base_msi = (c >> 8) & 0xff;
+
+ /* Workaround for all passes of T96xx */
+ if (((pdev->subsystem_device >> 8) & 0xFF) ==
+ MRVL_OCTEONTX2_96XX_PARTNUM) {
+ txgpio->base_msi = 0x36;
+ } else {
+ txgpio->base_msi = (c >> 8) & 0xff;
+ }
}
txgpio->msix_entries = devm_kcalloc(dev,
@@ -561,7 +833,68 @@ static int thunderx_gpio_probe(struct pci_dev *pdev,
dev_info(dev, "ThunderX GPIO: %d lines with base %d.\n",
ngpio, chip->base);
+
+ /* Configure default functions of GPIO pins */
+ thunderx_gpio_pinsel(dev, txgpio);
+
+#ifdef CONFIG_MRVL_OCTEONTX_EL0_INTR
+ /* Register task cleanup handler */
+ err = task_cleanup_handler_add(cleanup_el3_irqs);
+ if (err != 0) {
+ dev_err(dev, "Failed to register cleanup handler: %d\n", err);
+ goto cleanup_handler_err;
+ }
+
+ /* create a character device */
+ err = alloc_chrdev_region(&otx_dev, 1, 1, DEVICE_NAME);
+ if (err != 0) {
+ dev_err(dev, "Failed to create device: %d\n", err);
+ goto alloc_chrdev_err;
+ }
+
+ otx_cdev = cdev_alloc();
+ if (!otx_cdev) {
+ err = -ENODEV;
+ goto cdev_alloc_err;
+ }
+
+ cdev_init(otx_cdev, &fops);
+ err = cdev_add(otx_cdev, otx_dev, 1);
+ if (err < 0) {
+ err = -ENODEV;
+ goto cdev_add_err;
+ }
+
+	/* create a new class for sysfs */
+ otx_class = class_create(THIS_MODULE, DEVICE_NAME);
+ if (IS_ERR(otx_class)) {
+ err = -ENODEV;
+ goto class_create_err;
+ }
+
+ otx_device = device_create(otx_class, NULL, otx_dev, txgpio,
+ DEVICE_NAME);
+ if (IS_ERR(otx_device)) {
+ err = -ENODEV;
+ goto device_create_err;
+ }
+#endif
+
return 0;
+
+#ifdef CONFIG_MRVL_OCTEONTX_EL0_INTR
+device_create_err:
+ class_destroy(otx_class);
+
+class_create_err:
+cdev_add_err:
+ cdev_del(otx_cdev);
+cdev_alloc_err:
+ unregister_chrdev_region(otx_dev, 1);
+alloc_chrdev_err:
+ task_cleanup_handler_remove(cleanup_el3_irqs);
+cleanup_handler_err:
+#endif
out:
pci_set_drvdata(pdev, NULL);
return err;
@@ -579,6 +912,15 @@ static void thunderx_gpio_remove(struct pci_dev *pdev)
irq_domain_remove(txgpio->chip.irq.domain);
pci_set_drvdata(pdev, NULL);
+
+#ifdef CONFIG_MRVL_OCTEONTX_EL0_INTR
+ device_destroy(otx_class, otx_dev);
+ class_destroy(otx_class);
+ cdev_del(otx_cdev);
+ unregister_chrdev_region(otx_dev, 1);
+
+ task_cleanup_handler_remove(cleanup_el3_irqs);
+#endif
}
static const struct pci_device_id thunderx_gpio_id_table[] = {
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index c1198245461d..84530fd80998 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -97,19 +97,27 @@ config CORESIGHT_SOURCE_ETM3X
module will be called coresight-etm3x.
config CORESIGHT_SOURCE_ETM4X
- tristate "CoreSight Embedded Trace Macrocell 4.x driver"
+ tristate "CoreSight ETMv4.x / ETE driver"
depends on ARM64
select CORESIGHT_LINKS_AND_SINKS
select PID_IN_CONTEXTIDR
help
- This driver provides support for the ETM4.x tracer module, tracing the
- instructions that a processor is executing. This is primarily useful
- for instruction level tracing. Depending on the implemented version
- data tracing may also be available.
+ This driver provides support for the CoreSight Embedded Trace Macrocell
+ version 4.x and the Embedded Trace Extensions (ETE). Both are CPU tracer
+ modules, tracing the instructions that a processor is executing. This is
+ primarily useful for instruction level tracing.
To compile this driver as a module, choose M here: the
module will be called coresight-etm4x.
+config ETM4X_IMPDEF_FEATURE
+ bool "Control implementation defined overflow support in ETM 4.x driver"
+ depends on CORESIGHT_SOURCE_ETM4X
+ help
+	  This option provides implementation-defined control for CoreSight
+	  ETM 4.x tracer modules that cannot reduce the commit rate
+	  automatically. This avoids overflow between the ETM tracer module
+	  and the CPU core.
+
config CORESIGHT_STM
tristate "CoreSight System Trace Macrocell driver"
depends on (ARM && !(CPU_32v3 || CPU_32v4 || CPU_32v4T)) || ARM64
@@ -165,4 +173,18 @@ config CORESIGHT_CTI_INTEGRATION_REGS
CTI trigger connections between this and other devices.These
registers are not used in normal operation and can leave devices in
an inconsistent state.
+
+config CORESIGHT_TRBE
+ tristate "Trace Buffer Extension (TRBE) driver"
+ depends on ARM64 && CORESIGHT_SOURCE_ETM4X
+ help
+	  This driver provides support for the per-CPU Trace Buffer Extension
+	  (TRBE). TRBE always needs to be used along with its corresponding
+	  per-CPU ETE component. ETE generates trace data which is then
+	  captured with TRBE. Unlike traditional sink devices, TRBE is a CPU
+	  feature accessible via system registers. But its explicit dependency
+	  on the trace unit (ETE) requires it to be plugged in as a CoreSight
+	  sink device.
+
+ To compile this driver as a module, choose M here: the module will be
+ called coresight-trbe.
endif
diff --git a/drivers/hwtracing/coresight/Makefile b/drivers/hwtracing/coresight/Makefile
index f20e357758d1..99a05781c1f0 100644
--- a/drivers/hwtracing/coresight/Makefile
+++ b/drivers/hwtracing/coresight/Makefile
@@ -4,10 +4,10 @@
#
obj-$(CONFIG_CORESIGHT) += coresight.o
coresight-y := coresight-core.o coresight-etm-perf.o coresight-platform.o \
- coresight-sysfs.o
+ coresight-sysfs.o coresight-quirks.o
obj-$(CONFIG_CORESIGHT_LINK_AND_SINK_TMC) += coresight-tmc.o
coresight-tmc-y := coresight-tmc-core.o coresight-tmc-etf.o \
- coresight-tmc-etr.o
+ coresight-tmc-etr.o coresight-tmc-secure-etr.o
obj-$(CONFIG_CORESIGHT_SINK_TPIU) += coresight-tpiu.o
obj-$(CONFIG_CORESIGHT_SINK_ETBV10) += coresight-etb10.o
obj-$(CONFIG_CORESIGHT_LINKS_AND_SINKS) += coresight-funnel.o \
@@ -21,5 +21,6 @@ obj-$(CONFIG_CORESIGHT_STM) += coresight-stm.o
obj-$(CONFIG_CORESIGHT_CPU_DEBUG) += coresight-cpu-debug.o
obj-$(CONFIG_CORESIGHT_CATU) += coresight-catu.o
obj-$(CONFIG_CORESIGHT_CTI) += coresight-cti.o
+obj-$(CONFIG_CORESIGHT_TRBE) += coresight-trbe.o
coresight-cti-y := coresight-cti-core.o coresight-cti-platform.o \
coresight-cti-sysfs.o
diff --git a/drivers/hwtracing/coresight/coresight-catu.c b/drivers/hwtracing/coresight/coresight-catu.c
index 8e19e8cdcce5..e0740c6dbd54 100644
--- a/drivers/hwtracing/coresight/coresight-catu.c
+++ b/drivers/hwtracing/coresight/coresight-catu.c
@@ -401,8 +401,9 @@ static const struct attribute_group *catu_groups[] = {
static inline int catu_wait_for_ready(struct catu_drvdata *drvdata)
{
- return coresight_timeout(drvdata->base,
- CATU_STATUS, CATU_STATUS_READY, 1);
+ struct csdev_access *csa = &drvdata->csdev->access;
+
+ return coresight_timeout(csa, CATU_STATUS, CATU_STATUS_READY, 1);
}
static int catu_enable_hw(struct catu_drvdata *drvdata, void *data)
@@ -411,6 +412,7 @@ static int catu_enable_hw(struct catu_drvdata *drvdata, void *data)
u32 control, mode;
struct etr_buf *etr_buf = data;
struct device *dev = &drvdata->csdev->dev;
+ struct coresight_device *csdev = drvdata->csdev;
if (catu_wait_for_ready(drvdata))
dev_warn(dev, "Timeout while waiting for READY\n");
@@ -421,7 +423,7 @@ static int catu_enable_hw(struct catu_drvdata *drvdata, void *data)
return -EBUSY;
}
- rc = coresight_claim_device_unlocked(drvdata->base);
+ rc = coresight_claim_device_unlocked(csdev);
if (rc)
return rc;
@@ -465,9 +467,10 @@ static int catu_disable_hw(struct catu_drvdata *drvdata)
{
int rc = 0;
struct device *dev = &drvdata->csdev->dev;
+ struct coresight_device *csdev = drvdata->csdev;
catu_write_control(drvdata, 0);
- coresight_disclaim_device_unlocked(drvdata->base);
+ coresight_disclaim_device_unlocked(csdev);
if (catu_wait_for_ready(drvdata)) {
dev_info(dev, "Timeout while waiting for READY\n");
rc = -EAGAIN;
@@ -551,6 +554,7 @@ static int catu_probe(struct amba_device *adev, const struct amba_id *id)
dev->platform_data = pdata;
drvdata->base = base;
+ catu_desc.access = CSDEV_ACCESS_IOMEM(base);
catu_desc.pdata = pdata;
catu_desc.dev = dev;
catu_desc.groups = catu_groups;
diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c
index 424c296845db..f89d41951c16 100644
--- a/drivers/hwtracing/coresight/coresight-core.c
+++ b/drivers/hwtracing/coresight/coresight-core.c
@@ -21,8 +21,10 @@
#include "coresight-etm-perf.h"
#include "coresight-priv.h"
+#include "coresight-tmc.h"
static DEFINE_MUTEX(coresight_mutex);
+static DEFINE_PER_CPU(struct coresight_device *, csdev_sink);
/**
* struct coresight_node - elements of a path, from source to sink
@@ -70,6 +72,18 @@ void coresight_remove_cti_ops(void)
}
EXPORT_SYMBOL_GPL(coresight_remove_cti_ops);
+void coresight_set_percpu_sink(int cpu, struct coresight_device *csdev)
+{
+ per_cpu(csdev_sink, cpu) = csdev;
+}
+EXPORT_SYMBOL_GPL(coresight_set_percpu_sink);
+
+struct coresight_device *coresight_get_percpu_sink(int cpu)
+{
+ return per_cpu(csdev_sink, cpu);
+}
+EXPORT_SYMBOL_GPL(coresight_get_percpu_sink);
+
static int coresight_id_match(struct device *dev, void *data)
{
int trace_id, i_trace_id;
@@ -86,7 +100,7 @@ static int coresight_id_match(struct device *dev, void *data)
i_csdev->type != CORESIGHT_DEV_TYPE_SOURCE)
return 0;
- /* Get the source ID for both compoment */
+ /* Get the source ID for both components */
trace_id = source_ops(csdev)->trace_id(csdev);
i_trace_id = source_ops(i_csdev)->trace_id(i_csdev);
@@ -145,30 +159,32 @@ static int coresight_find_link_outport(struct coresight_device *csdev,
return -ENODEV;
}
-static inline u32 coresight_read_claim_tags(void __iomem *base)
+static inline u32 coresight_read_claim_tags(struct coresight_device *csdev)
{
- return readl_relaxed(base + CORESIGHT_CLAIMCLR);
+ return csdev_access_relaxed_read32(&csdev->access, CORESIGHT_CLAIMCLR);
}
-static inline bool coresight_is_claimed_self_hosted(void __iomem *base)
+static inline bool coresight_is_claimed_self_hosted(struct coresight_device *csdev)
{
- return coresight_read_claim_tags(base) == CORESIGHT_CLAIM_SELF_HOSTED;
+ return coresight_read_claim_tags(csdev) == CORESIGHT_CLAIM_SELF_HOSTED;
}
-static inline bool coresight_is_claimed_any(void __iomem *base)
+static inline bool coresight_is_claimed_any(struct coresight_device *csdev)
{
- return coresight_read_claim_tags(base) != 0;
+ return coresight_read_claim_tags(csdev) != 0;
}
-static inline void coresight_set_claim_tags(void __iomem *base)
+static inline void coresight_set_claim_tags(struct coresight_device *csdev)
{
- writel_relaxed(CORESIGHT_CLAIM_SELF_HOSTED, base + CORESIGHT_CLAIMSET);
+ csdev_access_relaxed_write32(&csdev->access, CORESIGHT_CLAIM_SELF_HOSTED,
+ CORESIGHT_CLAIMSET);
isb();
}
-static inline void coresight_clear_claim_tags(void __iomem *base)
+static inline void coresight_clear_claim_tags(struct coresight_device *csdev)
{
- writel_relaxed(CORESIGHT_CLAIM_SELF_HOSTED, base + CORESIGHT_CLAIMCLR);
+ csdev_access_relaxed_write32(&csdev->access, CORESIGHT_CLAIM_SELF_HOSTED,
+ CORESIGHT_CLAIMCLR);
isb();
}
@@ -182,27 +198,33 @@ static inline void coresight_clear_claim_tags(void __iomem *base)
* Called with CS_UNLOCKed for the component.
* Returns : 0 on success
*/
-int coresight_claim_device_unlocked(void __iomem *base)
+int coresight_claim_device_unlocked(struct coresight_device *csdev)
{
- if (coresight_is_claimed_any(base))
+ if (WARN_ON(!csdev))
+ return -EINVAL;
+
+ if (coresight_is_claimed_any(csdev))
return -EBUSY;
- coresight_set_claim_tags(base);
- if (coresight_is_claimed_self_hosted(base))
+ coresight_set_claim_tags(csdev);
+ if (coresight_is_claimed_self_hosted(csdev))
return 0;
/* There was a race setting the tags, clean up and fail */
- coresight_clear_claim_tags(base);
+ coresight_clear_claim_tags(csdev);
return -EBUSY;
}
EXPORT_SYMBOL_GPL(coresight_claim_device_unlocked);
-int coresight_claim_device(void __iomem *base)
+int coresight_claim_device(struct coresight_device *csdev)
{
int rc;
- CS_UNLOCK(base);
- rc = coresight_claim_device_unlocked(base);
- CS_LOCK(base);
+ if (WARN_ON(!csdev))
+ return -EINVAL;
+
+ CS_UNLOCK(csdev->access.base);
+ rc = coresight_claim_device_unlocked(csdev);
+ CS_LOCK(csdev->access.base);
return rc;
}
@@ -212,11 +234,14 @@ EXPORT_SYMBOL_GPL(coresight_claim_device);
* coresight_disclaim_device_unlocked : Clear the claim tags for the device.
* Called with CS_UNLOCKed for the component.
*/
-void coresight_disclaim_device_unlocked(void __iomem *base)
+void coresight_disclaim_device_unlocked(struct coresight_device *csdev)
{
- if (coresight_is_claimed_self_hosted(base))
- coresight_clear_claim_tags(base);
+ if (WARN_ON(!csdev))
+ return;
+
+ if (coresight_is_claimed_self_hosted(csdev))
+ coresight_clear_claim_tags(csdev);
else
/*
* The external agent may have not honoured our claim
@@ -227,11 +252,14 @@ void coresight_disclaim_device_unlocked(void __iomem *base)
}
EXPORT_SYMBOL_GPL(coresight_disclaim_device_unlocked);
-void coresight_disclaim_device(void __iomem *base)
+void coresight_disclaim_device(struct coresight_device *csdev)
{
- CS_UNLOCK(base);
- coresight_disclaim_device_unlocked(base);
- CS_LOCK(base);
+ if (WARN_ON(!csdev))
+ return;
+
+ CS_UNLOCK(csdev->access.base);
+ coresight_disclaim_device_unlocked(csdev);
+ CS_LOCK(csdev->access.base);
}
EXPORT_SYMBOL_GPL(coresight_disclaim_device);
@@ -418,7 +446,7 @@ static int coresight_enable_source(struct coresight_device *csdev, u32 mode)
if (ret) {
coresight_control_assoc_ectdev(csdev, false);
return ret;
- };
+ }
}
csdev->enable = true;
}
@@ -432,7 +460,7 @@ static int coresight_enable_source(struct coresight_device *csdev, u32 mode)
* coresight_disable_source - Drop the reference count by 1 and disable
* the device if there are no users left.
*
- * @csdev - The coresight device to disable
+ * @csdev: The coresight device to disable
*
* Returns true if the device has been disabled.
*/
@@ -663,6 +691,9 @@ struct coresight_device *coresight_get_sink_by_id(u32 id)
/**
* coresight_get_ref- Helper function to increase reference count to module
* and device.
+ *
+ * @csdev: The coresight device to get a reference on.
+ *
* Return true in successful case and power up the device.
* Return false when failed to get reference of module.
*/
@@ -682,6 +713,8 @@ static inline bool coresight_get_ref(struct coresight_device *csdev)
/**
* coresight_put_ref- Helper function to decrease reference count to module
* and device. Power off the device.
+ *
+ * @csdev: The coresight device to decrement a reference from.
*/
static inline void coresight_put_ref(struct coresight_device *csdev)
{
@@ -744,6 +777,7 @@ static void coresight_drop_device(struct coresight_device *csdev)
/**
* _coresight_build_path - recursively build a path from a @csdev to a sink.
* @csdev: The device to start from.
+ * @sink: The final sink we want in this path.
* @path: The list to add devices to.
*
* The tree of Coresight device is traversed until an activated sink is
@@ -764,6 +798,14 @@ static int _coresight_build_path(struct coresight_device *csdev,
if (csdev == sink)
goto out;
+ if (coresight_is_percpu_source(csdev) && coresight_is_percpu_sink(sink) &&
+ sink == per_cpu(csdev_sink, source_ops(csdev)->cpu_id(csdev))) {
+ if (_coresight_build_path(sink, sink, path) == 0) {
+ found = true;
+ goto out;
+ }
+ }
+
/* Not a sink - recursively explore each port found on this element */
for (i = 0; i < csdev->pdata->nr_outport; i++) {
struct coresight_device *child_dev;
@@ -979,8 +1021,12 @@ coresight_find_default_sink(struct coresight_device *csdev)
int depth = 0;
/* look for a default sink if we have not found for this device */
- if (!csdev->def_sink)
- csdev->def_sink = coresight_find_sink(csdev, &depth);
+ if (!csdev->def_sink) {
+ if (coresight_is_percpu_source(csdev))
+ csdev->def_sink = per_cpu(csdev_sink, source_ops(csdev)->cpu_id(csdev));
+ if (!csdev->def_sink)
+ csdev->def_sink = coresight_find_sink(csdev, &depth);
+ }
return csdev->def_sink;
}
@@ -1046,6 +1092,7 @@ int coresight_enable(struct coresight_device *csdev)
int cpu, ret = 0;
struct coresight_device *sink;
struct list_head *path;
+ struct tmc_drvdata *drvdata;
enum coresight_dev_subtype_source subtype;
subtype = csdev->subtype.source_subtype;
@@ -1080,6 +1127,9 @@ int coresight_enable(struct coresight_device *csdev)
goto out;
}
+ drvdata = dev_get_drvdata(sink->dev.parent);
+ drvdata->etm_source = csdev;
+
ret = coresight_enable_path(path, CS_MODE_SYSFS, NULL);
if (ret)
goto err_path;
@@ -1412,23 +1462,24 @@ static void coresight_remove_conns(struct coresight_device *csdev)
}
/**
- * coresight_timeout - loop until a bit has changed to a specific state.
- * @addr: base address of the area of interest.
- * @offset: address of a register, starting from @addr.
+ * coresight_timeout - loop until a bit has changed to a specific register
+ * state.
+ * @csa: coresight device access for the device
+ * @offset: Offset of the register from the base of the device.
* @position: the position of the bit of interest.
* @value: the value the bit should have.
*
* Return: 0 as soon as the bit has taken the desired state or -EAGAIN if
* TIMEOUT_US has elapsed, which ever happens first.
*/
-
-int coresight_timeout(void __iomem *addr, u32 offset, int position, int value)
+int coresight_timeout(struct csdev_access *csa, u32 offset,
+ int position, int value)
{
int i;
u32 val;
for (i = TIMEOUT_US; i > 0; i--) {
- val = __raw_readl(addr + offset);
+ val = csdev_access_read32(csa, offset);
/* waiting on the bit to go from 0 to 1 */
if (value) {
if (val & BIT(position))
@@ -1452,6 +1503,48 @@ int coresight_timeout(void __iomem *addr, u32 offset, int position, int value)
}
EXPORT_SYMBOL_GPL(coresight_timeout);
+u32 coresight_relaxed_read32(struct coresight_device *csdev, u32 offset)
+{
+ return csdev_access_relaxed_read32(&csdev->access, offset);
+}
+
+u32 coresight_read32(struct coresight_device *csdev, u32 offset)
+{
+ return csdev_access_read32(&csdev->access, offset);
+}
+
+void coresight_relaxed_write32(struct coresight_device *csdev,
+ u32 val, u32 offset)
+{
+ csdev_access_relaxed_write32(&csdev->access, val, offset);
+}
+
+void coresight_write32(struct coresight_device *csdev, u32 val, u32 offset)
+{
+ csdev_access_write32(&csdev->access, val, offset);
+}
+
+u64 coresight_relaxed_read64(struct coresight_device *csdev, u32 offset)
+{
+ return csdev_access_relaxed_read64(&csdev->access, offset);
+}
+
+u64 coresight_read64(struct coresight_device *csdev, u32 offset)
+{
+ return csdev_access_read64(&csdev->access, offset);
+}
+
+void coresight_relaxed_write64(struct coresight_device *csdev,
+ u64 val, u32 offset)
+{
+ csdev_access_relaxed_write64(&csdev->access, val, offset);
+}
+
+void coresight_write64(struct coresight_device *csdev, u64 val, u32 offset)
+{
+ csdev_access_write64(&csdev->access, val, offset);
+}
+
/*
* coresight_release_platform_data: Release references to the devices connected
* to the output port of this device.
@@ -1516,6 +1609,7 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
csdev->type = desc->type;
csdev->subtype = desc->subtype;
csdev->ops = desc->ops;
+ csdev->access = desc->access;
csdev->orphan = false;
csdev->dev.type = &coresight_dev_type[desc->type];
diff --git a/drivers/hwtracing/coresight/coresight-cti-core.c b/drivers/hwtracing/coresight/coresight-cti-core.c
index 0276700c246d..8988b2ed2ea6 100644
--- a/drivers/hwtracing/coresight/coresight-cti-core.c
+++ b/drivers/hwtracing/coresight/coresight-cti-core.c
@@ -102,7 +102,7 @@ static int cti_enable_hw(struct cti_drvdata *drvdata)
goto cti_state_unchanged;
/* claim the device */
- rc = coresight_claim_device(drvdata->base);
+ rc = coresight_claim_device(drvdata->csdev);
if (rc)
goto cti_err_not_enabled;
@@ -136,7 +136,7 @@ static void cti_cpuhp_enable_hw(struct cti_drvdata *drvdata)
goto cti_hp_not_enabled;
/* try to claim the device */
- if (coresight_claim_device(drvdata->base))
+ if (coresight_claim_device(drvdata->csdev))
goto cti_hp_not_enabled;
cti_write_all_hw_regs(drvdata);
@@ -154,6 +154,7 @@ static int cti_disable_hw(struct cti_drvdata *drvdata)
{
struct cti_config *config = &drvdata->config;
struct device *dev = &drvdata->csdev->dev;
+ struct coresight_device *csdev = drvdata->csdev;
spin_lock(&drvdata->spinlock);
@@ -171,7 +172,7 @@ static int cti_disable_hw(struct cti_drvdata *drvdata)
writel_relaxed(0, drvdata->base + CTICONTROL);
config->hw_enabled = false;
- coresight_disclaim_device_unlocked(drvdata->base);
+ coresight_disclaim_device_unlocked(csdev);
CS_LOCK(drvdata->base);
spin_unlock(&drvdata->spinlock);
pm_runtime_put(dev->parent);
@@ -655,6 +656,7 @@ static int cti_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd,
void *v)
{
struct cti_drvdata *drvdata;
+ struct coresight_device *csdev;
unsigned int cpu = smp_processor_id();
int notify_res = NOTIFY_OK;
@@ -662,6 +664,7 @@ static int cti_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd,
return NOTIFY_OK;
drvdata = cti_cpu_drvdata[cpu];
+ csdev = drvdata->csdev;
if (WARN_ON_ONCE(drvdata->ctidev.cpu != cpu))
return NOTIFY_BAD;
@@ -673,13 +676,13 @@ static int cti_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd,
/* CTI regs all static - we have a copy & nothing to save */
drvdata->config.hw_powered = false;
if (drvdata->config.hw_enabled)
- coresight_disclaim_device(drvdata->base);
+ coresight_disclaim_device(csdev);
break;
case CPU_PM_ENTER_FAILED:
drvdata->config.hw_powered = true;
if (drvdata->config.hw_enabled) {
- if (coresight_claim_device(drvdata->base))
+ if (coresight_claim_device(csdev))
drvdata->config.hw_enabled = false;
}
break;
@@ -692,7 +695,7 @@ static int cti_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd,
/* check enable reference count to enable HW */
if (atomic_read(&drvdata->config.enable_req_count)) {
/* check we can claim the device as we re-power */
- if (coresight_claim_device(drvdata->base))
+ if (coresight_claim_device(csdev))
goto cti_notify_exit;
drvdata->config.hw_enabled = true;
@@ -736,7 +739,7 @@ static int cti_dying_cpu(unsigned int cpu)
spin_lock(&drvdata->spinlock);
drvdata->config.hw_powered = false;
if (drvdata->config.hw_enabled)
- coresight_disclaim_device(drvdata->base);
+ coresight_disclaim_device(drvdata->csdev);
spin_unlock(&drvdata->spinlock);
return 0;
}
@@ -868,6 +871,7 @@ static int cti_probe(struct amba_device *adev, const struct amba_id *id)
return PTR_ERR(base);
drvdata->base = base;
+ cti_desc.access = CSDEV_ACCESS_IOMEM(base);
dev_set_drvdata(dev, drvdata);
diff --git a/drivers/hwtracing/coresight/coresight-cti-platform.c b/drivers/hwtracing/coresight/coresight-cti-platform.c
index 98f830c6ed50..ccef04f27f12 100644
--- a/drivers/hwtracing/coresight/coresight-cti-platform.c
+++ b/drivers/hwtracing/coresight/coresight-cti-platform.c
@@ -343,7 +343,6 @@ static int cti_plat_create_connection(struct device *dev,
{
struct cti_trig_con *tc = NULL;
int cpuid = -1, err = 0;
- struct fwnode_handle *cs_fwnode = NULL;
struct coresight_device *csdev = NULL;
const char *assoc_name = "unknown";
char cpu_name_str[16];
@@ -397,8 +396,9 @@ static int cti_plat_create_connection(struct device *dev,
assoc_name = cpu_name_str;
} else {
/* associated device ? */
- cs_fwnode = fwnode_find_reference(fwnode,
- CTI_DT_CSDEV_ASSOC, 0);
+ struct fwnode_handle *cs_fwnode = fwnode_find_reference(fwnode,
+ CTI_DT_CSDEV_ASSOC,
+ 0);
if (!IS_ERR(cs_fwnode)) {
assoc_name = cti_plat_get_csdev_or_node_name(cs_fwnode,
&csdev);
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index 51c801c05e5c..f775cbee12b8 100644
--- a/drivers/hwtracing/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -132,7 +132,7 @@ static void __etb_enable_hw(struct etb_drvdata *drvdata)
static int etb_enable_hw(struct etb_drvdata *drvdata)
{
- int rc = coresight_claim_device(drvdata->base);
+ int rc = coresight_claim_device(drvdata->csdev);
if (rc)
return rc;
@@ -252,6 +252,7 @@ static void __etb_disable_hw(struct etb_drvdata *drvdata)
{
u32 ffcr;
struct device *dev = &drvdata->csdev->dev;
+ struct csdev_access *csa = &drvdata->csdev->access;
CS_UNLOCK(drvdata->base);
@@ -263,7 +264,7 @@ static void __etb_disable_hw(struct etb_drvdata *drvdata)
ffcr |= ETB_FFCR_FON_MAN;
writel_relaxed(ffcr, drvdata->base + ETB_FFCR);
- if (coresight_timeout(drvdata->base, ETB_FFCR, ETB_FFCR_BIT, 0)) {
+ if (coresight_timeout(csa, ETB_FFCR, ETB_FFCR_BIT, 0)) {
dev_err(dev,
"timeout while waiting for completion of Manual Flush\n");
}
@@ -271,7 +272,7 @@ static void __etb_disable_hw(struct etb_drvdata *drvdata)
/* disable trace capture */
writel_relaxed(0x0, drvdata->base + ETB_CTL_REG);
- if (coresight_timeout(drvdata->base, ETB_FFSR, ETB_FFSR_BIT, 1)) {
+ if (coresight_timeout(csa, ETB_FFSR, ETB_FFSR_BIT, 1)) {
dev_err(dev,
"timeout while waiting for Formatter to Stop\n");
}
@@ -344,7 +345,7 @@ static void etb_disable_hw(struct etb_drvdata *drvdata)
{
__etb_disable_hw(drvdata);
etb_dump_hw(drvdata);
- coresight_disclaim_device(drvdata->base);
+ coresight_disclaim_device(drvdata->csdev);
}
static int etb_disable(struct coresight_device *csdev)
@@ -757,6 +758,7 @@ static int etb_probe(struct amba_device *adev, const struct amba_id *id)
return PTR_ERR(base);
drvdata->base = base;
+ desc.access = CSDEV_ACCESS_IOMEM(base);
spin_lock_init(&drvdata->spinlock);
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
index bdc34ca449f7..51c7f9f4eea6 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -24,20 +24,67 @@
static struct pmu etm_pmu;
static bool etm_perf_up;
-static DEFINE_PER_CPU(struct perf_output_handle, ctx_handle);
+/*
+ * An ETM context for a running event includes the perf aux handle
+ * and aux_data. For ETM, the aux_data (etm_event_data), consists of
+ * the trace path and the sink configuration. The event data is accessible
+ * via perf_get_aux(handle). However, a sink could "end" a perf output
+ * handle via the IRQ handler. And if the "sink" encounters a failure
+ * to "begin" another session (e.g due to lack of space in the buffer),
+ * the handle will be cleared. Thus, the event_data may not be accessible
+ * to "begin" another session (e.g. due to lack of space in the buffer),
+ * for stopping the trace path. The event_data is guaranteed to stay alive
+ * until "free_aux()", which cannot happen as long as the event is active on
+ * the ETM. Thus the event_data for the session must be part of the ETM context
+ * to make sure we can disable the trace path.
+ */
+struct etm_ctxt {
+ struct perf_output_handle handle;
+ struct etm_event_data *event_data;
+};
+
+static DEFINE_PER_CPU(struct etm_ctxt, etm_ctxt);
static DEFINE_PER_CPU(struct coresight_device *, csdev_src);
-/* ETMv3.5/PTM's ETMCR is 'config' */
+/*
+ * The PMU formats were originally for ETMv3.5/PTM's ETMCR 'config';
+ * now take them as general formats and apply to all ETMs.
+ */
PMU_FORMAT_ATTR(cycacc, "config:" __stringify(ETM_OPT_CYCACC));
-PMU_FORMAT_ATTR(contextid, "config:" __stringify(ETM_OPT_CTXTID));
+/* contextid1 enables tracing CONTEXTIDR_EL1 for ETMv4 */
+PMU_FORMAT_ATTR(contextid1, "config:" __stringify(ETM_OPT_CTXTID));
+/* contextid2 enables tracing CONTEXTIDR_EL2 for ETMv4 */
+PMU_FORMAT_ATTR(contextid2, "config:" __stringify(ETM_OPT_CTXTID2));
PMU_FORMAT_ATTR(timestamp, "config:" __stringify(ETM_OPT_TS));
PMU_FORMAT_ATTR(retstack, "config:" __stringify(ETM_OPT_RETSTK));
/* Sink ID - same for all ETMs */
PMU_FORMAT_ATTR(sinkid, "config2:0-31");
+/*
+ * contextid always traces the "PID". The PID is in CONTEXTIDR_EL1
+ * when the kernel is running at EL1; when the kernel is at EL2,
+ * the PID is in CONTEXTIDR_EL2.
+ */
+static ssize_t format_attr_contextid_show(struct device *dev,
+ struct device_attribute *attr,
+ char *page)
+{
+ int pid_fmt = ETM_OPT_CTXTID;
+
+#if IS_ENABLED(CONFIG_CORESIGHT_SOURCE_ETM4X)
+ pid_fmt = is_kernel_in_hyp_mode() ? ETM_OPT_CTXTID2 : ETM_OPT_CTXTID;
+#endif
+ return sprintf(page, "config:%d\n", pid_fmt);
+}
+
+static struct device_attribute format_attr_contextid =
+ __ATTR(contextid, 0444, format_attr_contextid_show, NULL);
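+
+/*
+ * Example (sketch): with the "cs_etm" PMU registered, reading
+ * /sys/bus/event_source/devices/cs_etm/format/contextid returns
+ * "config:<ETM_OPT_CTXTID>" when the kernel runs at EL1 and
+ * "config:<ETM_OPT_CTXTID2>" under VHE (kernel at EL2), so
+ * "perf record -e cs_etm/contextid/ ..." always traces the task PID.
+ */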
+
static struct attribute *etm_config_formats_attr[] = {
&format_attr_cycacc.attr,
&format_attr_contextid.attr,
+ &format_attr_contextid1.attr,
+ &format_attr_contextid2.attr,
&format_attr_timestamp.attr,
&format_attr_retstack.attr,
&format_attr_sinkid.attr,
@@ -204,6 +251,25 @@ static void etm_free_aux(void *data)
schedule_work(&event_data->work);
}
+/*
+ * Check if two given sinks are compatible with each other,
+ * so that they can use the same sink buffers, when an event
+ * moves around.
+ */
+static bool sinks_compatible(struct coresight_device *a,
+ struct coresight_device *b)
+{
+ if (!a || !b)
+ return false;
+ /*
+ * If the sinks are of the same subtype and driven
+ * by the same driver, we can use the same buffer
+ * on these sinks.
+ */
+ return (a->subtype.sink_subtype == b->subtype.sink_subtype) &&
+ (sink_ops(a) == sink_ops(b));
+}
+
static void *etm_setup_aux(struct perf_event *event, void **pages,
int nr_pages, bool overwrite)
{
@@ -211,6 +277,7 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
int cpu = event->cpu;
cpumask_t *mask;
struct coresight_device *sink = NULL;
+ struct coresight_device *user_sink = NULL, *last_sink = NULL;
struct etm_event_data *event_data = NULL;
event_data = alloc_event_data(cpu);
@@ -221,7 +288,7 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
/* First get the selected sink from user space. */
if (event->attr.config2) {
id = (u32)event->attr.config2;
- sink = coresight_get_sink_by_id(id);
+ sink = user_sink = coresight_get_sink_by_id(id);
}
mask = &event_data->mask;
@@ -249,14 +316,33 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
}
/*
- * No sink provided - look for a default sink for one of the
- * devices. At present we only support topology where all CPUs
- * use the same sink [N:1], so only need to find one sink. The
- * coresight_build_path later will remove any CPU that does not
- * attach to the sink, or if we have not found a sink.
+ * No sink provided - look for a default sink for all the ETMs,
+ * where this event can be scheduled.
+ * We allocate the sink specific buffers only once for this
+ * event. If the ETMs have different default sink devices, we
+ * can only use a single "type" of sink as the event can carry
+ * only one sink specific buffer. Thus we have to make sure
+ * that the sinks are of the same type and driven by the same
+ * driver, as the one we allocate the buffer for. As such
+ * we choose the first sink and check if the remaining ETMs
+ * have a compatible default sink. We don't trace on a CPU
+ * if the sink is not compatible.
*/
- if (!sink)
+ if (!user_sink) {
+ /* Find the default sink for this ETM */
sink = coresight_find_default_sink(csdev);
+ if (!sink) {
+ cpumask_clear_cpu(cpu, mask);
+ continue;
+ }
+
+ /* Check if this sink compatible with the last sink */
+ if (last_sink && !sinks_compatible(last_sink, sink)) {
+ cpumask_clear_cpu(cpu, mask);
+ continue;
+ }
+ last_sink = sink;
+ }
/*
* Building a path doesn't enable it, it simply builds a
@@ -284,7 +370,12 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
if (!sink_ops(sink)->alloc_buffer || !sink_ops(sink)->free_buffer)
goto err;
- /* Allocate the sink buffer for this session */
+ /*
+ * Allocate the sink buffer for this session. All the sinks
+ * where this event can be scheduled are ensured to be of the
+ * same type. Thus the same sink configuration is used by the
+ * sinks.
+ */
event_data->snk_config =
sink_ops(sink)->alloc_buffer(sink, event, pages,
nr_pages, overwrite);
@@ -304,13 +395,18 @@ static void etm_event_start(struct perf_event *event, int flags)
{
int cpu = smp_processor_id();
struct etm_event_data *event_data;
- struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle);
+ struct etm_ctxt *ctxt = this_cpu_ptr(&etm_ctxt);
+ struct perf_output_handle *handle = &ctxt->handle;
struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
struct list_head *path;
if (!csdev)
goto fail;
+ /* Have we messed up our tracking ? */
+ if (WARN_ON(ctxt->event_data))
+ goto fail;
+
/*
* Deal with the ring buffer API and get a handle on the
* session's information.
@@ -346,6 +442,8 @@ static void etm_event_start(struct perf_event *event, int flags)
if (source_ops(csdev)->enable(csdev, event, CS_MODE_PERF))
goto fail_disable_path;
+ /* Save the event_data for this ETM */
+ ctxt->event_data = event_data;
out:
return;
@@ -364,13 +462,30 @@ static void etm_event_stop(struct perf_event *event, int mode)
int cpu = smp_processor_id();
unsigned long size;
struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
- struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle);
- struct etm_event_data *event_data = perf_get_aux(handle);
+ struct etm_ctxt *ctxt = this_cpu_ptr(&etm_ctxt);
+ struct perf_output_handle *handle = &ctxt->handle;
+ struct etm_event_data *event_data;
struct list_head *path;
+ /*
+ * If we still have access to the event_data via handle,
+ * confirm that we haven't messed up the tracking.
+ */
+ if (handle->event &&
+ WARN_ON(perf_get_aux(handle) != ctxt->event_data))
+ return;
+
+ event_data = ctxt->event_data;
+ /* Clear the event_data as this ETM is stopping the trace. */
+ ctxt->event_data = NULL;
+
if (event->hw.state == PERF_HES_STOPPED)
return;
+ /* We must have a valid event_data for a running event */
+ if (WARN_ON(!event_data))
+ return;
+
if (!csdev)
return;
@@ -388,7 +503,13 @@ static void etm_event_stop(struct perf_event *event, int mode)
/* tell the core */
event->hw.state = PERF_HES_STOPPED;
- if (mode & PERF_EF_UPDATE) {
+ /*
+ * If the handle is not bound to an event anymore
+ * (e.g, the sink driver was unable to restart the
+ * handle due to lack of buffer space), we don't
+ * have to do anything here.
+ */
+ if (handle->event && (mode & PERF_EF_UPDATE)) {
if (WARN_ON_ONCE(handle->event != event))
return;
@@ -545,6 +666,7 @@ int etm_perf_add_symlink_sink(struct coresight_device *csdev)
struct device *pmu_dev = etm_pmu.dev;
struct device *dev = &csdev->dev;
struct dev_ext_attribute *ea;
+ char sink_name[sizeof("sink_tmc_etxxxxx")];
if (csdev->type != CORESIGHT_DEV_TYPE_SINK &&
csdev->type != CORESIGHT_DEV_TYPE_LINKSINK)
@@ -579,6 +701,10 @@ int etm_perf_add_symlink_sink(struct coresight_device *csdev)
if (!ret)
csdev->ea = ea;
+ /* Create a symlink to the actual sink device */
+ sprintf(sink_name, "sink_%s", name);
+ ret = sysfs_create_link(&pmu_dev->kobj, &dev->kobj, sink_name);
+
return ret;
}
@@ -586,6 +712,8 @@ void etm_perf_del_symlink_sink(struct coresight_device *csdev)
{
struct device *pmu_dev = etm_pmu.dev;
struct dev_ext_attribute *ea = csdev->ea;
+ const char *name;
+ char sink_name[sizeof("sink_tmc_etxxxxx")];
if (csdev->type != CORESIGHT_DEV_TYPE_SINK &&
csdev->type != CORESIGHT_DEV_TYPE_LINKSINK)
@@ -594,6 +722,9 @@ void etm_perf_del_symlink_sink(struct coresight_device *csdev)
if (!ea)
return;
+ name = dev_name(&csdev->dev);
+ sprintf(sink_name, "sink_%s", name);
+ sysfs_remove_link(&pmu_dev->kobj, sink_name);
sysfs_remove_file_from_group(&pmu_dev->kobj,
&ea->attr.attr, "sinks");
csdev->ea = NULL;
diff --git a/drivers/hwtracing/coresight/coresight-etm3x-core.c b/drivers/hwtracing/coresight/coresight-etm3x-core.c
index 683a69e88efd..cf64ce73a741 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x-core.c
@@ -358,10 +358,11 @@ static int etm_enable_hw(struct etm_drvdata *drvdata)
int i, rc;
u32 etmcr;
struct etm_config *config = &drvdata->config;
+ struct coresight_device *csdev = drvdata->csdev;
CS_UNLOCK(drvdata->base);
- rc = coresight_claim_device_unlocked(drvdata->base);
+ rc = coresight_claim_device_unlocked(csdev);
if (rc)
goto done;
@@ -566,6 +567,7 @@ static void etm_disable_hw(void *info)
int i;
struct etm_drvdata *drvdata = info;
struct etm_config *config = &drvdata->config;
+ struct coresight_device *csdev = drvdata->csdev;
CS_UNLOCK(drvdata->base);
etm_set_prog(drvdata);
@@ -577,7 +579,7 @@ static void etm_disable_hw(void *info)
config->cntr_val[i] = etm_readl(drvdata, ETMCNTVRn(i));
etm_set_pwrdwn(drvdata);
- coresight_disclaim_device_unlocked(drvdata->base);
+ coresight_disclaim_device_unlocked(csdev);
CS_LOCK(drvdata->base);
@@ -602,7 +604,7 @@ static void etm_disable_perf(struct coresight_device *csdev)
* power down the tracer.
*/
etm_set_pwrdwn(drvdata);
- coresight_disclaim_device_unlocked(drvdata->base);
+ coresight_disclaim_device_unlocked(csdev);
CS_LOCK(drvdata->base);
}
@@ -839,6 +841,7 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
return PTR_ERR(base);
drvdata->base = base;
+ desc.access = CSDEV_ACCESS_IOMEM(base);
spin_lock_init(&drvdata->spinlock);
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
index 99df453575f5..7e5b54d259e6 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
@@ -3,6 +3,7 @@
* Copyright (c) 2014, The Linux Foundation. All rights reserved.
*/
+#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
@@ -26,14 +27,19 @@
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
+
+#include <asm/barrier.h>
#include <asm/sections.h>
+#include <asm/sysreg.h>
#include <asm/local.h>
#include <asm/virt.h>
#include "coresight-etm4x.h"
#include "coresight-etm-perf.h"
+#include "coresight-quirks.h"
static int boot_enable;
module_param(boot_enable, int, 0444);
@@ -43,7 +49,7 @@ MODULE_PARM_DESC(boot_enable, "Enable tracing on boot");
#define PARAM_PM_SAVE_NEVER 1 /* never save any state */
#define PARAM_PM_SAVE_SELF_HOSTED 2 /* save self-hosted state only */
-static int pm_save_enable = PARAM_PM_SAVE_FIRMWARE;
+static int pm_save_enable = PARAM_PM_SAVE_NEVER;
module_param(pm_save_enable, int, 0444);
MODULE_PARM_DESC(pm_save_enable,
"Save/restore state on power down: 1 = never, 2 = self-hosted");
@@ -56,32 +62,163 @@ static u64 etm4_get_access_type(struct etmv4_config *config);
static enum cpuhp_state hp_online;
-static void etm4_os_unlock(struct etmv4_drvdata *drvdata)
+struct etm4_init_arg {
+ unsigned int pid;
+ struct etmv4_drvdata *drvdata;
+ struct csdev_access *csa;
+};
+
+/*
+ * Check if TRCSSPCICRn(i) is implemented for a given instance.
+ *
+ * TRCSSPCICRn is implemented only if :
+ * TRCSSPCICR<n> is implemented only if all of the following are true:
+ *	TRCIDR4.NUMSSCC > n.
+ *	TRCIDR4.NUMPC > 0b0000.
+ */
+static inline bool etm4x_sspcicrn_present(struct etmv4_drvdata *drvdata, int n)
{
- /* Writing 0 to TRCOSLAR unlocks the trace registers */
- writel_relaxed(0x0, drvdata->base + TRCOSLAR);
- drvdata->os_unlock = true;
- isb();
+ return (n < drvdata->nr_ss_cmp) &&
+ drvdata->nr_pe &&
+ (drvdata->config.ss_status[n] & TRCSSCSRn_PC);
}
-static void etm4_os_lock(struct etmv4_drvdata *drvdata)
+u64 etm4x_sysreg_read(u32 offset, bool _relaxed, bool _64bit)
{
- /* Writing 0x1 to TRCOSLAR locks the trace registers */
- writel_relaxed(0x1, drvdata->base + TRCOSLAR);
- drvdata->os_unlock = false;
- isb();
+ u64 res = 0;
+
+ switch (offset) {
+ ETM4x_READ_SYSREG_CASES(res)
+ default:
+ pr_warn_ratelimited("etm4x: trying to read unsupported register @%x\n",
+ offset);
+ }
+
+ if (!_relaxed)
+ __iormb(res); /* Imitate the !relaxed I/O helpers */
+
+ return res;
+}
+
+void etm4x_sysreg_write(u64 val, u32 offset, bool _relaxed, bool _64bit)
+{
+ if (!_relaxed)
+ __iowmb(); /* Imitate the !relaxed I/O helpers */
+ if (!_64bit)
+ val &= GENMASK(31, 0);
+
+ switch (offset) {
+ ETM4x_WRITE_SYSREG_CASES(val)
+ default:
+ pr_warn_ratelimited("etm4x: trying to write to unsupported register @%x\n",
+ offset);
+ }
+}
+
+static u64 ete_sysreg_read(u32 offset, bool _relaxed, bool _64bit)
+{
+ u64 res = 0;
+
+ switch (offset) {
+ ETE_READ_CASES(res)
+ default:
+ pr_warn_ratelimited("ete: trying to read unsupported register @%x\n",
+ offset);
+ }
+
+ if (!_relaxed)
+ __iormb(res); /* Imitate the !relaxed I/O helpers */
+
+ return res;
}
-static bool etm4_arch_supported(u8 arch)
+static void ete_sysreg_write(u64 val, u32 offset, bool _relaxed, bool _64bit)
{
- /* Mask out the minor version number */
- switch (arch & 0xf0) {
- case ETM_ARCH_V4:
+ if (!_relaxed)
+ __iowmb(); /* Imitate the !relaxed I/O helpers */
+ if (!_64bit)
+ val &= GENMASK(31, 0);
+
+ switch (offset) {
+ ETE_WRITE_CASES(val)
+ default:
+ pr_warn_ratelimited("ete: trying to write to unsupported register @%x\n",
+ offset);
+ }
+}
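The sysreg read/write helpers above plug into the csdev_access descriptor; the generic etm4x accessors used throughout the rest of this patch are then expected to fan out to either MMIO or these callbacks. A minimal sketch of that dispatch, assuming the io_mem/base/read/write members this series relies on (the wrapper name below is illustrative, the real helpers live in the coresight headers):

/* Illustrative only: relaxed 32-bit write routed through csdev_access. */
static inline void example_csa_relaxed_write32(struct csdev_access *csa,
					       u32 val, u32 offset)
{
	if (csa->io_mem)
		writel_relaxed(val, csa->base + offset);	/* MMIO path */
	else
		csa->write(val, offset, true, false);		/* sysreg path, 32-bit */
}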
+
+static void etm_detect_os_lock(struct etmv4_drvdata *drvdata,
+ struct csdev_access *csa)
+{
+ u32 oslsr = etm4x_relaxed_read32(csa, TRCOSLSR);
+
+ drvdata->os_lock_model = ETM_OSLSR_OSLM(oslsr);
+}
+
+static void etm_write_os_lock(struct etmv4_drvdata *drvdata,
+ struct csdev_access *csa, u32 val)
+{
+ val = !!val;
+
+ switch (drvdata->os_lock_model) {
+ case ETM_OSLOCK_PRESENT:
+ etm4x_relaxed_write32(csa, val, TRCOSLAR);
+ break;
+ case ETM_OSLOCK_PE:
+ write_sysreg_s(val, SYS_OSLAR_EL1);
break;
default:
- return false;
+ pr_warn_once("CPU%d: Unsupported Trace OSLock model: %x\n",
+ smp_processor_id(), drvdata->os_lock_model);
+ fallthrough;
+ case ETM_OSLOCK_NI:
+ return;
}
- return true;
+ isb();
+}
+
+static inline void etm4_os_unlock_csa(struct etmv4_drvdata *drvdata,
+ struct csdev_access *csa)
+{
+ /* Under task isolation this may run on the primary core, so skip the check */
+#ifndef CONFIG_TASK_ISOLATION
+ WARN_ON(drvdata->cpu != smp_processor_id());
+#endif
+
+ /* Writing 0 to OS Lock unlocks the trace unit registers */
+ etm_write_os_lock(drvdata, csa, 0x0);
+ drvdata->os_unlock = true;
+}
+
+static void etm4_os_unlock(struct etmv4_drvdata *drvdata)
+{
+ if (!WARN_ON(!drvdata->csdev))
+ etm4_os_unlock_csa(drvdata, &drvdata->csdev->access);
+}
+
+static void etm4_os_lock(struct etmv4_drvdata *drvdata)
+{
+ if (WARN_ON(!drvdata->csdev))
+ return;
+ /* Writing 0x1 to OS Lock locks the trace registers */
+ etm_write_os_lock(drvdata, &drvdata->csdev->access, 0x1);
+ drvdata->os_unlock = false;
+}
+
+static void etm4_cs_lock(struct etmv4_drvdata *drvdata,
+ struct csdev_access *csa)
+{
+ /* Software Lock is only accessible via memory mapped interface */
+ if (csa->io_mem)
+ CS_LOCK(csa->base);
+}
+
+static void etm4_cs_unlock(struct etmv4_drvdata *drvdata,
+ struct csdev_access *csa)
+{
+ if (csa->io_mem)
+ CS_UNLOCK(csa->base);
}
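Callers are expected to combine the lock helpers above in a fixed order: the Software Lock (CS_LOCK/CS_UNLOCK) only exists on the memory mapped interface, while the OS Lock is driven through whichever interface the access descriptor provides. A purely illustrative sketch of the sequence the enable path below follows (error handling omitted):

static void example_program_etm(struct etmv4_drvdata *drvdata)
{
	struct csdev_access *csa = &drvdata->csdev->access;

	etm4_cs_unlock(drvdata, csa);		/* Software Lock: no-op for sysreg access */
	etm4_os_unlock_csa(drvdata, csa);	/* OS Lock: either interface */
	/* ... program trace registers with etm4x_relaxed_write32(csa, ...) ... */
	etm4_cs_lock(drvdata, csa);
}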
static int etm4_cpu_id(struct coresight_device *csdev)
@@ -103,61 +240,159 @@ struct etm4_enable_arg {
int rc;
};
+#ifdef CONFIG_ETM4X_IMPDEF_FEATURE
+
+#define HISI_HIP08_AMBA_ID 0x000b6d01
+#define ETM4_AMBA_MASK 0xfffff
+#define HISI_HIP08_CORE_COMMIT_MASK 0x3000
+#define HISI_HIP08_CORE_COMMIT_SHIFT 12
+#define HISI_HIP08_CORE_COMMIT_FULL 0b00
+#define HISI_HIP08_CORE_COMMIT_LVL_1 0b01
+#define HISI_HIP08_CORE_COMMIT_REG sys_reg(3, 1, 15, 2, 5)
+
+struct etm4_arch_features {
+ void (*arch_callback)(bool enable);
+};
+
+static bool etm4_hisi_match_pid(unsigned int id)
+{
+ return (id & ETM4_AMBA_MASK) == HISI_HIP08_AMBA_ID;
+}
+
+static void etm4_hisi_config_core_commit(bool enable)
+{
+ u8 commit = enable ? HISI_HIP08_CORE_COMMIT_LVL_1 :
+ HISI_HIP08_CORE_COMMIT_FULL;
+ u64 val;
+
+ /*
+ * Bits 12 and 13 of HISI_HIP08_CORE_COMMIT_REG are used together to
+ * set the core-commit level: 2'b00 means the CPU runs at full speed,
+ * while 2'b01, 2'b10 and 2'b11 reduce the pipeline speed, with 2'b01
+ * being level-1 (the minimum). Hence bits 12 and 13 must be cleared
+ * together before the new value is set.
+ */
+ val = read_sysreg_s(HISI_HIP08_CORE_COMMIT_REG);
+ val &= ~HISI_HIP08_CORE_COMMIT_MASK;
+ val |= commit << HISI_HIP08_CORE_COMMIT_SHIFT;
+ write_sysreg_s(val, HISI_HIP08_CORE_COMMIT_REG);
+}
+
+static struct etm4_arch_features etm4_features[] = {
+ [ETM4_IMPDEF_HISI_CORE_COMMIT] = {
+ .arch_callback = etm4_hisi_config_core_commit,
+ },
+ {},
+};
+
+static void etm4_enable_arch_specific(struct etmv4_drvdata *drvdata)
+{
+ struct etm4_arch_features *ftr;
+ int bit;
+
+ for_each_set_bit(bit, drvdata->arch_features, ETM4_IMPDEF_FEATURE_MAX) {
+ ftr = &etm4_features[bit];
+
+ if (ftr->arch_callback)
+ ftr->arch_callback(true);
+ }
+}
+
+static void etm4_disable_arch_specific(struct etmv4_drvdata *drvdata)
+{
+ struct etm4_arch_features *ftr;
+ int bit;
+
+ for_each_set_bit(bit, drvdata->arch_features, ETM4_IMPDEF_FEATURE_MAX) {
+ ftr = &etm4_features[bit];
+
+ if (ftr->arch_callback)
+ ftr->arch_callback(false);
+ }
+}
+
+static void etm4_check_arch_features(struct etmv4_drvdata *drvdata,
+ unsigned int id)
+{
+ if (etm4_hisi_match_pid(id))
+ set_bit(ETM4_IMPDEF_HISI_CORE_COMMIT, drvdata->arch_features);
+}
+#else
+static void etm4_enable_arch_specific(struct etmv4_drvdata *drvdata)
+{
+}
+
+static void etm4_disable_arch_specific(struct etmv4_drvdata *drvdata)
+{
+}
+
+static void etm4_check_arch_features(struct etmv4_drvdata *drvdata,
+ unsigned int id)
+{
+}
+#endif /* CONFIG_ETM4X_IMPDEF_FEATURE */
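The implementation-defined feature handling above is keyed purely off the AMBA peripheral id. A hedged sketch of how a further erratum callback could slot into the same scheme; ETM4_IMPDEF_EXAMPLE_FEATURE and example_vendor_callback() are placeholders, not symbols from this patch:

#ifdef CONFIG_ETM4X_IMPDEF_FEATURE
static void example_vendor_callback(bool enable)
{
	/* vendor specific system register programming would go here */
}

/*
 * etm4_features[] would gain an entry such as
 *	[ETM4_IMPDEF_EXAMPLE_FEATURE] = {
 *		.arch_callback = example_vendor_callback,
 *	},
 * and etm4_check_arch_features() would set that bit for a matching PID.
 */
#endif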
+
static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
{
int i, rc;
struct etmv4_config *config = &drvdata->config;
- struct device *etm_dev = &drvdata->csdev->dev;
+ struct coresight_device *csdev = drvdata->csdev;
+ struct device *etm_dev = &csdev->dev;
+ struct csdev_access *csa = &csdev->access;
- CS_UNLOCK(drvdata->base);
+
+ etm4_cs_unlock(drvdata, csa);
+ etm4_enable_arch_specific(drvdata);
etm4_os_unlock(drvdata);
- rc = coresight_claim_device_unlocked(drvdata->base);
+ rc = coresight_claim_device_unlocked(csdev);
if (rc)
goto done;
/* Disable the trace unit before programming trace registers */
- writel_relaxed(0, drvdata->base + TRCPRGCTLR);
+ etm4x_relaxed_write32(csa, 0, TRCPRGCTLR);
+
+ /*
+ * If we use system instructions, we need to synchronize the
+ * write to the TRCPRGCTLR, before accessing the TRCSTATR.
+ * See ARM IHI0064F, section
+ * "4.3.7 Synchronization of register updates"
+ */
+ if (!csa->io_mem)
+ isb();
/* wait for TRCSTATR.IDLE to go up */
- if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 1))
+ if (coresight_timeout(csa, TRCSTATR, TRCSTATR_IDLE_BIT, 1))
dev_err(etm_dev,
"timeout while waiting for Idle Trace Status\n");
if (drvdata->nr_pe)
- writel_relaxed(config->pe_sel, drvdata->base + TRCPROCSELR);
- writel_relaxed(config->cfg, drvdata->base + TRCCONFIGR);
+ etm4x_relaxed_write32(csa, config->pe_sel, TRCPROCSELR);
+ etm4x_relaxed_write32(csa, config->cfg, TRCCONFIGR);
/* nothing specific implemented */
- writel_relaxed(0x0, drvdata->base + TRCAUXCTLR);
- writel_relaxed(config->eventctrl0, drvdata->base + TRCEVENTCTL0R);
- writel_relaxed(config->eventctrl1, drvdata->base + TRCEVENTCTL1R);
+ etm4x_relaxed_write32(csa, 0x0, TRCAUXCTLR);
+ etm4x_relaxed_write32(csa, config->eventctrl0, TRCEVENTCTL0R);
+ etm4x_relaxed_write32(csa, config->eventctrl1, TRCEVENTCTL1R);
if (drvdata->stallctl)
- writel_relaxed(config->stall_ctrl, drvdata->base + TRCSTALLCTLR);
- writel_relaxed(config->ts_ctrl, drvdata->base + TRCTSCTLR);
- writel_relaxed(config->syncfreq, drvdata->base + TRCSYNCPR);
- writel_relaxed(config->ccctlr, drvdata->base + TRCCCCTLR);
- writel_relaxed(config->bb_ctrl, drvdata->base + TRCBBCTLR);
- writel_relaxed(drvdata->trcid, drvdata->base + TRCTRACEIDR);
- writel_relaxed(config->vinst_ctrl, drvdata->base + TRCVICTLR);
- writel_relaxed(config->viiectlr, drvdata->base + TRCVIIECTLR);
- writel_relaxed(config->vissctlr,
- drvdata->base + TRCVISSCTLR);
+ etm4x_relaxed_write32(csa, config->stall_ctrl, TRCSTALLCTLR);
+ etm4x_relaxed_write32(csa, config->ts_ctrl, TRCTSCTLR);
+ etm4x_relaxed_write32(csa, config->syncfreq, TRCSYNCPR);
+ etm4x_relaxed_write32(csa, config->ccctlr, TRCCCCTLR);
+ etm4x_relaxed_write32(csa, config->bb_ctrl, TRCBBCTLR);
+ etm4x_relaxed_write32(csa, drvdata->trcid, TRCTRACEIDR);
+ etm4x_relaxed_write32(csa, config->vinst_ctrl, TRCVICTLR);
+ etm4x_relaxed_write32(csa, config->viiectlr, TRCVIIECTLR);
+ etm4x_relaxed_write32(csa, config->vissctlr, TRCVISSCTLR);
if (drvdata->nr_pe_cmp)
- writel_relaxed(config->vipcssctlr,
- drvdata->base + TRCVIPCSSCTLR);
+ etm4x_relaxed_write32(csa, config->vipcssctlr, TRCVIPCSSCTLR);
for (i = 0; i < drvdata->nrseqstate - 1; i++)
- writel_relaxed(config->seq_ctrl[i],
- drvdata->base + TRCSEQEVRn(i));
- writel_relaxed(config->seq_rst, drvdata->base + TRCSEQRSTEVR);
- writel_relaxed(config->seq_state, drvdata->base + TRCSEQSTR);
- writel_relaxed(config->ext_inp, drvdata->base + TRCEXTINSELR);
+ etm4x_relaxed_write32(csa, config->seq_ctrl[i], TRCSEQEVRn(i));
+ etm4x_relaxed_write32(csa, config->seq_rst, TRCSEQRSTEVR);
+ etm4x_relaxed_write32(csa, config->seq_state, TRCSEQSTR);
+ etm4x_relaxed_write32(csa, config->ext_inp, TRCEXTINSELR);
for (i = 0; i < drvdata->nr_cntr; i++) {
- writel_relaxed(config->cntrldvr[i],
- drvdata->base + TRCCNTRLDVRn(i));
- writel_relaxed(config->cntr_ctrl[i],
- drvdata->base + TRCCNTCTLRn(i));
- writel_relaxed(config->cntr_val[i],
- drvdata->base + TRCCNTVRn(i));
+ etm4x_relaxed_write32(csa, config->cntrldvr[i], TRCCNTRLDVRn(i));
+ etm4x_relaxed_write32(csa, config->cntr_ctrl[i], TRCCNTCTLRn(i));
+ etm4x_relaxed_write32(csa, config->cntr_val[i], TRCCNTVRn(i));
}
/*
@@ -165,54 +400,59 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
* such start at 2.
*/
for (i = 2; i < drvdata->nr_resource * 2; i++)
- writel_relaxed(config->res_ctrl[i],
- drvdata->base + TRCRSCTLRn(i));
+ etm4x_relaxed_write32(csa, config->res_ctrl[i], TRCRSCTLRn(i));
for (i = 0; i < drvdata->nr_ss_cmp; i++) {
/* always clear status bit on restart if using single-shot */
if (config->ss_ctrl[i] || config->ss_pe_cmp[i])
config->ss_status[i] &= ~BIT(31);
- writel_relaxed(config->ss_ctrl[i],
- drvdata->base + TRCSSCCRn(i));
- writel_relaxed(config->ss_status[i],
- drvdata->base + TRCSSCSRn(i));
- writel_relaxed(config->ss_pe_cmp[i],
- drvdata->base + TRCSSPCICRn(i));
+ etm4x_relaxed_write32(csa, config->ss_ctrl[i], TRCSSCCRn(i));
+ etm4x_relaxed_write32(csa, config->ss_status[i], TRCSSCSRn(i));
+ if (etm4x_sspcicrn_present(drvdata, i))
+ etm4x_relaxed_write32(csa, config->ss_pe_cmp[i], TRCSSPCICRn(i));
}
for (i = 0; i < drvdata->nr_addr_cmp; i++) {
- writeq_relaxed(config->addr_val[i],
- drvdata->base + TRCACVRn(i));
- writeq_relaxed(config->addr_acc[i],
- drvdata->base + TRCACATRn(i));
+ etm4x_relaxed_write64(csa, config->addr_val[i], TRCACVRn(i));
+ etm4x_relaxed_write64(csa, config->addr_acc[i], TRCACATRn(i));
}
for (i = 0; i < drvdata->numcidc; i++)
- writeq_relaxed(config->ctxid_pid[i],
- drvdata->base + TRCCIDCVRn(i));
- writel_relaxed(config->ctxid_mask0, drvdata->base + TRCCIDCCTLR0);
+ etm4x_relaxed_write64(csa, config->ctxid_pid[i], TRCCIDCVRn(i));
+ etm4x_relaxed_write32(csa, config->ctxid_mask0, TRCCIDCCTLR0);
if (drvdata->numcidc > 4)
- writel_relaxed(config->ctxid_mask1, drvdata->base + TRCCIDCCTLR1);
+ etm4x_relaxed_write32(csa, config->ctxid_mask1, TRCCIDCCTLR1);
for (i = 0; i < drvdata->numvmidc; i++)
- writeq_relaxed(config->vmid_val[i],
- drvdata->base + TRCVMIDCVRn(i));
- writel_relaxed(config->vmid_mask0, drvdata->base + TRCVMIDCCTLR0);
+ etm4x_relaxed_write64(csa, config->vmid_val[i], TRCVMIDCVRn(i));
+ etm4x_relaxed_write32(csa, config->vmid_mask0, TRCVMIDCCTLR0);
if (drvdata->numvmidc > 4)
- writel_relaxed(config->vmid_mask1, drvdata->base + TRCVMIDCCTLR1);
+ etm4x_relaxed_write32(csa, config->vmid_mask1, TRCVMIDCCTLR1);
if (!drvdata->skip_power_up) {
+ u32 trcpdcr = etm4x_relaxed_read32(csa, TRCPDCR);
+
/*
* Request to keep the trace unit powered and also
* emulation of powerdown
*/
- writel_relaxed(readl_relaxed(drvdata->base + TRCPDCR) |
- TRCPDCR_PU, drvdata->base + TRCPDCR);
+ etm4x_relaxed_write32(csa, trcpdcr | TRCPDCR_PU, TRCPDCR);
}
+ /*
+ * ETE mandates that TRCRSR is written before the
+ * trace unit is enabled.
+ */
+ if (etm4x_is_ete(drvdata))
+ etm4x_relaxed_write32(csa, TRCRSR_TA, TRCRSR);
+
/* Enable the trace unit */
- writel_relaxed(1, drvdata->base + TRCPRGCTLR);
+ etm4x_relaxed_write32(csa, 1, TRCPRGCTLR);
+
+ /* Synchronize the register updates for sysreg access */
+ if (!csa->io_mem)
+ isb();
/* wait for TRCSTATR.IDLE to go back down to '0' */
- if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 0))
+ if (coresight_timeout(csa, TRCSTATR, TRCSTATR_IDLE_BIT, 0))
dev_err(etm_dev,
"timeout while waiting for Idle Trace Status\n");
@@ -224,7 +464,21 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
isb();
done:
- CS_LOCK(drvdata->base);
+ etm4_cs_lock(drvdata, csa);
+
+ /* For supporting SW sync insertion */
+ if (drvdata->etm_quirks & CORESIGHT_QUIRK_ETM_SW_SYNC) {
+ /*
+ * ETM sync insertions are gated in the ETR timer
+ * handler based on the hardware state.
+ */
+ drvdata->hw_state = USR_START;
+
+ /*
+ * The global timer handler is not associated with a
+ * specific ETM core, so it needs to know the current
+ * list of active ETMs.
+ */
+ coresight_etm_active_enable(drvdata->cpu);
+ }
dev_dbg(etm_dev, "cpu: %d enable smp call done: %d\n",
drvdata->cpu, rc);
@@ -383,6 +637,19 @@ static int etm4_parse_event_config(struct etmv4_drvdata *drvdata,
/* bit[6], Context ID tracing bit */
config->cfg |= BIT(ETM4_CFG_BIT_CTXTID);
+ /*
+ * If set bit ETM_OPT_CTXTID2 in perf config, this asks to trace VMID
+ * for recording CONTEXTIDR_EL2. Do not enable VMID tracing if the
+ * kernel is not running in EL2.
+ */
+ if (attr->config & BIT(ETM_OPT_CTXTID2)) {
+ if (!is_kernel_in_hyp_mode()) {
+ ret = -EINVAL;
+ goto out;
+ }
+ config->cfg |= BIT(ETM4_CFG_BIT_VMID) | BIT(ETM4_CFG_BIT_VMID_OPT);
+ }
+
/* return stack - enable if selected and supported */
if ((attr->config & BIT(ETM_OPT_RETSTK)) && drvdata->retstack)
/* bit[12], Return stack enable bit */
@@ -420,20 +687,27 @@ static int etm4_enable_sysfs(struct coresight_device *csdev)
struct etm4_enable_arg arg = { };
int ret;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
/*
* Executing etm4_enable_hw on the cpu whose ETM is being enabled
* ensures that register writes occur when cpu is powered.
+ *
+ * Note: when task isolation is enabled, the target CPU is always
+ * the primary core, so the assumption above, that the CPU owning
+ * the ETM is powered up while its registers are written, no longer
+ * holds. Using an SMP call does, however, still keep the register
+ * writes atomic.
*/
arg.drvdata = drvdata;
- ret = smp_call_function_single(drvdata->cpu,
+ ret = smp_call_function_single(drvdata->rc_cpu,
etm4_enable_hw_smp_call, &arg, 1);
if (!ret)
ret = arg.rc;
if (!ret)
drvdata->sticky_enable = true;
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
if (!ret)
dev_dbg(&csdev->dev, "ETM tracing enabled\n");
@@ -474,55 +748,77 @@ static int etm4_enable(struct coresight_device *csdev,
static void etm4_disable_hw(void *info)
{
u32 control;
+ u64 trfcr;
struct etmv4_drvdata *drvdata = info;
struct etmv4_config *config = &drvdata->config;
- struct device *etm_dev = &drvdata->csdev->dev;
+ struct coresight_device *csdev = drvdata->csdev;
+ struct device *etm_dev = &csdev->dev;
+ struct csdev_access *csa = &csdev->access;
int i;
- CS_UNLOCK(drvdata->base);
+ etm4_cs_unlock(drvdata, csa);
+ etm4_disable_arch_specific(drvdata);
if (!drvdata->skip_power_up) {
/* power can be removed from the trace unit now */
- control = readl_relaxed(drvdata->base + TRCPDCR);
+ control = etm4x_relaxed_read32(csa, TRCPDCR);
control &= ~TRCPDCR_PU;
- writel_relaxed(control, drvdata->base + TRCPDCR);
+ etm4x_relaxed_write32(csa, control, TRCPDCR);
}
- control = readl_relaxed(drvdata->base + TRCPRGCTLR);
+ control = etm4x_relaxed_read32(csa, TRCPRGCTLR);
/* EN, bit[0] Trace unit enable bit */
control &= ~0x1;
/*
+ * If the CPU supports v8.4 Trace Filter Control, put the
+ * ETM into the trace prohibited region while disabling it.
+ */
+ if (drvdata->trfc) {
+ trfcr = read_sysreg_s(SYS_TRFCR_EL1);
+ write_sysreg_s(trfcr & ~(TRFCR_ELx_ExTRE | TRFCR_ELx_E0TRE),
+ SYS_TRFCR_EL1);
+ isb();
+ }
+ /*
* Make sure everything completes before disabling, as recommended
* by section 7.3.77 ("TRCVICTLR, ViewInst Main Control Register,
* SSTATUS") of ARM IHI 0064D
*/
dsb(sy);
isb();
- writel_relaxed(control, drvdata->base + TRCPRGCTLR);
+ /* Trace synchronization barrier, is a nop if not supported */
+ tsb_csync();
+ etm4x_relaxed_write32(csa, control, TRCPRGCTLR);
/* wait for TRCSTATR.PMSTABLE to go to '1' */
- if (coresight_timeout(drvdata->base, TRCSTATR,
- TRCSTATR_PMSTABLE_BIT, 1))
+ if (coresight_timeout(csa, TRCSTATR, TRCSTATR_PMSTABLE_BIT, 1))
dev_err(etm_dev,
"timeout while waiting for PM stable Trace Status\n");
+ if (drvdata->trfc)
+ write_sysreg_s(trfcr, SYS_TRFCR_EL1);
/* read the status of the single shot comparators */
for (i = 0; i < drvdata->nr_ss_cmp; i++) {
config->ss_status[i] =
- readl_relaxed(drvdata->base + TRCSSCSRn(i));
+ etm4x_relaxed_read32(csa, TRCSSCSRn(i));
}
/* read back the current counter values */
for (i = 0; i < drvdata->nr_cntr; i++) {
config->cntr_val[i] =
- readl_relaxed(drvdata->base + TRCCNTVRn(i));
+ etm4x_relaxed_read32(csa, TRCCNTVRn(i));
}
- coresight_disclaim_device_unlocked(drvdata->base);
+ coresight_disclaim_device_unlocked(csdev);
+ etm4_cs_lock(drvdata, csa);
- CS_LOCK(drvdata->base);
+ /* For supporting SW sync insertion */
+ if (drvdata->etm_quirks & CORESIGHT_QUIRK_ETM_SW_SYNC) {
+ drvdata->hw_state = USR_STOP;
+ coresight_etm_active_disable(drvdata->cpu);
+ }
dev_dbg(&drvdata->csdev->dev,
"cpu: %d disable smp call done\n", drvdata->cpu);
@@ -546,7 +842,7 @@ static int etm4_disable_perf(struct coresight_device *csdev,
* scheduled again. Configuration of the start/stop logic happens in
* function etm4_set_event_filters().
*/
- control = readl_relaxed(drvdata->base + TRCVICTLR);
+ control = etm4x_relaxed_read32(&csdev->access, TRCVICTLR);
/* TRCVICTLR::SSSTATUS, bit[9] */
filters->ssstatus = (control & BIT(9));
@@ -564,15 +860,22 @@ static void etm4_disable_sysfs(struct coresight_device *csdev)
* DYING hotplug callback is serviced by the ETM driver.
*/
cpus_read_lock();
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
/*
* Executing etm4_disable_hw on the cpu whose ETM is being disabled
* ensures that register writes occur when cpu is powered.
+ *
+ * Note: when task isolation is enabled, the target CPU is always
+ * the primary core, so the assumption above, that the CPU owning
+ * the ETM is powered up while its registers are written, no longer
+ * holds. Using an SMP call does, however, still keep the register
+ * writes atomic.
*/
- smp_call_function_single(drvdata->cpu, etm4_disable_hw, drvdata, 1);
+ smp_call_function_single(drvdata->rc_cpu, etm4_disable_hw, drvdata, 1);
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
cpus_read_unlock();
dev_dbg(&csdev->dev, "ETM tracing disabled\n");
@@ -617,24 +920,160 @@ static const struct coresight_ops etm4_cs_ops = {
.source_ops = &etm4_source_ops,
};
+static inline bool cpu_supports_sysreg_trace(void)
+{
+ u64 dfr0 = read_sysreg_s(SYS_ID_AA64DFR0_EL1);
+
+ return ((dfr0 >> ID_AA64DFR0_TRACEVER_SHIFT) & 0xfUL) > 0;
+}
+
+static bool etm4_init_sysreg_access(struct etmv4_drvdata *drvdata,
+ struct csdev_access *csa)
+{
+ u32 devarch;
+
+ if (!cpu_supports_sysreg_trace())
+ return false;
+
+ /*
+ * ETMs implementing sysreg access must implement TRCDEVARCH.
+ */
+ devarch = read_etm4x_sysreg_const_offset(TRCDEVARCH);
+ switch (devarch & ETM_DEVARCH_ID_MASK) {
+ case ETM_DEVARCH_ETMv4x_ARCH:
+ *csa = (struct csdev_access) {
+ .io_mem = false,
+ .read = etm4x_sysreg_read,
+ .write = etm4x_sysreg_write,
+ };
+ break;
+ case ETM_DEVARCH_ETE_ARCH:
+ *csa = (struct csdev_access) {
+ .io_mem = false,
+ .read = ete_sysreg_read,
+ .write = ete_sysreg_write,
+ };
+ break;
+ default:
+ return false;
+ }
+
+ drvdata->arch = etm_devarch_to_arch(devarch);
+ return true;
+}
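For orientation, the TRCDEVARCH check above boils down to comparing the architect and architecture-id fields; the raw value matches the 0x47704a13 constant the UCI table further down used to hard-code. A hedged sketch (the symbolic masks live in coresight-etm4x.h, so the field breakdown is for illustration only):

/*
 * TRCDEVARCH for an ETMv4 implementation decodes roughly as
 *	ARCHITECT (bits [31:21]) = Arm (0x23b)
 *	PRESENT   (bit  20)      = 1
 *	ARCHID    (bits [15:0])  = 0x4a13
 * i.e. 0x47704a13 once the REVISION field (bits [19:16]) is masked off.
 */
static bool example_devarch_is_etm4x(u32 devarch)
{
	return (devarch & ETM_DEVARCH_ID_MASK) == ETM_DEVARCH_ETMv4x_ARCH;
}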
+
+static bool etm4_init_iomem_access(struct etmv4_drvdata *drvdata,
+ struct csdev_access *csa)
+{
+ u32 devarch = readl_relaxed(drvdata->base + TRCDEVARCH);
+ u32 idr1 = readl_relaxed(drvdata->base + TRCIDR1);
+
+ /*
+ * OcteonTX2 hardware reports itself as ETMv4.2 but supports the
+ * Ignore Packet feature of ETMv4.3, so treat it as compatible
+ * with ETMv4.3.
+ */
+ if (drvdata->etm_quirks & CORESIGHT_QUIRK_ETM_TREAT_ETMv43) {
+ idr1 &= ~0xF0;
+ idr1 |= 0x30;
+ }
+
+ /*
+ * All ETMs must implement TRCDEVARCH to indicate that
+ * the component is an ETMv4. To support any broken
+ * implementations we fall back to TRCIDR1 check, which
+ * is not really reliable.
+ */
+ if ((devarch & ETM_DEVARCH_ID_MASK) == ETM_DEVARCH_ETMv4x_ARCH) {
+ drvdata->arch = etm_devarch_to_arch(devarch);
+ } else {
+ pr_warn("CPU%d: ETM4x incompatible TRCDEVARCH: %x, falling back to TRCIDR1\n",
+ smp_processor_id(), devarch);
+
+ if (ETM_TRCIDR1_ARCH_MAJOR(idr1) != ETM_TRCIDR1_ARCH_ETMv4)
+ return false;
+ drvdata->arch = etm_trcidr_to_arch(idr1);
+ }
+
+ *csa = CSDEV_ACCESS_IOMEM(drvdata->base);
+ return true;
+}
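As a concrete illustration of the OcteonTX2 quirk above, take a hypothetical TRCIDR1 value carrying only the architecture fields (TRCARCHMAJ in bits [11:8], TRCARCHMIN in bits [7:4]):

static u32 example_octeontx2_idr1_fixup(u32 idr1)
{
	/* e.g. idr1 = 0x420: ARCHMAJ = 0x4, ARCHMIN = 0x2, i.e. ETMv4.2 */
	idr1 &= ~0xF0;		/* clear TRCARCHMIN */
	idr1 |= 0x30;		/* report minor version 3 instead */
	return idr1;		/* 0x420 becomes 0x430, i.e. ETMv4.3 */
}

With the minor version bumped, etm_trcidr_to_arch() reports ETMv4.3, which lets the driver treat the Ignore Packet capable part as a v4.3 implementation.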
+
+static bool etm4_init_csdev_access(struct etmv4_drvdata *drvdata,
+ struct csdev_access *csa)
+{
+ /*
+ * Always choose memory mapped I/O when a mapping is
+ * available, to avoid system register access on broken
+ * systems.
+ */
+ if (drvdata->base)
+ return etm4_init_iomem_access(drvdata, csa);
+
+ if (etm4_init_sysreg_access(drvdata, csa))
+ return true;
+
+ return false;
+}
+
+static void cpu_enable_tracing(struct etmv4_drvdata *drvdata)
+{
+ u64 dfr0 = read_sysreg(id_aa64dfr0_el1);
+ u64 trfcr;
+
+ if (!cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_TRACE_FILT_SHIFT))
+ return;
+
+ drvdata->trfc = true;
+ /*
+ * If the CPU supports v8.4 Self-hosted Tracing, enable
+ * tracing at the kernel EL and EL0, and force the use of
+ * virtual time as the timestamp.
+ */
+ trfcr = (TRFCR_ELx_TS_VIRTUAL |
+ TRFCR_ELx_ExTRE |
+ TRFCR_ELx_E0TRE);
+
+ /* If we are running at EL2, allow tracing the CONTEXTIDR_EL2. */
+ if (is_kernel_in_hyp_mode())
+ trfcr |= TRFCR_EL2_CX;
+
+ write_sysreg_s(trfcr, SYS_TRFCR_EL1);
+}
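Assuming the usual TRFCR field encodings added elsewhere in this series (TS at bits [6:5] with 0b01 = virtual, CX at bit 3, ExTRE at bit 1, E0TRE at bit 0), the value programmed above should work out to 0x23, or 0x2b when running at EL2. A sketch of the same computation, for illustration only:

static u64 example_trfcr_value(bool at_el2)
{
	/* 0x23 at EL1, 0x2b at EL2 (CONTEXTIDR_EL2 tracing allowed) */
	u64 trfcr = TRFCR_ELx_TS_VIRTUAL | TRFCR_ELx_ExTRE | TRFCR_ELx_E0TRE;

	if (at_el2)
		trfcr |= TRFCR_EL2_CX;
	return trfcr;
}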
+
static void etm4_init_arch_data(void *info)
{
u32 etmidr0;
- u32 etmidr1;
u32 etmidr2;
u32 etmidr3;
u32 etmidr4;
u32 etmidr5;
- struct etmv4_drvdata *drvdata = info;
+ struct etm4_init_arg *init_arg = info;
+ struct etmv4_drvdata *drvdata;
+ struct csdev_access *csa;
int i;
+ drvdata = init_arg->drvdata;
+ csa = init_arg->csa;
+
+ /*
+ * If we are unable to detect the access mechanism,
+ * or unable to detect the trace unit type, fail
+ * early.
+ */
+ if (!etm4_init_csdev_access(drvdata, csa))
+ return;
+
+ /* Detect the support for OS Lock before we actually use it */
+ etm_detect_os_lock(drvdata, csa);
+
/* Make sure all registers are accessible */
- etm4_os_unlock(drvdata);
+ etm4_os_unlock_csa(drvdata, csa);
+ etm4_cs_unlock(drvdata, csa);
- CS_UNLOCK(drvdata->base);
+ etm4_check_arch_features(drvdata, init_arg->pid);
/* find all capabilities of the tracing unit */
- etmidr0 = readl_relaxed(drvdata->base + TRCIDR0);
+ etmidr0 = etm4x_relaxed_read32(csa, TRCIDR0);
/* INSTP0, bits[2:1] P0 tracing support field */
if (BMVAL(etmidr0, 1, 1) && BMVAL(etmidr0, 2, 2))
@@ -673,17 +1112,8 @@ static void etm4_init_arch_data(void *info)
/* TSSIZE, bits[28:24] Global timestamp size field */
drvdata->ts_size = BMVAL(etmidr0, 24, 28);
- /* base architecture of trace unit */
- etmidr1 = readl_relaxed(drvdata->base + TRCIDR1);
- /*
- * TRCARCHMIN, bits[7:4] architecture the minor version number
- * TRCARCHMAJ, bits[11:8] architecture major versin number
- */
- drvdata->arch = BMVAL(etmidr1, 4, 11);
- drvdata->config.arch = drvdata->arch;
-
/* maximum size of resources */
- etmidr2 = readl_relaxed(drvdata->base + TRCIDR2);
+ etmidr2 = etm4x_relaxed_read32(csa, TRCIDR2);
/* CIDSIZE, bits[9:5] Indicates the Context ID size */
drvdata->ctxid_size = BMVAL(etmidr2, 5, 9);
/* VMIDSIZE, bits[14:10] Indicates the VMID size */
@@ -691,11 +1121,12 @@ static void etm4_init_arch_data(void *info)
/* CCSIZE, bits[28:25] size of the cycle counter in bits minus 12 */
drvdata->ccsize = BMVAL(etmidr2, 25, 28);
- etmidr3 = readl_relaxed(drvdata->base + TRCIDR3);
+ etmidr3 = etm4x_relaxed_read32(csa, TRCIDR3);
/* CCITMIN, bits[11:0] minimum threshold value that can be programmed */
drvdata->ccitmin = BMVAL(etmidr3, 0, 11);
/* EXLEVEL_S, bits[19:16] Secure state instruction tracing */
drvdata->s_ex_level = BMVAL(etmidr3, 16, 19);
+ drvdata->config.s_ex_level = drvdata->s_ex_level;
/* EXLEVEL_NS, bits[23:20] Non-secure state instruction tracing */
drvdata->ns_ex_level = BMVAL(etmidr3, 20, 23);
@@ -726,8 +1157,13 @@ static void etm4_init_arch_data(void *info)
else
drvdata->sysstall = false;
- /* NUMPROC, bits[30:28] the number of PEs available for tracing */
- drvdata->nr_pe = BMVAL(etmidr3, 28, 30);
+ /*
+ * NUMPROC - the number of PEs available for tracing, 5 bits
+ * = TRCIDR3.bits[13:12]:bits[30:28]
+ * bits[4:3] = TRCIDR3.bits[13:12] (since ETMv4.2, otherwise RES0)
+ * bits[2:0] = TRCIDR3.bits[30:28]
+ */
+ drvdata->nr_pe = (BMVAL(etmidr3, 12, 13) << 3) | BMVAL(etmidr3, 28, 30);
/* NOOVERFLOW, bit[31] is trace overflow prevention supported */
if (BMVAL(etmidr3, 31, 31))
@@ -736,7 +1172,7 @@ static void etm4_init_arch_data(void *info)
drvdata->nooverflow = false;
/* number of resources trace unit supports */
- etmidr4 = readl_relaxed(drvdata->base + TRCIDR4);
+ etmidr4 = etm4x_relaxed_read32(csa, TRCIDR4);
/* NUMACPAIRS, bits[0:3] number of addr comparator pairs for tracing */
drvdata->nr_addr_cmp = BMVAL(etmidr4, 0, 3);
/* NUMPC, bits[15:12] number of PE comparator inputs for tracing */
@@ -752,7 +1188,7 @@ static void etm4_init_arch_data(void *info)
* Otherwise for values 0x1 and above the number is N + 1 as per v4.2.
*/
drvdata->nr_resource = BMVAL(etmidr4, 16, 19);
- if ((drvdata->arch < ETM4X_ARCH_4V3) || (drvdata->nr_resource > 0))
+ if ((drvdata->arch < ETM_ARCH_V4_3) || (drvdata->nr_resource > 0))
drvdata->nr_resource += 1;
/*
* NUMSSCC, bits[23:20] the number of single-shot
@@ -762,14 +1198,14 @@ static void etm4_init_arch_data(void *info)
drvdata->nr_ss_cmp = BMVAL(etmidr4, 20, 23);
for (i = 0; i < drvdata->nr_ss_cmp; i++) {
drvdata->config.ss_status[i] =
- readl_relaxed(drvdata->base + TRCSSCSRn(i));
+ etm4x_relaxed_read32(csa, TRCSSCSRn(i));
}
/* NUMCIDC, bits[27:24] number of Context ID comparators for tracing */
drvdata->numcidc = BMVAL(etmidr4, 24, 27);
/* NUMVMIDC, bits[31:28] number of VMID comparators for tracing */
drvdata->numvmidc = BMVAL(etmidr4, 28, 31);
- etmidr5 = readl_relaxed(drvdata->base + TRCIDR5);
+ etmidr5 = etm4x_relaxed_read32(csa, TRCIDR5);
/* NUMEXTIN, bits[8:0] number of external inputs implemented */
drvdata->nr_ext_inp = BMVAL(etmidr5, 0, 8);
/* TRACEIDSIZE, bits[21:16] indicates the trace ID width */
@@ -791,23 +1227,20 @@ static void etm4_init_arch_data(void *info)
drvdata->nrseqstate = BMVAL(etmidr5, 25, 27);
/* NUMCNTR, bits[30:28] number of counters available for tracing */
drvdata->nr_cntr = BMVAL(etmidr5, 28, 30);
- CS_LOCK(drvdata->base);
+ etm4_cs_lock(drvdata, csa);
+ cpu_enable_tracing(drvdata);
+}
+
+static inline u32 etm4_get_victlr_access_type(struct etmv4_config *config)
+{
+ return etm4_get_access_type(config) << TRCVICTLR_EXLEVEL_SHIFT;
}
/* Set ELx trace filter access in the TRCVICTLR register */
static void etm4_set_victlr_access(struct etmv4_config *config)
{
- u64 access_type;
-
- config->vinst_ctrl &= ~(ETM_EXLEVEL_S_VICTLR_MASK | ETM_EXLEVEL_NS_VICTLR_MASK);
-
- /*
- * TRCVICTLR::EXLEVEL_NS:EXLEVELS: Set kernel / user filtering
- * bits in vinst_ctrl, same bit pattern as TRCACATRn values returned by
- * etm4_get_access_type() but with a relative shift in this register.
- */
- access_type = etm4_get_access_type(config) << ETM_EXLEVEL_LSHIFT_TRCVICTLR;
- config->vinst_ctrl |= (u32)access_type;
+ config->vinst_ctrl &= ~TRCVICTLR_EXLEVEL_MASK;
+ config->vinst_ctrl |= etm4_get_victlr_access_type(config);
}
static void etm4_set_default_config(struct etmv4_config *config)
@@ -837,12 +1270,9 @@ static u64 etm4_get_ns_access_type(struct etmv4_config *config)
u64 access_type = 0;
/*
- * EXLEVEL_NS, bits[15:12]
- * The Exception levels are:
- * Bit[12] Exception level 0 - Application
- * Bit[13] Exception level 1 - OS
- * Bit[14] Exception level 2 - Hypervisor
- * Bit[15] Never implemented
+ * EXLEVEL_NS, for Non-secure Exception levels.
+ * The mask here is a generic value and must be
+ * shifted to the corresponding field of the target register.
*/
if (!is_kernel_in_hyp_mode()) {
/* Stay away from hypervisor mode for non-VHE */
@@ -859,27 +1289,26 @@ static u64 etm4_get_ns_access_type(struct etmv4_config *config)
return access_type;
}
+/*
+ * Construct the exception level masks for a given config.
+ * The result must be shifted to the corresponding register
+ * field before use.
+ */
static u64 etm4_get_access_type(struct etmv4_config *config)
{
- u64 access_type = etm4_get_ns_access_type(config);
- u64 s_hyp = (config->arch & 0x0f) >= 0x4 ? ETM_EXLEVEL_S_HYP : 0;
-
- /*
- * EXLEVEL_S, bits[11:8], don't trace anything happening
- * in secure state.
- */
- access_type |= (ETM_EXLEVEL_S_APP |
- ETM_EXLEVEL_S_OS |
- s_hyp |
- ETM_EXLEVEL_S_MON);
+ /* All Secure exception levels are excluded from the trace */
+ return etm4_get_ns_access_type(config) | (u64)config->s_ex_level;
+}
- return access_type;
+static u64 etm4_get_comparator_access_type(struct etmv4_config *config)
+{
+ return etm4_get_access_type(config) << TRCACATR_EXLEVEL_SHIFT;
}
static void etm4_set_comparator_filter(struct etmv4_config *config,
u64 start, u64 stop, int comparator)
{
- u64 access_type = etm4_get_access_type(config);
+ u64 access_type = etm4_get_comparator_access_type(config);
/* First half of default address comparator */
config->addr_val[comparator] = start;
@@ -914,7 +1343,7 @@ static void etm4_set_start_stop_filter(struct etmv4_config *config,
enum etm_addr_type type)
{
int shift;
- u64 access_type = etm4_get_access_type(config);
+ u64 access_type = etm4_get_comparator_access_type(config);
/* Configure the comparator */
config->addr_val[comparator] = address;
@@ -1124,13 +1553,13 @@ static int etm4_starting_cpu(unsigned int cpu)
if (!etmdrvdata[cpu])
return 0;
- spin_lock(&etmdrvdata[cpu]->spinlock);
+ raw_spin_lock(&etmdrvdata[cpu]->spinlock);
if (!etmdrvdata[cpu]->os_unlock)
etm4_os_unlock(etmdrvdata[cpu]);
if (local_read(&etmdrvdata[cpu]->mode))
etm4_enable_hw(etmdrvdata[cpu]);
- spin_unlock(&etmdrvdata[cpu]->spinlock);
+ raw_spin_unlock(&etmdrvdata[cpu]->spinlock);
return 0;
}
@@ -1139,10 +1568,10 @@ static int etm4_dying_cpu(unsigned int cpu)
if (!etmdrvdata[cpu])
return 0;
- spin_lock(&etmdrvdata[cpu]->spinlock);
+ raw_spin_lock(&etmdrvdata[cpu]->spinlock);
if (local_read(&etmdrvdata[cpu]->mode))
etm4_disable_hw(etmdrvdata[cpu]);
- spin_unlock(&etmdrvdata[cpu]->spinlock);
+ raw_spin_unlock(&etmdrvdata[cpu]->spinlock);
return 0;
}
@@ -1155,7 +1584,15 @@ static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
{
int i, ret = 0;
struct etmv4_save_state *state;
- struct device *etm_dev = &drvdata->csdev->dev;
+ struct coresight_device *csdev = drvdata->csdev;
+ struct csdev_access *csa;
+ struct device *etm_dev;
+
+ if (WARN_ON(!csdev))
+ return -ENODEV;
+
+ etm_dev = &csdev->dev;
+ csa = &csdev->access;
/*
* As recommended by 3.4.1 ("The procedure when powering down the PE")
@@ -1164,14 +1601,12 @@ static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
dsb(sy);
isb();
- CS_UNLOCK(drvdata->base);
-
+ etm4_cs_unlock(drvdata, csa);
/* Lock the OS lock to disable trace and external debugger access */
etm4_os_lock(drvdata);
/* wait for TRCSTATR.PMSTABLE to go up */
- if (coresight_timeout(drvdata->base, TRCSTATR,
- TRCSTATR_PMSTABLE_BIT, 1)) {
+ if (coresight_timeout(csa, TRCSTATR, TRCSTATR_PMSTABLE_BIT, 1)) {
dev_err(etm_dev,
"timeout while waiting for PM Stable Status\n");
etm4_os_unlock(drvdata);
@@ -1181,56 +1616,57 @@ static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
state = drvdata->save_state;
- state->trcprgctlr = readl(drvdata->base + TRCPRGCTLR);
+ state->trcprgctlr = etm4x_read32(csa, TRCPRGCTLR);
if (drvdata->nr_pe)
- state->trcprocselr = readl(drvdata->base + TRCPROCSELR);
- state->trcconfigr = readl(drvdata->base + TRCCONFIGR);
- state->trcauxctlr = readl(drvdata->base + TRCAUXCTLR);
- state->trceventctl0r = readl(drvdata->base + TRCEVENTCTL0R);
- state->trceventctl1r = readl(drvdata->base + TRCEVENTCTL1R);
+ state->trcprocselr = etm4x_read32(csa, TRCPROCSELR);
+ state->trcconfigr = etm4x_read32(csa, TRCCONFIGR);
+ state->trcauxctlr = etm4x_read32(csa, TRCAUXCTLR);
+ state->trceventctl0r = etm4x_read32(csa, TRCEVENTCTL0R);
+ state->trceventctl1r = etm4x_read32(csa, TRCEVENTCTL1R);
if (drvdata->stallctl)
- state->trcstallctlr = readl(drvdata->base + TRCSTALLCTLR);
- state->trctsctlr = readl(drvdata->base + TRCTSCTLR);
- state->trcsyncpr = readl(drvdata->base + TRCSYNCPR);
- state->trcccctlr = readl(drvdata->base + TRCCCCTLR);
- state->trcbbctlr = readl(drvdata->base + TRCBBCTLR);
- state->trctraceidr = readl(drvdata->base + TRCTRACEIDR);
- state->trcqctlr = readl(drvdata->base + TRCQCTLR);
-
- state->trcvictlr = readl(drvdata->base + TRCVICTLR);
- state->trcviiectlr = readl(drvdata->base + TRCVIIECTLR);
- state->trcvissctlr = readl(drvdata->base + TRCVISSCTLR);
+ state->trcstallctlr = etm4x_read32(csa, TRCSTALLCTLR);
+ state->trctsctlr = etm4x_read32(csa, TRCTSCTLR);
+ state->trcsyncpr = etm4x_read32(csa, TRCSYNCPR);
+ state->trcccctlr = etm4x_read32(csa, TRCCCCTLR);
+ state->trcbbctlr = etm4x_read32(csa, TRCBBCTLR);
+ state->trctraceidr = etm4x_read32(csa, TRCTRACEIDR);
+ state->trcqctlr = etm4x_read32(csa, TRCQCTLR);
+
+ state->trcvictlr = etm4x_read32(csa, TRCVICTLR);
+ state->trcviiectlr = etm4x_read32(csa, TRCVIIECTLR);
+ state->trcvissctlr = etm4x_read32(csa, TRCVISSCTLR);
if (drvdata->nr_pe_cmp)
- state->trcvipcssctlr = readl(drvdata->base + TRCVIPCSSCTLR);
- state->trcvdctlr = readl(drvdata->base + TRCVDCTLR);
- state->trcvdsacctlr = readl(drvdata->base + TRCVDSACCTLR);
- state->trcvdarcctlr = readl(drvdata->base + TRCVDARCCTLR);
+ state->trcvipcssctlr = etm4x_read32(csa, TRCVIPCSSCTLR);
+ state->trcvdctlr = etm4x_read32(csa, TRCVDCTLR);
+ state->trcvdsacctlr = etm4x_read32(csa, TRCVDSACCTLR);
+ state->trcvdarcctlr = etm4x_read32(csa, TRCVDARCCTLR);
for (i = 0; i < drvdata->nrseqstate - 1; i++)
- state->trcseqevr[i] = readl(drvdata->base + TRCSEQEVRn(i));
+ state->trcseqevr[i] = etm4x_read32(csa, TRCSEQEVRn(i));
- state->trcseqrstevr = readl(drvdata->base + TRCSEQRSTEVR);
- state->trcseqstr = readl(drvdata->base + TRCSEQSTR);
- state->trcextinselr = readl(drvdata->base + TRCEXTINSELR);
+ state->trcseqrstevr = etm4x_read32(csa, TRCSEQRSTEVR);
+ state->trcseqstr = etm4x_read32(csa, TRCSEQSTR);
+ state->trcextinselr = etm4x_read32(csa, TRCEXTINSELR);
for (i = 0; i < drvdata->nr_cntr; i++) {
- state->trccntrldvr[i] = readl(drvdata->base + TRCCNTRLDVRn(i));
- state->trccntctlr[i] = readl(drvdata->base + TRCCNTCTLRn(i));
- state->trccntvr[i] = readl(drvdata->base + TRCCNTVRn(i));
+ state->trccntrldvr[i] = etm4x_read32(csa, TRCCNTRLDVRn(i));
+ state->trccntctlr[i] = etm4x_read32(csa, TRCCNTCTLRn(i));
+ state->trccntvr[i] = etm4x_read32(csa, TRCCNTVRn(i));
}
for (i = 0; i < drvdata->nr_resource * 2; i++)
- state->trcrsctlr[i] = readl(drvdata->base + TRCRSCTLRn(i));
+ state->trcrsctlr[i] = etm4x_read32(csa, TRCRSCTLRn(i));
for (i = 0; i < drvdata->nr_ss_cmp; i++) {
- state->trcssccr[i] = readl(drvdata->base + TRCSSCCRn(i));
- state->trcsscsr[i] = readl(drvdata->base + TRCSSCSRn(i));
- state->trcsspcicr[i] = readl(drvdata->base + TRCSSPCICRn(i));
+ state->trcssccr[i] = etm4x_read32(csa, TRCSSCCRn(i));
+ state->trcsscsr[i] = etm4x_read32(csa, TRCSSCSRn(i));
+ if (etm4x_sspcicrn_present(drvdata, i))
+ state->trcsspcicr[i] = etm4x_read32(csa, TRCSSPCICRn(i));
}
for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
- state->trcacvr[i] = readq(drvdata->base + TRCACVRn(i));
- state->trcacatr[i] = readq(drvdata->base + TRCACATRn(i));
+ state->trcacvr[i] = etm4x_read64(csa, TRCACVRn(i));
+ state->trcacatr[i] = etm4x_read64(csa, TRCACATRn(i));
}
/*
@@ -1241,26 +1677,26 @@ static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
*/
for (i = 0; i < drvdata->numcidc; i++)
- state->trccidcvr[i] = readq(drvdata->base + TRCCIDCVRn(i));
+ state->trccidcvr[i] = etm4x_read64(csa, TRCCIDCVRn(i));
for (i = 0; i < drvdata->numvmidc; i++)
- state->trcvmidcvr[i] = readq(drvdata->base + TRCVMIDCVRn(i));
+ state->trcvmidcvr[i] = etm4x_read64(csa, TRCVMIDCVRn(i));
- state->trccidcctlr0 = readl(drvdata->base + TRCCIDCCTLR0);
+ state->trccidcctlr0 = etm4x_read32(csa, TRCCIDCCTLR0);
if (drvdata->numcidc > 4)
- state->trccidcctlr1 = readl(drvdata->base + TRCCIDCCTLR1);
+ state->trccidcctlr1 = etm4x_read32(csa, TRCCIDCCTLR1);
- state->trcvmidcctlr0 = readl(drvdata->base + TRCVMIDCCTLR0);
+ state->trcvmidcctlr0 = etm4x_read32(csa, TRCVMIDCCTLR0);
if (drvdata->numvmidc > 4)
- state->trcvmidcctlr1 = readl(drvdata->base + TRCVMIDCCTLR1);
+ state->trcvmidcctlr1 = etm4x_read32(csa, TRCVMIDCCTLR1);
- state->trcclaimset = readl(drvdata->base + TRCCLAIMCLR);
+ state->trcclaimset = etm4x_read32(csa, TRCCLAIMCLR);
if (!drvdata->skip_power_up)
- state->trcpdcr = readl(drvdata->base + TRCPDCR);
+ state->trcpdcr = etm4x_read32(csa, TRCPDCR);
/* wait for TRCSTATR.IDLE to go up */
- if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 1)) {
+ if (coresight_timeout(csa, TRCSTATR, TRCSTATR_IDLE_BIT, 1)) {
dev_err(etm_dev,
"timeout while waiting for Idle Trace Status\n");
etm4_os_unlock(drvdata);
@@ -1276,10 +1712,10 @@ static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
* despite requesting software to save/restore state.
*/
if (!drvdata->skip_power_up)
- writel_relaxed((state->trcpdcr & ~TRCPDCR_PU),
- drvdata->base + TRCPDCR);
+ etm4x_relaxed_write32(csa, (state->trcpdcr & ~TRCPDCR_PU),
+ TRCPDCR);
out:
- CS_LOCK(drvdata->base);
+ etm4_cs_lock(drvdata, csa);
return ret;
}
@@ -1287,93 +1723,83 @@ static void etm4_cpu_restore(struct etmv4_drvdata *drvdata)
{
int i;
struct etmv4_save_state *state = drvdata->save_state;
+ struct csdev_access tmp_csa = CSDEV_ACCESS_IOMEM(drvdata->base);
+ struct csdev_access *csa = &tmp_csa;
- CS_UNLOCK(drvdata->base);
-
- writel_relaxed(state->trcclaimset, drvdata->base + TRCCLAIMSET);
+ etm4_cs_unlock(drvdata, csa);
+ etm4x_relaxed_write32(csa, state->trcclaimset, TRCCLAIMSET);
- writel_relaxed(state->trcprgctlr, drvdata->base + TRCPRGCTLR);
+ etm4x_relaxed_write32(csa, state->trcprgctlr, TRCPRGCTLR);
if (drvdata->nr_pe)
- writel_relaxed(state->trcprocselr, drvdata->base + TRCPROCSELR);
- writel_relaxed(state->trcconfigr, drvdata->base + TRCCONFIGR);
- writel_relaxed(state->trcauxctlr, drvdata->base + TRCAUXCTLR);
- writel_relaxed(state->trceventctl0r, drvdata->base + TRCEVENTCTL0R);
- writel_relaxed(state->trceventctl1r, drvdata->base + TRCEVENTCTL1R);
+ etm4x_relaxed_write32(csa, state->trcprocselr, TRCPROCSELR);
+ etm4x_relaxed_write32(csa, state->trcconfigr, TRCCONFIGR);
+ etm4x_relaxed_write32(csa, state->trcauxctlr, TRCAUXCTLR);
+ etm4x_relaxed_write32(csa, state->trceventctl0r, TRCEVENTCTL0R);
+ etm4x_relaxed_write32(csa, state->trceventctl1r, TRCEVENTCTL1R);
if (drvdata->stallctl)
- writel_relaxed(state->trcstallctlr, drvdata->base + TRCSTALLCTLR);
- writel_relaxed(state->trctsctlr, drvdata->base + TRCTSCTLR);
- writel_relaxed(state->trcsyncpr, drvdata->base + TRCSYNCPR);
- writel_relaxed(state->trcccctlr, drvdata->base + TRCCCCTLR);
- writel_relaxed(state->trcbbctlr, drvdata->base + TRCBBCTLR);
- writel_relaxed(state->trctraceidr, drvdata->base + TRCTRACEIDR);
- writel_relaxed(state->trcqctlr, drvdata->base + TRCQCTLR);
-
- writel_relaxed(state->trcvictlr, drvdata->base + TRCVICTLR);
- writel_relaxed(state->trcviiectlr, drvdata->base + TRCVIIECTLR);
- writel_relaxed(state->trcvissctlr, drvdata->base + TRCVISSCTLR);
+ etm4x_relaxed_write32(csa, state->trcstallctlr, TRCSTALLCTLR);
+ etm4x_relaxed_write32(csa, state->trctsctlr, TRCTSCTLR);
+ etm4x_relaxed_write32(csa, state->trcsyncpr, TRCSYNCPR);
+ etm4x_relaxed_write32(csa, state->trcccctlr, TRCCCCTLR);
+ etm4x_relaxed_write32(csa, state->trcbbctlr, TRCBBCTLR);
+ etm4x_relaxed_write32(csa, state->trctraceidr, TRCTRACEIDR);
+ etm4x_relaxed_write32(csa, state->trcqctlr, TRCQCTLR);
+
+ etm4x_relaxed_write32(csa, state->trcvictlr, TRCVICTLR);
+ etm4x_relaxed_write32(csa, state->trcviiectlr, TRCVIIECTLR);
+ etm4x_relaxed_write32(csa, state->trcvissctlr, TRCVISSCTLR);
if (drvdata->nr_pe_cmp)
- writel_relaxed(state->trcvipcssctlr, drvdata->base + TRCVIPCSSCTLR);
- writel_relaxed(state->trcvdctlr, drvdata->base + TRCVDCTLR);
- writel_relaxed(state->trcvdsacctlr, drvdata->base + TRCVDSACCTLR);
- writel_relaxed(state->trcvdarcctlr, drvdata->base + TRCVDARCCTLR);
+ etm4x_relaxed_write32(csa, state->trcvipcssctlr, TRCVIPCSSCTLR);
+ etm4x_relaxed_write32(csa, state->trcvdctlr, TRCVDCTLR);
+ etm4x_relaxed_write32(csa, state->trcvdsacctlr, TRCVDSACCTLR);
+ etm4x_relaxed_write32(csa, state->trcvdarcctlr, TRCVDARCCTLR);
for (i = 0; i < drvdata->nrseqstate - 1; i++)
- writel_relaxed(state->trcseqevr[i],
- drvdata->base + TRCSEQEVRn(i));
+ etm4x_relaxed_write32(csa, state->trcseqevr[i], TRCSEQEVRn(i));
- writel_relaxed(state->trcseqrstevr, drvdata->base + TRCSEQRSTEVR);
- writel_relaxed(state->trcseqstr, drvdata->base + TRCSEQSTR);
- writel_relaxed(state->trcextinselr, drvdata->base + TRCEXTINSELR);
+ etm4x_relaxed_write32(csa, state->trcseqrstevr, TRCSEQRSTEVR);
+ etm4x_relaxed_write32(csa, state->trcseqstr, TRCSEQSTR);
+ etm4x_relaxed_write32(csa, state->trcextinselr, TRCEXTINSELR);
for (i = 0; i < drvdata->nr_cntr; i++) {
- writel_relaxed(state->trccntrldvr[i],
- drvdata->base + TRCCNTRLDVRn(i));
- writel_relaxed(state->trccntctlr[i],
- drvdata->base + TRCCNTCTLRn(i));
- writel_relaxed(state->trccntvr[i],
- drvdata->base + TRCCNTVRn(i));
+ etm4x_relaxed_write32(csa, state->trccntrldvr[i], TRCCNTRLDVRn(i));
+ etm4x_relaxed_write32(csa, state->trccntctlr[i], TRCCNTCTLRn(i));
+ etm4x_relaxed_write32(csa, state->trccntvr[i], TRCCNTVRn(i));
}
for (i = 0; i < drvdata->nr_resource * 2; i++)
- writel_relaxed(state->trcrsctlr[i],
- drvdata->base + TRCRSCTLRn(i));
+ etm4x_relaxed_write32(csa, state->trcrsctlr[i], TRCRSCTLRn(i));
for (i = 0; i < drvdata->nr_ss_cmp; i++) {
- writel_relaxed(state->trcssccr[i],
- drvdata->base + TRCSSCCRn(i));
- writel_relaxed(state->trcsscsr[i],
- drvdata->base + TRCSSCSRn(i));
- writel_relaxed(state->trcsspcicr[i],
- drvdata->base + TRCSSPCICRn(i));
+ etm4x_relaxed_write32(csa, state->trcssccr[i], TRCSSCCRn(i));
+ etm4x_relaxed_write32(csa, state->trcsscsr[i], TRCSSCSRn(i));
+ if (etm4x_sspcicrn_present(drvdata, i))
+ etm4x_relaxed_write32(csa, state->trcsspcicr[i], TRCSSPCICRn(i));
}
for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
- writeq_relaxed(state->trcacvr[i],
- drvdata->base + TRCACVRn(i));
- writeq_relaxed(state->trcacatr[i],
- drvdata->base + TRCACATRn(i));
+ etm4x_relaxed_write64(csa, state->trcacvr[i], TRCACVRn(i));
+ etm4x_relaxed_write64(csa, state->trcacatr[i], TRCACATRn(i));
}
for (i = 0; i < drvdata->numcidc; i++)
- writeq_relaxed(state->trccidcvr[i],
- drvdata->base + TRCCIDCVRn(i));
+ etm4x_relaxed_write64(csa, state->trccidcvr[i], TRCCIDCVRn(i));
for (i = 0; i < drvdata->numvmidc; i++)
- writeq_relaxed(state->trcvmidcvr[i],
- drvdata->base + TRCVMIDCVRn(i));
+ etm4x_relaxed_write64(csa, state->trcvmidcvr[i], TRCVMIDCVRn(i));
- writel_relaxed(state->trccidcctlr0, drvdata->base + TRCCIDCCTLR0);
+ etm4x_relaxed_write32(csa, state->trccidcctlr0, TRCCIDCCTLR0);
if (drvdata->numcidc > 4)
- writel_relaxed(state->trccidcctlr1, drvdata->base + TRCCIDCCTLR1);
+ etm4x_relaxed_write32(csa, state->trccidcctlr1, TRCCIDCCTLR1);
- writel_relaxed(state->trcvmidcctlr0, drvdata->base + TRCVMIDCCTLR0);
+ etm4x_relaxed_write32(csa, state->trcvmidcctlr0, TRCVMIDCCTLR0);
if (drvdata->numvmidc > 4)
- writel_relaxed(state->trcvmidcctlr1, drvdata->base + TRCVMIDCCTLR1);
+ etm4x_relaxed_write32(csa, state->trcvmidcctlr1, TRCVMIDCCTLR1);
- writel_relaxed(state->trcclaimset, drvdata->base + TRCCLAIMSET);
+ etm4x_relaxed_write32(csa, state->trcclaimset, TRCCLAIMSET);
if (!drvdata->skip_power_up)
- writel_relaxed(state->trcpdcr, drvdata->base + TRCPDCR);
+ etm4x_relaxed_write32(csa, state->trcpdcr, TRCPDCR);
drvdata->state_needs_restore = false;
@@ -1386,7 +1812,7 @@ static void etm4_cpu_restore(struct etmv4_drvdata *drvdata)
/* Unlock the OS lock to re-enable trace and external debug access */
etm4_os_unlock(drvdata);
- CS_LOCK(drvdata->base);
+ etm4_cs_lock(drvdata, csa);
}
static int etm4_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd,
@@ -1473,15 +1899,15 @@ static void etm4_pm_clear(void)
}
}
-static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
+static int etm4_probe(struct device *dev, void __iomem *base, u32 etm_pid)
{
int ret;
- void __iomem *base;
- struct device *dev = &adev->dev;
struct coresight_platform_data *pdata = NULL;
struct etmv4_drvdata *drvdata;
- struct resource *res = &adev->res;
struct coresight_desc desc = { 0 };
+ struct etm4_init_arg init_arg = { 0 };
+ u8 major, minor;
+ char *type_name;
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
@@ -1500,33 +1926,53 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
return -ENOMEM;
}
- if (fwnode_property_present(dev_fwnode(dev), "qcom,skip-power-up"))
- drvdata->skip_power_up = true;
-
- /* Validity for the resource is already checked by the AMBA core */
- base = devm_ioremap_resource(dev, res);
- if (IS_ERR(base))
- return PTR_ERR(base);
-
drvdata->base = base;
- spin_lock_init(&drvdata->spinlock);
+ raw_spin_lock_init(&drvdata->spinlock);
drvdata->cpu = coresight_get_cpu(dev);
if (drvdata->cpu < 0)
return drvdata->cpu;
- desc.name = devm_kasprintf(dev, GFP_KERNEL, "etm%d", drvdata->cpu);
- if (!desc.name)
- return -ENOMEM;
+ /* Select the CPU used for SMP cross-calls targeting this ETM */
+ drvdata->rc_cpu = coresight_get_etm_sync_mode() == SYNC_MODE_SW_GLOBAL ?
+ SYNC_GLOBAL_CORE : drvdata->cpu;
+
+ /* Enable workarounds for silicon errata */
+ drvdata->etm_quirks = coresight_get_etm_quirks(etm_pid);
+
+ init_arg.drvdata = drvdata;
+ init_arg.csa = &desc.access;
+ init_arg.pid = etm_pid;
if (smp_call_function_single(drvdata->cpu,
- etm4_init_arch_data, drvdata, 1))
+ etm4_init_arch_data, &init_arg, 1))
dev_err(dev, "ETM arch init failed\n");
- if (etm4_arch_supported(drvdata->arch) == false)
+ if (!drvdata->arch)
return -EINVAL;
+ /* TRCPDCR is not accessible with system instructions. */
+ if (!desc.access.io_mem ||
+ fwnode_property_present(dev_fwnode(dev), "qcom,skip-power-up"))
+ drvdata->skip_power_up = true;
+
+ major = ETM_ARCH_MAJOR_VERSION(drvdata->arch);
+ minor = ETM_ARCH_MINOR_VERSION(drvdata->arch);
+
+ if (etm4x_is_ete(drvdata)) {
+ type_name = "ete";
+ /* ETE v1 has major version == 0b101. Adjust this for logging. */
+ major -= 4;
+ } else {
+ type_name = "etm";
+ }
+
+ desc.name = devm_kasprintf(dev, GFP_KERNEL,
+ "%s%d", type_name, drvdata->cpu);
+ if (!desc.name)
+ return -ENOMEM;
+
etm4_init_trace_id(drvdata);
etm4_set_default(&drvdata->config);
@@ -1534,7 +1980,7 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
if (IS_ERR(pdata))
return PTR_ERR(pdata);
- adev->dev.platform_data = pdata;
+ dev->platform_data = pdata;
desc.type = CORESIGHT_DEV_TYPE_SOURCE;
desc.subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
@@ -1554,9 +2000,8 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
etmdrvdata[drvdata->cpu] = drvdata;
- pm_runtime_put(&adev->dev);
- dev_info(&drvdata->csdev->dev, "CPU%d: ETM v%d.%d initialized\n",
- drvdata->cpu, drvdata->arch >> 4, drvdata->arch & 0xf);
+ dev_info(&drvdata->csdev->dev, "CPU%d: %s v%d.%d initialized\n",
+ drvdata->cpu, type_name, major, minor);
if (boot_enable) {
coresight_enable(drvdata->csdev);
@@ -1566,11 +2011,49 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
return 0;
}
+static int etm4_probe_amba(struct amba_device *adev, const struct amba_id *id)
+{
+ void __iomem *base;
+ struct device *dev = &adev->dev;
+ struct resource *res = &adev->res;
+ int ret;
+
+ /* Validity for the resource is already checked by the AMBA core */
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ ret = etm4_probe(dev, base, id->id);
+ if (!ret)
+ pm_runtime_put(&adev->dev);
+
+ return ret;
+}
+
+static int etm4_probe_platform_dev(struct platform_device *pdev)
+{
+ int ret;
+
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ /*
+ * System register based devices can identify the hardware
+ * by reading the appropriate ID registers directly, so the
+ * peripheral ID can be skipped here.
+ */
+ ret = etm4_probe(&pdev->dev, NULL, 0);
+
+ pm_runtime_put(&pdev->dev);
+ return ret;
+}
+
static struct amba_cs_uci_id uci_id_etm4[] = {
{
/* ETMv4 UCI data */
- .devarch = 0x47704a13,
- .devarch_mask = 0xfff0ffff,
+ .devarch = ETM_DEVARCH_ETMv4x_ARCH,
+ .devarch_mask = ETM_DEVARCH_ID_MASK,
.devtype = 0x00000013,
}
};
@@ -1582,15 +2065,12 @@ static void clear_etmdrvdata(void *info)
etmdrvdata[cpu] = NULL;
}
-static void etm4_remove(struct amba_device *adev)
+static void __exit etm4_remove_dev(struct etmv4_drvdata *drvdata)
{
- struct etmv4_drvdata *drvdata = dev_get_drvdata(&adev->dev);
-
etm_perf_symlink(drvdata->csdev, false);
-
/*
- * Taking hotplug lock here to avoid racing between etm4_remove and
- * CPU hotplug call backs.
+ * Taking hotplug lock here to avoid racing between etm4_remove_dev()
+ * and CPU hotplug callbacks.
*/
cpus_read_lock();
/*
@@ -1607,13 +2087,35 @@ static void etm4_remove(struct amba_device *adev)
coresight_unregister(drvdata->csdev);
}
+static void __exit etm4_remove_amba(struct amba_device *adev)
+{
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(&adev->dev);
+
+ if (drvdata)
+ etm4_remove_dev(drvdata);
+}
+
+static int __exit etm4_remove_platform_dev(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(&pdev->dev);
+
+ if (drvdata)
+ etm4_remove_dev(drvdata);
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+}
+
static const struct amba_id etm4_ids[] = {
CS_AMBA_ID(0x000bb95d), /* Cortex-A53 */
CS_AMBA_ID(0x000bb95e), /* Cortex-A57 */
CS_AMBA_ID(0x000bb95a), /* Cortex-A72 */
CS_AMBA_ID(0x000bb959), /* Cortex-A73 */
CS_AMBA_UCI_ID(0x000bb9da, uci_id_etm4),/* Cortex-A35 */
+ CS_AMBA_UCI_ID(0x000bbd05, uci_id_etm4),/* Cortex-A55 */
+ CS_AMBA_UCI_ID(0x000bbd0a, uci_id_etm4),/* Cortex-A75 */
CS_AMBA_UCI_ID(0x000bbd0c, uci_id_etm4),/* Neoverse N1 */
+ CS_AMBA_UCI_ID(0x000bbd41, uci_id_etm4),/* Cortex-A78 */
CS_AMBA_UCI_ID(0x000f0205, uci_id_etm4),/* Qualcomm Kryo */
CS_AMBA_UCI_ID(0x000f0211, uci_id_etm4),/* Qualcomm Kryo */
CS_AMBA_UCI_ID(0x000bb802, uci_id_etm4),/* Qualcomm Kryo 385 Cortex-A55 */
@@ -1621,6 +2123,7 @@ static const struct amba_id etm4_ids[] = {
CS_AMBA_UCI_ID(0x000bb805, uci_id_etm4),/* Qualcomm Kryo 4XX Cortex-A55 */
CS_AMBA_UCI_ID(0x000bb804, uci_id_etm4),/* Qualcomm Kryo 4XX Cortex-A76 */
CS_AMBA_UCI_ID(0x000cc0af, uci_id_etm4),/* Marvell ThunderX2 */
+ CS_AMBA_UCI_ID(0x000cc210, uci_id_etm4),/* Marvell OcteonTX2 CN9XXX */
CS_AMBA_UCI_ID(0x000b6d01, uci_id_etm4),/* HiSilicon-Hip08 */
CS_AMBA_UCI_ID(0x000b6d02, uci_id_etm4),/* HiSilicon-Hip09 */
{},
@@ -1628,17 +2131,33 @@ static const struct amba_id etm4_ids[] = {
MODULE_DEVICE_TABLE(amba, etm4_ids);
-static struct amba_driver etm4x_driver = {
+static struct amba_driver etm4x_amba_driver = {
.drv = {
.name = "coresight-etm4x",
.owner = THIS_MODULE,
.suppress_bind_attrs = true,
},
- .probe = etm4_probe,
- .remove = etm4_remove,
+ .probe = etm4_probe_amba,
+ .remove = etm4_remove_amba,
.id_table = etm4_ids,
};
+static const struct of_device_id etm4_sysreg_match[] = {
+ { .compatible = "arm,coresight-etm4x-sysreg" },
+ { .compatible = "arm,embedded-trace-extension" },
+ {}
+};
+
+static struct platform_driver etm4_platform_driver = {
+ .probe = etm4_probe_platform_dev,
+ .remove = etm4_remove_platform_dev,
+ .driver = {
+ .name = "coresight-etm4x",
+ .of_match_table = etm4_sysreg_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
static int __init etm4x_init(void)
{
int ret;
@@ -1649,18 +2168,28 @@ static int __init etm4x_init(void)
if (ret)
return ret;
- ret = amba_driver_register(&etm4x_driver);
+ ret = amba_driver_register(&etm4x_amba_driver);
if (ret) {
- pr_err("Error registering etm4x driver\n");
- etm4_pm_clear();
+ pr_err("Error registering etm4x AMBA driver\n");
+ goto clear_pm;
}
+ ret = platform_driver_register(&etm4_platform_driver);
+ if (!ret)
+ return 0;
+
+ pr_err("Error registering etm4x platform driver\n");
+ amba_driver_unregister(&etm4x_amba_driver);
+
+clear_pm:
+ etm4_pm_clear();
return ret;
}
static void __exit etm4x_exit(void)
{
- amba_driver_unregister(&etm4x_driver);
+ amba_driver_unregister(&etm4x_amba_driver);
+ platform_driver_unregister(&etm4_platform_driver);
etm4_pm_clear();
}
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
index 42cc38c89f3b..91700ca1c770 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
@@ -9,6 +9,7 @@
#include <linux/sysfs.h>
#include "coresight-etm4x.h"
#include "coresight-priv.h"
+#include "coresight-quirks.h"
static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude)
{
@@ -173,7 +174,7 @@ static ssize_t reset_store(struct device *dev,
if (kstrtoul(buf, 16, &val))
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
if (val)
config->mode = 0x0;
@@ -267,7 +268,7 @@ static ssize_t reset_store(struct device *dev,
drvdata->trcid = drvdata->cpu + 1;
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
@@ -296,7 +297,7 @@ static ssize_t mode_store(struct device *dev,
if (kstrtoul(buf, 16, &val))
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
config->mode = val & ETMv4_MODE_ALL;
if (drvdata->instrp0 == true) {
@@ -433,7 +434,7 @@ static ssize_t mode_store(struct device *dev,
if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
etm4_config_trace_mode(config);
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
@@ -462,14 +463,14 @@ static ssize_t pe_store(struct device *dev,
if (kstrtoul(buf, 16, &val))
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
if (val > drvdata->nr_pe) {
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return -EINVAL;
}
config->pe_sel = val;
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(pe);
@@ -497,7 +498,7 @@ static ssize_t event_store(struct device *dev,
if (kstrtoul(buf, 16, &val))
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
switch (drvdata->nr_event) {
case 0x0:
/* EVENT0, bits[7:0] */
@@ -518,7 +519,7 @@ static ssize_t event_store(struct device *dev,
default:
break;
}
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(event);
@@ -546,7 +547,7 @@ static ssize_t event_instren_store(struct device *dev,
if (kstrtoul(buf, 16, &val))
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
/* start by clearing all instruction event enable bits */
config->eventctrl1 &= ~(BIT(0) | BIT(1) | BIT(2) | BIT(3));
switch (drvdata->nr_event) {
@@ -569,7 +570,7 @@ static ssize_t event_instren_store(struct device *dev,
default:
break;
}
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(event_instren);
@@ -730,11 +731,11 @@ static ssize_t event_vinst_store(struct device *dev,
if (kstrtoul(buf, 16, &val))
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
val &= ETMv4_EVENT_MASK;
config->vinst_ctrl &= ~ETMv4_EVENT_MASK;
config->vinst_ctrl |= val;
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(event_vinst);
@@ -747,7 +748,7 @@ static ssize_t s_exlevel_vinst_show(struct device *dev,
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
- val = (config->vinst_ctrl & ETM_EXLEVEL_S_VICTLR_MASK) >> 16;
+ val = (config->vinst_ctrl & TRCVICTLR_EXLEVEL_S_MASK) >> TRCVICTLR_EXLEVEL_S_SHIFT;
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
@@ -762,13 +763,13 @@ static ssize_t s_exlevel_vinst_store(struct device *dev,
if (kstrtoul(buf, 16, &val))
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
/* clear all EXLEVEL_S bits */
- config->vinst_ctrl &= ~(ETM_EXLEVEL_S_VICTLR_MASK);
+ config->vinst_ctrl &= ~(TRCVICTLR_EXLEVEL_S_MASK);
/* enable instruction tracing for corresponding exception level */
val &= drvdata->s_ex_level;
- config->vinst_ctrl |= (val << 16);
- spin_unlock(&drvdata->spinlock);
+ config->vinst_ctrl |= (val << TRCVICTLR_EXLEVEL_S_SHIFT);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(s_exlevel_vinst);
@@ -782,7 +783,7 @@ static ssize_t ns_exlevel_vinst_show(struct device *dev,
struct etmv4_config *config = &drvdata->config;
/* EXLEVEL_NS, bits[23:20] */
- val = (config->vinst_ctrl & ETM_EXLEVEL_NS_VICTLR_MASK) >> 20;
+ val = (config->vinst_ctrl & TRCVICTLR_EXLEVEL_NS_MASK) >> TRCVICTLR_EXLEVEL_NS_SHIFT;
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
@@ -797,13 +798,13 @@ static ssize_t ns_exlevel_vinst_store(struct device *dev,
if (kstrtoul(buf, 16, &val))
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
/* clear EXLEVEL_NS bits */
- config->vinst_ctrl &= ~(ETM_EXLEVEL_NS_VICTLR_MASK);
+ config->vinst_ctrl &= ~(TRCVICTLR_EXLEVEL_NS_MASK);
/* enable instruction tracing for corresponding exception level */
val &= drvdata->ns_ex_level;
- config->vinst_ctrl |= (val << 20);
- spin_unlock(&drvdata->spinlock);
+ config->vinst_ctrl |= (val << TRCVICTLR_EXLEVEL_NS_SHIFT);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(ns_exlevel_vinst);
@@ -837,9 +838,9 @@ static ssize_t addr_idx_store(struct device *dev,
* Use spinlock to ensure index doesn't change while it gets
* dereferenced multiple times within a spinlock block elsewhere.
*/
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
config->addr_idx = val;
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(addr_idx);
@@ -853,7 +854,7 @@ static ssize_t addr_instdatatype_show(struct device *dev,
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->addr_idx;
val = BMVAL(config->addr_acc[idx], 0, 1);
len = scnprintf(buf, PAGE_SIZE, "%s\n",
@@ -861,7 +862,7 @@ static ssize_t addr_instdatatype_show(struct device *dev,
(val == ETM_DATA_LOAD_ADDR ? "data_load" :
(val == ETM_DATA_STORE_ADDR ? "data_store" :
"data_load_store")));
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return len;
}
@@ -879,13 +880,13 @@ static ssize_t addr_instdatatype_store(struct device *dev,
if (sscanf(buf, "%s", str) != 1)
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->addr_idx;
if (!strcmp(str, "instr"))
/* TYPE, bits[1:0] */
config->addr_acc[idx] &= ~(BIT(0) | BIT(1));
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(addr_instdatatype);
@@ -900,14 +901,14 @@ static ssize_t addr_single_show(struct device *dev,
struct etmv4_config *config = &drvdata->config;
idx = config->addr_idx;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return -EPERM;
}
val = (unsigned long)config->addr_val[idx];
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
@@ -923,17 +924,17 @@ static ssize_t addr_single_store(struct device *dev,
if (kstrtoul(buf, 16, &val))
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->addr_idx;
if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return -EPERM;
}
config->addr_val[idx] = (u64)val;
config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(addr_single);
@@ -947,23 +948,23 @@ static ssize_t addr_range_show(struct device *dev,
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->addr_idx;
if (idx % 2 != 0) {
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return -EPERM;
}
if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
(config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return -EPERM;
}
val1 = (unsigned long)config->addr_val[idx];
val2 = (unsigned long)config->addr_val[idx + 1];
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
}
@@ -986,10 +987,10 @@ static ssize_t addr_range_store(struct device *dev,
if (val1 > val2)
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->addr_idx;
if (idx % 2 != 0) {
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return -EPERM;
}
@@ -997,7 +998,7 @@ static ssize_t addr_range_store(struct device *dev,
config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
(config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return -EPERM;
}
@@ -1014,7 +1015,7 @@ static ssize_t addr_range_store(struct device *dev,
exclude = config->mode & ETM_MODE_EXCLUDE;
etm4_set_mode_exclude(drvdata, exclude ? true : false);
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(addr_range);
@@ -1028,17 +1029,17 @@ static ssize_t addr_start_show(struct device *dev,
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->addr_idx;
if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return -EPERM;
}
val = (unsigned long)config->addr_val[idx];
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
@@ -1054,22 +1055,22 @@ static ssize_t addr_start_store(struct device *dev,
if (kstrtoul(buf, 16, &val))
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->addr_idx;
if (!drvdata->nr_addr_cmp) {
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return -EINVAL;
}
if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return -EPERM;
}
config->addr_val[idx] = (u64)val;
config->addr_type[idx] = ETM_ADDR_TYPE_START;
config->vissctlr |= BIT(idx);
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(addr_start);
@@ -1083,17 +1084,17 @@ static ssize_t addr_stop_show(struct device *dev,
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->addr_idx;
if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return -EPERM;
}
val = (unsigned long)config->addr_val[idx];
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
@@ -1109,22 +1110,22 @@ static ssize_t addr_stop_store(struct device *dev,
if (kstrtoul(buf, 16, &val))
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->addr_idx;
if (!drvdata->nr_addr_cmp) {
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return -EINVAL;
}
if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return -EPERM;
}
config->addr_val[idx] = (u64)val;
config->addr_type[idx] = ETM_ADDR_TYPE_STOP;
config->vissctlr |= BIT(idx + 16);
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(addr_stop);
@@ -1138,14 +1139,14 @@ static ssize_t addr_ctxtype_show(struct device *dev,
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->addr_idx;
/* CONTEXTTYPE, bits[3:2] */
val = BMVAL(config->addr_acc[idx], 2, 3);
len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" :
(val == ETM_CTX_CTXID ? "ctxid" :
(val == ETM_CTX_VMID ? "vmid" : "all")));
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return len;
}
@@ -1163,7 +1164,7 @@ static ssize_t addr_ctxtype_store(struct device *dev,
if (sscanf(buf, "%s", str) != 1)
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->addr_idx;
if (!strcmp(str, "none"))
/* start by clearing context type bits */
@@ -1190,7 +1191,7 @@ static ssize_t addr_ctxtype_store(struct device *dev,
if (drvdata->numvmidc)
config->addr_acc[idx] |= BIT(3);
}
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(addr_ctxtype);
@@ -1204,11 +1205,11 @@ static ssize_t addr_context_show(struct device *dev,
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->addr_idx;
/* context ID comparator bits[6:4] */
val = BMVAL(config->addr_acc[idx], 4, 6);
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
@@ -1229,12 +1230,12 @@ static ssize_t addr_context_store(struct device *dev,
drvdata->numcidc : drvdata->numvmidc))
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->addr_idx;
/* clear context ID comparator bits[6:4] */
config->addr_acc[idx] &= ~(BIT(4) | BIT(5) | BIT(6));
config->addr_acc[idx] |= (val << 4);
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(addr_context);
@@ -1248,10 +1249,10 @@ static ssize_t addr_exlevel_s_ns_show(struct device *dev,
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->addr_idx;
val = BMVAL(config->addr_acc[idx], 8, 14);
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
@@ -1270,12 +1271,12 @@ static ssize_t addr_exlevel_s_ns_store(struct device *dev,
if (val & ~((GENMASK(14, 8) >> 8)))
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->addr_idx;
/* clear Exlevel_ns & Exlevel_s bits[14:12, 11:8], bit[15] is res0 */
config->addr_acc[idx] &= ~(GENMASK(14, 8));
config->addr_acc[idx] |= (val << 8);
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(addr_exlevel_s_ns);
@@ -1298,7 +1299,7 @@ static ssize_t addr_cmp_view_show(struct device *dev,
int size = 0;
bool exclude = false;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->addr_idx;
addr_v = config->addr_val[idx];
addr_ctrl = config->addr_acc[idx];
@@ -1313,7 +1314,7 @@ static ssize_t addr_cmp_view_show(struct device *dev,
}
exclude = config->viiectlr & BIT(idx / 2 + 16);
}
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
if (addr_type) {
size = scnprintf(buf, PAGE_SIZE, "addr_cmp[%i] %s %#lx", idx,
addr_type_names[addr_type], addr_v);
@@ -1357,9 +1358,9 @@ static ssize_t vinst_pe_cmp_start_stop_store(struct device *dev,
if (!drvdata->nr_pe_cmp)
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
config->vipcssctlr = val;
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(vinst_pe_cmp_start_stop);
@@ -1393,9 +1394,9 @@ static ssize_t seq_idx_store(struct device *dev,
* Use spinlock to ensure index doesn't change while it gets
* dereferenced multiple times within a spinlock block elsewhere.
*/
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
config->seq_idx = val;
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(seq_idx);
@@ -1439,10 +1440,10 @@ static ssize_t seq_event_show(struct device *dev,
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->seq_idx;
val = config->seq_ctrl[idx];
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
@@ -1458,11 +1459,11 @@ static ssize_t seq_event_store(struct device *dev,
if (kstrtoul(buf, 16, &val))
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->seq_idx;
/* Seq control has two masks B[15:8] F[7:0] */
config->seq_ctrl[idx] = val & 0xFFFF;
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(seq_event);
@@ -1526,9 +1527,9 @@ static ssize_t cntr_idx_store(struct device *dev,
* Use spinlock to ensure index doesn't change while it gets
* dereferenced multiple times within a spinlock block elsewhere.
*/
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
config->cntr_idx = val;
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(cntr_idx);
@@ -1542,10 +1543,10 @@ static ssize_t cntrldvr_show(struct device *dev,
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->cntr_idx;
val = config->cntrldvr[idx];
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
@@ -1563,10 +1564,10 @@ static ssize_t cntrldvr_store(struct device *dev,
if (val > ETM_CNTR_MAX_VAL)
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->cntr_idx;
config->cntrldvr[idx] = val;
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(cntrldvr);
@@ -1580,10 +1581,10 @@ static ssize_t cntr_val_show(struct device *dev,
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->cntr_idx;
val = config->cntr_val[idx];
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
@@ -1601,10 +1602,10 @@ static ssize_t cntr_val_store(struct device *dev,
if (val > ETM_CNTR_MAX_VAL)
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->cntr_idx;
config->cntr_val[idx] = val;
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(cntr_val);
@@ -1618,10 +1619,10 @@ static ssize_t cntr_ctrl_show(struct device *dev,
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->cntr_idx;
val = config->cntr_ctrl[idx];
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
@@ -1637,10 +1638,10 @@ static ssize_t cntr_ctrl_store(struct device *dev,
if (kstrtoul(buf, 16, &val))
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->cntr_idx;
config->cntr_ctrl[idx] = val;
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(cntr_ctrl);
@@ -1678,9 +1679,9 @@ static ssize_t res_idx_store(struct device *dev,
* Use spinlock to ensure index doesn't change while it gets
* dereferenced multiple times within a spinlock block elsewhere.
*/
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
config->res_idx = val;
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(res_idx);
@@ -1694,10 +1695,10 @@ static ssize_t res_ctrl_show(struct device *dev,
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->res_idx;
val = config->res_ctrl[idx];
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
@@ -1713,14 +1714,14 @@ static ssize_t res_ctrl_store(struct device *dev,
if (kstrtoul(buf, 16, &val))
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->res_idx;
/* For odd idx pair inversal bit is RES0 */
if (idx % 2 != 0)
/* PAIRINV, bit[21] */
val &= ~BIT(21);
config->res_ctrl[idx] = val & GENMASK(21, 0);
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(res_ctrl);
@@ -1749,9 +1750,9 @@ static ssize_t sshot_idx_store(struct device *dev,
if (val >= drvdata->nr_ss_cmp)
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
config->ss_idx = val;
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(sshot_idx);
@@ -1764,9 +1765,9 @@ static ssize_t sshot_ctrl_show(struct device *dev,
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
val = config->ss_ctrl[config->ss_idx];
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
@@ -1782,12 +1783,12 @@ static ssize_t sshot_ctrl_store(struct device *dev,
if (kstrtoul(buf, 16, &val))
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->ss_idx;
config->ss_ctrl[idx] = val & GENMASK(24, 0);
/* must clear bit 31 in related status register on programming */
config->ss_status[idx] &= ~BIT(31);
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(sshot_ctrl);
@@ -1799,9 +1800,9 @@ static ssize_t sshot_status_show(struct device *dev,
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
val = config->ss_status[config->ss_idx];
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(sshot_status);
@@ -1814,9 +1815,9 @@ static ssize_t sshot_pe_ctrl_show(struct device *dev,
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
val = config->ss_pe_cmp[config->ss_idx];
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
@@ -1832,12 +1833,12 @@ static ssize_t sshot_pe_ctrl_store(struct device *dev,
if (kstrtoul(buf, 16, &val))
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->ss_idx;
config->ss_pe_cmp[idx] = val & GENMASK(7, 0);
/* must clear bit 31 in related status register on programming */
config->ss_status[idx] &= ~BIT(31);
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(sshot_pe_ctrl);
@@ -1871,9 +1872,9 @@ static ssize_t ctxid_idx_store(struct device *dev,
* Use spinlock to ensure index doesn't change while it gets
* dereferenced multiple times within a spinlock block elsewhere.
*/
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
config->ctxid_idx = val;
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(ctxid_idx);
@@ -1894,10 +1895,10 @@ static ssize_t ctxid_pid_show(struct device *dev,
if (task_active_pid_ns(current) != &init_pid_ns)
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->ctxid_idx;
val = (unsigned long)config->ctxid_pid[idx];
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
@@ -1932,10 +1933,10 @@ static ssize_t ctxid_pid_store(struct device *dev,
if (kstrtoul(buf, 16, &pid))
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->ctxid_idx;
config->ctxid_pid[idx] = (u64)pid;
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(ctxid_pid);
@@ -1955,10 +1956,10 @@ static ssize_t ctxid_masks_show(struct device *dev,
if (task_active_pid_ns(current) != &init_pid_ns)
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
val1 = config->ctxid_mask0;
val2 = config->ctxid_mask1;
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
}
@@ -1991,7 +1992,7 @@ static ssize_t ctxid_masks_store(struct device *dev,
if ((drvdata->numcidc > 4) && (nr_inputs != 2))
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
/*
* each byte[0..3] controls mask value applied to ctxid
* comparator[0..3]
@@ -2063,7 +2064,7 @@ static ssize_t ctxid_masks_store(struct device *dev,
mask >>= 0x8;
}
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(ctxid_masks);
@@ -2097,9 +2098,9 @@ static ssize_t vmid_idx_store(struct device *dev,
* Use spinlock to ensure index doesn't change while it gets
* dereferenced multiple times within a spinlock block elsewhere.
*/
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
config->vmid_idx = val;
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(vmid_idx);
@@ -2133,9 +2134,9 @@ static ssize_t vmid_val_store(struct device *dev,
if (kstrtoul(buf, 16, &val))
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
config->vmid_val[config->vmid_idx] = (u64)val;
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(vmid_val);
@@ -2147,10 +2148,10 @@ static ssize_t vmid_masks_show(struct device *dev,
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
val1 = config->vmid_mask0;
val2 = config->vmid_mask1;
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
}
@@ -2175,7 +2176,7 @@ static ssize_t vmid_masks_store(struct device *dev,
if ((drvdata->numvmidc > 4) && (nr_inputs != 2))
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
/*
* each byte[0..3] controls mask value applied to vmid
@@ -2248,7 +2249,7 @@ static ssize_t vmid_masks_store(struct device *dev,
else
mask >>= 0x8;
}
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(vmid_masks);
@@ -2323,7 +2324,8 @@ static struct attribute *coresight_etmv4_attrs[] = {
};
struct etmv4_reg {
- void __iomem *addr;
+ struct coresight_device *csdev;
+ u32 offset;
u32 data;
};
@@ -2331,89 +2333,169 @@ static void do_smp_cross_read(void *data)
{
struct etmv4_reg *reg = data;
- reg->data = readl_relaxed(reg->addr);
+ reg->data = etm4x_relaxed_read32(&reg->csdev->access, reg->offset);
}
-static u32 etmv4_cross_read(const struct device *dev, u32 offset)
+static u32 etmv4_cross_read(const struct etmv4_drvdata *drvdata, u32 offset)
{
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);
struct etmv4_reg reg;
- reg.addr = drvdata->base + offset;
+ reg.offset = offset;
+ reg.csdev = drvdata->csdev;
+
/*
* smp cross call ensures the CPU will be powered up before
* accessing the ETMv4 trace core registers
+ *
+ * Note: when task isolation is enabled, the target CPU used
+ * is always the primary core, so the assumption above (that the
+ * CPU associated with the ETM is powered up while its registers
+ * are accessed) no longer holds. Using an smp call does, however,
+ * still keep the register access atomic.
+ */
+ smp_call_function_single(drvdata->rc_cpu, do_smp_cross_read, &reg, 1);
+
+ /*
+ * OcteonTX2 hardware reports its version as ETMv4.2 but it supports
+ * the Ignore Packet feature of ETMv4.3. Hence, treat it as compatible
+ * with ETMv4.3.
*/
- smp_call_function_single(drvdata->cpu, do_smp_cross_read, &reg, 1);
+ if ((offset == TRCIDR1) &&
+ (drvdata->etm_quirks & CORESIGHT_QUIRK_ETM_TREAT_ETMv43)) {
+ reg.data &= ~0xF0;
+ reg.data |= 0x30;
+ }
+
return reg.data;
}
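For illustration, a minimal stand-alone sketch of the TRCIDR1 fix-up performed in etmv4_cross_read() above (the TRCIDR1 value below is hypothetical): only the minor-version nibble, bits[7:4], is rewritten, so a tracer reporting ETMv4.2 is presented to the rest of the driver as ETMv4.3.

    #include <assert.h>
    #include <stdint.h>

    /* Same fix-up as applied to TRCIDR1 reads when the quirk flag is set. */
    static uint32_t treat_as_etmv43(uint32_t trcidr1)
    {
        trcidr1 &= ~0xF0u;  /* clear the minor version, bits[7:4] */
        trcidr1 |= 0x30;    /* report minor version 3 instead */
        return trcidr1;
    }

    int main(void)
    {
        /* hypothetical TRCIDR1 fields: major 4 in bits[11:8], minor 2 in bits[7:4] */
        assert(treat_as_etmv43(0x421) == 0x431);
        return 0;
    }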
-#define coresight_etm4x_reg(name, offset) \
- coresight_simple_reg32(struct etmv4_drvdata, name, offset)
+static inline u32 coresight_etm4x_attr_to_offset(struct device_attribute *attr)
+{
+ struct dev_ext_attribute *eattr;
+
+ eattr = container_of(attr, struct dev_ext_attribute, attr);
+ return (u32)(unsigned long)eattr->var;
+}
+
+static ssize_t coresight_etm4x_reg_show(struct device *dev,
+ struct device_attribute *d_attr,
+ char *buf)
+{
+ u32 val, offset;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ offset = coresight_etm4x_attr_to_offset(d_attr);
+
+ pm_runtime_get_sync(dev->parent);
+ val = etmv4_cross_read(drvdata, offset);
+ pm_runtime_put_sync(dev->parent);
+
+ return scnprintf(buf, PAGE_SIZE, "0x%x\n", val);
+}
+
+static inline bool
+etm4x_register_implemented(struct etmv4_drvdata *drvdata, u32 offset)
+{
+ switch (offset) {
+ ETM_COMMON_SYSREG_LIST_CASES
+ /*
+ * Common registers to ETE & ETM4x accessible via system
+ * instructions are always implemented.
+ */
+ return true;
+
+ ETM4x_ONLY_SYSREG_LIST_CASES
+ /*
+ * We only support etm4x and ete. So if the device is not
+ * ETE, it must be ETMv4x.
+ */
+ return !etm4x_is_ete(drvdata);
+
+ ETM4x_MMAP_LIST_CASES
+ /*
+ * Registers accessible only via the memory-mapped interface
+ * must not be accessed via system instructions.
+ * We cannot use drvdata->csdev here, as this function is
+ * called during device creation, via coresight_register(),
+ * before the csdev is initialized. So rely on drvdata->base
+ * to detect whether we have memory-mapped access.
+ * Also, ETE doesn't implement memory-mapped access, thus
+ * it is sufficient to check that we are using MMIO.
+ */
+ return !!drvdata->base;
+
+ ETE_ONLY_SYSREG_LIST_CASES
+ return etm4x_is_ete(drvdata);
+ }
+
+ return false;
+}
+
+/*
+ * Hide the ETM4x registers that may not be available on the
+ * hardware.
+ * There are certain management registers unavailable via system
+ * instructions. Make those sysfs attributes hidden on such
+ * systems.
+ */
+static umode_t
+coresight_etm4x_attr_reg_implemented(struct kobject *kobj,
+ struct attribute *attr, int unused)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct device_attribute *d_attr;
+ u32 offset;
-#define coresight_etm4x_cross_read(name, offset) \
- coresight_simple_func(struct etmv4_drvdata, etmv4_cross_read, \
- name, offset)
+ d_attr = container_of(attr, struct device_attribute, attr);
+ offset = coresight_etm4x_attr_to_offset(d_attr);
-coresight_etm4x_reg(trcpdcr, TRCPDCR);
-coresight_etm4x_reg(trcpdsr, TRCPDSR);
-coresight_etm4x_reg(trclsr, TRCLSR);
-coresight_etm4x_reg(trcauthstatus, TRCAUTHSTATUS);
-coresight_etm4x_reg(trcdevid, TRCDEVID);
-coresight_etm4x_reg(trcdevtype, TRCDEVTYPE);
-coresight_etm4x_reg(trcpidr0, TRCPIDR0);
-coresight_etm4x_reg(trcpidr1, TRCPIDR1);
-coresight_etm4x_reg(trcpidr2, TRCPIDR2);
-coresight_etm4x_reg(trcpidr3, TRCPIDR3);
-coresight_etm4x_cross_read(trcoslsr, TRCOSLSR);
-coresight_etm4x_cross_read(trcconfig, TRCCONFIGR);
-coresight_etm4x_cross_read(trctraceid, TRCTRACEIDR);
+ if (etm4x_register_implemented(drvdata, offset))
+ return attr->mode;
+ return 0;
+}
+
+#define coresight_etm4x_reg(name, offset) \
+ &((struct dev_ext_attribute[]) { \
+ { \
+ __ATTR(name, 0444, coresight_etm4x_reg_show, NULL), \
+ (void *)(unsigned long)offset \
+ } \
+ })[0].attr.attr
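The macro above relies on a compound-literal trick to attach a register offset to a plain struct attribute, which coresight_etm4x_attr_to_offset() later recovers with container_of(). A minimal user-space sketch of the same technique, using hypothetical stand-in types rather than the kernel's struct dev_ext_attribute (the 0xFBC offset is only an example):

    #include <stddef.h>
    #include <stdio.h>

    struct attr { const char *name; };                  /* stand-in for struct attribute */
    struct ext_attr { struct attr attr; void *var; };   /* stand-in for dev_ext_attribute */

    #define my_container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    /* One-element compound-literal array, as in coresight_etm4x_reg(). */
    #define REG_ATTR(_name, _offset) \
        (&((struct ext_attr[]) { \
            { { #_name }, (void *)(unsigned long)(_offset) } \
        })[0].attr)

    int main(void)
    {
        struct attr *a = REG_ATTR(trcdevarch, 0xFBC);
        struct ext_attr *ea = my_container_of(a, struct ext_attr, attr);

        /* The offset stashed in ->var is recovered from the bare attr pointer. */
        printf("%s @ %#lx\n", a->name, (unsigned long)ea->var);
        return 0;
    }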
static struct attribute *coresight_etmv4_mgmt_attrs[] = {
- &dev_attr_trcoslsr.attr,
- &dev_attr_trcpdcr.attr,
- &dev_attr_trcpdsr.attr,
- &dev_attr_trclsr.attr,
- &dev_attr_trcconfig.attr,
- &dev_attr_trctraceid.attr,
- &dev_attr_trcauthstatus.attr,
- &dev_attr_trcdevid.attr,
- &dev_attr_trcdevtype.attr,
- &dev_attr_trcpidr0.attr,
- &dev_attr_trcpidr1.attr,
- &dev_attr_trcpidr2.attr,
- &dev_attr_trcpidr3.attr,
+ coresight_etm4x_reg(trcpdcr, TRCPDCR),
+ coresight_etm4x_reg(trcpdsr, TRCPDSR),
+ coresight_etm4x_reg(trclsr, TRCLSR),
+ coresight_etm4x_reg(trcauthstatus, TRCAUTHSTATUS),
+ coresight_etm4x_reg(trcdevid, TRCDEVID),
+ coresight_etm4x_reg(trcdevtype, TRCDEVTYPE),
+ coresight_etm4x_reg(trcpidr0, TRCPIDR0),
+ coresight_etm4x_reg(trcpidr1, TRCPIDR1),
+ coresight_etm4x_reg(trcpidr2, TRCPIDR2),
+ coresight_etm4x_reg(trcpidr3, TRCPIDR3),
+ coresight_etm4x_reg(trcoslsr, TRCOSLSR),
+ coresight_etm4x_reg(trcconfig, TRCCONFIGR),
+ coresight_etm4x_reg(trctraceid, TRCTRACEIDR),
+ coresight_etm4x_reg(trcdevarch, TRCDEVARCH),
NULL,
};
-coresight_etm4x_cross_read(trcidr0, TRCIDR0);
-coresight_etm4x_cross_read(trcidr1, TRCIDR1);
-coresight_etm4x_cross_read(trcidr2, TRCIDR2);
-coresight_etm4x_cross_read(trcidr3, TRCIDR3);
-coresight_etm4x_cross_read(trcidr4, TRCIDR4);
-coresight_etm4x_cross_read(trcidr5, TRCIDR5);
-/* trcidr[6,7] are reserved */
-coresight_etm4x_cross_read(trcidr8, TRCIDR8);
-coresight_etm4x_cross_read(trcidr9, TRCIDR9);
-coresight_etm4x_cross_read(trcidr10, TRCIDR10);
-coresight_etm4x_cross_read(trcidr11, TRCIDR11);
-coresight_etm4x_cross_read(trcidr12, TRCIDR12);
-coresight_etm4x_cross_read(trcidr13, TRCIDR13);
-
static struct attribute *coresight_etmv4_trcidr_attrs[] = {
- &dev_attr_trcidr0.attr,
- &dev_attr_trcidr1.attr,
- &dev_attr_trcidr2.attr,
- &dev_attr_trcidr3.attr,
- &dev_attr_trcidr4.attr,
- &dev_attr_trcidr5.attr,
+ coresight_etm4x_reg(trcidr0, TRCIDR0),
+ coresight_etm4x_reg(trcidr1, TRCIDR1),
+ coresight_etm4x_reg(trcidr2, TRCIDR2),
+ coresight_etm4x_reg(trcidr3, TRCIDR3),
+ coresight_etm4x_reg(trcidr4, TRCIDR4),
+ coresight_etm4x_reg(trcidr5, TRCIDR5),
/* trcidr[6,7] are reserved */
- &dev_attr_trcidr8.attr,
- &dev_attr_trcidr9.attr,
- &dev_attr_trcidr10.attr,
- &dev_attr_trcidr11.attr,
- &dev_attr_trcidr12.attr,
- &dev_attr_trcidr13.attr,
+ coresight_etm4x_reg(trcidr8, TRCIDR8),
+ coresight_etm4x_reg(trcidr9, TRCIDR9),
+ coresight_etm4x_reg(trcidr10, TRCIDR10),
+ coresight_etm4x_reg(trcidr11, TRCIDR11),
+ coresight_etm4x_reg(trcidr12, TRCIDR12),
+ coresight_etm4x_reg(trcidr13, TRCIDR13),
NULL,
};
@@ -2422,6 +2504,7 @@ static const struct attribute_group coresight_etmv4_group = {
};
static const struct attribute_group coresight_etmv4_mgmt_group = {
+ .is_visible = coresight_etm4x_attr_reg_implemented,
.attrs = coresight_etmv4_mgmt_attrs,
.name = "mgmt",
};
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
index eefc7371c6c4..afceee6d4186 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.h
+++ b/drivers/hwtracing/coresight/coresight-etm4x.h
@@ -8,6 +8,7 @@
#include <asm/local.h>
#include <linux/spinlock.h>
+#include <linux/types.h>
#include "coresight-priv.h"
/*
@@ -28,6 +29,7 @@
#define TRCAUXCTLR 0x018
#define TRCEVENTCTL0R 0x020
#define TRCEVENTCTL1R 0x024
+#define TRCRSR 0x028
#define TRCSTALLCTLR 0x02C
#define TRCTSCTLR 0x030
#define TRCSYNCPR 0x034
@@ -44,13 +46,14 @@
#define TRCVDSACCTLR 0x0A4
#define TRCVDARCCTLR 0x0A8
/* Derived resources registers */
-#define TRCSEQEVRn(n) (0x100 + (n * 4))
+#define TRCSEQEVRn(n) (0x100 + (n * 4)) /* n = 0-2 */
#define TRCSEQRSTEVR 0x118
#define TRCSEQSTR 0x11C
#define TRCEXTINSELR 0x120
-#define TRCCNTRLDVRn(n) (0x140 + (n * 4))
-#define TRCCNTCTLRn(n) (0x150 + (n * 4))
-#define TRCCNTVRn(n) (0x160 + (n * 4))
+#define TRCEXTINSELRn(n) (0x120 + (n * 4)) /* n = 0-3 */
+#define TRCCNTRLDVRn(n) (0x140 + (n * 4)) /* n = 0-3 */
+#define TRCCNTCTLRn(n) (0x150 + (n * 4)) /* n = 0-3 */
+#define TRCCNTVRn(n) (0x160 + (n * 4)) /* n = 0-3 */
/* ID registers */
#define TRCIDR8 0x180
#define TRCIDR9 0x184
@@ -59,7 +62,7 @@
#define TRCIDR12 0x190
#define TRCIDR13 0x194
#define TRCIMSPEC0 0x1C0
-#define TRCIMSPECn(n) (0x1C0 + (n * 4))
+#define TRCIMSPECn(n) (0x1C0 + (n * 4)) /* n = 1-7 */
#define TRCIDR0 0x1E0
#define TRCIDR1 0x1E4
#define TRCIDR2 0x1E8
@@ -68,9 +71,12 @@
#define TRCIDR5 0x1F4
#define TRCIDR6 0x1F8
#define TRCIDR7 0x1FC
-/* Resource selection registers */
+/*
+ * Resource selection registers, n = 2-31.
+ * First pair (regs 0, 1) is always present and is reserved.
+ */
#define TRCRSCTLRn(n) (0x200 + (n * 4))
-/* Single-shot comparator registers */
+/* Single-shot comparator registers, n = 0-7 */
#define TRCSSCCRn(n) (0x280 + (n * 4))
#define TRCSSCSRn(n) (0x2A0 + (n * 4))
#define TRCSSPCICRn(n) (0x2C0 + (n * 4))
@@ -80,11 +86,13 @@
#define TRCPDCR 0x310
#define TRCPDSR 0x314
/* Trace registers (0x318-0xEFC) */
-/* Comparator registers */
+/* Address Comparator registers n = 0-15 */
#define TRCACVRn(n) (0x400 + (n * 8))
#define TRCACATRn(n) (0x480 + (n * 8))
+/* Data Value Comparator Value registers, n = 0-7 */
#define TRCDVCVRn(n) (0x500 + (n * 16))
#define TRCDVCMRn(n) (0x580 + (n * 16))
+/* ContextID/Virtual ContextID comparators, n = 0-7 */
#define TRCCIDCVRn(n) (0x600 + (n * 8))
#define TRCVMIDCVRn(n) (0x640 + (n * 8))
#define TRCCIDCCTLR0 0x680
@@ -120,6 +128,368 @@
#define TRCCIDR2 0xFF8
#define TRCCIDR3 0xFFC
+#define TRCRSR_TA BIT(12)
+
+/*
+ * System instructions to access ETM registers.
+ * See ETMv4.4 spec ARM IHI0064F section 4.3.6 System instructions
+ */
+#define ETM4x_OFFSET_TO_REG(x) ((x) >> 2)
+
+#define ETM4x_CRn(n) (((n) >> 7) & 0x7)
+#define ETM4x_Op2(n) (((n) >> 4) & 0x7)
+#define ETM4x_CRm(n) ((n) & 0xf)
+
+#include <asm/sysreg.h>
+#define ETM4x_REG_NUM_TO_SYSREG(n) \
+ sys_reg(2, 1, ETM4x_CRn(n), ETM4x_CRm(n), ETM4x_Op2(n))
+
+#define READ_ETM4x_REG(reg) \
+ read_sysreg_s(ETM4x_REG_NUM_TO_SYSREG((reg)))
+#define WRITE_ETM4x_REG(val, reg) \
+ write_sysreg_s(val, ETM4x_REG_NUM_TO_SYSREG((reg)))
+
+#define read_etm4x_sysreg_const_offset(offset) \
+ READ_ETM4x_REG(ETM4x_OFFSET_TO_REG(offset))
+
+#define write_etm4x_sysreg_const_offset(val, offset) \
+ WRITE_ETM4x_REG(val, ETM4x_OFFSET_TO_REG(offset))
+
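As a worked example of this encoding (illustrative only): TRCPRGCTLR at offset 0x004 gives register number 0x1, which splits into CRn=0, CRm=1, Op2=0, i.e. sys_reg(2, 1, 0, 1, 0); TRCSEQEVRn(0) at offset 0x100 gives sys_reg(2, 1, 0, 0, 4). The arithmetic can be checked with a small stand-alone program:

    #include <stdio.h>

    /* Same arithmetic as ETM4x_OFFSET_TO_REG()/ETM4x_CRn()/ETM4x_Op2()/ETM4x_CRm(). */
    static void decode(unsigned int offset)
    {
        unsigned int reg = offset >> 2;

        printf("offset %#05x -> sys_reg(2, 1, %u, %u, %u)\n", offset,
               (reg >> 7) & 0x7,    /* CRn */
               reg & 0xf,           /* CRm */
               (reg >> 4) & 0x7);   /* Op2 */
    }

    int main(void)
    {
        decode(0x004);  /* TRCPRGCTLR    -> sys_reg(2, 1, 0, 1, 0) */
        decode(0x100);  /* TRCSEQEVRn(0) -> sys_reg(2, 1, 0, 0, 4) */
        return 0;
    }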
+#define CASE_READ(res, x) \
+ case (x): { (res) = read_etm4x_sysreg_const_offset((x)); break; }
+
+#define CASE_WRITE(val, x) \
+ case (x): { write_etm4x_sysreg_const_offset((val), (x)); break; }
+
+#define CASE_NOP(__unused, x) \
+ case (x): /* fall through */
+
+#define ETE_ONLY_SYSREG_LIST(op, val) \
+ CASE_##op((val), TRCRSR) \
+ CASE_##op((val), TRCEXTINSELRn(1)) \
+ CASE_##op((val), TRCEXTINSELRn(2)) \
+ CASE_##op((val), TRCEXTINSELRn(3))
+
+/* List of registers accessible via System instructions */
+#define ETM4x_ONLY_SYSREG_LIST(op, val) \
+ CASE_##op((val), TRCPROCSELR) \
+ CASE_##op((val), TRCVDCTLR) \
+ CASE_##op((val), TRCVDSACCTLR) \
+ CASE_##op((val), TRCVDARCCTLR) \
+ CASE_##op((val), TRCOSLAR)
+
+#define ETM_COMMON_SYSREG_LIST(op, val) \
+ CASE_##op((val), TRCPRGCTLR) \
+ CASE_##op((val), TRCSTATR) \
+ CASE_##op((val), TRCCONFIGR) \
+ CASE_##op((val), TRCAUXCTLR) \
+ CASE_##op((val), TRCEVENTCTL0R) \
+ CASE_##op((val), TRCEVENTCTL1R) \
+ CASE_##op((val), TRCSTALLCTLR) \
+ CASE_##op((val), TRCTSCTLR) \
+ CASE_##op((val), TRCSYNCPR) \
+ CASE_##op((val), TRCCCCTLR) \
+ CASE_##op((val), TRCBBCTLR) \
+ CASE_##op((val), TRCTRACEIDR) \
+ CASE_##op((val), TRCQCTLR) \
+ CASE_##op((val), TRCVICTLR) \
+ CASE_##op((val), TRCVIIECTLR) \
+ CASE_##op((val), TRCVISSCTLR) \
+ CASE_##op((val), TRCVIPCSSCTLR) \
+ CASE_##op((val), TRCSEQEVRn(0)) \
+ CASE_##op((val), TRCSEQEVRn(1)) \
+ CASE_##op((val), TRCSEQEVRn(2)) \
+ CASE_##op((val), TRCSEQRSTEVR) \
+ CASE_##op((val), TRCSEQSTR) \
+ CASE_##op((val), TRCEXTINSELR) \
+ CASE_##op((val), TRCCNTRLDVRn(0)) \
+ CASE_##op((val), TRCCNTRLDVRn(1)) \
+ CASE_##op((val), TRCCNTRLDVRn(2)) \
+ CASE_##op((val), TRCCNTRLDVRn(3)) \
+ CASE_##op((val), TRCCNTCTLRn(0)) \
+ CASE_##op((val), TRCCNTCTLRn(1)) \
+ CASE_##op((val), TRCCNTCTLRn(2)) \
+ CASE_##op((val), TRCCNTCTLRn(3)) \
+ CASE_##op((val), TRCCNTVRn(0)) \
+ CASE_##op((val), TRCCNTVRn(1)) \
+ CASE_##op((val), TRCCNTVRn(2)) \
+ CASE_##op((val), TRCCNTVRn(3)) \
+ CASE_##op((val), TRCIDR8) \
+ CASE_##op((val), TRCIDR9) \
+ CASE_##op((val), TRCIDR10) \
+ CASE_##op((val), TRCIDR11) \
+ CASE_##op((val), TRCIDR12) \
+ CASE_##op((val), TRCIDR13) \
+ CASE_##op((val), TRCIMSPECn(0)) \
+ CASE_##op((val), TRCIMSPECn(1)) \
+ CASE_##op((val), TRCIMSPECn(2)) \
+ CASE_##op((val), TRCIMSPECn(3)) \
+ CASE_##op((val), TRCIMSPECn(4)) \
+ CASE_##op((val), TRCIMSPECn(5)) \
+ CASE_##op((val), TRCIMSPECn(6)) \
+ CASE_##op((val), TRCIMSPECn(7)) \
+ CASE_##op((val), TRCIDR0) \
+ CASE_##op((val), TRCIDR1) \
+ CASE_##op((val), TRCIDR2) \
+ CASE_##op((val), TRCIDR3) \
+ CASE_##op((val), TRCIDR4) \
+ CASE_##op((val), TRCIDR5) \
+ CASE_##op((val), TRCIDR6) \
+ CASE_##op((val), TRCIDR7) \
+ CASE_##op((val), TRCRSCTLRn(2)) \
+ CASE_##op((val), TRCRSCTLRn(3)) \
+ CASE_##op((val), TRCRSCTLRn(4)) \
+ CASE_##op((val), TRCRSCTLRn(5)) \
+ CASE_##op((val), TRCRSCTLRn(6)) \
+ CASE_##op((val), TRCRSCTLRn(7)) \
+ CASE_##op((val), TRCRSCTLRn(8)) \
+ CASE_##op((val), TRCRSCTLRn(9)) \
+ CASE_##op((val), TRCRSCTLRn(10)) \
+ CASE_##op((val), TRCRSCTLRn(11)) \
+ CASE_##op((val), TRCRSCTLRn(12)) \
+ CASE_##op((val), TRCRSCTLRn(13)) \
+ CASE_##op((val), TRCRSCTLRn(14)) \
+ CASE_##op((val), TRCRSCTLRn(15)) \
+ CASE_##op((val), TRCRSCTLRn(16)) \
+ CASE_##op((val), TRCRSCTLRn(17)) \
+ CASE_##op((val), TRCRSCTLRn(18)) \
+ CASE_##op((val), TRCRSCTLRn(19)) \
+ CASE_##op((val), TRCRSCTLRn(20)) \
+ CASE_##op((val), TRCRSCTLRn(21)) \
+ CASE_##op((val), TRCRSCTLRn(22)) \
+ CASE_##op((val), TRCRSCTLRn(23)) \
+ CASE_##op((val), TRCRSCTLRn(24)) \
+ CASE_##op((val), TRCRSCTLRn(25)) \
+ CASE_##op((val), TRCRSCTLRn(26)) \
+ CASE_##op((val), TRCRSCTLRn(27)) \
+ CASE_##op((val), TRCRSCTLRn(28)) \
+ CASE_##op((val), TRCRSCTLRn(29)) \
+ CASE_##op((val), TRCRSCTLRn(30)) \
+ CASE_##op((val), TRCRSCTLRn(31)) \
+ CASE_##op((val), TRCSSCCRn(0)) \
+ CASE_##op((val), TRCSSCCRn(1)) \
+ CASE_##op((val), TRCSSCCRn(2)) \
+ CASE_##op((val), TRCSSCCRn(3)) \
+ CASE_##op((val), TRCSSCCRn(4)) \
+ CASE_##op((val), TRCSSCCRn(5)) \
+ CASE_##op((val), TRCSSCCRn(6)) \
+ CASE_##op((val), TRCSSCCRn(7)) \
+ CASE_##op((val), TRCSSCSRn(0)) \
+ CASE_##op((val), TRCSSCSRn(1)) \
+ CASE_##op((val), TRCSSCSRn(2)) \
+ CASE_##op((val), TRCSSCSRn(3)) \
+ CASE_##op((val), TRCSSCSRn(4)) \
+ CASE_##op((val), TRCSSCSRn(5)) \
+ CASE_##op((val), TRCSSCSRn(6)) \
+ CASE_##op((val), TRCSSCSRn(7)) \
+ CASE_##op((val), TRCSSPCICRn(0)) \
+ CASE_##op((val), TRCSSPCICRn(1)) \
+ CASE_##op((val), TRCSSPCICRn(2)) \
+ CASE_##op((val), TRCSSPCICRn(3)) \
+ CASE_##op((val), TRCSSPCICRn(4)) \
+ CASE_##op((val), TRCSSPCICRn(5)) \
+ CASE_##op((val), TRCSSPCICRn(6)) \
+ CASE_##op((val), TRCSSPCICRn(7)) \
+ CASE_##op((val), TRCOSLSR) \
+ CASE_##op((val), TRCACVRn(0)) \
+ CASE_##op((val), TRCACVRn(1)) \
+ CASE_##op((val), TRCACVRn(2)) \
+ CASE_##op((val), TRCACVRn(3)) \
+ CASE_##op((val), TRCACVRn(4)) \
+ CASE_##op((val), TRCACVRn(5)) \
+ CASE_##op((val), TRCACVRn(6)) \
+ CASE_##op((val), TRCACVRn(7)) \
+ CASE_##op((val), TRCACVRn(8)) \
+ CASE_##op((val), TRCACVRn(9)) \
+ CASE_##op((val), TRCACVRn(10)) \
+ CASE_##op((val), TRCACVRn(11)) \
+ CASE_##op((val), TRCACVRn(12)) \
+ CASE_##op((val), TRCACVRn(13)) \
+ CASE_##op((val), TRCACVRn(14)) \
+ CASE_##op((val), TRCACVRn(15)) \
+ CASE_##op((val), TRCACATRn(0)) \
+ CASE_##op((val), TRCACATRn(1)) \
+ CASE_##op((val), TRCACATRn(2)) \
+ CASE_##op((val), TRCACATRn(3)) \
+ CASE_##op((val), TRCACATRn(4)) \
+ CASE_##op((val), TRCACATRn(5)) \
+ CASE_##op((val), TRCACATRn(6)) \
+ CASE_##op((val), TRCACATRn(7)) \
+ CASE_##op((val), TRCACATRn(8)) \
+ CASE_##op((val), TRCACATRn(9)) \
+ CASE_##op((val), TRCACATRn(10)) \
+ CASE_##op((val), TRCACATRn(11)) \
+ CASE_##op((val), TRCACATRn(12)) \
+ CASE_##op((val), TRCACATRn(13)) \
+ CASE_##op((val), TRCACATRn(14)) \
+ CASE_##op((val), TRCACATRn(15)) \
+ CASE_##op((val), TRCDVCVRn(0)) \
+ CASE_##op((val), TRCDVCVRn(1)) \
+ CASE_##op((val), TRCDVCVRn(2)) \
+ CASE_##op((val), TRCDVCVRn(3)) \
+ CASE_##op((val), TRCDVCVRn(4)) \
+ CASE_##op((val), TRCDVCVRn(5)) \
+ CASE_##op((val), TRCDVCVRn(6)) \
+ CASE_##op((val), TRCDVCVRn(7)) \
+ CASE_##op((val), TRCDVCMRn(0)) \
+ CASE_##op((val), TRCDVCMRn(1)) \
+ CASE_##op((val), TRCDVCMRn(2)) \
+ CASE_##op((val), TRCDVCMRn(3)) \
+ CASE_##op((val), TRCDVCMRn(4)) \
+ CASE_##op((val), TRCDVCMRn(5)) \
+ CASE_##op((val), TRCDVCMRn(6)) \
+ CASE_##op((val), TRCDVCMRn(7)) \
+ CASE_##op((val), TRCCIDCVRn(0)) \
+ CASE_##op((val), TRCCIDCVRn(1)) \
+ CASE_##op((val), TRCCIDCVRn(2)) \
+ CASE_##op((val), TRCCIDCVRn(3)) \
+ CASE_##op((val), TRCCIDCVRn(4)) \
+ CASE_##op((val), TRCCIDCVRn(5)) \
+ CASE_##op((val), TRCCIDCVRn(6)) \
+ CASE_##op((val), TRCCIDCVRn(7)) \
+ CASE_##op((val), TRCVMIDCVRn(0)) \
+ CASE_##op((val), TRCVMIDCVRn(1)) \
+ CASE_##op((val), TRCVMIDCVRn(2)) \
+ CASE_##op((val), TRCVMIDCVRn(3)) \
+ CASE_##op((val), TRCVMIDCVRn(4)) \
+ CASE_##op((val), TRCVMIDCVRn(5)) \
+ CASE_##op((val), TRCVMIDCVRn(6)) \
+ CASE_##op((val), TRCVMIDCVRn(7)) \
+ CASE_##op((val), TRCCIDCCTLR0) \
+ CASE_##op((val), TRCCIDCCTLR1) \
+ CASE_##op((val), TRCVMIDCCTLR0) \
+ CASE_##op((val), TRCVMIDCCTLR1) \
+ CASE_##op((val), TRCCLAIMSET) \
+ CASE_##op((val), TRCCLAIMCLR) \
+ CASE_##op((val), TRCAUTHSTATUS) \
+ CASE_##op((val), TRCDEVARCH) \
+ CASE_##op((val), TRCDEVID)
+
+/* List of registers only accessible via memory-mapped interface */
+#define ETM_MMAP_LIST(op, val) \
+ CASE_##op((val), TRCDEVTYPE) \
+ CASE_##op((val), TRCPDCR) \
+ CASE_##op((val), TRCPDSR) \
+ CASE_##op((val), TRCDEVAFF0) \
+ CASE_##op((val), TRCDEVAFF1) \
+ CASE_##op((val), TRCLAR) \
+ CASE_##op((val), TRCLSR) \
+ CASE_##op((val), TRCITCTRL) \
+ CASE_##op((val), TRCPIDR4) \
+ CASE_##op((val), TRCPIDR0) \
+ CASE_##op((val), TRCPIDR1) \
+ CASE_##op((val), TRCPIDR2) \
+ CASE_##op((val), TRCPIDR3)
+
+#define ETM4x_READ_SYSREG_CASES(res) \
+ ETM_COMMON_SYSREG_LIST(READ, (res)) \
+ ETM4x_ONLY_SYSREG_LIST(READ, (res))
+
+#define ETM4x_WRITE_SYSREG_CASES(val) \
+ ETM_COMMON_SYSREG_LIST(WRITE, (val)) \
+ ETM4x_ONLY_SYSREG_LIST(WRITE, (val))
+
+#define ETM_COMMON_SYSREG_LIST_CASES \
+ ETM_COMMON_SYSREG_LIST(NOP, __unused)
+
+#define ETM4x_ONLY_SYSREG_LIST_CASES \
+ ETM4x_ONLY_SYSREG_LIST(NOP, __unused)
+
+#define ETM4x_SYSREG_LIST_CASES \
+ ETM_COMMON_SYSREG_LIST_CASES \
+ ETM4x_ONLY_SYSREG_LIST(NOP, __unused)
+
+#define ETM4x_MMAP_LIST_CASES ETM_MMAP_LIST(NOP, __unused)
+
+/* ETE only supports system register access */
+#define ETE_READ_CASES(res) \
+ ETM_COMMON_SYSREG_LIST(READ, (res)) \
+ ETE_ONLY_SYSREG_LIST(READ, (res))
+
+#define ETE_WRITE_CASES(val) \
+ ETM_COMMON_SYSREG_LIST(WRITE, (val)) \
+ ETE_ONLY_SYSREG_LIST(WRITE, (val))
+
+#define ETE_ONLY_SYSREG_LIST_CASES \
+ ETE_ONLY_SYSREG_LIST(NOP, __unused)
+
+#define read_etm4x_sysreg_offset(offset, _64bit) \
+ ({ \
+ u64 __val; \
+ \
+ if (__builtin_constant_p((offset))) \
+ __val = read_etm4x_sysreg_const_offset((offset)); \
+ else \
+ __val = etm4x_sysreg_read((offset), true, (_64bit)); \
+ __val; \
+ })
+
+#define write_etm4x_sysreg_offset(val, offset, _64bit) \
+ do { \
+ if (__builtin_constant_p((offset))) \
+ write_etm4x_sysreg_const_offset((val), \
+ (offset)); \
+ else \
+ etm4x_sysreg_write((val), (offset), true, \
+ (_64bit)); \
+ } while (0)
+
+
+#define etm4x_relaxed_read32(csa, offset) \
+ ((u32)((csa)->io_mem ? \
+ readl_relaxed((csa)->base + (offset)) : \
+ read_etm4x_sysreg_offset((offset), false)))
+
+#define etm4x_relaxed_read64(csa, offset) \
+ ((u64)((csa)->io_mem ? \
+ readq_relaxed((csa)->base + (offset)) : \
+ read_etm4x_sysreg_offset((offset), true)))
+
+#define etm4x_read32(csa, offset) \
+ ({ \
+ u32 __val = etm4x_relaxed_read32((csa), (offset)); \
+ __iormb(__val); \
+ __val; \
+ })
+
+#define etm4x_read64(csa, offset) \
+ ({ \
+ u64 __val = etm4x_relaxed_read64((csa), (offset)); \
+ __iormb(__val); \
+ __val; \
+ })
+
+#define etm4x_relaxed_write32(csa, val, offset) \
+ do { \
+ if ((csa)->io_mem) \
+ writel_relaxed((val), (csa)->base + (offset)); \
+ else \
+ write_etm4x_sysreg_offset((val), (offset), \
+ false); \
+ } while (0)
+
+#define etm4x_relaxed_write64(csa, val, offset) \
+ do { \
+ if ((csa)->io_mem) \
+ writeq_relaxed((val), (csa)->base + (offset)); \
+ else \
+ write_etm4x_sysreg_offset((val), (offset), \
+ true); \
+ } while (0)
+
+#define etm4x_write32(csa, val, offset) \
+ do { \
+ __iowmb(); \
+ etm4x_relaxed_write32((csa), (val), (offset)); \
+ } while (0)
+
+#define etm4x_write64(csa, val, offset) \
+ do { \
+ __iowmb(); \
+ etm4x_relaxed_write64((csa), (val), (offset)); \
+ } while (0)
+
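A rough user-space sketch of the dispatch pattern these accessors implement, with a hypothetical stand-in for struct csdev_access and a faked system-register backend: a single io_mem flag selects between the memory-mapped window and system instructions, so callers stay access-agnostic.

    #include <stdint.h>
    #include <stdio.h>

    struct access {                 /* hypothetical stand-in for csdev_access */
        int io_mem;                 /* true: MMIO window, false: sysreg backend */
        uint32_t *base;             /* simulated register window */
    };

    static uint32_t sysreg_read(uint32_t offset)
    {
        /* placeholder for read_etm4x_sysreg_offset(offset, false) */
        return 0x41004242u ^ offset;
    }

    static uint32_t relaxed_read32(struct access *csa, uint32_t offset)
    {
        return csa->io_mem ? csa->base[offset / 4] : sysreg_read(offset);
    }

    int main(void)
    {
        uint32_t regs[0x1000 / 4] = { [0x1E4 / 4] = 0x4100F421 };  /* TRCIDR1 */
        struct access mmio = { .io_mem = 1, .base = regs };
        struct access sysi = { .io_mem = 0, .base = NULL };

        printf("TRCIDR1 via MMIO:   %#x\n", relaxed_read32(&mmio, 0x1E4));
        printf("TRCIDR1 via sysreg: %#x\n", relaxed_read32(&sysi, 0x1E4));
        return 0;
    }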
+
/* ETMv4 resources */
#define ETM_MAX_NR_PE 8
#define ETMv4_MAX_CNTR 4
@@ -136,7 +506,6 @@
#define ETM_MAX_RES_SEL 32
#define ETM_MAX_SS_CMP 8
-#define ETM_ARCH_V4 0x40
#define ETMv4_SYNC_MASK 0x1F
#define ETM_CYC_THRESHOLD_MASK 0xFFF
#define ETM_CYC_THRESHOLD_DEFAULT 0x100
@@ -174,34 +543,174 @@
ETM_MODE_EXCL_KERN | \
ETM_MODE_EXCL_USER)
+/*
+ * TRCOSLSR.OSLM advertises the OS Lock model.
+ * OSLM[2:0] = TRCOSLSR[4:3,0]
+ *
+ * 0b000 - Trace OS Lock is not implemented.
+ * 0b010 - Trace OS Lock is implemented.
+ * 0b100 - Trace OS Lock is not implemented, unit is controlled by PE OS Lock.
+ */
+#define ETM_OSLOCK_NI 0b000
+#define ETM_OSLOCK_PRESENT 0b010
+#define ETM_OSLOCK_PE 0b100
+
+#define ETM_OSLSR_OSLM(oslsr) ((((oslsr) & GENMASK(4, 3)) >> 2) | (oslsr & 0x1))
+
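For example (values illustrative): a TRCOSLSR of 0x08 has bit[3] set, so ETM_OSLSR_OSLM() yields 0b010 and a Trace OS Lock is implemented; 0x10 yields 0b100, i.e. the unit is controlled by the PE OS Lock. A quick check of the bit gymnastics:

    #include <assert.h>

    #define GENMASK(h, l)   (((1u << ((h) - (l) + 1)) - 1) << (l))
    #define OSLM(oslsr)     ((((oslsr) & GENMASK(4, 3)) >> 2) | ((oslsr) & 0x1))

    int main(void)
    {
        assert(OSLM(0x00) == 0x0);  /* Trace OS Lock not implemented */
        assert(OSLM(0x08) == 0x2);  /* Trace OS Lock implemented */
        assert(OSLM(0x10) == 0x4);  /* controlled by the PE OS Lock */
        return 0;
    }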
+/*
+ * TRCDEVARCH Bit field definitions
+ * Bits[31:21] - ARCHITECT = Always Arm Ltd.
+ * * Bits[31:28] = 0x4
+ * * Bits[27:21] = 0b0111011
+ * Bit[20] - PRESENT, indicates the presence of this register.
+ *
+ * Bits[19:16] - REVISION, revision of the architecture.
+ *
+ * Bits[15:0] - ARCHID, identifies this component as an ETM
+ * * Bits[15:12] - architecture version of ETM
+ * * = 4 for ETMv4
+ * * Bits[11:0] = 0xA13, architecture part number for ETM.
+ */
+#define ETM_DEVARCH_ARCHITECT_MASK GENMASK(31, 21)
+#define ETM_DEVARCH_ARCHITECT_ARM ((0x4 << 28) | (0b0111011 << 21))
+#define ETM_DEVARCH_PRESENT BIT(20)
+#define ETM_DEVARCH_REVISION_SHIFT 16
+#define ETM_DEVARCH_REVISION_MASK GENMASK(19, 16)
+#define ETM_DEVARCH_REVISION(x) \
+ (((x) & ETM_DEVARCH_REVISION_MASK) >> ETM_DEVARCH_REVISION_SHIFT)
+#define ETM_DEVARCH_ARCHID_MASK GENMASK(15, 0)
+#define ETM_DEVARCH_ARCHID_ARCH_VER_SHIFT 12
+#define ETM_DEVARCH_ARCHID_ARCH_VER_MASK GENMASK(15, 12)
+#define ETM_DEVARCH_ARCHID_ARCH_VER(x) \
+ (((x) & ETM_DEVARCH_ARCHID_ARCH_VER_MASK) >> ETM_DEVARCH_ARCHID_ARCH_VER_SHIFT)
+
+#define ETM_DEVARCH_MAKE_ARCHID_ARCH_VER(ver) \
+ (((ver) << ETM_DEVARCH_ARCHID_ARCH_VER_SHIFT) & ETM_DEVARCH_ARCHID_ARCH_VER_MASK)
+
+#define ETM_DEVARCH_ARCHID_ARCH_PART(x) ((x) & 0xfffUL)
+
+#define ETM_DEVARCH_MAKE_ARCHID(major) \
+ ((ETM_DEVARCH_MAKE_ARCHID_ARCH_VER(major)) | ETM_DEVARCH_ARCHID_ARCH_PART(0xA13))
+
+#define ETM_DEVARCH_ARCHID_ETMv4x ETM_DEVARCH_MAKE_ARCHID(0x4)
+#define ETM_DEVARCH_ARCHID_ETE ETM_DEVARCH_MAKE_ARCHID(0x5)
+
+#define ETM_DEVARCH_ID_MASK \
+ (ETM_DEVARCH_ARCHITECT_MASK | ETM_DEVARCH_ARCHID_MASK | ETM_DEVARCH_PRESENT)
+#define ETM_DEVARCH_ETMv4x_ARCH \
+ (ETM_DEVARCH_ARCHITECT_ARM | ETM_DEVARCH_ARCHID_ETMv4x | ETM_DEVARCH_PRESENT)
+#define ETM_DEVARCH_ETE_ARCH \
+ (ETM_DEVARCH_ARCHITECT_ARM | ETM_DEVARCH_ARCHID_ETE | ETM_DEVARCH_PRESENT)
+
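Putting these fields together as a worked example (pure arithmetic, no new driver code): ETM_DEVARCH_ETMv4x_ARCH evaluates to (0x4 << 28) | (0b0111011 << 21) | BIT(20) | 0x4A13 = 0x47704A13, and the ETE variant only changes the ARCHID version nibble, giving 0x47705A13.

    #include <assert.h>
    #include <stdint.h>

    #define BIT(n)          (1u << (n))
    #define ARCHITECT_ARM   ((0x4u << 28) | (0x3Bu << 21))  /* 0x3B == 0b0111011 */
    #define ARCHID(major)   (((uint32_t)(major) << 12) | 0xA13u)

    int main(void)
    {
        uint32_t etm4x = ARCHITECT_ARM | BIT(20) | ARCHID(0x4);
        uint32_t ete   = ARCHITECT_ARM | BIT(20) | ARCHID(0x5);

        assert(etm4x == 0x47704A13);    /* ETM_DEVARCH_ETMv4x_ARCH */
        assert(ete   == 0x47705A13);    /* ETM_DEVARCH_ETE_ARCH */
        return 0;
    }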
#define TRCSTATR_IDLE_BIT 0
#define TRCSTATR_PMSTABLE_BIT 1
#define ETM_DEFAULT_ADDR_COMP 0
+#define TRCSSCSRn_PC BIT(3)
+
/* PowerDown Control Register bits */
#define TRCPDCR_PU BIT(3)
-/* secure state access levels - TRCACATRn */
-#define ETM_EXLEVEL_S_APP BIT(8)
-#define ETM_EXLEVEL_S_OS BIT(9)
-#define ETM_EXLEVEL_S_HYP BIT(10)
-#define ETM_EXLEVEL_S_MON BIT(11)
-/* non-secure state access levels - TRCACATRn */
-#define ETM_EXLEVEL_NS_APP BIT(12)
-#define ETM_EXLEVEL_NS_OS BIT(13)
-#define ETM_EXLEVEL_NS_HYP BIT(14)
-#define ETM_EXLEVEL_NS_NA BIT(15)
+#define TRCACATR_EXLEVEL_SHIFT 8
-/* access level control in TRCVICTLR - same bits as TRCACATRn but shifted */
-#define ETM_EXLEVEL_LSHIFT_TRCVICTLR 8
+/*
+ * Exception level mask for Secure and Non-Secure ELs.
+ * ETM defines the bits for EL control (e.g. TRCVICTLR, TRCACATRn).
+ * The Secure and Non-Secure ELs are always defined together.
+ * Non-secure EL3 is never implemented.
+ * We use the following generic masks as they appear in different
+ * registers and can be shifted to the appropriate fields.
+ */
+#define ETM_EXLEVEL_S_APP BIT(0) /* Secure EL0 */
+#define ETM_EXLEVEL_S_OS BIT(1) /* Secure EL1 */
+#define ETM_EXLEVEL_S_HYP BIT(2) /* Secure EL2 */
+#define ETM_EXLEVEL_S_MON BIT(3) /* Secure EL3/Monitor */
+#define ETM_EXLEVEL_NS_APP BIT(4) /* NonSecure EL0 */
+#define ETM_EXLEVEL_NS_OS BIT(5) /* NonSecure EL1 */
+#define ETM_EXLEVEL_NS_HYP BIT(6) /* NonSecure EL2 */
+
+#define ETM_EXLEVEL_MASK (GENMASK(6, 0))
+#define ETM_EXLEVEL_S_MASK (GENMASK(3, 0))
+#define ETM_EXLEVEL_NS_MASK (GENMASK(6, 4))
+
+/* access level controls in TRCACATRn */
+#define TRCACATR_EXLEVEL_SHIFT 8
+
+/* access level control in TRCVICTLR */
+#define TRCVICTLR_EXLEVEL_SHIFT 16
+#define TRCVICTLR_EXLEVEL_S_SHIFT 16
+#define TRCVICTLR_EXLEVEL_NS_SHIFT 20
/* secure / non secure masks - TRCVICTLR, IDR3 */
-#define ETM_EXLEVEL_S_VICTLR_MASK GENMASK(19, 16)
-/* NS MON (EL3) mode never implemented */
-#define ETM_EXLEVEL_NS_VICTLR_MASK GENMASK(22, 20)
+#define TRCVICTLR_EXLEVEL_MASK (ETM_EXLEVEL_MASK << TRCVICTLR_EXLEVEL_SHIFT)
+#define TRCVICTLR_EXLEVEL_S_MASK (ETM_EXLEVEL_S_MASK << TRCVICTLR_EXLEVEL_SHIFT)
+#define TRCVICTLR_EXLEVEL_NS_MASK (ETM_EXLEVEL_NS_MASK << TRCVICTLR_EXLEVEL_SHIFT)
+
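As an illustration of how these generic bits land in a real register (sketch only): selecting Secure EL1 and Non-secure EL1 gives ETM_EXLEVEL_S_OS | ETM_EXLEVEL_NS_OS = 0x22, which occupies bits [17] and [21] of TRCVICTLR once shifted by TRCVICTLR_EXLEVEL_SHIFT.

    #include <assert.h>

    #define BIT(n)                  (1u << (n))
    #define EXLEVEL_S_OS            BIT(1)  /* Secure EL1 */
    #define EXLEVEL_NS_OS           BIT(5)  /* Non-secure EL1 */
    #define VICTLR_EXLEVEL_SHIFT    16

    int main(void)
    {
        unsigned int mask = EXLEVEL_S_OS | EXLEVEL_NS_OS;   /* 0x22 */

        /* In TRCVICTLR the same selection occupies bits [17] and [21]. */
        assert((mask << VICTLR_EXLEVEL_SHIFT) == (BIT(17) | BIT(21)));
        return 0;
    }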
+#define ETM_TRCIDR1_ARCH_MAJOR_SHIFT 8
+#define ETM_TRCIDR1_ARCH_MAJOR_MASK (0xfU << ETM_TRCIDR1_ARCH_MAJOR_SHIFT)
+#define ETM_TRCIDR1_ARCH_MAJOR(x) \
+ (((x) & ETM_TRCIDR1_ARCH_MAJOR_MASK) >> ETM_TRCIDR1_ARCH_MAJOR_SHIFT)
+#define ETM_TRCIDR1_ARCH_MINOR_SHIFT 4
+#define ETM_TRCIDR1_ARCH_MINOR_MASK (0xfU << ETM_TRCIDR1_ARCH_MINOR_SHIFT)
+#define ETM_TRCIDR1_ARCH_MINOR(x) \
+ (((x) & ETM_TRCIDR1_ARCH_MINOR_MASK) >> ETM_TRCIDR1_ARCH_MINOR_SHIFT)
+#define ETM_TRCIDR1_ARCH_SHIFT ETM_TRCIDR1_ARCH_MINOR_SHIFT
+#define ETM_TRCIDR1_ARCH_MASK \
+ (ETM_TRCIDR1_ARCH_MAJOR_MASK | ETM_TRCIDR1_ARCH_MINOR_MASK)
+
+#define ETM_TRCIDR1_ARCH_ETMv4 0x4
+
+/*
+ * Driver representation of the ETM architecture.
+ * The version of an ETM component can be detected from
+ *
+ * TRCDEVARCH - CoreSight architected register
+ * - Bits[15:12] - Major version
+ * - Bits[19:16] - Minor version
+ * TRCIDR1 - ETM architected register
+ * - Bits[11:8] - Major version
+ * - Bits[7:4] - Minor version
+ * We must rely on TRCDEVARCH for the version information;
+ * however, we don't want to break support for potential
+ * older implementations which might not implement it. Thus
+ * we fall back to TRCIDR1 if TRCDEVARCH is not implemented
+ * for memory-mapped components.
+ * To make version-dependent decisions easier, the driver
+ * uses an internal representation of the version, as follows:
+ *
+ * ETM_ARCH_VERSION[7:0], where:
+ * Bits[7:4] - Major version
+ * Bits[3:0] - Minor version
+ */
+#define ETM_ARCH_VERSION(major, minor) \
+ ((((major) & 0xfU) << 4) | (((minor) & 0xfU)))
+#define ETM_ARCH_MAJOR_VERSION(arch) (((arch) >> 4) & 0xfU)
+#define ETM_ARCH_MINOR_VERSION(arch) ((arch) & 0xfU)
+
+#define ETM_ARCH_V4 ETM_ARCH_VERSION(4, 0)
+#define ETM_ARCH_ETE ETM_ARCH_VERSION(5, 0)
/* Interpretation of resource numbers change at ETM v4.3 architecture */
-#define ETM4X_ARCH_4V3 0x43
+#define ETM_ARCH_V4_3 ETM_ARCH_VERSION(4, 3)
+
+static inline u8 etm_devarch_to_arch(u32 devarch)
+{
+ return ETM_ARCH_VERSION(ETM_DEVARCH_ARCHID_ARCH_VER(devarch),
+ ETM_DEVARCH_REVISION(devarch));
+}
+
+static inline u8 etm_trcidr_to_arch(u32 trcidr1)
+{
+ return ETM_ARCH_VERSION(ETM_TRCIDR1_ARCH_MAJOR(trcidr1),
+ ETM_TRCIDR1_ARCH_MINOR(trcidr1));
+}
+
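A worked example of the two helpers above (register values hypothetical but consistent with the field layouts): a TRCDEVARCH of 0x47704A13 with REVISION 0 maps to ETM_ARCH_VERSION(4, 0) = 0x40, while a TRCIDR1 carrying major 4 / minor 2 maps to 0x42, which is still below ETM_ARCH_V4_3.

    #include <assert.h>
    #include <stdint.h>

    #define ARCH_VERSION(maj, min)  ((((maj) & 0xfu) << 4) | ((min) & 0xfu))

    static uint8_t devarch_to_arch(uint32_t devarch)
    {
        return ARCH_VERSION((devarch >> 12) & 0xf,  /* ARCHID architecture version */
                            (devarch >> 16) & 0xf); /* REVISION */
    }

    static uint8_t trcidr1_to_arch(uint32_t trcidr1)
    {
        return ARCH_VERSION((trcidr1 >> 8) & 0xf,   /* major */
                            (trcidr1 >> 4) & 0xf);  /* minor */
    }

    int main(void)
    {
        assert(devarch_to_arch(0x47704A13) == 0x40);    /* ETM_ARCH_V4 */
        assert(trcidr1_to_arch(0x4100F421) == 0x42);    /* below ETM_ARCH_V4_3 (0x43) */
        return 0;
    }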
+enum etm_impdef_type {
+ ETM4_IMPDEF_HISI_CORE_COMMIT,
+ ETM4_IMPDEF_FEATURE_MAX,
+};
/**
* struct etmv4_config - configuration information related to an ETMv4
@@ -250,7 +759,7 @@
* @vmid_mask0: VM ID comparator mask for comparator 0-3.
* @vmid_mask1: VM ID comparator mask for comparator 4-7.
* @ext_inp: External input selection.
- * @arch: ETM architecture version (for arch dependent config).
+ * @s_ex_level: Secure ELs where tracing is supported.
*/
struct etmv4_config {
u32 mode;
@@ -294,7 +803,7 @@ struct etmv4_config {
u32 vmid_mask0;
u32 vmid_mask1;
u32 ext_inp;
- u8 arch;
+ u8 s_ex_level;
};
/**
@@ -363,7 +872,7 @@ struct etmv4_save_state {
* @spinlock: Only one at a time pls.
* @mode: This tracer's mode, i.e sysFS, Perf or disabled.
* @cpu: The cpu this component is affined to.
- * @arch: ETM version number.
+ * @arch: ETM architecture version.
* @nr_pe: The number of processing entity available for tracing.
* @nr_pe_cmp: The number of processing entity comparator inputs that are
* available for tracing.
@@ -410,17 +919,22 @@ struct etmv4_save_state {
* @nooverflow: Indicate if overflow prevention is supported.
* @atbtrig: If the implementation can support ATB triggers
* @lpoverride: If the implementation can support low-power state over.
+ * @trfc: If the implementation supports Arm v8.4 trace filter controls.
* @config: structure holding configuration parameters.
* @save_state: State to be preserved across power loss
* @state_needs_restore: True when there is context to restore after PM exit
* @skip_power_up: Indicates if an implementation can skip powering up
* the trace unit.
+ * @arch_features: Bitmap of arch features of etmv4 devices.
*/
struct etmv4_drvdata {
void __iomem *base;
struct coresight_device *csdev;
- spinlock_t spinlock;
+ raw_spinlock_t spinlock;
local_t mode;
+ u32 etm_quirks;
+ int hw_state;
+ int rc_cpu;
int cpu;
u8 arch;
u8 nr_pe;
@@ -444,6 +958,7 @@ struct etmv4_drvdata {
u8 s_ex_level;
u8 ns_ex_level;
u8 q_support;
+ u8 os_lock_model;
bool sticky_enable;
bool boot_enable;
bool os_unlock;
@@ -459,10 +974,12 @@ struct etmv4_drvdata {
bool nooverflow;
bool atbtrig;
bool lpoverride;
+ bool trfc;
struct etmv4_config config;
struct etmv4_save_state *save_state;
bool state_needs_restore;
bool skip_power_up;
+ DECLARE_BITMAP(arch_features, ETM4_IMPDEF_FEATURE_MAX);
};
/* Address comparator access types */
@@ -483,4 +1000,12 @@ enum etm_addr_ctxtype {
extern const struct attribute_group *coresight_etmv4_groups[];
void etm4_config_trace_mode(struct etmv4_config *config);
+
+u64 etm4x_sysreg_read(u32 offset, bool _relaxed, bool _64bit);
+void etm4x_sysreg_write(u64 val, u32 offset, bool _relaxed, bool _64bit);
+
+static inline bool etm4x_is_ete(struct etmv4_drvdata *drvdata)
+{
+ return drvdata->arch >= ETM_ARCH_ETE;
+}
#endif
diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c
index b2fb853776d7..b363dd6bc510 100644
--- a/drivers/hwtracing/coresight/coresight-funnel.c
+++ b/drivers/hwtracing/coresight/coresight-funnel.c
@@ -52,13 +52,14 @@ static int dynamic_funnel_enable_hw(struct funnel_drvdata *drvdata, int port)
{
u32 functl;
int rc = 0;
+ struct coresight_device *csdev = drvdata->csdev;
CS_UNLOCK(drvdata->base);
functl = readl_relaxed(drvdata->base + FUNNEL_FUNCTL);
/* Claim the device only when we enable the first slave */
if (!(functl & FUNNEL_ENSx_MASK)) {
- rc = coresight_claim_device_unlocked(drvdata->base);
+ rc = coresight_claim_device_unlocked(csdev);
if (rc)
goto done;
}
@@ -101,6 +102,7 @@ static void dynamic_funnel_disable_hw(struct funnel_drvdata *drvdata,
int inport)
{
u32 functl;
+ struct coresight_device *csdev = drvdata->csdev;
CS_UNLOCK(drvdata->base);
@@ -110,7 +112,7 @@ static void dynamic_funnel_disable_hw(struct funnel_drvdata *drvdata,
/* Disclaim the device if none of the slaves are now active */
if (!(functl & FUNNEL_ENSx_MASK))
- coresight_disclaim_device_unlocked(drvdata->base);
+ coresight_disclaim_device_unlocked(csdev);
CS_LOCK(drvdata->base);
}
@@ -242,6 +244,7 @@ static int funnel_probe(struct device *dev, struct resource *res)
}
drvdata->base = base;
desc.groups = coresight_funnel_groups;
+ desc.access = CSDEV_ACCESS_IOMEM(base);
}
dev_set_drvdata(dev, drvdata);
@@ -356,7 +359,7 @@ static struct platform_driver static_funnel_driver = {
.remove = static_funnel_remove,
.driver = {
.name = "coresight-static-funnel",
- .owner = THIS_MODULE,
+ /* THIS_MODULE is taken care of by platform_driver_register() */
.of_match_table = static_funnel_match,
.acpi_match_table = ACPI_PTR(static_funnel_ids),
.pm = &funnel_dev_pm_ops,
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index f5f654ea2994..599dc25039df 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -34,7 +34,10 @@
*/
#define CORESIGHT_CLAIM_SELF_HOSTED BIT(1)
-#define TIMEOUT_US 100
+/* Timeout is raised to 5000us to accommodate the longer time taken
+ * by the ETR hardware on the OcteonTX2 implementation.
+ */
+#define TIMEOUT_US 5000
#define BMVAL(val, lsb, msb) ((val & GENMASK(msb, lsb)) >> lsb)
#define ETM_MODE_EXCL_KERN BIT(30)
@@ -81,6 +84,7 @@ enum cs_mode {
CS_MODE_DISABLED,
CS_MODE_SYSFS,
CS_MODE_PERF,
+ CS_MODE_READ_PREVBOOT,
};
/**
@@ -232,4 +236,7 @@ coresight_find_csdev_by_fwnode(struct fwnode_handle *r_fwnode);
void coresight_set_assoc_ectdev_mutex(struct coresight_device *csdev,
struct coresight_device *ect_csdev);
+void coresight_set_percpu_sink(int cpu, struct coresight_device *csdev);
+struct coresight_device *coresight_get_percpu_sink(int cpu);
+
#endif
diff --git a/drivers/hwtracing/coresight/coresight-quirks.c b/drivers/hwtracing/coresight/coresight-quirks.c
new file mode 100644
index 000000000000..f10bdb2d877e
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-quirks.c
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+#include <asm/cputype.h>
+#include <linux/coresight.h>
+#include "coresight-priv.h"
+#include "coresight-quirks.h"
+#include "coresight-etm4x.h"
+
+/* Raw enable/disable APIs for ETM sync insertion */
+void etm4_enable_raw(struct coresight_device *csdev)
+{
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ struct csdev_access *csa = &csdev->access;
+
+ CS_UNLOCK(drvdata->base);
+
+ /* Writing 0 to TRCOSLAR unlocks the trace registers */
+ writel(0x0, drvdata->base + TRCOSLAR);
+
+ /* Enable the trace unit */
+ writel(1, drvdata->base + TRCPRGCTLR);
+
+ coresight_timeout(csa, TRCSTATR, TRCSTATR_IDLE_BIT, 0);
+
+ dsb(sy);
+ isb();
+
+ CS_LOCK(drvdata->base);
+}
+EXPORT_SYMBOL(etm4_enable_raw);
+
+void etm4_disable_raw(struct coresight_device *csdev)
+{
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ struct csdev_access *csa = &csdev->access;
+
+ CS_UNLOCK(drvdata->base);
+ /*
+ * Make sure everything completes before disabling, as recommended
+ * by section 7.3.77 ("TRCVICTLR, ViewInst Main Control Register,
+ * SSTATUS") of ARM IHI 0064D
+ */
+ dsb(sy);
+ isb();
+
+ writel_relaxed(0x0, drvdata->base + TRCPRGCTLR);
+
+ /* Wait for ETM to become stable */
+ coresight_timeout(csa, TRCSTATR, TRCSTATR_PMSTABLE_BIT, 1);
+
+ CS_LOCK(drvdata->base);
+}
+EXPORT_SYMBOL(etm4_disable_raw);
+
+bool coresight_etm_has_hw_sync(void)
+{
+ /* Check if hardware supports sync insertion */
+ if (midr_is_cpu_model_range(read_cpuid_id(),
+ MIDR_MRVL_OCTEONTX2_96XX,
+ MIDR_CPU_VAR_REV(0, 0),
+ MIDR_CPU_VAR_REV(3, 1)) ||
+ midr_is_cpu_model_range(read_cpuid_id(),
+ MIDR_MRVL_OCTEONTX2_95XX,
+ MIDR_CPU_VAR_REV(0, 0),
+ MIDR_CPU_VAR_REV(2, 0)))
+ return false;
+ else
+ return true;
+}
+
+/* ETM quirks on OcteonTX */
+u32 coresight_get_etm_quirks(unsigned int id)
+{
+ u32 quirks = 0; /* reset */
+
+ if (id == OCTEONTX_CN9XXX_ETM)
+ quirks |= CORESIGHT_QUIRK_ETM_TREAT_ETMv43;
+
+ if (!coresight_etm_has_hw_sync())
+ quirks |= CORESIGHT_QUIRK_ETM_SW_SYNC;
+
+ return quirks;
+}
+EXPORT_SYMBOL(coresight_get_etm_quirks);
+
+/* APIs for choosing the sync insertion mode */
+int coresight_get_etm_sync_mode(void)
+{
+ /* Check if hardware supports sync insertion */
+ if (coresight_etm_has_hw_sync())
+ return SYNC_MODE_HW;
+
+ /* Find the software based sync insertion mode */
+#ifdef CONFIG_TASK_ISOLATION
+ return SYNC_MODE_SW_GLOBAL;
+#else
+ return SYNC_MODE_SW_PER_CORE;
+#endif
+}
+EXPORT_SYMBOL(coresight_get_etm_sync_mode);
+
+/* Support functions for managing the active ETM list used by
+ * global mode sync insertion.
+ *
+ * Note: all accessor functions on etm_active_list are assumed
+ * to be called in an atomic context.
+ */
+
+static cpumask_t etm_active_list; /* Bitmap of active ETMs cpu wise */
+
+void coresight_etm_active_enable(int cpu)
+{
+ cpumask_set_cpu(cpu, &etm_active_list);
+}
+EXPORT_SYMBOL(coresight_etm_active_enable);
+
+void coresight_etm_active_disable(int cpu)
+{
+ cpumask_clear_cpu(cpu, &etm_active_list);
+}
+EXPORT_SYMBOL(coresight_etm_active_disable);
+
+cpumask_t coresight_etm_active_list(void)
+{
+ return etm_active_list;
+}
+EXPORT_SYMBOL(coresight_etm_active_list);
+
+/* ETR quirks on OcteonTX */
+u32 coresight_get_etr_quirks(unsigned int id)
+{
+ u32 quirks = 0; /* reset */
+
+ if (midr_is_cpu_model_range(read_cpuid_id(),
+ MIDR_MRVL_OCTEONTX2_96XX,
+ MIDR_CPU_VAR_REV(0, 0),
+ MIDR_CPU_VAR_REV(3, 1)) ||
+ midr_is_cpu_model_range(read_cpuid_id(),
+ MIDR_MRVL_OCTEONTX2_95XX,
+ MIDR_CPU_VAR_REV(0, 0),
+ MIDR_CPU_VAR_REV(2, 0)))
+ quirks |= CORESIGHT_QUIRK_ETR_RESET_CTL_REG |
+ CORESIGHT_QUIRK_ETR_BUFFSIZE_8BX |
+ CORESIGHT_QUIRK_ETR_NO_STOP_FLUSH;
+
+ /* Common across all Chip variants and revisions */
+ if (id == OCTEONTX_CN9XXX_ETR) {
+ quirks |= CORESIGHT_QUIRK_ETR_SECURE_BUFF |
+ CORESIGHT_QUIRK_ETR_FORCE_64B_DBA_RW;
+ quirks |= coresight_get_etm_quirks(OCTEONTX_CN9XXX_ETM);
+ }
+
+ return quirks;
+}
+EXPORT_SYMBOL(coresight_get_etr_quirks);
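+
+/* Usage sketch: the TMC driver caches this quirk mask once at probe time
+ * (see tmc_probe() in coresight-tmc-core.c), e.g.
+ *
+ *	drvdata->etr_quirks = coresight_get_etr_quirks(id->id);
+ */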
diff --git a/drivers/hwtracing/coresight/coresight-quirks.h b/drivers/hwtracing/coresight/coresight-quirks.h
new file mode 100644
index 000000000000..b528dc98c98a
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-quirks.h
@@ -0,0 +1,64 @@
+#ifndef _CORESIGHT_QUIRKS_H
+#define _CORESIGHT_QUIRKS_H
+
+/* Marvell OcteonTx CN9xxx ETM device */
+#define OCTEONTX_CN9XXX_ETM 0x000cc210
+
+/* Coresight ETM Hardware quirks */
+#define CORESIGHT_QUIRK_ETM_SW_SYNC 0x1 /* No Hardware sync */
+#define CORESIGHT_QUIRK_ETM_TREAT_ETMv43 0x2 /* ETMv4.2 as ETMv4.3 */
+
+/* Marvell OcteonTx CN9xxx ETR device */
+#define OCTEONTX_CN9XXX_ETR 0x000cc213
+
+/* Coresight ETR Hardware quirks */
+#define CORESIGHT_QUIRK_ETR_BUFFSIZE_8BX 0x10 /* 8 byte size multiplier */
+#define CORESIGHT_QUIRK_ETR_SECURE_BUFF 0x20 /* Trace buffer is Secure */
+#define CORESIGHT_QUIRK_ETR_RESET_CTL_REG 0x40 /* Reset CTL on reset */
+#define CORESIGHT_QUIRK_ETR_NO_STOP_FLUSH 0x80 /* No Stop on flush */
+#define CORESIGHT_QUIRK_ETR_FORCE_64B_DBA_RW 0x100 /* 64b DBA read/write */
+
+/* ETM sync insertion modes
+ * 1. MODE_HW
+ * Sync insertion is done by hardware without any software intervention.
+ *
+ * 2. MODE_SW_GLOBAL
+ * Sync insertion runs from a common timer handler on the primary core.
+ *
+ * 3. MODE_SW_PER_CORE
+ * Sync insertion runs from a per-core timer handler.
+ *
+ * When the hardware doesn't support sync insertion, we fall back to one of
+ * the software based modes. GLOBAL mode is typically preferred when the
+ * traced cores run performance-critical applications and cannot be
+ * interrupted, at the cost of a small loss of trace data during each
+ * insertion sequence.
+ *
+ * For the sake of simplicity, in GLOBAL mode the common timer handler is
+ * always expected to run on the primary core (core 0).
+ */
+#define SYNC_GLOBAL_CORE 0 /* Core 0 */
+
+enum etm_sync_mode {
+ SYNC_MODE_INVALID,
+ SYNC_MODE_HW,
+ SYNC_MODE_SW_GLOBAL,
+ SYNC_MODE_SW_PER_CORE,
+};
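+
+/* Illustrative sketch only (hypothetical helper, not part of this patch):
+ * a sink driver is expected to derive its IPI target core from the chosen
+ * mode, much like tmc_probe() does, e.g.
+ *
+ *	static int sync_target_cpu(int etm_cpu)
+ *	{
+ *		return coresight_get_etm_sync_mode() == SYNC_MODE_SW_GLOBAL ?
+ *		       SYNC_GLOBAL_CORE : etm_cpu;
+ *	}
+ */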
+
+enum hw_state {
+ USR_STOP,
+ SW_STOP,
+ USR_START,
+};
+
+u32 coresight_get_etm_quirks(unsigned int id);
+u32 coresight_get_etr_quirks(unsigned int id);
+int coresight_get_etm_sync_mode(void);
+
+void etm4_enable_raw(struct coresight_device *csdev);
+void etm4_disable_raw(struct coresight_device *csdev);
+void coresight_etm_active_enable(int cpu);
+void coresight_etm_active_disable(int cpu);
+cpumask_t coresight_etm_active_list(void);
+#endif
diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c
index da2bfeeabc1b..b86acbc74cf0 100644
--- a/drivers/hwtracing/coresight/coresight-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-replicator.c
@@ -45,12 +45,14 @@ struct replicator_drvdata {
static void dynamic_replicator_reset(struct replicator_drvdata *drvdata)
{
+ struct coresight_device *csdev = drvdata->csdev;
+
CS_UNLOCK(drvdata->base);
- if (!coresight_claim_device_unlocked(drvdata->base)) {
+ if (!coresight_claim_device_unlocked(csdev)) {
writel_relaxed(0xff, drvdata->base + REPLICATOR_IDFILTER0);
writel_relaxed(0xff, drvdata->base + REPLICATOR_IDFILTER1);
- coresight_disclaim_device_unlocked(drvdata->base);
+ coresight_disclaim_device_unlocked(csdev);
}
CS_LOCK(drvdata->base);
@@ -70,6 +72,7 @@ static int dynamic_replicator_enable(struct replicator_drvdata *drvdata,
{
int rc = 0;
u32 id0val, id1val;
+ struct coresight_device *csdev = drvdata->csdev;
CS_UNLOCK(drvdata->base);
@@ -84,7 +87,7 @@ static int dynamic_replicator_enable(struct replicator_drvdata *drvdata,
id0val = id1val = 0xff;
if (id0val == 0xff && id1val == 0xff)
- rc = coresight_claim_device_unlocked(drvdata->base);
+ rc = coresight_claim_device_unlocked(csdev);
if (!rc) {
switch (outport) {
@@ -140,6 +143,7 @@ static void dynamic_replicator_disable(struct replicator_drvdata *drvdata,
int inport, int outport)
{
u32 reg;
+ struct coresight_device *csdev = drvdata->csdev;
switch (outport) {
case 0:
@@ -160,7 +164,7 @@ static void dynamic_replicator_disable(struct replicator_drvdata *drvdata,
if ((readl_relaxed(drvdata->base + REPLICATOR_IDFILTER0) == 0xff) &&
(readl_relaxed(drvdata->base + REPLICATOR_IDFILTER1) == 0xff))
- coresight_disclaim_device_unlocked(drvdata->base);
+ coresight_disclaim_device_unlocked(csdev);
CS_LOCK(drvdata->base);
}
@@ -254,6 +258,7 @@ static int replicator_probe(struct device *dev, struct resource *res)
}
drvdata->base = base;
desc.groups = replicator_groups;
+ desc.access = CSDEV_ACCESS_IOMEM(base);
}
if (fwnode_property_present(dev_fwnode(dev),
@@ -374,7 +379,7 @@ static struct platform_driver static_replicator_driver = {
.remove = static_replicator_remove,
.driver = {
.name = "coresight-static-replicator",
- .owner = THIS_MODULE,
+ /* THIS_MODULE is taken care of by platform_driver_register() */
.of_match_table = of_match_ptr(static_replicator_match),
.acpi_match_table = ACPI_PTR(static_replicator_acpi_ids),
.pm = &replicator_dev_pm_ops,
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index 0ecca9f93f3a..58062a5a8238 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -96,7 +96,7 @@ module_param_named(
boot_nr_channel, boot_nr_channel, int, S_IRUGO
);
-/**
+/*
* struct channel_space - central management entity for extended ports
* @base: memory mapped base address where channels start.
* @phys: physical base address of channel region.
@@ -258,6 +258,7 @@ static void stm_disable(struct coresight_device *csdev,
struct perf_event *event)
{
struct stm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ struct csdev_access *csa = &csdev->access;
/*
* For as long as the tracer isn't disabled another entity can't
@@ -270,7 +271,7 @@ static void stm_disable(struct coresight_device *csdev,
spin_unlock(&drvdata->spinlock);
/* Wait until the engine has completely stopped */
- coresight_timeout(drvdata->base, STMTCSR, STMTCSR_BUSY_BIT, 0);
+ coresight_timeout(csa, STMTCSR, STMTCSR_BUSY_BIT, 0);
pm_runtime_put(csdev->dev.parent);
@@ -884,6 +885,7 @@ static int stm_probe(struct amba_device *adev, const struct amba_id *id)
if (IS_ERR(base))
return PTR_ERR(base);
drvdata->base = base;
+ desc.access = CSDEV_ACCESS_IOMEM(base);
ret = stm_get_stimulus_area(dev, &ch_res);
if (ret)
diff --git a/drivers/hwtracing/coresight/coresight-tmc-core.c b/drivers/hwtracing/coresight/coresight-tmc-core.c
index e29b3914fc0f..2e6e90a3274e 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-core.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-core.c
@@ -26,6 +26,7 @@
#include "coresight-priv.h"
#include "coresight-tmc.h"
+#include "coresight-tmc-secure-etr.h"
DEFINE_CORESIGHT_DEVLIST(etb_devs, "tmc_etb");
DEFINE_CORESIGHT_DEVLIST(etf_devs, "tmc_etf");
@@ -33,31 +34,38 @@ DEFINE_CORESIGHT_DEVLIST(etr_devs, "tmc_etr");
void tmc_wait_for_tmcready(struct tmc_drvdata *drvdata)
{
+ struct coresight_device *csdev = drvdata->csdev;
+ struct csdev_access *csa = &csdev->access;
+
/* Ensure formatter, unformatter and hardware fifo are empty */
- if (coresight_timeout(drvdata->base,
- TMC_STS, TMC_STS_TMCREADY_BIT, 1)) {
- dev_err(&drvdata->csdev->dev,
+ if (coresight_timeout(csa, TMC_STS, TMC_STS_TMCREADY_BIT, 1)) {
+ dev_err(&csdev->dev,
"timeout while waiting for TMC to be Ready\n");
}
}
void tmc_flush_and_stop(struct tmc_drvdata *drvdata)
{
+ struct coresight_device *csdev = drvdata->csdev;
+ struct csdev_access *csa = &csdev->access;
u32 ffcr;
ffcr = readl_relaxed(drvdata->base + TMC_FFCR);
- ffcr |= TMC_FFCR_STOP_ON_FLUSH;
- writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
+
+ if (!(drvdata->etr_quirks & CORESIGHT_QUIRK_ETR_NO_STOP_FLUSH)) {
+ ffcr |= TMC_FFCR_STOP_ON_FLUSH;
+ writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
+ }
ffcr |= BIT(TMC_FFCR_FLUSHMAN_BIT);
writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
/* Ensure flush completes */
- if (coresight_timeout(drvdata->base,
- TMC_FFCR, TMC_FFCR_FLUSHMAN_BIT, 0)) {
- dev_err(&drvdata->csdev->dev,
+ if (coresight_timeout(csa, TMC_FFCR, TMC_FFCR_FLUSHMAN_BIT, 0)) {
+ dev_err(&csdev->dev,
"timeout while waiting for completion of Manual Flush\n");
}
- tmc_wait_for_tmcready(drvdata);
+ if (!(drvdata->etr_quirks & CORESIGHT_QUIRK_ETR_NO_STOP_FLUSH))
+ tmc_wait_for_tmcready(drvdata);
}
void tmc_enable_hw(struct tmc_drvdata *drvdata)
@@ -146,6 +154,11 @@ static int tmc_open(struct inode *inode, struct file *file)
struct tmc_drvdata *drvdata = container_of(file->private_data,
struct tmc_drvdata, miscdev);
+ if (drvdata->buf == NULL) {
+ drvdata->mode = CS_MODE_READ_PREVBOOT;
+ dev_info(&drvdata->csdev->dev, "TMC read mode for previous boot\n");
+ }
+
ret = tmc_read_prepare(drvdata);
if (ret)
return ret;
@@ -266,7 +279,22 @@ coresight_tmc_reg(authstatus, TMC_AUTHSTATUS);
coresight_tmc_reg(devid, CORESIGHT_DEVID);
coresight_tmc_reg64(rrp, TMC_RRP, TMC_RRPHI);
coresight_tmc_reg64(rwp, TMC_RWP, TMC_RWPHI);
-coresight_tmc_reg64(dba, TMC_DBALO, TMC_DBAHI);
+
+/* To accommodate silicon implementations that don't support 32-bit split
+ * reads of DBA, use tmc_read_dba() so that ETR quirks can be processed.
+ */
+static ssize_t dba_show(struct device *_dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tmc_drvdata *drvdata = dev_get_drvdata(_dev->parent);
+ u64 val;
+
+ pm_runtime_get_sync(_dev->parent);
+ val = tmc_read_dba(drvdata);
+ pm_runtime_put_sync(_dev->parent);
+ return scnprintf(buf, PAGE_SIZE, "0x%llx\n", val);
+}
+static DEVICE_ATTR_RO(dba);
static struct attribute *coresight_tmc_mgmt_attrs[] = {
&dev_attr_rsz.attr,
@@ -344,9 +372,20 @@ static ssize_t buffer_size_store(struct device *dev,
static DEVICE_ATTR_RW(buffer_size);
+static ssize_t tracebuffer_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ unsigned long val = drvdata->size;
+
+ return sprintf(buf, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(tracebuffer_size);
+
static struct attribute *coresight_tmc_attrs[] = {
&dev_attr_trigger_cntr.attr,
&dev_attr_buffer_size.attr,
+ &dev_attr_tracebuffer_size.attr,
NULL,
};
@@ -377,6 +416,13 @@ static inline bool tmc_etr_has_non_secure_access(struct tmc_drvdata *drvdata)
return (auth & TMC_AUTH_NSID_MASK) == 0x3;
}
+static inline bool tmc_etr_has_secure_access(struct tmc_drvdata *drvdata)
+{
+ u32 auth = readl_relaxed(drvdata->base + TMC_AUTHSTATUS);
+
+ return (auth & TMC_AUTH_SID_MASK) == 0x30;
+}
+
/* Detect and initialise the capabilities of a TMC ETR */
static int tmc_etr_setup_caps(struct device *parent, u32 devid, void *dev_caps)
{
@@ -384,7 +430,8 @@ static int tmc_etr_setup_caps(struct device *parent, u32 devid, void *dev_caps)
u32 dma_mask = 0;
struct tmc_drvdata *drvdata = dev_get_drvdata(parent);
- if (!tmc_etr_has_non_secure_access(drvdata))
+ if (!tmc_etr_has_non_secure_access(drvdata) &&
+ !tmc_etr_has_secure_access(drvdata))
return -EACCES;
/* Set the unadvertised capabilities */
@@ -429,6 +476,21 @@ static u32 tmc_etr_get_default_buffer_size(struct device *dev)
return size;
}
+static u32 tmc_etr_get_max_burst_size(struct device *dev)
+{
+ u32 burst_size;
+
+ if (fwnode_property_read_u32(dev->fwnode, "arm,max-burst-size",
+ &burst_size))
+ return TMC_AXICTL_WR_BURST_16;
+
+ /* Only permissible values are 0 to 15 */
+ if (burst_size > 0xF)
+ burst_size = TMC_AXICTL_WR_BURST_16;
+
+ return burst_size;
+}
+
static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
{
int ret = 0;
@@ -456,19 +518,45 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
}
drvdata->base = base;
+ desc.access = CSDEV_ACCESS_IOMEM(base);
spin_lock_init(&drvdata->spinlock);
+ drvdata->cpu = coresight_get_cpu(dev);
+
+ /* Enable quirks for Silicon issues */
+ drvdata->etr_quirks = coresight_get_etr_quirks(id->id);
+
+ /* Update the SMP target cpu */
+ drvdata->rc_cpu = coresight_get_etm_sync_mode() == SYNC_MODE_SW_GLOBAL ?
+ SYNC_GLOBAL_CORE : drvdata->cpu;
+
+ if (drvdata->etr_quirks & CORESIGHT_QUIRK_ETM_SW_SYNC) {
+ tmc_etr_add_cpumap(drvdata); /* Used for global sync mode */
+ tmc_etr_timer_init(drvdata);
+ }
+
devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID);
drvdata->config_type = BMVAL(devid, 6, 7);
drvdata->memwidth = tmc_get_memwidth(devid);
/* This device is not associated with a session */
drvdata->pid = -1;
- if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
+ if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
drvdata->size = tmc_etr_get_default_buffer_size(dev);
- else
+ drvdata->max_burst_size = tmc_etr_get_max_burst_size(dev);
+ } else {
drvdata->size = readl_relaxed(drvdata->base + TMC_RSZ) * 4;
+ }
+
+ if (drvdata->etr_quirks & CORESIGHT_QUIRK_ETR_SECURE_BUFF) {
+ if (tmc_get_cpu_tracebufsize(drvdata, &drvdata->size) ||
+ !drvdata->size) {
+ pr_err("Secure tracebuffer not available\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
desc.dev = dev;
desc.groups = coresight_tmc_groups;
@@ -563,6 +651,10 @@ static void tmc_remove(struct amba_device *adev)
{
struct tmc_drvdata *drvdata = dev_get_drvdata(&adev->dev);
+ if ((drvdata->etr_quirks & CORESIGHT_QUIRK_ETM_SW_SYNC) &&
+ (drvdata->mode == CS_MODE_SYSFS))
+ smp_call_function_single(drvdata->rc_cpu, tmc_etr_timer_cancel,
+ drvdata, true);
/*
* Since misc_open() holds a refcount on the f_ops, which is
* etb fops in this case, device is there until last file
@@ -580,6 +672,8 @@ static const struct amba_id tmc_ids[] = {
CS_AMBA_ID(0x000bb9e9),
/* Coresight SoC 600 TMC-ETF */
CS_AMBA_ID(0x000bb9ea),
+ /* Marvell OcteonTx CN9xxx */
+ CS_AMBA_ID_DATA(0x000cc213, (unsigned long)OCTEONTX_CN9XXX_ETR_CAPS),
{ 0, 0},
};
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index 8978f3410bee..cd0fb7bfba68 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -37,7 +37,7 @@ static void __tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
static int tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
- int rc = coresight_claim_device(drvdata->base);
+ int rc = coresight_claim_device(drvdata->csdev);
if (rc)
return rc;
@@ -88,7 +88,7 @@ static void __tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
__tmc_etb_disable_hw(drvdata);
- coresight_disclaim_device(drvdata->base);
+ coresight_disclaim_device(drvdata->csdev);
}
static void __tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
@@ -109,7 +109,7 @@ static void __tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
static int tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
- int rc = coresight_claim_device(drvdata->base);
+ int rc = coresight_claim_device(drvdata->csdev);
if (rc)
return rc;
@@ -120,11 +120,13 @@ static int tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
{
+ struct coresight_device *csdev = drvdata->csdev;
+
CS_UNLOCK(drvdata->base);
tmc_flush_and_stop(drvdata);
tmc_disable_hw(drvdata);
- coresight_disclaim_device_unlocked(drvdata->base);
+ coresight_disclaim_device_unlocked(csdev);
CS_LOCK(drvdata->base);
}
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 3309b1344ffc..cd9ceb7c6126 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -18,6 +18,8 @@
#include "coresight-etm-perf.h"
#include "coresight-priv.h"
#include "coresight-tmc.h"
+#include "coresight-tmc-secure-etr.h"
+
struct etr_flat_buf {
struct device *dev;
@@ -793,10 +795,13 @@ static inline void tmc_etr_disable_catu(struct tmc_drvdata *drvdata)
helper_ops(catu)->disable(catu, drvdata->etr_buf);
}
+extern const struct etr_buf_operations etr_secure_buf_ops;
+
static const struct etr_buf_operations *etr_buf_ops[] = {
[ETR_MODE_FLAT] = &etr_flat_buf_ops,
[ETR_MODE_ETR_SG] = &etr_sg_buf_ops,
[ETR_MODE_CATU] = NULL,
+ [ETR_MODE_SECURE] = &etr_secure_buf_ops,
};
void tmc_etr_set_catu_ops(const struct etr_buf_operations *catu)
@@ -822,6 +827,7 @@ static inline int tmc_etr_mode_alloc_buf(int mode,
case ETR_MODE_FLAT:
case ETR_MODE_ETR_SG:
case ETR_MODE_CATU:
+ case ETR_MODE_SECURE:
if (etr_buf_ops[mode] && etr_buf_ops[mode]->alloc)
rc = etr_buf_ops[mode]->alloc(drvdata, etr_buf,
node, pages);
@@ -863,6 +869,12 @@ static struct etr_buf *tmc_alloc_etr_buf(struct tmc_drvdata *drvdata,
etr_buf->size = size;
+ if (drvdata->etr_quirks & CORESIGHT_QUIRK_ETR_SECURE_BUFF) {
+ rc = tmc_etr_mode_alloc_buf(ETR_MODE_SECURE, drvdata,
+ etr_buf, node, pages);
+ goto done;
+ }
+
/*
* If we have to use an existing list of pages, we cannot reliably
* use a contiguous DMA memory (even if we have an IOMMU). Otherwise,
@@ -885,6 +897,8 @@ static struct etr_buf *tmc_alloc_etr_buf(struct tmc_drvdata *drvdata,
if (rc && has_catu)
rc = tmc_etr_mode_alloc_buf(ETR_MODE_CATU, drvdata,
etr_buf, node, pages);
+
+done:
if (rc) {
kfree(etr_buf);
return ERR_PTR(rc);
@@ -956,11 +970,11 @@ static void tmc_sync_etr_buf(struct tmc_drvdata *drvdata)
dev_dbg(&drvdata->csdev->dev,
"tmc memory error detected, truncating buffer\n");
etr_buf->len = 0;
- etr_buf->full = 0;
+ etr_buf->full = false;
return;
}
- etr_buf->full = status & TMC_STS_FULL;
+ etr_buf->full = !!(status & TMC_STS_FULL);
WARN_ON(!etr_buf->ops || !etr_buf->ops->sync);
@@ -974,15 +988,22 @@ static void __tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
CS_UNLOCK(drvdata->base);
+ if (drvdata->etr_quirks & CORESIGHT_QUIRK_ETR_RESET_CTL_REG)
+ tmc_disable_hw(drvdata);
+
/* Wait for TMCSReady bit to be set */
tmc_wait_for_tmcready(drvdata);
- writel_relaxed(etr_buf->size / 4, drvdata->base + TMC_RSZ);
+ if (drvdata->etr_quirks & CORESIGHT_QUIRK_ETR_BUFFSIZE_8BX)
+ writel_relaxed(etr_buf->size / 8, drvdata->base + TMC_RSZ);
+ else
+ writel_relaxed(etr_buf->size / 4, drvdata->base + TMC_RSZ);
writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
axictl = readl_relaxed(drvdata->base + TMC_AXICTL);
axictl &= ~TMC_AXICTL_CLEAR_MASK;
- axictl |= (TMC_AXICTL_PROT_CTL_B1 | TMC_AXICTL_WR_BURST_16);
+ axictl |= TMC_AXICTL_PROT_CTL_B1;
+ axictl |= TMC_AXICTL_WR_BURST(drvdata->max_burst_size);
axictl |= TMC_AXICTL_AXCACHE_OS;
if (tmc_etr_has_cap(drvdata, TMC_ETR_AXI_ARCACHE)) {
@@ -1040,7 +1061,7 @@ static int tmc_etr_enable_hw(struct tmc_drvdata *drvdata,
rc = tmc_etr_enable_catu(drvdata, etr_buf);
if (rc)
return rc;
- rc = coresight_claim_device(drvdata->base);
+ rc = coresight_claim_device(drvdata->csdev);
if (!rc) {
drvdata->etr_buf = etr_buf;
__tmc_etr_enable_hw(drvdata);
@@ -1134,7 +1155,7 @@ void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
__tmc_etr_disable_hw(drvdata);
/* Disable CATU device if this ETR is connected to one */
tmc_etr_disable_catu(drvdata);
- coresight_disclaim_device(drvdata->base);
+ coresight_disclaim_device(drvdata->csdev);
/* Reset the ETR buf used by hardware */
drvdata->etr_buf = NULL;
}
@@ -1205,6 +1226,11 @@ out:
if (free_buf)
tmc_etr_free_sysfs_buf(free_buf);
+ if (!ret && (drvdata->etr_quirks & CORESIGHT_QUIRK_ETM_SW_SYNC) &&
+ (drvdata->mode != CS_MODE_READ_PREVBOOT))
+ smp_call_function_single(drvdata->rc_cpu, tmc_etr_timer_start,
+ drvdata, true);
+
if (!ret)
dev_dbg(&csdev->dev, "TMC-ETR enabled\n");
@@ -1648,6 +1674,10 @@ static int tmc_disable_etr_sink(struct coresight_device *csdev)
{
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ u32 mode;
+
+ /* Cache the drvdata->mode */
+ mode = drvdata->mode;
spin_lock_irqsave(&drvdata->spinlock, flags);
@@ -1672,6 +1702,11 @@ static int tmc_disable_etr_sink(struct coresight_device *csdev)
spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ if ((drvdata->etr_quirks & CORESIGHT_QUIRK_ETM_SW_SYNC) &&
+ (mode == CS_MODE_SYSFS))
+ smp_call_function_single(drvdata->rc_cpu, tmc_etr_timer_cancel,
+ drvdata, true);
+
dev_dbg(&csdev->dev, "TMC-ETR disabled\n");
return 0;
}
@@ -1697,6 +1732,20 @@ int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
return -EINVAL;
+ if (drvdata->mode == CS_MODE_READ_PREVBOOT) {
+ /* Initialize drvdata for reading trace data from last boot */
+ ret = tmc_enable_etr_sink_sysfs(drvdata->csdev);
+ if (ret)
+ return ret;
+ /* Update the buffer offset, len */
+ tmc_etr_sync_sysfs_buf(drvdata);
+ return 0;
+ }
+
+ if (drvdata->etr_quirks & CORESIGHT_QUIRK_ETR_NO_STOP_FLUSH)
+ smp_call_function_single(drvdata->rc_cpu, tmc_flushstop_etm_off,
+ drvdata, true);
+
spin_lock_irqsave(&drvdata->spinlock, flags);
if (drvdata->reading) {
ret = -EBUSY;
@@ -1759,5 +1808,10 @@ int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
if (sysfs_buf)
tmc_etr_free_sysfs_buf(sysfs_buf);
+ if ((drvdata->mode == CS_MODE_SYSFS) &&
+ (drvdata->etr_quirks & CORESIGHT_QUIRK_ETR_NO_STOP_FLUSH))
+ smp_call_function_single(drvdata->rc_cpu, tmc_flushstop_etm_on,
+ drvdata, true);
+
return 0;
}
diff --git a/drivers/hwtracing/coresight/coresight-tmc-secure-etr.c b/drivers/hwtracing/coresight/coresight-tmc-secure-etr.c
new file mode 100644
index 000000000000..60849e91c546
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-tmc-secure-etr.c
@@ -0,0 +1,417 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/atomic.h>
+#include <linux/coresight.h>
+#include <linux/dma-mapping.h>
+#include <linux/refcount.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "coresight-etm4x.h"
+#include "coresight-priv.h"
+#include "coresight-tmc.h"
+#include "coresight-quirks.h"
+#include "coresight-tmc-secure-etr.h"
+
+/* SW mode sync insertion interval
+ *
+ * The sync insertion interval per 1MB is based on the assumption of
+ * trace data generated at 4 bits/cycle, a cycle period of 0.4 ns
+ * and at least 4 syncs per buffer wrap.
+ *
+ * One limitation of fixing only 4 syncs per buffer wrap is that we
+ * might lose up to 1/4 of the initial buffer data due to a missing sync.
+ * On the other hand, the sync insertion frequency can be reduced by
+ * increasing the buffer size, which seems to be a good compromise.
+ */
+#define SYNC_TICK_NS_PER_MB 200000 /* 200us */
+#define SYNCS_PER_FILL 4
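+
+/* Worked example (assuming a hypothetical 16 MiB per-CPU trace buffer):
+ *
+ *	tick      = (SZ_16M / SZ_1M) * SYNC_TICK_NS_PER_MB
+ *	          = 16 * 200000 ns = 3.2 ms
+ *	len_thold = SZ_16M / SYNCS_PER_FILL = 4 MiB
+ *
+ * i.e. the timer fires every 3.2 ms, and a sync is inserted only once at
+ * least 4 MiB of new trace has been written since the previous insertion
+ * (or on a buffer wrap). See tmc_etr_alloc_secure_buf() below for the
+ * actual computation.
+ */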
+
+/* Global mode timer management */
+
+/**
+ * struct tmc_etr_tsync_global - Global mode timer
+ * @drvdata_cpumap: cpu to tmc drvdata map
+ * @timer: global timer shared by all cores
+ * @tick: global timer tick period
+ * @active_count: timer reference count
+ */
+static struct tmc_etr_tsync_global {
+ struct tmc_drvdata *drvdata_cpumap[NR_CPUS];
+ struct hrtimer timer;
+ int active_count;
+ u64 tick;
+} tmc_etr_tsync_global;
+
+/* Accessor functions for tsync global */
+void tmc_etr_add_cpumap(struct tmc_drvdata *drvdata)
+{
+ tmc_etr_tsync_global.drvdata_cpumap[drvdata->cpu] = drvdata;
+}
+
+static inline struct tmc_drvdata *cpu_to_tmcdrvdata(int cpu)
+{
+ return tmc_etr_tsync_global.drvdata_cpumap[cpu];
+}
+
+static inline struct hrtimer *tmc_etr_tsync_global_timer(void)
+{
+ return &tmc_etr_tsync_global.timer;
+}
+
+static inline void tmc_etr_tsync_global_tick(u64 tick)
+{
+ tmc_etr_tsync_global.tick = tick;
+}
+
+/* Reference counting is assumed to always be done from
+ * an atomic context.
+ */
+static inline int tmc_etr_tsync_global_addref(void)
+{
+ return ++tmc_etr_tsync_global.active_count;
+}
+
+static inline int tmc_etr_tsync_global_delref(void)
+{
+ return --tmc_etr_tsync_global.active_count;
+}
+
+/* Sync insertion API */
+static void tmc_etr_insert_sync(struct tmc_drvdata *drvdata)
+{
+ struct coresight_device *sdev = drvdata->etm_source;
+ struct etr_tsync_data *syncd = &drvdata->tsync_data;
+ struct etmv4_drvdata *etm_drvdata = dev_get_drvdata(sdev->dev.parent);
+ int err = 0, len;
+ u64 rwp;
+
+ /* We have three contenders for ETM control:
+ * 1. User initiated ETM control
+ * 2. Timer sync initiated ETM control
+ * 3. No-stop-on-flush initiated ETM control
+ * They all run in an atomic context and on the same core, either
+ * the core the ETM is associated with or the primary core, and are
+ * therefore mutually exclusive.
+ *
+ * To avoid any sync insertion while the ETM has been disabled by the
+ * user, we rely on the device hw_state; the hrtimer, for example, may
+ * still be active even after the ETM is disabled by the user.
+ */
+ if (etm_drvdata->hw_state != USR_START)
+ return;
+
+ rwp = tmc_read_rwp(drvdata);
+ if (!syncd->prev_rwp)
+ goto sync_insert;
+
+ if (syncd->prev_rwp <= rwp) {
+ len = rwp - syncd->prev_rwp;
+ } else { /* Buffer wrapped */
+ goto sync_insert;
+ }
+
+ /* Check if we reached buffer threshold */
+ if (len < syncd->len_thold)
+ goto skip_insert;
+
+ /* Software based sync insertion procedure */
+sync_insert:
+ /* Disable source */
+ etm4_disable_raw(sdev);
+
+ /* Enable source */
+ etm4_enable_raw(sdev);
+
+ if (!err) {
+ /* Mark the write pointer of sync insertion */
+ syncd->prev_rwp = tmc_read_rwp(drvdata);
+ }
+
+skip_insert:
+ return;
+}
+
+/* Timer handler APIs */
+
+static enum hrtimer_restart tmc_etr_timer_handler_percore(struct hrtimer *t)
+{
+ struct tmc_drvdata *drvdata;
+
+ drvdata = container_of(t, struct tmc_drvdata, timer);
+ hrtimer_forward_now(t, ns_to_ktime(drvdata->tsync_data.tick));
+ tmc_etr_insert_sync(drvdata);
+ return HRTIMER_RESTART;
+}
+
+static enum hrtimer_restart tmc_etr_timer_handler_global(struct hrtimer *t)
+{
+ cpumask_t active_mask;
+ int cpu;
+
+ hrtimer_forward_now(t, ns_to_ktime(tmc_etr_tsync_global.tick));
+
+ active_mask = coresight_etm_active_list();
+ /* Run sync insertions for all active ETMs */
+ for_each_cpu(cpu, &active_mask)
+ tmc_etr_insert_sync(cpu_to_tmcdrvdata(cpu));
+
+ return HRTIMER_RESTART;
+}
+
+/* Timer init API common for both global and per core mode */
+void tmc_etr_timer_init(struct tmc_drvdata *drvdata)
+{
+ struct hrtimer *timer;
+
+ timer = coresight_get_etm_sync_mode() == SYNC_MODE_SW_GLOBAL ?
+ tmc_etr_tsync_global_timer() : &drvdata->timer;
+ hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+}
+
+/* Timer setup API common to both global and per core modes.
+ *
+ * Global mode: the timer is started only if it is not already active.
+ * The number of users is managed by reference counting.
+ * Percore mode: the timer is always started.
+ *
+ * Always executed in an atomic context, either in an IPI handler
+ * on a remote core or with irqs disabled on the local core.
+ */
+void tmc_etr_timer_start(void *data)
+{
+ struct tmc_drvdata *drvdata = data;
+ struct hrtimer *timer;
+ bool mode_global;
+ u64 tick;
+
+ tick = drvdata->tsync_data.tick;
+ mode_global = (coresight_get_etm_sync_mode() == SYNC_MODE_SW_GLOBAL);
+ if (mode_global) {
+ if (tmc_etr_tsync_global_addref() == 1) {
+ /* Start only if we are the first user */
+ tmc_etr_tsync_global_tick(tick); /* Configure tick */
+ } else {
+ dev_dbg(&drvdata->csdev->dev, "global timer active already\n");
+ return;
+ }
+ }
+
+ timer = mode_global ? tmc_etr_tsync_global_timer() : &drvdata->timer;
+ timer->function = mode_global ?
+ tmc_etr_timer_handler_global : tmc_etr_timer_handler_percore;
+ dev_dbg(&drvdata->csdev->dev, "Starting sync timer, mode:%s period:%lld ns\n",
+ mode_global ? "global" : "percore", tick);
+ hrtimer_start(timer, ns_to_ktime(tick), HRTIMER_MODE_REL_PINNED);
+}
+
+/* Timer cancel API common to both global and per core modes.
+ *
+ * Global mode: the timer is cancelled only if there are no other users.
+ * Percore mode: the timer is always cancelled.
+ *
+ * Always executed in an atomic context, either in an IPI handler
+ * on a remote core or with irqs disabled on the local core.
+ */
+void tmc_etr_timer_cancel(void *data)
+{
+ struct tmc_drvdata *drvdata = data;
+ struct hrtimer *timer;
+ bool mode_global;
+
+ mode_global = (coresight_get_etm_sync_mode() == SYNC_MODE_SW_GLOBAL);
+ if (mode_global) {
+ if (tmc_etr_tsync_global_delref() != 0) {
+ /* Nothing to do if we are not the last user */
+ return;
+ }
+ }
+
+ timer = mode_global ?
+ tmc_etr_tsync_global_timer() : &drvdata->timer;
+ hrtimer_cancel(timer);
+}
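+
+/* Usage sketch: the ETR sink code always arms/cancels these timers on the
+ * target core (drvdata->rc_cpu) from an IPI, e.g.
+ *
+ *	smp_call_function_single(drvdata->rc_cpu, tmc_etr_timer_start,
+ *				 drvdata, true);
+ */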
+
+/*
+ * tmc_etr_alloc_secure_buf: Allocate a contiguous DMA buffer and the
+ * corresponding secure trace buffer.
+ */
+static int tmc_etr_alloc_secure_buf(struct tmc_drvdata *drvdata,
+ struct etr_buf *etr_buf, int node,
+ void **pages)
+{
+ struct etr_secure_buf *secure_buf;
+ struct device *real_dev = drvdata->csdev->dev.parent;
+ u64 s_hwaddr = 0;
+ int err;
+
+ /* We cannot reuse existing pages for flat buf */
+ if (pages)
+ return -EINVAL;
+
+ /* Perf tries to allocate a larger size and falls back to
+ * drvdata->size or smaller sizes if that fails.
+ * Since the per-CPU trace buffer size is capped at drvdata->size,
+ * don't proceed with the secure buffer allocation if the requested
+ * size is larger than drvdata->size.
+ */
+ if (etr_buf->size > drvdata->size)
+ return -ENOMEM;
+
+ secure_buf = kzalloc(sizeof(*secure_buf), GFP_KERNEL);
+ if (!secure_buf)
+ return -ENOMEM;
+
+ secure_buf->size = etr_buf->size;
+ secure_buf->dev = &drvdata->csdev->dev;
+
+ secure_buf->vaddr = dma_alloc_coherent(real_dev, etr_buf->size,
+ &secure_buf->daddr, GFP_KERNEL);
+ if (!secure_buf->vaddr) {
+ kfree(secure_buf);
+ return -ENOMEM;
+ }
+
+ /* Register the driver allocated DMA buffer for the necessary
+ * mapping in the secure world.
+ */
+ if (tmc_register_drvbuf(drvdata, secure_buf->daddr, secure_buf->size)) {
+ err = -ENOMEM;
+ goto reg_err;
+ }
+
+ /* Allocate secure trace buffer */
+ if (tmc_alloc_secbuf(drvdata, secure_buf->size, &s_hwaddr)) {
+ err = -ENOMEM;
+ goto salloc_err;
+ }
+
+ secure_buf->secure_hwaddr = s_hwaddr;
+
+ /* Pass the secure_hwaddr to etr_buf so that
+ * the core tmc driver can use this to program
+ * registers like DBA.
+ */
+ etr_buf->hwaddr = secure_buf->secure_hwaddr;
+ etr_buf->mode = ETR_MODE_SECURE;
+ etr_buf->private = secure_buf;
+
+ /* Calculate parameters for sync packet insertion */
+ if (drvdata->etr_quirks & CORESIGHT_QUIRK_ETM_SW_SYNC) {
+ drvdata->tsync_data.len_thold = drvdata->size / (SYNCS_PER_FILL);
+ drvdata->tsync_data.tick = (drvdata->size / SZ_1M) * SYNC_TICK_NS_PER_MB;
+ drvdata->tsync_data.prev_rwp = 0;
+ if (!drvdata->tsync_data.tick) {
+ drvdata->tsync_data.tick = SYNC_TICK_NS_PER_MB;
+ dev_warn(&drvdata->csdev->dev,
+ "Trace bufer size not sufficient, sync insertion can fail\n");
+ }
+ }
+
+ return 0;
+
+salloc_err:
+ tmc_unregister_drvbuf(drvdata, secure_buf->daddr, secure_buf->size);
+
+reg_err:
+ dma_free_coherent(real_dev, etr_buf->size, secure_buf->vaddr,
+ secure_buf->daddr);
+ return err;
+}
+
+static void tmc_etr_free_secure_buf(struct etr_buf *etr_buf)
+{
+ struct etr_secure_buf *secure_buf = etr_buf->private;
+ struct tmc_drvdata *drvdata;
+ struct device *real_dev;
+
+ if (!secure_buf)
+ return;
+
+ real_dev = secure_buf->dev->parent;
+ drvdata = dev_get_drvdata(real_dev);
+
+ dma_free_coherent(real_dev, secure_buf->size, secure_buf->vaddr,
+ secure_buf->daddr);
+
+ tmc_unregister_drvbuf(drvdata, secure_buf->daddr, secure_buf->size);
+
+ tmc_free_secbuf(drvdata, secure_buf->secure_hwaddr, secure_buf->size);
+
+ kfree(secure_buf);
+}
+
+static void tmc_etr_sync_secure_buf(struct etr_buf *etr_buf, u64 rrp, u64 rwp)
+{
+ struct etr_secure_buf *secure_buf = etr_buf->private;
+ u64 w_offset;
+
+ /*
+ * Adjust the buffer to point to the beginning of the trace data
+ * and update the available trace data.
+ */
+ w_offset = rwp - secure_buf->secure_hwaddr;
+
+ if (etr_buf->full) {
+ etr_buf->offset = w_offset;
+ etr_buf->len = etr_buf->size;
+ } else {
+ etr_buf->offset = 0;
+ etr_buf->len = w_offset;
+ }
+
+ /* Copy the secure buffer to the driver allocated buffer.
+ * This is done here so that when the core TMC driver starts
+ * copying the data to the sysfs or perf buffer, we do not
+ * generate SMC calls at different offsets every time.
+ */
+ tmc_copy_secure_buffer(secure_buf, 0x0, etr_buf->len);
+}
+
+static ssize_t tmc_etr_get_data_secure_buf(struct etr_buf *etr_buf,
+ u64 offset, size_t len, char **bufpp)
+{
+ struct etr_secure_buf *secure_buf = etr_buf->private;
+
+ *bufpp = (char *)secure_buf->vaddr + offset;
+
+ /*
+ * tmc_etr_buf_get_data already adjusts the length to handle
+ * buffer wrapping around.
+ */
+ return len;
+}
+
+const struct etr_buf_operations etr_secure_buf_ops = {
+ .alloc = tmc_etr_alloc_secure_buf,
+ .free = tmc_etr_free_secure_buf,
+ .sync = tmc_etr_sync_secure_buf,
+ .get_data = tmc_etr_get_data_secure_buf,
+};
+
+/* APIs to manage ETM start/stop when ETR stop on flush is broken */
+
+void tmc_flushstop_etm_off(void *data)
+{
+ struct tmc_drvdata *drvdata = data;
+ struct coresight_device *sdev = drvdata->etm_source;
+ struct etmv4_drvdata *etm_drvdata = dev_get_drvdata(sdev->dev.parent);
+
+ if (etm_drvdata->hw_state == USR_START) {
+ etm4_disable_raw(sdev);
+ etm_drvdata->hw_state = SW_STOP;
+ }
+}
+
+void tmc_flushstop_etm_on(void *data)
+{
+ struct tmc_drvdata *drvdata = data;
+ struct coresight_device *sdev = drvdata->etm_source;
+ struct etmv4_drvdata *etm_drvdata = dev_get_drvdata(sdev->dev.parent);
+
+ if (etm_drvdata->hw_state == SW_STOP) { /* Restore the user configured state */
+ etm4_enable_raw(sdev);
+ etm_drvdata->hw_state = USR_START;
+ }
+}
diff --git a/drivers/hwtracing/coresight/coresight-tmc-secure-etr.h b/drivers/hwtracing/coresight/coresight-tmc-secure-etr.h
new file mode 100644
index 000000000000..de270eb904e2
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-tmc-secure-etr.h
@@ -0,0 +1,115 @@
+#ifndef _CORESIGHT_TMC_SECURE_ETR_H
+#define _CORESIGHT_TMC_SECURE_ETR_H
+
+#include <linux/arm-smccc.h>
+
+void tmc_etr_timer_start(void *data);
+void tmc_etr_timer_init(struct tmc_drvdata *drvdata);
+void tmc_etr_timer_cancel(void *data);
+void tmc_flushstop_etm_off(void *data);
+void tmc_flushstop_etm_on(void *data);
+void tmc_etr_add_cpumap(struct tmc_drvdata *drvdata);
+
+struct etr_secure_buf {
+ struct device *dev;
+ dma_addr_t daddr;
+ dma_addr_t secure_hwaddr;
+ void *vaddr;
+ size_t size;
+};
+
+/* SMC call ids for managing the secure trace buffer */
+
+/* Args: x1 - size, x2 - cpu, x3 - llc lock flag
+ * Returns: x0 - status, x1 - secure buffer address
+ */
+#define OCTEONTX_TRC_ALLOC_SBUF 0xc2000c05
+/* Args: x1 - non secure buffer address, x2 - size */
+#define OCTEONTX_TRC_REGISTER_DRVBUF 0xc2000c06
+/* Args: x1 - dst(non secure), x2 - src(secure), x3 - size */
+#define OCTEONTX_TRC_COPY_TO_DRVBUF 0xc2000c07
+/* Args: x1 - secure buffer address, x2 - size */
+#define OCTEONTX_TRC_FREE_SBUF 0xc2000c08
+/* Args: x1 - non secure buffer address, x2 - size */
+#define OCTEONTX_TRC_UNREGISTER_DRVBUF 0xc2000c09
+/* Args: Nil
+ * Returns: cpu trace buffer size
+ */
+#define OCTEONTX_TRC_GET_CPU_BUFSIZE 0xc2000c0a
+
+/* SMC Calls for secure buffer management */
+static inline int tmc_alloc_secbuf(struct tmc_drvdata *drvdata,
+ size_t len, dma_addr_t *s_paddr)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(OCTEONTX_TRC_ALLOC_SBUF, len, drvdata->cpu,
+ 0, 0, 0, 0, 0, &res);
+ if (res.a0 != SMCCC_RET_SUCCESS)
+ return -EFAULT;
+
+ *s_paddr = res.a1;
+ return 0;
+}
+
+static inline int tmc_free_secbuf(struct tmc_drvdata *drvdata,
+ dma_addr_t s_paddr, size_t len)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(OCTEONTX_TRC_FREE_SBUF, s_paddr, len,
+ 0, 0, 0, 0, 0, &res);
+ return 0;
+}
+
+static inline int tmc_register_drvbuf(struct tmc_drvdata *drvdata,
+ dma_addr_t paddr, size_t len)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(OCTEONTX_TRC_REGISTER_DRVBUF, paddr, len,
+ 0, 0, 0, 0, 0, &res);
+ if (res.a0 != SMCCC_RET_SUCCESS)
+ return -EFAULT;
+
+ return 0;
+}
+
+static inline int tmc_unregister_drvbuf(struct tmc_drvdata *drvdata,
+ dma_addr_t paddr, size_t len)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(OCTEONTX_TRC_UNREGISTER_DRVBUF, paddr, len,
+ 0, 0, 0, 0, 0, &res);
+ return 0;
+}
+
+static inline int tmc_copy_secure_buffer(struct etr_secure_buf *secure_buf,
+ uint64_t offset, size_t len)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(OCTEONTX_TRC_COPY_TO_DRVBUF, secure_buf->daddr + offset,
+ secure_buf->secure_hwaddr + offset, len, 0, 0, 0, 0, &res);
+ if (res.a0 != SMCCC_RET_SUCCESS)
+ return -EFAULT;
+
+ return 0;
+}
+
+static inline int tmc_get_cpu_tracebufsize(struct tmc_drvdata *drvdata, u32 *len)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(OCTEONTX_TRC_GET_CPU_BUFSIZE, 0, 0, 0,
+ 0, 0, 0, 0, &res);
+ if (res.a0 != SMCCC_RET_SUCCESS)
+ return -EFAULT;
+
+ *len = (u32)res.a1;
+ return 0;
+}
+
+#endif
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
index b91ec7dde7bc..f7ed954a241d 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.h
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -13,6 +13,8 @@
#include <linux/mutex.h>
#include <linux/refcount.h>
+#include "coresight-quirks.h"
+
#define TMC_RSZ 0x004
#define TMC_STS 0x00c
#define TMC_RRD 0x010
@@ -70,7 +72,8 @@
#define TMC_AXICTL_PROT_CTL_B0 BIT(0)
#define TMC_AXICTL_PROT_CTL_B1 BIT(1)
#define TMC_AXICTL_SCT_GAT_MODE BIT(7)
-#define TMC_AXICTL_WR_BURST_16 0xF00
+#define TMC_AXICTL_WR_BURST(v) (((v) & 0xf) << 8)
+#define TMC_AXICTL_WR_BURST_16 0xf
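+/*
+ * For example, TMC_AXICTL_WR_BURST(TMC_AXICTL_WR_BURST_16) evaluates to
+ * 0xF00, preserving the value previously hard-coded into AXICTL, while
+ * allowing smaller burst sizes from the optional "arm,max-burst-size"
+ * property.
+ */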
/* Write-back Read and Write-allocate */
#define TMC_AXICTL_AXCACHE_OS (0xf << 2)
#define TMC_AXICTL_ARCACHE_OS (0xf << 16)
@@ -92,6 +95,7 @@
#define TMC_DEVID_AXIAW_MASK 0x7f
#define TMC_AUTH_NSID_MASK GENMASK(1, 0)
+#define TMC_AUTH_SID_MASK GENMASK(5, 4)
enum tmc_config_type {
TMC_CONFIG_TYPE_ETB,
@@ -130,10 +134,14 @@ enum tmc_mem_intf_width {
#define CORESIGHT_SOC_600_ETR_CAPS \
(TMC_ETR_SAVE_RESTORE | TMC_ETR_AXI_ARCACHE)
+/* Marvell OcteonTx CN9xxx TMC-ETR unadvertised capabilities */
+#define OCTEONTX_CN9XXX_ETR_CAPS (TMC_ETR_SAVE_RESTORE)
+
enum etr_mode {
ETR_MODE_FLAT, /* Uses contiguous flat buffer */
ETR_MODE_ETR_SG, /* Uses in-built TMC ETR SG mechanism */
ETR_MODE_CATU, /* Use SG mechanism in CATU */
+ ETR_MODE_SECURE, /* Use Secure buffer */
};
struct etr_buf_operations;
@@ -163,6 +171,20 @@ struct etr_buf {
};
/**
+ * struct etr_tsync_data - Timer based sync insertion data management
+ * @syncs_per_fill: syncs inserted per buffer wrap
+ * @prev_rwp: write pointer at the last sync insertion
+ * @len_thold: Buffer length threshold for inserting syncs
+ * @tick: Tick interval in ns
+ */
+struct etr_tsync_data {
+ int syncs_per_fill;
+ u64 prev_rwp;
+ u64 len_thold;
+ u64 tick;
+};
+
+/**
* struct tmc_drvdata - specifics associated to an TMC component
* @base: memory mapped base address for this component.
* @csdev: component vitals needed by the framework.
@@ -174,6 +196,8 @@ struct etr_buf {
* @etr_buf: details of buffer used in TMC-ETR
* @len: size of the available trace for ETF/ETB.
* @size: trace buffer size for this TMC (common for all modes).
+ * @max_burst_size: The maximum burst size that can be initiated by the
+ * TMC-ETR on the AXI bus.
* @mode: how this TMC is being used.
* @config_type: TMC variant, must be of type @tmc_config_type.
* @memwidth: width of the memory interface databus, in bytes.
@@ -198,7 +222,11 @@ struct tmc_drvdata {
};
u32 len;
u32 size;
+ u32 max_burst_size;
u32 mode;
+ u32 etr_quirks;
+ int cpu;
+ int rc_cpu;
enum tmc_config_type config_type;
enum tmc_mem_intf_width memwidth;
u32 trigger_cntr;
@@ -207,6 +235,9 @@ struct tmc_drvdata {
struct mutex idr_mutex;
struct etr_buf *sysfs_buf;
struct etr_buf *perf_buf;
+ void *etm_source;
+ struct etr_tsync_data tsync_data;
+ struct hrtimer timer;
};
struct etr_buf_operations {
@@ -288,7 +319,24 @@ tmc_write_##name(struct tmc_drvdata *drvdata, u64 val) \
TMC_REG_PAIR(rrp, TMC_RRP, TMC_RRPHI)
TMC_REG_PAIR(rwp, TMC_RWP, TMC_RWPHI)
-TMC_REG_PAIR(dba, TMC_DBALO, TMC_DBAHI)
+
+static inline u64 tmc_read_dba(struct tmc_drvdata *drvdata)
+{
+ if (drvdata->etr_quirks & CORESIGHT_QUIRK_ETR_FORCE_64B_DBA_RW)
+ return readq(drvdata->base + TMC_DBALO);
+
+ return coresight_read_reg_pair(drvdata->base, TMC_DBALO, TMC_DBAHI);
+}
+
+static inline void tmc_write_dba(struct tmc_drvdata *drvdata, u64 val)
+{
+ if (drvdata->etr_quirks & CORESIGHT_QUIRK_ETR_FORCE_64B_DBA_RW) {
+ writeq(val, drvdata->base + TMC_DBALO);
+ return;
+ }
+
+ coresight_write_reg_pair(drvdata->base, val, TMC_DBALO, TMC_DBAHI);
+}
/* Initialise the caps from unadvertised static capabilities of the device */
static inline void tmc_etr_init_caps(struct tmc_drvdata *drvdata, u32 dev_caps)
diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c
index 0ca39d905d0b..34d37abd2c8d 100644
--- a/drivers/hwtracing/coresight/coresight-tpiu.c
+++ b/drivers/hwtracing/coresight/coresight-tpiu.c
@@ -49,7 +49,7 @@
DEFINE_CORESIGHT_DEVLIST(tpiu_devs, "tpiu");
-/**
+/*
* @base: memory mapped base address for this component.
* @atclk: optional clock for the core parts of the TPIU.
* @csdev: component vitals needed by the framework.
@@ -60,49 +60,45 @@ struct tpiu_drvdata {
struct coresight_device *csdev;
};
-static void tpiu_enable_hw(struct tpiu_drvdata *drvdata)
+static void tpiu_enable_hw(struct csdev_access *csa)
{
- CS_UNLOCK(drvdata->base);
+ CS_UNLOCK(csa->base);
/* TODO: fill this up */
- CS_LOCK(drvdata->base);
+ CS_LOCK(csa->base);
}
static int tpiu_enable(struct coresight_device *csdev, u32 mode, void *__unused)
{
- struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
-
- tpiu_enable_hw(drvdata);
+ tpiu_enable_hw(&csdev->access);
atomic_inc(csdev->refcnt);
dev_dbg(&csdev->dev, "TPIU enabled\n");
return 0;
}
-static void tpiu_disable_hw(struct tpiu_drvdata *drvdata)
+static void tpiu_disable_hw(struct csdev_access *csa)
{
- CS_UNLOCK(drvdata->base);
+ CS_UNLOCK(csa->base);
/* Clear formatter and stop on flush */
- writel_relaxed(FFCR_STOP_FI, drvdata->base + TPIU_FFCR);
+ csdev_access_relaxed_write32(csa, FFCR_STOP_FI, TPIU_FFCR);
/* Generate manual flush */
- writel_relaxed(FFCR_STOP_FI | FFCR_FON_MAN, drvdata->base + TPIU_FFCR);
+ csdev_access_relaxed_write32(csa, FFCR_STOP_FI | FFCR_FON_MAN, TPIU_FFCR);
/* Wait for flush to complete */
- coresight_timeout(drvdata->base, TPIU_FFCR, FFCR_FON_MAN_BIT, 0);
+ coresight_timeout(csa, TPIU_FFCR, FFCR_FON_MAN_BIT, 0);
/* Wait for formatter to stop */
- coresight_timeout(drvdata->base, TPIU_FFSR, FFSR_FT_STOPPED_BIT, 1);
+ coresight_timeout(csa, TPIU_FFSR, FFSR_FT_STOPPED_BIT, 1);
- CS_LOCK(drvdata->base);
+ CS_LOCK(csa->base);
}
static int tpiu_disable(struct coresight_device *csdev)
{
- struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
-
if (atomic_dec_return(csdev->refcnt))
return -EBUSY;
- tpiu_disable_hw(drvdata);
+ tpiu_disable_hw(&csdev->access);
dev_dbg(&csdev->dev, "TPIU disabled\n");
return 0;
@@ -149,9 +145,10 @@ static int tpiu_probe(struct amba_device *adev, const struct amba_id *id)
return PTR_ERR(base);
drvdata->base = base;
+ desc.access = CSDEV_ACCESS_IOMEM(base);
/* Disable tpiu to support older devices */
- tpiu_disable_hw(drvdata);
+ tpiu_disable_hw(&desc.access);
pdata = coresight_get_platform_data(dev);
if (IS_ERR(pdata))
diff --git a/drivers/hwtracing/coresight/coresight-trbe.c b/drivers/hwtracing/coresight/coresight-trbe.c
new file mode 100644
index 000000000000..176868496879
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-trbe.c
@@ -0,0 +1,1157 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This driver enables the Trace Buffer Extension (TRBE) as a per-cpu
+ * coresight sink device, which can then pair with an appropriate per-cpu
+ * coresight source device (ETE) to generate the required trace data.
+ * Trace can be enabled via the perf framework.
+ *
+ * The AUX buffer handling is inspired from Arm SPE PMU driver.
+ *
+ * Copyright (C) 2020 ARM Ltd.
+ *
+ * Author: Anshuman Khandual <anshuman.khandual@arm.com>
+ */
+#define DRVNAME "arm_trbe"
+
+#define pr_fmt(fmt) DRVNAME ": " fmt
+
+#include <asm/barrier.h>
+#include "coresight-trbe.h"
+
+#define PERF_IDX2OFF(idx, buf) ((idx) % ((buf)->nr_pages << PAGE_SHIFT))
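+
+/*
+ * Worked example (assuming 4 KiB pages and a hypothetical 4-page AUX
+ * buffer, i.e. 16 KiB): a monotonically increasing handle->head of 40960
+ * maps to PERF_IDX2OFF(40960, buf) = 40960 % 16384 = 8192, the offset
+ * within the ring buffer where the driver may write next.
+ */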
+
+/*
+ * A padding packet that will help the user space tools
+ * in skipping relevant sections in the captured trace
+ * data which could not be decoded. TRBE doesn't support
+ * formatting the trace data, unlike the legacy CoreSight
+ * sinks and thus we use ETE trace packets to pad the
+ * sections of the buffer.
+ */
+#define ETE_IGNORE_PACKET 0x70
+
+/*
+ * The minimum amount of meaningful trace will contain:
+ * A-Sync, Trace Info, Trace On, Address, Atom.
+ * This is about 44 bytes of ETE trace. To be on
+ * the safe side, we assume 64 bytes is the minimum
+ * space required for a meaningful session, before
+ * we hit a "WRAP" event.
+ */
+#define TRBE_TRACE_MIN_BUF_SIZE 64
+
+enum trbe_fault_action {
+ TRBE_FAULT_ACT_WRAP,
+ TRBE_FAULT_ACT_SPURIOUS,
+ TRBE_FAULT_ACT_FATAL,
+};
+
+struct trbe_buf {
+ /*
+ * Even though trbe_base represents the vmap()
+ * mapped allocated buffer's start address, it is
+ * kept as an unsigned long for various arithmetic
+ * and comparison operations and also to be
+ * consistent with the trbe_write and trbe_limit
+ * sibling pointers.
+ */
+ unsigned long trbe_base;
+ unsigned long trbe_limit;
+ unsigned long trbe_write;
+ int nr_pages;
+ void **pages;
+ bool snapshot;
+ struct trbe_cpudata *cpudata;
+};
+
+struct trbe_cpudata {
+ bool trbe_flag;
+ u64 trbe_align;
+ int cpu;
+ enum cs_mode mode;
+ struct trbe_buf *buf;
+ struct trbe_drvdata *drvdata;
+};
+
+struct trbe_drvdata {
+ struct trbe_cpudata __percpu *cpudata;
+ struct perf_output_handle * __percpu *handle;
+ struct hlist_node hotplug_node;
+ int irq;
+ cpumask_t supported_cpus;
+ enum cpuhp_state trbe_online;
+ struct platform_device *pdev;
+};
+
+static int trbe_alloc_node(struct perf_event *event)
+{
+ if (event->cpu == -1)
+ return NUMA_NO_NODE;
+ return cpu_to_node(event->cpu);
+}
+
+static void trbe_drain_buffer(void)
+{
+ tsb_csync();
+ dsb(nsh);
+}
+
+static void trbe_drain_and_disable_local(void)
+{
+ u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1);
+
+ trbe_drain_buffer();
+
+ /*
+ * Disable the TRBE without clearing LIMITPTR which
+ * might be required for fetching the buffer limits.
+ */
+ trblimitr &= ~TRBLIMITR_ENABLE;
+ write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1);
+ isb();
+}
+
+static void trbe_reset_local(void)
+{
+ trbe_drain_and_disable_local();
+ write_sysreg_s(0, SYS_TRBLIMITR_EL1);
+ write_sysreg_s(0, SYS_TRBPTR_EL1);
+ write_sysreg_s(0, SYS_TRBBASER_EL1);
+ write_sysreg_s(0, SYS_TRBSR_EL1);
+}
+
+static void trbe_stop_and_truncate_event(struct perf_output_handle *handle)
+{
+ struct trbe_buf *buf = etm_perf_sink_config(handle);
+
+ /*
+ * We cannot proceed with the buffer collection and we
+ * do not have any data for the current session. The
+ * etm_perf driver expects to close out the aux_buffer
+ * at event_stop(). So disable the TRBE here and leave
+ * the update_buffer() to return a 0 size.
+ */
+ trbe_drain_and_disable_local();
+ perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
+ *this_cpu_ptr(buf->cpudata->drvdata->handle) = NULL;
+}
+
+/*
+ * TRBE Buffer Management
+ *
+ * The TRBE buffer spans from the base pointer till the limit pointer. When enabled,
+ * it starts writing trace data from the write pointer onward till the limit pointer.
+ * When the write pointer reaches the address just before the limit pointer, it gets
+ * wrapped around again to the base pointer. This is called a TRBE wrap event, which
+ * generates a maintenance interrupt when operated in WRAP or FILL mode. This driver
+ * uses FILL mode, where the TRBE stops the trace collection at wrap event. The IRQ
+ * handler updates the AUX buffer and re-enables the TRBE with updated WRITE and
+ * LIMIT pointers.
+ *
+ * Wrap around with an IRQ
+ * ------ < ------ < ------- < ----- < -----
+ * | |
+ * ------ > ------ > ------- > ----- > -----
+ *
+ * +---------------+-----------------------+
+ * | | |
+ * +---------------+-----------------------+
+ * Base Pointer Write Pointer Limit Pointer
+ *
+ * The base and limit pointers always need to be PAGE_SIZE aligned. But the write
+ * pointer can be aligned to the implementation defined TRBE trace buffer alignment
+ * as captured in trbe_cpudata->trbe_align.
+ *
+ *
+ * head tail wakeup
+ * +---------------------------------------+----- ~ ~ ------
+ * |$$$$$$$|################|$$$$$$$$$$$$$$| |
+ * +---------------------------------------+----- ~ ~ ------
+ * Base Pointer Write Pointer Limit Pointer
+ *
+ * The perf_output_handle indices (head, tail, wakeup) are monotonically increasing
+ * values which track all the driver writes and user reads from the perf auxiliary
+ * buffer. Generally [head..tail] is the area the driver can write into, unless
+ * the wakeup is behind the tail. The enabled TRBE buffer span needs to be adjusted
+ * and configured depending on the perf_output_handle indices, so that the driver
+ * does not overwrite areas in the perf auxiliary buffer which are being, or are
+ * yet to be, consumed by user space. The enabled TRBE buffer area is a moving
+ * subset of the allocated perf auxiliary buffer.
+ */
+static void trbe_pad_buf(struct perf_output_handle *handle, int len)
+{
+ struct trbe_buf *buf = etm_perf_sink_config(handle);
+ u64 head = PERF_IDX2OFF(handle->head, buf);
+
+ memset((void *)buf->trbe_base + head, ETE_IGNORE_PACKET, len);
+ if (!buf->snapshot)
+ perf_aux_output_skip(handle, len);
+}
+
+static unsigned long trbe_snapshot_offset(struct perf_output_handle *handle)
+{
+ struct trbe_buf *buf = etm_perf_sink_config(handle);
+
+ /*
+ * The ETE trace has alignment synchronization packets allowing
+ * the decoder to reset in case of an overflow or corruption.
+ * So we can use the entire buffer for the snapshot mode.
+ */
+ return buf->nr_pages * PAGE_SIZE;
+}
+
+/*
+ * TRBE Limit Calculation
+ *
+ * The following markers are used to illustrate various TRBE buffer situations.
+ *
+ * $$$$ - Data area, unconsumed captured trace data, not to be overwritten
+ * #### - Free area, enabled, trace will be written
+ * %%%% - Free area, disabled, trace will not be written
+ * ==== - Free area, padded with ETE_IGNORE_PACKET, trace will be skipped
+ */
+static unsigned long __trbe_normal_offset(struct perf_output_handle *handle)
+{
+ struct trbe_buf *buf = etm_perf_sink_config(handle);
+ struct trbe_cpudata *cpudata = buf->cpudata;
+ const u64 bufsize = buf->nr_pages * PAGE_SIZE;
+ u64 limit = bufsize;
+ u64 head, tail, wakeup;
+
+ head = PERF_IDX2OFF(handle->head, buf);
+
+ /*
+ * head
+ * ------->|
+ * |
+ * head TRBE align tail
+ * +----|-------|---------------|-------+
+ * |$$$$|=======|###############|$$$$$$$|
+ * +----|-------|---------------|-------+
+ * trbe_base trbe_base + nr_pages
+ *
+ * The perf aux buffer output head position can be misaligned depending on
+ * various factors, including user space reads. If misaligned, the head
+ * needs to be aligned before the TRBE can be configured. Pad the alignment
+ * gap with ETE_IGNORE_PACKET bytes, which will be ignored by user tools,
+ * and skip this section, thus advancing the head.
+ */
+ if (!IS_ALIGNED(head, cpudata->trbe_align)) {
+ unsigned long delta = roundup(head, cpudata->trbe_align) - head;
+
+ delta = min(delta, handle->size);
+ trbe_pad_buf(handle, delta);
+ head = PERF_IDX2OFF(handle->head, buf);
+ }
+
+ /*
+ * head = tail (size = 0)
+ * +----|-------------------------------+
+ * |$$$$|$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ |
+ * +----|-------------------------------+
+ * trbe_base trbe_base + nr_pages
+ *
+ * Perf aux buffer does not have any space for the driver to write into.
+ * Just communicate trace truncation event to the user space by marking
+ * it with PERF_AUX_FLAG_TRUNCATED.
+ */
+ if (!handle->size) {
+ perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
+ return 0;
+ }
+
+ /* Compute the tail and wakeup indices now that we've aligned head */
+ tail = PERF_IDX2OFF(handle->head + handle->size, buf);
+ wakeup = PERF_IDX2OFF(handle->wakeup, buf);
+
+ /*
+ * Let's calculate the buffer area which the TRBE could write into. There
+ * are three possible scenarios here. The limit needs to be aligned to
+ * PAGE_SIZE per the TRBE requirement. Always avoid clobbering the
+ * unconsumed data.
+ *
+ * 1) head < tail
+ *
+ * head tail
+ * +----|-----------------------|-------+
+ * |$$$$|#######################|$$$$$$$|
+ * +----|-----------------------|-------+
+ * trbe_base limit trbe_base + nr_pages
+ *
+ * TRBE could write into [head..tail] area. Unless the tail is right at
+ * the end of the buffer, neither a wrap around nor an IRQ is expected
+ * while being enabled.
+ *
+ * 2) head == tail
+ *
+ * head = tail (size > 0)
+ * +----|-------------------------------+
+ * |%%%%|###############################|
+ * +----|-------------------------------+
+ * trbe_base limit = trbe_base + nr_pages
+ *
+ * TRBE should just write into [head..base + nr_pages] area even though
+ * the entire buffer is empty. Reason being, when the trace reaches the
+ * end of the buffer, it will just wrap around with an IRQ giving an
+ * opportunity to reconfigure the buffer.
+ *
+ * 3) tail < head
+ *
+ * tail head
+ * +----|-----------------------|-------+
+ * |%%%%|$$$$$$$$$$$$$$$$$$$$$$$|#######|
+ * +----|-----------------------|-------+
+ * trbe_base limit = trbe_base + nr_pages
+ *
+ * TRBE should just write into [head..base + nr_pages] area even though
+ * the [trbe_base..tail] is also empty. Reason being, when the trace
+ * reaches the end of the buffer, it will just wrap around with an IRQ
+ * giving an opportunity to reconfigure the buffer.
+ */
+ if (head < tail)
+ limit = round_down(tail, PAGE_SIZE);
+
+ /*
+ * Wakeup may be arbitrarily far into the future. If it's not in the
+ * current generation, either we'll wrap before hitting it, or it's
+ * in the past and has been handled already.
+ *
+ * If there's a wakeup before we wrap, arrange to be woken up by the
+ * page boundary following it. Keep the tail boundary if that's lower.
+ *
+ * head wakeup tail
+ * +----|---------------|-------|-------+
+ * |$$$$|###############|%%%%%%%|$$$$$$$|
+ * +----|---------------|-------|-------+
+ * trbe_base limit trbe_base + nr_pages
+ */
+ if (handle->wakeup < (handle->head + handle->size) && head <= wakeup)
+ limit = min(limit, round_up(wakeup, PAGE_SIZE));
+
+ /*
+ * There are two situations where this can happen, i.e. the limit ends up
+ * before the head and hence the TRBE cannot be configured.
+ *
+ * 1) head < tail (aligned down with PAGE_SIZE) and also they are both
+ * within the same PAGE size range.
+ *
+ * PAGE_SIZE
+ * |----------------------|
+ *
+ * limit head tail
+ * +------------|------|--------|-------+
+ * |$$$$$$$$$$$$$$$$$$$|========|$$$$$$$|
+ * +------------|------|--------|-------+
+ * trbe_base trbe_base + nr_pages
+ *
+ * 2) head < wakeup (aligned up with PAGE_SIZE) < tail and also both
+ * head and wakeup are within same PAGE size range.
+ *
+ * PAGE_SIZE
+ * |----------------------|
+ *
+ * limit head wakeup tail
+ * +----|------|-------|--------|-------+
+ * |$$$$$$$$$$$|=======|========|$$$$$$$|
+ * +----|------|-------|--------|-------+
+ * trbe_base trbe_base + nr_pages
+ */
+ if (limit > head)
+ return limit;
+
+ trbe_pad_buf(handle, handle->size);
+ perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
+ return 0;
+}
+
+static unsigned long trbe_normal_offset(struct perf_output_handle *handle)
+{
+ struct trbe_buf *buf = perf_get_aux(handle);
+ u64 limit = __trbe_normal_offset(handle);
+ u64 head = PERF_IDX2OFF(handle->head, buf);
+
+ /*
+ * If the head is too close to the limit and we don't
+ * have space for a meaningful run, we'd rather pad it
+ * and start afresh.
+ */
+ if (limit && (limit - head < TRBE_TRACE_MIN_BUF_SIZE)) {
+ trbe_pad_buf(handle, limit - head);
+ limit = __trbe_normal_offset(handle);
+ }
+ return limit;
+}
+
+static unsigned long compute_trbe_buffer_limit(struct perf_output_handle *handle)
+{
+ struct trbe_buf *buf = etm_perf_sink_config(handle);
+ unsigned long offset;
+
+ if (buf->snapshot)
+ offset = trbe_snapshot_offset(handle);
+ else
+ offset = trbe_normal_offset(handle);
+ return buf->trbe_base + offset;
+}
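+
+/*
+ * Worked example (hypothetical values, assuming a 1KB TRBE alignment): with
+ * handle->head mapping to offset 0x1340, __trbe_normal_offset() first pads
+ * the 0xc0 bytes up to 0x1400 with ETE_IGNORE_PACKET and then returns a
+ * PAGE_SIZE aligned limit. compute_trbe_buffer_limit() turns that offset
+ * into the virtual address trbe_base + limit, which is later programmed as
+ * the TRBE limit pointer.
+ */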
+
+static void clr_trbe_status(void)
+{
+ u64 trbsr = read_sysreg_s(SYS_TRBSR_EL1);
+
+ WARN_ON(is_trbe_enabled());
+ trbsr &= ~TRBSR_IRQ;
+ trbsr &= ~TRBSR_TRG;
+ trbsr &= ~TRBSR_WRAP;
+ trbsr &= ~(TRBSR_EC_MASK << TRBSR_EC_SHIFT);
+ trbsr &= ~(TRBSR_BSC_MASK << TRBSR_BSC_SHIFT);
+ trbsr &= ~TRBSR_STOP;
+ write_sysreg_s(trbsr, SYS_TRBSR_EL1);
+}
+
+static void set_trbe_limit_pointer_enabled(unsigned long addr)
+{
+ u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1);
+
+ WARN_ON(!IS_ALIGNED(addr, (1UL << TRBLIMITR_LIMIT_SHIFT)));
+ WARN_ON(!IS_ALIGNED(addr, PAGE_SIZE));
+
+ trblimitr &= ~TRBLIMITR_NVM;
+ trblimitr &= ~(TRBLIMITR_FILL_MODE_MASK << TRBLIMITR_FILL_MODE_SHIFT);
+ trblimitr &= ~(TRBLIMITR_TRIG_MODE_MASK << TRBLIMITR_TRIG_MODE_SHIFT);
+ trblimitr &= ~(TRBLIMITR_LIMIT_MASK << TRBLIMITR_LIMIT_SHIFT);
+
+ /*
+ * Fill trace buffer mode is used here while configuring the
+ * TRBE for trace capture. In this particular mode, the trace
+ * collection is stopped and a maintenance interrupt is raised
+ * when the current write pointer wraps. This pause in trace
+ * collection gives the software an opportunity to capture the
+ * trace data in the interrupt handler, before reconfiguring
+ * the TRBE.
+ */
+ trblimitr |= (TRBE_FILL_MODE_FILL & TRBLIMITR_FILL_MODE_MASK) << TRBLIMITR_FILL_MODE_SHIFT;
+
+ /*
+ * Trigger mode is not used here while configuring the TRBE for
+ * the trace capture. Hence just keep this in the ignore mode.
+ */
+ trblimitr |= (TRBE_TRIG_MODE_IGNORE & TRBLIMITR_TRIG_MODE_MASK) <<
+ TRBLIMITR_TRIG_MODE_SHIFT;
+ trblimitr |= (addr & PAGE_MASK);
+
+ trblimitr |= TRBLIMITR_ENABLE;
+ write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1);
+
+ /* Synchronize the TRBE enable event */
+ isb();
+}
+
+static void trbe_enable_hw(struct trbe_buf *buf)
+{
+ WARN_ON(buf->trbe_write < buf->trbe_base);
+ WARN_ON(buf->trbe_write >= buf->trbe_limit);
+ set_trbe_disabled();
+ isb();
+ clr_trbe_status();
+ set_trbe_base_pointer(buf->trbe_base);
+ set_trbe_write_pointer(buf->trbe_write);
+
+ /*
+ * Synchronize all the register updates
+ * till now before enabling the TRBE.
+ */
+ isb();
+ set_trbe_limit_pointer_enabled(buf->trbe_limit);
+}
+
+static enum trbe_fault_action trbe_get_fault_act(u64 trbsr)
+{
+ int ec = get_trbe_ec(trbsr);
+ int bsc = get_trbe_bsc(trbsr);
+
+ WARN_ON(is_trbe_running(trbsr));
+ if (is_trbe_trg(trbsr) || is_trbe_abort(trbsr))
+ return TRBE_FAULT_ACT_FATAL;
+
+ if ((ec == TRBE_EC_STAGE1_ABORT) || (ec == TRBE_EC_STAGE2_ABORT))
+ return TRBE_FAULT_ACT_FATAL;
+
+ if (is_trbe_wrap(trbsr) && (ec == TRBE_EC_OTHERS) && (bsc == TRBE_BSC_FILLED)) {
+ if (get_trbe_write_pointer() == get_trbe_base_pointer())
+ return TRBE_FAULT_ACT_WRAP;
+ }
+ return TRBE_FAULT_ACT_SPURIOUS;
+}
+
+static void *arm_trbe_alloc_buffer(struct coresight_device *csdev,
+ struct perf_event *event, void **pages,
+ int nr_pages, bool snapshot)
+{
+ struct trbe_buf *buf;
+ struct page **pglist;
+ int i;
+
+ /*
+ * TRBE LIMIT and TRBE WRITE pointers must be page aligned. But with
+ * just a single page, there would not be any room left while writing
+ * into a partially filled TRBE buffer after the page size alignment.
+ * Hence restrict the minimum buffer size as two pages.
+ */
+ if (nr_pages < 2)
+ return NULL;
+
+ buf = kzalloc_node(sizeof(*buf), GFP_KERNEL, trbe_alloc_node(event));
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ pglist = kcalloc(nr_pages, sizeof(*pglist), GFP_KERNEL);
+ if (!pglist) {
+ kfree(buf);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ for (i = 0; i < nr_pages; i++)
+ pglist[i] = virt_to_page(pages[i]);
+
+ buf->trbe_base = (unsigned long)vmap(pglist, nr_pages, VM_MAP, PAGE_KERNEL);
+ if (!buf->trbe_base) {
+ kfree(pglist);
+ kfree(buf);
+ return ERR_PTR(-ENOMEM);
+ }
+ buf->trbe_limit = buf->trbe_base + nr_pages * PAGE_SIZE;
+ buf->trbe_write = buf->trbe_base;
+ buf->snapshot = snapshot;
+ buf->nr_pages = nr_pages;
+ buf->pages = pages;
+ kfree(pglist);
+ return buf;
+}
+
+static void arm_trbe_free_buffer(void *config)
+{
+ struct trbe_buf *buf = config;
+
+ vunmap((void *)buf->trbe_base);
+ kfree(buf);
+}
+
+static unsigned long arm_trbe_update_buffer(struct coresight_device *csdev,
+ struct perf_output_handle *handle,
+ void *config)
+{
+ struct trbe_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ struct trbe_cpudata *cpudata = dev_get_drvdata(&csdev->dev);
+ struct trbe_buf *buf = config;
+ enum trbe_fault_action act;
+ unsigned long size, offset;
+ unsigned long write, base, status;
+ unsigned long flags;
+
+ WARN_ON(buf->cpudata != cpudata);
+ WARN_ON(cpudata->cpu != smp_processor_id());
+ WARN_ON(cpudata->drvdata != drvdata);
+ if (cpudata->mode != CS_MODE_PERF)
+ return 0;
+
+ perf_aux_output_flag(handle, PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW);
+
+ /*
+ * We are about to disable the TRBE, and this could in turn
+ * fill up the buffer, triggering an IRQ. The IRQ could be
+ * taken by the PE asynchronously, causing a race here against
+ * the IRQ handler in closing out the handle. So, let us
+ * make sure the IRQ can't trigger while we are collecting
+ * the buffer, and that a WRAP event is handled accordingly.
+ */
+ local_irq_save(flags);
+
+ /*
+ * If the TRBE was disabled due to lack of space in the AUX buffer or a
+ * spurious fault, the driver leaves it disabled, truncating the buffer.
+ * Since the etm_perf driver expects to close out the AUX buffer, the
+ * driver skips it. Thus, just pass in 0 size here to indicate that the
+ * buffer was truncated.
+ */
+ if (!is_trbe_enabled()) {
+ size = 0;
+ goto done;
+ }
+ /*
+ * perf handle structure needs to be shared with the TRBE IRQ handler for
+ * capturing trace data and restarting the handle. There is a possibility
+ * of a dangling reference crash when the etm event is being stopped
+ * while a TRBE IRQ is also being processed. This happens due to the release
+ * of the perf handle via perf_aux_output_end() in etm_event_stop(). Stopping
+ * the TRBE here will ensure that no IRQ could be generated when the perf
+ * handle gets freed in etm_event_stop().
+ */
+ trbe_drain_and_disable_local();
+ write = get_trbe_write_pointer();
+ base = get_trbe_base_pointer();
+
+ /* Check if there is a pending interrupt and handle it here */
+ status = read_sysreg_s(SYS_TRBSR_EL1);
+ if (is_trbe_irq(status)) {
+
+ /*
+ * Now that we are handling the IRQ here, clear the IRQ
+ * from the status, to let the irq handler know that it
+ * is taken care of.
+ */
+ clr_trbe_irq();
+ isb();
+
+ act = trbe_get_fault_act(status);
+ /*
+ * If this was not due to a WRAP event, we hit some
+ * error and as such the buffer is empty.
+ */
+ if (act != TRBE_FAULT_ACT_WRAP) {
+ size = 0;
+ goto done;
+ }
+
+ /*
+ * Otherwise, the buffer is full and the write pointer
+ * has reached the base. Adjust it back to the limit pointer
+ * for the correct size. Also, mark the buffer truncated.
+ */
+ write = get_trbe_limit_pointer();
+ perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
+ }
+
+ offset = write - base;
+ if (WARN_ON_ONCE(offset < PERF_IDX2OFF(handle->head, buf)))
+ size = 0;
+ else
+ size = offset - PERF_IDX2OFF(handle->head, buf);
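+
+ /*
+ * For example (hypothetical values): if the TRBE stopped with
+ * write = base + 0x3000 and the handle head maps to offset 0x1000,
+ * then 0x2000 bytes of new trace are reported below.
+ */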
+
+done:
+ local_irq_restore(flags);
+
+ if (buf->snapshot)
+ handle->head += size;
+ return size;
+}
+
+static int arm_trbe_enable(struct coresight_device *csdev, u32 mode, void *data)
+{
+ struct trbe_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ struct trbe_cpudata *cpudata = dev_get_drvdata(&csdev->dev);
+ struct perf_output_handle *handle = data;
+ struct trbe_buf *buf = etm_perf_sink_config(handle);
+
+ WARN_ON(cpudata->cpu != smp_processor_id());
+ WARN_ON(cpudata->drvdata != drvdata);
+ if (mode != CS_MODE_PERF)
+ return -EINVAL;
+
+ *this_cpu_ptr(drvdata->handle) = handle;
+ cpudata->buf = buf;
+ cpudata->mode = mode;
+ buf->cpudata = cpudata;
+ buf->trbe_limit = compute_trbe_buffer_limit(handle);
+ buf->trbe_write = buf->trbe_base + PERF_IDX2OFF(handle->head, buf);
+ if (buf->trbe_limit == buf->trbe_base) {
+ trbe_stop_and_truncate_event(handle);
+ return 0;
+ }
+ trbe_enable_hw(buf);
+ return 0;
+}
+
+static int arm_trbe_disable(struct coresight_device *csdev)
+{
+ struct trbe_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ struct trbe_cpudata *cpudata = dev_get_drvdata(&csdev->dev);
+ struct trbe_buf *buf = cpudata->buf;
+
+ WARN_ON(buf->cpudata != cpudata);
+ WARN_ON(cpudata->cpu != smp_processor_id());
+ WARN_ON(cpudata->drvdata != drvdata);
+ if (cpudata->mode != CS_MODE_PERF)
+ return -EINVAL;
+
+ trbe_drain_and_disable_local();
+ buf->cpudata = NULL;
+ cpudata->buf = NULL;
+ cpudata->mode = CS_MODE_DISABLED;
+ return 0;
+}
+
+static void trbe_handle_spurious(struct perf_output_handle *handle)
+{
+ struct trbe_buf *buf = etm_perf_sink_config(handle);
+
+ buf->trbe_limit = compute_trbe_buffer_limit(handle);
+ buf->trbe_write = buf->trbe_base + PERF_IDX2OFF(handle->head, buf);
+ if (buf->trbe_limit == buf->trbe_base) {
+ trbe_drain_and_disable_local();
+ return;
+ }
+ trbe_enable_hw(buf);
+}
+
+static void trbe_handle_overflow(struct perf_output_handle *handle)
+{
+ struct perf_event *event = handle->event;
+ struct trbe_buf *buf = etm_perf_sink_config(handle);
+ unsigned long offset, size;
+ struct etm_event_data *event_data;
+
+ offset = get_trbe_limit_pointer() - get_trbe_base_pointer();
+ size = offset - PERF_IDX2OFF(handle->head, buf);
+ if (buf->snapshot)
+ handle->head += size;
+
+ /*
+ * Mark the buffer as truncated, as we have stopped the trace
+ * collection upon the WRAP event, without stopping the source.
+ */
+ perf_aux_output_flag(handle, PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW |
+ PERF_AUX_FLAG_TRUNCATED);
+ perf_aux_output_end(handle, size);
+ event_data = perf_aux_output_begin(handle, event);
+ if (!event_data) {
+ /*
+ * We are unable to restart the trace collection,
+ * thus leave the TRBE disabled. The etm-perf driver
+ * is able to detect this with a disconnected handle
+ * (handle->event = NULL).
+ */
+ trbe_drain_and_disable_local();
+ *this_cpu_ptr(buf->cpudata->drvdata->handle) = NULL;
+ return;
+ }
+ buf->trbe_limit = compute_trbe_buffer_limit(handle);
+ buf->trbe_write = buf->trbe_base + PERF_IDX2OFF(handle->head, buf);
+ if (buf->trbe_limit == buf->trbe_base) {
+ trbe_stop_and_truncate_event(handle);
+ return;
+ }
+ *this_cpu_ptr(buf->cpudata->drvdata->handle) = handle;
+ trbe_enable_hw(buf);
+}
+
+static bool is_perf_trbe(struct perf_output_handle *handle)
+{
+ struct trbe_buf *buf = etm_perf_sink_config(handle);
+ struct trbe_cpudata *cpudata = buf->cpudata;
+ struct trbe_drvdata *drvdata = cpudata->drvdata;
+ int cpu = smp_processor_id();
+
+ WARN_ON(buf->trbe_base != get_trbe_base_pointer());
+ WARN_ON(buf->trbe_limit != get_trbe_limit_pointer());
+
+ if (cpudata->mode != CS_MODE_PERF)
+ return false;
+
+ if (cpudata->cpu != cpu)
+ return false;
+
+ if (!cpumask_test_cpu(cpu, &drvdata->supported_cpus))
+ return false;
+
+ return true;
+}
+
+static irqreturn_t arm_trbe_irq_handler(int irq, void *dev)
+{
+ struct perf_output_handle **handle_ptr = dev;
+ struct perf_output_handle *handle = *handle_ptr;
+ enum trbe_fault_action act;
+ u64 status;
+
+ /*
+ * Ensure the trace is visible to the CPUs and
+ * any external aborts have been resolved.
+ */
+ trbe_drain_and_disable_local();
+
+ status = read_sysreg_s(SYS_TRBSR_EL1);
+ /*
+ * If the pending IRQ was handled by update_buffer callback
+ * we have nothing to do here.
+ */
+ if (!is_trbe_irq(status))
+ return IRQ_NONE;
+
+ clr_trbe_irq();
+ isb();
+
+ if (WARN_ON_ONCE(!handle) || !perf_get_aux(handle))
+ return IRQ_NONE;
+
+ if (!is_perf_trbe(handle))
+ return IRQ_NONE;
+
+ /*
+ * Ensure perf callbacks have completed, which may disable
+ * the trace buffer in response to a TRUNCATION flag.
+ */
+ irq_work_run();
+
+ act = trbe_get_fault_act(status);
+ switch (act) {
+ case TRBE_FAULT_ACT_WRAP:
+ trbe_handle_overflow(handle);
+ break;
+ case TRBE_FAULT_ACT_SPURIOUS:
+ trbe_handle_spurious(handle);
+ break;
+ case TRBE_FAULT_ACT_FATAL:
+ trbe_stop_and_truncate_event(handle);
+ break;
+ }
+ return IRQ_HANDLED;
+}
+
+static const struct coresight_ops_sink arm_trbe_sink_ops = {
+ .enable = arm_trbe_enable,
+ .disable = arm_trbe_disable,
+ .alloc_buffer = arm_trbe_alloc_buffer,
+ .free_buffer = arm_trbe_free_buffer,
+ .update_buffer = arm_trbe_update_buffer,
+};
+
+static const struct coresight_ops arm_trbe_cs_ops = {
+ .sink_ops = &arm_trbe_sink_ops,
+};
+
+static ssize_t align_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct trbe_cpudata *cpudata = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%llx\n", cpudata->trbe_align);
+}
+static DEVICE_ATTR_RO(align);
+
+static ssize_t flag_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct trbe_cpudata *cpudata = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", cpudata->trbe_flag);
+}
+static DEVICE_ATTR_RO(flag);
+
+static struct attribute *arm_trbe_attrs[] = {
+ &dev_attr_align.attr,
+ &dev_attr_flag.attr,
+ NULL,
+};
+
+static const struct attribute_group arm_trbe_group = {
+ .attrs = arm_trbe_attrs,
+};
+
+static const struct attribute_group *arm_trbe_groups[] = {
+ &arm_trbe_group,
+ NULL,
+};
+
+static void arm_trbe_enable_cpu(void *info)
+{
+ struct trbe_drvdata *drvdata = info;
+
+ trbe_reset_local();
+ enable_percpu_irq(drvdata->irq, IRQ_TYPE_NONE);
+}
+
+static void arm_trbe_register_coresight_cpu(struct trbe_drvdata *drvdata, int cpu)
+{
+ struct trbe_cpudata *cpudata = per_cpu_ptr(drvdata->cpudata, cpu);
+ struct coresight_device *trbe_csdev = coresight_get_percpu_sink(cpu);
+ struct coresight_desc desc = { 0 };
+ struct device *dev;
+
+ if (WARN_ON(trbe_csdev))
+ return;
+
+ dev = &cpudata->drvdata->pdev->dev;
+ desc.name = devm_kasprintf(dev, GFP_KERNEL, "trbe%d", cpu);
+ if (!desc.name)
+ goto cpu_clear;
+
+ desc.type = CORESIGHT_DEV_TYPE_SINK;
+ desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_PERCPU_SYSMEM;
+ desc.ops = &arm_trbe_cs_ops;
+ desc.pdata = dev_get_platdata(dev);
+ desc.groups = arm_trbe_groups;
+ desc.dev = dev;
+ trbe_csdev = coresight_register(&desc);
+ if (IS_ERR(trbe_csdev))
+ goto cpu_clear;
+
+ dev_set_drvdata(&trbe_csdev->dev, cpudata);
+ coresight_set_percpu_sink(cpu, trbe_csdev);
+ return;
+cpu_clear:
+ cpumask_clear_cpu(cpu, &drvdata->supported_cpus);
+}
+
+static void arm_trbe_probe_cpu(void *info)
+{
+ struct trbe_drvdata *drvdata = info;
+ int cpu = smp_processor_id();
+ struct trbe_cpudata *cpudata = per_cpu_ptr(drvdata->cpudata, cpu);
+ u64 trbidr;
+
+ if (WARN_ON(!cpudata))
+ goto cpu_clear;
+
+ if (!is_trbe_available()) {
+ pr_err("TRBE is not implemented on cpu %d\n", cpu);
+ goto cpu_clear;
+ }
+
+ trbidr = read_sysreg_s(SYS_TRBIDR_EL1);
+ if (!is_trbe_programmable(trbidr)) {
+ pr_err("TRBE is owned in higher exception level on cpu %d\n", cpu);
+ goto cpu_clear;
+ }
+
+ cpudata->trbe_align = 1ULL << get_trbe_address_align(trbidr);
+ if (cpudata->trbe_align > SZ_2K) {
+ pr_err("Unsupported alignment on cpu %d\n", cpu);
+ goto cpu_clear;
+ }
+ cpudata->trbe_flag = get_trbe_flag_update(trbidr);
+ cpudata->cpu = cpu;
+ cpudata->drvdata = drvdata;
+ return;
+cpu_clear:
+ cpumask_clear_cpu(cpu, &drvdata->supported_cpus);
+}
+
+static void arm_trbe_remove_coresight_cpu(void *info)
+{
+ int cpu = smp_processor_id();
+ struct trbe_drvdata *drvdata = info;
+ struct trbe_cpudata *cpudata = per_cpu_ptr(drvdata->cpudata, cpu);
+ struct coresight_device *trbe_csdev = coresight_get_percpu_sink(cpu);
+
+ disable_percpu_irq(drvdata->irq);
+ trbe_reset_local();
+ if (trbe_csdev) {
+ coresight_unregister(trbe_csdev);
+ cpudata->drvdata = NULL;
+ coresight_set_percpu_sink(cpu, NULL);
+ }
+}
+
+static int arm_trbe_probe_coresight(struct trbe_drvdata *drvdata)
+{
+ int cpu;
+
+ drvdata->cpudata = alloc_percpu(typeof(*drvdata->cpudata));
+ if (!drvdata->cpudata)
+ return -ENOMEM;
+
+ for_each_cpu(cpu, &drvdata->supported_cpus) {
+ smp_call_function_single(cpu, arm_trbe_probe_cpu, drvdata, 1);
+ if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
+ arm_trbe_register_coresight_cpu(drvdata, cpu);
+ if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
+ smp_call_function_single(cpu, arm_trbe_enable_cpu, drvdata, 1);
+ }
+ return 0;
+}
+
+static int arm_trbe_remove_coresight(struct trbe_drvdata *drvdata)
+{
+ int cpu;
+
+ for_each_cpu(cpu, &drvdata->supported_cpus)
+ smp_call_function_single(cpu, arm_trbe_remove_coresight_cpu, drvdata, 1);
+ free_percpu(drvdata->cpudata);
+ return 0;
+}
+
+static int arm_trbe_cpu_startup(unsigned int cpu, struct hlist_node *node)
+{
+ struct trbe_drvdata *drvdata = hlist_entry_safe(node, struct trbe_drvdata, hotplug_node);
+
+ if (cpumask_test_cpu(cpu, &drvdata->supported_cpus)) {
+
+ /*
+ * If this CPU was not probed for TRBE,
+ * initialize it now.
+ */
+ if (!coresight_get_percpu_sink(cpu)) {
+ arm_trbe_probe_cpu(drvdata);
+ if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
+ arm_trbe_register_coresight_cpu(drvdata, cpu);
+ if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
+ arm_trbe_enable_cpu(drvdata);
+ } else {
+ arm_trbe_enable_cpu(drvdata);
+ }
+ }
+ return 0;
+}
+
+static int arm_trbe_cpu_teardown(unsigned int cpu, struct hlist_node *node)
+{
+ struct trbe_drvdata *drvdata = hlist_entry_safe(node, struct trbe_drvdata, hotplug_node);
+
+ if (cpumask_test_cpu(cpu, &drvdata->supported_cpus)) {
+ disable_percpu_irq(drvdata->irq);
+ trbe_reset_local();
+ }
+ return 0;
+}
+
+static int arm_trbe_probe_cpuhp(struct trbe_drvdata *drvdata)
+{
+ enum cpuhp_state trbe_online;
+ int ret;
+
+ trbe_online = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, DRVNAME,
+ arm_trbe_cpu_startup, arm_trbe_cpu_teardown);
+ if (trbe_online < 0)
+ return trbe_online;
+
+ ret = cpuhp_state_add_instance(trbe_online, &drvdata->hotplug_node);
+ if (ret) {
+ cpuhp_remove_multi_state(trbe_online);
+ return ret;
+ }
+ drvdata->trbe_online = trbe_online;
+ return 0;
+}
+
+static void arm_trbe_remove_cpuhp(struct trbe_drvdata *drvdata)
+{
+ cpuhp_remove_multi_state(drvdata->trbe_online);
+}
+
+static int arm_trbe_probe_irq(struct platform_device *pdev,
+ struct trbe_drvdata *drvdata)
+{
+ int ret;
+
+ drvdata->irq = platform_get_irq(pdev, 0);
+ if (drvdata->irq < 0) {
+ pr_err("IRQ not found for the platform device\n");
+ return drvdata->irq;
+ }
+
+ if (!irq_is_percpu(drvdata->irq)) {
+ pr_err("IRQ is not a PPI\n");
+ return -EINVAL;
+ }
+
+ if (irq_get_percpu_devid_partition(drvdata->irq, &drvdata->supported_cpus))
+ return -EINVAL;
+
+ drvdata->handle = alloc_percpu(struct perf_output_handle *);
+ if (!drvdata->handle)
+ return -ENOMEM;
+
+ ret = request_percpu_irq(drvdata->irq, arm_trbe_irq_handler, DRVNAME, drvdata->handle);
+ if (ret) {
+ free_percpu(drvdata->handle);
+ return ret;
+ }
+ return 0;
+}
+
+static void arm_trbe_remove_irq(struct trbe_drvdata *drvdata)
+{
+ free_percpu_irq(drvdata->irq, drvdata->handle);
+ free_percpu(drvdata->handle);
+}
+
+static int arm_trbe_device_probe(struct platform_device *pdev)
+{
+ struct coresight_platform_data *pdata;
+ struct trbe_drvdata *drvdata;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ pdata = coresight_get_platform_data(dev);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+
+ dev_set_drvdata(dev, drvdata);
+ dev->platform_data = pdata;
+ drvdata->pdev = pdev;
+ ret = arm_trbe_probe_irq(pdev, drvdata);
+ if (ret)
+ return ret;
+
+ ret = arm_trbe_probe_coresight(drvdata);
+ if (ret)
+ goto probe_failed;
+
+ ret = arm_trbe_probe_cpuhp(drvdata);
+ if (ret)
+ goto cpuhp_failed;
+
+ return 0;
+cpuhp_failed:
+ arm_trbe_remove_coresight(drvdata);
+probe_failed:
+ arm_trbe_remove_irq(drvdata);
+ return ret;
+}
+
+static int arm_trbe_device_remove(struct platform_device *pdev)
+{
+ struct trbe_drvdata *drvdata = platform_get_drvdata(pdev);
+
+ arm_trbe_remove_cpuhp(drvdata);
+ arm_trbe_remove_coresight(drvdata);
+ arm_trbe_remove_irq(drvdata);
+ return 0;
+}
+
+static const struct of_device_id arm_trbe_of_match[] = {
+ { .compatible = "arm,trace-buffer-extension"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, arm_trbe_of_match);
+
+static struct platform_driver arm_trbe_driver = {
+ .driver = {
+ .name = DRVNAME,
+ .of_match_table = of_match_ptr(arm_trbe_of_match),
+ .suppress_bind_attrs = true,
+ },
+ .probe = arm_trbe_device_probe,
+ .remove = arm_trbe_device_remove,
+};
+
+static int __init arm_trbe_init(void)
+{
+ int ret;
+
+ if (arm64_kernel_unmapped_at_el0()) {
+ pr_err("TRBE wouldn't work if kernel gets unmapped at EL0\n");
+ return -EOPNOTSUPP;
+ }
+
+ ret = platform_driver_register(&arm_trbe_driver);
+ if (!ret)
+ return 0;
+
+ pr_err("Error registering %s platform driver\n", DRVNAME);
+ return ret;
+}
+
+static void __exit arm_trbe_exit(void)
+{
+ platform_driver_unregister(&arm_trbe_driver);
+}
+module_init(arm_trbe_init);
+module_exit(arm_trbe_exit);
+
+MODULE_AUTHOR("Anshuman Khandual <anshuman.khandual@arm.com>");
+MODULE_DESCRIPTION("Arm Trace Buffer Extension (TRBE) driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hwtracing/coresight/coresight-trbe.h b/drivers/hwtracing/coresight/coresight-trbe.h
new file mode 100644
index 000000000000..abf3e36082f0
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-trbe.h
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This contains all required hardware related helper functions for
+ * Trace Buffer Extension (TRBE) driver in the coresight framework.
+ *
+ * Copyright (C) 2020 ARM Ltd.
+ *
+ * Author: Anshuman Khandual <anshuman.khandual@arm.com>
+ */
+#include <linux/coresight.h>
+#include <linux/device.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/smp.h>
+
+#include "coresight-etm-perf.h"
+
+static inline bool is_trbe_available(void)
+{
+ u64 aa64dfr0 = read_sysreg_s(SYS_ID_AA64DFR0_EL1);
+ unsigned int trbe = cpuid_feature_extract_unsigned_field(aa64dfr0, ID_AA64DFR0_TRBE_SHIFT);
+
+ return trbe >= 0b0001;
+}
+
+static inline bool is_trbe_enabled(void)
+{
+ u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1);
+
+ return trblimitr & TRBLIMITR_ENABLE;
+}
+
+#define TRBE_EC_OTHERS 0
+#define TRBE_EC_STAGE1_ABORT 36
+#define TRBE_EC_STAGE2_ABORT 37
+
+static inline int get_trbe_ec(u64 trbsr)
+{
+ return (trbsr >> TRBSR_EC_SHIFT) & TRBSR_EC_MASK;
+}
+
+#define TRBE_BSC_NOT_STOPPED 0
+#define TRBE_BSC_FILLED 1
+#define TRBE_BSC_TRIGGERED 2
+
+static inline int get_trbe_bsc(u64 trbsr)
+{
+ return (trbsr >> TRBSR_BSC_SHIFT) & TRBSR_BSC_MASK;
+}
+
+static inline void clr_trbe_irq(void)
+{
+ u64 trbsr = read_sysreg_s(SYS_TRBSR_EL1);
+
+ trbsr &= ~TRBSR_IRQ;
+ write_sysreg_s(trbsr, SYS_TRBSR_EL1);
+}
+
+static inline bool is_trbe_irq(u64 trbsr)
+{
+ return trbsr & TRBSR_IRQ;
+}
+
+static inline bool is_trbe_trg(u64 trbsr)
+{
+ return trbsr & TRBSR_TRG;
+}
+
+static inline bool is_trbe_wrap(u64 trbsr)
+{
+ return trbsr & TRBSR_WRAP;
+}
+
+static inline bool is_trbe_abort(u64 trbsr)
+{
+ return trbsr & TRBSR_ABORT;
+}
+
+static inline bool is_trbe_running(u64 trbsr)
+{
+ return !(trbsr & TRBSR_STOP);
+}
+
+#define TRBE_TRIG_MODE_STOP 0
+#define TRBE_TRIG_MODE_IRQ 1
+#define TRBE_TRIG_MODE_IGNORE 3
+
+#define TRBE_FILL_MODE_FILL 0
+#define TRBE_FILL_MODE_WRAP 1
+#define TRBE_FILL_MODE_CIRCULAR_BUFFER 3
+
+static inline void set_trbe_disabled(void)
+{
+ u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1);
+
+ trblimitr &= ~TRBLIMITR_ENABLE;
+ write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1);
+}
+
+static inline bool get_trbe_flag_update(u64 trbidr)
+{
+ return trbidr & TRBIDR_FLAG;
+}
+
+static inline bool is_trbe_programmable(u64 trbidr)
+{
+ return !(trbidr & TRBIDR_PROG);
+}
+
+static inline int get_trbe_address_align(u64 trbidr)
+{
+ return (trbidr >> TRBIDR_ALIGN_SHIFT) & TRBIDR_ALIGN_MASK;
+}
+
+static inline unsigned long get_trbe_write_pointer(void)
+{
+ return read_sysreg_s(SYS_TRBPTR_EL1);
+}
+
+static inline void set_trbe_write_pointer(unsigned long addr)
+{
+ WARN_ON(is_trbe_enabled());
+ write_sysreg_s(addr, SYS_TRBPTR_EL1);
+}
+
+static inline unsigned long get_trbe_limit_pointer(void)
+{
+ u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1);
+ unsigned long addr = trblimitr & (TRBLIMITR_LIMIT_MASK << TRBLIMITR_LIMIT_SHIFT);
+
+ WARN_ON(!IS_ALIGNED(addr, PAGE_SIZE));
+ return addr;
+}
+
+static inline unsigned long get_trbe_base_pointer(void)
+{
+ u64 trbbaser = read_sysreg_s(SYS_TRBBASER_EL1);
+ unsigned long addr = trbbaser & (TRBBASER_BASE_MASK << TRBBASER_BASE_SHIFT);
+
+ WARN_ON(!IS_ALIGNED(addr, PAGE_SIZE));
+ return addr;
+}
+
+static inline void set_trbe_base_pointer(unsigned long addr)
+{
+ WARN_ON(is_trbe_enabled());
+ WARN_ON(!IS_ALIGNED(addr, (1UL << TRBBASER_BASE_SHIFT)));
+ WARN_ON(!IS_ALIGNED(addr, PAGE_SIZE));
+ write_sysreg_s(addr, SYS_TRBBASER_EL1);
+}
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index e0e45fc19b8f..5cfe70aedced 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -17,6 +17,7 @@
#include <linux/interrupt.h>
#include <linux/mv643xx_i2c.h>
#include <linux/platform_device.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/reset.h>
#include <linux/io.h>
#include <linux/of.h>
@@ -147,6 +148,7 @@ struct mv64xxx_i2c_data {
bool irq_clear_inverted;
/* Clk div is 2 to the power n, not 2 to the power n + 1 */
bool clk_n_base_0;
+ struct i2c_bus_recovery_info rinfo;
};
static struct mv64xxx_i2c_regs mv64xxx_i2c_regs_mv64xxx = {
@@ -325,7 +327,8 @@ mv64xxx_i2c_fsm(struct mv64xxx_i2c_data *drv_data, u32 status)
drv_data->msg->flags);
drv_data->action = MV64XXX_I2C_ACTION_SEND_STOP;
mv64xxx_i2c_hw_init(drv_data);
- drv_data->rc = -EIO;
+ i2c_recover_bus(&drv_data->adapter);
+ drv_data->rc = -EAGAIN;
}
}
@@ -561,6 +564,7 @@ mv64xxx_i2c_wait_for_completion(struct mv64xxx_i2c_data *drv_data)
"time_left: %d\n", drv_data->block,
(int)time_left);
mv64xxx_i2c_hw_init(drv_data);
+ i2c_recover_bus(&drv_data->adapter);
}
} else
spin_unlock_irqrestore(&drv_data->lock, flags);
@@ -870,6 +874,25 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
}
#endif /* CONFIG_OF */
+static int mv64xxx_i2c_init_recovery_info(struct mv64xxx_i2c_data *drv_data,
+ struct device *dev)
+{
+ struct i2c_bus_recovery_info *rinfo = &drv_data->rinfo;
+
+ rinfo->pinctrl = devm_pinctrl_get(dev);
+ if (IS_ERR(rinfo->pinctrl)) {
+ if (PTR_ERR(rinfo->pinctrl) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_info(dev, "can't get pinctrl, bus recovery not supported\n");
+ return PTR_ERR(rinfo->pinctrl);
+ } else if (!rinfo->pinctrl) {
+ return -ENODEV;
+ }
+
+ drv_data->adapter.bus_recovery_info = rinfo;
+ return 0;
+}
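+
+/*
+ * Note: with adapter.bus_recovery_info populated, the I2C core is expected
+ * to look up the optional "gpio"/"recovery" pinctrl states and SCL/SDA GPIOs
+ * from the device tree, so that the i2c_recover_bus() calls added above can
+ * bit-bang the bus back to an idle state.
+ */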
+
static int
mv64xxx_i2c_probe(struct platform_device *pd)
{
@@ -926,6 +949,10 @@ mv64xxx_i2c_probe(struct platform_device *pd)
goto exit_reset;
}
+ rc = mv64xxx_i2c_init_recovery_info(drv_data, &pd->dev);
+ if (rc == -EPROBE_DEFER)
+ goto exit_reset;
+
drv_data->adapter.dev.parent = &pd->dev;
drv_data->adapter.algo = &mv64xxx_i2c_algo;
drv_data->adapter.owner = THIS_MODULE;
diff --git a/drivers/i2c/busses/i2c-octeon-core.c b/drivers/i2c/busses/i2c-octeon-core.c
index 845eda70b8ca..9f82fd3b5ea3 100644
--- a/drivers/i2c/busses/i2c-octeon-core.c
+++ b/drivers/i2c/busses/i2c-octeon-core.c
@@ -17,6 +17,7 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/pci.h>
#include "i2c-octeon-core.h"
@@ -60,11 +61,19 @@ static int octeon_i2c_wait(struct octeon_i2c *i2c)
return octeon_i2c_test_iflg(i2c) ? 0 : -ETIMEDOUT;
}
- i2c->int_enable(i2c);
- time_left = wait_event_timeout(i2c->queue, octeon_i2c_test_iflg(i2c),
- i2c->adap.timeout);
- i2c->int_disable(i2c);
-
+ if (i2c->twsi_freq <= FREQ_400KHZ) {
+ i2c->int_enable(i2c);
+ time_left = wait_event_timeout(i2c->queue,
+ octeon_i2c_test_iflg(i2c),
+ i2c->adap.timeout);
+ i2c->int_disable(i2c);
+ } else {
+ time_left = 1000; /* 1ms */
+ do {
+ if (!time_left)
+ break;
+ time_left--;
+ __udelay(1);
+ } while (!octeon_i2c_test_iflg(i2c));
+ }
if (i2c->broken_irq_check && !time_left &&
octeon_i2c_test_iflg(i2c)) {
dev_err(i2c->dev, "broken irq connection detected, switching to polling mode.\n");
@@ -177,6 +186,7 @@ static int octeon_i2c_hlc_wait(struct octeon_i2c *i2c)
static int octeon_i2c_check_status(struct octeon_i2c *i2c, int final_read)
{
u8 stat;
+ u64 mode;
/*
* This is ugly... in HLC mode the status is not in the status register
@@ -239,6 +249,13 @@ static int octeon_i2c_check_status(struct octeon_i2c *i2c, int final_read)
case STAT_RXADDR_NAK:
case STAT_AD2W_NAK:
return -ENXIO;
+
+ case STAT_WDOG_TOUT:
+ mode = __raw_readq(i2c->twsi_base + MODE(i2c));
+ /* Set BUS_MON_RST to reset bus monitor */
+ mode |= BIT(3);
+ octeon_i2c_writeq_flush(mode, i2c->twsi_base + MODE(i2c));
+ return -EIO;
default:
dev_err(i2c->dev, "unhandled state: %d\n", stat);
return -EIO;
@@ -607,7 +624,7 @@ int octeon_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
struct octeon_i2c *i2c = i2c_get_adapdata(adap);
int i, ret = 0;
- if (num == 1) {
+ if (num == 1 && (i2c->twsi_freq <= FREQ_400KHZ)) {
if (msgs[0].len > 0 && msgs[0].len <= 8) {
if (msgs[0].flags & I2C_M_RD)
ret = octeon_i2c_hlc_read(i2c, msgs);
@@ -615,7 +632,7 @@ int octeon_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
ret = octeon_i2c_hlc_write(i2c, msgs);
goto out;
}
- } else if (num == 2) {
+ } else if (num == 2 && (i2c->twsi_freq <= FREQ_400KHZ)) {
if ((msgs[0].flags & I2C_M_RD) == 0 &&
(msgs[1].flags & I2C_M_RECV_LEN) == 0 &&
msgs[0].len > 0 && msgs[0].len <= 2 &&
@@ -658,8 +675,19 @@ out:
void octeon_i2c_set_clock(struct octeon_i2c *i2c)
{
int tclk, thp_base, inc, thp_idx, mdiv_idx, ndiv_idx, foscl, diff;
- int thp = 0x18, mdiv = 2, ndiv = 0, delta_hz = 1000000;
+ /* Starting value for the search for the lowest diff */
+ const int huge_delta = 1000000;
+ /*
+ * Find divisors that produce the target frequency; start with a large
+ * delta to cover a wider range of divisors. Note: thp is the TCLK half
+ * period.
+ */
+ int ds = 10, thp = 0x18, mdiv = 2, ndiv = 0, delta_hz = huge_delta;
+ if (octeon_i2c_is_otx2(to_pci_dev(i2c->dev))) {
+ thp = 0x3;
+ if (i2c->twsi_freq > FREQ_400KHZ)
+ ds = 15;
+ }
for (ndiv_idx = 0; ndiv_idx < 8 && delta_hz != 0; ndiv_idx++) {
/*
* An mdiv value of less than 2 seems to not work well
@@ -670,19 +698,29 @@ void octeon_i2c_set_clock(struct octeon_i2c *i2c)
* For given ndiv and mdiv values check the
* two closest thp values.
*/
- tclk = i2c->twsi_freq * (mdiv_idx + 1) * 10;
+ tclk = i2c->twsi_freq * (mdiv_idx + 1) * ds;
tclk *= (1 << ndiv_idx);
- thp_base = (i2c->sys_freq / (tclk * 2)) - 1;
+ if (octeon_i2c_is_otx2(to_pci_dev(i2c->dev)))
+ thp_base = (i2c->sys_freq / tclk) - 2;
+ else
+ thp_base = (i2c->sys_freq / (tclk * 2)) - 1;
for (inc = 0; inc <= 1; inc++) {
thp_idx = thp_base + inc;
if (thp_idx < 5 || thp_idx > 0xff)
continue;
- foscl = i2c->sys_freq / (2 * (thp_idx + 1));
+ if (octeon_i2c_is_otx2(to_pci_dev(i2c->dev)))
+ foscl = i2c->sys_freq / (thp_idx + 2);
+ else
+ foscl = i2c->sys_freq /
+ (2 * (thp_idx + 1));
foscl = foscl / (1 << ndiv_idx);
- foscl = foscl / (mdiv_idx + 1) / 10;
+ foscl = foscl / (mdiv_idx + 1) / ds;
+ if (foscl > i2c->twsi_freq)
+ continue;
diff = abs(foscl - i2c->twsi_freq);
+ /* Use these divisors if the diff from the target is smaller */
if (diff < delta_hz) {
delta_hz = diff;
thp = thp_idx;
@@ -694,6 +732,17 @@ void octeon_i2c_set_clock(struct octeon_i2c *i2c)
}
octeon_i2c_reg_write(i2c, SW_TWSI_OP_TWSI_CLK, thp);
octeon_i2c_reg_write(i2c, SW_TWSI_EOP_TWSI_CLKCTL, (mdiv << 3) | ndiv);
+ if (octeon_i2c_is_otx2(to_pci_dev(i2c->dev))) {
+ u64 mode;
+
+ mode = __raw_readq(i2c->twsi_base + MODE(i2c));
+ /* Set REFCLK_SRC and HS_MODE in TWSX_MODE register */
+ if (i2c->twsi_freq > FREQ_400KHZ)
+ mode |= BIT(4) | BIT(0);
+ else
+ mode &= ~(BIT(4) | BIT(0));
+ octeon_i2c_writeq_flush(mode, i2c->twsi_base + MODE(i2c));
+ }
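+
+ /*
+ * Worked example (hypothetical values): on OcteonTX2 with a 100 MHz
+ * sys_freq and a 100 kHz target, the search above can settle on
+ * ndiv = 0, mdiv = 3 and thp = 23, since
+ * 100 MHz / (23 + 2) / (3 + 1) / 10 = 100 kHz exactly.
+ */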
}
int octeon_i2c_init_lowlevel(struct octeon_i2c *i2c)
diff --git a/drivers/i2c/busses/i2c-octeon-core.h b/drivers/i2c/busses/i2c-octeon-core.h
index 9bb9f64fdda0..37a58ab00691 100644
--- a/drivers/i2c/busses/i2c-octeon-core.h
+++ b/drivers/i2c/busses/i2c-octeon-core.h
@@ -71,6 +71,7 @@
#define STAT_SLAVE_ACK 0xC8
#define STAT_AD2W_ACK 0xD0
#define STAT_AD2W_NAK 0xD8
+#define STAT_WDOG_TOUT 0xF0
#define STAT_IDLE 0xF8
/* TWSI_INT values */
@@ -92,11 +93,13 @@ struct octeon_i2c_reg_offset {
unsigned int sw_twsi;
unsigned int twsi_int;
unsigned int sw_twsi_ext;
+ unsigned int mode;
};
#define SW_TWSI(x) (x->roff.sw_twsi)
#define TWSI_INT(x) (x->roff.twsi_int)
#define SW_TWSI_EXT(x) (x->roff.sw_twsi_ext)
+#define MODE(x) (x->roff.mode)
struct octeon_i2c {
wait_queue_head_t queue;
@@ -211,6 +214,21 @@ static inline void octeon_i2c_write_int(struct octeon_i2c *i2c, u64 data)
octeon_i2c_writeq_flush(data, i2c->twsi_base + TWSI_INT(i2c));
}
+#define FREQ_400KHZ 400000
+#define PCI_SUBSYS_DEVID_9XXX 0xB
+/**
+ * octeon_i2c_is_otx2 - check for chip ID
+ * @pdev: PCI dev structure
+ *
+ * Returns TRUE if OcteonTX2, FALSE otherwise.
+ */
+static inline bool octeon_i2c_is_otx2(struct pci_dev *pdev)
+{
+ u32 chip_id = (pdev->subsystem_device >> 12) & 0xF;
+
+ return (chip_id == PCI_SUBSYS_DEVID_9XXX);
+}
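+
+/*
+ * For example (hypothetical value): a subsystem device ID of 0xB200 gives
+ * chip_id = (0xB200 >> 12) & 0xF = 0xB, so the device is treated as an
+ * OcteonTX2 (9XXX) part.
+ */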
+
/* Prototypes */
irqreturn_t octeon_i2c_isr(int irq, void *dev_id);
int octeon_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num);
diff --git a/drivers/i2c/busses/i2c-thunderx-pcidrv.c b/drivers/i2c/busses/i2c-thunderx-pcidrv.c
index 12c90aa0900e..ed88d234f741 100644
--- a/drivers/i2c/busses/i2c-thunderx-pcidrv.c
+++ b/drivers/i2c/busses/i2c-thunderx-pcidrv.c
@@ -165,6 +165,7 @@ static int thunder_i2c_probe_pci(struct pci_dev *pdev,
i2c->roff.sw_twsi = 0x1000;
i2c->roff.twsi_int = 0x1010;
i2c->roff.sw_twsi_ext = 0x1018;
+ i2c->roff.mode = 0x1038;
i2c->dev = dev;
pci_set_drvdata(pdev, i2c);
@@ -205,6 +206,12 @@ static int thunder_i2c_probe_pci(struct pci_dev *pdev,
if (ret)
goto error;
+ /*
+ * For OcteonTX2 chips, set reference frequency to 100MHz
+ * as refclk_src in TWSI_MODE register defaults to 100MHz.
+ */
+ if (octeon_i2c_is_otx2(pdev) && (i2c->twsi_freq <= FREQ_400KHZ))
+ i2c->sys_freq = 100000000;
octeon_i2c_set_clock(i2c);
i2c->adap = thunderx_i2c_ops;
@@ -213,6 +220,7 @@ static int thunder_i2c_probe_pci(struct pci_dev *pdev,
i2c->adap.bus_recovery_info = &octeon_i2c_recovery_info;
i2c->adap.dev.parent = dev;
i2c->adap.dev.of_node = pdev->dev.of_node;
+ i2c->adap.dev.fwnode = dev->fwnode;
snprintf(i2c->adap.name, sizeof(i2c->adap.name),
"Cavium ThunderX i2c adapter at %s", dev_name(dev));
i2c_set_adapdata(&i2c->adap, i2c);
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 27c2014cd72a..518ed010cf48 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -138,6 +138,13 @@ static bool queue_empty(struct arm_smmu_ll_queue *q)
Q_WRP(q, q->prod) == Q_WRP(q, q->cons);
}
+static void queue_sync_cons(struct arm_smmu_queue *qw)
+{
+ struct arm_smmu_ll_queue *q = &qw->llq;
+
+ q->cons = readl_relaxed(qw->cons_reg);
+}
+
static bool queue_consumed(struct arm_smmu_ll_queue *q, u32 prod)
{
return ((Q_WRP(q, q->cons) == Q_WRP(q, prod)) &&
@@ -187,6 +194,46 @@ static u32 queue_inc_prod_n(struct arm_smmu_ll_queue *q, int n)
return Q_OVF(q->prod) | Q_WRP(q, prod) | Q_IDX(q, prod);
}
+static void queue_inc_prod(struct arm_smmu_queue *qw)
+{
+ struct arm_smmu_ll_queue *q = &qw->llq;
+ u32 prod = (Q_WRP(q, q->prod) | Q_IDX(q, q->prod)) + 1;
+
+ q->prod = Q_OVF(q->prod) | Q_WRP(q, prod) | Q_IDX(q, prod);
+ writel(q->prod, qw->prod_reg);
+}
+
+/*
+ * Wait for the SMMU to consume items. If drain is true, wait until the queue
+ * is empty. Otherwise, wait until there is at least one free slot.
+ */
+static int queue_poll_cons(struct arm_smmu_queue *qw, bool drain, bool wfe)
+{
+ ktime_t timeout;
+ unsigned int delay = 1;
+ struct arm_smmu_ll_queue *q = &qw->llq;
+
+ /* Wait longer if it's a queue drain */
+ timeout = ktime_add_us(ktime_get(), drain ?
+ ARM_SMMU_CMDQ_DRAIN_TIMEOUT_US :
+ ARM_SMMU_POLL_TIMEOUT_US);
+
+ while (queue_sync_cons(qw), (drain ? !queue_empty(q) : queue_full(q))) {
+ if (ktime_compare(ktime_get(), timeout) > 0)
+ return -ETIMEDOUT;
+
+ if (wfe) {
+ wfe();
+ } else {
+ cpu_relax();
+ udelay(delay);
+ delay *= 2;
+ }
+ }
+
+ return 0;
+}
+
static void queue_poll_init(struct arm_smmu_device *smmu,
struct arm_smmu_queue_poll *qp)
{
@@ -222,6 +269,18 @@ static void queue_write(__le64 *dst, u64 *src, size_t n_dwords)
*dst++ = cpu_to_le64(*src++);
}
+static int queue_insert_raw(struct arm_smmu_queue *qw, u64 *ent)
+{
+ struct arm_smmu_ll_queue *q = &qw->llq;
+
+ if (queue_full(q))
+ return -ENOSPC;
+
+ queue_write(Q_ENT(qw, qw->llq.prod), ent, qw->ent_dwords);
+ queue_inc_prod(qw);
+ return 0;
+}
+
static void queue_read(u64 *dst, __le64 *src, size_t n_dwords)
{
int i;
@@ -708,6 +767,30 @@ static void arm_smmu_cmdq_write_entries(struct arm_smmu_cmdq *cmdq, u64 *cmds,
}
}
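+
+/*
+ * Locked, non-batched command insertion used when ARM_SMMU_OPT_MSIPOLL is
+ * set (see the IIDR based quirk detection in arm_smmu_device_hw_probe()
+ * below): at most two commands are left outstanding so that the SMMU only
+ * issues 32-byte command fetches.
+ */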
+static void arm_smmu_cmdq_insert_cmd(struct arm_smmu_device *smmu, u64 *cmd)
+{
+ unsigned long flags;
+ bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
+ struct arm_smmu_queue *qw = &smmu->cmdq.q;
+ struct arm_smmu_ll_queue *q = &qw->llq;
+
+ spin_lock_irqsave(&smmu->cmdq.spin_lock, flags);
+ /* Ensure the command queue has at most two entries */
+ if (!(q->prod & 0x1) && queue_poll_cons(qw, true, false))
+ dev_err(smmu->dev, "command drain timeout\n");
+
+ while (queue_insert_raw(qw, cmd) == -ENOSPC) {
+ if (queue_poll_cons(qw, false, wfe))
+ dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");
+ }
+
+ if ((cmd[0] & 0xff) == CMDQ_OP_CMD_SYNC && queue_poll_cons(qw, true, wfe))
+ dev_err_ratelimited(smmu->dev, "CMD_SYNC timeout\n");
+ spin_unlock_irqrestore(&smmu->cmdq.spin_lock, flags);
+}
+
/*
* This is the actual insertion function, and provides the following
* ordering guarantees to callers:
@@ -735,7 +818,17 @@ static int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu,
struct arm_smmu_ll_queue llq = {
.max_n_shift = cmdq->q.llq.max_n_shift,
}, head = llq;
- int ret = 0;
+ int i, ret = 0;
+
+ if (smmu->options & ARM_SMMU_OPT_MSIPOLL) {
+ for (i = 0; i < n; ++i) {
+ u64 *cmd = &cmds[i * CMDQ_ENT_DWORDS];
+
+ arm_smmu_cmdq_insert_cmd(smmu, cmd);
+ }
+ return 0;
+ }
+
/* 1. Allocate some space in the queue */
local_irq_save(flags);
@@ -1375,7 +1468,6 @@ static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
cond_resched();
}
-
/*
* Not much we can do on overflow, so scream and pretend we're
* trying harder.
@@ -2656,6 +2748,7 @@ static int arm_smmu_cmdq_init(struct arm_smmu_device *smmu)
atomic_set(&cmdq->owner_prod, 0);
atomic_set(&cmdq->lock, 0);
+ spin_lock_init(&cmdq->spin_lock);
bitmap = (atomic_long_t *)bitmap_zalloc(nents, GFP_KERNEL);
if (!bitmap) {
@@ -3364,6 +3457,24 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n",
smmu->ias, smmu->oas, smmu->features);
+
+ /* Options based on implementation */
+ reg = readl_relaxed(smmu->base + ARM_SMMU_IIDR);
+
+ /*
+ * The Marvell OcteonTX2 SMMU wrongly issues unsupported 64-byte
+ * memory reads under certain conditions when fetching commands
+ * from the command queue. Force a command queue drain after every
+ * two writes, so that the SMMU only issues 32-byte reads.
+ */
+ switch (reg) {
+ case IIDR_MRVL_CN96XX_A0:
+ case IIDR_MRVL_CN96XX_B0:
+ case IIDR_MRVL_CN95XX_A0:
+ case IIDR_MRVL_CN95XX_B0:
+ smmu->options |= ARM_SMMU_OPT_MSIPOLL;
+ break;
+ }
return 0;
}
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index 57e5d223c467..9daeb4ce042e 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -14,6 +14,7 @@
#include <linux/mmzone.h>
#include <linux/sizes.h>
+#define ARM_SMMU_CMDQ_DRAIN_TIMEOUT_US 1000000 /* 1s! */
/* MMIO registers */
#define ARM_SMMU_IDR0 0x0
#define IDR0_ST_LVL GENMASK(28, 27)
@@ -394,6 +395,12 @@
#define MSI_IOVA_BASE 0x8000000
#define MSI_IOVA_LENGTH 0x100000
+#define ARM_SMMU_IIDR 0x18
+#define IIDR_MRVL_CN96XX_A0 0x2b20034c
+#define IIDR_MRVL_CN96XX_B0 0x2b20134c
+#define IIDR_MRVL_CN95XX_A0 0x2b30034c
+#define IIDR_MRVL_CN95XX_B0 0x2b30134c
+
enum pri_resp {
PRI_RESP_DENY = 0,
PRI_RESP_FAIL = 1,
@@ -511,6 +518,7 @@ struct arm_smmu_cmdq {
atomic_long_t *valid_map;
atomic_t owner_prod;
atomic_t lock;
+ spinlock_t spin_lock;
};
struct arm_smmu_cmdq_batch {
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index e5e3fd6b9554..21faa87a368c 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -237,10 +237,23 @@ static void gic_redist_wait_for_rwp(void)
static u64 __maybe_unused gic_read_iar(void)
{
+ u32 irqnr;
+ u32 apr;
+ bool cc_enabled;
+
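+ /*
+ * Erratum workaround as implemented below: snapshot ICC_AP1R0_EL1
+ * before acknowledging the interrupt; if the active priorities did
+ * not change across the IAR read, the returned INTID is treated as
+ * spurious and ICC_IAR1_EL1_SPURIOUS is reported instead.
+ */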
+ cc_enabled = cpus_have_const_cap(ARM64_WORKAROUND_MRVL_38545);
+ if (cc_enabled)
+ apr = read_sysreg_s(SYS_ICC_AP1R0_EL1);
+
if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_23154))
- return gic_read_iar_cavium_thunderx();
+ irqnr = gic_read_iar_cavium_thunderx();
else
- return gic_read_iar_common();
+ irqnr = gic_read_iar_common();
+
+ if (!cc_enabled || apr != read_sysreg_s(SYS_ICC_AP1R0_EL1))
+ return irqnr;
+
+ return ICC_IAR1_EL1_SPURIOUS;
}
#endif
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index 05b1009e2820..4b61ce920406 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -16,6 +16,15 @@ config ARM_MHU
The controller has 3 mailbox channels, the last of which can be
used in Secure mode only.
+config MVL_MHU
+ tristate "Marvell MHU Mailbox"
+ depends on ARM_AMBA
+ help
+ Say Y here if you want to build the Marvell MHU controller driver.
+ The driver implements SCMI and AVS support for the OcteonTX2 platform.
+ The controller supports a single channel between the SCP and the AP.
+ The MHU implementation uses CPC RAM as the mailbox memory.
+
config IMX_MBOX
tristate "i.MX Mailbox"
depends on ARCH_MXC || COMPILE_TEST
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index 2e06e02b2e03..7f65e5a52b7a 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -7,6 +7,8 @@ obj-$(CONFIG_MAILBOX_TEST) += mailbox-test.o
obj-$(CONFIG_ARM_MHU) += arm_mhu.o arm_mhu_db.o
+obj-$(CONFIG_MVL_MHU) += mvl_mhu.o
+
obj-$(CONFIG_IMX_MBOX) += imx-mailbox.o
obj-$(CONFIG_ARMADA_37XX_RWTM_MBOX) += armada-37xx-rwtm-mailbox.o
diff --git a/drivers/mailbox/mvl_mhu.c b/drivers/mailbox/mvl_mhu.c
new file mode 100644
index 000000000000..e8639b570130
--- /dev/null
+++ b/drivers/mailbox/mvl_mhu.c
@@ -0,0 +1,411 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Marvell Message Handling Unit driver
+ *
+ * Copyright (C) 2019-2021 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+
+#define MHU_NUM_PCHANS 2
+
+#define BAR0 0
+#define SCP_INDEX 0x0
+#define DEV_AP0 0x2
+#define SCP_TO_AP_INTERRUPT 2
+#define DRV_NAME "mbox-thunderx"
+
+#define XCPX_DEVY_XCP_MBOX_LINT_OFFSET 0x000E1C00
+#define XCP_TO_DEV_XCP_MBOX_LINT(xcp_core, device_id) \
+ (XCPX_DEVY_XCP_MBOX_LINT_OFFSET | \
+ ((uint64_t)(xcp_core) << 36) | \
+ ((uint64_t)(device_id) << 4))
+
+#define AP0_TO_SCP_MBOX_LINT XCP_TO_DEV_XCP_MBOX_LINT(SCP_INDEX, DEV_AP0)
+
+/*
+ * Doorbell-Register: XCP(0..1)_DEV(0..7)_XCP_MBOX
+ * Communication data from devices to XCP. When written, sets
+ * XCP(0..1)_DEV(0..7)_XCP_MBOX.
+ * Note: the value written does not matter; writing anything to this
+ * register raises an interrupt at the target.
+ */
+
+#define DONT_CARE_DATA 0xFF
+#define XCPX_DEVY_XCP_MBOX_OFFSET 0x000E1000
+#define XCP_TO_DEV_XCP_MBOX(xcp_core, device_id) \
+ (XCPX_DEVY_XCP_MBOX_OFFSET | \
+ ((uint64_t)(xcp_core) << 36) | \
+ ((uint64_t)(device_id) << 4))
+
+/* AP0-to-SCP doorbell */
+#define AP0_TO_SCP_MBOX XCP_TO_DEV_XCP_MBOX(SCP_INDEX, DEV_AP0)
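+
+/*
+ * For example, with SCP_INDEX = 0 and DEV_AP0 = 2 the doorbell above
+ * expands to 0x000E1000 | (0ULL << 36) | (2ULL << 4) = 0xE1020.
+ */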
+
+/* Register offset: Enable interrupt from SCP to AP */
+#define XCP0_XCP_DEV2_MBOX_RINT_ENA_W1S 0x000D1C60
+
+/* Rx interrupt from SCP to Non-secure AP (linux kernel) */
+#define XCPX_XCP_DEVY_MBOX_RINT_OFFSET 0x000D1C00
+#define XCPX_XCP_DEVY_MBOX_RINT(xcp_core, device_id) \
+ (XCPX_XCP_DEVY_MBOX_RINT_OFFSET | \
+ ((uint64_t)(xcp_core) << 36) | \
+ ((uint64_t)(device_id) << 4))
+
+/* The interrupt status register */
+#define SCP_TO_AP0_MBOX_RINT XCPX_XCP_DEVY_MBOX_RINT(SCP_INDEX, DEV_AP0)
+
+struct mvl_mhu {
+ struct pci_dev *pdev;
+ struct device *dev;
+
+ /* SCP link information */
+ void __iomem *base; /* tx_reg, rx_reg */
+ void __iomem *payload; /* Shared mem */
+ unsigned int irq;
+ const char *name;
+ spinlock_t link_lock;
+
+ /* Mailbox controller */
+ struct mbox_controller mbox;
+ struct mbox_chan chan[MHU_NUM_PCHANS];
+};
+
+#define MHU_CHANNEL_INDEX(mhu, chan) ((chan) - &(mhu)->chan[0])
+
+/**
+ * MVL MHU Mailbox platform specific configuration
+ *
+ * @num_pchans: Maximum number of physical channels
+ * @num_doorbells: Maximum number of doorbells per physical channel
+ */
+struct mvl_mhu_mbox_pdata {
+ unsigned int num_pchans;
+ unsigned int num_doorbells;
+ bool support_doorbells;
+};
+
+/**
+ * MVL MHU Mailbox allocated channel information
+ *
+ * @mhu: Pointer to parent mailbox device
+ * @pchan: Physical channel within which this doorbell resides
+ * @doorbell: doorbell number pertaining to this channel
+ */
+struct mvl_mhu_channel {
+ struct mvl_mhu *mhu;
+ unsigned int pchan;
+ unsigned int doorbell;
+};
+
+/* Sources of interrupt */
+enum {
+ INDEX_INT_SRC_SCMI_TX,
+ INDEX_INT_SRC_AVS_STS,
+ INDEX_INT_SRC_NONE,
+};
+
+/* Information about interrupts from the SCP */
+struct int_src_data_s {
+ uint64_t int_src_cnt;
+ uint64_t int_src_data;
+};
+
+/* Protects the static data processed in the IRQ thread */
+static DEFINE_SPINLOCK(mhu_irq_spinlock);
+
+/* bottom half of rx interrupt */
+static irqreturn_t mvl_mhu_rx_interrupt_thread(int irq, void *p)
+{
+ struct mvl_mhu *mhu = (struct mvl_mhu *)p;
+ struct int_src_data_s *data = (struct int_src_data_s *)mhu->payload;
+ u64 val, scmi_tx_cnt, avs_failure_cnt;
+
+ /*
+ * Local copy of the event counters. A mismatch between the received
+ * count value and the local copy means additional events have been
+ * flagged that need to be attended to by the AP.
+ */
+ static u64 event_counter[INDEX_INT_SRC_NONE] = {0};
+
+ dev_dbg(mhu->dev, "%s\n", __func__);
+
+ spin_lock_irq(&mhu_irq_spinlock);
+ /* scmi interrupt */
+ scmi_tx_cnt = readq(&data[INDEX_INT_SRC_SCMI_TX].int_src_cnt);
+ if (event_counter[INDEX_INT_SRC_SCMI_TX] != scmi_tx_cnt) {
+ mbox_chan_received_data(&mhu->chan[0], (void *)&val);
+ /* Update the memory to prepare for next */
+ event_counter[INDEX_INT_SRC_SCMI_TX] = scmi_tx_cnt;
+ }
+
+ /* AVS failures */
+ avs_failure_cnt = readq(&data[INDEX_INT_SRC_AVS_STS].int_src_cnt);
+ if (event_counter[INDEX_INT_SRC_AVS_STS] != avs_failure_cnt) {
+ pr_err("!!! FATAL ERROR IN AVS BUS !!! FATAL ERROR IN AVS BUS !!!\n");
+ /* Update the memory to prepare for next */
+ event_counter[INDEX_INT_SRC_AVS_STS] = avs_failure_cnt;
+ }
+ spin_unlock_irq(&mhu_irq_spinlock);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mvl_mhu_rx_interrupt(int irq, void *p)
+{
+ struct mvl_mhu *mhu = (struct mvl_mhu *)p;
+ u64 val;
+
+ /* Read interrupt status register */
+ val = readq_relaxed(mhu->base + SCP_TO_AP0_MBOX_RINT);
+ if (val) {
+ /* Clear the interrupt : Write on clear */
+ writeq_relaxed(1ul, mhu->base + SCP_TO_AP0_MBOX_RINT);
+ } else {
+ return IRQ_NONE;
+ }
+
+ return IRQ_WAKE_THREAD;
+}
+
+static bool mvl_mhu_last_tx_done(struct mbox_chan *chan)
+{
+ struct mvl_mhu *mhu = chan->con_priv;
+ u64 val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mhu->link_lock, flags);
+ val = readq_relaxed(mhu->base + SCP_TO_AP0_MBOX_RINT);
+ spin_unlock_irqrestore(&mhu->link_lock, flags);
+
+ dev_dbg(mhu->dev, "%s\n", __func__);
+
+ return (val == 0);
+}
+
+static int mvl_mhu_send_data(struct mbox_chan *chan, void *data)
+{
+ struct mvl_mhu *mhu = chan->con_priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mhu->link_lock, flags);
+ writeq_relaxed(DONT_CARE_DATA, mhu->base + AP0_TO_SCP_MBOX);
+ spin_unlock_irqrestore(&mhu->link_lock, flags);
+
+ return 0;
+}
+
+static int mvl_mhu_startup(struct mbox_chan *chan)
+{
+ struct mvl_mhu *mhu = chan->con_priv;
+
+ dev_dbg(mhu->dev, "Channel %ld started\n", MHU_CHANNEL_INDEX(mhu, chan));
+
+ return 0;
+}
+
+static const struct mbox_chan_ops mvl_mhu_ops = {
+ .send_data = mvl_mhu_send_data,
+ .startup = mvl_mhu_startup,
+ .last_tx_done = mvl_mhu_last_tx_done,
+};
+
+static const struct mvl_mhu_mbox_pdata mvl_mhu_pdata = {
+ .num_pchans = MHU_NUM_PCHANS,
+ .num_doorbells = 1,
+ .support_doorbells = false,
+};
+
+static int mvl_mhu_init_link(struct mvl_mhu *mhu)
+{
+ int ret, irq;
+ struct resource res;
+ resource_size_t size;
+ struct device_node *shmem, *np;
+
+ np = mhu->pdev->dev.of_node;
+ dev_dbg(mhu->dev, "Node: %s\n", np && np->name ? np->name : "unknown");
+
+ ret = of_property_read_string(np, "mbox-name", &mhu->name);
+ if (ret)
+ mhu->name = np->full_name;
+
+ /* Get shared memory details between NS AP & SCP */
+ shmem = of_parse_phandle(np, "shmem", 0);
+ ret = of_address_to_resource(shmem, 0, &res);
+ of_node_put(shmem);
+ if (ret) {
+ dev_err(mhu->dev, "failed to get CPC COMMON payload mem resource\n");
+ return ret;
+ }
+ size = resource_size(&res);
+
+ mhu->payload = devm_ioremap(mhu->dev, res.start, size);
+ if (!mhu->payload) {
+ dev_err(mhu->dev, "failed to ioremap CPC COMMON payload\n");
+ return -EADDRNOTAVAIL;
+ }
+
+
+ irq = pci_irq_vector(mhu->pdev, SCP_TO_AP_INTERRUPT);
+ if (irq < 0)
+ return irq;
+
+ ret = request_threaded_irq(irq, mvl_mhu_rx_interrupt,
+ mvl_mhu_rx_interrupt_thread, 0,
+ module_name(THIS_MODULE), mhu);
+ if (ret)
+ return ret;
+
+ /* Enable IRQ from SCP to AP */
+ writeq_relaxed(1ul, mhu->base + XCP0_XCP_DEV2_MBOX_RINT_ENA_W1S);
+
+ mhu->irq = irq;
+ spin_lock_init(&mhu->link_lock);
+ dev_dbg(mhu->dev, "MHU @ 0x%llx [%llx], irq=%d\n", res.start, size, irq);
+
+ return 0;
+}
+
+static int mvl_mhu_init_mbox(struct mvl_mhu *mhu)
+{
+ int i, ret;
+
+ mhu->mbox.dev = mhu->dev;
+ mhu->mbox.chans = &mhu->chan[0];
+ mhu->mbox.num_chans = MHU_NUM_PCHANS;
+ mhu->mbox.txdone_irq = false;
+ mhu->mbox.txdone_poll = true;
+ mhu->mbox.txpoll_period = 1;
+ mhu->mbox.ops = &mvl_mhu_ops;
+
+ for (i = 0; i < mvl_mhu_pdata.num_pchans; i++)
+ mhu->chan[i].con_priv = mhu;
+
+ ret = mbox_controller_register(&mhu->mbox);
+ if (ret) {
+ dev_err(mhu->dev, "Failed to register mailbox controller %d\n",
+ ret);
+ }
+
+ return ret;
+}
+
+static int mvl_mhu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct mvl_mhu *mhu;
+ int ret, nvec;
+
+ if (!pdev || !pdev->dev.of_node)
+ return -ENODEV;
+
+ mhu = devm_kzalloc(&pdev->dev, sizeof(*mhu), GFP_KERNEL);
+ if (!mhu)
+ return -ENOMEM;
+
+ mhu->pdev = pdev;
+ mhu->dev = &pdev->dev;
+ pci_set_drvdata(pdev, mhu);
+
+ if (mvl_mhu_pdata.num_pchans > MHU_NUM_PCHANS) {
+ dev_err(mhu->dev, "Number of physical channel can't exceed %d\n",
+ MHU_NUM_PCHANS);
+ return -EINVAL;
+ }
+ mhu->dev->platform_data = (void *)&mvl_mhu_pdata;
+
+ ret = pcim_enable_device(mhu->pdev);
+ if (ret) {
+ dev_err(mhu->dev, "Failed to enable PCI device: err %d\n", ret);
+ return ret;
+ }
+
+ ret = pci_request_region(mhu->pdev, BAR0, DRV_NAME);
+ if (ret) {
+ dev_err(mhu->dev, "Failed requested region PCI dev err:%d\n",
+ ret);
+ return ret;
+ }
+
+ mhu->base = pcim_iomap(pdev, BAR0, pci_resource_len(mhu->pdev, BAR0));
+ if (!mhu->base) {
+ dev_err(mhu->dev, "Failed to iomap PCI device: err %d\n", ret);
+ return -EINVAL;
+ }
+
+ nvec = pci_alloc_irq_vectors(pdev, 0, 3, PCI_IRQ_MSIX);
+ if (nvec < 0) {
+ dev_err(mhu->dev, "irq vectors allocation failed:%d\n", nvec);
+ return nvec;
+ }
+
+ ret = mvl_mhu_init_link(mhu);
+ if (ret) {
+ dev_err(mhu->dev, "Failed to setup SCP link (%d)\n", ret);
+ return ret;
+ }
+
+ ret = mvl_mhu_init_mbox(mhu);
+ if (ret) {
+ dev_err(mhu->dev, "Failed to initialize mailbox controller (%d)\n",
+ ret);
+ return ret;
+ }
+
+ pr_info("Marvell Message Handling Unit\n");
+
+ return 0;
+}
+
+static void mvl_mhu_remove(struct pci_dev *pdev)
+{
+ struct mvl_mhu *mhu = pci_get_drvdata(pdev);
+
+ pci_free_irq_vectors(pdev);
+ mbox_controller_unregister(&mhu->mbox);
+ pcim_iounmap(pdev, mhu->base);
+ pci_release_region(pdev, BAR0);
+}
+
+static const struct pci_device_id mvl_mhu_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xA067) },
+ { 0, } /* end of table */
+};
+
+static struct pci_driver mvl_mhu_driver = {
+ .name = "mvl_mhu",
+ .id_table = mvl_mhu_ids,
+ .probe = mvl_mhu_probe,
+ .remove = mvl_mhu_remove,
+};
+module_pci_driver(mvl_mhu_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Marvell MHU Driver");
+MODULE_AUTHOR("Sujeet Baranwal <sbaranwal@marvell.com>");
diff --git a/drivers/memory/pl353-smc.c b/drivers/memory/pl353-smc.c
index b0b251bb207f..32b0a255f43c 100644
--- a/drivers/memory/pl353-smc.c
+++ b/drivers/memory/pl353-smc.c
@@ -427,7 +427,7 @@ out_clk_dis_aper:
return err;
}
-static void pl353_smc_remove(struct amba_device *adev)
+static int pl353_smc_remove(struct amba_device *adev)
{
struct pl353_smc_data *pl353_smc = amba_get_drvdata(adev);
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index b8847ae04d93..205b80c1c9ed 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -1957,6 +1957,21 @@ config MFD_WM97xx
support for the WM97xx, in order to use the actual functionality of
the device other drivers must be enabled.
+config MFD_RSMU_I2C
+ tristate "Renesas Synchronization Management Unit with I2C"
+ depends on I2C && OF
+ select MFD_CORE
+ select REGMAP
+ select REGMAP_I2C
+ help
+ Support for the Renesas synchronization management unit, such as
+ Clockmatrix and 82P33XXX series. This option supports I2C as
+ the control interface.
+
+ This driver provides common support for accessing the device,
+ additional drivers must be enabled in order to use the functionality
+ of the device.
+
config MFD_STW481X
tristate "Support for ST Microelectronics STw481x"
depends on I2C && (ARCH_NOMADIK || COMPILE_TEST)
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 1780019d2474..c856f8a39c59 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -237,6 +237,7 @@ obj-$(CONFIG_MFD_HI655X_PMIC) += hi655x-pmic.o
obj-$(CONFIG_MFD_DLN2) += dln2.o
obj-$(CONFIG_MFD_RT5033) += rt5033.o
obj-$(CONFIG_MFD_SKY81452) += sky81452.o
+obj-$(CONFIG_MFD_RSMU_I2C) += rsmu_i2c.o
intel-soc-pmic-objs := intel_soc_pmic_core.o intel_soc_pmic_crc.o
obj-$(CONFIG_INTEL_SOC_PMIC) += intel-soc-pmic.o
diff --git a/drivers/mfd/rsmu_i2c.c b/drivers/mfd/rsmu_i2c.c
new file mode 100644
index 000000000000..06f582b096bf
--- /dev/null
+++ b/drivers/mfd/rsmu_i2c.c
@@ -0,0 +1,344 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Multi-function driver for the IDT ClockMatrix(TM) and 82P33xxx families of
+ * timing and synchronization devices.
+ *
+ * Copyright (C) 2019 Integrated Device Technology, Inc., a Renesas Company.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/of.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/rsmu.h>
+#include "rsmu_private.h"
+
+/*
+ * 16-bit register address: the lower 8 bits of the register address come
+ * from the offset addr byte and the upper 8 bits come from the page register.
+ */
+#define RSMU_CM_PAGE_ADDR 0xFD
+#define RSMU_CM_PAGE_WINDOW 256
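+/*
+ * Worked example (illustrative): with a 256-byte window, regmap reaches
+ * register 0xC024 by writing page 0xC0 to the page register at 0xFD and then
+ * accessing offset 0x24 within the window.
+ */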
+
+/*
+ * 15-bit register address: the lower 7 bits of the register address come
+ * from the offset addr byte and the upper 8 bits come from the page register.
+ */
+#define RSMU_SABRE_PAGE_ADDR 0x7F
+#define RSMU_SABRE_PAGE_WINDOW 128
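+/*
+ * Worked example (illustrative): with a 128-byte window, register 0x1A5 is
+ * reached by writing page 0x03 to the page register at 0x7F and then
+ * accessing offset 0x25 within the window.
+ */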
+
+static bool rsmu_cm_volatile_reg(struct device *dev, unsigned int reg);
+static bool rsmu_sabre_volatile_reg(struct device *dev, unsigned int reg);
+
+/* Current mfd device index */
+static atomic_t rsmu_ndevs = ATOMIC_INIT(0);
+
+/* Platform data */
+static struct rsmu_pdata rsmu_pdata[RSMU_MAX_MFD_DEV];
+
+/* clockmatrix phc devices */
+static struct mfd_cell rsmu_cm_pdev[RSMU_MAX_MFD_DEV] = {
+ [0] = {
+ .name = "idtcm-ptp0",
+ .of_compatible = "renesas,idtcm-ptp0",
+ },
+ [1] = {
+ .name = "idtcm-ptp1",
+ .of_compatible = "renesas,idtcm-ptp1",
+ },
+ [2] = {
+ .name = "idtcm-ptp2",
+ .of_compatible = "renesas,idtcm-ptp2",
+ },
+ [3] = {
+ .name = "idtcm-ptp3",
+ .of_compatible = "renesas,idtcm-ptp3",
+ },
+};
+
+/* sabre phc devices */
+static struct mfd_cell rsmu_sabre_pdev[RSMU_MAX_MFD_DEV] = {
+ [0] = {
+ .name = "idt82p33-ptp0",
+ .of_compatible = "renesas,idt82p33-ptp0",
+ },
+ [1] = {
+ .name = "idt82p33-ptp1",
+ .of_compatible = "renesas,idt82p33-ptp1",
+ },
+ [2] = {
+ .name = "idt82p33-ptp2",
+ .of_compatible = "renesas,idt82p33-ptp2",
+ },
+ [3] = {
+ .name = "idt82p33-ptp3",
+ .of_compatible = "renesas,idt82p33-ptp3",
+ },
+};
+
+/* rsmu character devices */
+static struct mfd_cell rsmu_cdev[RSMU_MAX_MFD_DEV] = {
+ [0] = {
+ .name = "rsmu-cdev0",
+ .of_compatible = "renesas,rsmu-cdev0",
+ },
+ [1] = {
+ .name = "rsmu-cdev1",
+ .of_compatible = "renesas,rsmu-cdev1",
+ },
+ [2] = {
+ .name = "rsmu-cdev2",
+ .of_compatible = "renesas,rsmu-cdev2",
+ },
+ [3] = {
+ .name = "rsmu-cdev3",
+ .of_compatible = "renesas,rsmu-cdev3",
+ },
+};
+
+static const struct regmap_range_cfg rsmu_cm_range_cfg[] = {
+ {
+ .range_min = 0,
+ .range_max = 0xD000,
+ .selector_reg = RSMU_CM_PAGE_ADDR,
+ .selector_mask = 0xFF,
+ .selector_shift = 0,
+ .window_start = 0,
+ .window_len = RSMU_CM_PAGE_WINDOW,
+ }
+};
+
+static const struct regmap_range_cfg rsmu_sabre_range_cfg[] = {
+ {
+ .range_min = 0,
+ .range_max = 0x400,
+ .selector_reg = RSMU_SABRE_PAGE_ADDR,
+ .selector_mask = 0xFF,
+ .selector_shift = 0,
+ .window_start = 0,
+ .window_len = RSMU_SABRE_PAGE_WINDOW,
+ }
+};
+
+static const struct regmap_config rsmu_regmap_configs[] = {
+ [RSMU_CM] = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xD000,
+ .ranges = rsmu_cm_range_cfg,
+ .num_ranges = ARRAY_SIZE(rsmu_cm_range_cfg),
+ .volatile_reg = rsmu_cm_volatile_reg,
+ .cache_type = REGCACHE_RBTREE,
+ .can_multi_write = true,
+ },
+ [RSMU_SABRE] = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x400,
+ .ranges = rsmu_sabre_range_cfg,
+ .num_ranges = ARRAY_SIZE(rsmu_sabre_range_cfg),
+ .volatile_reg = rsmu_sabre_volatile_reg,
+ .cache_type = REGCACHE_RBTREE,
+ .can_multi_write = true,
+ },
+};
+
+static bool rsmu_cm_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case RSMU_CM_PAGE_ADDR:
+ return false;
+ default:
+ return true;
+ }
+}
+
+static bool rsmu_sabre_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case RSMU_SABRE_PAGE_ADDR:
+ return false;
+ default:
+ return true;
+ }
+}
+
+int rsmu_read(struct device *dev, u16 reg, u8 *buf, u16 size)
+{
+ struct rsmu_dev *rsmu = dev_get_drvdata(dev);
+
+ return regmap_bulk_read(rsmu->regmap, reg, buf, size);
+}
+EXPORT_SYMBOL_GPL(rsmu_read);
+
+int rsmu_write(struct device *dev, u16 reg, u8 *buf, u16 size)
+{
+ struct rsmu_dev *rsmu = dev_get_drvdata(dev);
+
+ return regmap_bulk_write(rsmu->regmap, reg, buf, size);
+}
+EXPORT_SYMBOL_GPL(rsmu_write);
+
+static int rsmu_mfd_init(struct rsmu_dev *rsmu, struct mfd_cell *mfd,
+ struct rsmu_pdata *pdata)
+{
+ int ret;
+
+ mfd->platform_data = pdata;
+ mfd->pdata_size = sizeof(struct rsmu_pdata);
+
+ ret = mfd_add_devices(rsmu->dev, -1, mfd, 1, NULL, 0, NULL);
+ if (ret < 0) {
+ dev_err(rsmu->dev, "mfd_add_devices failed with %s\n",
+ mfd->name);
+ return ret;
+ }
+
+ return ret;
+}
+
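+/*
+ * Each physical SMU registers two MFD children that share the same platform
+ * data (lock, type, index): a PHC cell (idtcm-ptp<n> or idt82p33-ptp<n>) for
+ * the PTP driver, and an rsmu-cdev<n> cell for the character-device driver.
+ */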
+static int rsmu_dev_init(struct rsmu_dev *rsmu)
+{
+ struct rsmu_pdata *pdata;
+ struct mfd_cell *pmfd;
+ struct mfd_cell *cmfd;
+ int ret;
+
+ /* Initialize regmap */
+ rsmu->regmap = devm_regmap_init_i2c(rsmu->client,
+ &rsmu_regmap_configs[rsmu->type]);
+ if (IS_ERR(rsmu->regmap)) {
+ ret = PTR_ERR(rsmu->regmap);
+ dev_err(rsmu->dev, "Failed to allocate register map: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* Initialize device index */
+ rsmu->index = atomic_read(&rsmu_ndevs);
+ if (rsmu->index >= RSMU_MAX_MFD_DEV)
+ return -ENODEV;
+
+ /* Initialize platform data */
+ pdata = &rsmu_pdata[rsmu->index];
+ pdata->lock = &rsmu->lock;
+ pdata->type = rsmu->type;
+ pdata->index = rsmu->index;
+
+ /* Initialize MFD devices */
+ cmfd = &rsmu_cdev[rsmu->index];
+ if (rsmu->type == RSMU_CM)
+ pmfd = &rsmu_cm_pdev[rsmu->index];
+ else if (rsmu->type == RSMU_SABRE)
+ pmfd = &rsmu_sabre_pdev[rsmu->index];
+ else
+ return -EINVAL;
+
+ ret = rsmu_mfd_init(rsmu, pmfd, pdata);
+ if (ret)
+ return ret;
+
+ return rsmu_mfd_init(rsmu, cmfd, pdata);
+}
+
+static int rsmu_dt_init(struct rsmu_dev *rsmu)
+{
+ struct device_node *np = rsmu->dev->of_node;
+
+ rsmu->type = RSMU_NONE;
+ if (of_device_is_compatible(np, "idt,8a34000")) {
+ rsmu->type = RSMU_CM;
+ } else if (of_device_is_compatible(np, "idt,82p33810")) {
+ rsmu->type = RSMU_SABRE;
+ } else {
+ dev_err(rsmu->dev, "unknown RSMU device\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rsmu_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ struct rsmu_dev *rsmu;
+ int ret;
+
+ rsmu = devm_kzalloc(&client->dev, sizeof(struct rsmu_dev),
+ GFP_KERNEL);
+ if (rsmu == NULL)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, rsmu);
+ mutex_init(&rsmu->lock);
+ rsmu->dev = &client->dev;
+ rsmu->client = client;
+
+ ret = rsmu_dt_init(rsmu);
+ if (ret)
+ return ret;
+
+ mutex_lock(&rsmu->lock);
+
+ ret = rsmu_dev_init(rsmu);
+ if (ret == 0)
+ atomic_inc(&rsmu_ndevs);
+
+ mutex_unlock(&rsmu->lock);
+
+ return ret;
+}
+
+static int rsmu_remove(struct i2c_client *client)
+{
+ struct rsmu_dev *rsmu = i2c_get_clientdata(client);
+
+ mfd_remove_devices(&client->dev);
+ mutex_destroy(&rsmu->lock);
+ atomic_dec(&rsmu_ndevs);
+
+ return 0;
+}
+
+static const struct i2c_device_id rsmu_id[] = {
+ { "8a34000", 0 },
+ { "82p33810", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, rsmu_id);
+
+static const struct of_device_id rsmu_of_match[] = {
+ {.compatible = "idt,8a34000", },
+ {.compatible = "idt,82p33810", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, rsmu_of_match);
+
+static struct i2c_driver rsmu_driver = {
+ .driver = {
+ .name = "rsmu-i2c",
+ .of_match_table = of_match_ptr(rsmu_of_match),
+ },
+ .probe = rsmu_probe,
+ .remove = rsmu_remove,
+ .id_table = rsmu_id,
+};
+
+static int __init rsmu_init(void)
+{
+ return i2c_add_driver(&rsmu_driver);
+}
+/* init early so consumer devices can complete system boot */
+subsys_initcall(rsmu_init);
+
+static void __exit rsmu_exit(void)
+{
+ i2c_del_driver(&rsmu_driver);
+}
+module_exit(rsmu_exit);
+
+MODULE_DESCRIPTION("Renesas SMU I2C multi-function driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/rsmu_private.h b/drivers/mfd/rsmu_private.h
new file mode 100644
index 000000000000..8841a2406492
--- /dev/null
+++ b/drivers/mfd/rsmu_private.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Multi-function driver for the IDT ClockMatrix(TM) and 82p33xxx families of
+ * timing and synchronization devices.
+ *
+ * Copyright (C) 2019 Integrated Device Technology, Inc., a Renesas Company.
+ */
+
+#ifndef __RSMU_MFD_PRIVATE_H
+#define __RSMU_MFD_PRIVATE_H
+
+#include <linux/mfd/rsmu.h>
+
+/* Maximum number of mfd devices */
+#define RSMU_MAX_MFD_DEV 4
+
+struct rsmu_dev {
+ struct device *dev;
+ void *client;
+ struct regmap *regmap;
+ struct mutex lock;
+ enum rsmu_type type;
+ u8 index;
+ u16 page;
+};
+
+enum rsmu_mfd_type {
+ RSMU_MFD_PTP = 0,
+ RSMU_MFD_CDEV = 1,
+ RSMU_MFD_NUM = 2,
+};
+#endif /* __RSMU_MFD_PRIVATE_H */
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index fafa8b0d8099..87a19ce189d8 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -466,6 +466,31 @@ config HISI_HIKEY_USB
switching between the dual-role USB-C port and the USB-A host ports
using only one USB controller.
+config MARVELL_OTX_BPHY_CTR
+ bool "Marvell OcteonTX BPHY Control driver"
+ select MRVL_OCTEONTX_EL0_INTR
+ default y
+ help
+ Enables the BPHY control driver, which handles ioctl calls
+ to set/clear IRQ handlers in EL3 using SMC calls.
+ The purpose of this is to handle some BPHY interrupts in
+ user space directly, without the kernel's intervention.
+
+config MARVELL_LOKI
+ tristate "Marvell Loki driver"
+ default y
+ help
+ Handles GPINT0 interrupt on Loki SoC.
+
+config RSMU
+ tristate "Renesas Synchronization Management Unit (SMU)"
+ depends on MFD_RSMU_I2C
+ help
+ This option enables support for Renesas SMU, such as Clockmatrix and
+ 82P33XXX series. It will be used by Renesas PTP Clock Manager for
+ Linux (pcm4l) software to provide support to GNSS assisted partial
+ timing support (APTS) and other networking timing functions.
+
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index d23231e73330..207949c56c4d 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -57,3 +57,7 @@ obj-$(CONFIG_HABANA_AI) += habanalabs/
obj-$(CONFIG_UACCE) += uacce/
obj-$(CONFIG_XILINX_SDFEC) += xilinx_sdfec.o
obj-$(CONFIG_HISI_HIKEY_USB) += hisi_hikey_usb.o
+obj-$(CONFIG_MARVELL_OTX_BPHY_CTR) += otx_bphy_ctr.o
+obj-$(CONFIG_MARVELL_LOKI) += mrvl-loki.o
+rsmu-objs := rsmu_cdev.o rsmu_cm.o rsmu_sabre.o
+obj-$(CONFIG_RSMU) += rsmu.o
diff --git a/drivers/misc/mrvl-loki.c b/drivers/misc/mrvl-loki.c
new file mode 100644
index 000000000000..c2a444ad0bcf
--- /dev/null
+++ b/drivers/misc/mrvl-loki.c
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Loki driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/pci.h>
+
+#define PCI_DEVICE_ID_BPHY 0xA089
+
+#define PSM_GPINT0_SUM_W1C 0x0ULL
+#define PSM_GPINT0_SUM_W1S 0x40ULL
+#define PSM_GPINT0_ENA_W1C 0x80ULL
+#define PSM_GPINT0_ENA_W1S 0xC0ULL
+
+#define CPRI_IP_AXI_INT_STATUS(a) (0x100ULL | ((a) << 10))
+#define CPRI_IP_AXI_INT(a) (0x108ULL | ((a) << 10))
+
+#define CPRI_MAX_MHAB 3
+#define CONNIP_MAX_INST 5
+#define CPRI_INT_MASK 0x1F
+
+typedef int (*connip_irq_cb_t)(uint32_t instance, uint32_t pss_int);
+
+struct mrvl_loki {
+ struct pci_dev *pdev;
+ struct msix_entry msix_ent;
+ void __iomem *psm_gpint;
+ void __iomem *cpri_axi[CPRI_MAX_MHAB];
+ int intr_num;
+ connip_irq_cb_t irq_cb;
+};
+
+struct mrvl_loki *g_ml;
+
+int mrvl_loki_register_irq_cb(connip_irq_cb_t func)
+{
+ if (!g_ml) {
+ pr_err("Error: mrvl_loki is NULL\n");
+ return -ENOENT;
+ }
+
+ if (func)
+ g_ml->irq_cb = func;
+ else
+ return -EIO;
+
+ return 0;
+}
+EXPORT_SYMBOL(mrvl_loki_register_irq_cb);
+
+void mrvl_loki_unregister_irq_cb(void)
+{
+ g_ml->irq_cb = NULL;
+}
+EXPORT_SYMBOL(mrvl_loki_unregister_irq_cb);
+
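+/*
+ * Each set bit in PSM_GPINT0 corresponds to a CONNIP instance; instance / 2
+ * selects the CPRI MHAB and instance % 2 the MAC within it, which is where
+ * the per-MAC AXI interrupt status is read and acknowledged.
+ */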
+static irqreturn_t mrvl_loki_handler(int irq, void *data)
+{
+ struct platform_device *pdev = data;
+ struct mrvl_loki *ml = platform_get_drvdata(pdev);
+ uint32_t instance, pss_int, val;
+ uint8_t cpri, mac;
+ int ret;
+
+ /* clear GPINT */
+ val = readq_relaxed(ml->psm_gpint + PSM_GPINT0_SUM_W1C) & CPRI_INT_MASK;
+ writeq_relaxed((u64)val, ml->psm_gpint + PSM_GPINT0_SUM_W1C);
+
+ for (instance = 0; instance < CONNIP_MAX_INST; instance++) {
+ if (!(val & (1 << instance)))
+ continue;
+ cpri = instance / 2;
+ mac = instance % 2;
+ pss_int = (u32)readq_relaxed(ml->cpri_axi[cpri] +
+ CPRI_IP_AXI_INT_STATUS(mac));
+ if (ml->irq_cb) {
+ ret = ml->irq_cb(instance, pss_int);
+ if (ret < 0)
+ dev_err(&pdev->dev,
+ "Error %d from loki CPRI callback\n",
+ ret);
+ }
+
+ /* clear AXI_INT */
+ writeq_relaxed((u64)pss_int,
+ ml->cpri_axi[cpri] + CPRI_IP_AXI_INT(mac));
+ }
+
+ return IRQ_HANDLED;
+}
+
+static inline void msix_enable_ctrl(struct pci_dev *dev)
+{
+ u16 control;
+
+ pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
+ control |= PCI_MSIX_FLAGS_ENABLE;
+ pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control);
+}
+
+static int mrvl_loki_probe(struct platform_device *pdev)
+{
+ struct mrvl_loki *ml;
+ struct device *dev = &pdev->dev;
+ struct pci_dev *bphy_pdev;
+ struct resource *res;
+ int ret = 0;
+
+ ml = devm_kzalloc(dev, sizeof(*ml), GFP_KERNEL);
+ if (!ml)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ml);
+
+ /*
+ * BPHY is a PCI device and the kernel resets the MSIXEN bit during
+ * enumeration. So enable it back for interrupts to be generated.
+ */
+ bphy_pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_BPHY,
+ NULL);
+ if (!bphy_pdev) {
+ dev_err(dev, "Couldn't find BPHY PCI device %x\n",
+ PCI_DEVICE_ID_BPHY);
+ ret = -ENODEV;
+ goto err;
+ }
+
+ ml->pdev = bphy_pdev;
+ ml->msix_ent.entry = 0;
+
+ msix_enable_ctrl(bphy_pdev);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ /* ioremap() returns NULL on failure, not an ERR_PTR() */
+ ml->psm_gpint = ioremap(res->start, resource_size(res));
+ if (!ml->psm_gpint) {
+ dev_err(dev, "error in ioremap PSM GPINT\n");
+ return -ENOMEM;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ ml->cpri_axi[0] = ioremap(res->start, resource_size(res));
+ if (!ml->cpri_axi[0]) {
+ dev_err(dev, "error in ioremap CPRI AXI0\n");
+ return -ENOMEM;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ ml->cpri_axi[1] = ioremap(res->start, resource_size(res));
+ if (!ml->cpri_axi[1]) {
+ dev_err(dev, "error in ioremap CPRI AXI1\n");
+ return -ENOMEM;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
+ ml->cpri_axi[2] = ioremap(res->start, resource_size(res));
+ if (!ml->cpri_axi[2]) {
+ dev_err(dev, "error in ioremap CPRI AXI2\n");
+ return -ENOMEM;
+ }
+
+ /* register interrupt */
+ ml->intr_num = irq_of_parse_and_map(dev->of_node, 0);
+
+ if (request_irq(ml->intr_num, mrvl_loki_handler, 0,
+ "mrvl loki handler", pdev)) {
+ dev_err(dev, "failed to register irq handler\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ g_ml = ml;
+ dev_info(dev, "Registered interrupt handler for %d\n", ml->intr_num);
+
+ return 0;
+
+err:
+ devm_kfree(&pdev->dev, ml);
+ return ret;
+}
+
+static int mrvl_loki_remove(struct platform_device *pdev)
+{
+ struct mrvl_loki *ml = platform_get_drvdata(pdev);
+
+ free_irq(ml->intr_num, pdev);
+ devm_kfree(&pdev->dev, ml);
+
+ return 0;
+}
+
+static const struct of_device_id mrvl_loki_of_match[] = {
+ { .compatible = "marvell,loki", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mrvl_loki_of_match);
+
+static struct platform_driver mrvl_loki_driver = {
+ .probe = mrvl_loki_probe,
+ .remove = mrvl_loki_remove,
+ .driver = {
+ .name = "mrvl-loki",
+ .of_match_table = of_match_ptr(mrvl_loki_of_match),
+ },
+};
+
+module_platform_driver(mrvl_loki_driver);
+
+MODULE_DESCRIPTION("Marvell Loki Driver");
+MODULE_AUTHOR("Radha Mohan Chintakuntla");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/otx_bphy_ctr.c b/drivers/misc/otx_bphy_ctr.c
new file mode 100644
index 000000000000..7289b8f63004
--- /dev/null
+++ b/drivers/misc/otx_bphy_ctr.c
@@ -0,0 +1,328 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2016, 2018 Cavium Inc.
+ */
+#include <linux/init.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/arm-smccc.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/moduleparam.h>
+#include <linux/uaccess.h>
+#include <linux/mmu_context.h>
+#include <linux/ioctl.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+
+#define DEVICE_NAME "otx-bphy-ctr"
+#define OTX_IOC_MAGIC 0xF3
+/* MAX_IRQ has been redefined - it now describes the maximum
+ * supported number of interrupts rather than the actual number
+ * on the current platform. The latter is obtained from ATF and
+ * indicates current capabilities.
+ * This is a limitation, but it matches the width of the
+ * interrupts' bitmask.
+ */
+#define MAX_IRQ 64
+
+static unsigned long bphy_max_irq;
+static unsigned long bphy_irq_bmask;
+static struct device *otx_device;
+static struct class *otx_class;
+static struct cdev *otx_cdev;
+static dev_t otx_dev;
+static DEFINE_SPINLOCK(el3_inthandler_lock);
+static int in_use;
+static int irq_installed[MAX_IRQ];
+static struct thread_info *irq_installed_threads[MAX_IRQ];
+static struct task_struct *irq_installed_tasks[MAX_IRQ];
+
+/* SMC definitions */
+/* X1 - irq_num, X2 - sp, X3 - cpu, X4 - ttbr0 */
+#define OCTEONTX_INSTALL_BPHY_PSM_ERRINT 0xc2000803
+/* X1 - irq_num */
+#define OCTEONTX_REMOVE_BPHY_PSM_ERRINT 0xc2000804
+/* no params */
+#define OCTEONTX_GET_BPHY_PSM_MAX_IRQ 0xc2000805
+/* no params */
+#define OCTEONTX_GET_BPHY_PSM_IRQS_BITMASK 0xc2000806
+
+struct otx_irq_usr_data {
+ u64 isr_base;
+ u64 sp;
+ u64 cpu;
+ u64 irq_num;
+};
+
+
+#define OTX_IOC_SET_BPHY_HANDLER \
+ _IOW(OTX_IOC_MAGIC, 1, struct otx_irq_usr_data)
+
+#define OTX_IOC_CLR_BPHY_HANDLER \
+ _IO(OTX_IOC_MAGIC, 2)
+
+#define OTX_IOC_GET_BPHY_MAX_IRQ \
+ _IOR(OTX_IOC_MAGIC, 3, u64)
+
+#define OTX_IOC_GET_BPHY_BMASK_IRQ \
+ _IOR(OTX_IOC_MAGIC, 4, u64)
+
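+/*
+ * Illustrative (untested) user-space sketch, assuming the character device is
+ * exposed as /dev/otx-bphy-ctr by the class/device created in this driver;
+ * isr_base, sp and cpu are placeholders supplied by the caller:
+ *
+ *	int fd = open("/dev/otx-bphy-ctr", O_RDWR);
+ *	u64 max_irq;
+ *	ioctl(fd, OTX_IOC_GET_BPHY_MAX_IRQ, &max_irq);
+ *	struct otx_irq_usr_data d = {
+ *		.isr_base = isr_base, .sp = sp, .cpu = cpu, .irq_num = 3,
+ *	};
+ *	ioctl(fd, OTX_IOC_SET_BPHY_HANDLER, &d);
+ *	...
+ *	ioctl(fd, OTX_IOC_CLR_BPHY_HANDLER, 3);
+ */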
+static inline int __install_el3_inthandler(unsigned long irq_num,
+ unsigned long sp,
+ unsigned long cpu,
+ unsigned long ttbr0)
+{
+ struct arm_smccc_res res;
+ unsigned long flags;
+ int retval = -1;
+
+ spin_lock_irqsave(&el3_inthandler_lock, flags);
+
+ if (!irq_installed[irq_num]) {
+ lock_context(current->group_leader->mm, irq_num);
+ arm_smccc_smc(OCTEONTX_INSTALL_BPHY_PSM_ERRINT, irq_num,
+ sp, cpu, ttbr0, 0, 0, 0, &res);
+ if (res.a0 == 0) {
+ irq_installed[irq_num] = 1;
+ irq_installed_threads[irq_num]
+ = current_thread_info();
+ irq_installed_tasks[irq_num]
+ = current->group_leader;
+ retval = 0;
+ } else {
+ unlock_context_by_index(irq_num);
+ }
+ }
+ spin_unlock_irqrestore(&el3_inthandler_lock, flags);
+ return retval;
+}
+
+static inline int __remove_el3_inthandler(unsigned long irq_num)
+{
+ struct arm_smccc_res res;
+ unsigned long flags;
+ int retval;
+
+ spin_lock_irqsave(&el3_inthandler_lock, flags);
+
+ if (irq_installed[irq_num]) {
+ arm_smccc_smc(OCTEONTX_REMOVE_BPHY_PSM_ERRINT, irq_num,
+ 0, 0, 0, 0, 0, 0, &res);
+ irq_installed[irq_num] = 0;
+ irq_installed_threads[irq_num] = NULL;
+ irq_installed_tasks[irq_num] = NULL;
+ unlock_context_by_index(irq_num);
+ retval = 0;
+ } else {
+ retval = -1;
+ }
+ spin_unlock_irqrestore(&el3_inthandler_lock, flags);
+ return retval;
+}
+
+static long otx_dev_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+{
+ int err = 0;
+ struct otx_irq_usr_data irq_usr;
+ u64 irq_ttbr, irq_isr_base, irq_sp, irq_cpu, irq_num;
+ int ret;
+
+ if (!in_use)
+ return -EINVAL;
+
+ if (_IOC_TYPE(cmd) != OTX_IOC_MAGIC)
+ return -ENOTTY;
+
+ if (_IOC_DIR(cmd) & _IOC_READ)
+ err = !access_ok((void __user *)arg, _IOC_SIZE(cmd));
+ else if (_IOC_DIR(cmd) & _IOC_WRITE)
+ err = !access_ok((void __user *)arg, _IOC_SIZE(cmd));
+
+ if (err)
+ return -EFAULT;
+
+ switch (cmd) {
+ case OTX_IOC_SET_BPHY_HANDLER: /* Install ISR handler */
+ ret = copy_from_user(&irq_usr, (void *)arg, _IOC_SIZE(cmd));
+ if (ret)
+ return -EFAULT;
+ if (irq_usr.irq_num >= bphy_max_irq)
+ return -EINVAL;
+ irq_ttbr = 0;
+ /* TODO: reserve an ASID to avoid ASID rollovers */
+ asm volatile("mrs %0, ttbr0_el1\n\t" : "=r"(irq_ttbr));
+ irq_isr_base = irq_usr.isr_base;
+ irq_sp = irq_usr.sp;
+ irq_cpu = irq_usr.cpu;
+ irq_num = irq_usr.irq_num;
+ ret = __install_el3_inthandler(irq_num, irq_sp,
+ irq_cpu, irq_isr_base);
+ if (ret != 0)
+ return -EEXIST;
+ break;
+ case OTX_IOC_CLR_BPHY_HANDLER: /*Clear ISR handler*/
+ irq_usr.irq_num = arg;
+ if (irq_usr.irq_num >= bphy_max_irq)
+ return -EINVAL;
+ ret = __remove_el3_inthandler(irq_usr.irq_num);
+ if (ret != 0)
+ return -ENOENT;
+ break;
+ case OTX_IOC_GET_BPHY_MAX_IRQ:
+ irq_num = bphy_max_irq;
+ if (copy_to_user((u64 *)arg, &irq_num, sizeof(irq_num)))
+ return -EFAULT;
+ break;
+ case OTX_IOC_GET_BPHY_BMASK_IRQ:
+ if (copy_to_user((u64 *)arg, &bphy_irq_bmask,
+ sizeof(bphy_irq_bmask)))
+ return -EFAULT;
+ break;
+ default:
+ return -ENOTTY;
+ }
+ return 0;
+}
+
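+/*
+ * Registered via task_cleanup_handler_add() (a vendor-specific hook in this
+ * tree, as far as can be told from this patch) so that EL3 handlers installed
+ * by a process are torn down when that process exits.
+ */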
+static void cleanup_el3_irqs(struct task_struct *task)
+{
+ int i;
+
+ for (i = 0; i < bphy_max_irq; i++) {
+ if (irq_installed[i] &&
+ irq_installed_tasks[i] &&
+ (irq_installed_tasks[i] == task)) {
+ pr_alert("Exiting, removing handler for BPHY IRQ %d\n",
+ i);
+ __remove_el3_inthandler(i);
+ pr_alert("Exited, removed handler for BPHY IRQ %d\n",
+ i);
+ } else {
+ if (irq_installed[i] &&
+ (irq_installed_threads[i]
+ == current_thread_info()))
+ pr_alert("Exiting, thread info matches, not removing handler for BPHY IRQ %d\n",
+ i);
+ }
+ }
+}
+
+static int otx_dev_open(struct inode *inode, struct file *fp)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(OCTEONTX_GET_BPHY_PSM_IRQS_BITMASK, 0,
+ 0, 0, 0, 0, 0, 0, &res);
+ bphy_irq_bmask = res.a0;
+
+ arm_smccc_smc(OCTEONTX_GET_BPHY_PSM_MAX_IRQ, 0,
+ 0, 0, 0, 0, 0, 0, &res);
+ bphy_max_irq = res.a0;
+
+ if (bphy_max_irq > MAX_IRQ)
+ return -EINVAL;
+
+ in_use = 1;
+ return 0;
+}
+
+static int otx_dev_release(struct inode *inode, struct file *fp)
+{
+ if (in_use == 0)
+ return -EINVAL;
+
+ in_use = 0;
+ return 0;
+}
+
+static const struct file_operations fops = {
+ .owner = THIS_MODULE,
+ .open = otx_dev_open,
+ .release = otx_dev_release,
+ .unlocked_ioctl = otx_dev_ioctl
+};
+
+static int __init otx_ctr_dev_init(void)
+{
+ int err = 0;
+
+ /* create a character device */
+ err = alloc_chrdev_region(&otx_dev, 1, 1, DEVICE_NAME);
+ if (err != 0) {
+ pr_err("Failed to create device: %d\n", err);
+ goto alloc_chrdev_err;
+ }
+
+ otx_cdev = cdev_alloc();
+ if (!otx_cdev) {
+ err = -ENODEV;
+ goto cdev_alloc_err;
+ }
+
+ cdev_init(otx_cdev, &fops);
+ err = cdev_add(otx_cdev, otx_dev, 1);
+ if (err < 0) {
+ err = -ENODEV;
+ goto cdev_add_err;
+ }
+
+ /* create new class for sysfs*/
+ otx_class = class_create(THIS_MODULE, DEVICE_NAME);
+ if (IS_ERR(otx_class)) {
+ err = -ENODEV;
+ goto class_create_err;
+ }
+
+ otx_device = device_create(otx_class, NULL, otx_dev, NULL,
+ DEVICE_NAME);
+ if (IS_ERR(otx_device)) {
+ err = -ENODEV;
+ goto device_create_err;
+ }
+
+ /* Register task cleanup handler */
+ err = task_cleanup_handler_add(cleanup_el3_irqs);
+ if (err != 0) {
+ dev_err(otx_device, "Failed to register cleanup handler: %d\n",
+ err);
+ goto cleanup_handler_err;
+ }
+
+ return err;
+
+cleanup_handler_err:
+ device_destroy(otx_class, otx_dev);
+device_create_err:
+ class_destroy(otx_class);
+
+class_create_err:
+cdev_add_err:
+ cdev_del(otx_cdev);
+cdev_alloc_err:
+ unregister_chrdev_region(otx_dev, 1);
+alloc_chrdev_err:
+ return err;
+}
+
+static void __exit otx_ctr_dev_exit(void)
+{
+ device_destroy(otx_class, otx_dev);
+ class_destroy(otx_class);
+ cdev_del(otx_cdev);
+ unregister_chrdev_region(otx_dev, 1);
+
+ task_cleanup_handler_remove(cleanup_el3_irqs);
+}
+
+module_init(otx_ctr_dev_init);
+module_exit(otx_ctr_dev_exit);
+
+MODULE_DESCRIPTION("Marvell OTX Control Device Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/rsmu_cdev.c b/drivers/misc/rsmu_cdev.c
new file mode 100644
index 000000000000..8fbe095860b8
--- /dev/null
+++ b/drivers/misc/rsmu_cdev.c
@@ -0,0 +1,336 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * This driver is developed for the IDT ClockMatrix(TM) and 82P33xxx families
+ * of timing and synchronization devices. It will be used by Renesas PTP Clock
+ * Manager for Linux (pcm4l) software to provide support to GNSS assisted
+ * partial timing support (APTS) and other networking timing functions.
+ *
+ * Please note it must work with Renesas MFD driver to access device through
+ * I2C/SPI.
+ *
+ * Copyright (C) 2019 Integrated Device Technology, Inc., a Renesas Company.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/mfd/rsmu.h>
+#include <uapi/linux/rsmu.h>
+
+#include "rsmu_cdev.h"
+
+#define DRIVER_NAME "rsmu"
+#define DRIVER_MAX_DEV BIT(MINORBITS)
+
+static struct class *rsmu_class;
+static dev_t rsmu_cdevt;
+static struct rsmu_ops *ops_array[] = {
+ [RSMU_CM] = &cm_ops,
+ [RSMU_SABRE] = &sabre_ops,
+};
+
+static int
+rsmu_set_combomode(struct rsmu_cdev *rsmu, void __user *arg)
+{
+ struct rsmu_ops *ops = rsmu->ops;
+ struct rsmu_combomode mode;
+ int err;
+
+ if (copy_from_user(&mode, arg, sizeof(mode)))
+ return -EFAULT;
+
+ if (ops->set_combomode == NULL)
+ return -ENOTSUPP;
+
+ mutex_lock(rsmu->lock);
+ err = ops->set_combomode(rsmu, mode.dpll, mode.mode);
+ mutex_unlock(rsmu->lock);
+
+ return err;
+}
+
+static int
+rsmu_get_dpll_state(struct rsmu_cdev *rsmu, void __user *arg)
+{
+ struct rsmu_ops *ops = rsmu->ops;
+ struct rsmu_get_state state_request;
+ u8 state;
+ int err;
+
+ if (copy_from_user(&state_request, arg, sizeof(state_request)))
+ return -EFAULT;
+
+ if (ops->get_dpll_state == NULL)
+ return -ENOTSUPP;
+
+ mutex_lock(rsmu->lock);
+ err = ops->get_dpll_state(rsmu, state_request.dpll, &state);
+ mutex_unlock(rsmu->lock);
+
+ state_request.state = state;
+ if (copy_to_user(arg, &state_request, sizeof(state_request)))
+ return -EFAULT;
+
+ return err;
+}
+
+static int
+rsmu_get_dpll_ffo(struct rsmu_cdev *rsmu, void __user *arg)
+{
+ struct rsmu_ops *ops = rsmu->ops;
+ struct rsmu_get_ffo ffo_request;
+ int err;
+
+ if (copy_from_user(&ffo_request, arg, sizeof(ffo_request)))
+ return -EFAULT;
+
+ if (ops->get_dpll_ffo == NULL)
+ return -ENOTSUPP;
+
+ mutex_lock(rsmu->lock);
+ err = ops->get_dpll_ffo(rsmu, ffo_request.dpll, &ffo_request);
+ mutex_unlock(rsmu->lock);
+
+ if (copy_to_user(arg, &ffo_request, sizeof(ffo_request)))
+ return -EFAULT;
+
+ return err;
+}
+
+static int
+rsmu_open(struct inode *iptr, struct file *fptr)
+{
+ struct rsmu_cdev *rsmu;
+
+ rsmu = container_of(iptr->i_cdev, struct rsmu_cdev, rsmu_cdev);
+ if (!rsmu)
+ return -EAGAIN;
+
+ fptr->private_data = rsmu;
+ return 0;
+}
+
+static int
+rsmu_release(struct inode *iptr, struct file *fptr)
+{
+ struct rsmu_cdev *rsmu;
+
+ rsmu = container_of(iptr->i_cdev, struct rsmu_cdev, rsmu_cdev);
+ if (!rsmu)
+ return -EAGAIN;
+
+ return 0;
+}
+
+static long
+rsmu_ioctl(struct file *fptr, unsigned int cmd, unsigned long data)
+{
+ struct rsmu_cdev *rsmu = fptr->private_data;
+ void __user *arg = (void __user *)data;
+ int err = 0;
+
+ if (!rsmu)
+ return -EINVAL;
+
+ switch (cmd) {
+ case RSMU_SET_COMBOMODE:
+ err = rsmu_set_combomode(rsmu, arg);
+ break;
+ case RSMU_GET_STATE:
+ err = rsmu_get_dpll_state(rsmu, arg);
+ break;
+ case RSMU_GET_FFO:
+ err = rsmu_get_dpll_ffo(rsmu, arg);
+ break;
+ default:
+ /* Should not get here */
+ dev_err(rsmu->dev, "Undefined RSMU IOCTL");
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
+static long rsmu_compat_ioctl(struct file *fptr, unsigned int cmd,
+ unsigned long data)
+{
+ return rsmu_ioctl(fptr, cmd, data);
+}
+
+static const struct file_operations rsmu_fops = {
+ .owner = THIS_MODULE,
+ .open = rsmu_open,
+ .release = rsmu_release,
+ .unlocked_ioctl = rsmu_ioctl,
+ .compat_ioctl = rsmu_compat_ioctl,
+};
+
+static int rsmu_init_ops(struct rsmu_cdev *rsmu)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ops_array); i++)
+ if (ops_array[i]->type == rsmu->type)
+ break;
+
+ if (i == ARRAY_SIZE(ops_array))
+ return -EINVAL;
+
+ rsmu->ops = ops_array[i];
+ return 0;
+}
+
+static int
+rsmu_probe(struct platform_device *pdev)
+{
+ struct rsmu_pdata *pdata = dev_get_platdata(&pdev->dev);
+ struct rsmu_cdev *rsmu;
+ struct device *rsmu_cdev;
+ int err;
+
+ rsmu = devm_kzalloc(&pdev->dev, sizeof(*rsmu), GFP_KERNEL);
+ if (!rsmu)
+ return -ENOMEM;
+
+ rsmu->dev = &pdev->dev;
+ rsmu->mfd = pdev->dev.parent;
+ rsmu->type = pdata->type;
+ rsmu->lock = pdata->lock;
+ rsmu->index = pdata->index;
+
+ /* Save driver private data */
+ platform_set_drvdata(pdev, rsmu);
+
+ cdev_init(&rsmu->rsmu_cdev, &rsmu_fops);
+ rsmu->rsmu_cdev.owner = THIS_MODULE;
+ err = cdev_add(&rsmu->rsmu_cdev,
+ MKDEV(MAJOR(rsmu_cdevt), 0), 1);
+ if (err < 0) {
+ dev_err(rsmu->dev, "cdev_add failed");
+ err = -EIO;
+ goto err_rsmu_dev;
+ }
+
+ if (!rsmu_class) {
+ err = -EIO;
+ dev_err(rsmu->dev, "rsmu class not created correctly");
+ goto err_rsmu_cdev;
+ }
+
+ rsmu_cdev = device_create(rsmu_class, rsmu->dev,
+ MKDEV(MAJOR(rsmu_cdevt), 0),
+ rsmu, "rsmu%d", rsmu->index);
+ if (IS_ERR(rsmu_cdev)) {
+ dev_err(rsmu->dev, "Unable to create char device");
+ err = PTR_ERR(rsmu_cdev);
+ goto err_rsmu_cdev;
+ }
+
+ err = rsmu_init_ops(rsmu);
+ if (err) {
+ dev_err(rsmu->dev, "Unable to match type %d", rsmu->type);
+ goto err_rsmu_dev_create;
+ }
+
+ dev_info(rsmu->dev, "Probe SMU type %d successful\n", rsmu->type);
+ return 0;
+
+ /* Failure cleanup */
+err_rsmu_dev_create:
+ device_destroy(rsmu_class, MKDEV(MAJOR(rsmu_cdevt), 0));
+err_rsmu_cdev:
+ cdev_del(&rsmu->rsmu_cdev);
+err_rsmu_dev:
+ return err;
+}
+
+static int
+rsmu_remove(struct platform_device *pdev)
+{
+ struct rsmu_cdev *rsmu = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+
+ if (!rsmu)
+ return -ENODEV;
+
+ if (!rsmu_class) {
+ dev_err(dev, "rsmu_class is NULL");
+ return -EIO;
+ }
+
+ device_destroy(rsmu_class, MKDEV(MAJOR(rsmu_cdevt), 0));
+ cdev_del(&rsmu->rsmu_cdev);
+
+ return 0;
+}
+
+static const struct platform_device_id rsmu_id_table[] = {
+ { "rsmu-cdev0", },
+ { "rsmu-cdev1", },
+ { "rsmu-cdev2", },
+ { "rsmu-cdev3", },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, rsmu_id_table);
+
+static struct platform_driver rsmu_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+ .probe = rsmu_probe,
+ .remove = rsmu_remove,
+ .id_table = rsmu_id_table,
+};
+
+static int __init rsmu_init(void)
+{
+ int err;
+
+ rsmu_class = class_create(THIS_MODULE, DRIVER_NAME);
+ if (IS_ERR(rsmu_class)) {
+ err = PTR_ERR(rsmu_class);
+ pr_err("Unable to register rsmu class");
+ return err;
+ }
+
+ err = alloc_chrdev_region(&rsmu_cdevt, 0, DRIVER_MAX_DEV, DRIVER_NAME);
+ if (err < 0) {
+ pr_err("Unable to get major number");
+ goto err_rsmu_class;
+ }
+
+ err = platform_driver_register(&rsmu_driver);
+ if (err < 0) {
+ pr_err("Unabled to register %s driver", DRIVER_NAME);
+ goto err_rsmu_drv;
+ }
+ return 0;
+
+ /* Error Path */
+err_rsmu_drv:
+ unregister_chrdev_region(rsmu_cdevt, DRIVER_MAX_DEV);
+err_rsmu_class:
+ class_destroy(rsmu_class);
+ return err;
+}
+
+static void __exit rsmu_exit(void)
+{
+ platform_driver_unregister(&rsmu_driver);
+ unregister_chrdev_region(rsmu_cdevt, DRIVER_MAX_DEV);
+ class_destroy(rsmu_class);
+ rsmu_class = NULL;
+}
+
+module_init(rsmu_init);
+module_exit(rsmu_exit);
+
+MODULE_DESCRIPTION("Renesas SMU character device driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/rsmu_cdev.h b/drivers/misc/rsmu_cdev.h
new file mode 100644
index 000000000000..3ced817ce3df
--- /dev/null
+++ b/drivers/misc/rsmu_cdev.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * This driver is developed for the IDT ClockMatrix(TM) family of
+ * timing and synchronization devices.
+ *
+ * Copyright (C) 2019 Integrated Device Technology, Inc., a Renesas Company.
+ */
+#ifndef __LINUX_RSMU_CDEV_H
+#define __LINUX_RSMU_CDEV_H
+
+#include <linux/cdev.h>
+
+struct rsmu_ops;
+
+/**
+ * struct rsmu_cdev - Driver data for RSMU character device
+ * @dev: pointer to platform device
+ * @mfd: pointer to MFD device
+ * @rsmu_cdev: character device handle
+ * @lock: mutex to protect operations from being interrupted
+ * @type: rsmu device type
+ * @ops: rsmu device methods
+ * @index: rsmu device index
+ */
+struct rsmu_cdev {
+ struct device *dev;
+ struct device *mfd;
+ struct cdev rsmu_cdev;
+ struct mutex *lock;
+ enum rsmu_type type;
+ struct rsmu_ops *ops;
+ u8 index;
+};
+
+extern struct rsmu_ops cm_ops;
+extern struct rsmu_ops sabre_ops;
+
+struct rsmu_ops {
+ enum rsmu_type type;
+ int (*set_combomode)(struct rsmu_cdev *rsmu, u8 dpll, u8 mode);
+ int (*get_dpll_state)(struct rsmu_cdev *rsmu, u8 dpll, u8 *state);
+ int (*get_dpll_ffo)(struct rsmu_cdev *rsmu, u8 dpll,
+ struct rsmu_get_ffo *ffo);
+};
+
+/**
+ * Enumerated type listing DPLL combination modes
+ */
+enum rsmu_dpll_combomode {
+ E_COMBOMODE_CURRENT = 0,
+ E_COMBOMODE_FASTAVG,
+ E_COMBOMODE_SLOWAVG,
+ E_COMBOMODE_HOLDOVER,
+ E_COMBOMODE_MAX
+};
+
+/**
+ * An id used to identify the respective child class states.
+ */
+enum rsmu_class_state {
+ E_SRVLOINITIALSTATE = 0,
+ E_SRVLOUNQUALIFIEDSTATE = 1,
+ E_SRVLOLOCKACQSTATE = 2,
+ E_SRVLOFREQUENCYLOCKEDSTATE = 3,
+ E_SRVLOTIMELOCKEDSTATE = 4,
+ E_SRVLOHOLDOVERINSPECSTATE = 5,
+ E_SRVLOHOLDOVEROUTOFSPECSTATE = 6,
+ E_SRVLOFREERUNSTATE = 7,
+ E_SRVNUMBERLOSTATES = 8,
+ E_SRVLOSTATEINVALID = 9,
+};
+#endif
diff --git a/drivers/misc/rsmu_cm.c b/drivers/misc/rsmu_cm.c
new file mode 100644
index 000000000000..d5af624badff
--- /dev/null
+++ b/drivers/misc/rsmu_cm.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * This driver is developed for the IDT ClockMatrix(TM) family of
+ * timing and synchronization devices.
+ *
+ * Copyright (C) 2019 Integrated Device Technology, Inc., a Renesas Company.
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/mfd/idt8a340_reg.h>
+#include <linux/mfd/rsmu.h>
+#include <uapi/linux/rsmu.h>
+#include <asm/unaligned.h>
+
+#include "rsmu_cdev.h"
+
+static int rsmu_cm_set_combomode(struct rsmu_cdev *rsmu, u8 dpll, u8 mode)
+{
+ u16 dpll_ctrl_n;
+ u8 cfg;
+ int err;
+
+ switch (dpll) {
+ case 0:
+ dpll_ctrl_n = DPLL_CTRL_0;
+ break;
+ case 1:
+ dpll_ctrl_n = DPLL_CTRL_1;
+ break;
+ case 2:
+ dpll_ctrl_n = DPLL_CTRL_2;
+ break;
+ case 3:
+ dpll_ctrl_n = DPLL_CTRL_3;
+ break;
+ case 4:
+ dpll_ctrl_n = DPLL_CTRL_4;
+ break;
+ case 5:
+ dpll_ctrl_n = DPLL_CTRL_5;
+ break;
+ case 6:
+ dpll_ctrl_n = DPLL_CTRL_6;
+ break;
+ case 7:
+ dpll_ctrl_n = DPLL_CTRL_7;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (mode >= E_COMBOMODE_MAX)
+ return -EINVAL;
+
+ err = rsmu_read(rsmu->mfd, dpll_ctrl_n + DPLL_CTRL_COMBO_MASTER_CFG,
+ &cfg, sizeof(cfg));
+ if (err)
+ return err;
+
+ /* Only need to enable/disable COMBO_MODE_HOLD. */
+ if (mode)
+ cfg |= COMBO_MASTER_HOLD;
+ else
+ cfg &= ~COMBO_MASTER_HOLD;
+
+ return rsmu_write(rsmu->mfd, dpll_ctrl_n + DPLL_CTRL_COMBO_MASTER_CFG,
+ &cfg, sizeof(cfg));
+}
+
+static int rsmu_cm_get_dpll_state(struct rsmu_cdev *rsmu, u8 dpll, u8 *state)
+{
+ u8 cfg;
+ int err;
+
+ /* 8 is sys dpll */
+ if (dpll > 8)
+ return -EINVAL;
+
+ err = rsmu_read(rsmu->mfd,
+ STATUS + DPLL0_STATUS + dpll,
+ &cfg, sizeof(cfg));
+ if (err)
+ return err;
+
+ switch (cfg & DPLL_STATE_MASK) {
+ case DPLL_STATE_FREERUN:
+ *state = E_SRVLOUNQUALIFIEDSTATE;
+ break;
+ case DPLL_STATE_LOCKACQ:
+ case DPLL_STATE_LOCKREC:
+ *state = E_SRVLOLOCKACQSTATE;
+ break;
+ case DPLL_STATE_LOCKED:
+ *state = E_SRVLOTIMELOCKEDSTATE;
+ break;
+ case DPLL_STATE_HOLDOVER:
+ *state = E_SRVLOHOLDOVERINSPECSTATE;
+ break;
+ default:
+ *state = E_SRVLOSTATEINVALID;
+ break;
+ }
+
+ return 0;
+}
+
+static int rsmu_cm_get_dpll_ffo(struct rsmu_cdev *rsmu, u8 dpll,
+ struct rsmu_get_ffo *ffo)
+{
+ u8 buf[8] = {0};
+ s64 fcw = 0;
+ u16 dpll_filter_status;
+ int err;
+
+ switch (dpll) {
+ case 0:
+ dpll_filter_status = DPLL0_FILTER_STATUS;
+ break;
+ case 1:
+ dpll_filter_status = DPLL1_FILTER_STATUS;
+ break;
+ case 2:
+ dpll_filter_status = DPLL2_FILTER_STATUS;
+ break;
+ case 3:
+ dpll_filter_status = DPLL3_FILTER_STATUS;
+ break;
+ case 4:
+ dpll_filter_status = DPLL4_FILTER_STATUS;
+ break;
+ case 5:
+ dpll_filter_status = DPLL5_FILTER_STATUS;
+ break;
+ case 6:
+ dpll_filter_status = DPLL6_FILTER_STATUS;
+ break;
+ case 7:
+ dpll_filter_status = DPLL7_FILTER_STATUS;
+ break;
+ case 8:
+ dpll_filter_status = DPLLSYS_FILTER_STATUS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ err = rsmu_read(rsmu->mfd, STATUS + dpll_filter_status, buf, 6);
+ if (err)
+ return err;
+
+ /* Convert to frequency control word */
+ fcw = sign_extend64(get_unaligned_le64(buf), 47);
+
+ /* FCW unit is 2 ^ -53 = 1.1102230246251565404236316680908e-16 */
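+ /*
+ * Scaling by 111 appears to express the offset in units of 1e-18,
+ * since 2^-53 is roughly 111.02e-18; the truncated constant gives a
+ * small (~0.02%) systematic error that is presumably acceptable here.
+ */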
+ ffo->ffo = fcw * 111;
+
+ return 0;
+}
+
+struct rsmu_ops cm_ops = {
+ .type = RSMU_CM,
+ .set_combomode = rsmu_cm_set_combomode,
+ .get_dpll_state = rsmu_cm_get_dpll_state,
+ .get_dpll_ffo = rsmu_cm_get_dpll_ffo,
+};
diff --git a/drivers/misc/rsmu_sabre.c b/drivers/misc/rsmu_sabre.c
new file mode 100644
index 000000000000..aa772f1c0854
--- /dev/null
+++ b/drivers/misc/rsmu_sabre.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * This driver is developed for the IDT 82P33XXX series of
+ * timing and synchronization devices.
+ *
+ * Copyright (C) 2019 Integrated Device Technology, Inc., a Renesas Company.
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/mfd/idt82p33_reg.h>
+#include <linux/mfd/rsmu.h>
+#include <uapi/linux/rsmu.h>
+#include <asm/unaligned.h>
+
+#include "rsmu_cdev.h"
+
+static int rsmu_sabre_set_combomode(struct rsmu_cdev *rsmu, u8 dpll, u8 mode)
+{
+ u16 dpll_ctrl_n;
+ u8 cfg;
+ int err;
+
+ switch (dpll) {
+ case 0:
+ dpll_ctrl_n = DPLL1_OPERATING_MODE_CNFG;
+ break;
+ case 1:
+ dpll_ctrl_n = DPLL2_OPERATING_MODE_CNFG;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (mode >= E_COMBOMODE_MAX)
+ return -EINVAL;
+
+ err = rsmu_read(rsmu->mfd, dpll_ctrl_n, &cfg, sizeof(cfg));
+ if (err)
+ return err;
+
+ cfg &= ~(COMBO_MODE_MASK << COMBO_MODE_SHIFT);
+ cfg |= mode << COMBO_MODE_SHIFT;
+
+ return rsmu_write(rsmu->mfd, dpll_ctrl_n, &cfg, sizeof(cfg));
+}
+
+static int rsmu_sabre_get_dpll_state(struct rsmu_cdev *rsmu, u8 dpll, u8 *state)
+{
+ u16 dpll_sts_n;
+ u8 cfg;
+ int err;
+
+ switch (dpll) {
+ case 0:
+ dpll_sts_n = DPLL1_OPERATING_STS;
+ break;
+ case 1:
+ dpll_sts_n = DPLL2_OPERATING_STS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ err = rsmu_read(rsmu->mfd, dpll_sts_n, &cfg, sizeof(cfg));
+ if (err)
+ return err;
+
+ switch (cfg & OPERATING_STS_MASK) {
+ case DPLL_STATE_FREERUN:
+ *state = E_SRVLOUNQUALIFIEDSTATE;
+ break;
+ case DPLL_STATE_PRELOCKED2:
+ case DPLL_STATE_PRELOCKED:
+ *state = E_SRVLOLOCKACQSTATE;
+ break;
+ case DPLL_STATE_LOCKED:
+ *state = E_SRVLOTIMELOCKEDSTATE;
+ break;
+ case DPLL_STATE_HOLDOVER:
+ *state = E_SRVLOHOLDOVERINSPECSTATE;
+ break;
+ default:
+ *state = E_SRVLOSTATEINVALID;
+ break;
+ }
+
+ return 0;
+}
+
+static int rsmu_sabre_get_dpll_ffo(struct rsmu_cdev *rsmu, u8 dpll,
+ struct rsmu_get_ffo *ffo)
+{
+ u8 buf[8] = {0};
+ s64 fcw = 0;
+ u16 dpll_freq_n;
+ int err;
+
+ switch (dpll) {
+ case 0:
+ dpll_freq_n = DPLL1_CURRENT_FREQ_STS;
+ break;
+ case 1:
+ dpll_freq_n = DPLL2_CURRENT_FREQ_STS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ err = rsmu_read(rsmu->mfd, dpll_freq_n, buf, 5);
+ if (err)
+ return err;
+
+ /* Convert to frequency control word */
+ fcw = sign_extend64(get_unaligned_le64(buf), 39);
+
+ /* FCW unit is 77760 / ( 1638400 * 2^48) = 1.68615121864946 * 10^-16 */
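+ /*
+ * 1.686e-16 is roughly 168.615e-18, so fcw * 168615 / 1000 likewise
+ * appears to yield the offset in units of 1e-18, matching the
+ * ClockMatrix conversion in rsmu_cm.c.
+ */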
+ ffo->ffo = div_s64(fcw * 168615, 1000);
+
+ return 0;
+}
+
+struct rsmu_ops sabre_ops = {
+ .type = RSMU_SABRE,
+ .set_combomode = rsmu_sabre_set_combomode,
+ .get_dpll_state = rsmu_sabre_get_dpll_state,
+ .get_dpll_ffo = rsmu_sabre_get_dpll_ffo,
+};
diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig
index c12fe13e4b14..4c651da4f2d2 100644
--- a/drivers/mmc/core/Kconfig
+++ b/drivers/mmc/core/Kconfig
@@ -34,9 +34,22 @@ config PWRSEQ_SIMPLE
This driver can also be built as a module. If so, the module
will be called pwrseq_simple.
+config MMC_PSTORE_BACKEND
+ bool "Log panic/oops to a MMC buffer"
+ depends on MMC_BLOCK
+ help
+ This option will let you create a platform backend to store kmsg
+ crash dumps on a user-specified MMC device. This is primarily
+ based on pstore/blk.
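+ The backing block device and record sizes are taken from the
+ generic pstore/blk parameters (for example pstore_blk.blkdev=);
+ see Documentation/admin-guide/pstore-blk.rst.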
+
+config MMC_PSTORE
+ tristate
+ select PSTORE_BLK
+
config MMC_BLOCK
tristate "MMC block device driver"
depends on BLOCK
+ select MMC_PSTORE if MMC_PSTORE_BACKEND=y
default y
help
Say Y here to enable the MMC block device driver support.
@@ -80,4 +93,3 @@ config MMC_TEST
This driver is only of interest to those developing or
testing a host driver. Most people should say N here.
-
diff --git a/drivers/mmc/core/Makefile b/drivers/mmc/core/Makefile
index 95ffe008ebdf..7cb9a3af4827 100644
--- a/drivers/mmc/core/Makefile
+++ b/drivers/mmc/core/Makefile
@@ -16,5 +16,6 @@ obj-$(CONFIG_PWRSEQ_EMMC) += pwrseq_emmc.o
mmc_core-$(CONFIG_DEBUG_FS) += debugfs.o
obj-$(CONFIG_MMC_BLOCK) += mmc_block.o
mmc_block-objs := block.o queue.o
+mmc_block-$(CONFIG_MMC_PSTORE) += mmcpstore.o
obj-$(CONFIG_MMC_TEST) += mmc_test.o
obj-$(CONFIG_SDIO_UART) += sdio_uart.o
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 99b981a05b6c..0befe07a2cd7 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -2893,6 +2893,28 @@ static void mmc_blk_remove_debugfs(struct mmc_card *card,
#endif /* CONFIG_DEBUG_FS */
+#if IS_ENABLED(CONFIG_MMC_PSTORE)
+int mmc_blk_needs_part_switch(struct mmc_card *card)
+{
+ struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
+
+ return (md->part_curr != md->part_type);
+}
+
+sector_t mmc_blk_get_part(struct mmc_card *card, int part_num, sector_t *size)
+{
+ struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
+ struct gendisk *disk = md->disk;
+ struct disk_part_tbl *part_tbl = disk->part_tbl;
+
+ if (part_num < 0 || part_num >= part_tbl->len)
+ return 0;
+
+ *size = part_tbl->part[part_num]->nr_sects << SECTOR_SHIFT;
+ return part_tbl->part[part_num]->start_sect;
+}
+#endif
+
static int mmc_blk_probe(struct mmc_card *card)
{
struct mmc_blk_data *md, *part_md;
@@ -2936,6 +2958,9 @@ static int mmc_blk_probe(struct mmc_card *card)
goto out;
}
+ if (mmc_card_mmc(card) || mmc_card_sd(card))
+ mmcpstore_card_set(card, md->disk->disk_name);
+
/* Add two debugfs entries */
mmc_blk_add_debugfs(card, md);
@@ -3083,6 +3108,7 @@ static void __exit mmc_blk_exit(void)
unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
unregister_chrdev_region(mmc_rpmb_devt, MAX_DEVICES);
bus_unregister(&mmc_rpmb_bus_type);
+ unregister_mmcpstore();
}
module_init(mmc_blk_init);
diff --git a/drivers/mmc/core/block.h b/drivers/mmc/core/block.h
index 31153f656f41..0f8cb25bffbc 100644
--- a/drivers/mmc/core/block.h
+++ b/drivers/mmc/core/block.h
@@ -16,5 +16,15 @@ void mmc_blk_mq_recovery(struct mmc_queue *mq);
struct work_struct;
void mmc_blk_mq_complete_work(struct work_struct *work);
+#if IS_ENABLED(CONFIG_MMC_PSTORE)
+int mmc_blk_needs_part_switch(struct mmc_card *card);
+sector_t mmc_blk_get_part(struct mmc_card *card, int part_num, sector_t *size);
+void mmcpstore_card_set(struct mmc_card *card, const char *disk_name);
+void unregister_mmcpstore(void);
+#else
+static inline void mmcpstore_card_set(struct mmc_card *card,
+ const char *disk_name) {}
+static inline void unregister_mmcpstore(void) {}
+#endif
#endif
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index eb82f6aac951..38e82a00d790 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -569,6 +569,30 @@ int mmc_cqe_recovery(struct mmc_host *host)
}
EXPORT_SYMBOL(mmc_cqe_recovery);
+#if IS_ENABLED(CONFIG_MMC_PSTORE)
+/**
+ * mmc_wait_for_pstore_req - initiate a blocking mmc request
+ * @host: MMC host to start command
+ * @mrq: MMC request to start
+ *
+ * Start a blocking MMC request for a host and wait for the request
+ * to complete that is based on polling and timeout.
+ */
+void mmc_wait_for_pstore_req(struct mmc_host *host, struct mmc_request *mrq)
+{
+ unsigned int timeout;
+
+ host->ops->req_cleanup_pending(host);
+ mmc_start_request(host, mrq);
+
+ if (mrq->data) {
+ timeout = mrq->data->timeout_ns / NSEC_PER_MSEC;
+ host->ops->req_completion_poll(host, timeout);
+ }
+}
+EXPORT_SYMBOL(mmc_wait_for_pstore_req);
+#endif
+
/**
* mmc_is_req_done - Determine if a 'cap_cmd_during_tfr' request is done
* @host: MMC host
@@ -817,6 +841,26 @@ int __mmc_claim_host(struct mmc_host *host, struct mmc_ctx *ctx,
}
EXPORT_SYMBOL(__mmc_claim_host);
+#if IS_ENABLED(CONFIG_MMC_PSTORE)
+/**
+ * mmc_claim_host_async - claim host in atomic context
+ * @host: mmc host to claim
+ *
+ * This routine may be called in panic/oops scenarios.
+ * Return zero with host claim success, else busy status.
+ */
+int mmc_claim_host_async(struct mmc_host *host)
+{
+ if (!host->claimed && pm_runtime_active(mmc_dev(host))) {
+ host->claimed = 1;
+ return 0;
+ }
+
+ return -EBUSY;
+}
+EXPORT_SYMBOL(mmc_claim_host_async);
+#endif
+
/**
* mmc_release_host - release a host
* @host: mmc host to release
diff --git a/drivers/mmc/core/mmcpstore.c b/drivers/mmc/core/mmcpstore.c
new file mode 100644
index 000000000000..e394acc07c33
--- /dev/null
+++ b/drivers/mmc/core/mmcpstore.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MMC pstore support based on pstore/blk
+ *
+ * Copyright (c) 2020 Marvell.
+ * Author: Bhaskara Budiredla <bbudiredla@marvell.com>
+ */
+
+#define pr_fmt(fmt) "mmcpstore: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pstore_blk.h>
+#include <linux/blkdev.h>
+#include <linux/mount.h>
+#include <linux/slab.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/scatterlist.h>
+#include "block.h"
+#include "card.h"
+#include "core.h"
+
+static struct mmcpstore_context {
+ char dev_name[BDEVNAME_SIZE];
+ int partno;
+ sector_t start_sect;
+ sector_t size;
+ struct pstore_blk_config conf;
+ struct pstore_blk_info info;
+
+ struct mmc_card *card;
+ struct mmc_request *mrq;
+} oops_cxt;
+
+static void mmc_prep_req(struct mmc_request *mrq,
+ unsigned int sect_offset, unsigned int nsects,
+ struct scatterlist *sg, u32 opcode, unsigned int flags)
+{
+ mrq->cmd->opcode = opcode;
+ mrq->cmd->arg = sect_offset;
+ mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+
+ if (nsects == 1) {
+ mrq->stop = NULL;
+ } else {
+ mrq->stop->opcode = MMC_STOP_TRANSMISSION;
+ mrq->stop->arg = 0;
+ mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
+ }
+
+ mrq->data->blksz = SECTOR_SIZE;
+ mrq->data->blocks = nsects;
+ mrq->data->flags = flags;
+ mrq->data->sg = sg;
+ mrq->data->sg_len = 1;
+}
+
+static int mmcpstore_panic_write_req(const char *buf,
+ unsigned int nsects, unsigned int sect_offset)
+{
+ struct mmcpstore_context *cxt = &oops_cxt;
+ struct mmc_request *mrq = cxt->mrq;
+ struct mmc_card *card = cxt->card;
+ struct mmc_host *host = card->host;
+ struct scatterlist sg;
+ u32 opcode;
+ int ret;
+
+ opcode = (nsects > 1) ? MMC_WRITE_MULTIPLE_BLOCK : MMC_WRITE_BLOCK;
+ mmc_prep_req(mrq, sect_offset, nsects, &sg, opcode, MMC_DATA_WRITE);
+ sg_init_one(&sg, buf, (nsects << SECTOR_SHIFT));
+ mmc_set_data_timeout(mrq->data, cxt->card);
+
+ ret = mmc_claim_host_async(host);
+ if (ret)
+ return ret;
+
+ mmc_wait_for_pstore_req(host, mrq);
+ return 0;
+}
+
+static int mmcpstore_panic_write(const char *buf, sector_t off, sector_t sects)
+{
+ struct mmcpstore_context *cxt = &oops_cxt;
+ struct mmc_card *card = cxt->card;
+ int ret;
+
+ /* Drop the panic record if partition switching is required */
+ if (mmc_card_mmc(card) && mmc_blk_needs_part_switch(card))
+ return -EPERM;
+
+ ret = mmcpstore_panic_write_req(buf, sects, cxt->start_sect + off);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static struct block_device *mmcpstore_open_backend(const char *device)
+{
+ struct block_device *bdev;
+ dev_t devt;
+
+ bdev = blkdev_get_by_path(device, FMODE_READ, NULL);
+ if (IS_ERR(bdev)) {
+ devt = name_to_dev_t(device);
+ if (devt == 0)
+ return ERR_PTR(-ENODEV);
+
+ bdev = blkdev_get_by_dev(devt, FMODE_READ, NULL);
+ if (IS_ERR(bdev))
+ return bdev;
+ }
+
+ return bdev;
+}
+
+static void mmcpstore_close_backend(struct block_device *bdev)
+{
+ if (!bdev)
+ return;
+ blkdev_put(bdev, FMODE_READ);
+}
+
+void mmcpstore_card_set(struct mmc_card *card, const char *disk_name)
+{
+ struct mmcpstore_context *cxt = &oops_cxt;
+ struct pstore_blk_config *conf = &cxt->conf;
+ struct pstore_blk_info *info = &cxt->info;
+ struct block_device *bdev;
+ struct mmc_command *stop;
+ struct mmc_command *cmd;
+ struct mmc_request *mrq;
+ struct mmc_data *data;
+ int ret;
+
+ ret = pstore_blk_get_config(conf);
+ if (ret || !conf->device[0]) {
+ pr_debug("psblk backend is empty\n");
+ return;
+ }
+
+ /* Multiple backend devices not allowed */
+ if (cxt->dev_name[0])
+ return;
+
+ bdev = mmcpstore_open_backend(conf->device);
+ if (IS_ERR(bdev)) {
+ pr_err("%s failed to open with %ld\n",
+ conf->device, PTR_ERR(bdev));
+ return;
+ }
+
+ bdevname(bdev, cxt->dev_name);
+ cxt->partno = bdev->bd_part->partno;
+ mmcpstore_close_backend(bdev);
+
+ if (strncmp(cxt->dev_name, disk_name, strlen(disk_name)))
+ return;
+
+ cxt->start_sect = mmc_blk_get_part(card, cxt->partno, &cxt->size);
+ if (!cxt->start_sect) {
+ pr_err("Non-existent partition %d selected\n", cxt->partno);
+ return;
+ }
+
+ /* Check for host mmc panic write polling function definitions */
+ if (!card->host->ops->req_cleanup_pending ||
+ !card->host->ops->req_completion_poll)
+ return;
+
+ cxt->card = card;
+
+ mrq = kzalloc(sizeof(struct mmc_request), GFP_KERNEL);
+ if (!mrq)
+ goto out;
+
+ cmd = kzalloc(sizeof(struct mmc_command), GFP_KERNEL);
+ if (!cmd)
+ goto free_mrq;
+
+ stop = kzalloc(sizeof(struct mmc_command), GFP_KERNEL);
+ if (!stop)
+ goto free_cmd;
+
+ data = kzalloc(sizeof(struct mmc_data), GFP_KERNEL);
+ if (!data)
+ goto free_stop;
+
+ mrq->cmd = cmd;
+ mrq->data = data;
+ mrq->stop = stop;
+ cxt->mrq = mrq;
+
+ info->major = MMC_BLOCK_MAJOR;
+ info->flags = PSTORE_FLAGS_DMESG;
+ info->panic_write = mmcpstore_panic_write;
+ ret = register_pstore_blk(info);
+ if (ret) {
+ pr_err("%s registering with psblk failed (%d)\n",
+ cxt->dev_name, ret);
+ goto free_data;
+ }
+
+ pr_info("%s registered as psblk backend\n", cxt->dev_name);
+ return;
+
+free_data:
+ cxt->mrq = NULL;
+ kfree(data);
+free_stop:
+ kfree(stop);
+free_cmd:
+ kfree(cmd);
+free_mrq:
+ kfree(mrq);
+out:
+ return;
+}
+
+void unregister_mmcpstore(void)
+{
+ struct mmcpstore_context *cxt = &oops_cxt;
+
+ unregister_pstore_blk(MMC_BLOCK_MAJOR);
+ /* mrq is only set up if a backend was actually registered */
+ if (cxt->mrq) {
+ kfree(cxt->mrq->data);
+ kfree(cxt->mrq->stop);
+ kfree(cxt->mrq->cmd);
+ kfree(cxt->mrq);
+ cxt->mrq = NULL;
+ }
+ cxt->card = NULL;
+}
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 30ff42fd173e..6aa5d1cf2666 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -229,6 +229,7 @@ config MMC_SDHCI_CADENCE
tristate "SDHCI support for the Cadence SD/SDIO/eMMC controller"
depends on MMC_SDHCI_PLTFM
depends on OF
+ select MMC_SDHCI_IO_ACCESSORS
help
This selects the Cadence SD/SDIO/eMMC driver.
diff --git a/drivers/mmc/host/cavium-octeon.c b/drivers/mmc/host/cavium-octeon.c
index 2c4b2df52adb..ed23f7d2ff76 100644
--- a/drivers/mmc/host/cavium-octeon.c
+++ b/drivers/mmc/host/cavium-octeon.c
@@ -171,6 +171,7 @@ static int octeon_mmc_probe(struct platform_device *pdev)
host->dmar_fixup_done = octeon_mmc_dmar_fixup_done;
}
+ host->max_freq = MHZ_52;
host->sys_freq = octeon_get_io_clock_rate();
if (of_device_is_compatible(node, "cavium,octeon-7890-mmc")) {
@@ -236,8 +237,8 @@ static int octeon_mmc_probe(struct platform_device *pdev)
/* Only CMD_DONE, DMA_DONE, CMD_ERR, DMA_ERR */
for (i = 1; i <= 4; i++) {
ret = devm_request_irq(&pdev->dev, mmc_irq[i],
- cvm_mmc_interrupt,
- 0, cvm_mmc_irq_names[i], host);
+ cvm_mmc_interrupt, IRQF_NO_THREAD,
+ cvm_mmc_irq_names[i], host);
if (ret < 0) {
dev_err(&pdev->dev, "Error: devm_request_irq %d\n",
mmc_irq[i]);
@@ -246,8 +247,8 @@ static int octeon_mmc_probe(struct platform_device *pdev)
}
} else {
ret = devm_request_irq(&pdev->dev, mmc_irq[0],
- cvm_mmc_interrupt, 0, KBUILD_MODNAME,
- host);
+ cvm_mmc_interrupt, IRQF_NO_THREAD,
+ KBUILD_MODNAME, host);
if (ret < 0) {
dev_err(&pdev->dev, "Error: devm_request_irq %d\n",
mmc_irq[0]);
@@ -266,7 +267,7 @@ static int octeon_mmc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, host);
i = 0;
- for_each_child_of_node(node, cn) {
+ for_each_available_child_of_node(node, cn) {
host->slot_pdev[i] =
of_platform_device_create(cn, NULL, &pdev->dev);
if (!host->slot_pdev[i]) {
diff --git a/drivers/mmc/host/cavium-thunderx.c b/drivers/mmc/host/cavium-thunderx.c
index 76013bbbcff3..70d1bdc9bb9f 100644
--- a/drivers/mmc/host/cavium-thunderx.c
+++ b/drivers/mmc/host/cavium-thunderx.c
@@ -15,22 +15,36 @@
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/bitfield.h>
#include "cavium.h"
static void thunder_mmc_acquire_bus(struct cvm_mmc_host *host)
{
+#if IS_ENABLED(CONFIG_MMC_PSTORE)
+ if (!host->pstore)
+ down(&host->mmc_serializer);
+#else
down(&host->mmc_serializer);
+#endif
}
static void thunder_mmc_release_bus(struct cvm_mmc_host *host)
{
+#if IS_ENABLED(CONFIG_MMC_PSTORE)
+ if (!host->pstore)
+ up(&host->mmc_serializer);
+#else
up(&host->mmc_serializer);
+#endif
}
static void thunder_mmc_int_enable(struct cvm_mmc_host *host, u64 val)
{
writeq(val, host->base + MIO_EMM_INT(host));
writeq(val, host->base + MIO_EMM_INT_EN_SET(host));
+ writeq(MIO_EMM_DMA_INT_DMA,
+ host->dma_base + MIO_EMM_DMA_INT(host));
}
static int thunder_mmc_register_interrupts(struct cvm_mmc_host *host,
@@ -45,14 +59,127 @@ static int thunder_mmc_register_interrupts(struct cvm_mmc_host *host,
/* register interrupts */
for (i = 0; i < nvec; i++) {
ret = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, i),
- cvm_mmc_interrupt,
- 0, cvm_mmc_irq_names[i], host);
+ cvm_mmc_interrupt, 0,
+ cvm_mmc_irq_names[i], host);
if (ret)
return ret;
}
return 0;
}
+/* Calibration evaluates the per-tap delay */
+static void thunder_calibrate_mmc(struct cvm_mmc_host *host)
+{
+ u32 retries = 10;
+ u32 delay = 4;
+ unsigned int ps;
+ const char *how = "default";
+
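+	/* tap calibration only exists on parts newer than the 8xxx family */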
+ if (is_mmc_8xxx(host))
+ return;
+
+	/* set _DEBUG[CLK_ON]=1 as a workaround for the clock glitch issue */
+ if (host->cond_clock_glitch)
+ writeq(1, host->base + MIO_EMM_DEBUG(host));
+
+ if (host->calibrate_glitch) {
+ /*
+		 * Operation at up to 100 MHz can be achieved by skipping the
+		 * steps that establish the tap delays and instead assuming
+		 * that MIO_EMM_TAP[DELAY] returns 0x4, indicating 78 ps/tap.
+ */
+ } else {
+ u64 tap;
+ u64 emm_cfg = readq(host->base + MIO_EMM_CFG(host));
+ u64 tcfg;
+ u64 emm_io_ctl;
+ u64 emm_switch;
+ u64 emm_wdog;
+ u64 emm_sts_mask;
+ u64 emm_debug;
+ u64 emm_timing;
+ u64 emm_rca;
+
+ /*
+ * MIO_EMM_CFG[BUS_ENA] must be zero for calibration,
+ * but that resets whole host, so save state.
+ */
+ emm_io_ctl = readq(host->base + MIO_EMM_IO_CTL(host));
+ emm_switch = readq(host->base + MIO_EMM_SWITCH(host));
+ emm_wdog = readq(host->base + MIO_EMM_WDOG(host));
+ emm_sts_mask =
+ readq(host->base + MIO_EMM_STS_MASK(host));
+ emm_debug = readq(host->base + MIO_EMM_DEBUG(host));
+ emm_timing = readq(host->base + MIO_EMM_TIMING(host));
+ emm_rca = readq(host->base + MIO_EMM_RCA(host));
+
+ /* reset controller */
+ tcfg = emm_cfg;
+ tcfg &= ~MIO_EMM_CFG_BUS_ENA;
+ writeq(tcfg, host->base + MIO_EMM_CFG(host));
+ udelay(1);
+
+ /* restart with phantom slot 3 */
+ tcfg |= FIELD_PREP(MIO_EMM_CFG_BUS_ENA, 1ull << 3);
+ writeq(tcfg, host->base + MIO_EMM_CFG(host));
+ mdelay(1);
+
+ /* Start calibration */
+ writeq(0, host->base + MIO_EMM_CALB(host));
+ udelay(5);
+ writeq(START_CALIBRATION, host->base + MIO_EMM_CALB(host));
+ udelay(5);
+
+ do {
+			/* wait for approximately 300 coprocessor clock cycles */
+ udelay(5);
+ tap = readq(host->base + MIO_EMM_TAP(host));
+ } while (!tap && retries--);
+
+ /* leave calibration mode */
+ writeq(0, host->base + MIO_EMM_CALB(host));
+ udelay(5);
+
+		if (!tap) {
+ how = "fallback";
+ } else {
+ /* calculate the per-tap delay */
+ delay = tap & MIO_EMM_TAP_DELAY;
+ how = "calibrated";
+ }
+
+ /* Reset eMMC subsystem */
+ writeq(0, host->base + MIO_EMM_CFG(host));
+ udelay(1);
+ /* restore old state */
+ writeq(emm_cfg, host->base + MIO_EMM_CFG(host));
+ mdelay(1);
+ writeq(emm_rca, host->base + MIO_EMM_RCA(host));
+ writeq(emm_timing, host->base + MIO_EMM_TIMING(host));
+ writeq(emm_debug, host->base + MIO_EMM_DEBUG(host));
+ writeq(emm_sts_mask,
+ host->base + MIO_EMM_STS_MASK(host));
+ writeq(emm_wdog, host->base + MIO_EMM_WDOG(host));
+ writeq(emm_switch, host->base + MIO_EMM_SWITCH(host));
+ writeq(emm_io_ctl, host->base + MIO_EMM_IO_CTL(host));
+ mdelay(1);
+ }
+
+	/*
+	 * Scale the measured/assumed calibration value to picoseconds:
+	 * multiply the delay value by 10 ns (10000 ps) and divide by the
+	 * number of taps to get the estimated per-tap delay.  The nominal
+	 * value is 125 ps per tap.
+	 */
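+	/* e.g. delay = 4 with a total of 512 taps (as the 78 ps/tap note
+	 * above implies) gives (4 * 10000) / 512 = 78 ps per tap
+	 */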
+ ps = (delay * PS_10000) / TOTAL_NO_OF_TAPS;
+ if (host->per_tap_delay != ps) {
+ dev_info(host->dev, "%s delay:%d per-tap delay:%dpS\n",
+ how, delay, ps);
+ host->per_tap_delay = ps;
+ host->delay_logged = 0;
+ }
+}
+
static int thunder_mmc_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
@@ -61,6 +188,8 @@ static int thunder_mmc_probe(struct pci_dev *pdev,
struct device_node *child_node;
struct cvm_mmc_host *host;
int ret, i = 0;
+ u8 chip_id;
+ u8 rev;
host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
if (!host)
@@ -83,6 +212,7 @@ static int thunder_mmc_probe(struct pci_dev *pdev,
/* On ThunderX these are identical */
host->dma_base = host->base;
+ host->pdev = pdev;
host->reg_off = 0x2000;
host->reg_off_dma = 0x160;
@@ -111,24 +241,67 @@ static int thunder_mmc_probe(struct pci_dev *pdev,
host->need_irq_handler_lock = true;
host->last_slot = -1;
- ret = dma_set_mask(dev, DMA_BIT_MASK(48));
if (ret)
goto error;
+ rev = pdev->revision;
+ chip_id = (pdev->subsystem_device >> 8) & 0xff;
+ switch (chip_id) {
+ case PCI_SUBSYS_DEVID_96XX:
+ if (rev == REV_ID_0) {
+ host->calibrate_glitch = true;
+ host->cond_clock_glitch = true;
+ host->max_freq = MHZ_100;
+ } else if (rev == REV_ID_1) {
+ host->cond_clock_glitch = true;
+ host->max_freq = MHZ_167;
+ } else if (rev == REV_ID_2) {
+ host->tap_requires_noclk = true;
+ host->max_freq = MHZ_112_5;
+ } else if (rev > REV_ID_2) {
+ host->tap_requires_noclk = true;
+ host->max_freq = MHZ_200;
+ }
+ break;
+ case PCI_SUBSYS_DEVID_95XXMM:
+ case PCI_SUBSYS_DEVID_98XX:
+ host->tap_requires_noclk = true;
+ host->max_freq = MHZ_200;
+ break;
+ case PCI_SUBSYS_DEVID_95XX:
+ if (rev == REV_ID_0)
+ host->cond_clock_glitch = true;
+ host->max_freq = MHZ_167;
+ break;
+ case PCI_SUBSYS_DEVID_LOKI:
+ host->max_freq = MHZ_167;
+ break;
+ default:
+ break;
+ }
/*
* Clear out any pending interrupts that may be left over from
* bootloader. Writing 1 to the bits clears them.
+	 * Also clear the DMA FIFO after disabling IRQs, then clear any dangling events.
*/
- writeq(127, host->base + MIO_EMM_INT_EN(host));
- writeq(3, host->base + MIO_EMM_DMA_INT_ENA_W1C(host));
- /* Clear DMA FIFO */
- writeq(BIT_ULL(16), host->base + MIO_EMM_DMA_FIFO_CFG(host));
+ writeq(~0, host->base + MIO_EMM_INT(host));
+ writeq(~0, host->dma_base + MIO_EMM_DMA_INT_ENA_W1C(host));
+ writeq(~0, host->base + MIO_EMM_INT_EN_CLR(host));
+ writeq(MIO_EMM_DMA_FIFO_CFG_CLR,
+ host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
+ writeq(~0, host->dma_base + MIO_EMM_DMA_INT(host));
ret = thunder_mmc_register_interrupts(host, pdev);
if (ret)
goto error;
- for_each_child_of_node(node, child_node) {
+	/* Run calibration to determine the per-tap delay, which is later
+	 * used to compute the tap values programmed into MIO_EMM_TIMING.
+	 */
+ thunder_calibrate_mmc(host);
+
+ for_each_available_child_of_node(node, child_node) {
/*
* mmc_of_parse and devm* require one device per slot.
* Create a dummy device per slot and set the node pointer to
@@ -141,12 +314,15 @@ static int thunder_mmc_probe(struct pci_dev *pdev,
if (!host->slot_pdev[i])
continue;
+ dev_info(dev, "Probing slot %d\n", i);
+
ret = cvm_mmc_of_slot_probe(&host->slot_pdev[i]->dev, host);
if (ret)
goto error;
}
i++;
}
+
dev_info(dev, "probed\n");
return 0;
@@ -176,8 +352,11 @@ static void thunder_mmc_remove(struct pci_dev *pdev)
cvm_mmc_of_slot_remove(host->slot[i]);
dma_cfg = readq(host->dma_base + MIO_EMM_DMA_CFG(host));
- dma_cfg &= ~MIO_EMM_DMA_CFG_EN;
+ dma_cfg |= MIO_EMM_DMA_CFG_CLR;
writeq(dma_cfg, host->dma_base + MIO_EMM_DMA_CFG(host));
+ do {
+ dma_cfg = readq(host->dma_base + MIO_EMM_DMA_CFG(host));
+ } while (dma_cfg & MIO_EMM_DMA_CFG_EN);
clk_disable_unprepare(host->clk);
pci_release_regions(pdev);
diff --git a/drivers/mmc/host/cavium.c b/drivers/mmc/host/cavium.c
index c5da3aaee334..6e8810f101bf 100644
--- a/drivers/mmc/host/cavium.c
+++ b/drivers/mmc/host/cavium.c
@@ -25,6 +25,8 @@
#include <linux/regulator/consumer.h>
#include <linux/scatterlist.h>
#include <linux/time.h>
+#include <linux/iommu.h>
+#include <linux/swiotlb.h>
#include "cavium.h"
@@ -38,6 +40,8 @@ const char *cvm_mmc_irq_names[] = {
"MMC Switch Error",
"MMC DMA int Fifo",
"MMC DMA int",
+ "MMC NCB Fault",
+ "MMC RAS",
};
/*
@@ -71,7 +75,7 @@ static struct cvm_mmc_cr_type cvm_mmc_cr_types[] = {
{0, 1}, /* CMD16 */
{1, 1}, /* CMD17 */
{1, 1}, /* CMD18 */
- {3, 1}, /* CMD19 */
+ {2, 1}, /* CMD19 */
{2, 1}, /* CMD20 */
{0, 0}, /* CMD21 */
{0, 0}, /* CMD22 */
@@ -118,6 +122,241 @@ static struct cvm_mmc_cr_type cvm_mmc_cr_types[] = {
{0, 0} /* CMD63 */
};
+/*
+ * EMM_CMD hold time from rising edge of EMMC_CLK.
+ * Typically 3.0 ns at frequencies < 26 MHz.
+ * Typically 3.0 ns at frequencies <= 52 MHz SDR.
+ * Typically 2.5 ns at frequencies <= 52 MHz DDR.
+ * Typically 0.8 ns at frequencies > 52 MHz SDR.
+ * Typically 0.8 ns at frequencies > 52 MHz DDR.
+ *
+ * Values are expressed in picoseconds (ps)
+ */
+static const u32 default_cmd_out_taps_dly[MMC_TIMINGS_COUNT] = {
+ 5000, /* Legacy */
+ 2500, /* MMC_HS */
+ 2000, /* SD_HS */
+ 3000, /* UHS_SDR12 */
+ 2000, /* UHS_SDR25 */
+ 2000, /* UHS_SDR50 */
+ 800, /* UHS_SDR104 */
+ 1500, /* UHS_DDR50 */
+ 1500, /* MMC_DDR52 */
+ 800, /* HS200 */
+ 800 /* HS400 */
+};
+
+/* Hints are expressed as number of taps (clock cycles) */
+static const u32 default_hints_taps_dly[MMC_TIMINGS_COUNT] = {
+ 39, /* Legacy */
+ 32, /* MMC_HS */
+ 26, /* SD_HS */
+ 39, /* UHS_SDR12 */
+ 26, /* UHS_SDR25 */
+ 26, /* UHS_SDR50 */
+ 10, /* UHS_SDR104 */
+ 20, /* UHS_DDR50 */
+ 20, /* MMC_DDR52 */
+ 10, /* HS200 */
+ 10 /* HS400 */
+};
+
+static const u32 default_cmd_in_taps_dly[MMC_TIMINGS_COUNT] = {
+ 4000, /* Legacy */
+ 4000, /* MMC_HS */
+ 4000, /* SD_HS */
+ 4000, /* UHS_SDR12 */
+ 4000, /* UHS_SDR25 */
+ 4000, /* UHS_SDR50 */
+ 4000, /* UHS_SDR104 */
+ 4000, /* UHS_DDR50 */
+ 4000, /* MMC_DDR52 */
+ 4000, /* HS200 */
+ 4000 /* HS400 */
+};
+
+static const char * const mmc_modes_name[MMC_TIMINGS_COUNT] = {
+ "Legacy",
+ "MMC HS",
+ "SD HS",
+ "SD UHS SDR12",
+ "SD UHS SDR25",
+ "SD UHS SDR50",
+ "SD UHS SDR104",
+ "SD UHS DDR50",
+ "MMC DDR52",
+ "MMC HS200",
+ "MMC HS400"
+};
+
+static int tapdance;
+module_param(tapdance, int, 0644);
+MODULE_PARM_DESC(tapdance, "adjust bus-timing: (0=mid-eye, positive=Nth_fastest_tap)");
+
+static int clk_scale = 100;
+module_param(clk_scale, int, 0644);
+MODULE_PARM_DESC(clk_scale, "percent scale data_/cmd_out taps (default 100)");
+
+static bool fixed_timing;
+module_param(fixed_timing, bool, 0444);
+MODULE_PARM_DESC(fixed_timing, "use fixed data_/cmd_out taps");
+
+static bool ddr_cmd_taps;
+module_param(ddr_cmd_taps, bool, 0644);
+MODULE_PARM_DESC(ddr_cmd_taps, "reduce cmd_out_taps in DDR modes, as before");
+
+/* Tuning is invoked from several places below, so forward-declare it */
+static int cvm_execute_tuning(struct mmc_host *mmc, u32 opcode);
+
+static bool __cvm_is_mmc_timing_ddr(unsigned char timing)
+{
+ switch (timing) {
+ case MMC_TIMING_UHS_DDR50:
+ case MMC_TIMING_MMC_DDR52:
+ case MMC_TIMING_MMC_HS400:
+ return true;
+ default:
+ return false;
+ }
+ return false;
+}
+
+bool cvm_is_mmc_timing_ddr(struct cvm_mmc_slot *slot)
+{
+ return __cvm_is_mmc_timing_ddr(slot->mmc->ios.timing);
+}
+
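+/*
+ * On parts that require it (tap_requires_noclk), gate the eMMC clock off
+ * via MIO_EMM_DEBUG while MIO_EMM_TIMING is being rewritten.
+ */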
+static void cvm_mmc_clk_config(struct cvm_mmc_host *host, bool flag)
+{
+ u64 emm_debug;
+
+ if (!host->tap_requires_noclk)
+ return;
+
+ /* Turn off the clock */
+ if (flag) {
+ emm_debug = readq(host->base + MIO_EMM_DEBUG(host));
+ emm_debug |= MIO_EMM_DEBUG_CLK_DIS;
+ writeq(emm_debug, host->base + MIO_EMM_DEBUG(host));
+ udelay(1);
+ emm_debug = readq(host->base + MIO_EMM_DEBUG(host));
+ emm_debug |= MIO_EMM_DEBUG_RDSYNC;
+ writeq(emm_debug, host->base + MIO_EMM_DEBUG(host));
+ udelay(1);
+ } else {
+ /* Turn on the clock */
+ emm_debug = readq(host->base + MIO_EMM_DEBUG(host));
+		emm_debug &= ~MIO_EMM_DEBUG_RDSYNC;
+ writeq(emm_debug, host->base + MIO_EMM_DEBUG(host));
+ udelay(1);
+ emm_debug = readq(host->base + MIO_EMM_DEBUG(host));
+		emm_debug &= ~MIO_EMM_DEBUG_CLK_DIS;
+ writeq(emm_debug, host->base + MIO_EMM_DEBUG(host));
+ udelay(1);
+ }
+}
+
+static void cvm_mmc_set_timing(struct cvm_mmc_slot *slot)
+{
+ struct cvm_mmc_host *host = slot->host;
+
+ if (!is_mmc_otx2(host))
+ return;
+
+ cvm_mmc_clk_config(host, CLK_OFF);
+ writeq(slot->taps, host->base + MIO_EMM_TIMING(host));
+ cvm_mmc_clk_config(host, CLK_ON);
+}
+
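+/*
+ * Convert a requested delay in picoseconds into a tap count for the
+ * MIO_EMM_TIMING fields, clamped to the 6-bit maximum of 63.
+ * Worked example (hypothetical values): per_tap_delay = 125 ps,
+ * clk_scale = 100, ps = 2500 -> DIV_ROUND_UP(2500 * 100, 12500) = 20 taps.
+ */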
+static int tout(struct cvm_mmc_slot *slot, int ps, int hint)
+{
+ struct cvm_mmc_host *host = slot->host;
+ struct mmc_host *mmc = slot->mmc;
+ int tap_ps = host->per_tap_delay;
+ int timing = mmc->ios.timing;
+ static int old_scale;
+ int taps;
+
+ if (fixed_timing)
+ return hint;
+
+ if (!hint)
+ hint = 63;
+
+ if (!tap_ps)
+ return hint;
+
+ taps = min_t(int, DIV_ROUND_UP(ps * clk_scale, (tap_ps * 100)), 63);
+
+ /* when modparam is adjusted, re-announce timing */
+ if (old_scale != clk_scale) {
+ host->delay_logged = 0;
+ old_scale = clk_scale;
+ }
+
+ if (!test_and_set_bit(timing,
+ &host->delay_logged))
+ dev_info(host->dev, "mmc%d.ios_timing:%d %dpS hint:%d taps:%d\n",
+ mmc->index, timing, ps, hint, taps);
+
+ return taps;
+}
+
+static int cvm_mmc_configure_delay(struct cvm_mmc_slot *slot)
+{
+ struct cvm_mmc_host *host = slot->host;
+ struct mmc_host *mmc = slot->mmc;
+ const char *mode;
+
+ pr_debug("slot%d.configure_delay\n", slot->bus_id);
+
+ if (is_mmc_8xxx(host)) {
+		/* MIO_EMM_SAMPLE only exists up to T83XX */
+ u64 emm_sample =
+ FIELD_PREP(MIO_EMM_SAMPLE_CMD_CNT, slot->cmd_cnt) |
+ FIELD_PREP(MIO_EMM_SAMPLE_DAT_CNT, slot->data_cnt);
+ writeq(emm_sample, host->base + MIO_EMM_SAMPLE(host));
+ } else {
+ int cin, din, cout, dout;
+
+ dev_dbg(host->dev,
+ "%s: mode=%s, cmd_in=%ups, data_in=%ups, cmd_out=%ups, data_out=%ups\n",
+ __func__, mmc_modes_name[mmc->ios.timing],
+ slot->cmd_in_taps_dly[mmc->ios.timing],
+ slot->data_in_taps_dly[mmc->ios.timing],
+ slot->cmd_out_taps_dly[mmc->ios.timing],
+ slot->data_out_taps_dly[mmc->ios.timing]);
+ /* Configure timings */
+ cin = tout(slot,
+ slot->cmd_in_taps_dly[mmc->ios.timing],
+ MAX_NO_OF_TAPS / 2);
+ din = tout(slot,
+ slot->data_in_taps_dly[mmc->ios.timing],
+ MAX_NO_OF_TAPS / 2);
+ cout = tout(slot,
+ slot->cmd_out_taps_dly[mmc->ios.timing],
+ default_hints_taps_dly[mmc->ios.timing]);
+ dout = tout(slot,
+ slot->data_out_taps_dly[mmc->ios.timing],
+ default_hints_taps_dly[mmc->ios.timing]);
+ mode = mmc_modes_name[mmc->ios.timing];
+
+ dev_dbg(host->dev,
+ "%s: command in tap: %d, command out tap: %d, data in tap: %d, data out tap: %d\n",
+ mode, cin, cout, din, dout);
+ slot->taps =
+ FIELD_PREP(MIO_EMM_TIMING_CMD_IN, cin) |
+ FIELD_PREP(MIO_EMM_TIMING_CMD_OUT, cout) |
+ FIELD_PREP(MIO_EMM_TIMING_DATA_IN, din) |
+ FIELD_PREP(MIO_EMM_TIMING_DATA_OUT, dout);
+
+ pr_debug("slot%d.taps %llx\n", slot->bus_id, slot->taps);
+ cvm_mmc_set_timing(slot);
+ }
+
+ return 0;
+}
+
static struct cvm_mmc_cr_mods cvm_mmc_get_cr_mods(struct mmc_command *cmd)
{
struct cvm_mmc_cr_type *cr;
@@ -175,14 +414,14 @@ static void check_switch_errors(struct cvm_mmc_host *host)
dev_err(host->dev, "Switch bus width error\n");
}
-static void clear_bus_id(u64 *reg)
+static inline void clear_bus_id(u64 *reg)
{
u64 bus_id_mask = GENMASK_ULL(61, 60);
*reg &= ~bus_id_mask;
}
-static void set_bus_id(u64 *reg, int bus_id)
+static inline void set_bus_id(u64 *reg, int bus_id)
{
clear_bus_id(reg);
*reg |= FIELD_PREP(GENMASK(61, 60), bus_id);
@@ -193,25 +432,69 @@ static int get_bus_id(u64 reg)
return FIELD_GET(GENMASK_ULL(61, 60), reg);
}
-/*
- * We never set the switch_exe bit since that would interfere
- * with the commands send by the MMC core.
- */
-static void do_switch(struct cvm_mmc_host *host, u64 emm_switch)
+/* save old slot details, switch power */
+static bool pre_switch(struct cvm_mmc_host *host, u64 emm_switch)
{
- int retries = 100;
- u64 rsp_sts;
- int bus_id;
+ int bus_id = get_bus_id(emm_switch);
+ struct cvm_mmc_slot *slot = host->slot[bus_id];
+ struct cvm_mmc_slot *old_slot;
+ bool same_vqmmc = false;
- /*
- * Modes setting only taken from slot 0. Work around that hardware
- * issue by first switching to slot 0.
+ if (host->last_slot == bus_id)
+ return false;
+
+ /* when VQMMC is switched, tri-state CMDn over any slot change
+ * to avoid transient states on D0-7 or CLK from level-shifters
*/
- bus_id = get_bus_id(emm_switch);
- clear_bus_id(&emm_switch);
- writeq(emm_switch, host->base + MIO_EMM_SWITCH(host));
+ if (host->use_vqmmc) {
+ writeq(1ull << 3, host->base + MIO_EMM_CFG(host));
+ udelay(10);
+ }
+
+ if (host->last_slot >= 0 && host->slot[host->last_slot]) {
+ old_slot = host->slot[host->last_slot];
+ old_slot->cached_switch =
+ readq(host->base + MIO_EMM_SWITCH(host));
+ old_slot->cached_rca = readq(host->base + MIO_EMM_RCA(host));
+
+ same_vqmmc = (slot->mmc->supply.vqmmc ==
+ old_slot->mmc->supply.vqmmc);
+ if (!same_vqmmc && !IS_ERR_OR_NULL(old_slot->mmc->supply.vqmmc))
+ regulator_disable(old_slot->mmc->supply.vqmmc);
+ }
+
+ if (!same_vqmmc && !IS_ERR_OR_NULL(slot->mmc->supply.vqmmc)) {
+ int e = regulator_enable(slot->mmc->supply.vqmmc);
+
+ if (e)
+ dev_err(host->dev, "mmc-slot@%d.vqmmc err %d\n",
+ bus_id, e);
+ }
+
+ host->last_slot = slot->bus_id;
+
+ return true;
+}
+
+static void post_switch(struct cvm_mmc_host *host, u64 emm_switch)
+{
+ int bus_id = get_bus_id(emm_switch);
+ struct cvm_mmc_slot *slot = host->slot[bus_id];
+
+ if (host->use_vqmmc) {
+ /* enable new CMDn */
+ writeq(1ull << bus_id, host->base + MIO_EMM_CFG(host));
+ udelay(10);
+ }
+
+ writeq(slot->cached_rca, host->base + MIO_EMM_RCA(host));
+}
+
+static inline void mode_switch(struct cvm_mmc_host *host, u64 emm_switch)
+{
+ u64 rsp_sts;
+ int retries = 100;
- set_bus_id(&emm_switch, bus_id);
writeq(emm_switch, host->base + MIO_EMM_SWITCH(host));
/* wait for the switch to finish */
@@ -221,15 +504,49 @@ static void do_switch(struct cvm_mmc_host *host, u64 emm_switch)
break;
udelay(10);
} while (--retries);
+}
+
+/*
+ * We never set the switch_exe bit since that would interfere
+ * with the commands sent by the MMC core.
+ */
+static void do_switch(struct cvm_mmc_host *host, u64 emm_switch)
+{
+ int bus_id = get_bus_id(emm_switch);
+ struct cvm_mmc_slot *slot = host->slot[bus_id];
+ bool slot_changed = pre_switch(host, emm_switch);
+
+ /*
+ * Modes setting only taken from slot 0. Work around that hardware
+ * issue by first switching to slot 0.
+ */
+ if (bus_id) {
+ u64 switch0 = emm_switch;
+
+ clear_bus_id(&switch0);
+ mode_switch(host, switch0);
+ }
+
+ mode_switch(host, emm_switch);
check_switch_errors(host);
+ if (slot_changed)
+ post_switch(host, emm_switch);
+ slot->cached_switch = emm_switch;
+ if (emm_switch & MIO_EMM_SWITCH_CLK)
+ slot->cmd6_pending = false;
}
+/* Does the hardware switch state need to change to match the requested value? */
static bool switch_val_changed(struct cvm_mmc_slot *slot, u64 new_val)
{
/* Match BUS_ID, HS_TIMING, BUS_WIDTH, POWER_CLASS, CLK_HI, CLK_LO */
- u64 match = 0x3001070fffffffffull;
+ /* For 9xxx add HS200_TIMING and HS400_TIMING */
+ u64 match = (is_mmc_otx2(slot->host)) ?
+ 0x3007070fffffffffull : 0x3001070fffffffffull;
+ if (!slot->host->powered)
+ return true;
return (slot->cached_switch & match) != (new_val & match);
}
@@ -247,58 +564,62 @@ static void set_wdog(struct cvm_mmc_slot *slot, unsigned int ns)
writeq(timeout, slot->host->base + MIO_EMM_WDOG(slot->host));
}
+static void emmc_io_drive_setup(struct cvm_mmc_slot *slot)
+{
+ u64 ioctl_cfg;
+ struct cvm_mmc_host *host = slot->host;
+
+ /* Setup drive and slew only for 9x */
+ if (is_mmc_otx2(host)) {
+ if ((slot->drive < 0) || (slot->slew < 0))
+ return;
+ /* Setup the emmc interface current drive
+ * strength & clk slew rate.
+ */
+ ioctl_cfg = FIELD_PREP(MIO_EMM_IO_CTL_DRIVE, slot->drive) |
+ FIELD_PREP(MIO_EMM_IO_CTL_SLEW, slot->slew);
+ writeq(ioctl_cfg, host->base + MIO_EMM_IO_CTL(host));
+ }
+}
+
static void cvm_mmc_reset_bus(struct cvm_mmc_slot *slot)
{
struct cvm_mmc_host *host = slot->host;
u64 emm_switch, wdog;
- emm_switch = readq(slot->host->base + MIO_EMM_SWITCH(host));
- emm_switch &= ~(MIO_EMM_SWITCH_EXE | MIO_EMM_SWITCH_ERR0 |
- MIO_EMM_SWITCH_ERR1 | MIO_EMM_SWITCH_ERR2);
+ emm_switch = readq(host->base + MIO_EMM_SWITCH(host));
+ emm_switch &= ~(MIO_EMM_SWITCH_EXE | MIO_EMM_SWITCH_ERRS);
set_bus_id(&emm_switch, slot->bus_id);
- wdog = readq(slot->host->base + MIO_EMM_WDOG(host));
- do_switch(slot->host, emm_switch);
-
- slot->cached_switch = emm_switch;
+ wdog = readq(host->base + MIO_EMM_WDOG(host));
+ do_switch(host, emm_switch);
+ host->powered = true;
msleep(20);
- writeq(wdog, slot->host->base + MIO_EMM_WDOG(host));
+ writeq(wdog, host->base + MIO_EMM_WDOG(host));
}
/* Switch to another slot if needed */
static void cvm_mmc_switch_to(struct cvm_mmc_slot *slot)
{
struct cvm_mmc_host *host = slot->host;
- struct cvm_mmc_slot *old_slot;
- u64 emm_sample, emm_switch;
if (slot->bus_id == host->last_slot)
return;
- if (host->last_slot >= 0 && host->slot[host->last_slot]) {
- old_slot = host->slot[host->last_slot];
- old_slot->cached_switch = readq(host->base + MIO_EMM_SWITCH(host));
- old_slot->cached_rca = readq(host->base + MIO_EMM_RCA(host));
- }
+ do_switch(host, slot->cached_switch);
+ host->powered = true;
- writeq(slot->cached_rca, host->base + MIO_EMM_RCA(host));
- emm_switch = slot->cached_switch;
- set_bus_id(&emm_switch, slot->bus_id);
- do_switch(host, emm_switch);
-
- emm_sample = FIELD_PREP(MIO_EMM_SAMPLE_CMD_CNT, slot->cmd_cnt) |
- FIELD_PREP(MIO_EMM_SAMPLE_DAT_CNT, slot->dat_cnt);
- writeq(emm_sample, host->base + MIO_EMM_SAMPLE(host));
-
- host->last_slot = slot->bus_id;
+ emmc_io_drive_setup(slot);
+ cvm_mmc_configure_delay(slot);
}
-static void do_read(struct cvm_mmc_host *host, struct mmc_request *req,
+static void do_read(struct cvm_mmc_slot *slot, struct mmc_request *req,
u64 dbuf)
{
- struct sg_mapping_iter *smi = &host->smi;
+ struct cvm_mmc_host *host = slot->host;
+ struct sg_mapping_iter *smi = &slot->smi;
int data_len = req->data->blocks * req->data->blksz;
int bytes_xfered, shift = -1;
u64 dat = 0;
@@ -365,7 +686,7 @@ static void set_cmd_response(struct cvm_mmc_host *host, struct mmc_request *req,
}
}
-static int get_dma_dir(struct mmc_data *data)
+static inline int get_dma_dir(struct mmc_data *data)
{
return (data->flags & MMC_DATA_WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
}
@@ -374,6 +695,9 @@ static int finish_dma_single(struct cvm_mmc_host *host, struct mmc_data *data)
{
data->bytes_xfered = data->blocks * data->blksz;
data->error = 0;
+
+ writeq(MIO_EMM_DMA_FIFO_CFG_CLR,
+ host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
return 1;
}
@@ -382,6 +706,7 @@ static int finish_dma_sg(struct cvm_mmc_host *host, struct mmc_data *data)
{
u64 fifo_cfg;
int count;
+ void __iomem *dma_intp = host->dma_base + MIO_EMM_DMA_INT(host);
/* Check if there are any pending requests left */
fifo_cfg = readq(host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
@@ -392,8 +717,16 @@ static int finish_dma_sg(struct cvm_mmc_host *host, struct mmc_data *data)
data->bytes_xfered = data->blocks * data->blksz;
data->error = 0;
- /* Clear and disable FIFO */
- writeq(BIT_ULL(16), host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
+ writeq(MIO_EMM_DMA_FIFO_CFG_CLR,
+ host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
+
+ /* on read, wait for internal buffer to flush out to mem */
+ if (get_dma_dir(data) == DMA_FROM_DEVICE) {
+ while (!(readq(dma_intp) & MIO_EMM_DMA_INT_DMA))
+ udelay(10);
+ writeq(MIO_EMM_DMA_INT_DMA, dma_intp);
+ }
+
dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
return 1;
}
@@ -415,7 +748,8 @@ static int check_status(u64 rsp_sts)
if (rsp_sts & MIO_EMM_RSP_STS_RSP_TIMEOUT ||
rsp_sts & MIO_EMM_RSP_STS_BLK_TIMEOUT)
return -ETIMEDOUT;
- if (rsp_sts & MIO_EMM_RSP_STS_DBUF_ERR)
+ if (rsp_sts & MIO_EMM_RSP_STS_DBUF_ERR ||
+ rsp_sts & MIO_EMM_RSP_STS_BLK_CRC_ERR)
return -EIO;
return 0;
}
@@ -435,16 +769,29 @@ static void cleanup_dma(struct cvm_mmc_host *host, u64 rsp_sts)
irqreturn_t cvm_mmc_interrupt(int irq, void *dev_id)
{
struct cvm_mmc_host *host = dev_id;
- struct mmc_request *req;
+ struct mmc_request *req = NULL;
+ struct cvm_mmc_slot *slot = NULL;
unsigned long flags = 0;
u64 emm_int, rsp_sts;
bool host_done;
+ int bus_id;
if (host->need_irq_handler_lock)
spin_lock_irqsave(&host->irq_handler_lock, flags);
else
__acquire(&host->irq_handler_lock);
+ rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
+ bus_id = get_bus_id(rsp_sts);
+ if (host->slot[bus_id]) {
+ slot = host->slot[bus_id];
+ req = slot->current_req;
+ } else {
+ /* Request can't be handled without the slot mapping */
+ req = NULL;
+ slot = NULL;
+ }
+
/* Clear interrupt bits (write 1 clears ). */
emm_int = readq(host->base + MIO_EMM_INT(host));
writeq(emm_int, host->base + MIO_EMM_INT(host));
@@ -452,25 +799,32 @@ irqreturn_t cvm_mmc_interrupt(int irq, void *dev_id)
if (emm_int & MIO_EMM_INT_SWITCH_ERR)
check_switch_errors(host);
- req = host->current_req;
- if (!req)
+ if (!req || !slot)
goto out;
- rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
+	/*
+	 * DMA_PEND means the DMA has stalled (e.g. on CRC errors).
+	 * Start the teardown; an interrupt arrives on completion and the
+	 * MMC stack retries the request.
+	 */
+ if ((rsp_sts & MIO_EMM_RSP_STS_DMA_PEND) && slot->dma_active) {
+ cleanup_dma(host, rsp_sts);
+ goto out;
+ }
+
/*
* dma_val set means DMA is still in progress. Don't touch
* the request and wait for the interrupt indicating that
* the DMA is finished.
*/
- if ((rsp_sts & MIO_EMM_RSP_STS_DMA_VAL) && host->dma_active)
+ if ((rsp_sts & MIO_EMM_RSP_STS_DMA_VAL) && slot->dma_active)
goto out;
- if (!host->dma_active && req->data &&
+ if (!slot->dma_active && req->data &&
(emm_int & MIO_EMM_INT_BUF_DONE)) {
unsigned int type = (rsp_sts >> 7) & 3;
if (type == 1)
- do_read(host, req, rsp_sts & MIO_EMM_RSP_STS_DBUF);
+ do_read(slot, req, rsp_sts & MIO_EMM_RSP_STS_DBUF);
else if (type == 2)
do_write(req);
}
@@ -480,12 +834,16 @@ irqreturn_t cvm_mmc_interrupt(int irq, void *dev_id)
emm_int & MIO_EMM_INT_CMD_ERR ||
emm_int & MIO_EMM_INT_DMA_ERR;
+	/* Also treat the NCB_FLT interrupt as a completion event on OcteonTX2 */
+ if (is_mmc_otx2(host))
+ host_done = host_done || emm_int & MIO_EMM_INT_NCB_FLT;
+
if (!(host_done && req->done))
goto no_req_done;
req->cmd->error = check_status(rsp_sts);
- if (host->dma_active && req->data)
+ if (slot->dma_active && req->data)
if (!finish_dma(host, req->data))
goto no_req_done;
@@ -494,7 +852,18 @@ irqreturn_t cvm_mmc_interrupt(int irq, void *dev_id)
(rsp_sts & MIO_EMM_RSP_STS_DMA_PEND))
cleanup_dma(host, rsp_sts);
- host->current_req = NULL;
+	/* Follow a CMD6 timing/width change with an immediate MIO_EMM_SWITCH update */
+ if (slot && slot->cmd6_pending) {
+ if (host_done && !req->cmd->error) {
+ do_switch(host, slot->want_switch);
+ emmc_io_drive_setup(slot);
+ cvm_mmc_configure_delay(slot);
+ } else if (slot) {
+ slot->cmd6_pending = false;
+ }
+ }
+
+ slot->current_req = NULL;
req->done(req);
no_req_done:
@@ -510,6 +879,74 @@ out:
return IRQ_RETVAL(emm_int != 0);
}
+#if IS_ENABLED(CONFIG_MMC_PSTORE)
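+/*
+ * Panic-write support: poll MIO_EMM_INT for DMA completion instead of
+ * relying on interrupts, which may not be serviceable at panic time.
+ */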
+static int cvm_req_completion_poll(struct mmc_host *host, unsigned long msecs)
+{
+ struct cvm_mmc_slot *slot = mmc_priv(host);
+ struct cvm_mmc_host *cvm_host = slot->host;
+ u64 emm_int;
+
+ while (msecs) {
+ emm_int = readq(cvm_host->base + MIO_EMM_INT(cvm_host));
+
+ if (emm_int & MIO_EMM_INT_DMA_DONE)
+ return 0;
+ else if (emm_int & MIO_EMM_INT_DMA_ERR)
+ return -EIO;
+ mdelay(1);
+ msecs--;
+ }
+
+ return -ETIMEDOUT;
+}
+
+static void cvm_req_cleanup_pending(struct mmc_host *host)
+{
+ struct cvm_mmc_slot *slot = mmc_priv(host);
+ struct cvm_mmc_host *cvm_host = slot->host;
+ u64 fifo_cfg;
+ u64 dma_cfg;
+ u64 emm_int;
+ int cnt;
+
+ cvm_host->pstore = 1;
+
+ /* Clear pending DMA FIFO queue */
+ fifo_cfg = readq(cvm_host->dma_base + MIO_EMM_DMA_FIFO_CFG(cvm_host));
+ if (FIELD_GET(MIO_EMM_DMA_FIFO_CFG_COUNT, fifo_cfg))
+ writeq(MIO_EMM_DMA_FIFO_CFG_CLR,
+ cvm_host->dma_base + MIO_EMM_DMA_FIFO_CFG(cvm_host));
+
+ /* Clear ongoing DMA, if there is any */
+ dma_cfg = readq(cvm_host->dma_base + MIO_EMM_DMA_CFG(cvm_host));
+ if (dma_cfg & MIO_EMM_DMA_CFG_EN) {
+ dma_cfg |= MIO_EMM_DMA_CFG_CLR;
+ writeq(dma_cfg, cvm_host->dma_base +
+ MIO_EMM_DMA_CFG(cvm_host));
+ do {
+ dma_cfg = readq(cvm_host->dma_base +
+ MIO_EMM_DMA_CFG(cvm_host));
+ } while (dma_cfg & MIO_EMM_DMA_CFG_EN);
+ }
+
+ /* Clear pending DMA interrupts */
+ emm_int = readq(cvm_host->base + MIO_EMM_INT(cvm_host));
+ if (emm_int)
+ writeq(emm_int, cvm_host->base + MIO_EMM_INT(cvm_host));
+
+ /* Clear prepared and yet to be fired DMA requests */
+ for (cnt = 0; cnt < CAVIUM_MAX_MMC; cnt++) {
+ if (cvm_host->slot[cnt]) {
+ if (cvm_host->slot[cnt]->current_req) {
+ cvm_host->slot[cnt]->current_req = NULL;
+ cvm_host->slot[cnt]->dma_active = false;
+ break;
+ }
+ }
+ }
+}
+#endif
+
/*
* Program DMA_CFG and if needed DMA_ADR.
* Returns 0 on error, DMA address otherwise.
@@ -609,9 +1046,9 @@ static u64 prepare_dma_sg(struct cvm_mmc_host *host, struct mmc_data *data)
error:
WARN_ON_ONCE(1);
+ writeq(MIO_EMM_DMA_FIFO_CFG_CLR,
+ host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
- /* Disable FIFO */
- writeq(BIT_ULL(16), host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
return 0;
}
@@ -653,7 +1090,10 @@ static void cvm_mmc_dma_request(struct mmc_host *mmc,
struct cvm_mmc_slot *slot = mmc_priv(mmc);
struct cvm_mmc_host *host = slot->host;
struct mmc_data *data;
- u64 emm_dma, addr;
+ u64 emm_dma, addr, int_enable_mask = 0;
+
+ /* cleared by successful termination */
+ mrq->cmd->error = -EINVAL;
if (!mrq->data || !mrq->data->sg || !mrq->data->sg_len ||
!mrq->stop || mrq->stop->opcode != MMC_STOP_TRANSMISSION) {
@@ -665,14 +1105,12 @@ static void cvm_mmc_dma_request(struct mmc_host *mmc,
cvm_mmc_switch_to(slot);
data = mrq->data;
+
pr_debug("DMA request blocks: %d block_size: %d total_size: %d\n",
data->blocks, data->blksz, data->blocks * data->blksz);
if (data->timeout_ns)
set_wdog(slot, data->timeout_ns);
- WARN_ON(host->current_req);
- host->current_req = mrq;
-
emm_dma = prepare_ext_dma(mmc, mrq);
addr = prepare_dma(host, data);
if (!addr) {
@@ -680,9 +1118,19 @@ static void cvm_mmc_dma_request(struct mmc_host *mmc,
goto error;
}
- host->dma_active = true;
- host->int_enable(host, MIO_EMM_INT_CMD_ERR | MIO_EMM_INT_DMA_DONE |
- MIO_EMM_INT_DMA_ERR);
+ mrq->host = mmc;
+ WARN_ON(slot->current_req);
+ slot->current_req = mrq;
+ slot->dma_active = true;
+
+ int_enable_mask = MIO_EMM_INT_CMD_ERR | MIO_EMM_INT_DMA_DONE |
+ MIO_EMM_INT_DMA_ERR;
+
+	/* Enable the NCB_FLT interrupt on OcteonTX2 */
+ if (is_mmc_otx2(host))
+ int_enable_mask |= MIO_EMM_INT_NCB_FLT;
+
+ host->int_enable(host, int_enable_mask);
if (host->dmar_fixup)
host->dmar_fixup(host, mrq->cmd, data, addr);
@@ -706,16 +1154,17 @@ error:
host->release_bus(host);
}
-static void do_read_request(struct cvm_mmc_host *host, struct mmc_request *mrq)
+static void do_read_request(struct cvm_mmc_slot *slot, struct mmc_request *mrq)
{
- sg_miter_start(&host->smi, mrq->data->sg, mrq->data->sg_len,
+ sg_miter_start(&slot->smi, mrq->data->sg, mrq->data->sg_len,
SG_MITER_ATOMIC | SG_MITER_TO_SG);
}
-static void do_write_request(struct cvm_mmc_host *host, struct mmc_request *mrq)
+static void do_write_request(struct cvm_mmc_slot *slot, struct mmc_request *mrq)
{
+ struct cvm_mmc_host *host = slot->host;
unsigned int data_len = mrq->data->blocks * mrq->data->blksz;
- struct sg_mapping_iter *smi = &host->smi;
+ struct sg_mapping_iter *smi = &slot->smi;
unsigned int bytes_xfered;
int shift = 56;
u64 dat = 0;
@@ -749,6 +1198,51 @@ static void do_write_request(struct cvm_mmc_host *host, struct mmc_request *mrq)
sg_miter_stop(smi);
}
+static void cvm_mmc_track_switch(struct cvm_mmc_slot *slot, u32 cmd_arg)
+{
+ u8 how = (cmd_arg >> 24) & 3;
+ u8 where = (u8)(cmd_arg >> 16);
+ u8 val = (u8)(cmd_arg >> 8);
+
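+	/*
+	 * CMD6 (SWITCH) argument layout: [25:24] access mode,
+	 * [23:16] EXT_CSD byte index, [15:8] value, [2:0] command set.
+	 */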
+ slot->want_switch = slot->cached_switch;
+
+	/*
+	 * Track EXT_CSD write-byte accesses (how == 3) to the critical
+	 * entries so the matching MIO_EMM_SWITCH adjustment is applied
+	 * before any further mmc/core interaction at the old settings.
+	 * Current mmc/core logic (Linux 4.14) does not use the set/clear
+	 * access modes (how == 1 or 2), which would require more complex
+	 * logic to track the intent of a change.
+	 */
+
+ if (how != 3)
+ return;
+
+ switch (where) {
+ case EXT_CSD_BUS_WIDTH:
+ slot->want_switch &= ~MIO_EMM_SWITCH_BUS_WIDTH;
+ slot->want_switch |=
+ FIELD_PREP(MIO_EMM_SWITCH_BUS_WIDTH, val);
+ break;
+ case EXT_CSD_POWER_CLASS:
+ slot->want_switch &= ~MIO_EMM_SWITCH_POWER_CLASS;
+ slot->want_switch |=
+ FIELD_PREP(MIO_EMM_SWITCH_POWER_CLASS, val);
+ break;
+ case EXT_CSD_HS_TIMING:
+ slot->want_switch &= ~MIO_EMM_SWITCH_TIMING;
+ if (val)
+ slot->want_switch |=
+ FIELD_PREP(MIO_EMM_SWITCH_TIMING,
+ (1 << (val - 1)));
+ break;
+ default:
+ return;
+ }
+
+ slot->cmd6_pending = true;
+}
+
static void cvm_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct cvm_mmc_slot *slot = mmc_priv(mmc);
@@ -777,23 +1271,27 @@ static void cvm_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
mods = cvm_mmc_get_cr_mods(cmd);
- WARN_ON(host->current_req);
- host->current_req = mrq;
+ WARN_ON(slot->current_req);
+ mrq->host = mmc;
+ slot->current_req = mrq;
if (cmd->data) {
if (cmd->data->flags & MMC_DATA_READ)
- do_read_request(host, mrq);
+ do_read_request(slot, mrq);
else
- do_write_request(host, mrq);
+ do_write_request(slot, mrq);
if (cmd->data->timeout_ns)
set_wdog(slot, cmd->data->timeout_ns);
} else
set_wdog(slot, 0);
- host->dma_active = false;
+ slot->dma_active = false;
host->int_enable(host, MIO_EMM_INT_CMD_DONE | MIO_EMM_INT_CMD_ERR);
+ if (cmd->opcode == MMC_SWITCH)
+ cvm_mmc_track_switch(slot, cmd->arg);
+
emm_cmd = FIELD_PREP(MIO_EMM_CMD_VAL, 1) |
FIELD_PREP(MIO_EMM_CMD_CTYPE_XOR, mods.ctype_xor) |
FIELD_PREP(MIO_EMM_CMD_RTYPE_XOR, mods.rtype_xor) |
@@ -819,37 +1317,535 @@ retry:
if (!retries)
dev_err(host->dev, "Bad status: %llx before command write\n", rsp_sts);
writeq(emm_cmd, host->base + MIO_EMM_CMD(host));
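+	/*
+	 * Give the device time to act on a CMD6 (SWITCH) before the next
+	 * command is issued; the 1300 us delay appears to be an empirical
+	 * settle time.
+	 */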
+ if (cmd->opcode == MMC_SWITCH)
+ udelay(1300);
+}
+
+static void cvm_mmc_wait_done(struct mmc_request *cvm_mrq)
+{
+ complete(&cvm_mrq->completion);
+}
+
+static int cvm_mmc_r1_cmd(struct mmc_host *mmc, u32 opcode, int *statp)
+{
+ struct mmc_command cmd = {};
+ struct mmc_request cvm_mrq = {};
+
+ if (!opcode)
+ opcode = MMC_SEND_STATUS;
+ cmd.opcode = opcode;
+ if (mmc->card)
+ cmd.arg = mmc->card->rca << 16;
+ else
+ cmd.arg = 1 << 16;
+ cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
+ cmd.data = NULL;
+ cvm_mrq.cmd = &cmd;
+
+ init_completion(&cvm_mrq.completion);
+ cvm_mrq.done = cvm_mmc_wait_done;
+
+ cvm_mmc_request(mmc, &cvm_mrq);
+ if (!wait_for_completion_timeout(&cvm_mrq.completion,
+ msecs_to_jiffies(10))) {
+ mmc_abort_tuning(mmc, opcode);
+ return -ETIMEDOUT;
+ }
+
+ if (statp)
+ *statp = cmd.resp[0];
+
+ return cvm_mrq.cmd->error;
+}
+
+/* adjusters for the four OcteonTX2 delay-line tap fields */
+struct adj {
+ const char *name;
+ u64 mask;
+ int (*test)(struct mmc_host *mmc, u32 opcode, int *statp);
+ u32 opcode;
+ bool ddr_only;
+ bool hs200_only;
+ bool non_hs200;
+ u32 num_runs;
+};
+
+static int adjust_tuning(struct mmc_host *mmc, struct adj *adj, u32 opcode)
+{
+ int err = -1, start_run = -1, best_run = 0, best_start = -1;
+ int last_good = -1;
+ bool prev_ok = false;
+ u64 timing, tap;
+ struct cvm_mmc_slot *slot = mmc_priv(mmc);
+ struct cvm_mmc_host *host = slot->host;
+ char how[MAX_NO_OF_TAPS+1] = "";
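+	/* per-tap result map: '-' = fail, '+' = pass; '@' marks the chosen
+	 * tap, 'X' a tapdance override
+	 */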
+ u32 count;
+
+ /* loop over range+1 to simplify processing */
+ for (tap = 0; tap <= MAX_NO_OF_TAPS; tap++, prev_ok = !err) {
+ if (tap < MAX_NO_OF_TAPS) {
+ cvm_mmc_clk_config(host, CLK_OFF);
+ timing = readq(host->base + MIO_EMM_TIMING(host));
+ timing &= ~adj->mask;
+ timing |= (tap << __bf_shf(adj->mask));
+ writeq(timing, host->base + MIO_EMM_TIMING(host));
+
+ cvm_mmc_clk_config(host, CLK_ON);
+ for (count = 0; count < adj->num_runs; count++) {
+ err = adj->test(mmc, opcode, NULL);
+ if (err)
+ break;
+ }
+ how[tap] = "-+"[!err];
+ if (!err)
+ last_good = tap;
+ } else {
+			/*
+			 * Putting the end+1 case inside the loop simplifies the
+			 * logic, letting 'prev_ok' close out a sweet spot that
+			 * extends all the way to the last tap.
+			 */
+ err = -EINVAL;
+ }
+
+ if (!err) {
+ /*
+ * If no CRC/etc errors in response, but previous
+ * failed, note the start of a new run
+ */
+ if (!prev_ok)
+ start_run = tap;
+ } else if (prev_ok) {
+ int run = tap - 1 - start_run;
+
+ /* did we just exit a wider sweet spot? */
+ if (start_run >= 0 && run > best_run) {
+ best_start = start_run;
+ best_run = run;
+ }
+ }
+ }
+
+ if (best_start < 0) {
+ dev_warn(host->dev, "%s %lldMHz tuning %s failed\n",
+ mmc_hostname(mmc), slot->clock / 1000000, adj->name);
+ dev_info(host->dev, "%s/%s %d/%lld/%d %s\n",
+ mmc_hostname(mmc), adj->name,
+ best_start, tap, best_start + best_run,
+ how);
+ return -EINVAL;
+ }
+
+ tap = best_start + best_run / 2;
+ how[tap] = '@';
+ if (tapdance) {
+ tap = last_good - tapdance;
+ how[tap] = 'X';
+ }
+ dev_info(host->dev, "%s/%s %d/%lld/%d %s\n",
+ mmc_hostname(mmc), adj->name,
+ best_start, tap, best_start + best_run,
+ how);
+ slot->taps &= ~adj->mask;
+ slot->taps |= (tap << __bf_shf(adj->mask));
+ cvm_mmc_set_timing(slot);
+ return 0;
+}
+
+static const u8 octeontx_hs400_tuning_block[512] = {
+ 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
+ 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
+ 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
+ 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
+ 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
+ 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
+ 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
+ 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
+ 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
+ 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
+ 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
+ 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
+ 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
+ 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
+ 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
+ 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
+ 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
+ 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
+ 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
+ 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
+ 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
+ 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
+ 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
+ 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
+ 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
+ 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
+ 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
+ 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
+ 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
+ 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
+ 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
+ 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
+ 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
+ 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
+ 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
+ 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
+ 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
+ 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
+ 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
+ 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
+ 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
+ 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
+ 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
+ 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
+ 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
+ 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
+ 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
+ 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
+ 0xff, 0x00, 0x00, 0xff, 0xff, 0x00, 0xff, 0x00,
+ 0x00, 0xff, 0x00, 0xff, 0x55, 0xaa, 0x55, 0xaa,
+ 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
+ 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
+ 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
+ 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
+ 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
+ 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
+ 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00,
+ 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,
+ 0x01, 0xfe, 0x01, 0xfe, 0xcc, 0xcc, 0xcc, 0xff,
+ 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
+ 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
+ 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
+ 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
+ 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
+
+};
+
+/* Initialize a single-block read/write request used for tuning */
+static void hs400_prepare_mrq(const struct cvm_mmc_slot *slot,
+ struct mmc_request *mrq, struct mmc_command *cmd,
+ struct mmc_data *data, struct scatterlist *sg,
+ const void *dat_buf, u32 size, bool write)
+{
+ struct mmc_host *mmc = slot->mmc;
+
+ memset(data, 0, sizeof(*data));
+ memset(cmd, 0, sizeof(*cmd));
+ memset(mrq, 0, sizeof(*mrq));
+
+ mrq->cmd = cmd;
+ mrq->data = data;
+ cmd->opcode = write ? MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
+ cmd->arg = slot->hs400_tuning_block;
+ cmd->flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+ data->blksz = size;
+ data->blocks = 1;
+ data->sg = sg;
+ data->sg_len = 1;
+ data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
+ init_completion(&(mrq->completion));
+ if (mmc->card)
+ mmc_set_data_timeout(data, mmc->card);
+ else
+ data->timeout_ns = (write ? 80 : 10) * NSEC_PER_MSEC;
+ sg_init_one(sg, dat_buf, size);
+}
+
+static int access_hs400_tuning_block(struct cvm_mmc_slot *slot, bool write)
+{
+ struct mmc_request mrq = {};
+ struct mmc_command cmd = {};
+ struct mmc_data data = {};
+ struct scatterlist sg;
+ struct mmc_host *mmc = slot->mmc;
+ const int size = mmc->max_blk_size;
+ u8 *data_buf;
+
+ data_buf = kzalloc(size, GFP_KERNEL);
+ if (!data_buf)
+ return -ENOMEM;
+ if (write)
+ memcpy(data_buf, octeontx_hs400_tuning_block, size);
+
+ hs400_prepare_mrq(slot, &mrq, &cmd, &data, &sg, data_buf, size, write);
+
+ mmc_wait_for_req(mmc, &mrq);
+
+ if (!write) {
+ if (memcmp(data_buf, octeontx_hs400_tuning_block,
+ sizeof(octeontx_hs400_tuning_block))) {
+ kfree(data_buf);
+ return -EILSEQ;
+ }
+ }
+ kfree(data_buf);
+
+ if (cmd.error || data.error)
+ dev_dbg(slot->host->dev, "%s op failed, cmd: %d, data: %d\n",
+ write ? "write" : "read", cmd.error, data.error);
+ return (cmd.error || data.error) ? -ENODATA : 0;
+}
+
+/* Check for, and if necessary write, the HS400 tuning block */
+static int check_and_write_hs400_tuning_block(struct cvm_mmc_slot *slot)
+{
+ int err;
+
+ if (slot->hs400_tuning_block == -1 ||
+ slot->hs400_tuning_block_present)
+ return 0;
+
+ /* Read the tuning block first and see if it's already set */
+ err = access_hs400_tuning_block(slot, false);
+ if (err == -ENODATA) {
+ dev_warn(slot->host->dev,
+ "Could not access HS400 tuning block %d in HS200 mode, err: %d\n",
+ slot->hs400_tuning_block, err);
+ return err;
+ } else if (!err) {
+ /* Everything is good, data matches, we're done */
+ goto done;
+ }
+
+ /* Attempt to write the tuning block */
+ err = access_hs400_tuning_block(slot, true);
+ if (err) {
+ dev_warn(slot->host->dev,
+ "err: %d, Could not write HS400 tuning block in HS200 mode\n",
+ err);
+ goto done;
+ }
+
+ /* Read after write, this should pass */
+ err = access_hs400_tuning_block(slot, false);
+ if (err)
+ dev_warn(slot->host->dev,
+ "Could not read HS400 tuning block after write, err: %d\n",
+ err);
+
+done:
+ /* Disable HS400 tuning if we can't access the tuning block */
+ if (err)
+ slot->hs400_tuning_block = -1;
+
+ slot->hs400_tuning_block_present = !err;
+
+ return err;
+}
+
+static int tune_hs400(struct cvm_mmc_slot *slot)
+{
+ int err = 0, start_run = -1, best_run = 0, best_start = -1;
+ int last_good = -1;
+ bool prev_ok = false;
+ u64 timing;
+ int tap;
+ const int size = sizeof(octeontx_hs400_tuning_block);
+ struct mmc_host *mmc = slot->mmc;
+ struct cvm_mmc_host *host = slot->host;
+ struct mmc_request mrq;
+ struct mmc_command cmd;
+ struct mmc_data data;
+ struct scatterlist sg;
+ u8 *data_buf;
+ char how[MAX_NO_OF_TAPS+1] = "";
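+	/* per-tap result map: '-' = cmd/data error, 'd' = data mismatch,
+	 * '+' = pass; '@'/'X' mark the chosen tap as in adjust_tuning()
+	 */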
+
+ if (slot->hs400_tuning_block == -1)
+ return 0;
+
+ /*
+ * Unfortunately, in their infinite wisdom, the eMMC standard does
+ * not allow for tuning in HS400 mode. The problem is that what
+ * makes a good tuning point for HS200 often does not work in HS400
+ * mode. In order to tune HS400 mode, a block (usually block 1) is
+ * set aside for tuning. U-Boot is responsible for writing a data
+ * pattern designed to generate a worst case signal. Most of this
+ * pattern is based off of the HS200 pattern.
+ *
+	 * Each data-in tap is tested by reading this block, and the center
+	 * tap of the longest run of good reads is chosen.  This code is
+ * largely similar to adjust_tuning() above.
+ */
+ data_buf = kzalloc(size, GFP_KERNEL);
+ if (!data_buf)
+ return -ENOMEM;
+
+ hs400_prepare_mrq(slot, &mrq, &cmd, &data, &sg, data_buf, size, false);
+
+ /* loop over range+1 to simplify processing */
+ for (tap = 0; tap <= MAX_NO_OF_TAPS; tap++, prev_ok = !err) {
+ if (tap < MAX_NO_OF_TAPS) {
+ cvm_mmc_clk_config(host, CLK_OFF);
+ timing = readq(host->base + MIO_EMM_TIMING(host));
+			timing &= ~MIO_EMM_TIMING_DATA_IN;
+			timing |= FIELD_PREP(MIO_EMM_TIMING_DATA_IN, tap);
+ writeq(timing, host->base + MIO_EMM_TIMING(host));
+ cvm_mmc_clk_config(host, CLK_ON);
+
+ dev_dbg(host->dev, "HS400 testing data in tap %d\n",
+ tap);
+ mmc_wait_for_req(mmc, &mrq);
+			if (cmd.error || data.error) {
+ err = cmd.error ? cmd.error : data.error;
+ how[tap] = '-';
+ dev_dbg(host->dev,
+ "HS400 tuning cmd err: %d, data error: %d\n",
+ cmd.error, data.error);
+ } else { /* Validate data */
+ err = memcmp(data_buf,
+ octeontx_hs400_tuning_block, size);
+
+ how[tap] = "d+"[!err];
+ dev_dbg(host->dev,
+ "HS400 read OK at tap %d, data %s\n",
+ tap, err ? "mismatch" : "ok");
+ }
+
+ if (!err)
+ last_good = tap;
+ } else {
+			/*
+			 * Putting the end+1 case inside the loop simplifies the
+			 * logic, letting 'prev_ok' close out a sweet spot that
+			 * extends all the way to the last tap.
+			 */
+ err = -EILSEQ;
+ }
+
+ if (!err) {
+ /*
+ * If no CRC/etc errors in response, but previous
+ * failed, note the start of a new run
+ */
+ if (!prev_ok)
+ start_run = tap;
+ } else if (prev_ok) {
+ int run = tap - 1 - start_run;
+
+ /* did we just exit a wider sweet spot? */
+ if (start_run >= 0 && run > best_run) {
+ best_start = start_run;
+ best_run = run;
+ }
+ }
+ }
+
+ kfree(data_buf);
+ if (best_start < 0) {
+ dev_warn(host->dev, "%s %lldMHz tuning HS400 data in failed\n",
+ mmc_hostname(mmc), slot->clock / 1000000);
+ dev_info(host->dev, "%s/HS400 data in %d/%d/%d %s\n",
+ mmc_hostname(mmc), best_start, tap,
+ best_start + best_run, how);
+ return -EINVAL;
+ }
+
+ tap = best_start + best_run / 2;
+ how[tap] = '@';
+ if (tapdance) {
+ tap = last_good - tapdance;
+ how[tap] = 'X';
+ }
+ dev_info(host->dev, "%s/HS400 data in %d/%d/%d %s\n",
+ mmc_hostname(mmc), best_start, tap,
+ best_start + best_run, how);
+ slot->taps &= ~MIO_EMM_TIMING_DATA_IN;
+ slot->taps |= FIELD_PREP(MIO_EMM_TIMING_DATA_IN, tap);
+ slot->data_in_taps_dly[MMC_TIMING_MMC_HS400] = tap * slot->host->per_tap_delay;
+ dev_dbg(host->dev, "HS400 data input tap: %d\n", tap);
+ dev_dbg(host->dev, "%s\n", how);
+ cvm_mmc_set_timing(slot);
+
+ return 0;
+}
+
+static u32 max_supported_frequency(struct cvm_mmc_host *host)
+{
+	/* Default maximum frequency is 52 MHz for chips prior to 9X */
+ u32 max_frequency = MHZ_52;
+
+ if (is_mmc_otx2(host))
+		/* 9X chips use the per-chip maximum determined at probe time */
+ max_frequency = host->max_freq;
+
+ return max_frequency;
+}
+
+static void cvm_mmc_tune_mode(struct cvm_mmc_slot *slot, struct mmc_ios *ios)
+{
+ struct mmc_host *host = slot->mmc;
+ u8 timing = ios->timing;
+ int ret = 0;
+
+	/* Only the following modes are tuned here; HS200 takes a different path */
+ if (timing != MMC_TIMING_MMC_HS400 &&
+ timing != MMC_TIMING_MMC_HS &&
+ timing != MMC_TIMING_MMC_DDR52)
+ return;
+
+ if (slot->in_timings_ctl & BIT(timing)) {
+ dev_info(slot->host->dev,
+ "mmc%d: Tuning overided by user settings\n",
+ host->index);
+ return;
+ }
+
+ dev_dbg(slot->host->dev, "mmc%d: Tuning for mode %s\n",
+ host->index, mmc_modes_name[timing]);
+
+ if (timing == MMC_TIMING_MMC_HS400)
+ ret = tune_hs400(slot);
+ else {
+ ret = cvm_execute_tuning(host, MMC_SEND_EXT_CSD);
+ if (!ret) { /* store tuned timings */
+ u32 taps, cmd_timing, data_timing;
+
+ taps = FIELD_GET(MIO_EMM_TIMING_CMD_IN, slot->taps);
+ cmd_timing = taps * slot->host->per_tap_delay;
+
+ taps = FIELD_GET(MIO_EMM_TIMING_DATA_IN, slot->taps);
+ data_timing = taps * slot->host->per_tap_delay;
+
+ slot->cmd_in_taps_dly[timing] = cmd_timing;
+ slot->data_in_taps_dly[timing] = data_timing;
+ }
+ }
+
+ if (ret)
+ dev_info(slot->host->dev,
+ "mmc%d: Tuning exited early due to errors (%d), running with default timings",
+ host->index,
+ ret);
}
static void cvm_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct cvm_mmc_slot *slot = mmc_priv(mmc);
struct cvm_mmc_host *host = slot->host;
int clk_period = 0, power_class = 10, bus_width = 0;
- u64 clock, emm_switch;
+ u64 clock, emm_switch, mode;
+ u32 max_f;
+
+ if (ios->power_mode == MMC_POWER_OFF) {
+ if (host->powered) {
+ cvm_mmc_reset_bus(slot);
+ if (host->global_pwr_gpiod)
+ host->set_shared_power(host, 0);
+ else if (!IS_ERR_OR_NULL(mmc->supply.vmmc))
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
+ host->powered = false;
+ }
+ set_wdog(slot, 0);
+ return;
+ }
host->acquire_bus(host);
cvm_mmc_switch_to(slot);
- /* Set the power state */
- switch (ios->power_mode) {
- case MMC_POWER_ON:
- break;
-
- case MMC_POWER_OFF:
- cvm_mmc_reset_bus(slot);
- if (host->global_pwr_gpiod)
- host->set_shared_power(host, 0);
- else if (!IS_ERR(mmc->supply.vmmc))
- mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
- break;
-
- case MMC_POWER_UP:
+ if (ios->power_mode == MMC_POWER_UP) {
if (host->global_pwr_gpiod)
host->set_shared_power(host, 1);
- else if (!IS_ERR(mmc->supply.vmmc))
+ else if (!IS_ERR_OR_NULL(mmc->supply.vmmc))
mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
- break;
}
/* Convert bus width to HW definition */
@@ -866,34 +1862,217 @@ static void cvm_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
}
/* DDR is available for 4/8 bit bus width */
- if (ios->bus_width && ios->timing == MMC_TIMING_MMC_DDR52)
- bus_width |= 4;
+ switch (ios->timing) {
+ case MMC_TIMING_UHS_DDR50:
+ case MMC_TIMING_MMC_DDR52:
+ if (ios->bus_width)
+ bus_width |= 4;
+ break;
+ case MMC_TIMING_MMC_HS400:
+ if (ios->bus_width & 2)
+ bus_width |= 4;
+ break;
+ }
/* Change the clock frequency. */
clock = ios->clock;
- if (clock > 52000000)
- clock = 52000000;
+ max_f = max_supported_frequency(host);
+
+ if (clock < mmc->f_min)
+ clock = mmc->f_min;
+ if (clock > max_f)
+ clock = max_f;
+
slot->clock = clock;
- if (clock)
- clk_period = (host->sys_freq + clock - 1) / (2 * clock);
+ if (clock) {
+ clk_period = host->sys_freq / (2 * clock);
+ /* check to not exceed requested speed */
+ while (1) {
+ int hz = host->sys_freq / (2 * clk_period);
- emm_switch = FIELD_PREP(MIO_EMM_SWITCH_HS_TIMING,
- (ios->timing == MMC_TIMING_MMC_HS)) |
+ if (hz <= clock)
+ break;
+ clk_period++;
+ }
+ }
+
+ emm_switch =
FIELD_PREP(MIO_EMM_SWITCH_BUS_WIDTH, bus_width) |
FIELD_PREP(MIO_EMM_SWITCH_POWER_CLASS, power_class) |
FIELD_PREP(MIO_EMM_SWITCH_CLK_HI, clk_period) |
FIELD_PREP(MIO_EMM_SWITCH_CLK_LO, clk_period);
+ switch (ios->timing) {
+ case MMC_TIMING_LEGACY:
+ break;
+ case MMC_TIMING_MMC_HS:
+ case MMC_TIMING_SD_HS:
+ case MMC_TIMING_UHS_SDR12:
+ case MMC_TIMING_UHS_SDR25:
+ case MMC_TIMING_UHS_SDR50:
+ case MMC_TIMING_UHS_SDR104:
+ case MMC_TIMING_UHS_DDR50:
+ case MMC_TIMING_MMC_DDR52:
+ emm_switch |= FIELD_PREP(MIO_EMM_SWITCH_HS_TIMING, 1);
+ break;
+ case MMC_TIMING_MMC_HS200:
+ emm_switch |= FIELD_PREP(MIO_EMM_SWITCH_HS200_TIMING, 1);
+ break;
+ case MMC_TIMING_MMC_HS400:
+ emm_switch |= FIELD_PREP(MIO_EMM_SWITCH_HS400_TIMING, 1);
+ break;
+ }
set_bus_id(&emm_switch, slot->bus_id);
+ pr_debug("mmc-slot%d trying switch %llx w%lld hs%lld hs200:%lld hs400:%lld\n",
+ slot->bus_id, emm_switch,
+ FIELD_GET(MIO_EMM_SWITCH_BUS_WIDTH, emm_switch),
+ FIELD_GET(MIO_EMM_SWITCH_HS_TIMING, emm_switch),
+ FIELD_GET(MIO_EMM_SWITCH_HS200_TIMING, emm_switch),
+ FIELD_GET(MIO_EMM_SWITCH_HS400_TIMING, emm_switch));
+
if (!switch_val_changed(slot, emm_switch))
goto out;
set_wdog(slot, 0);
do_switch(host, emm_switch);
+
+ mode = readq(host->base + MIO_EMM_MODE(host, slot->bus_id));
+ pr_debug("mmc-slot%d mode %llx w%lld hs%lld hs200:%lld hs400:%lld\n",
+ slot->bus_id, mode,
+ (mode >> 40) & 7, (mode >> 48) & 1,
+ (mode >> 49) & 1, (mode >> 50) & 1);
+
slot->cached_switch = emm_switch;
+ host->powered = true;
+ cvm_mmc_configure_delay(slot);
out:
host->release_bus(host);
+ if (ios->timing == MMC_TIMING_MMC_HS)
+ check_and_write_hs400_tuning_block(slot);
+
+ cvm_mmc_tune_mode(slot, ios);
+}
+
+static struct adj adj[] = {
+ { "CMD_IN(HS200)", MIO_EMM_TIMING_CMD_IN,
+ cvm_mmc_r1_cmd, MMC_SEND_STATUS, false, true, false, 3, },
+ { "DATA_IN(HS200)", MIO_EMM_TIMING_DATA_IN,
+ mmc_send_tuning, MMC_SEND_TUNING_BLOCK_HS200,
+ false, true, false, 2 },
+ { NULL, },
+};
+
+static int cvm_scan_tuning(struct mmc_host *mmc, u32 opcode)
+{
+ struct cvm_mmc_slot *slot = mmc_priv(mmc);
+ struct adj *a;
+ int ret;
+
+ for (a = adj; a->name; a++) {
+ if (a->ddr_only && !cvm_is_mmc_timing_ddr(slot))
+ continue;
+ if (a->hs200_only &&
+ mmc->ios.timing != MMC_TIMING_MMC_HS200)
+ continue;
+ if (a->non_hs200 && mmc->ios.timing == MMC_TIMING_MMC_HS200)
+ continue;
+
+ ret = adjust_tuning(mmc, a,
+ a->opcode ?: opcode);
+
+ if (ret)
+ return ret;
+ }
+
+ cvm_mmc_set_timing(slot);
+ if (!slot->hs400_tuning_block_present)
+ check_and_write_hs400_tuning_block(slot);
+ return 0;
+}
+
+static int cvm_execute_tuning(struct mmc_host *mmc, u32 opcode)
+{
+ struct cvm_mmc_slot *slot = mmc_priv(mmc);
+ struct cvm_mmc_host *host = slot->host;
+ int clk_period, hz;
+
+ int ret;
+
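+	/*
+	 * If tuning fails at the current clock, back the clock off by about
+	 * one eighth of the period and retry, giving up below 400 kHz.
+	 */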
+ do {
+ u64 emm_switch =
+ readq(host->base + MIO_EMM_MODE(host, slot->bus_id));
+
+ clk_period = FIELD_GET(MIO_EMM_SWITCH_CLK_LO, emm_switch);
+ dev_info(slot->host->dev, "%s re-tuning\n",
+ mmc_hostname(mmc));
+ ret = cvm_scan_tuning(mmc, opcode);
+ if (ret) {
+ int inc = clk_period >> 3;
+
+ if (!inc)
+ inc++;
+ clk_period += inc;
+ hz = host->sys_freq / (2 * clk_period);
+ pr_debug("clk_period %d += %d, now %d Hz\n",
+ clk_period - inc, inc, hz);
+
+ if (hz < 400000)
+ break;
+
+ slot->clock = hz;
+ mmc->ios.clock = hz;
+
+ emm_switch &= ~MIO_EMM_SWITCH_CLK_LO;
+ emm_switch |= FIELD_PREP(MIO_EMM_SWITCH_CLK_LO,
+ clk_period);
+ emm_switch &= ~MIO_EMM_SWITCH_CLK_HI;
+ emm_switch |= FIELD_PREP(MIO_EMM_SWITCH_CLK_HI,
+ clk_period);
+ do_switch(host, emm_switch);
+ }
+ } while (ret);
+
+ return ret;
+}
+
+static int cvm_prepare_tuning(struct mmc_host *mmc, u32 opcode)
+{
+ struct cvm_mmc_slot *slot = mmc_priv(mmc);
+
+ if (slot->in_timings_ctl & BIT(mmc->ios.timing)) {
+ dev_info(slot->host->dev,
+ "mmc%d: Tuning overided by user settings\n",
+ mmc->index);
+ return 0;
+ }
+
+ return cvm_execute_tuning(mmc, opcode);
+}
+
+static int cvm_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct cvm_mmc_slot *slot = mmc_priv(mmc);
+
+ return cvm_mmc_configure_delay(slot);
+}
+
+static void cvm_mmc_reset(struct mmc_host *mmc)
+{
+ struct cvm_mmc_slot *slot = mmc_priv(mmc);
+ struct cvm_mmc_host *host = slot->host;
+ u64 r;
+
+ cvm_mmc_reset_bus(slot);
+
+ r = FIELD_PREP(MIO_EMM_CMD_VAL, 1) |
+ FIELD_PREP(MIO_EMM_CMD_BUS_ID, slot->bus_id);
+
+ writeq(r, host->base + MIO_EMM_CMD(host));
+
+ do {
+ r = readq(host->base + MIO_EMM_RSP_STS(host));
+ } while (!(r & MIO_EMM_RSP_STS_CMD_DONE));
}
static const struct mmc_host_ops cvm_mmc_ops = {
@@ -901,6 +2080,13 @@ static const struct mmc_host_ops cvm_mmc_ops = {
.set_ios = cvm_mmc_set_ios,
.get_ro = mmc_gpio_get_ro,
.get_cd = mmc_gpio_get_cd,
+ .hw_reset = cvm_mmc_reset,
+ .execute_tuning = cvm_prepare_tuning,
+ .prepare_hs400_tuning = cvm_prepare_hs400_tuning,
+#if IS_ENABLED(CONFIG_MMC_PSTORE)
+ .req_cleanup_pending = cvm_req_cleanup_pending,
+ .req_completion_poll = cvm_req_completion_poll,
+#endif
};
static void cvm_mmc_set_clock(struct cvm_mmc_slot *slot, unsigned int clock)
@@ -917,7 +2103,7 @@ static int cvm_mmc_init_lowlevel(struct cvm_mmc_slot *slot)
struct cvm_mmc_host *host = slot->host;
u64 emm_switch;
- /* Enable this bus slot. */
+ /* Enable this bus slot. Overridden when vqmmc switching is engaged */
host->emm_cfg |= (1ull << slot->bus_id);
writeq(host->emm_cfg, slot->host->base + MIO_EMM_CFG(host));
udelay(10);
@@ -933,8 +2119,8 @@ static int cvm_mmc_init_lowlevel(struct cvm_mmc_slot *slot)
/* Make the changes take effect on this bus slot. */
set_bus_id(&emm_switch, slot->bus_id);
do_switch(host, emm_switch);
-
slot->cached_switch = emm_switch;
+ host->powered = true;
/*
* Set watchdog timeout value and default reset value
@@ -948,12 +2134,77 @@ static int cvm_mmc_init_lowlevel(struct cvm_mmc_slot *slot)
return 0;
}
+/**
+ * cvm_mmc_of_parse_timings() - read device tree entries for bus timings
+ * @node: device tree node for the slot
+ * @slot: the slot device
+ */
+static void cvm_mmc_of_parse_timings(const struct device_node *node,
+ struct cvm_mmc_slot *slot)
+{
+ int ret;
+
+ /* Provide user overrides for default output timings */
+ of_property_read_u32(node, "marvell,cmd-out-hs200-dly",
+ &slot->cmd_out_taps_dly[MMC_TIMING_MMC_HS200]);
+ of_property_read_u32(node, "marvell,data-out-hs200-dly",
+ &slot->data_out_taps_dly[MMC_TIMING_MMC_HS200]);
+ of_property_read_u32(node, "marvell,cmd-out-hs400-dly",
+ &slot->cmd_out_taps_dly[MMC_TIMING_MMC_HS400]);
+ of_property_read_u32(node, "marvell,data-out-hs400-dly",
+ &slot->data_out_taps_dly[MMC_TIMING_MMC_HS400]);
+ of_property_read_u32(node, "marvell,cmd-out-hs-sdr-dly",
+ &slot->cmd_out_taps_dly[MMC_TIMING_MMC_HS]);
+ of_property_read_u32(node, "marvell,data-out-hs-sdr-dly",
+ &slot->data_out_taps_dly[MMC_TIMING_MMC_HS]);
+ of_property_read_u32(node, "marvell,cmd-out-hs-ddr-dly",
+ &slot->cmd_out_taps_dly[MMC_TIMING_MMC_DDR52]);
+ of_property_read_u32(node, "marvell,data-out-hs-ddr-dly",
+ &slot->data_out_taps_dly[MMC_TIMING_MMC_DDR52]);
+ of_property_read_u32(node, "marvell,cmd-out-legacy-dly",
+ &slot->cmd_out_taps_dly[MMC_TIMING_LEGACY]);
+ of_property_read_u32(node, "marvell,data-out-legacy-dly",
+ &slot->data_out_taps_dly[MMC_TIMING_LEGACY]);
+ /* Modify the input timings using user inputs */
+ ret = of_property_read_u32(node, "marvell,cmd-in-hs200-dly",
+ &slot->cmd_in_taps_dly[MMC_TIMING_MMC_HS200]);
+ if (!ret)
+ slot->in_timings_ctl |= BIT(MMC_TIMING_MMC_HS200);
+ ret = of_property_read_u32(node, "marvell,data-in-hs200-dly",
+ &slot->data_in_taps_dly[MMC_TIMING_MMC_HS200]);
+ if (!ret)
+ slot->in_timings_ctl |= BIT(MMC_TIMING_MMC_HS200);
+ ret = of_property_read_u32(node, "marvell,cmd-in-hs400-dly",
+ &slot->cmd_in_taps_dly[MMC_TIMING_MMC_HS400]);
+ if (!ret)
+ slot->in_timings_ctl |= BIT(MMC_TIMING_MMC_HS400);
+ ret = of_property_read_u32(node, "marvell,data-in-hs400-dly",
+ &slot->data_in_taps_dly[MMC_TIMING_MMC_HS400]);
+ if (!ret)
+ slot->in_timings_ctl |= BIT(MMC_TIMING_MMC_HS400);
+
+ of_property_read_u32(node, "marvell,cmd-in-hs-sdr-dly",
+ &slot->cmd_in_taps_dly[MMC_TIMING_MMC_HS]);
+ of_property_read_u32(node, "marvell,data-in-hs-sdr-dly",
+ &slot->data_in_taps_dly[MMC_TIMING_MMC_HS]);
+ of_property_read_u32(node, "marvell,cmd-in-hs-ddr-dly",
+ &slot->cmd_in_taps_dly[MMC_TIMING_MMC_DDR52]);
+ of_property_read_u32(node, "marvell,data-in-hs-ddr-dly",
+ &slot->data_in_taps_dly[MMC_TIMING_MMC_DDR52]);
+ of_property_read_u32(node, "marvell,cmd-in-legacy-dly",
+ &slot->cmd_in_taps_dly[MMC_TIMING_LEGACY]);
+ of_property_read_u32(node, "marvell,data-in-legacy-dly",
+ &slot->data_in_taps_dly[MMC_TIMING_LEGACY]);
+}
+
static int cvm_mmc_of_parse(struct device *dev, struct cvm_mmc_slot *slot)
{
u32 id, cmd_skew = 0, dat_skew = 0, bus_width = 0;
struct device_node *node = dev->of_node;
struct mmc_host *mmc = slot->mmc;
- u64 clock_period;
+ u32 max_frequency, current_drive, clk_slew;
int ret;
ret = of_property_read_u32(node, "reg", &id);
@@ -962,19 +2213,25 @@ static int cvm_mmc_of_parse(struct device *dev, struct cvm_mmc_slot *slot)
return ret;
}
- if (id >= CAVIUM_MAX_MMC || slot->host->slot[id]) {
- dev_err(dev, "Invalid reg property on %pOF\n", node);
+ if (id >= CAVIUM_MAX_MMC) {
+ dev_err(dev, "Invalid reg=<%d> property on %pOF\n", id, node);
+ return -EINVAL;
+ }
+
+ if (slot->host->slot[id]) {
+ dev_err(dev, "Duplicate reg=<%d> property on %pOF\n",
+ id, node);
return -EINVAL;
}
ret = mmc_regulator_get_supply(mmc);
- if (ret)
+ if (ret == -EPROBE_DEFER)
return ret;
/*
* Legacy Octeon firmware has no regulator entry, fall-back to
* a hard-coded voltage to get a sane OCR.
*/
- if (IS_ERR(mmc->supply.vmmc))
+ if (IS_ERR_OR_NULL(mmc->supply.vmmc))
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
/* Common MMC bindings */
@@ -982,7 +2239,10 @@ static int cvm_mmc_of_parse(struct device *dev, struct cvm_mmc_slot *slot)
if (ret)
return ret;
- /* Set bus width */
+ slot->hs400_tuning_block = -1U;
+ of_property_read_u32(node, "marvell,hs400-tuning-block",
+ &slot->hs400_tuning_block);
+ /* Set bus width from obsolete properties, if unset */
if (!(mmc->caps & (MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA))) {
of_property_read_u32(node, "cavium,bus-max-width", &bus_width);
if (bus_width == 8)
@@ -991,19 +2251,43 @@ static int cvm_mmc_of_parse(struct device *dev, struct cvm_mmc_slot *slot)
mmc->caps |= MMC_CAP_4_BIT_DATA;
}
+ /* Configure bus timings */
+ cvm_mmc_of_parse_timings(node, slot);
+
+ max_frequency = max_supported_frequency(slot->host);
+
/* Set maximum and minimum frequency */
if (!mmc->f_max)
of_property_read_u32(node, "spi-max-frequency", &mmc->f_max);
- if (!mmc->f_max || mmc->f_max > 52000000)
- mmc->f_max = 52000000;
- mmc->f_min = 400000;
+ if (!mmc->f_max || mmc->f_max > max_frequency)
+ mmc->f_max = max_frequency;
+ mmc->f_min = KHZ_400;
/* Sampling register settings, period in picoseconds */
- clock_period = 1000000000000ull / slot->host->sys_freq;
of_property_read_u32(node, "cavium,cmd-clk-skew", &cmd_skew);
of_property_read_u32(node, "cavium,dat-clk-skew", &dat_skew);
- slot->cmd_cnt = (cmd_skew + clock_period / 2) / clock_period;
- slot->dat_cnt = (dat_skew + clock_period / 2) / clock_period;
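+ /*
+ * CN8xxx and OcteonTX2 take the skew values from the DT as-is;
+ * on older parts they are given in picoseconds and converted to
+ * system-clock cycles here.
+ */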
+ if (is_mmc_8xxx(slot->host) || is_mmc_otx2(slot->host)) {
+ slot->cmd_cnt = cmd_skew;
+ slot->data_cnt = dat_skew;
+ } else {
+ u64 clock_period = 1000000000000ull / slot->host->sys_freq;
+
+ slot->cmd_cnt = (cmd_skew + clock_period / 2) / clock_period;
+ slot->data_cnt = (dat_skew + clock_period / 2) / clock_period;
+ }
+
+ /* Get current drive and clk skew */
+ ret = of_property_read_u32(node, "cavium,drv-strength", &current_drive);
+ if (ret)
+ slot->drive = -1;
+ else
+ slot->drive = current_drive;
+
+ ret = of_property_read_u32(node, "cavium,clk-slew", &clk_slew);
+ if (ret)
+ slot->slew = -1;
+ else
+ slot->slew = clk_slew;
return id;
}
@@ -1012,7 +2296,8 @@ int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
{
struct cvm_mmc_slot *slot;
struct mmc_host *mmc;
- int ret, id;
+ struct iommu_domain *dom;
+ int ret, id, i;
mmc = mmc_alloc_host(sizeof(struct cvm_mmc_slot), dev);
if (!mmc)
@@ -1022,6 +2307,29 @@ int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
slot->mmc = mmc;
slot->host = host;
+ /*
+ * Initialize output timings for the bus.
+ * DAT[0..7] timings are half of the CMD line timings in DDR modes.
+ */
+ memcpy(slot->cmd_out_taps_dly, default_cmd_out_taps_dly,
+ sizeof(slot->cmd_out_taps_dly));
+
+ for (i = 0; i < MMC_TIMINGS_COUNT; i++) {
+ u32 val = slot->cmd_out_taps_dly[i];
+
+ if (__cvm_is_mmc_timing_ddr(i))
+ val = DIV_ROUND_UP(val, 2);
+ slot->data_out_taps_dly[i] = val;
+ }
+ /* Initialize input timings */
+ memcpy(slot->cmd_in_taps_dly, default_cmd_in_taps_dly,
+ sizeof(slot->cmd_in_taps_dly));
+ /* Input timings for DAT lines are the same as CMD line timings */
+ memcpy(slot->data_in_taps_dly, default_cmd_in_taps_dly,
+ sizeof(slot->data_in_taps_dly));
+ /* Mark all timings as defaults */
+ slot->in_timings_ctl = 0;
+
ret = cvm_mmc_of_parse(dev, slot);
if (ret < 0)
goto error;
@@ -1030,15 +2338,19 @@ int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
/* Set up host parameters */
mmc->ops = &cvm_mmc_ops;
+ mmc->caps |= MMC_CAP_BUS_WIDTH_TEST;
+ mmc->caps |= MMC_CAP_CMD23 | MMC_CAP_POWER_OFF_CARD;
+
/*
- * We only have a 3.3v supply, we cannot support any
- * of the UHS modes. We do support the high speed DDR
- * modes up to 52MHz.
+ * We only have a 3.3V supply for the slots, so we cannot
+ * support any of the UHS modes. We do support the
+ * high-speed DDR modes up to 52 MHz.
*
* Disable bounce buffers for max_segs = 1
*/
- mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
- MMC_CAP_CMD23 | MMC_CAP_POWER_OFF_CARD | MMC_CAP_3_3V_DDR;
+
+ if (!is_mmc_otx2(host))
+ mmc->caps |= MMC_CAP_3_3V_DDR;
if (host->use_sg)
mmc->max_segs = 16;
@@ -1054,14 +2366,33 @@ int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
/* DMA block count field is 15 bits */
mmc->max_blk_count = 32767;
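+
+ /*
+ * With an identity (pass-through) IOMMU domain, DMA may be bounced
+ * through SWIOTLB; cap segment and request sizes to what the
+ * SWIOTLB slots can hold.
+ */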
+ dom = iommu_get_domain_for_dev(dev->parent);
+ if (dom && dom->type == IOMMU_DOMAIN_IDENTITY) {
+ unsigned int max_size = (1 << IO_TLB_SHIFT) * IO_TLB_SEGSIZE;
+
+ if (mmc->max_seg_size > max_size)
+ mmc->max_seg_size = max_size;
+
+ max_size *= mmc->max_segs;
+
+ if (mmc->max_req_size > max_size)
+ mmc->max_req_size = max_size;
+ }
+
+ mmc_can_retune(mmc);
+
slot->clock = mmc->f_min;
slot->bus_id = id;
slot->cached_rca = 1;
+#if IS_ENABLED(CONFIG_MMC_PSTORE)
+ host->pstore = 0;
+#endif
host->acquire_bus(host);
host->slot[id] = slot;
- cvm_mmc_switch_to(slot);
+ host->use_vqmmc |= !IS_ERR_OR_NULL(slot->mmc->supply.vqmmc);
cvm_mmc_init_lowlevel(slot);
+ cvm_mmc_switch_to(slot);
host->release_bus(host);
ret = mmc_add_host(mmc);
diff --git a/drivers/mmc/host/cavium.h b/drivers/mmc/host/cavium.h
index f3eea5eaa678..4725804d72bb 100644
--- a/drivers/mmc/host/cavium.h
+++ b/drivers/mmc/host/cavium.h
@@ -19,8 +19,59 @@
#include <linux/of.h>
#include <linux/scatterlist.h>
#include <linux/semaphore.h>
+#include <linux/pci.h>
#define CAVIUM_MAX_MMC 4
+#define BLKSZ_EXT_CSD 512
+#define MRVL_OCTEONTX2_96XX_PARTNUM 0xB2
+
+/* Subsystem Device ID */
+#define PCI_SUBSYS_DEVID_8XXX 0xA
+#define PCI_SUBSYS_DEVID_9XXX 0xB
+#define PCI_SUBSYS_DEVID_98XX 0xB1
+#define PCI_SUBSYS_DEVID_96XX 0xB2
+#define PCI_SUBSYS_DEVID_95XX 0xB3
+#define PCI_SUBSYS_DEVID_LOKI 0xB4
+#define PCI_SUBSYS_DEVID_95XXMM 0xB5
+
+/* Chip revision Id */
+#define REV_ID_0 0
+#define REV_ID_1 1
+#define REV_ID_2 2
+
+#define KHZ_400 (400000)
+#define MHZ_26 (26000000)
+#define MHZ_52 (52000000)
+#define MHZ_100 (100000000)
+#define MHZ_112_5 (112500000)
+#define MHZ_150 (150000000)
+#define MHZ_167 (167000000)
+#define MHZ_200 (200000000)
+
+/* octtx2: emmc interface io current drive strength */
+#define MILLI_AMP_2 (0x0)
+#define MILLI_AMP_4 (0x1)
+#define MILLI_AMP_8 (0x2)
+#define MILLI_AMP_16 (0x3)
+
+/* octtx2: emmc interface io clk skew */
+#define LOW_SLEW_RATE (0x0)
+#define HIGH_SLEW_RATE (0x1)
+
+/* octtx2: emmc interface calibration */
+#define START_CALIBRATION (0x1)
+#define TOTAL_NO_OF_TAPS (512)
+#define PS_10000 (10 * 1000)
+#define PS_5000 (5000)
+#define PS_2500 (2500)
+#define PS_800 (800)
+#define PS_400 (400)
+#define MAX_NO_OF_TAPS 64
+
+/* Macros to enable/disable clks */
+#define CLK_ON 0
+#define CLK_OFF 1
+
/* DMA register addresses */
#define MIO_EMM_DMA_FIFO_CFG(x) (0x00 + x->reg_off_dma)
@@ -33,8 +84,17 @@
#define MIO_EMM_DMA_INT_ENA_W1S(x) (0x40 + x->reg_off_dma)
#define MIO_EMM_DMA_INT_ENA_W1C(x) (0x48 + x->reg_off_dma)
+/* octtx2 specific registers */
+#define MIO_EMM_CALB(x) (0xC0 + x->reg_off)
+#define MIO_EMM_TAP(x) (0xC8 + x->reg_off)
+#define MIO_EMM_TIMING(x) (0xD0 + x->reg_off)
+#define MIO_EMM_DEBUG(x) (0xF8 + x->reg_off)
+
/* register addresses */
#define MIO_EMM_CFG(x) (0x00 + x->reg_off)
+#define MIO_EMM_MODE(x, s) (0x08 + 8*(s) + (x)->reg_off)
+/* octtx2 specific register */
+#define MIO_EMM_IO_CTL(x) (0x40 + x->reg_off)
#define MIO_EMM_SWITCH(x) (0x48 + x->reg_off)
#define MIO_EMM_DMA(x) (0x50 + x->reg_off)
#define MIO_EMM_CMD(x) (0x58 + x->reg_off)
@@ -56,6 +116,7 @@ struct cvm_mmc_host {
struct device *dev;
void __iomem *base;
void __iomem *dma_base;
+ struct pci_dev *pdev;
int reg_off;
int reg_off_dma;
u64 emm_cfg;
@@ -63,23 +124,30 @@ struct cvm_mmc_host {
int last_slot;
struct clk *clk;
int sys_freq;
-
- struct mmc_request *current_req;
- struct sg_mapping_iter smi;
- bool dma_active;
+ int max_freq;
bool use_sg;
-
bool has_ciu3;
+ bool powered;
+ bool use_vqmmc; /* slots must be disabled across a vqmmc switch */
bool big_dma_addr;
bool need_irq_handler_lock;
+ bool tap_requires_noclk;
+ bool calibrate_glitch;
+ bool cond_clock_glitch;
spinlock_t irq_handler_lock;
struct semaphore mmc_serializer;
+#if IS_ENABLED(CONFIG_MMC_PSTORE)
+ bool pstore;
+#endif
struct gpio_desc *global_pwr_gpiod;
atomic_t shared_power_users;
struct cvm_mmc_slot *slot[CAVIUM_MAX_MMC];
struct platform_device *slot_pdev[CAVIUM_MAX_MMC];
+ /* octtx2 specific */
+ unsigned int per_tap_delay; /* per-tap delay in picoseconds */
+ unsigned long delay_logged; /* per-ios.timing bitmask */
void (*set_shared_power)(struct cvm_mmc_host *, int);
void (*acquire_bus)(struct cvm_mmc_host *);
@@ -94,16 +162,42 @@ struct cvm_mmc_host {
struct cvm_mmc_slot {
struct mmc_host *mmc; /* slot-level mmc_core object */
struct cvm_mmc_host *host; /* common hw for all slots */
+ struct mmc_request *current_req;
u64 clock;
+ u32 ecount, gcount;
u64 cached_switch;
u64 cached_rca;
- unsigned int cmd_cnt; /* sample delay */
- unsigned int dat_cnt; /* sample delay */
+ struct sg_mapping_iter smi;
+ bool dma_active;
+
+ u64 taps; /* otx2: MIO_EMM_TIMING */
+ unsigned int cmd_cnt; /* otx: sample cmd in delay */
+ unsigned int data_cnt; /* otx: sample data in delay */
+
+ int drive; /* Current drive */
+ int slew; /* clock skew */
int bus_id;
+ bool cmd6_pending;
+ u64 want_switch;
+ u32 hs400_tuning_block; /* Block number used for tuning */
+ bool hs400_tuning_block_present;
+
+#define MMC_TIMINGS_COUNT ((MMC_TIMING_MMC_HS400) + 1)
+ u32 data_out_taps_dly[MMC_TIMINGS_COUNT];
+ u32 cmd_out_taps_dly[MMC_TIMINGS_COUNT];
+ u32 cmd_in_taps_dly[MMC_TIMINGS_COUNT];
+ u32 data_in_taps_dly[MMC_TIMINGS_COUNT];
+ /*
+ * Flags indicating input timings modified by the user.
+ * A flag is set when a cmd-in-XXX or data-in-XXX value is given for a
+ * mode in the DT. They can be used to control the behavior of the
+ * timing tuning algorithm.
+ */
+ u32 in_timings_ctl;
};
struct cvm_mmc_cr_type {
@@ -161,6 +255,21 @@ struct cvm_mmc_cr_mods {
#define MIO_EMM_DMA_CFG_SIZE GENMASK_ULL(55, 36)
#define MIO_EMM_DMA_CFG_ADR GENMASK_ULL(35, 0)
+#define MIO_EMM_CFG_BUS_ENA GENMASK_ULL(3, 0)
+
+#define MIO_EMM_IO_CTL_DRIVE GENMASK_ULL(3, 2)
+#define MIO_EMM_IO_CTL_SLEW BIT_ULL(0)
+
+#define MIO_EMM_CALB_START BIT_ULL(0)
+#define MIO_EMM_TAP_DELAY GENMASK_ULL(7, 0)
+
+#define MIO_EMM_TIMING_CMD_IN GENMASK_ULL(53, 48)
+#define MIO_EMM_TIMING_CMD_OUT GENMASK_ULL(37, 32)
+#define MIO_EMM_TIMING_DATA_IN GENMASK_ULL(21, 16)
+#define MIO_EMM_TIMING_DATA_OUT GENMASK_ULL(5, 0)
+
+#define MIO_EMM_INT_NCB_RAS BIT_ULL(8)
+#define MIO_EMM_INT_NCB_FLT BIT_ULL(7)
#define MIO_EMM_INT_SWITCH_ERR BIT_ULL(6)
#define MIO_EMM_INT_SWITCH_DONE BIT_ULL(5)
#define MIO_EMM_INT_DMA_ERR BIT_ULL(4)
@@ -169,6 +278,9 @@ struct cvm_mmc_cr_mods {
#define MIO_EMM_INT_CMD_DONE BIT_ULL(1)
#define MIO_EMM_INT_BUF_DONE BIT_ULL(0)
+#define MIO_EMM_DMA_INT_FIFO BIT_ULL(1)
+#define MIO_EMM_DMA_INT_DMA BIT_ULL(0)
+
#define MIO_EMM_RSP_STS_BUS_ID GENMASK_ULL(61, 60)
#define MIO_EMM_RSP_STS_CMD_VAL BIT_ULL(59)
#define MIO_EMM_RSP_STS_SWITCH_VAL BIT_ULL(58)
@@ -200,16 +312,48 @@ struct cvm_mmc_cr_mods {
#define MIO_EMM_SWITCH_ERR0 BIT_ULL(58)
#define MIO_EMM_SWITCH_ERR1 BIT_ULL(57)
#define MIO_EMM_SWITCH_ERR2 BIT_ULL(56)
+#define MIO_EMM_SWITCH_ERRS GENMASK_ULL(58, 56)
+#define MIO_EMM_SWITCH_HS400_TIMING BIT_ULL(50)
+#define MIO_EMM_SWITCH_HS200_TIMING BIT_ULL(49)
#define MIO_EMM_SWITCH_HS_TIMING BIT_ULL(48)
+#define MIO_EMM_SWITCH_TIMING GENMASK_ULL(50, 48)
#define MIO_EMM_SWITCH_BUS_WIDTH GENMASK_ULL(42, 40)
#define MIO_EMM_SWITCH_POWER_CLASS GENMASK_ULL(35, 32)
+#define MIO_EMM_SWITCH_CLK GENMASK_ULL(31, 0)
#define MIO_EMM_SWITCH_CLK_HI GENMASK_ULL(31, 16)
#define MIO_EMM_SWITCH_CLK_LO GENMASK_ULL(15, 0)
+#define MIO_EMM_DEBUG_CLK_DIS BIT_ULL(20)
+#define MIO_EMM_DEBUG_RDSYNC BIT_ULL(21)
/* Protoypes */
irqreturn_t cvm_mmc_interrupt(int irq, void *dev_id);
int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host);
int cvm_mmc_of_slot_remove(struct cvm_mmc_slot *slot);
+
extern const char *cvm_mmc_irq_names[];
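+
+/*
+ * The chip family is encoded in bits [15:12] of the PCI subsystem device
+ * ID: 0xA for CN8xxx, 0xB for CN9xxx (OcteonTX2).
+ */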
+static inline bool is_mmc_8xxx(struct cvm_mmc_host *host)
+{
+#ifdef CONFIG_ARM64
+ struct pci_dev *pdev = host->pdev;
+ u32 chip_id = (pdev->subsystem_device >> 12) & 0xF;
+
+ return (chip_id == PCI_SUBSYS_DEVID_8XXX);
+#else
+ return false;
+#endif
+}
+
+static inline bool is_mmc_otx2(struct cvm_mmc_host *host)
+{
+#ifdef CONFIG_ARM64
+ struct pci_dev *pdev = host->pdev;
+ u32 chip_id = (pdev->subsystem_device >> 12) & 0xF;
+
+ return (chip_id == PCI_SUBSYS_DEVID_9XXX);
+#else
+ return false;
+#endif
+}
+
#endif
diff --git a/drivers/mmc/host/sdhci-cadence.c b/drivers/mmc/host/sdhci-cadence.c
index 6f2de54a5987..a399fe72798d 100644
--- a/drivers/mmc/host/sdhci-cadence.c
+++ b/drivers/mmc/host/sdhci-cadence.c
@@ -15,30 +15,117 @@
#include "sdhci-pltfm.h"
-/* HRS - Host Register Set (specific to Cadence) */
-#define SDHCI_CDNS_HRS04 0x10 /* PHY access port */
-#define SDHCI_CDNS_HRS04_ACK BIT(26)
-#define SDHCI_CDNS_HRS04_RD BIT(25)
-#define SDHCI_CDNS_HRS04_WR BIT(24)
-#define SDHCI_CDNS_HRS04_RDATA GENMASK(23, 16)
-#define SDHCI_CDNS_HRS04_WDATA GENMASK(15, 8)
-#define SDHCI_CDNS_HRS04_ADDR GENMASK(5, 0)
-
-#define SDHCI_CDNS_HRS06 0x18 /* eMMC control */
-#define SDHCI_CDNS_HRS06_TUNE_UP BIT(15)
-#define SDHCI_CDNS_HRS06_TUNE GENMASK(13, 8)
-#define SDHCI_CDNS_HRS06_MODE GENMASK(2, 0)
-#define SDHCI_CDNS_HRS06_MODE_SD 0x0
-#define SDHCI_CDNS_HRS06_MODE_MMC_SDR 0x2
-#define SDHCI_CDNS_HRS06_MODE_MMC_DDR 0x3
-#define SDHCI_CDNS_HRS06_MODE_MMC_HS200 0x4
-#define SDHCI_CDNS_HRS06_MODE_MMC_HS400 0x5
-#define SDHCI_CDNS_HRS06_MODE_MMC_HS400ES 0x6
+#define DRV_CALC_SETTINGS (1)
+
+#define SDMCLK_MAX_FREQ 200000000
+
+#define SDHCI_CDNS_HRS00 0x00
+#define SDHCI_CDNS_HRS00_SWR BIT(0)
+
+#define SDHCI_CDNS_HRS02 0x08 /* PHY access port */
+#define SDHCI_CDNS_HRS04 0x10 /* PHY access port */
+/* SD 4.0 Controller HRS - Host Register Set (specific to Cadence) */
+#define SDHCI_CDNS_SD4_HRS04_ACK BIT(26)
+#define SDHCI_CDNS_SD4_HRS04_RD BIT(25)
+#define SDHCI_CDNS_SD4_HRS04_WR BIT(24)
+#define SDHCI_CDNS_SD4_HRS04_RDATA GENMASK(23, 16)
+#define SDHCI_CDNS_SD4_HRS04_WDATA GENMASK(15, 8)
+#define SDHCI_CDNS_SD4_HRS04_ADDR GENMASK(5, 0)
+
+#define SDHCI_CDNS_HRS06 0x18 /* eMMC control */
+#define SDHCI_CDNS_HRS06_TUNE_UP BIT(15)
+#define SDHCI_CDNS_HRS06_TUNE GENMASK(13, 8)
+#define SDHCI_CDNS_HRS06_MODE GENMASK(2, 0)
+#define SDHCI_CDNS_HRS06_MODE_SD 0x0
+#define SDHCI_CDNS_HRS06_MODE_LEGACY 0x1
+#define SDHCI_CDNS_HRS06_MODE_MMC_SDR 0x2
+#define SDHCI_CDNS_HRS06_MODE_MMC_DDR 0x3
+#define SDHCI_CDNS_HRS06_MODE_MMC_HS200 0x4
+#define SDHCI_CDNS_HRS06_MODE_MMC_HS400 0x5
+#define SDHCI_CDNS_HRS06_MODE_MMC_HS400ES 0x6
+
+/* SD 6.0 Controller HRS - Host Register Set (Specific to Cadence) */
+#define SDHCI_CDNS_SD6_HRS04_ADDR GENMASK(15, 0)
+
+#define SDHCI_CDNS_HRS05 0x14
+
+#define SDHCI_CDNS_HRS07 0x1C
+#define SDHCI_CDNS_HRS07_RW_COMPENSATE GENMASK(20, 16)
+#define SDHCI_CDNS_HRS07_IDELAY_VAL GENMASK(4, 0)
+
+#define SDHCI_CDNS_HRS09 0x24
+#define SDHCI_CDNS_HRS09_RDDATA_EN BIT(5)
+#define SDHCI_CDNS_HRS09_RDCMD_EN BIT(4)
+#define SDHCI_CDNS_HRS09_EXTENDED_WR_MODE BIT(3)
+#define SDHCI_CDNS_HRS09_EXTENDED_RD_MODE BIT(2)
+#define SDHCI_CDNS_HRS09_PHY_INIT_COMPLETE BIT(1)
+#define SDHCI_CDNS_HRS09_PHY_SW_RESET BIT(0)
+
+#define SDHCI_CDNS_HRS10 0x28
+#define SDHCI_CDNS_HRS10_HCSDCLKADJ GENMASK(19, 16)
+
+#define SDHCI_CDNS_HRS11 0x2c
+/* Reset related */
+#define SDHCI_CDNS_SRS11_SW_RESET_ALL BIT(24)
+#define SDHCI_CDNS_SRS11_SW_RESET_CMD BIT(25)
+#define SDHCI_CDNS_SRS11_SW_RESET_DAT BIT(26)
+
+#define SDHCI_CDNS_HRS16 0x40
+#define SDHCI_CDNS_HRS16_WRDATA1_SDCLK_DLY GENMASK(31, 28)
+#define SDHCI_CDNS_HRS16_WRDATA0_SDCLK_DLY GENMASK(27, 24)
+#define SDHCI_CDNS_HRS16_WRCMD1_SDCLK_DLY GENMASK(23, 20)
+#define SDHCI_CDNS_HRS16_WRCMD0_SDCLK_DLY GENMASK(19, 16)
+#define SDHCI_CDNS_HRS16_WRDATA1_DLY GENMASK(15, 12)
+#define SDHCI_CDNS_HRS16_WRDATA0_DLY GENMASK(11, 8)
+#define SDHCI_CDNS_HRS16_WRCMD1_DLY GENMASK(7, 4)
+#define SDHCI_CDNS_HRS16_WRCMD0_DLY GENMASK(3, 0)
+
+/* PHY registers for SD6 controller */
+#define SDHCI_CDNS_SD6_PHY_DQ_TIMING 0x2000
+#define SDHCI_CDNS_SD6_PHY_DQ_TIMING_IO_MASK_ALWAYS_ON BIT(31)
+#define SDHCI_CDNS_SD6_PHY_DQ_TIMING_IO_MASK_END GENMASK(29, 27)
+#define SDHCI_CDNS_SD6_PHY_DQ_TIMING_IO_MASK_START GENMASK(26, 24)
+#define SDHCI_CDNS_SD6_PHY_DQ_TIMING_DATA_SELECT_OE_END GENMASK(2, 0)
+
+#define SDHCI_CDNS_SD6_PHY_DQS_TIMING 0x2004
+#define SDHCI_CDNS_SD6_PHY_DQS_TIMING_USE_EXT_LPBK_DQS BIT(22)
+#define SDHCI_CDNS_SD6_PHY_DQS_TIMING_USE_LPBK_DQS BIT(21)
+#define SDHCI_CDNS_SD6_PHY_DQS_TIMING_USE_PHONY_DQS BIT(20)
+#define SDHCI_CDNS_SD6_PHY_DQS_TIMING_USE_PHONY_DQS_CMD BIT(19)
+
+#define SDHCI_CDNS_SD6_PHY_GATE_LPBK 0x2008
+#define SDHCI_CDNS_SD6_PHY_GATE_LPBK_SYNC_METHOD BIT(31)
+#define SDHCI_CDNS_SD6_PHY_GATE_LPBK_SW_HALF_CYCLE_SHIFT BIT(28)
+#define SDHCI_CDNS_SD6_PHY_GATE_LPBK_RD_DEL_SEL GENMASK(24, 19)
+#define SDHCI_CDNS_SD6_PHY_GATE_LPBK_GATE_CFG_ALWAYS_ON BIT(6)
+
+#define SDHCI_CDNS_SD6_PHY_DLL_MASTER 0x200C
+#define SDHCI_CDNS_SD6_PHY_DLL_MASTER_BYPASS_MODE BIT(23)
+#define SDHCI_CDNS_SD6_PHY_DLL_MASTER_PHASE_DETECT_SEL GENMASK(22, 20)
+#define SDHCI_CDNS_SD6_PHY_DLL_MASTER_DLL_LOCK_NUM GENMASK(18, 16)
+#define SDHCI_CDNS_SD6_PHY_DLL_MASTER_DLL_START_POINT GENMASK(7, 0)
+
+#define SDHCI_CDNS_SD6_PHY_DLL_SLAVE 0x2010
+#define SDHCI_CDNS_SD6_PHY_DLL_SLAVE_READ_DQS_CMD_DELAY GENMASK(31, 24)
+#define SDHCI_CDNS_SD6_PHY_DLL_SLAVE_CLK_WRDQS_DELAY GENMASK(23, 16)
+#define SDHCI_CDNS_SD6_PHY_DLL_SLAVE_CLK_WR_DELAY GENMASK(15, 8)
+#define SDHCI_CDNS_SD6_PHY_DLL_SLAVE_READ_DQS_DELAY GENMASK(7, 0)
+
+#define SDHCI_CDNS_SD6_PHY_CTRL 0x2080
+#define SDHCI_CDNS_SD6_PHY_CTRL_PHONY_DQS_TIMING GENMASK(9, 4)
+
+#define SDHCI_CDNS_SD6_PHY_GPIO_CTRL1 0x208c
+#define SDHCI_CDNS_SD6_PHY_GPIO_CTRL1_DRV GENMASK(6, 5)
+#define SDHCI_CDNS_SD6_PHY_GPIO_CTRL1_DRV_OVR_EN BIT(4)
+#define SDHCI_CDNS_SD6_PHY_GPIO_CTRL1_SLEW GENMASK(2, 1)
+#define SDHCI_CDNS_SD6_PHY_GPIO_CTRL1_SLEW_OVR_EN BIT(0)
/* SRS - Slot Register Set (SDHCI-compatible) */
#define SDHCI_CDNS_SRS_BASE 0x200
-/* PHY */
+/* PHY registers for SD4 controller */
#define SDHCI_CDNS_PHY_DLY_SD_HS 0x00
#define SDHCI_CDNS_PHY_DLY_SD_DEFAULT 0x01
#define SDHCI_CDNS_PHY_DLY_UHS_SDR12 0x02
@@ -59,24 +146,43 @@
*/
#define SDHCI_CDNS_MAX_TUNING_LOOP 40
-struct sdhci_cdns_phy_param {
+struct sdhci_cdns_priv;
+
+struct sdhci_cdns_sd4_phy_param {
u8 addr;
u8 data;
};
+struct sdhci_cdns_data {
+ int (*phy_init)(struct sdhci_cdns_priv *priv);
+ int (*set_tune_val)(struct sdhci_host *host, unsigned int val);
+};
+
+struct sdhci_cdns_sd4_phy {
+ unsigned int nr_phy_params;
+ struct sdhci_cdns_sd4_phy_param phy_params[];
+};
+
struct sdhci_cdns_priv {
void __iomem *hrs_addr;
bool enhanced_strobe;
- unsigned int nr_phy_params;
- struct sdhci_cdns_phy_param phy_params[];
+ const struct sdhci_cdns_data *cdns_data;
+ void *phy;
};
-struct sdhci_cdns_phy_cfg {
+struct sdhci_cdns_sd4_phy_cfg {
const char *property;
u8 addr;
};
-static const struct sdhci_cdns_phy_cfg sdhci_cdns_phy_cfgs[] = {
+struct sdhci_cdns_of_data {
+ const struct sdhci_pltfm_data *pltfm_data;
+ const struct sdhci_cdns_data *cdns_data;
+ int (*phy_probe)(struct platform_device *pdev,
+ struct sdhci_cdns_priv *priv);
+};
+
+static const struct sdhci_cdns_sd4_phy_cfg sdhci_cdns_sd4_phy_cfgs[] = {
{ "cdns,phy-input-delay-sd-highspeed", SDHCI_CDNS_PHY_DLY_SD_HS, },
{ "cdns,phy-input-delay-legacy", SDHCI_CDNS_PHY_DLY_SD_DEFAULT, },
{ "cdns,phy-input-delay-sd-uhs-sdr12", SDHCI_CDNS_PHY_DLY_UHS_SDR12, },
@@ -90,79 +196,969 @@ static const struct sdhci_cdns_phy_cfg sdhci_cdns_phy_cfgs[] = {
{ "cdns,phy-dll-delay-strobe", SDHCI_CDNS_PHY_DLY_STROBE, },
};
-static int sdhci_cdns_write_phy_reg(struct sdhci_cdns_priv *priv,
- u8 addr, u8 data)
+enum sdhci_cdns_sd6_phy_lock_mode {
+ SDHCI_CDNS_SD6_PHY_LOCK_MODE_FULL_CLK = 0,
+ SDHCI_CDNS_SD6_PHY_LOCK_MODE_HALF_CLK = 2,
+ SDHCI_CDNS_SD6_PHY_LOCK_MODE_SATURATION = 3,
+};
+
+struct sdhci_cdns_sd6_phy_timings {
+ u32 t_cmd_output_min;
+ u32 t_cmd_output_max;
+ u32 t_dat_output_min;
+ u32 t_dat_output_max;
+ u32 t_cmd_input_min;
+ u32 t_cmd_input_max;
+ u32 t_dat_input_min;
+ u32 t_dat_input_max;
+ u32 t_sdclk_min;
+ u32 t_sdclk_max;
+};
+
+struct sdhci_cdns_sd6_phy_delays {
+ u32 phy_sdclk_delay;
+ u32 phy_cmd_o_delay;
+ u32 phy_dat_o_delay;
+ u32 iocell_input_delay;
+ u32 iocell_output_delay;
+ u32 delay_element_org;
+ u32 delay_element;
+};
+
+struct sdhci_cdns_sd6_phy_settings {
+ /* SDHCI_CDNS_SD6_PHY_DLL_SLAVE */
+ u32 cp_read_dqs_cmd_delay;
+ u32 cp_read_dqs_delay;
+ u32 cp_clk_wr_delay;
+ u32 cp_clk_wrdqs_delay;
+
+ /* SDHCI_CDNS_SD6_PHY_DLL_MASTER */
+ u32 cp_dll_bypass_mode;
+ u32 cp_dll_start_point;
+
+ /* SDHCI_CDNS_SD6_PHY_DLL_OBS_REG0 */
+ u32 cp_dll_locked_mode;
+
+ /* SDHCI_CDNS_SD6_PHY_GATE_LPBK */
+ u32 cp_gate_cfg_always_on;
+ u32 cp_sync_method;
+ u32 cp_rd_del_sel;
+ u32 cp_sw_half_cycle_shift;
+ u32 cp_underrun_suppress;
+
+ /* SDHCI_CDNS_SD6_PHY_DQ_TIMING */
+ u32 cp_io_mask_always_on;
+ u32 cp_io_mask_end;
+ u32 cp_io_mask_start;
+ u32 cp_data_select_oe_end;
+
+ /* SDHCI_CDNS_SD6_PHY_DQS_TIMING */
+ u32 cp_use_ext_lpbk_dqs;
+ u32 cp_use_lpbk_dqs;
+ u8 cp_use_phony_dqs;
+ u8 cp_use_phony_dqs_cmd;
+
+ /* HRS 09 */
+ u8 sdhc_extended_rd_mode;
+ u8 sdhc_extended_wr_mode;
+ u32 sdhc_rdcmd_en;
+ u32 sdhc_rddata_en;
+
+ /* HRS10 */
+ u32 sdhc_hcsdclkadj;
+
+ /* HRS 07 */
+ u32 sdhc_idelay_val;
+ u32 sdhc_rw_compensate;
+
+ /* SRS 11 */
+ u32 sdhc_sdcfsh;
+ u32 sdhc_sdcfsl;
+
+ /* HRS 16 */
+ u32 sdhc_wrcmd0_dly;
+ u32 sdhc_wrcmd0_sdclk_dly;
+ u32 sdhc_wrcmd1_dly;
+ u32 sdhc_wrcmd1_sdclk_dly;
+ u32 sdhc_wrdata0_dly;
+ u32 sdhc_wrdata0_sdclk_dly;
+ u32 sdhc_wrdata1_dly;
+ u32 sdhc_wrdata1_sdclk_dly;
+
+ u32 hs200_tune_val;
+ u32 drive;
+ u32 slew;
+};
+
+struct sdhci_cdns_sd6_phy_intermediate_results {
+ /* TODO: consider moving the following variables to the output calculations */
+ u32 t_sdmclk_calc;
+ u32 dll_max_value;
+};
+
+struct sdhci_cdns_sd6_phy {
+ struct sdhci_cdns_sd6_phy_timings t;
+ struct sdhci_cdns_sd6_phy_delays d;
+ u32 t_sdmclk;
+ struct sdhci_cdns_sd6_phy_settings settings;
+ struct sdhci_cdns_sd6_phy_intermediate_results vars;
+ bool ddr;
+ bool tune_cmd;
+ bool tune_dat;
+ bool strobe_cmd;
+ bool strobe_dat;
+ int mode;
+ int t_sdclk;
+};
+
+static void init_hs(struct sdhci_cdns_sd6_phy_timings *t, int t_sdclk)
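+/*
+ * Per-mode I/O timing windows, in picoseconds. t_sdclk_min/t_sdclk_max give
+ * the legal SDCLK period range for the mode (e.g. 1000000 / 50 = 20000 ps,
+ * i.e. 50 MHz, and 1000000 / 0.4 = 2500000 ps, i.e. 400 kHz).
+ */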
+{
+ *t = (struct sdhci_cdns_sd6_phy_timings){
+ .t_cmd_output_min = 2000, .t_cmd_output_max = t_sdclk - 6000,
+ .t_dat_output_min = 2000, .t_dat_output_max = t_sdclk - 6000,
+ .t_cmd_input_min = 14000, .t_cmd_input_max = t_sdclk + 2500,
+ .t_dat_input_min = 14000, .t_dat_input_max = t_sdclk + 2500,
+ .t_sdclk_min = 1000000 / 50, .t_sdclk_max = 1000000 / 0.4
+ };
+}
+
+static void init_uhs_sdr12(struct sdhci_cdns_sd6_phy_timings *t, int t_sdclk)
+{
+ *t = (struct sdhci_cdns_sd6_phy_timings){
+ .t_cmd_output_min = 800, .t_cmd_output_max = t_sdclk - 3000,
+ .t_dat_output_min = 800, .t_dat_output_max = t_sdclk - 3000,
+ .t_cmd_input_min = 14000, .t_cmd_input_max = t_sdclk + 1500,
+ .t_dat_input_min = 14000, .t_dat_input_max = t_sdclk + 1500,
+ .t_sdclk_min = 1000000 / 25, .t_sdclk_max = 1000000 / 0.4
+ };
+}
+
+static void init_uhs_sdr25(struct sdhci_cdns_sd6_phy_timings *t, int t_sdclk)
+{
+ *t = (struct sdhci_cdns_sd6_phy_timings){
+ .t_cmd_output_min = 800, .t_cmd_output_max = t_sdclk - 3000,
+ .t_dat_output_min = 800, .t_dat_output_max = t_sdclk - 3000,
+ .t_cmd_input_min = 14000, .t_cmd_input_max = t_sdclk + 1500,
+ .t_dat_input_min = 14000, .t_dat_input_max = t_sdclk + 1500,
+ .t_sdclk_min = 1000000 / 50, .t_sdclk_max = 1000000 / 0.4
+ };
+}
+
+static void init_uhs_sdr50(struct sdhci_cdns_sd6_phy_timings *t, int t_sdclk)
+{
+ *t = (struct sdhci_cdns_sd6_phy_timings){
+ .t_cmd_output_min = 800, .t_cmd_output_max = t_sdclk - 3000,
+ .t_dat_output_min = 800, .t_dat_output_max = t_sdclk - 3000,
+ .t_cmd_input_min = 7500, .t_cmd_input_max = t_sdclk + 1500,
+ .t_dat_input_min = 7500, .t_dat_input_max = t_sdclk + 1500,
+ .t_sdclk_min = 1000000 / 100, .t_sdclk_max = 1000000 / 0.4
+ };
+}
+
+static void init_uhs_sdr104(struct sdhci_cdns_sd6_phy_timings *t, int t_sdclk)
+{
+ *t = (struct sdhci_cdns_sd6_phy_timings){
+ .t_cmd_output_min = 800, .t_cmd_output_max = t_sdclk - 1400,
+ .t_dat_output_min = 800, .t_dat_output_max = t_sdclk - 1400,
+ .t_cmd_input_min = 1000, .t_cmd_input_max = t_sdclk + 1000,
+ .t_dat_input_min = 1000, .t_dat_input_max = t_sdclk + 1000,
+ .t_sdclk_min = 1000000 / 200, .t_sdclk_max = 1000000 / 100
+ };
+}
+
+static void init_uhs_ddr50(struct sdhci_cdns_sd6_phy_timings *t, int t_sdclk)
+{
+ *t = (struct sdhci_cdns_sd6_phy_timings){
+ .t_cmd_output_min = 800, .t_cmd_output_max = t_sdclk - 3000,
+ .t_dat_output_min = 800, .t_dat_output_max = t_sdclk - 3000,
+ .t_cmd_input_min = 13700, .t_cmd_input_max = t_sdclk + 1500,
+ .t_dat_input_min = 7000, .t_dat_input_max = t_sdclk + 1500,
+ .t_sdclk_min = 1000000 / 50, .t_sdclk_max = 1000000 / 0.4
+ };
+}
+
+static void init_emmc_legacy(struct sdhci_cdns_sd6_phy_timings *t, int t_sdclk)
+{
+ *t = (struct sdhci_cdns_sd6_phy_timings){
+ .t_cmd_output_min = 3000, .t_cmd_output_max = t_sdclk - 3000,
+ .t_dat_output_min = 3000, .t_dat_output_max = t_sdclk - 3000,
+ .t_cmd_input_min = 11700, .t_cmd_input_max = t_sdclk + 8300,
+ .t_dat_input_min = 11700, .t_dat_input_max = t_sdclk + 8300,
+ .t_sdclk_min = 1000000 / 25, .t_sdclk_max = 1000000 / 0.4
+ };
+}
+
+static void init_emmc_sdr(struct sdhci_cdns_sd6_phy_timings *t, int t_sdclk)
+{
+ *t = (struct sdhci_cdns_sd6_phy_timings){
+ .t_cmd_output_min = 3000, .t_cmd_output_max = t_sdclk - 3000,
+ .t_dat_output_min = 3000, .t_dat_output_max = t_sdclk - 3000,
+ .t_cmd_input_min = 13700, .t_cmd_input_max = t_sdclk + 2500,
+ .t_dat_input_min = 13700, .t_dat_input_max = t_sdclk + 2500,
+ .t_sdclk_min = 1000000 / 50, .t_sdclk_max = 1000000 / 0.4
+ };
+}
+
+static void init_emmc_ddr(struct sdhci_cdns_sd6_phy_timings *t, int t_sdclk)
+{
+ *t = (struct sdhci_cdns_sd6_phy_timings){
+ .t_cmd_output_min = 3000, .t_cmd_output_max = t_sdclk - 3000,
+ .t_dat_output_min = 2500, .t_dat_output_max = t_sdclk - 2500,
+ .t_cmd_input_min = 13700, .t_cmd_input_max = t_sdclk + 2500,
+ .t_dat_input_min = 7000, .t_dat_input_max = t_sdclk + 1500,
+ .t_sdclk_min = 1000000 / 50, .t_sdclk_max = 1000000 / 0.4
+ };
+}
+
+static void init_emmc_hs200(struct sdhci_cdns_sd6_phy_timings *t, int t_sdclk)
+{
+ *t = (struct sdhci_cdns_sd6_phy_timings){
+ .t_cmd_output_min = 800, .t_cmd_output_max = t_sdclk - 1400,
+ .t_dat_output_min = 800, .t_dat_output_max = t_sdclk - 1400,
+ .t_cmd_input_min = 1000, .t_cmd_input_max = t_sdclk + 1000,
+ .t_dat_input_min = 1000, .t_dat_input_max = t_sdclk + 1000,
+ .t_sdclk_min = 1000000 / 200, .t_sdclk_max = 1000000 / 100
+ };
+}
+
+/* HS400 and HS400ES */
+static void init_emmc_hs400(struct sdhci_cdns_sd6_phy_timings *t, int t_sdclk)
+{
+ *t = (struct sdhci_cdns_sd6_phy_timings){
+ .t_cmd_output_min = 800, .t_cmd_output_max = t_sdclk - 1400,
+ .t_dat_output_min = 400, .t_dat_output_max = t_sdclk - 400,
+ .t_cmd_input_min = 1000, .t_cmd_input_max = t_sdclk + 1000,
+ .t_dat_input_min = 1000, .t_dat_input_max = t_sdclk + 1000,
+ .t_sdclk_min = 1000000 / 200, .t_sdclk_max = 1000000 / 100
+ };
+}
+
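+/* Indexed by the HRS06 mode value returned by sdhci_cdns_sd6_get_mode(). */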
+static void (*init_timings[])(struct sdhci_cdns_sd6_phy_timings *, int) = {
+ &init_hs, &init_emmc_legacy, &init_emmc_sdr,
+ &init_emmc_ddr, &init_emmc_hs200, &init_emmc_hs400,
+ &init_uhs_sdr12, &init_uhs_sdr25, &init_uhs_sdr50,
+ &init_uhs_sdr104, &init_uhs_ddr50
+};
+
+static u32 sdhci_cdns_sd6_get_mode(struct sdhci_host *host, unsigned int timing);
+
+#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
+static u32 sdhci_cdns_sd6_readl(struct sdhci_host *host, int reg)
+{
+ return readl(host->ioaddr + reg);
+}
+
+static void sdhci_cdns_sd6_writel(struct sdhci_host *host, u32 val, int reg)
+{
+ writel(val, host->ioaddr + reg);
+}
+
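+/*
+ * 16- and 8-bit reads are emulated by reading the containing 32-bit word
+ * and extracting the requested lane; the register file is assumed to
+ * require aligned 32-bit read accesses. Writes go through unchanged.
+ */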
+static u16 sdhci_cdns_sd6_readw(struct sdhci_host *host, int reg)
+{
+ u32 val, regoff;
+
+ regoff = reg & ~3;
+
+ val = readl(host->ioaddr + regoff);
+ if ((reg & 0x3) == 0)
+ return (val & 0xFFFF);
+ else
+ return ((val >> 16) & 0xFFFF);
+}
+
+static void sdhci_cdns_sd6_writew(struct sdhci_host *host, u16 val, int reg)
+{
+ writew(val, host->ioaddr + reg);
+}
+
+static u8 sdhci_cdns_sd6_readb(struct sdhci_host *host, int reg)
+{
+ u32 val, regoff;
+
+ regoff = reg & ~3;
+
+ val = readl(host->ioaddr + regoff);
+ switch (reg & 3) {
+ case 0:
+ return (val & 0xFF);
+ case 1:
+ return ((val >> 8) & 0xFF);
+ case 2:
+ return ((val >> 16) & 0xFF);
+ case 3:
+ return ((val >> 24) & 0xFF);
+ }
+ return 0;
+}
+
+static void sdhci_cdns_sd6_writeb(struct sdhci_host *host, u8 val, int reg)
+{
+ writeb(val, host->ioaddr + reg);
+}
+#endif
+
+static int sdhci_cdns_sd6_phy_clock_validate(struct sdhci_cdns_sd6_phy *phy)
+{
+ int status = 0;
+ u32 t_sdclk;
+
+ if (phy->t_sdclk < phy->t.t_sdclk_min)
+ t_sdclk = phy->t.t_sdclk_min;
+ else
+ t_sdclk = phy->t_sdclk;
+
+#ifndef DRV_CALC_SETTINGS
+ if (t_sdclk < phy->t_sdmclk)
+ status = -1;
+
+ if (t_sdclk % phy->t_sdmclk)
+ status = -1;
+
+ if ((t_sdclk < phy->t.t_sdclk_min) || (t_sdclk > phy->t.t_sdclk_max))
+ status = -1;
+#endif
+
+ return status;
+}
+
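+/*
+ * Try to lock the master DLL on the t_sdmclk period: the period must fit in
+ * at most 256 delay elements (doubling the element length once to allow a
+ * half-clock lock mode). Returns non-zero if even that fails, in which case
+ * the caller falls back to DLL bypass mode.
+ */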
+static int sdhci_cdns_sd6_phy_lock_dll(struct sdhci_cdns_sd6_phy *phy)
+{
+ u32 delay_element = phy->d.delay_element_org;
+ u32 delay_elements_in_sdmclk;
+ enum sdhci_cdns_sd6_phy_lock_mode mode;
+
+ delay_elements_in_sdmclk = DIV_ROUND_UP(phy->t_sdmclk, delay_element);
+ if (delay_elements_in_sdmclk > 256) {
+ delay_element *= 2;
+ delay_elements_in_sdmclk = DIV_ROUND_UP(phy->t_sdmclk,
+ delay_element);
+
+ if (delay_elements_in_sdmclk > 256)
+ return -1;
+
+ mode = SDHCI_CDNS_SD6_PHY_LOCK_MODE_HALF_CLK;
+ phy->vars.dll_max_value = 127;
+ } else {
+ mode = SDHCI_CDNS_SD6_PHY_LOCK_MODE_FULL_CLK;
+ phy->vars.dll_max_value = 255;
+ }
+
+ phy->vars.t_sdmclk_calc = delay_element * delay_elements_in_sdmclk;
+ phy->d.delay_element = delay_element;
+ phy->settings.cp_dll_locked_mode = mode;
+ phy->settings.cp_dll_bypass_mode = 0;
+
+ return 0;
+}
+
+static void sdhci_cdns_sd6_phy_dll_bypass(struct sdhci_cdns_sd6_phy *phy)
+{
+ phy->vars.dll_max_value = 256;
+ phy->settings.cp_dll_bypass_mode = 1;
+ phy->settings.cp_dll_locked_mode =
+ SDHCI_CDNS_SD6_PHY_LOCK_MODE_SATURATION;
+}
+
+static void sdhci_cdns_sd6_phy_configure_dll(struct sdhci_cdns_sd6_phy *phy)
+{
+ if (phy->settings.sdhc_extended_wr_mode == 0) {
+ if (sdhci_cdns_sd6_phy_lock_dll(phy) == 0)
+ return;
+ }
+ sdhci_cdns_sd6_phy_dll_bypass(phy);
+}
+
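+/*
+ * Compute the write-path (output) delays for either the CMD or the DAT
+ * lines. In extended write mode the output hold requirement is met with
+ * whole/half SDCLK delays (wrN_dly); otherwise the DLL slave delay
+ * (clk_wr_delay) provides the hold margin.
+ */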
+static void sdhci_cdns_sd6_phy_calc_out(struct sdhci_cdns_sd6_phy *phy,
+ bool cmd_not_dat)
+{
+ u32 wr0_dly = 0, wr1_dly = 0, output_min, output_max, phy_o_delay,
+ clk_wr_delay = 0, wr0_sdclk_dly = 0, wr1_sdclk_dly = 0;
+ bool data_ddr = phy->ddr && !cmd_not_dat;
+ int t;
+
+ if (cmd_not_dat) {
+ output_min = phy->t.t_cmd_output_min;
+ output_max = phy->t.t_cmd_output_max;
+ phy_o_delay = phy->d.phy_cmd_o_delay;
+ } else {
+ output_min = phy->t.t_dat_output_min;
+ output_max = phy->t.t_dat_output_max;
+ phy_o_delay = phy->d.phy_dat_o_delay;
+ }
+
+ clk_wr_delay = 0;
+ if (data_ddr)
+ wr0_sdclk_dly = wr1_sdclk_dly = 1;
+
+ t = phy_o_delay - phy->d.phy_sdclk_delay - output_min;
+ if ((t < 0) && (phy->settings.sdhc_extended_wr_mode == 1)) {
+ u32 n_half_cycle = DIV_ROUND_UP(-t * 2, phy->t_sdmclk);
+
+ wr0_dly = (n_half_cycle + 1) / 2;
+ if (data_ddr)
+ wr1_dly = (n_half_cycle + 1) / 2;
+ else
+ wr1_dly = (n_half_cycle + 1) % 2 + wr0_dly - 1;
+ }
+
+ if (phy->settings.sdhc_extended_wr_mode == 0) {
+ u32 out_hold, out_setup, out_hold_margin;
+ u32 n;
+
+ if (!data_ddr)
+ wr0_dly = 1;
+
+ out_setup = output_max;
+ out_hold = output_min;
+ out_hold_margin = DIV_ROUND_UP(out_setup - out_hold, 4);
+ out_hold += out_hold_margin;
+
+ if (phy->settings.cp_dll_bypass_mode == 0)
+ n = DIV_ROUND_UP(256 * out_hold, phy->vars.t_sdmclk_calc);
+ else
+ n = DIV_ROUND_UP(out_hold, phy->d.delay_element) - 1;
+
+ if (n <= phy->vars.dll_max_value)
+ clk_wr_delay = n;
+ else
+ clk_wr_delay = 255;
+ } else {
+ /* sdhc_extended_wr_mode = 1 - PHY IO cell work in SDR mode */
+ clk_wr_delay = 0;
+ }
+
+ if (cmd_not_dat) {
+ phy->settings.sdhc_wrcmd0_dly = wr0_dly;
+ phy->settings.sdhc_wrcmd1_dly = wr1_dly;
+ phy->settings.cp_clk_wrdqs_delay = clk_wr_delay;
+ phy->settings.sdhc_wrcmd0_sdclk_dly = wr0_sdclk_dly;
+ phy->settings.sdhc_wrcmd1_sdclk_dly = wr1_sdclk_dly;
+ } else {
+ phy->settings.sdhc_wrdata0_dly = wr0_dly;
+ phy->settings.sdhc_wrdata1_dly = wr1_dly;
+ phy->settings.cp_clk_wr_delay = clk_wr_delay;
+ phy->settings.sdhc_wrdata0_sdclk_dly = wr0_sdclk_dly;
+ phy->settings.sdhc_wrdata1_sdclk_dly = wr1_sdclk_dly;
+ }
+}
+
+static void sdhci_cdns_sd6_phy_calc_cmd_out(struct sdhci_cdns_sd6_phy *phy)
+{
+ sdhci_cdns_sd6_phy_calc_out(phy, true);
+}
+
+static void sdhci_cdns_sd6_phy_calc_cmd_in(struct sdhci_cdns_sd6_phy *phy)
+{
+ phy->settings.cp_io_mask_end =
+ ((phy->d.iocell_output_delay + phy->d.iocell_input_delay) * 2)
+ / phy->t_sdmclk;
+
+ if (phy->settings.cp_io_mask_end >= 8)
+ phy->settings.cp_io_mask_end = 7;
+
+ if (phy->strobe_cmd && (phy->settings.cp_io_mask_end > 0))
+ phy->settings.cp_io_mask_end--;
+
+ if (phy->strobe_cmd) {
+ phy->settings.cp_use_phony_dqs_cmd = 0;
+ phy->settings.cp_read_dqs_cmd_delay = 64;
+ } else {
+ phy->settings.cp_use_phony_dqs_cmd = 1;
+ phy->settings.cp_read_dqs_cmd_delay = 0;
+ }
+
+ if ((phy->mode == MMC_TIMING_MMC_HS400 && !phy->strobe_cmd)
+ || phy->mode == MMC_TIMING_MMC_HS200)
+ phy->settings.cp_read_dqs_cmd_delay =
+ phy->settings.hs200_tune_val;
+}
+
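+/*
+ * Estimate the read-data path latency (I/O cells, loopback DQS, FIFO and
+ * synchronizers) and convert it into whole SDCLK cycles for the HRS10
+ * HCSDCLKADJ field, clamped to 15.
+ */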
+static void sdhci_cdns_sd6_phy_calc_dat_in(struct sdhci_cdns_sd6_phy *phy)
+{
+ u32 hcsdclkadj = 0;
+
+ if (phy->strobe_dat) {
+ phy->settings.cp_use_phony_dqs = 0;
+ phy->settings.cp_read_dqs_delay = 64;
+ } else {
+ phy->settings.cp_use_phony_dqs = 1;
+ phy->settings.cp_read_dqs_delay = 0;
+ }
+
+ if (phy->mode == MMC_TIMING_MMC_HS200)
+ phy->settings.cp_read_dqs_delay =
+ phy->settings.hs200_tune_val;
+
+ if (phy->strobe_dat) {
+ /* dqs loopback input via IO cell */
+ hcsdclkadj += phy->d.iocell_input_delay;
+ /* dfi_dqs_in: mem_dqs -> clean_dqs_mod; delay of hic_dll_dqs_nand2 */
+ hcsdclkadj += phy->d.delay_element / 2;
+ /* delay line */
+ hcsdclkadj += phy->t_sdclk / 2;
+ /* PHY FIFO write pointer */
+ hcsdclkadj += phy->t_sdclk / 2 + phy->d.delay_element;
+ /* 1st synchronizer */
+ hcsdclkadj += DIV_ROUND_UP(hcsdclkadj, phy->t_sdmclk)
+ * phy->t_sdmclk - hcsdclkadj;
+ /*
+ * 2nd synchronizer + PHY FIFO read pointer + PHY rddata
+ * + PHY rddata registered, + FIFO 1st ciu_en
+ */
+ hcsdclkadj += 5 * phy->t_sdmclk;
+ /* FIFO 2nd ciu_en */
+ hcsdclkadj += phy->t_sdclk;
+
+ hcsdclkadj /= phy->t_sdclk;
+ } else {
+ u32 n;
+
+ /* rebar PHY delay */
+ hcsdclkadj += 2 * phy->t_sdmclk;
+ /* rebar output via IO cell */
+ hcsdclkadj += phy->d.iocell_output_delay;
+ /* dqs loopback input via IO cell */
+ hcsdclkadj += phy->d.iocell_input_delay;
+ /* dfi_dqs_in: mem_dqs -> clean_dqs_mod delay of hic_dll_dqs_nand2 */
+ hcsdclkadj += phy->d.delay_element / 2;
+ /* dll: one delay element between SIGI_0 and SIGO_0 */
+ hcsdclkadj += phy->d.delay_element;
+ /* dfi_dqs_in: mem_dqs_delayed -> clk_dqs delay of hic_dll_dqs_nand2 */
+ hcsdclkadj += phy->d.delay_element / 2;
+ /* deskew DLL: clk_dqs -> clk_dqN: one delay element */
+ hcsdclkadj += phy->d.delay_element;
+
+ if (phy->t_sdclk == phy->t_sdmclk)
+ n = (hcsdclkadj - 2 * phy->t_sdmclk) / phy->t_sdclk;
+ else
+ n = hcsdclkadj / phy->t_sdclk;
+
+ /* phase shift within one t_sdclk clock cycle caused by rebar - lbk dqs delay */
+ hcsdclkadj = hcsdclkadj % phy->t_sdclk;
+ /* PHY FIFO write pointer */
+ hcsdclkadj += phy->t_sdclk / 2;
+ /* 1st synchronizer */
+ hcsdclkadj += DIV_ROUND_UP(hcsdclkadj, phy->t_sdmclk)
+ * phy->t_sdmclk - hcsdclkadj;
+ /*
+ * 2nd synchronizer + PHY FIFO read pointer + PHY rddata
+ * + PHY rddata registered
+ */
+ hcsdclkadj += 4 * phy->t_sdmclk;
+
+ if ((phy->t_sdclk / phy->t_sdmclk) > 1) {
+ u32 tmp1, tmp2;
+
+ tmp1 = hcsdclkadj;
+ tmp2 = (hcsdclkadj / phy->t_sdclk) * phy->t_sdclk
+ + phy->t_sdclk - phy->t_sdmclk;
+ if (tmp1 == tmp2)
+ tmp2 += phy->t_sdclk;
+
+ /* FIFO aligns to clock cycle before ciu_en */
+ hcsdclkadj += tmp2 - tmp1;
+ }
+
+ /* FIFO 1st ciu_en */
+ hcsdclkadj += phy->t_sdmclk;
+ /* FIFO 2nd ciu_en */
+ hcsdclkadj += phy->t_sdclk;
+
+ hcsdclkadj /= phy->t_sdclk;
+
+ hcsdclkadj += n;
+
+ if ((phy->t_sdclk / phy->t_sdmclk) >= 2) {
+ if ((phy->mode == MMC_TIMING_UHS_DDR50)
+ || (phy->mode == MMC_TIMING_MMC_DDR52))
+ hcsdclkadj -= 2;
+ else
+ hcsdclkadj -= 1;
+ } else if ((phy->t_sdclk / phy->t_sdmclk) == 1) {
+ hcsdclkadj += 2;
+ }
+
+ if (phy->tune_dat)
+ hcsdclkadj -= 1;
+ }
+
+ if (hcsdclkadj > 15)
+ hcsdclkadj = 15;
+
+ phy->settings.sdhc_hcsdclkadj = hcsdclkadj;
+}
+
+static void sdhci_cdns_sd6_phy_calc_dat_out(struct sdhci_cdns_sd6_phy *phy)
+{
+ sdhci_cdns_sd6_phy_calc_out(phy, false);
+}
+
+static void sdhci_cdns_sd6_phy_calc_io(struct sdhci_cdns_sd6_phy *phy)
+{
+ u32 rw_compensate;
+
+ rw_compensate = (phy->d.iocell_input_delay + phy->d.iocell_output_delay)
+ / phy->t_sdmclk + phy->settings.sdhc_wrdata0_dly + 5 + 3;
+
+ phy->settings.sdhc_idelay_val = (2 * phy->d.iocell_input_delay)
+ / phy->t_sdmclk;
+
+ phy->settings.cp_io_mask_start = 0;
+ if ((phy->t_sdclk == phy->t_sdmclk) && (rw_compensate > 10))
+ phy->settings.cp_io_mask_start = 2 * (rw_compensate - 10);
+
+ if (phy->mode == MMC_TIMING_UHS_SDR104)
+ phy->settings.cp_io_mask_start++;
+
+ if ((phy->t_sdclk == phy->t_sdmclk) && (phy->mode == MMC_TIMING_UHS_SDR50))
+ phy->settings.cp_io_mask_start++;
+
+ phy->settings.sdhc_rw_compensate = rw_compensate;
+}
+
+static void sdhci_cdns_sd6_phy_calc_settings(struct sdhci_cdns_sd6_phy *phy)
+{
+ sdhci_cdns_sd6_phy_calc_cmd_out(phy);
+ sdhci_cdns_sd6_phy_calc_cmd_in(phy);
+ sdhci_cdns_sd6_phy_calc_dat_out(phy);
+ sdhci_cdns_sd6_phy_calc_dat_in(phy);
+ sdhci_cdns_sd6_phy_calc_io(phy);
+}
+
+static int sdhci_cdns_sd4_write_phy_reg(struct sdhci_cdns_priv *priv,
+ u8 addr, u8 data)
{
void __iomem *reg = priv->hrs_addr + SDHCI_CDNS_HRS04;
u32 tmp;
int ret;
- ret = readl_poll_timeout(reg, tmp, !(tmp & SDHCI_CDNS_HRS04_ACK),
+ ret = readl_poll_timeout(reg, tmp, !(tmp & SDHCI_CDNS_SD4_HRS04_ACK),
0, 10);
if (ret)
return ret;
- tmp = FIELD_PREP(SDHCI_CDNS_HRS04_WDATA, data) |
- FIELD_PREP(SDHCI_CDNS_HRS04_ADDR, addr);
+ tmp = FIELD_PREP(SDHCI_CDNS_SD4_HRS04_WDATA, data) |
+ FIELD_PREP(SDHCI_CDNS_SD4_HRS04_ADDR, addr);
writel(tmp, reg);
- tmp |= SDHCI_CDNS_HRS04_WR;
+ tmp |= SDHCI_CDNS_SD4_HRS04_WR;
writel(tmp, reg);
- ret = readl_poll_timeout(reg, tmp, tmp & SDHCI_CDNS_HRS04_ACK, 0, 10);
+ ret = readl_poll_timeout(reg, tmp, tmp & SDHCI_CDNS_SD4_HRS04_ACK, 0, 10);
if (ret)
return ret;
- tmp &= ~SDHCI_CDNS_HRS04_WR;
+ tmp &= ~SDHCI_CDNS_SD4_HRS04_WR;
writel(tmp, reg);
- ret = readl_poll_timeout(reg, tmp, !(tmp & SDHCI_CDNS_HRS04_ACK),
+ ret = readl_poll_timeout(reg, tmp, !(tmp & SDHCI_CDNS_SD4_HRS04_ACK),
0, 10);
return ret;
}
-static unsigned int sdhci_cdns_phy_param_count(struct device_node *np)
+static unsigned int sdhci_cdns_sd4_phy_param_count(struct device_node *np)
{
unsigned int count = 0;
int i;
- for (i = 0; i < ARRAY_SIZE(sdhci_cdns_phy_cfgs); i++)
- if (of_property_read_bool(np, sdhci_cdns_phy_cfgs[i].property))
+ for (i = 0; i < ARRAY_SIZE(sdhci_cdns_sd4_phy_cfgs); i++)
+ if (of_property_read_bool(np, sdhci_cdns_sd4_phy_cfgs[i].property))
count++;
return count;
}
-static void sdhci_cdns_phy_param_parse(struct device_node *np,
- struct sdhci_cdns_priv *priv)
+static void sdhci_cdns_sd4_phy_param_parse(struct device_node *np,
+ struct sdhci_cdns_sd4_phy *phy)
{
- struct sdhci_cdns_phy_param *p = priv->phy_params;
+ struct sdhci_cdns_sd4_phy_param *p = phy->phy_params;
u32 val;
int ret, i;
- for (i = 0; i < ARRAY_SIZE(sdhci_cdns_phy_cfgs); i++) {
- ret = of_property_read_u32(np, sdhci_cdns_phy_cfgs[i].property,
+ for (i = 0; i < ARRAY_SIZE(sdhci_cdns_sd4_phy_cfgs); i++) {
+ ret = of_property_read_u32(np, sdhci_cdns_sd4_phy_cfgs[i].property,
&val);
if (ret)
continue;
- p->addr = sdhci_cdns_phy_cfgs[i].addr;
+ p->addr = sdhci_cdns_sd4_phy_cfgs[i].addr;
p->data = val;
p++;
}
}
-static int sdhci_cdns_phy_init(struct sdhci_cdns_priv *priv)
+static int sdhci_cdns_sd4_phy_init(struct sdhci_cdns_priv *priv)
{
int ret, i;
+ struct sdhci_cdns_sd4_phy *phy = priv->phy;
- for (i = 0; i < priv->nr_phy_params; i++) {
- ret = sdhci_cdns_write_phy_reg(priv, priv->phy_params[i].addr,
- priv->phy_params[i].data);
+ for (i = 0; i < phy->nr_phy_params; i++) {
+ ret = sdhci_cdns_sd4_write_phy_reg(priv, phy->phy_params[i].addr,
+ phy->phy_params[i].data);
if (ret)
return ret;
}
+ return 0;
+}
+
+void sdhci_cdns_sd6_fullsw_reset(struct sdhci_cdns_priv *priv)
+{
+ u32 regval;
+
+ regval = readl(priv->hrs_addr + SDHCI_CDNS_HRS00);
+ regval |= SDHCI_CDNS_HRS00_SWR;
+ writel(regval, priv->hrs_addr + SDHCI_CDNS_HRS00);
+
+ do {
+ regval = readl(priv->hrs_addr + SDHCI_CDNS_HRS00);
+ } while (regval & SDHCI_CDNS_HRS00_SWR);
+
+ pr_debug("Success in reset of eMMC controller 0x%x\n", regval);
+}
+
+static u32 sdhci_cdns_sd6_read_phy_reg(struct sdhci_cdns_priv *priv,
+ u32 addr)
+{
+ writel(FIELD_PREP(SDHCI_CDNS_SD6_HRS04_ADDR, addr),
+ priv->hrs_addr + SDHCI_CDNS_HRS04);
+ return readl(priv->hrs_addr + SDHCI_CDNS_HRS05);
+}
+
+static void sdhci_cdns_sd6_write_phy_reg(struct sdhci_cdns_priv *priv,
+ u32 addr, u32 data)
+{
+ u32 data_read;
+
+ writel(FIELD_PREP(SDHCI_CDNS_SD6_HRS04_ADDR, addr),
+ priv->hrs_addr + SDHCI_CDNS_HRS04);
+ writel(data, priv->hrs_addr + SDHCI_CDNS_HRS05);
+
+ /* TODO: remove this debug read-back */
+ writel(FIELD_PREP(SDHCI_CDNS_SD6_HRS04_ADDR, addr),
+ priv->hrs_addr + SDHCI_CDNS_HRS04);
+ data_read = readl(priv->hrs_addr + SDHCI_CDNS_HRS05);
+}
+
+static int sdhci_cdns_sd6_dll_reset(struct sdhci_cdns_priv *priv, bool doReset)
+{
+ uint32_t reg;
+ int ret = 0;
+
+ reg = readl(priv->hrs_addr + SDHCI_CDNS_HRS09);
+ if (doReset)
+ reg &= ~SDHCI_CDNS_HRS09_PHY_SW_RESET;
+ else
+ reg |= SDHCI_CDNS_HRS09_PHY_SW_RESET;
+
+ writel(reg, priv->hrs_addr + SDHCI_CDNS_HRS09);
+
+ if (!doReset)
+ ret = readl_poll_timeout(priv->hrs_addr + SDHCI_CDNS_HRS09,
+ reg,
+ (reg &
+ SDHCI_CDNS_HRS09_PHY_INIT_COMPLETE),
+ 0, 0);
+
+ return ret;
+}
+
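+/*
+ * Hard-coded PHY settings for MMC high-speed mode, used in place of the
+ * calculated values when DRV_CALC_SETTINGS is not defined.
+ */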
+static void sdhci_cdns_sd6_calc_phy(struct sdhci_cdns_sd6_phy *phy)
+{
+ if (phy->mode == MMC_TIMING_MMC_HS) {
+ phy->settings.cp_clk_wr_delay = 0;
+ phy->settings.cp_clk_wrdqs_delay = 0;
+ phy->settings.cp_data_select_oe_end = 1;
+ phy->settings.cp_dll_bypass_mode = 1;
+ phy->settings.cp_dll_locked_mode = 3;
+ phy->settings.cp_dll_start_point = 4;
+ phy->settings.cp_gate_cfg_always_on = 1;
+ phy->settings.cp_io_mask_always_on = 0;
+ phy->settings.cp_io_mask_end = 0;
+ phy->settings.cp_io_mask_start = 0;
+ phy->settings.cp_rd_del_sel = 52;
+ phy->settings.cp_read_dqs_cmd_delay = 0;
+ phy->settings.cp_read_dqs_delay = 0;
+ phy->settings.cp_sw_half_cycle_shift = 0;
+ phy->settings.cp_sync_method = 1;
+ phy->settings.cp_underrun_suppress = 1;
+ phy->settings.cp_use_ext_lpbk_dqs = 1;
+ phy->settings.cp_use_lpbk_dqs = 1;
+ phy->settings.cp_use_phony_dqs = 1;
+ phy->settings.cp_use_phony_dqs_cmd = 1;
+ phy->settings.sdhc_extended_rd_mode = 1;
+ phy->settings.sdhc_extended_wr_mode = 1;
+ phy->settings.sdhc_hcsdclkadj = 2;
+ phy->settings.sdhc_idelay_val = 0;
+ phy->settings.sdhc_rdcmd_en = 1;
+ phy->settings.sdhc_rddata_en = 1;
+ phy->settings.sdhc_rw_compensate = 9;
+ phy->settings.sdhc_sdcfsh = 0;
+ phy->settings.sdhc_sdcfsl = 4;
+ phy->settings.sdhc_wrcmd0_dly = 1;
+ phy->settings.sdhc_wrcmd0_sdclk_dly = 0;
+ phy->settings.sdhc_wrcmd1_dly = 0;
+ phy->settings.sdhc_wrcmd1_sdclk_dly = 0;
+ phy->settings.sdhc_wrdata0_dly = 1;
+ phy->settings.sdhc_wrdata0_sdclk_dly = 0;
+ phy->settings.sdhc_wrdata1_dly = 0;
+ phy->settings.sdhc_wrdata1_sdclk_dly = 0;
+ }
+}
+
+static int sdhci_cdns_sd6_phy_init(struct sdhci_cdns_priv *priv)
+{
+ int ret;
+ u32 reg;
+ struct sdhci_cdns_sd6_phy *phy = priv->phy;
+
+#ifndef DRV_CALC_SETTINGS
+ /* Override the values for now till the driver is fixed */
+ sdhci_cdns_sd6_calc_phy(phy);
+#endif
+ sdhci_cdns_sd6_dll_reset(priv, true);
+
+ reg = sdhci_cdns_sd6_read_phy_reg(priv, SDHCI_CDNS_SD6_PHY_DQS_TIMING);
+ reg &= ~SDHCI_CDNS_SD6_PHY_DQS_TIMING_USE_EXT_LPBK_DQS;
+ reg &= ~SDHCI_CDNS_SD6_PHY_DQS_TIMING_USE_LPBK_DQS;
+ reg &= ~SDHCI_CDNS_SD6_PHY_DQS_TIMING_USE_PHONY_DQS;
+ reg &= ~SDHCI_CDNS_SD6_PHY_DQS_TIMING_USE_PHONY_DQS_CMD;
+ reg |= FIELD_PREP(SDHCI_CDNS_SD6_PHY_DQS_TIMING_USE_EXT_LPBK_DQS,
+ phy->settings.cp_use_ext_lpbk_dqs);
+ reg |= FIELD_PREP(SDHCI_CDNS_SD6_PHY_DQS_TIMING_USE_LPBK_DQS,
+ phy->settings.cp_use_lpbk_dqs);
+ reg |= FIELD_PREP(SDHCI_CDNS_SD6_PHY_DQS_TIMING_USE_PHONY_DQS,
+ phy->settings.cp_use_phony_dqs);
+ reg |= FIELD_PREP(SDHCI_CDNS_SD6_PHY_DQS_TIMING_USE_PHONY_DQS_CMD,
+ phy->settings.cp_use_phony_dqs_cmd);
+ sdhci_cdns_sd6_write_phy_reg(priv, SDHCI_CDNS_SD6_PHY_DQS_TIMING, reg);
+
+ reg = sdhci_cdns_sd6_read_phy_reg(priv, SDHCI_CDNS_SD6_PHY_GATE_LPBK);
+ reg &= ~SDHCI_CDNS_SD6_PHY_GATE_LPBK_SYNC_METHOD;
+ reg &= ~SDHCI_CDNS_SD6_PHY_GATE_LPBK_SW_HALF_CYCLE_SHIFT;
+ reg &= ~SDHCI_CDNS_SD6_PHY_GATE_LPBK_RD_DEL_SEL;
+ reg &= ~SDHCI_CDNS_SD6_PHY_GATE_LPBK_GATE_CFG_ALWAYS_ON;
+ reg |= FIELD_PREP(SDHCI_CDNS_SD6_PHY_GATE_LPBK_SYNC_METHOD,
+ phy->settings.cp_sync_method);
+ reg |= FIELD_PREP(SDHCI_CDNS_SD6_PHY_GATE_LPBK_SW_HALF_CYCLE_SHIFT,
+ phy->settings.cp_sw_half_cycle_shift);
+ reg |= FIELD_PREP(SDHCI_CDNS_SD6_PHY_GATE_LPBK_RD_DEL_SEL,
+ phy->settings.cp_rd_del_sel);
+ reg |= FIELD_PREP(SDHCI_CDNS_SD6_PHY_GATE_LPBK_GATE_CFG_ALWAYS_ON,
+ phy->settings.cp_gate_cfg_always_on);
+ sdhci_cdns_sd6_write_phy_reg(priv, SDHCI_CDNS_SD6_PHY_GATE_LPBK, reg);
+
+ reg = 0x0;
+ reg |= FIELD_PREP(SDHCI_CDNS_SD6_PHY_DLL_MASTER_BYPASS_MODE,
+ phy->settings.cp_dll_bypass_mode);
+ reg |= FIELD_PREP(SDHCI_CDNS_SD6_PHY_DLL_MASTER_PHASE_DETECT_SEL, 2);
+ reg |= FIELD_PREP(SDHCI_CDNS_SD6_PHY_DLL_MASTER_DLL_LOCK_NUM, 0);
+ reg |= FIELD_PREP(SDHCI_CDNS_SD6_PHY_DLL_MASTER_DLL_START_POINT,
+ phy->settings.cp_dll_start_point);
+ sdhci_cdns_sd6_write_phy_reg(priv, SDHCI_CDNS_SD6_PHY_DLL_MASTER, reg);
+
+ reg = 0x0;
+ reg = FIELD_PREP(SDHCI_CDNS_SD6_PHY_DLL_SLAVE_READ_DQS_CMD_DELAY,
+ phy->settings.cp_read_dqs_cmd_delay);
+ reg |= FIELD_PREP(SDHCI_CDNS_SD6_PHY_DLL_SLAVE_CLK_WRDQS_DELAY,
+ phy->settings.cp_clk_wrdqs_delay);
+ reg |= FIELD_PREP(SDHCI_CDNS_SD6_PHY_DLL_SLAVE_CLK_WR_DELAY,
+ phy->settings.cp_clk_wr_delay);
+ reg |= FIELD_PREP(SDHCI_CDNS_SD6_PHY_DLL_SLAVE_READ_DQS_DELAY,
+ phy->settings.cp_read_dqs_delay);
+ sdhci_cdns_sd6_write_phy_reg(priv, SDHCI_CDNS_SD6_PHY_DLL_SLAVE, reg);
+
+ reg = sdhci_cdns_sd6_read_phy_reg(priv, SDHCI_CDNS_SD6_PHY_CTRL);
+ reg &= ~SDHCI_CDNS_SD6_PHY_CTRL_PHONY_DQS_TIMING;
+ sdhci_cdns_sd6_write_phy_reg(priv, SDHCI_CDNS_SD6_PHY_CTRL, reg);
+
+ reg = sdhci_cdns_sd6_read_phy_reg(priv, SDHCI_CDNS_SD6_PHY_GPIO_CTRL1);
+ if (phy->settings.drive != 0xFF) {
+ reg |= SDHCI_CDNS_SD6_PHY_GPIO_CTRL1_DRV_OVR_EN;
+ reg |= FIELD_PREP(SDHCI_CDNS_SD6_PHY_GPIO_CTRL1_DRV,
+ phy->settings.drive);
+ }
+ if (phy->settings.slew != 0xFF) {
+ reg |= SDHCI_CDNS_SD6_PHY_GPIO_CTRL1_SLEW_OVR_EN;
+ reg |= FIELD_PREP(SDHCI_CDNS_SD6_PHY_GPIO_CTRL1_SLEW,
+ phy->settings.slew);
+ }
+ sdhci_cdns_sd6_write_phy_reg(priv, SDHCI_CDNS_SD6_PHY_GPIO_CTRL1, reg);
+
+ ret = sdhci_cdns_sd6_dll_reset(priv, false);
+ if (ret)
+ return ret;
+
+ reg = sdhci_cdns_sd6_read_phy_reg(priv, SDHCI_CDNS_SD6_PHY_DQ_TIMING);
+ reg &= ~SDHCI_CDNS_SD6_PHY_DQ_TIMING_IO_MASK_ALWAYS_ON;
+ reg &= ~SDHCI_CDNS_SD6_PHY_DQ_TIMING_IO_MASK_END;
+ reg &= ~SDHCI_CDNS_SD6_PHY_DQ_TIMING_IO_MASK_START;
+ reg &= ~SDHCI_CDNS_SD6_PHY_DQ_TIMING_DATA_SELECT_OE_END;
+ reg |= FIELD_PREP(SDHCI_CDNS_SD6_PHY_DQ_TIMING_IO_MASK_ALWAYS_ON,
+ phy->settings.cp_io_mask_always_on);
+ reg |= FIELD_PREP(SDHCI_CDNS_SD6_PHY_DQ_TIMING_IO_MASK_END,
+ phy->settings.cp_io_mask_end);
+ reg |= FIELD_PREP(SDHCI_CDNS_SD6_PHY_DQ_TIMING_IO_MASK_START,
+ phy->settings.cp_io_mask_start);
+ reg |= FIELD_PREP(SDHCI_CDNS_SD6_PHY_DQ_TIMING_DATA_SELECT_OE_END,
+ phy->settings.cp_data_select_oe_end);
+ sdhci_cdns_sd6_write_phy_reg(priv, SDHCI_CDNS_SD6_PHY_DQ_TIMING, reg);
+
+ reg = readl(priv->hrs_addr + SDHCI_CDNS_HRS09);
+ if (phy->settings.sdhc_extended_wr_mode)
+ reg |= SDHCI_CDNS_HRS09_EXTENDED_WR_MODE;
+ else
+ reg &= ~SDHCI_CDNS_HRS09_EXTENDED_WR_MODE;
+
+ if (phy->settings.sdhc_extended_rd_mode)
+ reg |= SDHCI_CDNS_HRS09_EXTENDED_RD_MODE;
+ else
+ reg &= ~SDHCI_CDNS_HRS09_EXTENDED_RD_MODE;
+
+ if (phy->settings.sdhc_rddata_en)
+ reg |= SDHCI_CDNS_HRS09_RDDATA_EN;
+ else
+ reg &= ~SDHCI_CDNS_HRS09_RDDATA_EN;
+
+ if (phy->settings.sdhc_rdcmd_en)
+ reg |= SDHCI_CDNS_HRS09_RDCMD_EN;
+ else
+ reg &= ~SDHCI_CDNS_HRS09_RDCMD_EN;
+
+ writel(reg, priv->hrs_addr + SDHCI_CDNS_HRS09);
+
+ writel(0x30004, priv->hrs_addr + SDHCI_CDNS_HRS02);
+
+ reg = 0x0;
+ reg = FIELD_PREP(SDHCI_CDNS_HRS10_HCSDCLKADJ, phy->settings.sdhc_hcsdclkadj);
+ writel(reg, priv->hrs_addr + SDHCI_CDNS_HRS10);
+
+ reg = 0x0;
+ reg = FIELD_PREP(SDHCI_CDNS_HRS16_WRDATA1_SDCLK_DLY,
+ phy->settings.sdhc_wrdata1_sdclk_dly);
+ reg |= FIELD_PREP(SDHCI_CDNS_HRS16_WRDATA0_SDCLK_DLY,
+ phy->settings.sdhc_wrdata0_sdclk_dly);
+ reg |= FIELD_PREP(SDHCI_CDNS_HRS16_WRCMD1_SDCLK_DLY,
+ phy->settings.sdhc_wrcmd1_sdclk_dly);
+ reg |= FIELD_PREP(SDHCI_CDNS_HRS16_WRCMD0_SDCLK_DLY,
+ phy->settings.sdhc_wrcmd0_sdclk_dly);
+ reg |= FIELD_PREP(SDHCI_CDNS_HRS16_WRDATA1_DLY,
+ phy->settings.sdhc_wrdata1_dly);
+ reg |= FIELD_PREP(SDHCI_CDNS_HRS16_WRDATA0_DLY,
+ phy->settings.sdhc_wrdata0_dly);
+ reg |= FIELD_PREP(SDHCI_CDNS_HRS16_WRCMD1_DLY,
+ phy->settings.sdhc_wrcmd1_dly);
+ reg |= FIELD_PREP(SDHCI_CDNS_HRS16_WRCMD0_DLY,
+ phy->settings.sdhc_wrcmd0_dly);
+ writel(reg, priv->hrs_addr + SDHCI_CDNS_HRS16);
+
+ reg = 0x0;
+ reg = FIELD_PREP(SDHCI_CDNS_HRS07_RW_COMPENSATE,
+ phy->settings.sdhc_rw_compensate);
+ reg |= FIELD_PREP(SDHCI_CDNS_HRS07_IDELAY_VAL,
+ phy->settings.sdhc_idelay_val);
+ writel(reg, priv->hrs_addr + SDHCI_CDNS_HRS07);
return 0;
}
@@ -174,6 +1170,19 @@ static void *sdhci_cdns_priv(struct sdhci_host *host)
return sdhci_pltfm_priv(pltfm_host);
}
+static int sdhci_cdns_sd6_set_tune_val(struct sdhci_host *host,
+ unsigned int val)
+{
+ struct sdhci_cdns_priv *priv = sdhci_cdns_priv(host);
+ struct sdhci_cdns_sd6_phy *phy = priv->phy;
+
+ phy->settings.hs200_tune_val = val;
+ phy->settings.cp_read_dqs_cmd_delay = val;
+ phy->settings.cp_read_dqs_delay = val;
+
+ return sdhci_cdns_sd6_phy_init(priv);
+}
+
static unsigned int sdhci_cdns_get_timeout_clock(struct sdhci_host *host)
{
/*
@@ -183,6 +1192,11 @@ static unsigned int sdhci_cdns_get_timeout_clock(struct sdhci_host *host)
return host->max_clk;
}
+static unsigned int sdhci_cdns_get_max_clock(struct sdhci_host *host)
+{
+ return SDMCLK_MAX_FREQ;
+}
+
static void sdhci_cdns_set_emmc_mode(struct sdhci_cdns_priv *priv, u32 mode)
{
u32 tmp;
@@ -202,7 +1216,317 @@ static u32 sdhci_cdns_get_emmc_mode(struct sdhci_cdns_priv *priv)
return FIELD_GET(SDHCI_CDNS_HRS06_MODE, tmp);
}
-static int sdhci_cdns_set_tune_val(struct sdhci_host *host, unsigned int val)
+static void sdhci_cdns_set_uhs_signaling(struct sdhci_host *host,
+ unsigned int timing)
+{
+ struct sdhci_cdns_priv *priv = sdhci_cdns_priv(host);
+ u32 mode;
+
+ switch (timing) {
+ case MMC_TIMING_MMC_HS:
+ mode = SDHCI_CDNS_HRS06_MODE_MMC_SDR;
+ break;
+ case MMC_TIMING_MMC_DDR52:
+ mode = SDHCI_CDNS_HRS06_MODE_MMC_DDR;
+ break;
+ case MMC_TIMING_MMC_HS200:
+ mode = SDHCI_CDNS_HRS06_MODE_MMC_HS200;
+ break;
+ case MMC_TIMING_MMC_HS400:
+ if (priv->enhanced_strobe)
+ mode = SDHCI_CDNS_HRS06_MODE_MMC_HS400ES;
+ else
+ mode = SDHCI_CDNS_HRS06_MODE_MMC_HS400;
+ break;
+ case MMC_TIMING_SD_HS:
+ mode = SDHCI_CDNS_HRS06_MODE_SD;
+ break;
+ default:
+ mode = SDHCI_CDNS_HRS06_MODE_LEGACY;
+ break;
+ }
+
+ pr_debug("%s mode %d timing %d\n", __func__, mode, timing);
+ sdhci_cdns_set_emmc_mode(priv, mode);
+
+ /* For SD, fall back to the default handler */
+ if (mode == SDHCI_CDNS_HRS06_MODE_SD)
+ sdhci_set_uhs_signaling(host, timing);
+}
+
+static int sdhci_cdns_sd6_phy_update_timings(struct sdhci_host *host)
+{
+ struct sdhci_cdns_priv *priv = sdhci_cdns_priv(host);
+ struct sdhci_cdns_sd6_phy *phy = priv->phy;
+ int t_sdmclk = phy->t_sdmclk;
+ int mode;
+
+ mode = sdhci_cdns_sd6_get_mode(host, host->mmc->ios.timing);
+ /* initialize input */
+ init_timings[mode](&phy->t, phy->t_sdclk);
+
+ phy->mode = host->mmc->ios.timing;
+ phy->strobe_dat = false;
+
+ switch (phy->mode) {
+ case MMC_TIMING_UHS_SDR104:
+ phy->tune_cmd = true;
+ phy->tune_dat = true;
+ break;
+ case MMC_TIMING_UHS_DDR50:
+ case MMC_TIMING_MMC_DDR52:
+ phy->ddr = true;
+ break;
+ case MMC_TIMING_MMC_HS200:
+ phy->tune_dat = true;
+ phy->tune_cmd = true;
+ break;
+ case MMC_TIMING_MMC_HS400:
+ phy->tune_cmd = true;
+ phy->ddr = true;
+ phy->strobe_dat = true;
+ break;
+ }
+
+ if (priv->enhanced_strobe)
+ phy->strobe_cmd = true;
+
+ phy->d.phy_sdclk_delay = 2 * t_sdmclk;
+ phy->d.phy_cmd_o_delay = 2 * t_sdmclk + t_sdmclk / 2;
+ phy->d.phy_dat_o_delay = 2 * t_sdmclk + t_sdmclk / 2;
+
+ if (sdhci_cdns_sd6_phy_clock_validate(phy))
+ return -1;
+
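+ /* Bypass the extended read/write path when SDCLK runs at the SDMCLK rate */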
+ if (phy->t_sdclk == phy->t_sdmclk) {
+ phy->settings.sdhc_extended_wr_mode = 0;
+ phy->settings.sdhc_extended_rd_mode = 0;
+ } else {
+ phy->settings.sdhc_extended_wr_mode = 1;
+ phy->settings.sdhc_extended_rd_mode = 1;
+ }
+
+ phy->settings.cp_gate_cfg_always_on = 1;
+ //phy->settings.sdhc_rdcmd_en = 1;
+ //phy->settings.sdhc_rddata_en = 1;
+
+ sdhci_cdns_sd6_phy_configure_dll(phy);
+
+ sdhci_cdns_sd6_phy_calc_settings(phy);
+
+ return 0;
+}
+
+static u32 sdhci_cdns_sd6_get_mode(struct sdhci_host *host,
+ unsigned int timing)
+{
+ struct sdhci_cdns_priv *priv = sdhci_cdns_priv(host);
+ u32 mode;
+
+ switch (timing) {
+ case MMC_TIMING_MMC_HS:
+ mode = SDHCI_CDNS_HRS06_MODE_MMC_SDR;
+ break;
+ case MMC_TIMING_MMC_DDR52:
+ mode = SDHCI_CDNS_HRS06_MODE_MMC_DDR;
+ break;
+ case MMC_TIMING_MMC_HS200:
+ mode = SDHCI_CDNS_HRS06_MODE_MMC_HS200;
+ break;
+ case MMC_TIMING_MMC_HS400:
+ if (priv->enhanced_strobe)
+ mode = SDHCI_CDNS_HRS06_MODE_MMC_HS400ES;
+ else
+ mode = SDHCI_CDNS_HRS06_MODE_MMC_HS400;
+ break;
+ case MMC_TIMING_SD_HS:
+ mode = SDHCI_CDNS_HRS06_MODE_SD;
+ break;
+ default:
+ mode = SDHCI_CDNS_HRS06_MODE_MMC_SDR;
+ break;
+ }
+
+ return mode;
+}
+
+static u32 sdhci_cdns_sd6_irq(struct sdhci_host *host, u32 intmask)
+{
+ struct sdhci_cdns_priv *priv = sdhci_cdns_priv(host);
+ u64 reg = readq(priv->hrs_addr + 0x718);
+
+ if (intmask)
+ sdhci_cdns_sd6_writel(host, intmask, SDHCI_INT_STATUS);
+
+ writeq(reg, priv->hrs_addr + 0x718);
+ /* read back to flush the write */
+ readq(priv->hrs_addr + 0x718);
+
+ return intmask;
+}
+
+static void sdhci_cdns_sd6_set_uhs_signaling(struct sdhci_host *host,
+ unsigned int timing)
+{
+ struct sdhci_cdns_priv *priv = sdhci_cdns_priv(host);
+ struct sdhci_cdns_sd6_phy *phy = priv->phy;
+
+ sdhci_cdns_set_uhs_signaling(host, timing);
+
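+ /* Skip PHY reconfiguration until a valid mode and SDCLK period are known */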
+ if ((phy->mode == -1) || (phy->t_sdclk == -1))
+ return;
+
+ if (sdhci_cdns_sd6_phy_update_timings(host))
+ pr_debug("%s: update timings failed\n", __func__);
+
+ if (sdhci_cdns_sd6_phy_init(priv))
+ pr_debug("%s: phy init failed\n", __func__);
+}
+
+static void sdhci_cdns_sd6_set_clock(struct sdhci_host *host,
+ unsigned int clock)
+{
+ struct sdhci_cdns_priv *priv = sdhci_cdns_priv(host);
+ struct sdhci_cdns_sd6_phy *phy = priv->phy;
+
+#ifdef DRV_CALC_SETTINGS
+ /* SDCLK period in picoseconds */
+ phy->t_sdclk = DIV_ROUND_DOWN_ULL(1000000000000ULL, clock);
+#endif
+
+ pr_debug("%s %d %d\n", __func__, phy->mode, clock);
+
+ if (sdhci_cdns_sd6_phy_update_timings(host))
+ pr_debug("%s: update timings failed\n", __func__);
+
+ if (sdhci_cdns_sd6_phy_init(priv))
+ pr_debug("%s: phy init failed\n", __func__);
+
+ sdhci_set_clock(host, clock);
+}
+
+static int sdhci_cdns_sd4_phy_probe(struct platform_device *pdev,
+ struct sdhci_cdns_priv *priv)
+{
+ unsigned int nr_phy_params;
+ struct sdhci_cdns_sd4_phy *phy;
+ struct device *dev = &pdev->dev;
+
+ nr_phy_params = sdhci_cdns_sd4_phy_param_count(dev->of_node);
+ phy = devm_kzalloc(dev, struct_size(phy, phy_params, nr_phy_params),
+ GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ phy->nr_phy_params = nr_phy_params;
+
+ sdhci_cdns_sd4_phy_param_parse(dev->of_node, phy);
+ priv->phy = phy;
+
+ return 0;
+}
+
+static int sdhci_cdns_sd6_phy_probe(struct platform_device *pdev,
+ struct sdhci_cdns_priv *priv)
+{
+ struct device *dev = &pdev->dev;
+ struct sdhci_cdns_sd6_phy *phy;
+ u32 val;
+ struct clk *clk;
+ int ret;
+ const char *mode_name;
+
+ phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ clk = devm_clk_get(dev, "sdmclk");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "sdmclk get error\n");
+ return PTR_ERR(clk);
+ }
+
+ val = clk_get_rate(clk);
+ /* SDMCLK period in picoseconds */
+ phy->t_sdmclk = DIV_ROUND_DOWN_ULL(1000000000000ULL, val);
+
+ ret = of_property_read_u32(dev->of_node, "cdns,host_slew",
+ &phy->settings.slew);
+ if (ret)
+ phy->settings.slew = 0xFF;
+
+ ret = of_property_read_u32(dev->of_node, "cdns,host_drive",
+ &phy->settings.drive);
+ if (ret)
+ phy->settings.drive = 0xFF;
+
+ ret = of_property_read_u32(dev->of_node, "cdns,iocell_input_delay",
+ &phy->d.iocell_input_delay);
+ if (ret)
+ phy->d.iocell_input_delay = 2500;
+
+ ret = of_property_read_u32(dev->of_node, "cdns,iocell_output_delay",
+ &phy->d.iocell_output_delay);
+ if (ret)
+ phy->d.iocell_output_delay = 2500;
+
+ ret = of_property_read_u32(dev->of_node, "cdns,delay_element",
+ &phy->d.delay_element);
+ if (ret)
+ phy->d.delay_element = 24;
+
+ ret = of_property_read_string_index(dev->of_node, "cdns,mode", 0,
+ &mode_name);
+ if (!ret) {
+ if (!strcmp("emmc_sdr", mode_name))
+ phy->mode = MMC_TIMING_MMC_HS;
+ else if (!strcmp("emmc_ddr", mode_name))
+ phy->mode = MMC_TIMING_MMC_DDR52;
+ else if (!strcmp("emmc_hs200", mode_name))
+ phy->mode = MMC_TIMING_MMC_HS200;
+ else if (!strcmp("emmc_hs400", mode_name))
+ phy->mode = MMC_TIMING_MMC_HS400;
+ else if (!strcmp("sd_hs", mode_name))
+ phy->mode = MMC_TIMING_SD_HS;
+ else
+ phy->mode = MMC_TIMING_MMC_HS;
+ } else {
+ phy->mode = MMC_TIMING_MMC_HS;
+ }
+
+ /* Override dts entry for now */
+ phy->d.delay_element_org = phy->d.delay_element = 24;
+ phy->d.iocell_input_delay = 650;
+ phy->d.iocell_output_delay = 1800;
+
+ switch (phy->mode) {
+ case MMC_TIMING_MMC_HS:
+ case MMC_TIMING_MMC_DDR52:
+ phy->t_sdclk = 10000;
+ break;
+ case MMC_TIMING_MMC_HS200:
+ case MMC_TIMING_MMC_HS400:
+ phy->t_sdclk = 5000;
+ break;
+ case MMC_TIMING_SD_HS:
+ phy->t_sdclk = 100000;
+ break;
+ default:
+ phy->t_sdclk = 10000;
+ break;
+ }
+
+ priv->phy = phy;
+
+ sdhci_cdns_sd6_calc_phy(phy);
+ return 0;
+}
+
+static int sdhci_cdns_sd4_set_tune_val(struct sdhci_host *host, unsigned int val)
{
struct sdhci_cdns_priv *priv = sdhci_cdns_priv(host);
void __iomem *reg = priv->hrs_addr + SDHCI_CDNS_HRS06;
@@ -241,6 +1565,8 @@ static int sdhci_cdns_set_tune_val(struct sdhci_host *host, unsigned int val)
*/
static int sdhci_cdns_execute_tuning(struct sdhci_host *host, u32 opcode)
{
+ struct sdhci_cdns_priv *priv = sdhci_cdns_priv(host);
int cur_streak = 0;
int max_streak = 0;
int end_of_streak = 0;
@@ -255,7 +1581,7 @@ static int sdhci_cdns_execute_tuning(struct sdhci_host *host, u32 opcode)
return 0;
for (i = 0; i < SDHCI_CDNS_MAX_TUNING_LOOP; i++) {
- if (sdhci_cdns_set_tune_val(host, i) ||
+ if (priv->cdns_data->set_tune_val(host, i) ||
mmc_send_tuning(host->mmc, opcode, NULL)) { /* bad */
cur_streak = 0;
} else { /* good */
@@ -272,61 +1598,72 @@ static int sdhci_cdns_execute_tuning(struct sdhci_host *host, u32 opcode)
return -EIO;
}
- return sdhci_cdns_set_tune_val(host, end_of_streak - max_streak / 2);
-}
-
-static void sdhci_cdns_set_uhs_signaling(struct sdhci_host *host,
- unsigned int timing)
-{
- struct sdhci_cdns_priv *priv = sdhci_cdns_priv(host);
- u32 mode;
-
- switch (timing) {
- case MMC_TIMING_MMC_HS:
- mode = SDHCI_CDNS_HRS06_MODE_MMC_SDR;
- break;
- case MMC_TIMING_MMC_DDR52:
- mode = SDHCI_CDNS_HRS06_MODE_MMC_DDR;
- break;
- case MMC_TIMING_MMC_HS200:
- mode = SDHCI_CDNS_HRS06_MODE_MMC_HS200;
- break;
- case MMC_TIMING_MMC_HS400:
- if (priv->enhanced_strobe)
- mode = SDHCI_CDNS_HRS06_MODE_MMC_HS400ES;
- else
- mode = SDHCI_CDNS_HRS06_MODE_MMC_HS400;
- break;
- default:
- mode = SDHCI_CDNS_HRS06_MODE_SD;
- break;
- }
-
- sdhci_cdns_set_emmc_mode(priv, mode);
-
- /* For SD, fall back to the default handler */
- if (mode == SDHCI_CDNS_HRS06_MODE_SD)
- sdhci_set_uhs_signaling(host, timing);
+ return priv->cdns_data->set_tune_val(host, end_of_streak - max_streak / 2);
}
-static const struct sdhci_ops sdhci_cdns_ops = {
+static const struct sdhci_ops sdhci_cdns_sd4_ops = {
.set_clock = sdhci_set_clock,
.get_timeout_clock = sdhci_cdns_get_timeout_clock,
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_reset,
- .platform_execute_tuning = sdhci_cdns_execute_tuning,
+ .platform_execute_tuning = sdhci_cdns_execute_tuning,
.set_uhs_signaling = sdhci_cdns_set_uhs_signaling,
};
+static const struct sdhci_ops sdhci_cdns_sd6_ops = {
+#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
+ .read_l = sdhci_cdns_sd6_readl,
+ .write_l = sdhci_cdns_sd6_writel,
+ .read_w = sdhci_cdns_sd6_readw,
+ .write_w = sdhci_cdns_sd6_writew,
+ .read_b = sdhci_cdns_sd6_readb,
+ .write_b = sdhci_cdns_sd6_writeb,
+#endif
+ .get_max_clock = sdhci_cdns_get_max_clock,
+ .set_clock = sdhci_cdns_sd6_set_clock,
+ .get_timeout_clock = sdhci_cdns_get_timeout_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .platform_execute_tuning = sdhci_cdns_execute_tuning,
+ .set_uhs_signaling = sdhci_cdns_sd6_set_uhs_signaling,
+ .irq = sdhci_cdns_sd6_irq,
+};
static const struct sdhci_pltfm_data sdhci_cdns_uniphier_pltfm_data = {
- .ops = &sdhci_cdns_ops,
+ .ops = &sdhci_cdns_sd4_ops,
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
};
-static const struct sdhci_pltfm_data sdhci_cdns_pltfm_data = {
- .ops = &sdhci_cdns_ops,
+static const struct sdhci_pltfm_data sdhci_cdns_sd4_pltfm_data = {
+ .ops = &sdhci_cdns_sd4_ops,
+};
+
+static const struct sdhci_pltfm_data sdhci_cdns_sd6_pltfm_data = {
+ .ops = &sdhci_cdns_sd6_ops,
+};
+
+static const struct sdhci_cdns_data sdhci_cdns_sd4_data = {
+ .phy_init = sdhci_cdns_sd4_phy_init,
+ .set_tune_val = sdhci_cdns_sd4_set_tune_val,
+};
+
+static const struct sdhci_cdns_data sdhci_cdns_sd6_data = {
+ .phy_init = sdhci_cdns_sd6_phy_init,
+ .set_tune_val = sdhci_cdns_sd6_set_tune_val,
+};
+
+static const struct sdhci_cdns_of_data sdhci_cdns_sd4_of_data = {
+ .pltfm_data = &sdhci_cdns_sd4_pltfm_data,
+ .cdns_data = &sdhci_cdns_sd4_data,
+ .phy_probe = sdhci_cdns_sd4_phy_probe,
+};
+
+static const struct sdhci_cdns_of_data sdhci_cdns_sd6_of_data = {
+ .pltfm_data = &sdhci_cdns_sd6_pltfm_data,
+ .cdns_data = &sdhci_cdns_sd6_data,
+ .phy_probe = sdhci_cdns_sd6_phy_probe,
};
+
static void sdhci_cdns_hs400_enhanced_strobe(struct mmc_host *mmc,
struct mmc_ios *ios)
{
@@ -350,14 +1687,12 @@ static void sdhci_cdns_hs400_enhanced_strobe(struct mmc_host *mmc,
static int sdhci_cdns_probe(struct platform_device *pdev)
{
struct sdhci_host *host;
- const struct sdhci_pltfm_data *data;
+ const struct sdhci_cdns_of_data *data;
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_cdns_priv *priv;
struct clk *clk;
- unsigned int nr_phy_params;
int ret;
struct device *dev = &pdev->dev;
- static const u16 version = SDHCI_SPEC_400 << SDHCI_SPEC_VER_SHIFT;
clk = devm_clk_get(dev, NULL);
if (IS_ERR(clk))
@@ -368,12 +1703,12 @@ static int sdhci_cdns_probe(struct platform_device *pdev)
return ret;
data = of_device_get_match_data(dev);
- if (!data)
- data = &sdhci_cdns_pltfm_data;
+ if (!data) {
+ ret = -EINVAL;
+ goto disable_clk;
+ }
- nr_phy_params = sdhci_cdns_phy_param_count(dev->of_node);
- host = sdhci_pltfm_init(pdev, data,
- struct_size(priv, phy_params, nr_phy_params));
+ host = sdhci_pltfm_init(pdev, data->pltfm_data, sizeof(*priv));
if (IS_ERR(host)) {
ret = PTR_ERR(host);
goto disable_clk;
@@ -382,15 +1717,17 @@ static int sdhci_cdns_probe(struct platform_device *pdev)
pltfm_host = sdhci_priv(host);
pltfm_host->clk = clk;
+ host->clk_mul = 0;
+ host->max_clk = SDMCLK_MAX_FREQ;
+ host->quirks |= SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;
+ host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
priv = sdhci_pltfm_priv(pltfm_host);
- priv->nr_phy_params = nr_phy_params;
priv->hrs_addr = host->ioaddr;
priv->enhanced_strobe = false;
+ priv->cdns_data = data->cdns_data;
host->ioaddr += SDHCI_CDNS_SRS_BASE;
host->mmc_host_ops.hs400_enhanced_strobe =
sdhci_cdns_hs400_enhanced_strobe;
- sdhci_enable_v4_mode(host);
- __sdhci_read_caps(host, &version, NULL, NULL);
sdhci_get_of_property(pdev);
@@ -398,12 +1735,15 @@ static int sdhci_cdns_probe(struct platform_device *pdev)
if (ret)
goto free;
- sdhci_cdns_phy_param_parse(dev->of_node, priv);
+ ret = data->phy_probe(pdev, priv);
+ if (ret)
+ goto free;
- ret = sdhci_cdns_phy_init(priv);
+ ret = priv->cdns_data->phy_init(priv);
if (ret)
goto free;
+ sdhci_enable_v4_mode(host);
ret = sdhci_add_host(host);
if (ret)
goto free;
@@ -429,7 +1769,7 @@ static int sdhci_cdns_resume(struct device *dev)
if (ret)
return ret;
- ret = sdhci_cdns_phy_init(priv);
+ ret = priv->cdns_data->phy_init(priv);
if (ret)
goto disable_clk;
@@ -455,7 +1795,14 @@ static const struct of_device_id sdhci_cdns_match[] = {
.compatible = "socionext,uniphier-sd4hc",
.data = &sdhci_cdns_uniphier_pltfm_data,
},
- { .compatible = "cdns,sd4hc" },
+ {
+ .compatible = "cdns,sd4hc",
+ .data = &sdhci_cdns_sd4_of_data,
+ },
+ {
+ .compatible = "cdns,sd6hc",
+ .data = &sdhci_cdns_sd6_of_data,
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sdhci_cdns_match);
diff --git a/drivers/mmc/host/sdhci-xenon-phy.c b/drivers/mmc/host/sdhci-xenon-phy.c
index 03ce57ef4585..58a203767444 100644
--- a/drivers/mmc/host/sdhci-xenon-phy.c
+++ b/drivers/mmc/host/sdhci-xenon-phy.c
@@ -216,6 +216,24 @@ static int xenon_alloc_emmc_phy(struct sdhci_host *host)
return 0;
}
+static int xenon_check_stability_internal_clk(struct sdhci_host *host)
+{
+ u32 reg;
+ ktime_t timeout;
+
+ /* Wait max 20 ms */
+ timeout = ktime_add_ms(ktime_get(), 20);
+ while (!((reg = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
+ & SDHCI_CLOCK_INT_STABLE)) {
+ if (ktime_after(ktime_get(), timeout)) {
+ dev_err(mmc_dev(host->mmc), "phy_init: Internal clock never stabilised.\n");
+ return -ETIMEDOUT;
+ }
+ usleep_range(900, 1100);
+ }
+ return 0;
+}
+
/*
* eMMC 5.0/5.1 PHY init/re-init.
* eMMC PHY init should be executed after:
@@ -226,12 +244,16 @@ static int xenon_alloc_emmc_phy(struct sdhci_host *host)
*/
static int xenon_emmc_phy_init(struct sdhci_host *host)
{
- u32 reg;
- u32 wait, clock;
+ int ret;
+ u32 reg, wait, clock;
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
struct xenon_emmc_phy_regs *phy_regs = priv->emmc_phy_regs;
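+ /* PHY init below relies on a stable internal clock */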
+ ret = xenon_check_stability_internal_clk(host);
+ if (ret)
+ return ret;
+
reg = sdhci_readl(host, phy_regs->timing_adj);
reg |= XENON_PHY_INITIALIZAION;
sdhci_writel(host, reg, phy_regs->timing_adj);
diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c
index d509198c00c8..9f3d420f8da0 100644
--- a/drivers/mmc/host/sdhci-xenon.c
+++ b/drivers/mmc/host/sdhci-xenon.c
@@ -663,6 +663,7 @@ static const struct dev_pm_ops sdhci_xenon_dev_pm_ops = {
static const struct of_device_id sdhci_xenon_dt_ids[] = {
{ .compatible = "marvell,armada-ap806-sdhci",},
+ { .compatible = "marvell,armada-ap807-sdhci",},
{ .compatible = "marvell,armada-cp110-sdhci",},
{ .compatible = "marvell,armada-3700-sdhci",},
{}
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index d42e86cdff12..5cfc63fa2232 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1439,6 +1439,8 @@ static void sdhci_set_transfer_mode(struct sdhci_host *host,
} else {
/* clear Auto CMD settings for no data CMDs */
mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
+ mode &= ~(SDHCI_TRNS_AUTO_CMD12 |
+ SDHCI_TRNS_AUTO_CMD23);
sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
}
@@ -1685,13 +1687,13 @@ static bool sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
else
timeout += 10 * HZ;
+
sdhci_mod_timer(host, cmd->mrq, timeout);
if (host->use_external_dma)
sdhci_external_dma_pre_transfer(host, cmd);
sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
-
return true;
}
@@ -2978,6 +2980,59 @@ static void sdhci_card_event(struct mmc_host *mmc)
spin_unlock_irqrestore(&host->lock, flags);
}
+#ifdef CONFIG_MMC_PSTORE
+static int sdhci_req_completion_poll(struct mmc_host *host,
+ unsigned long msecs)
+{
+ struct sdhci_host *sdhci_host = mmc_priv(host);
+ u32 int_mask;
+
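+ /* Poll the interrupt status directly; in the pstore path interrupts may not be serviced */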
+ while (msecs) {
+ int_mask = sdhci_readl(sdhci_host, SDHCI_INT_STATUS);
+ if (int_mask & SDHCI_INT_DATA_END)
+ return 0;
+ else if (int_mask & SDHCI_INT_ADMA_ERROR)
+ return -EIO;
+ else if ((int_mask & SDHCI_INT_DATA_CRC) ||
+ (int_mask & SDHCI_INT_DATA_END_BIT))
+ return -EILSEQ;
+ else if (int_mask & SDHCI_INT_DATA_TIMEOUT)
+ return -ETIMEDOUT;
+
+ mdelay(1);
+ msecs--;
+ }
+
+ return -ETIMEDOUT;
+}
+
+static void sdhci_req_cleanup_pending(struct mmc_host *host)
+{
+ struct sdhci_host *sdhci_host = mmc_priv(host);
+ u32 int_mask;
+
+ /* Clear pending DMA interrupts */
+ int_mask = sdhci_readl(sdhci_host, SDHCI_INT_STATUS);
+ int_mask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
+ SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
+ SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
+ SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT);
+ if (int_mask)
+ sdhci_writel(sdhci_host, int_mask, SDHCI_INT_STATUS);
+
+ /* Clear fired or pending DMA requests */
+ if (sdhci_host->cmd || sdhci_host->data_cmd || sdhci_host->data) {
+ if (sdhci_host->cmd)
+ __sdhci_finish_mrq(sdhci_host, sdhci_host->cmd->mrq);
+ if (sdhci_host->data_cmd)
+ __sdhci_finish_mrq(sdhci_host,
+ sdhci_host->data_cmd->mrq);
+ if (sdhci_host->data)
+ __sdhci_finish_mrq(sdhci_host, sdhci_host->data->mrq);
+ }
+}
+#endif
+
static const struct mmc_host_ops sdhci_ops = {
.request = sdhci_request,
.post_req = sdhci_post_req,
@@ -2993,6 +3048,10 @@ static const struct mmc_host_ops sdhci_ops = {
.execute_tuning = sdhci_execute_tuning,
.card_event = sdhci_card_event,
.card_busy = sdhci_card_busy,
+#ifdef CONFIG_MMC_PSTORE
+ .req_cleanup_pending = sdhci_req_cleanup_pending,
+ .req_completion_poll = sdhci_req_completion_poll,
+#endif
};
/*****************************************************************************\
diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
index 2b26a875a855..0410c3d911de 100644
--- a/drivers/mtd/spi-nor/core.c
+++ b/drivers/mtd/spi-nor/core.c
@@ -2185,6 +2185,57 @@ write_err:
return ret;
}
+#if IS_ENABLED(CONFIG_MTD_PSTORE)
+static int spi_nor_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ size_t page_offset, page_remain, i;
+ ssize_t ret;
+
+ if (nor->prepare) {
+ ret = nor->prepare(nor, SPI_NOR_OPS_WRITE);
+ if (ret) {
+ dev_err(nor->dev, "failed in the preparation.\n");
+ return ret;
+ }
+ }
+ nor->pstore = 1;
+ for (i = 0; i < len; ) {
+ ssize_t written;
+ loff_t addr = to + i;
+
+ if (hweight32(nor->page_size) == 1) {
+ page_offset = addr & (nor->page_size - 1);
+ } else {
+ uint64_t aux = addr;
+
+ page_offset = do_div(aux, nor->page_size);
+ }
+ /* the size of data remaining on the first page */
+ page_remain = min_t(size_t,
+ nor->page_size - page_offset, len - i);
+
+ addr = spi_nor_convert_addr(nor, addr);
+
+ write_enable(nor);
+ ret = spi_nor_write_data(nor, addr, page_remain, buf + i);
+ if (ret < 0)
+ return ret;
+ written = ret;
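+ /* Busy-poll for completion; the pstore panic path cannot sleep */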
+ while (!spi_nor_ready(nor))
+ ;
+
+ *retlen += written;
+ i += written;
+ }
+
+ if (nor->unprepare)
+ nor->unprepare(nor, SPI_NOR_OPS_WRITE);
+ return 0;
+}
+#endif
+
static int spi_nor_check(struct spi_nor *nor)
{
if (!nor->dev ||
@@ -3188,6 +3239,9 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
mtd->_erase = spi_nor_erase;
mtd->_read = spi_nor_read;
mtd->_resume = spi_nor_resume;
+#if IS_ENABLED(CONFIG_MTD_PSTORE)
+ mtd->_panic_write = spi_nor_panic_write;
+#endif
mtd->_get_device = spi_nor_get_device;
mtd->_put_device = spi_nor_put_device;
diff --git a/drivers/mtd/spi-nor/macronix.c b/drivers/mtd/spi-nor/macronix.c
index 662b212787d4..ee35aab52747 100644
--- a/drivers/mtd/spi-nor/macronix.c
+++ b/drivers/mtd/spi-nor/macronix.c
@@ -84,6 +84,8 @@ static const struct flash_info macronix_parts[] = {
SPI_NOR_QUAD_READ) },
{ "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048,
SPI_NOR_QUAD_READ) },
+ { "mx25um51245g", INFO(0xc2803a, 0, 64 * 1024, 1024,
+ SECT_4K | SPI_NOR_4B_OPCODES) },
{ "mx66u2g45g", INFO(0xc2253c, 0, 64 * 1024, 4096,
SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
diff --git a/drivers/mtd/spi-nor/micron-st.c b/drivers/mtd/spi-nor/micron-st.c
index ef3695080710..681e2bab5a44 100644
--- a/drivers/mtd/spi-nor/micron-st.c
+++ b/drivers/mtd/spi-nor/micron-st.c
@@ -32,6 +32,9 @@ static const struct flash_info st_parts[] = {
SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB |
SPI_NOR_4BIT_BP | SPI_NOR_BP3_SR_BIT6) },
+ { "mt25qu128a", INFO(0x20bb18, 0x104400, 64 * 1024, 256,
+ SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
{ "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256,
SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
{ "mt25ql256a", INFO6(0x20ba19, 0x104400, 64 * 1024, 512,
@@ -45,6 +48,9 @@ static const struct flash_info st_parts[] = {
SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
{ "n25q256ax1", INFO(0x20bb19, 0, 64 * 1024, 512,
SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
+ { "mt25qu256a", INFO(0x20bb19, 0x104400, 64 * 1024, 512,
+ SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
{ "mt25ql512a", INFO6(0x20ba20, 0x104400, 64 * 1024, 1024,
SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
diff --git a/drivers/mtd/spi-nor/spansion.c b/drivers/mtd/spi-nor/spansion.c
index 8429b4af999a..ca89e094f8bd 100644
--- a/drivers/mtd/spi-nor/spansion.c
+++ b/drivers/mtd/spi-nor/spansion.c
@@ -64,6 +64,8 @@ static const struct flash_info spansion_parts[] = {
{ "s25fs512s", INFO6(0x010220, 0x4d0081, 256 * 1024, 256,
SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR)
.fixups = &s25fs_s_fixups, },
+ { "s25fs128s", INFO6(0x012018, 0x4d0081, 64 * 1024, 256,
+ SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
{ "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
{ "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
{ "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64,
diff --git a/drivers/mtd/spi-nor/winbond.c b/drivers/mtd/spi-nor/winbond.c
index e5dfa786f190..2d9c2b3d0cad 100644
--- a/drivers/mtd/spi-nor/winbond.c
+++ b/drivers/mtd/spi-nor/winbond.c
@@ -93,6 +93,8 @@ static const struct flash_info winbond_parts[] = {
.fixups = &w25q256_fixups },
{ "w25q256jvm", INFO(0xef7019, 0, 64 * 1024, 512,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "w25q256fw", INFO(0xef6019, 0, 64 * 1024, 512,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "w25q256jw", INFO(0xef6019, 0, 64 * 1024, 512,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "w25m512jv", INFO(0xef7119, 0, 64 * 1024, 1024,
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 9e32ea9c1164..905ffc63a578 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -133,4 +133,13 @@ config NET_DSA_VITESSE_VSC73XX_PLATFORM
This enables support for the Vitesse VSC7385, VSC7388, VSC7395
and VSC7398 SparX integrated ethernet switches, connected over
a CPU-attached address bus and work in memory-mapped I/O mode.
+
+config NET_DSA_MVMDIO
+ tristate "Sysfs for soho switch register access"
+ depends on NET_DSA && MVMDIO
+ help
+ This option adds the ability to read or write switch registers.
+ The read, write and dump files in /sys/devices/platform/dsa_mvmdio provide
+ access to switch registers and internal phy registers.
+
endmenu
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index 4a943ccc2ca4..38c36ad69da0 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -23,3 +23,4 @@ obj-y += mv88e6xxx/
obj-y += ocelot/
obj-y += qca/
obj-y += sja1105/
+obj-$(CONFIG_NET_DSA_MVMDIO) += dsa_mvmdio.o
diff --git a/drivers/net/dsa/dsa_mvmdio.c b/drivers/net/dsa/dsa_mvmdio.c
new file mode 100644
index 000000000000..38c620fc52f5
--- /dev/null
+++ b/drivers/net/dsa/dsa_mvmdio.c
@@ -0,0 +1,528 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2019 Marvell International Ltd. */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/capability.h>
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+#include <linux/of_mdio.h>
+
+#define MV_PHY_CMD_REG 0
+#define MV_PHY_DATA_REG 1
+#define MV_SMIBUSY_OFFSET 15
+#define MV_SMIFUNC_OFFSET 13
+#define MV_SMIFUNC_INT 0
+#define MV_SMIFUNC_EXT 1
+#define MV_SMIFUNC_SIZE 2
+#define MV_SMIMODE_OFFSET 12
+#define MV_SMIOP_OFFSET 10
+#define MV_SMIOP_SIZE 2
+#define MV_SMIOP_READ 2
+#define MV_SMIOP_WRITE 1
+#define MV_DEVAD_OFFSET 5
+#define MV_DEVAD_SIZE 5
+#define MV_DEVAD_MASK 0x1F
+#define MV_REGAD_OFFSET 0
+#define MV_REGAD_SIZE 5
+#define MV_REGAD_MASK 0x1F
+#define MV_MAX_REGS_PER_PORT 0x20
+#define MV_SMI_PHY_CMD 0x18
+#define MV_SMI_PHY_DATA 0x19
+#define MV_GLOBAL1 0x1B
+#define MV_GLOBAL2 0x1C
+#define MV_MAX_CHARS 1024
+#define MV_INVALID_PHY_ADDR 0xFF
+
+static struct mii_bus *mv_mii_bus;
+static struct mii_bus *mv_xmii_bus;
+static unsigned int mv_phy_addr;
+
+enum mv_reg_type {
+ MV_REG_TYPE_SWITCH,
+ MV_REG_TYPE_PHY_INT,
+ MV_REG_TYPE_PHY_EXT,
+ MV_REG_TYPE_MDIO,
+ MV_REG_TYPE_XMDIO
+};
+
+/* Read a register of a regular phy connected on the mdio bus.
+ * Returns: register value on success or error value on failure.
+ */
+static int dsa_mvmdio_read_mdio(unsigned char phy, unsigned char reg)
+{
+ return mdiobus_read(mv_mii_bus, phy, reg);
+}
+
+/* Write a register of a regular phy connected on the mdio bus.
+ * Returns: 0 on success or an error value on failure
+ */
+static int dsa_mvmdio_write_mdio(unsigned char phy,
+ unsigned char reg,
+ unsigned short val)
+{
+ return mdiobus_write(mv_mii_bus, phy, reg, val);
+}
+
+/* Read an extended phy register over the xmdio bus.
+ * Returns: register value on success or error value on failure.
+ */
+static int dsa_mvmdio_read_xmdio(unsigned char phy,
+ unsigned char dev,
+ unsigned char reg)
+{
+ return mdiobus_read(mv_xmii_bus, phy, (dev << 16) | reg);
+}
+
+/* Write an extended phy register over the xmdio bus.
+ * Returns: 0 on success or an error value on failure
+ */
+static int dsa_mvmdio_write_xmdio(unsigned char phy,
+ unsigned char dev,
+ unsigned char reg,
+ unsigned short val)
+{
+ return mdiobus_write(mv_xmii_bus, phy, (dev << 16) | reg, val);
+}
+
+/* Read a switch register over the mdio bus.
+ * Uses direct access if the switch is configured in single-chip addressing
+ * mode. In multi-chip addressing mode it uses indirect access through the
+ * switch command and data registers.
+ * Returns: register value on success or error value on failure.
+ */
+static int dsa_mvmdio_read_register(unsigned char dev, unsigned char reg)
+{
+ int ret;
+ unsigned short cmd_data;
+
+ if (mv_phy_addr == 0)
+ return mdiobus_read(mv_mii_bus, dev, reg);
+
+ /* Write to SMI Command Register */
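+ /* Command word: busy flag, SMI function, SMI mode, opcode, device and register address */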
+ cmd_data = (1 << MV_SMIBUSY_OFFSET) |
+ (MV_SMIFUNC_INT << MV_SMIFUNC_OFFSET) |
+ (1 << MV_SMIMODE_OFFSET) | (MV_SMIOP_READ << MV_SMIOP_OFFSET) |
+ ((dev & MV_DEVAD_MASK) << MV_DEVAD_OFFSET) |
+ ((reg & MV_REGAD_MASK) << MV_REGAD_OFFSET);
+
+ ret = mdiobus_write(mv_mii_bus, mv_phy_addr, MV_PHY_CMD_REG,
+ cmd_data);
+ if (ret < 0)
+ return ret;
+
+ /* Read from SMI Data Register */
+ ret = mdiobus_read(mv_mii_bus, mv_phy_addr, MV_PHY_DATA_REG);
+
+ return ret;
+}
+
+/* Write a switch register over the mdio bus.
+ * Uses direct access if the switch is configured in single-chip addressing
+ * mode. In multi-chip addressing mode it uses indirect access through the
+ * switch command and data registers.
+ * Returns: 0 on success or an error value on failure.
+ */
+static int dsa_mvmdio_write_register(unsigned char dev,
+ unsigned char reg,
+ unsigned short data)
+{
+ int ret;
+ unsigned short cmd_data;
+
+ if (mv_phy_addr == 0)
+ return mdiobus_write(mv_mii_bus, dev, reg, data);
+
+ /* Write data to SMI Data Register */
+ ret = mdiobus_write(mv_mii_bus, mv_phy_addr, MV_PHY_DATA_REG, data);
+ if (ret < 0)
+ return ret;
+
+ /* Write to SMI Command Register */
+ cmd_data = (1 << MV_SMIBUSY_OFFSET) |
+ (MV_SMIFUNC_INT << MV_SMIFUNC_OFFSET) |
+ (1 << MV_SMIMODE_OFFSET) |
+ (MV_SMIOP_WRITE << MV_SMIOP_OFFSET) |
+ ((dev & MV_DEVAD_MASK) << MV_DEVAD_OFFSET) |
+ ((reg & MV_REGAD_MASK) << MV_REGAD_OFFSET);
+
+ ret = mdiobus_write(mv_mii_bus, mv_phy_addr, MV_PHY_CMD_REG,
+ cmd_data);
+
+ return ret;
+}
+
+/* Read switch internal phy register when smi_func = 0.
+ * Read external phy register that is connected to the switch port when
+ * smi_func = 1.
+ * Returns: register value on success or error value on failure.
+ */
+static int dsa_mvmdio_phy_read_register(unsigned char dev,
+ unsigned char reg,
+ unsigned char smi_func)
+{
+ int ret;
+ unsigned short cmd_data;
+
+ /* Write to SMI Command Register */
+ cmd_data = (1 << MV_SMIBUSY_OFFSET) | (smi_func << MV_SMIFUNC_OFFSET) |
+ (1 << MV_SMIMODE_OFFSET) | (MV_SMIOP_READ << MV_SMIOP_OFFSET) |
+ ((dev & MV_DEVAD_MASK) << MV_DEVAD_OFFSET) |
+ ((reg & MV_REGAD_MASK) << MV_REGAD_OFFSET);
+
+ ret = dsa_mvmdio_write_register(MV_GLOBAL2, MV_SMI_PHY_CMD, cmd_data);
+ if (ret < 0)
+ return ret;
+
+ /* Read from SMI Data Register */
+ ret = dsa_mvmdio_read_register(MV_GLOBAL2, MV_SMI_PHY_DATA);
+ return ret;
+}
+
+/* Write switch internal phy register when smi_func = 0.
+ * Write external phy register that is connected to the switch port when
+ * smi_func = 1.
+ * Returns: 0 on success or an error value on failure.
+ */
+static int dsa_mvmdio_phy_write_register(unsigned char dev,
+ unsigned char reg,
+ unsigned short data,
+ unsigned char smi_func)
+{
+ int ret;
+ unsigned short cmd_data;
+
+ /* Write data to SMI Data Register */
+ ret = dsa_mvmdio_write_register(MV_GLOBAL2, MV_SMI_PHY_DATA, data);
+ if (ret < 0)
+ return ret;
+
+ /* Write to SMI Command Register */
+ cmd_data = (1 << MV_SMIBUSY_OFFSET) | (smi_func << MV_SMIFUNC_OFFSET) |
+ (1 << MV_SMIMODE_OFFSET) | (MV_SMIOP_WRITE << MV_SMIOP_OFFSET) |
+ ((dev & MV_DEVAD_MASK) << MV_DEVAD_OFFSET) |
+ ((reg & MV_REGAD_MASK) << MV_REGAD_OFFSET);
+
+ ret = dsa_mvmdio_write_register(MV_GLOBAL2, MV_SMI_PHY_CMD, cmd_data);
+
+ return ret;
+}
+
+/* Processing "read" command in sysfs.
+ * Returns: register value on success or error value on failure for a given
+ * register type.
+ */
+static int dsa_mvmdio_read(unsigned char port,
+ unsigned char dev_addr,
+ unsigned char reg,
+ unsigned char type,
+ unsigned int *value)
+{
+ int ret = 0;
+
+ if (type == MV_REG_TYPE_SWITCH)
+ ret = dsa_mvmdio_read_register(port, reg);
+ else if (type == MV_REG_TYPE_PHY_INT)
+ ret = dsa_mvmdio_phy_read_register(port, reg, MV_SMIFUNC_INT);
+ else if (type == MV_REG_TYPE_PHY_EXT)
+ ret = dsa_mvmdio_phy_read_register(port, reg, MV_SMIFUNC_EXT);
+ else if (type == MV_REG_TYPE_MDIO)
+ ret = dsa_mvmdio_read_mdio(port, reg);
+ else if (type == MV_REG_TYPE_XMDIO)
+ ret = dsa_mvmdio_read_xmdio(port, dev_addr, reg);
+
+ if (ret < 0)
+ return ret;
+
+ *value = ret;
+
+ return 0;
+}
+
+/* Processing "write" command in sysfsi.
+ * Returns: 0 on success or error value on failure.
+ */
+static int dsa_mvmdio_write(unsigned char port,
+ unsigned char dev_addr,
+ unsigned char reg,
+ unsigned char type,
+ unsigned short value)
+{
+ int ret = 0;
+
+ if (type == MV_REG_TYPE_SWITCH)
+ ret = dsa_mvmdio_write_register(port, reg, value);
+ else if (type == MV_REG_TYPE_PHY_INT)
+ ret = dsa_mvmdio_phy_write_register(port, reg, value,
+ MV_SMIFUNC_INT);
+ else if (type == MV_REG_TYPE_PHY_EXT)
+ ret = dsa_mvmdio_phy_write_register(port, reg, value,
+ MV_SMIFUNC_EXT);
+ else if (type == MV_REG_TYPE_MDIO)
+ ret = dsa_mvmdio_write_mdio(port, reg, value);
+ else if (type == MV_REG_TYPE_XMDIO)
+ ret = dsa_mvmdio_write_xmdio(port, dev_addr, reg, value);
+
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+/* Processing "dump" command in sysfs.
+ * Print 0 to 32 registers values of a given register type.
+ * Returns: 0 on success or error value on failure.
+ */
+static int dsa_mvmdio_dump(unsigned char port,
+ unsigned char dev_addr,
+ unsigned char type)
+{
+ int i;
+ int max_regs = MV_MAX_REGS_PER_PORT;
+ int off = 0;
+ char buf[MV_MAX_CHARS];
+
+ for (i = 0; i < max_regs; i++) {
+ if (i % 4 == 0)
+ off += sprintf(buf + off, "(%02X-%02X) ", i, i + 3);
+ if (type == MV_REG_TYPE_SWITCH)
+ off += sprintf(buf + off, "%04X ",
+ dsa_mvmdio_read_register(port, i));
+ else if (type == MV_REG_TYPE_PHY_INT)
+ off += sprintf(buf + off, "%04X ",
+ dsa_mvmdio_phy_read_register(port, i, MV_SMIFUNC_INT));
+ else if (type == MV_REG_TYPE_PHY_EXT)
+ off += sprintf(buf + off, "%04X ",
+ dsa_mvmdio_phy_read_register(port, i, MV_SMIFUNC_EXT));
+ else if (type == MV_REG_TYPE_MDIO)
+ off += sprintf(buf + off, "%04X ",
+ dsa_mvmdio_read_mdio(port, i));
+ else if (type == MV_REG_TYPE_XMDIO)
+ off += sprintf(buf + off, "%04X ",
+ dsa_mvmdio_read_xmdio(port, dev_addr, i));
+ if (i % 4 == 3)
+ off += sprintf(buf + off, "\n");
+ }
+
+ pr_err("%s", buf);
+ return 0;
+}
+
+/* Processing "help" command in sysfs */
+static ssize_t dsa_mvmdio_help(char *buf)
+{
+ int off = 0;
+
+ off += scnprintf(buf + off, PAGE_SIZE - off, "cat help - print help\n");
+ off += scnprintf(buf + off, PAGE_SIZE - off, "echo [t] [p] [x] [r] > read - read register\n");
+ off += scnprintf(buf + off, PAGE_SIZE - off, "echo [t] [p] [x] [r] [v] > write - write register\n");
+ off += scnprintf(buf + off, PAGE_SIZE - off, "echo [t] [p] [x] > dump - dump 32 registers\n");
+ off += scnprintf(buf + off, PAGE_SIZE - off, "parameters (in hexadecimal):\n");
+ off += scnprintf(buf + off, PAGE_SIZE - off, " [t] type. 0-switch, 1-internal phy, 2-external phy regs\n");
+ off += scnprintf(buf + off, PAGE_SIZE - off, " 3-regular phy, 4-extended phy\n");
+ off += scnprintf(buf + off, PAGE_SIZE - off, " [p] port addr or phy-id.\n");
+ off += scnprintf(buf + off, PAGE_SIZE - off, " [x] device address. valid only for extended phy.\n");
+ off += scnprintf(buf + off, PAGE_SIZE - off, " [r] register address.\n");
+ off += scnprintf(buf + off, PAGE_SIZE - off, " [v] value.\n");
+ off += scnprintf(buf + off, PAGE_SIZE - off, "Examples:\n");
+ off += scnprintf(buf + off, PAGE_SIZE - off, " 1. echo 0 1 0 3 > read - read switch register\n");
+ off += scnprintf(buf + off, PAGE_SIZE - off, " 2. echo 0 1b 0 1c > read - read switch global1 register\n");
+ off += scnprintf(buf + off, PAGE_SIZE - off, " 3. echo 1 3 0 2 > read - read internal phy register\n");
+ off += scnprintf(buf + off, PAGE_SIZE - off, " 4. echo 2 0 0 2 > read - read external phy register\n");
+ off += scnprintf(buf + off, PAGE_SIZE - off, " 5. echo 3 1 0 2 > read - read regular phy register, phyid=1\n");
+ off += scnprintf(buf + off, PAGE_SIZE - off, " 6. echo 4 0 7 3c > read - read xmdio phy, EEE advertisement register\n");
+ off += scnprintf(buf + off, PAGE_SIZE - off, " 7. echo 0 2 0 7 5 > write - write switch register, set vlan id\n");
+ off += scnprintf(buf + off, PAGE_SIZE - off, " 8. echo 1 3 0 > dump - dump internal switch phy registers\n");
+ off += scnprintf(buf + off, PAGE_SIZE - off, " 9. echo 4 0 7 > dump - dump xmdio phy registers for dev-addr=7");
+ return off;
+}
+
+static ssize_t dsa_mvmdio_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ return dsa_mvmdio_help(buf);
+}
+
+static ssize_t dsa_mvmdio_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ const char *name = attr->attr.name;
+ unsigned long flags;
+ unsigned int dev_addr = 0, port = 0, reg = 0, type = 0;
+ unsigned int val = 0;
+ int err = 0;
+ unsigned int data = 0;
+ int ret;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ /* Read arguments */
+ ret = sscanf(buf, "%x %x %x %x %x",
+ &type, &port, &dev_addr, &reg, &val);
+ if (ret < 3)
+ return -EINVAL;
+
+ if (mv_phy_addr == MV_INVALID_PHY_ADDR && type < MV_REG_TYPE_MDIO) {
+ pr_err("\"sw-smi-addr\" property not defined in dts file. Assuming switch not connected\n");
+ return len;
+ }
+
+ local_irq_save(flags);
+ if (!strcmp(name, "read")) {
+ err = dsa_mvmdio_read((unsigned char)port,
+ (unsigned char)dev_addr,
+ (unsigned char)reg,
+ (unsigned char)type, &data);
+ if (err)
+ pr_err("Register read failed, err - %d\n", err);
+ else
+ pr_err("read:: type:%d, port=0x%X, dev=0x%X,reg=0x%X, val=0x%04X\n", type, port, dev_addr, reg, data);
+ } else if (!strcmp(name, "write")) {
+ err = dsa_mvmdio_write((unsigned char)port,
+ (unsigned char)dev_addr,
+ (unsigned char)reg,
+ (unsigned char)type,
+ (unsigned short)val);
+ if (err)
+ pr_err("Register write failed, err - %d\n", err);
+ else
+ pr_err("write:: type:%d, port=0x%X, dev=0x%X,reg=0x%X, val=0x%X\n", type, port, dev_addr, reg, val);
+ } else if (!strcmp(name, "dump")) {
+ err = dsa_mvmdio_dump((unsigned char)port,
+ (unsigned char)dev_addr,
+ (unsigned char)type);
+ if (err)
+ pr_err("Register dump failed, err - %d\n", err);
+ else
+ pr_err("dump:: type %d, port=0x%X, dev=0x%X\n",
+ type, port, dev_addr);
+ }
+
+ local_irq_restore(flags);
+
+ return err ? -EINVAL : len;
+}
+
+static DEVICE_ATTR(read, 0200, dsa_mvmdio_show, dsa_mvmdio_store);
+static DEVICE_ATTR(write, 0200, dsa_mvmdio_show, dsa_mvmdio_store);
+static DEVICE_ATTR(dump, 0200, dsa_mvmdio_show, dsa_mvmdio_store);
+static DEVICE_ATTR(help, 0400, dsa_mvmdio_show, dsa_mvmdio_store);
+
+static struct attribute *dsa_mvmdio_attrs[] = {
+ &dev_attr_read.attr,
+ &dev_attr_write.attr,
+ &dev_attr_dump.attr,
+ &dev_attr_help.attr,
+ NULL
+};
+
+static struct attribute_group dsa_mvmdio_group = {
+ .name = "dsa_mvmdio",
+ .attrs = dsa_mvmdio_attrs,
+};
+
+static int dsa_mvmdio_probe(struct platform_device *pdev)
+{
+ struct device_node *np;
+ struct device_node *mdio;
+ struct device_node *xmdio;
+ int ret;
+
+ np = pdev->dev.of_node;
+ mdio = of_parse_phandle(np, "mii-bus", 0);
+ if (!mdio) {
+ pr_err("%s : parse mii-bus handle failed\n", __func__);
+ return -EINVAL;
+ }
+
+ mv_mii_bus = of_mdio_find_bus(mdio);
+ if (!mv_mii_bus) {
+ pr_err("%s : mdio find bus failed\n", __func__);
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(np, "reg", &mv_phy_addr);
+ if (ret) {
+ pr_err("%s : switch smi addr not defined\n", __func__);
+ mv_phy_addr = MV_INVALID_PHY_ADDR;
+ }
+
+ xmdio = of_parse_phandle(np, "xmii-bus", 0);
+ if (!xmdio) {
+ pr_err("%s : parse handle failed\n", __func__);
+ return -EINVAL;
+ }
+
+ mv_xmii_bus = of_mdio_find_bus(xmdio);
+ if (!mv_xmii_bus) {
+ pr_err("%s : xmdio find bus failed\n", __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int dsa_mvmdio_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static const struct of_device_id dsa_mvmdio_match[] = {
+ { .compatible = "marvell,dsa-mvmdio" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, dsa_mvmdio_match);
+
+static struct platform_driver dsa_mvmdio_driver = {
+ .probe = dsa_mvmdio_probe,
+ .remove = dsa_mvmdio_remove,
+ .driver = {
+ .name = "dsa-mvmdio",
+ .of_match_table = dsa_mvmdio_match,
+ },
+};
+
+static int dsa_mvmdio_init(void)
+{
+ int err;
+ struct device *pd;
+
+ err = platform_driver_register(&dsa_mvmdio_driver);
+ if (err) {
+ pr_err("register dsa_mvmdio_driver() failed\n");
+ return err;
+ }
+
+ pd = &platform_bus;
+ err = sysfs_create_group(&pd->kobj, &dsa_mvmdio_group);
+ if (err)
+ pr_err("init sysfs group %s failed %d\n",
+ dsa_mvmdio_group.name, err);
+
+ return err;
+}
+
+static void dsa_mvmdio_exit(void)
+{
+ sysfs_remove_group(&platform_bus.kobj, &dsa_mvmdio_group);
+ platform_driver_unregister(&dsa_mvmdio_driver);
+}
+
+late_initcall(dsa_mvmdio_init);
+module_exit(dsa_mvmdio_exit);
+
+MODULE_AUTHOR("Ravindra Reddy K. <ravindra@marvell.com>");
+MODULE_DESCRIPTION("Mdio access for switch from userspace through sysfs");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 816453a4f8d6..34dd13fa2cd8 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -318,6 +318,7 @@ static void nic_set_lmac_vf_mapping(struct nicpf *nic)
{
unsigned bgx_map = bgx_get_map(nic->node);
int bgx, next_bgx_lmac = 0;
+ unsigned long lmac_bmap;
int lmac, lmac_cnt = 0;
u64 lmac_credit;
@@ -327,7 +328,9 @@ static void nic_set_lmac_vf_mapping(struct nicpf *nic)
if (!(bgx_map & (1 << bgx)))
continue;
lmac_cnt = bgx_get_lmac_count(nic->node, bgx);
- for (lmac = 0; lmac < lmac_cnt; lmac++)
+ lmac_bmap = bgx_get_lmac_bmap(nic->node, bgx);
+
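+ /* Map VFs only onto LMACs that are enabled in the BGX LMAC bitmap */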
+ for_each_set_bit(lmac, &lmac_bmap, MAX_LMAC_PER_BGX)
nic->vf_lmac_map[next_bgx_lmac++] =
NIC_SET_VF_LMAC_MAP(bgx, lmac);
nic->num_vf_en += lmac_cnt;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index c7bdac79299a..4c0dade7fe68 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -512,25 +512,40 @@ static int nicvf_set_ringparam(struct net_device *netdev,
static int nicvf_get_rss_hash_opts(struct nicvf *nic,
struct ethtool_rxnfc *info)
{
+ u64 rss_cfg = nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);
+
info->data = 0;
+ if (!(rss_cfg & BIT_ULL(RSS_HASH_IP)))
+ return 0;
+
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+
switch (info->flow_type) {
case TCP_V4_FLOW:
case TCP_V6_FLOW:
+ if (rss_cfg & BIT_ULL(RSS_HASH_TCP))
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
case UDP_V4_FLOW:
case UDP_V6_FLOW:
+ if (rss_cfg & BIT_ULL(RSS_HASH_UDP))
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
case SCTP_V4_FLOW:
case SCTP_V6_FLOW:
- info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- fallthrough;
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
case IPV4_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
case IPV6_FLOW:
- info->data |= RXH_IP_SRC | RXH_IP_DST;
break;
default:
return -EINVAL;
}
-
return 0;
}
@@ -596,19 +611,6 @@ static int nicvf_set_rss_hash_opts(struct nicvf *nic,
return -EINVAL;
}
break;
- case SCTP_V4_FLOW:
- case SCTP_V6_FLOW:
- switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
- case 0:
- rss_cfg &= ~(1ULL << RSS_HASH_L4ETC);
- break;
- case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
- rss_cfg |= (1ULL << RSS_HASH_L4ETC);
- break;
- default:
- return -EINVAL;
- }
- break;
case IPV4_FLOW:
case IPV6_FLOW:
rss_cfg = RSS_HASH_IP;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 0ccd5b40ef5c..77bf7bc7ff85 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -359,7 +359,7 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
/* Release additional page references held for recycling */
head = 0;
while (head < rbdr->pgcnt) {
- pgcache = &rbdr->pgcache[head];
+ pgcache = &rbdr->pgcache[head++];
if (pgcache->page && page_ref_count(pgcache->page) != 0) {
if (rbdr->is_xdp) {
page_ref_sub(pgcache->page,
@@ -367,9 +367,8 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
}
put_page(pgcache->page);
}
- head++;
}
-
+ kfree(rbdr->pgcache);
/* Free RBDR ring */
nicvf_free_q_desc_mem(nic, &rbdr->dmem);
}
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 8ff28ed04b7f..309a54718649 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -74,12 +74,14 @@ struct bgx {
struct pci_dev *pdev;
bool is_dlm;
bool is_rgx;
+ unsigned long lmac_bmap; /* bitmap of enabled lmacs */
};
static struct bgx *bgx_vnic[MAX_BGX_THUNDER];
static int lmac_count; /* Total no of LMACs in system */
static int bgx_xaui_check_link(struct lmac *lmac);
+static int bgx_lmac_sgmii_init(struct bgx *bgx, struct lmac *lmac);
/* Supported devices */
static const struct pci_device_id bgx_id_table[] = {
@@ -195,12 +197,24 @@ int bgx_get_lmac_count(int node, int bgx_idx)
bgx = get_bgx(node, bgx_idx);
if (bgx)
- return bgx->lmac_count;
+ return hweight64(bgx->lmac_bmap);
return 0;
}
EXPORT_SYMBOL(bgx_get_lmac_count);
+unsigned long bgx_get_lmac_bmap(int node, int bgx_idx)
+{
+ struct bgx *bgx;
+
+ bgx = get_bgx(node, bgx_idx);
+ if (bgx)
+ return bgx->lmac_bmap;
+
+ return 0;
+}
+EXPORT_SYMBOL(bgx_get_lmac_bmap);
+
/* Returns the current link status of LMAC */
void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status)
{
@@ -579,6 +593,14 @@ static void bgx_sgmii_change_link_state(struct lmac *lmac)
}
bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL, misc_ctl);
bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG, port_cfg);
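+ /* Reset the PCS block and wait for the reset bit to self-clear */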
+ if (!bgx->is_rgx) {
+ bgx_reg_modify(bgx, lmac->lmacid, BGX_GMP_PCS_MRX_CTL,
+ PCS_MRX_CTL_RESET);
+ if (bgx_poll_reg(bgx, lmac->lmacid, BGX_GMP_PCS_MRX_CTL,
+ PCS_MRX_CTL_RESET, true)) {
+ dev_err(&bgx->pdev->dev, "BGX PCS reset not completed\n");
+ }
+ }
/* Restore CMR config settings */
cmr_cfg |= (rx_en ? CMR_PKT_RX_EN : 0) | (tx_en ? CMR_PKT_TX_EN : 0);
@@ -592,35 +614,21 @@ static void bgx_lmac_handler(struct net_device *netdev)
{
struct lmac *lmac = container_of(netdev, struct lmac, netdev);
struct phy_device *phydev;
- int link_changed = 0;
if (!lmac)
return;
phydev = lmac->phydev;
- if (!phydev->link && lmac->last_link)
- link_changed = -1;
-
- if (phydev->link &&
- (lmac->last_duplex != phydev->duplex ||
- lmac->last_link != phydev->link ||
- lmac->last_speed != phydev->speed)) {
- link_changed = 1;
- }
+ lmac->link_up = !!phydev->link;
lmac->last_link = phydev->link;
lmac->last_speed = phydev->speed;
lmac->last_duplex = phydev->duplex;
- if (!link_changed)
- return;
-
- if (link_changed > 0)
- lmac->link_up = true;
- else
- lmac->link_up = false;
-
if (lmac->is_sgmii)
bgx_sgmii_change_link_state(lmac);
else
@@ -1098,10 +1106,7 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
/* Restore default cfg, incase low level firmware changed it */
bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03);
- if ((lmac->lmac_type != BGX_MODE_XFI) &&
- (lmac->lmac_type != BGX_MODE_XLAUI) &&
- (lmac->lmac_type != BGX_MODE_40G_KR) &&
- (lmac->lmac_type != BGX_MODE_10G_KR)) {
+ if (lmac->is_sgmii) {
if (!lmac->phydev) {
if (lmac->autoneg) {
bgx_reg_write(bgx, lmacid,
@@ -1200,7 +1205,7 @@ static void bgx_init_hw(struct bgx *bgx)
dev_err(&bgx->pdev->dev, "BGX%d BIST failed\n", bgx->bgx_id);
/* Set lmac type and lane2serdes mapping */
- for (i = 0; i < bgx->lmac_count; i++) {
+ for_each_set_bit(i, &bgx->lmac_bmap, MAX_LMAC_PER_BGX) {
lmac = &bgx->lmac[i];
bgx_reg_write(bgx, i, BGX_CMRX_CFG,
(lmac->lmac_type << 8) | lmac->lane_to_sds);
@@ -1212,7 +1217,7 @@ static void bgx_init_hw(struct bgx *bgx)
bgx_reg_write(bgx, 0, BGX_CMR_RX_LMACS, bgx->lmac_count);
/* Set the backpressure AND mask */
- for (i = 0; i < bgx->lmac_count; i++)
+ for_each_set_bit(i, &bgx->lmac_bmap, MAX_LMAC_PER_BGX)
bgx_reg_modify(bgx, 0, BGX_CMR_CHAN_MSK_AND,
((1ULL << MAX_BGX_CHANS_PER_LMAC) - 1) <<
(i * MAX_BGX_CHANS_PER_LMAC));
@@ -1248,7 +1253,10 @@ static void bgx_print_qlm_mode(struct bgx *bgx, u8 lmacid)
switch (lmac->lmac_type) {
case BGX_MODE_SGMII:
- dev_info(dev, "%s: SGMII\n", (char *)str);
+ if (bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL) & 0x100)
+ dev_info(dev, "%s: 1000Base-X\n", (char *)str);
+ else
+ dev_info(dev, "%s: SGMII\n", (char *)str);
break;
case BGX_MODE_XAUI:
dev_info(dev, "%s: XAUI\n", (char *)str);
@@ -1469,6 +1477,7 @@ static int bgx_init_of_phy(struct bgx *bgx)
{
struct fwnode_handle *fwn;
struct device_node *node = NULL;
+ bool phy_reset;
u8 lmac = 0;
device_for_each_child_node(&bgx->pdev->dev, fwn) {
@@ -1476,6 +1485,7 @@ static int bgx_init_of_phy(struct bgx *bgx)
struct device_node *phy_np;
const char *mac;
+ phy_reset = false;
/* Should always be an OF node. But if it is not, we
* cannot handle it, so exit the loop.
*/
@@ -1500,10 +1510,12 @@ static int bgx_init_of_phy(struct bgx *bgx)
/* Wait until the phy drivers are available */
pd = of_phy_find_device(phy_np);
if (!pd)
- goto defer;
+ phy_reset = true;
bgx->lmac[lmac].phydev = pd;
}
+ if (!phy_reset)
+ set_bit(bgx->lmac[lmac].lmacid, &bgx->lmac_bmap);
lmac++;
if (lmac == bgx->max_lmac) {
of_node_put(node);
@@ -1511,20 +1523,6 @@ static int bgx_init_of_phy(struct bgx *bgx)
}
}
return 0;
-
-defer:
- /* We are bailing out, try not to leak device reference counts
- * for phy devices we may have already found.
- */
- while (lmac) {
- if (bgx->lmac[lmac].phydev) {
- put_device(&bgx->lmac[lmac].phydev->mdio.dev);
- bgx->lmac[lmac].phydev = NULL;
- }
- lmac--;
- }
- of_node_put(node);
- return -EPROBE_DEFER;
}
#else
@@ -1550,7 +1548,7 @@ static irqreturn_t bgx_intr_handler(int irq, void *data)
u64 status, val;
int lmac;
- for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
+ for_each_set_bit(lmac, &bgx->lmac_bmap, MAX_LMAC_PER_BGX) {
status = bgx_reg_read(bgx, lmac, BGX_GMP_GMI_TXX_INT);
if (status & GMI_TXX_INT_UNDFLW) {
pci_err(bgx->pdev, "BGX%d lmac%d UNDFLW\n",
@@ -1649,8 +1647,10 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
bgx_get_qlm_mode(bgx);
- err = bgx_init_phy(bgx);
- if (err)
+ bgx_init_phy(bgx);
+
+ /* Fail case where no lmac is enabled */
+ if (!bgx->lmac_bmap)
goto err_enable;
bgx_init_hw(bgx);
@@ -1658,7 +1658,7 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
bgx_register_intr(pdev);
/* Enable all LMACs */
- for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
+ for_each_set_bit(lmac, &bgx->lmac_bmap, MAX_LMAC_PER_BGX) {
err = bgx_lmac_enable(bgx, lmac);
if (err) {
dev_err(dev, "BGX%d failed to enable lmac%d\n",
@@ -1688,7 +1688,7 @@ static void bgx_remove(struct pci_dev *pdev)
u8 lmac;
/* Disable all LMACs */
- for (lmac = 0; lmac < bgx->lmac_count; lmac++)
+ for_each_set_bit(lmac, &bgx->lmac_bmap, MAX_LMAC_PER_BGX)
bgx_lmac_disable(bgx, lmac);
pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx);
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index cdea49392185..38732c593464 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -240,6 +240,7 @@ u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx);
u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx);
#define BGX_RX_STATS_COUNT 11
#define BGX_TX_STATS_COUNT 18
+unsigned long bgx_get_lmac_bmap(int node, int bgx);
struct bgx_stats {
u64 rx_stats[BGX_RX_STATS_COUNT];
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index 41815b609569..a6a74d4227da 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -85,6 +85,7 @@ config MVNETA_BM
config MVPP2
tristate "Marvell Armada 375/7K/8K network interface support"
depends on ARCH_MVEBU || COMPILE_TEST
+ depends on NET_DSA
select MVMDIO
select PHYLINK
select PAGE_POOL
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index d14762d93640..52459a13e5c1 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -80,6 +80,18 @@ enum orion_mdio_bus_type {
BUS_TYPE_XSMI
};
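+/* Match data wraps the bus type so of_device_get_match_data() returns a
+ * pointer rather than a casted enum value.
+ */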
+struct orion_mdio_data {
+ enum orion_mdio_bus_type bus_type;
+};
+
+const struct orion_mdio_data smi_bus = {
+ .bus_type = BUS_TYPE_SMI,
+};
+
+const struct orion_mdio_data xsmi_bus = {
+ .bus_type = BUS_TYPE_XSMI,
+};
+
struct orion_mdio_ops {
int (*is_done)(struct orion_mdio_dev *);
unsigned int poll_interval_min;
@@ -275,13 +287,13 @@ static irqreturn_t orion_mdio_err_irq(int irq, void *dev_id)
static int orion_mdio_probe(struct platform_device *pdev)
{
- enum orion_mdio_bus_type type;
+ const struct orion_mdio_data *data;
struct resource *r;
struct mii_bus *bus;
struct orion_mdio_dev *dev;
int i, ret;
- type = (enum orion_mdio_bus_type)of_device_get_match_data(&pdev->dev);
+ data = of_device_get_match_data(&pdev->dev);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r) {
@@ -294,7 +306,7 @@ static int orion_mdio_probe(struct platform_device *pdev)
if (!bus)
return -ENOMEM;
- switch (type) {
+ switch (data->bus_type) {
case BUS_TYPE_SMI:
bus->read = orion_mdio_smi_read;
bus->write = orion_mdio_smi_write;
@@ -415,8 +427,8 @@ static int orion_mdio_remove(struct platform_device *pdev)
}
static const struct of_device_id orion_mdio_match[] = {
- { .compatible = "marvell,orion-mdio", .data = (void *)BUS_TYPE_SMI },
- { .compatible = "marvell,xmdio", .data = (void *)BUS_TYPE_XSMI },
+ { .compatible = "marvell,orion-mdio", .data = &smi_bus },
+ { .compatible = "marvell,xmdio", .data = &xsmi_bus },
{ }
};
MODULE_DEVICE_TABLE(of, orion_mdio_match);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
index d825eb021b22..2361879d7188 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
@@ -15,11 +15,14 @@
#include <linux/net_tstamp.h>
#include <linux/phy.h>
#include <linux/phylink.h>
-#include <net/flow_offload.h>
#include <net/page_pool.h>
#include <linux/bpf.h>
#include <net/xdp.h>
+#ifndef CACHE_LINE_MASK
+#define CACHE_LINE_MASK (~(L1_CACHE_BYTES - 1))
+#endif
+
/* The PacketOffset field is measured in units of 32 bytes and is 3 bits wide,
* so the maximum offset is 7 * 32 = 224
*/
@@ -56,10 +59,16 @@
#define MVPP2_RXQ_PACKET_OFFSET_OFFS 28
#define MVPP2_RXQ_PACKET_OFFSET_MASK 0x70000000
#define MVPP2_RXQ_DISABLE_MASK BIT(31)
+/* Total max number of hw RX queues */
+#define MVPP2_RXQ_MAX_NUM 128
/* Top Registers */
#define MVPP2_MH_REG(port) (0x5040 + 4 * (port))
+#define MVPP2_DSA_NON_EXTENDED BIT(4)
#define MVPP2_DSA_EXTENDED BIT(5)
+#define MVPP2_VER_ID_REG 0x50b0
+#define MVPP2_VER_PP22 0x10
+#define MVPP2_VER_PP23 0x11
/* Parser Registers */
#define MVPP2_PRS_INIT_LOOKUP_REG 0x1000
@@ -85,13 +94,23 @@
/* RSS Registers */
#define MVPP22_RSS_INDEX 0x1500
+#define MVPP22_RSS_IDX_ENTRY_NUM_OFF 0
+#define MVPP22_RSS_IDX_ENTRY_NUM_MASK 0x1F
+#define MVPP22_RSS_IDX_TBL_NUM_OFF 8
+#define MVPP22_RSS_IDX_TBL_NUM_MASK 0x700
+#define MVPP22_RSS_IDX_RXQ_NUM_OFF 16
+#define MVPP22_RSS_IDX_RXQ_NUM_MASK 0xFF0000
#define MVPP22_RSS_INDEX_TABLE_ENTRY(idx) (idx)
#define MVPP22_RSS_INDEX_TABLE(idx) ((idx) << 8)
#define MVPP22_RSS_INDEX_QUEUE(idx) ((idx) << 16)
#define MVPP22_RXQ2RSS_TABLE 0x1504
#define MVPP22_RSS_TABLE_POINTER(p) (p)
#define MVPP22_RSS_TABLE_ENTRY 0x1508
+#define MVPP22_RSS_TBL_ENTRY_OFF 0
+#define MVPP22_RSS_TBL_ENTRY_MASK 0xFF
#define MVPP22_RSS_WIDTH 0x150c
+#define MVPP22_RSS_WIDTH_OFF 0
+#define MVPP22_RSS_WIDTH_MASK 0xF
/* Classifier Registers */
#define MVPP2_CLS_MODE_REG 0x1800
@@ -116,7 +135,8 @@
#define MVPP2_CLS_FLOW_TBL1_REG 0x1828
#define MVPP2_CLS_FLOW_TBL1_N_FIELDS_MASK 0x7
#define MVPP2_CLS_FLOW_TBL1_N_FIELDS(x) (x)
-#define MVPP2_CLS_FLOW_TBL1_LU_TYPE(lu) (((lu) & 0x3f) << 3)
+#define MVPP2_CLS_FLOW_TBL1_LKP_TYPE_MASK 0x3f
+#define MVPP2_CLS_FLOW_TBL1_LKP_TYPE(x) ((x) << 3)
#define MVPP2_CLS_FLOW_TBL1_PRIO_MASK 0x3f
#define MVPP2_CLS_FLOW_TBL1_PRIO(x) ((x) << 9)
#define MVPP2_CLS_FLOW_TBL1_SEQ_MASK 0x7
@@ -139,18 +159,17 @@
#define MVPP22_CLS_C2_TCAM_DATA2 0x1b18
#define MVPP22_CLS_C2_TCAM_DATA3 0x1b1c
#define MVPP22_CLS_C2_TCAM_DATA4 0x1b20
-#define MVPP22_CLS_C2_LU_TYPE(lu) ((lu) & 0x3f)
#define MVPP22_CLS_C2_PORT_ID(port) ((port) << 8)
-#define MVPP22_CLS_C2_PORT_MASK (0xff << 8)
-#define MVPP22_CLS_C2_TCAM_INV 0x1b24
-#define MVPP22_CLS_C2_TCAM_INV_BIT BIT(31)
+#define MVPP2_CLS2_TCAM_INV_REG 0x1b24
+#define MVPP2_CLS2_TCAM_INV_INVALID 31
+#define MVPP22_CLS_C2_LKP_TYPE(type) (type)
+#define MVPP22_CLS_C2_LKP_TYPE_MASK (0x3f)
#define MVPP22_CLS_C2_HIT_CTR 0x1b50
#define MVPP22_CLS_C2_ACT 0x1b60
#define MVPP22_CLS_C2_ACT_RSS_EN(act) (((act) & 0x3) << 19)
#define MVPP22_CLS_C2_ACT_FWD(act) (((act) & 0x7) << 13)
#define MVPP22_CLS_C2_ACT_QHIGH(act) (((act) & 0x3) << 11)
#define MVPP22_CLS_C2_ACT_QLOW(act) (((act) & 0x3) << 9)
-#define MVPP22_CLS_C2_ACT_COLOR(act) ((act) & 0x7)
#define MVPP22_CLS_C2_ATTR0 0x1b64
#define MVPP22_CLS_C2_ATTR0_QHIGH(qh) (((qh) & 0x1f) << 24)
#define MVPP22_CLS_C2_ATTR0_QHIGH_MASK 0x1f
@@ -162,8 +181,8 @@
#define MVPP22_CLS_C2_ATTR2 0x1b6c
#define MVPP22_CLS_C2_ATTR2_RSS_EN BIT(30)
#define MVPP22_CLS_C2_ATTR3 0x1b70
-#define MVPP22_CLS_C2_TCAM_CTRL 0x1b90
-#define MVPP22_CLS_C2_TCAM_BYPASS_FIFO BIT(0)
+#define MVPP2_CLS2_TCAM_CTRL_REG 0x1b90
+#define MVPP2_CLS2_TCAM_CTRL_BYPASS_FIFO_STAGES BIT(0)
/* Descriptor Manager Top Registers */
#define MVPP2_RXQ_NUM_REG 0x2040
@@ -232,6 +251,7 @@
#define MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG 0x411c
#define MVPP22_AXI_RX_DATA_WR_ATTR_REG 0x4120
#define MVPP22_AXI_TX_DATA_RD_ATTR_REG 0x4130
+#define MVPP22_AXI_TX_DATA_RD_QOS_ATTRIBUTE (0x3 << 4)
#define MVPP22_AXI_RD_NORMAL_CODE_REG 0x4150
#define MVPP22_AXI_RD_SNOOP_CODE_REG 0x4154
#define MVPP22_AXI_WR_NORMAL_CODE_REG 0x4160
@@ -276,8 +296,8 @@
#define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff)
#define MVPP2_ISR_DISABLE_INTERRUPT(mask) (((mask) << 16) & 0xffff0000)
#define MVPP2_ISR_RX_TX_CAUSE_REG(port) (0x5480 + 4 * (port))
-#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(version) \
- ((version) == MVPP21 ? 0xffff : 0xff)
+#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(variant) \
+ (static_branch_unlikely(&variant) ? 0xffff : 0xff)
#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK 0xff0000
#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET 16
#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK BIT(24)
@@ -292,6 +312,8 @@
#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK 0x3fc00000
#define MVPP2_PON_CAUSE_MISC_SUM_MASK BIT(31)
#define MVPP2_ISR_MISC_CAUSE_REG 0x55b0
+#define MVPP2_ISR_RX_ERR_CAUSE_REG(port) (0x5520 + 4 * (port))
+#define MVPP2_ISR_RX_ERR_CAUSE_NONOCC_MASK 0x00ff
/* Buffer Manager registers */
#define MVPP2_BM_POOL_BASE_REG(pool) (0x6000 + ((pool) * 4))
@@ -319,6 +341,10 @@
#define MVPP2_BM_HIGH_THRESH_MASK 0x7f0000
#define MVPP2_BM_HIGH_THRESH_VALUE(val) ((val) << \
MVPP2_BM_HIGH_THRESH_OFFS)
+#define MVPP2_BM_BPPI_HIGH_THRESH 0x1E
+#define MVPP2_BM_BPPI_LOW_THRESH 0x1C
+#define MVPP23_BM_BPPI_HIGH_THRESH 0x34
+#define MVPP23_BM_BPPI_LOW_THRESH 0x28
#define MVPP2_BM_INTR_CAUSE_REG(pool) (0x6240 + ((pool) * 4))
#define MVPP2_BM_RELEASED_DELAY_MASK BIT(0)
#define MVPP2_BM_ALLOC_FAILED_MASK BIT(1)
@@ -343,26 +369,12 @@
#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK 0xff00
#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT 8
-/* Packet Processor per-port counters */
-#define MVPP2_OVERRUN_ETH_DROP 0x7000
-#define MVPP2_CLS_ETH_DROP 0x7020
+#define MVPP22_BM_POOL_BASE_ADDR_HIGH_REG 0x6310
+#define MVPP22_BM_POOL_BASE_ADDR_HIGH_MASK 0xff
+#define MVPP23_BM_8POOL_MODE BIT(8)
/* Hit counters registers */
#define MVPP2_CTRS_IDX 0x7040
-#define MVPP22_CTRS_TX_CTR(port, txq) ((txq) | ((port) << 3) | BIT(7))
-#define MVPP2_TX_DESC_ENQ_CTR 0x7100
-#define MVPP2_TX_DESC_ENQ_TO_DDR_CTR 0x7104
-#define MVPP2_TX_BUFF_ENQ_TO_DDR_CTR 0x7108
-#define MVPP2_TX_DESC_ENQ_HW_FWD_CTR 0x710c
-#define MVPP2_RX_DESC_ENQ_CTR 0x7120
-#define MVPP2_TX_PKTS_DEQ_CTR 0x7130
-#define MVPP2_TX_PKTS_FULL_QUEUE_DROP_CTR 0x7200
-#define MVPP2_TX_PKTS_EARLY_DROP_CTR 0x7204
-#define MVPP2_TX_PKTS_BM_DROP_CTR 0x7208
-#define MVPP2_TX_PKTS_BM_MC_DROP_CTR 0x720c
-#define MVPP2_RX_PKTS_FULL_QUEUE_DROP_CTR 0x7220
-#define MVPP2_RX_PKTS_EARLY_DROP_CTR 0x7224
-#define MVPP2_RX_PKTS_BM_DROP_CTR 0x7228
#define MVPP2_CLS_DEC_TBL_HIT_CTR 0x7700
#define MVPP2_CLS_FLOW_TBL_HIT_CTR 0x7704
@@ -443,12 +455,15 @@
#define MVPP2_GMAC_STATUS0_GMII_SPEED BIT(1)
#define MVPP2_GMAC_STATUS0_MII_SPEED BIT(2)
#define MVPP2_GMAC_STATUS0_FULL_DUPLEX BIT(3)
-#define MVPP2_GMAC_STATUS0_RX_PAUSE BIT(4)
-#define MVPP2_GMAC_STATUS0_TX_PAUSE BIT(5)
+#define MVPP2_GMAC_STATUS0_RX_PAUSE BIT(6)
+#define MVPP2_GMAC_STATUS0_TX_PAUSE BIT(7)
#define MVPP2_GMAC_STATUS0_AN_COMPLETE BIT(11)
+#define MVPP2_GMAC_PORT_FIFO_CFG_0_REG 0x18
+#define MVPP2_GMAC_TX_FIFO_WM_MASK 0xffff
+#define MVPP2_GMAC_TX_FIFO_WM_LOW_OFFSET 8
#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG 0x1c
#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS 6
-#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0
+#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x3fc0
#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \
MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
#define MVPP22_GMAC_INT_STAT 0x20
@@ -469,14 +484,12 @@
#define MVPP22_GMAC_INT_SUM_MASK_LINK_STAT BIT(1)
#define MVPP22_GMAC_INT_SUM_MASK_PTP BIT(2)
-/* Per-port XGMAC registers. PPv2.2 only, only for GOP port 0,
+/* Per-port XGMAC registers. PPv2.2 and PPv2.3, only for GOP port 0,
* relative to port->base.
*/
#define MVPP22_XLG_CTRL0_REG 0x100
#define MVPP22_XLG_CTRL0_PORT_EN BIT(0)
#define MVPP22_XLG_CTRL0_MAC_RESET_DIS BIT(1)
-#define MVPP22_XLG_CTRL0_FORCE_LINK_DOWN BIT(2)
-#define MVPP22_XLG_CTRL0_FORCE_LINK_PASS BIT(3)
#define MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN BIT(7)
#define MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN BIT(8)
#define MVPP22_XLG_CTRL0_MIB_CNT_DIS BIT(14)
@@ -503,10 +516,11 @@
#define MVPP22_XLG_CTRL4_REG 0x184
#define MVPP22_XLG_CTRL4_FWD_FC BIT(5)
#define MVPP22_XLG_CTRL4_FWD_PFC BIT(6)
+#define MVPP22_XLG_CTRL4_USE_XPCS BIT(8)
#define MVPP22_XLG_CTRL4_MACMODSELECT_GMAC BIT(12)
#define MVPP22_XLG_CTRL4_EN_IDLE_CHECK BIT(14)
-/* SMI registers. PPv2.2 only, relative to priv->iface_base. */
+/* SMI registers. PPv2.2 and PPv2.3, relative to priv->iface_base. */
#define MVPP22_SMI_MISC_CFG_REG 0x1204
#define MVPP22_SMI_POLLING_EN BIT(10)
@@ -582,7 +596,7 @@
#define MVPP2_QUEUE_NEXT_DESC(q, index) \
(((index) < (q)->last_desc) ? ((index) + 1) : 0)
-/* XPCS registers. PPv2.2 only */
+/* XPCS registers. PPv2.2 and PPv2.3 */
#define MVPP22_MPCS_BASE(port) (0x7000 + (port) * 0x1000)
#define MVPP22_MPCS_CTRL 0x14
#define MVPP22_MPCS_CTRL_FWD_ERR_CONN BIT(10)
@@ -593,7 +607,16 @@
#define MVPP22_MPCS_CLK_RESET_DIV_RATIO(n) ((n) << 4)
#define MVPP22_MPCS_CLK_RESET_DIV_SET BIT(11)
-/* XPCS registers. PPv2.2 only */
+/* FCA registers. PPv2.2 and PPv2.3 */
+#define MVPP22_FCA_BASE(port) (0x7600 + (port) * 0x1000)
+#define MVPP22_FCA_REG_SIZE 16
+#define MVPP22_FCA_REG_MASK 0xFFFF
+#define MVPP22_FCA_CONTROL_REG 0x0
+#define MVPP22_FCA_ENABLE_PERIODIC BIT(11)
+#define MVPP22_PERIODIC_COUNTER_LSB_REG (0x110)
+#define MVPP22_PERIODIC_COUNTER_MSB_REG (0x114)
+
+/* XPCS registers. PPv2.2 and PPv2.3 */
#define MVPP22_XPCS_BASE(port) (0x7400 + (port) * 0x1000)
#define MVPP22_XPCS_CFG0 0x0
#define MVPP22_XPCS_CFG0_RESET_DIS BIT(0)
@@ -658,11 +681,14 @@
/* Various constants */
/* Coalescing */
-#define MVPP2_TXDONE_COAL_PKTS_THRESH 64
+#define MVPP2_TXDONE_COAL_PKTS_THRESH 32
#define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL
+#define MVPP2_GUARD_TXDONE_HRTIMER_NS (10 * NSEC_PER_MSEC)
#define MVPP2_TXDONE_COAL_USEC 1000
#define MVPP2_RX_COAL_PKTS 32
#define MVPP2_RX_COAL_USEC 64
+#define MVPP2_TX_BULK_TIME (50 * NSEC_PER_USEC)
+#define MVPP2_TX_BULK_MAX_PACKETS (MVPP2_AGGR_TXQ_SIZE / MVPP2_MAX_PORTS)
/* The two bytes Marvell header. Either contains a special value used
* by Marvell switches when a specific hardware mode is enabled (not
@@ -677,6 +703,7 @@
#define MVPP2_PPPOE_HDR_SIZE 8
#define MVPP2_VLAN_TAG_LEN 4
#define MVPP2_VLAN_TAG_EDSA_LEN 8
+#define MVPP2_MPLS_HEADER_LEN 4
/* Lbtd 802.3 type */
#define MVPP2_IP_LBDT_TYPE 0xfffa
@@ -695,32 +722,47 @@
/* Maximum number of supported ports */
#define MVPP2_MAX_PORTS 4
+/* Loopback port index */
+#define MVPP2_LOOPBACK_PORT_INDEX 3
+
/* Maximum number of TXQs used by single port */
#define MVPP2_MAX_TXQ 8
-/* MVPP2_MAX_TSO_SEGS is the maximum number of fragments to allow in the GSO
- * skb. As we need a maxium of two descriptors per fragments (1 header, 1 data),
- * multiply this value by two to count the maximum number of skb descs needed.
+/* SKB/TSO/TX-ring-size/pause-wakeup constants depend upon
+ * MVPP2_MAX_TSO_SEGS - the max number of fragments allowed in a GSO skb.
+ * The minimum requirement is maxPacket(64kB)/stdMTU(1500) = 44 fragments,
+ * and MVPP2_MAX_TSO_SEGS = max(44, MAX_SKB_FRAGS).
+ * MAX_SKB_DESCS: 2 descriptors are needed per TSO fragment (1 header, 1 data)
+ * plus a per-CPU reservation of MVPP2_CPU_DESC_CHUNK * CPUs for optimization.
+ * The TX stop activation threshold (i.e. queue full) is MAX_SKB_DESCS.
+ * The TX stop-to-wake hysteresis is MAX_TSO_SEGS.
+ * The Tx ring size cannot be smaller than TSO_SEGS + HYSTERESIS + SKBs.
+ * These numbers depend upon the number of online CPUs used by the driver.
*/
-#define MVPP2_MAX_TSO_SEGS 300
-#define MVPP2_MAX_SKB_DESCS (MVPP2_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
+#define MVPP2_MAX_TSO_SEGS 44
+#define MVPP2_MAX_SKB_DESCS(ncpus) (MVPP2_MAX_TSO_SEGS * 2 + \
+ MVPP2_CPU_DESC_CHUNK * ncpus)
+#define MVPP2_TX_PAUSE_HYSTERESIS (MVPP2_MAX_TSO_SEGS * 2)
/* Max number of RXQs per port */
#define MVPP2_PORT_MAX_RXQ 32
/* Max number of Rx descriptors */
-#define MVPP2_MAX_RXD_MAX 1024
-#define MVPP2_MAX_RXD_DFLT 128
+#define MVPP2_MAX_RXD_MAX 2048
+#define MVPP2_MAX_RXD_DFLT MVPP2_MAX_RXD_MAX
/* Max number of Tx descriptors */
#define MVPP2_MAX_TXD_MAX 2048
-#define MVPP2_MAX_TXD_DFLT 1024
+#define MVPP2_MAX_TXD_DFLT MVPP2_MAX_TXD_MAX
+#define MVPP2_MIN_TXD(ncpus) ALIGN(MVPP2_MAX_TSO_SEGS + \
+ MVPP2_MAX_SKB_DESCS(ncpus) + \
+ MVPP2_TX_PAUSE_HYSTERESIS, 32)
/* Amount of Tx descriptors that can be reserved at once by CPU */
#define MVPP2_CPU_DESC_CHUNK 64
/* Max number of Tx descriptors in each aggregated queue */
-#define MVPP2_AGGR_TXQ_SIZE 256
+#define MVPP2_AGGR_TXQ_SIZE 512
/* Descriptor aligned size */
#define MVPP2_DESC_ALIGNED_SIZE 32
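As a quick sanity check of the ring-sizing rules described in the comment above, the following stand-alone sketch (illustration only, not part of the patch) evaluates MVPP2_MIN_TXD() for an assumed value of 4 online CPUs; the ALIGN() helper here is a user-space re-implementation of the kernel macro:

/* Illustration only - assumes 4 online CPUs; constants copied from above. */
#include <stdio.h>

#define ALIGN(x, a)		(((x) + (a) - 1) / (a) * (a))
#define MVPP2_MAX_TSO_SEGS	44
#define MVPP2_CPU_DESC_CHUNK	64
#define MVPP2_MAX_SKB_DESCS(n)	(MVPP2_MAX_TSO_SEGS * 2 + \
				 MVPP2_CPU_DESC_CHUNK * (n))
#define MVPP2_TX_PAUSE_HYSTERESIS	(MVPP2_MAX_TSO_SEGS * 2)
#define MVPP2_MIN_TXD(n)	ALIGN(MVPP2_MAX_TSO_SEGS + \
				      MVPP2_MAX_SKB_DESCS(n) + \
				      MVPP2_TX_PAUSE_HYSTERESIS, 32)

int main(void)
{
	int ncpus = 4;	/* assumed value, depends on the running system */

	/* 44 + (88 + 256) + 88 = 476, aligned up to 480 descriptors */
	printf("MVPP2_MIN_TXD(%d) = %d\n", ncpus, MVPP2_MIN_TXD(ncpus));
	return 0;
}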
@@ -729,33 +771,57 @@
#define MVPP2_TX_DESC_ALIGN (MVPP2_DESC_ALIGNED_SIZE - 1)
/* RX FIFO constants */
+#define MVPP2_RX_FIFO_PORT_DATA_SIZE_44KB 0xb000
#define MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB 0x8000
#define MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB 0x2000
#define MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB 0x1000
-#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB 0x200
-#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB 0x80
+#define MVPP2_RX_FIFO_PORT_ATTR_SIZE(data_size)	((data_size) >> 6)
#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB 0x40
#define MVPP2_RX_FIFO_PORT_MIN_PKT 0x80
/* TX FIFO constants */
-#define MVPP22_TX_FIFO_DATA_SIZE_10KB 0xa
-#define MVPP22_TX_FIFO_DATA_SIZE_3KB 0x3
-#define MVPP2_TX_FIFO_THRESHOLD_MIN 256
-#define MVPP2_TX_FIFO_THRESHOLD_10KB \
- (MVPP22_TX_FIFO_DATA_SIZE_10KB * 1024 - MVPP2_TX_FIFO_THRESHOLD_MIN)
-#define MVPP2_TX_FIFO_THRESHOLD_3KB \
- (MVPP22_TX_FIFO_DATA_SIZE_3KB * 1024 - MVPP2_TX_FIFO_THRESHOLD_MIN)
+#define MVPP22_TX_FIFO_DATA_SIZE_18KB 18
+#define MVPP22_TX_FIFO_DATA_SIZE_10KB 10
+#define MVPP22_TX_FIFO_DATA_SIZE_1KB 1
+#define MVPP22_TX_FIFO_DATA_SIZE_MIN 3
+#define MVPP22_TX_FIFO_DATA_SIZE_MAX 15
+#define MVPP2_TX_FIFO_THRESHOLD_MIN 256 /* Bytes */
+#define MVPP2_TX_FIFO_THRESHOLD(kb)	\
+	((kb) * 1024 - MVPP2_TX_FIFO_THRESHOLD_MIN)
+#define MVPP22_TX_FIFO_EXTRA_PARAM_MASK 0xFF
+#define MVPP22_TX_FIFO_EXTRA_PARAM_OFFS(port) (8 * (port))
+#define MVPP22_TX_FIFO_EXTRA_PARAM_SIZE(port, val) \
+ (((val) >> MVPP22_TX_FIFO_EXTRA_PARAM_OFFS(port)) & \
+ MVPP22_TX_FIFO_EXTRA_PARAM_MASK)
+
+/* RX FIFO threshold in 1KB granularity */
+#define MVPP23_PORT0_FIFO_TRSH (9 * 1024)
+#define MVPP23_PORT1_FIFO_TRSH (4 * 1024)
+#define MVPP23_PORT2_FIFO_TRSH (2 * 1024)
+
+/* RX Flow Control Registers */
+#define MVPP2_RX_FC_REG(port) (0x150 + 4 * (port))
+#define MVPP2_RX_FC_EN BIT(24)
+#define MVPP2_RX_FC_TRSH_OFFS 16
+#define MVPP2_RX_FC_TRSH_MASK (0xFF << MVPP2_RX_FC_TRSH_OFFS)
+#define MVPP2_RX_FC_TRSH_UNIT 256
+
+/* GMAC TX FIFO configuration */
+#define MVPP2_GMAC_TX_FIFO_MIN_TH \
+ MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(50)
+#define MVPP2_GMAC_TX_FIFO_LOW_WM 75
+#define MVPP2_GMAC_TX_FIFO_HI_WM 77
/* RX buffer constants */
#define MVPP2_SKB_SHINFO_SIZE \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
+#define MVPP2_MTU_OVERHEAD_SIZE \
+ (MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + ETH_HLEN + ETH_FCS_LEN)
#define MVPP2_RX_PKT_SIZE(mtu) \
- ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
- ETH_HLEN + ETH_FCS_LEN, cache_line_size())
+ ALIGN((mtu) + MVPP2_MTU_OVERHEAD_SIZE, cache_line_size())
#define MVPP2_RX_BUF_SIZE(pkt_size) ((pkt_size) + MVPP2_SKB_HEADROOM)
-#define MVPP2_RX_TOTAL_SIZE(buf_size) ((buf_size) + MVPP2_SKB_SHINFO_SIZE)
#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
((total_size) - MVPP2_SKB_HEADROOM - MVPP2_SKB_SHINFO_SIZE)
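To make the buffer-sizing chain above concrete, here is a small user-space sketch (illustration only) that evaluates MVPP2_RX_PKT_SIZE() for a standard 1500-byte MTU; it assumes a 64-byte cache line and the usual 2-byte MVPP2_MH_SIZE, both of which are defined outside this hunk:

/* Illustration only - MVPP2_MH_SIZE and the cache line size are assumptions. */
#include <stdio.h>

#define ALIGN(x, a)		(((x) + (a) - 1) / (a) * (a))
#define MVPP2_MH_SIZE		2	/* assumed, defined elsewhere in mvpp2.h */
#define MVPP2_VLAN_TAG_LEN	4
#define ETH_HLEN		14
#define ETH_FCS_LEN		4
#define MVPP2_MTU_OVERHEAD_SIZE	\
	(MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + ETH_HLEN + ETH_FCS_LEN)
#define MVPP2_RX_PKT_SIZE(mtu)	ALIGN((mtu) + MVPP2_MTU_OVERHEAD_SIZE, 64)

int main(void)
{
	/* 1500 + 24 bytes of overhead = 1524, rounded up to 1536 */
	printf("MVPP2_RX_PKT_SIZE(1500) = %d\n", MVPP2_RX_PKT_SIZE(1500));
	return 0;
}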
@@ -765,15 +831,10 @@
#define MVPP2_BIT_TO_WORD(bit) ((bit) / 32)
#define MVPP2_BIT_IN_WORD(bit) ((bit) % 32)
-#define MVPP2_N_PRS_FLOWS 52
-#define MVPP2_N_RFS_ENTRIES_PER_FLOW 4
-
-/* There are 7 supported high-level flows */
-#define MVPP2_N_RFS_RULES (MVPP2_N_RFS_ENTRIES_PER_FLOW * 7)
-
/* RSS constants */
-#define MVPP22_N_RSS_TABLES 8
#define MVPP22_RSS_TABLE_ENTRIES 32
+#define MVPP22_RSS_TBL_NUM 8
+#define MVPP22_RSS_WIDTH_MAX 8
/* IPv6 max L3 address size */
#define MVPP2_MAX_L3_ADDR_SIZE 16
@@ -781,6 +842,9 @@
/* Port flags */
#define MVPP2_F_LOOPBACK BIT(0)
#define MVPP2_F_DT_COMPAT BIT(1)
+#define MVPP22_F_IF_MUSDK BIT(2) /* musdk port */
+/* BIT(1) and BIT(2) are reserved */
+#define MVPP2_F_IF_TX_ON BIT(3)
/* Marvell tag types */
enum mvpp2_tag_type {
@@ -843,18 +907,17 @@ enum mvpp22_ptp_packet_format {
#define MVPP22_PTP_TIMESTAMPQUEUESELECT BIT(18)
/* BM constants */
-#define MVPP2_BM_JUMBO_BUF_NUM 512
-#define MVPP2_BM_LONG_BUF_NUM 1024
+#define MVPP2_BM_JUMBO_BUF_NUM 2048
+#define MVPP2_BM_LONG_BUF_NUM 2048
#define MVPP2_BM_SHORT_BUF_NUM 2048
#define MVPP2_BM_POOL_SIZE_MAX (16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
#define MVPP2_BM_POOL_PTR_ALIGN 128
-#define MVPP2_BM_MAX_POOLS 8
/* BM cookie (32 bits) definition */
#define MVPP2_BM_COOKIE_POOL_OFFS 8
#define MVPP2_BM_COOKIE_CPU_OFFS 24
-#define MVPP2_BM_SHORT_FRAME_SIZE 736 /* frame size 128 */
+#define MVPP2_BM_SHORT_FRAME_SIZE 1024
#define MVPP2_BM_LONG_FRAME_SIZE 2240 /* frame size 1664 */
#define MVPP2_BM_JUMBO_FRAME_SIZE 10432 /* frame size 9856 */
/* BM short pool packet size
@@ -897,7 +960,7 @@ enum mvpp22_ptp_packet_format {
#define MVPP2_MIB_FC_RCVD 0x58
#define MVPP2_MIB_RX_FIFO_OVERRUN 0x5c
#define MVPP2_MIB_UNDERSIZE_RCVD 0x60
-#define MVPP2_MIB_FRAGMENTS_RCVD 0x64
+#define MVPP2_MIB_FRAGMENTS_ERR_RCVD 0x64
#define MVPP2_MIB_OVERSIZE_RCVD 0x68
#define MVPP2_MIB_JABBER_RCVD 0x6c
#define MVPP2_MIB_MAC_RCV_ERROR 0x70
@@ -907,6 +970,18 @@ enum mvpp22_ptp_packet_format {
#define MVPP2_MIB_COUNTERS_STATS_DELAY (1 * HZ)
+/* Other counters */
+#define MVPP2_OVERRUN_DROP_REG(port) (0x7000 + 4 * (port))
+#define MVPP2_CLS_DROP_REG(port) (0x7020 + 4 * (port))
+#define MVPP2_CNT_IDX_REG 0x7040
+#define MVPP2_TX_PKT_FULLQ_DROP_REG 0x7200
+#define MVPP2_TX_PKT_EARLY_DROP_REG 0x7204
+#define MVPP2_TX_PKT_BM_DROP_REG 0x7208
+#define MVPP2_TX_PKT_BM_MC_DROP_REG 0x720c
+#define MVPP2_RX_PKT_FULLQ_DROP_REG 0x7220
+#define MVPP2_RX_PKT_EARLY_DROP_REG 0x7224
+#define MVPP2_RX_PKT_BM_DROP_REG 0x7228
+
#define MVPP2_DESC_DMA_MASK DMA_BIT_MASK(40)
/* Buffer header info bits */
@@ -919,12 +994,67 @@ enum mvpp22_ptp_packet_format {
struct mvpp2_tai;
-/* Definitions */
-struct mvpp2_dbgfs_entries;
+/* MSS Flow control */
+#define MSS_SRAM_SIZE 0x800
+#define MSS_FC_COM_REG 0
+#define FLOW_CONTROL_ENABLE_BIT BIT(0)
+#define FLOW_CONTROL_UPDATE_COMMAND_BIT BIT(31)
+#define FC_QUANTA 0xFFFF
+#define FC_CLK_DIVIDER 0x140
+
+#define MSS_BUF_POOL_BASE 0x40
+#define MSS_BUF_POOL_OFFS 4
+#define MSS_BUF_POOL_REG(id) (MSS_BUF_POOL_BASE \
+ + (id) * MSS_BUF_POOL_OFFS)
+
+#define MSS_BUF_POOL_STOP_MASK 0xFFF
+#define MSS_BUF_POOL_START_MASK (0xFFF << MSS_BUF_POOL_START_OFFS)
+#define MSS_BUF_POOL_START_OFFS 12
+#define MSS_BUF_POOL_PORTS_MASK (0xF << MSS_BUF_POOL_PORTS_OFFS)
+#define MSS_BUF_POOL_PORTS_OFFS 24
+#define MSS_BUF_POOL_PORT_OFFS(id) (0x1 << \
+ ((id) + MSS_BUF_POOL_PORTS_OFFS))
+
+#define MSS_RXQ_TRESH_BASE 0x200
+#define MSS_RXQ_TRESH_OFFS 4
+#define MSS_RXQ_TRESH_REG(q, fq) (MSS_RXQ_TRESH_BASE + (((q) + (fq)) \
+ * MSS_RXQ_TRESH_OFFS))
+
+#define MSS_RXQ_TRESH_START_MASK 0xFFFF
+#define MSS_RXQ_TRESH_STOP_MASK (0xFFFF << MSS_RXQ_TRESH_STOP_OFFS)
+#define MSS_RXQ_TRESH_STOP_OFFS 16
+
+#define MSS_RXQ_ASS_BASE 0x80
+#define MSS_RXQ_ASS_OFFS 4
+#define MSS_RXQ_ASS_PER_REG 4
+#define MSS_RXQ_ASS_PER_OFFS 8
+#define MSS_RXQ_ASS_PORTID_OFFS 0
+#define MSS_RXQ_ASS_PORTID_MASK 0x3
+#define MSS_RXQ_ASS_HOSTID_OFFS 2
+#define MSS_RXQ_ASS_HOSTID_MASK 0x3F
+
+#define MSS_RXQ_ASS_Q_BASE(q, fq) ((((q) + (fq)) % MSS_RXQ_ASS_PER_REG) \
+ * MSS_RXQ_ASS_PER_OFFS)
+#define MSS_RXQ_ASS_PQ_BASE(q, fq) ((((q) + (fq)) / MSS_RXQ_ASS_PER_REG) \
+ * MSS_RXQ_ASS_OFFS)
+#define MSS_RXQ_ASS_REG(q, fq) (MSS_RXQ_ASS_BASE + MSS_RXQ_ASS_PQ_BASE(q, fq))
+
+#define MSS_THRESHOLD_STOP 768
+#define MSS_THRESHOLD_START 1024
+#define MSS_FC_MAX_TIMEOUT 5000
+
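The MSS register macros above encode where a queue's flow-control threshold and assignment live; the sketch below (illustration only, with arbitrary example queue numbers) shows the offsets they produce for, e.g., logical queue 2 on a port whose first hardware RXQ is 8:

/* Illustration only - queue numbers are arbitrary example values. */
#include <stdio.h>

#define MSS_RXQ_TRESH_BASE	0x200
#define MSS_RXQ_TRESH_OFFS	4
#define MSS_RXQ_TRESH_REG(q, fq)	(MSS_RXQ_TRESH_BASE + (((q) + (fq)) \
					* MSS_RXQ_TRESH_OFFS))

#define MSS_RXQ_ASS_BASE	0x80
#define MSS_RXQ_ASS_OFFS	4
#define MSS_RXQ_ASS_PER_REG	4
#define MSS_RXQ_ASS_PER_OFFS	8
#define MSS_RXQ_ASS_Q_BASE(q, fq)	((((q) + (fq)) % MSS_RXQ_ASS_PER_REG) \
					* MSS_RXQ_ASS_PER_OFFS)
#define MSS_RXQ_ASS_PQ_BASE(q, fq)	((((q) + (fq)) / MSS_RXQ_ASS_PER_REG) \
					* MSS_RXQ_ASS_OFFS)
#define MSS_RXQ_ASS_REG(q, fq)	(MSS_RXQ_ASS_BASE + MSS_RXQ_ASS_PQ_BASE(q, fq))

int main(void)
{
	int q = 2, fq = 8;	/* example: logical RXQ 2, first HW RXQ 8 */

	printf("threshold reg: 0x%x\n", MSS_RXQ_TRESH_REG(q, fq));	/* 0x228 */
	printf("assign reg:    0x%x\n", MSS_RXQ_ASS_REG(q, fq));	/* 0x88 */
	printf("field offset:  %d bits\n", MSS_RXQ_ASS_Q_BASE(q, fq));	/* 16 */
	return 0;
}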
+#define MVPP2_PRS_TCAM_SRAM_SIZE 256
+#define MVPP2_N_FLOWS 52
-struct mvpp2_rss_table {
- u32 indir[MVPP22_RSS_TABLE_ENTRIES];
-};
+/* Buffer header info bits */
+#define MVPP2_B_HDR_INFO_MC_ID_MASK 0xfff
+#define MVPP2_B_HDR_INFO_MC_ID(info) ((info) & MVPP2_B_HDR_INFO_MC_ID_MASK)
+#define MVPP2_B_HDR_INFO_LAST_OFFS 12
+#define MVPP2_B_HDR_INFO_LAST_MASK BIT(12)
+#define MVPP2_B_HDR_INFO_IS_LAST(info) \
+ (((info) & MVPP2_B_HDR_INFO_LAST_MASK) >> MVPP2_B_HDR_INFO_LAST_OFFS)
+
+/* Definitions */
struct mvpp2_buff_hdr {
__le32 next_phys_addr;
@@ -945,16 +1075,18 @@ struct mvpp2 {
/* Shared registers' base addresses */
void __iomem *lms_base;
void __iomem *iface_base;
+ void __iomem *cm3_base;
- /* On PPv2.2, each "software thread" can access the base
+ /* On PPv2.2 and PPv2.3, each "software thread" can access the base
* register through a separate address space, each 64 KB apart
* from each other. Typically, such address spaces will be
* used per CPU.
*/
void __iomem *swth_base[MVPP2_MAX_THREADS];
- /* On PPv2.2, some port control registers are located into the system
- * controller space. These registers are accessible through a regmap.
+	/* On PPv2.2 and PPv2.3, some port control registers are located in
+ * the system controller space. These registers are accessible
+ * through a regmap.
*/
struct regmap *sysctrl_base;
@@ -968,7 +1100,8 @@ struct mvpp2 {
/* List of pointers to port structures */
int port_count;
struct mvpp2_port *port_list[MVPP2_MAX_PORTS];
- struct mvpp2_tai *tai;
+ /* Map of enabled ports */
+ unsigned long port_map;
/* Number of Tx threads used */
unsigned int nthreads;
@@ -978,11 +1111,9 @@ struct mvpp2 {
/* Aggregated TXQs */
struct mvpp2_tx_queue *aggr_txqs;
- /* Are we using page_pool with per-cpu pools? */
- int percpu_pools;
-
/* BM pools */
struct mvpp2_bm_pool *bm_pools;
+ struct mvpp2_bm_pool **pools_pcpu;
/* PRS shadow table */
struct mvpp2_prs_shadow *prs_shadow;
@@ -993,7 +1124,10 @@ struct mvpp2 {
u32 tclk;
/* HW version */
- enum { MVPP21, MVPP22 } hw_version;
+ enum { MVPP21, MVPP22, MVPP23 } hw_version;
+
+	/* Bitmap of the participating CPUs */
+ u16 cpu_map;
/* Maximum number of RXQs per port */
unsigned int max_port_rxqs;
@@ -1004,17 +1138,39 @@ struct mvpp2 {
/* Debugfs root entry */
struct dentry *dbgfs_dir;
+ struct mvpp2_dbgfs_prs_entry *dbgfs_prs_entry[MVPP2_PRS_TCAM_SRAM_SIZE];
+ struct mvpp2_dbgfs_flow_entry *dbgfs_flow_entry[MVPP2_N_FLOWS];
+
+ /* CM3 SRAM pool */
+ struct gen_pool *sram_pool;
- /* Debugfs entries private data */
- struct mvpp2_dbgfs_entries *dbgfs_entries;
+ /* Global TX Flow Control config */
+ bool global_tx_fc;
- /* RSS Indirection tables */
- struct mvpp2_rss_table *rss_tables[MVPP22_N_RSS_TABLES];
+ bool custom_dma_mask;
+
+ /* Spinlocks for CM3 shared memory configuration */
+ spinlock_t mss_spinlock;
/* page_pool allocator */
struct page_pool *page_pool[MVPP2_PORT_MAX_RXQ];
};
+struct mvpp2_dbgfs_prs_entry {
+ int tid;
+ struct mvpp2 *priv;
+};
+
+struct mvpp2_dbgfs_flow_entry {
+ int flow;
+ struct mvpp2 *priv;
+};
+
+struct mvpp2_dbgfs_port_flow_entry {
+ struct mvpp2_port *port;
+ struct mvpp2_dbgfs_flow_entry *dbg_fe;
+};
+
struct mvpp2_pcpu_stats {
struct u64_stats_sync syncp;
u64 rx_packets;
@@ -1033,9 +1189,24 @@ struct mvpp2_pcpu_stats {
/* Per-CPU port control */
struct mvpp2_port_pcpu {
+ /* Timer & Tasklet for bulk-tx optimization */
+ struct hrtimer bulk_timer;
+ bool bulk_timer_scheduled;
+ bool bulk_timer_restart_req;
+ struct tasklet_struct bulk_tasklet;
+
+ /* Timer & Tasklet for egress finalization */
struct hrtimer tx_done_timer;
- struct net_device *dev;
- bool timer_scheduled;
+ bool tx_done_timer_scheduled;
+ bool guard_timer_scheduled;
+ struct tasklet_struct tx_done_tasklet;
+
+ /* tx-done guard timer fields */
+	struct mvpp2_port *port; /* back-reference for use from tx_done_timer */
+	bool tx_done_passed;	/* tx-done has run since the last guard check */
+	u8 txq_coal_is_zero_map; /* bitmap of TXQs (max 8) forced to coal = 0 */
+	u8 txq_busy_suspect_map; /* bitmap of TXQs suspected busy, to be forced */
+	u32 tx_guard_cntr; /* statistics counter */
};
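The guard-timer fields above suggest a two-strike scheme: a TXQ that still has pending work when the guard fires is first marked suspect, and on the next miss its coalescing is forced to zero so tx-done interrupts fire immediately. The sketch below is purely hypothetical logic built from the field names; it is not the driver's actual callback, and guard_timer_check() is an invented name:

/*
 * Hypothetical sketch only - illustrates the "suspect then force coal = 0"
 * idea implied by the mvpp2_port_pcpu field names above, not the real code.
 */
static void guard_timer_check(struct mvpp2_port_pcpu *pcpu, u8 txqs_with_pending)
{
	if (pcpu->tx_done_passed) {
		/* tx-done ran since the last check: clear all suspicions */
		pcpu->tx_done_passed = false;
		pcpu->txq_busy_suspect_map = 0;
		return;
	}

	/* Second strike: force zero coalescing on queues already suspected */
	pcpu->txq_coal_is_zero_map |= pcpu->txq_busy_suspect_map & txqs_with_pending;
	pcpu->tx_guard_cntr += !!(pcpu->txq_busy_suspect_map & txqs_with_pending);

	/* First strike: remember queues that still have pending descriptors */
	pcpu->txq_busy_suspect_map = txqs_with_pending;
}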
struct mvpp2_queue_vector {
@@ -1051,37 +1222,6 @@ struct mvpp2_queue_vector {
struct cpumask *mask;
};
-/* Internal represention of a Flow Steering rule */
-struct mvpp2_rfs_rule {
- /* Rule location inside the flow*/
- int loc;
-
- /* Flow type, such as TCP_V4_FLOW, IP6_FLOW, etc. */
- int flow_type;
-
- /* Index of the C2 TCAM entry handling this rule */
- int c2_index;
-
- /* Header fields that needs to be extracted to match this flow */
- u16 hek_fields;
-
- /* CLS engine : only c2 is supported for now. */
- u8 engine;
-
- /* TCAM key and mask for C2-based steering. These fields should be
- * encapsulated in a union should we add more engines.
- */
- u64 c2_tcam;
- u64 c2_tcam_mask;
-
- struct flow_rule *flow;
-};
-
-struct mvpp2_ethtool_fs {
- struct mvpp2_rfs_rule rule;
- struct ethtool_rxnfc rxnfc;
-};
-
struct mvpp2_hwtstamp_queue {
struct sk_buff *skb[32];
u8 next;
@@ -1118,6 +1258,7 @@ struct mvpp2_port {
struct bpf_prog *xdp_prog;
int pkt_size;
+ u32 num_tc_queues;
/* Per-CPU port control */
struct mvpp2_port_pcpu __percpu *pcpu;
@@ -1145,11 +1286,14 @@ struct mvpp2_port {
struct device_node *of_node;
phy_interface_t phy_interface;
+ phy_interface_t of_phy_interface;
struct phylink *phylink;
struct phylink_config phylink_config;
struct phylink_pcs phylink_pcs;
struct phy *comphy;
+ bool phy_exist;
+
struct mvpp2_bm_pool *pool_long;
struct mvpp2_bm_pool *pool_short;
@@ -1162,19 +1306,32 @@ struct mvpp2_port {
u32 tx_time_coal;
- /* List of steering rules active on that port */
- struct mvpp2_ethtool_fs *rfs_rules[MVPP2_N_RFS_ENTRIES_PER_FLOW];
- int n_rfs_rules;
+ /* RSS indirection table */
+ u32 indir[MVPP22_RSS_TABLE_ENTRIES];
- /* Each port has its own view of the rss contexts, so that it can number
- * them from 0
- */
- int rss_ctx[MVPP22_N_RSS_TABLES];
+	/* user-space (MUSDK) private storage, used by User/Kernel mode toggling */
+ void *us_cfg;
+
+ /* Coherency-update for TX-ON from link_status_irq */
+ struct tasklet_struct txqs_on_tasklet;
+
+ /* Firmware TX flow control */
+ bool tx_fc;
+
+	/* Indication of whether the port is connected to an XLG MAC */
+ bool has_xlg_mac;
+
+#if IS_ENABLED(CONFIG_NET_DSA)
+ /* Notifier required when the port is connected to the switch */
+ struct notifier_block dsa_notifier;
+#endif
bool hwtstamp;
bool rx_hwtstamp;
enum hwtstamp_tx_types tx_hwtstamp_type;
struct mvpp2_hwtstamp_queue tx_hwtstamp_queue[2];
+
+ struct mvpp2_dbgfs_port_flow_entry *dbgfs_port_flow_entry;
};
/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
@@ -1195,7 +1352,7 @@ struct mvpp2_port {
#define MVPP2_RXD_ERR_SUMMARY BIT(15)
#define MVPP2_RXD_ERR_CODE_MASK (BIT(13) | BIT(14))
-#define MVPP2_RXD_ERR_CRC 0x0
+#define MVPP2_RXD_ERR_MAC 0x0
#define MVPP2_RXD_ERR_OVERRUN BIT(13)
#define MVPP2_RXD_ERR_RESOURCE (BIT(13) | BIT(14))
#define MVPP2_RXD_BM_POOL_ID_OFFS 16
@@ -1209,6 +1366,20 @@ struct mvpp2_port {
#define MVPP2_RXD_L3_IP6 BIT(30)
#define MVPP2_RXD_BUF_HDR BIT(31)
+struct mvpp2_buff_hdr {
+ __le32 next_dma_addr;
+ __le32 next_cookie_addr;
+ __le16 byte_count;
+ __le16 info;
+ __le16 reserved1; /* bm_qset (for future use, BM) */
+ u8 next_dma_addr_high;
+ u8 next_cookie_addr_high;
+ __le16 reserved2;
+ __le16 reserved3;
+ __le16 reserved4;
+ __le16 reserved5;
+};
+
/* HW TX descriptor for PPv2.1 */
struct mvpp21_tx_desc {
__le32 command; /* Options used by HW for packet transmitting.*/
@@ -1237,7 +1408,7 @@ struct mvpp21_rx_desc {
__le32 reserved8;
};
-/* HW TX descriptor for PPv2.2 */
+/* HW TX descriptor for PPv2.2 and PPv2.3 */
struct mvpp22_tx_desc {
__le32 command;
u8 packet_offset;
@@ -1249,7 +1420,7 @@ struct mvpp22_tx_desc {
__le64 buf_cookie_misc;
};
-/* HW RX descriptor for PPv2.2 */
+/* HW RX descriptor for PPv2.2 and PPv2.3 */
struct mvpp22_rx_desc {
__le32 status;
__le16 reserved1;
@@ -1311,8 +1482,10 @@ struct mvpp2_txq_pcpu {
*/
int count;
- int wake_threshold;
- int stop_threshold;
+ u16 wake_threshold;
+ u16 stop_threshold;
+	/* ID of the TXQ that crossed stop_threshold and awaits wake-up */
+ u16 stopped_on_txq_id;
/* Number of Tx DMA descriptors reserved for each CPU */
int reserved_num;
@@ -1343,6 +1516,7 @@ struct mvpp2_tx_queue {
/* Number of currently used Tx DMA descriptor in the descriptor ring */
int count;
+ int pending;
/* Per-CPU control of physical Tx queues */
struct mvpp2_txq_pcpu __percpu *pcpu;
@@ -1360,40 +1534,46 @@ struct mvpp2_tx_queue {
/* Index of the next Tx DMA descriptor to process */
int next_desc_to_proc;
-};
+} __aligned(L1_CACHE_BYTES);
struct mvpp2_rx_queue {
+ /* Virtual address of the RX DMA descriptors array */
+ struct mvpp2_rx_desc *descs;
+
+ /* Index of the next-to-process and last RX DMA descriptor */
+ int next_desc_to_proc;
+ int last_desc;
+
/* RX queue number, in the range 0-31 for physical RXQs */
u8 id;
+ /* Port's logic RXQ number to which physical RXQ is mapped */
+ u8 logic_rxq;
+
+	/* Number of received packets seen in HW but not yet handled by SW */
+ u16 rx_pending;
+
/* Num of rx descriptors in the rx descriptor ring */
int size;
u32 pkts_coal;
u32 time_coal;
- /* Virtual address of the RX DMA descriptors array */
- struct mvpp2_rx_desc *descs;
-
/* DMA address of the RX DMA descriptors array */
dma_addr_t descs_dma;
- /* Index of the last RX DMA descriptor */
- int last_desc;
-
- /* Index of the next RX DMA descriptor to process */
- int next_desc_to_proc;
-
/* ID of port to which physical RXQ is mapped */
int port;
- /* Port's logic RXQ number to which physical RXQ is mapped */
- int logic_rxq;
-
/* XDP memory accounting */
struct xdp_rxq_info xdp_rxq_short;
struct xdp_rxq_info xdp_rxq_long;
-};
+} __aligned(L1_CACHE_BYTES);
+
+enum mvpp2_bm_pool_type {
+ MVPP2_BM_SHORT,
+ MVPP2_BM_JUMBO,
+	MVPP2_BM_LONG,
+};
struct mvpp2_bm_pool {
/* Pool number in the range 0-7 */
@@ -1411,6 +1591,9 @@ struct mvpp2_bm_pool {
int pkt_size;
int frag_size;
+ /* Pool type (short/long/jumbo) */
+ enum mvpp2_bm_pool_type type;
+
/* BPPE virtual base address */
u32 *virt_addr;
/* BPPE DMA base address */
@@ -1420,20 +1603,123 @@ struct mvpp2_bm_pool {
u32 port_map;
};
+#define MVPP2_BM_POOLS_NUM (recycle ? (2 + num_present_cpus()) : 3)
+#define MVPP2_BM_POOLS_NUM_MAX 8
+
#define IS_TSO_HEADER(txq_pcpu, addr) \
((addr) >= (txq_pcpu)->tso_headers_dma && \
(addr) < (txq_pcpu)->tso_headers_dma + \
(txq_pcpu)->size * TSO_HEADER_SIZE)
+#define TSO_HEADER_MARK ((void *)BIT(0))
#define MVPP2_DRIVER_NAME "mvpp2"
#define MVPP2_DRIVER_VERSION "1.0"
-void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data);
-u32 mvpp2_read(struct mvpp2 *priv, u32 offset);
+/* Run-time critical utility/helper methods */
+static inline
+void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
+{
+ writel(data, priv->swth_base[0] + offset);
+}
-void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name);
+static inline
+u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
+{
+ return readl(priv->swth_base[0] + offset);
+}
+
+static inline
+u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset)
+{
+ return readl_relaxed(priv->swth_base[0] + offset);
+}
+
+static inline
+u32 mvpp2_cpu_to_thread(struct mvpp2 *priv, int cpu)
+{
+ return cpu % priv->nthreads;
+}
+
+static inline
+void mvpp2_cm3_write(struct mvpp2 *priv, u32 offset, u32 data)
+{
+ writel(data, priv->cm3_base + offset);
+}
+
+static inline
+u32 mvpp2_cm3_read(struct mvpp2 *priv, u32 offset)
+{
+ return readl(priv->cm3_base + offset);
+}
+/* These accessors should be used to access:
+ *
+ * - per-thread registers, where each thread has its own copy of the
+ * register.
+ *
+ * MVPP2_BM_VIRT_ALLOC_REG
+ * MVPP2_BM_ADDR_HIGH_ALLOC
+ * MVPP22_BM_ADDR_HIGH_RLS_REG
+ * MVPP2_BM_VIRT_RLS_REG
+ * MVPP2_ISR_RX_TX_CAUSE_REG
+ * MVPP2_ISR_RX_TX_MASK_REG
+ * MVPP2_TXQ_NUM_REG
+ * MVPP2_AGGR_TXQ_UPDATE_REG
+ * MVPP2_TXQ_RSVD_REQ_REG
+ * MVPP2_TXQ_RSVD_RSLT_REG
+ * MVPP2_TXQ_SENT_REG
+ * MVPP2_RXQ_NUM_REG
+ *
+ * - global registers that must be accessed through a specific thread
+ * window, because they are related to an access to a per-thread
+ * register
+ *
+ * MVPP2_BM_PHY_ALLOC_REG (related to MVPP2_BM_VIRT_ALLOC_REG)
+ * MVPP2_BM_PHY_RLS_REG (related to MVPP2_BM_VIRT_RLS_REG)
+ * MVPP2_RXQ_THRESH_REG (related to MVPP2_RXQ_NUM_REG)
+ * MVPP2_RXQ_DESC_ADDR_REG (related to MVPP2_RXQ_NUM_REG)
+ * MVPP2_RXQ_DESC_SIZE_REG (related to MVPP2_RXQ_NUM_REG)
+ * MVPP2_RXQ_INDEX_REG (related to MVPP2_RXQ_NUM_REG)
+ * MVPP2_TXQ_PENDING_REG (related to MVPP2_TXQ_NUM_REG)
+ * MVPP2_TXQ_DESC_ADDR_REG (related to MVPP2_TXQ_NUM_REG)
+ * MVPP2_TXQ_DESC_SIZE_REG (related to MVPP2_TXQ_NUM_REG)
+ * MVPP2_TXQ_INDEX_REG (related to MVPP2_TXQ_NUM_REG)
+ *   MVPP2_TXQ_PREF_BUF_REG   (related to MVPP2_TXQ_NUM_REG)
+ */
+static inline
+void mvpp2_thread_write(struct mvpp2 *priv, unsigned int thread,
+ u32 offset, u32 data)
+{
+ writel(data, priv->swth_base[thread] + offset);
+}
+
+static inline
+u32 mvpp2_thread_read(struct mvpp2 *priv, unsigned int thread, u32 offset)
+{
+ return readl(priv->swth_base[thread] + offset);
+}
+
+static inline
+void mvpp2_thread_write_relaxed(struct mvpp2 *priv, unsigned int thread,
+ u32 offset, u32 data)
+{
+ writel_relaxed(data, priv->swth_base[thread] + offset);
+}
+
+static inline
+u32 mvpp2_thread_read_relaxed(struct mvpp2 *priv, unsigned int thread,
+ u32 offset)
+{
+ return readl_relaxed(priv->swth_base[thread] + offset);
+}
+
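As a usage sketch of the per-thread accessors above (not part of the patch), reading an indirect per-TXQ register means selecting the queue and then reading the result through the same thread window. example_txq_pending() below is an invented helper name; the pending-count mask and preemption handling are omitted for brevity:

/*
 * Sketch only: MVPP2_TXQ_NUM_REG and MVPP2_TXQ_PENDING_REG are the
 * indirect-access registers listed in the comment above.
 */
static u32 example_txq_pending(struct mvpp2 *priv, struct mvpp2_tx_queue *txq)
{
	unsigned int thread = mvpp2_cpu_to_thread(priv, smp_processor_id());

	/* Select the TXQ, then read its pending counter in the same window */
	mvpp2_thread_write(priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
	return mvpp2_thread_read(priv, thread, MVPP2_TXQ_PENDING_REG);
}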
+void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name);
void mvpp2_dbgfs_cleanup(struct mvpp2 *priv);
+void mvpp23_rx_fifo_fc_en(struct mvpp2 *priv, int port, bool en);
+u32 mvpp2_get_tc_width(struct mvpp2_port *port);
+int mvpp22_rss_fill_table_per_tc(struct mvpp2_port *port);
#ifdef CONFIG_MVPP2_PTP
int mvpp22_tai_probe(struct device *dev, struct mvpp2 *priv);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
index 41d935d1aaf6..8e7d4046cc4a 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
@@ -22,302 +22,302 @@
} \
}
-static const struct mvpp2_cls_flow cls_flows[MVPP2_N_PRS_FLOWS] = {
+static struct mvpp2_cls_flow cls_flows[MVPP2_N_FLOWS] = {
/* TCP over IPv4 flows, Not fragmented, no vlan tag */
- MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_UNTAG,
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG,
MVPP22_CLS_HEK_IP4_5T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_UNTAG,
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG,
MVPP22_CLS_HEK_IP4_5T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_UNTAG,
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG,
MVPP22_CLS_HEK_IP4_5T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
/* TCP over IPv4 flows, Not fragmented, with vlan tag */
- MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_TAG,
- MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG,
+ MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_TAG,
- MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG,
+ MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_TAG,
- MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG,
+ MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
/* TCP over IPv4 flows, fragmented, no vlan tag */
- MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
/* TCP over IPv4 flows, fragmented, with vlan tag */
- MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG,
- MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG,
- MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG,
- MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
/* UDP over IPv4 flows, Not fragmented, no vlan tag */
- MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_UNTAG,
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG,
MVPP22_CLS_HEK_IP4_5T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_UNTAG,
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG,
MVPP22_CLS_HEK_IP4_5T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_UNTAG,
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG,
MVPP22_CLS_HEK_IP4_5T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
/* UDP over IPv4 flows, Not fragmented, with vlan tag */
- MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_TAG,
- MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG,
+ MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_TAG,
- MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG,
+ MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_TAG,
- MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG,
+ MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
/* UDP over IPv4 flows, fragmented, no vlan tag */
- MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
/* UDP over IPv4 flows, fragmented, with vlan tag */
- MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG,
- MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG,
- MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG,
- MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
/* TCP over IPv6 flows, not fragmented, no vlan tag */
- MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_NF_UNTAG,
+ MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_UNTAG,
MVPP22_CLS_HEK_IP6_5T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_NF_UNTAG,
+ MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_UNTAG,
MVPP22_CLS_HEK_IP6_5T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
/* TCP over IPv6 flows, not fragmented, with vlan tag */
- MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_NF_TAG,
- MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_TAG,
+ MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_NF_TAG,
- MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_TAG,
+ MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
/* TCP over IPv6 flows, fragmented, no vlan tag */
- MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_FRAG_UNTAG,
+ MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP6_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_FRAG_UNTAG,
+ MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP6_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
/* TCP over IPv6 flows, fragmented, with vlan tag */
- MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_FRAG_TAG,
- MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE |
MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_FRAG_TAG,
- MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE |
MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
/* UDP over IPv6 flows, not fragmented, no vlan tag */
- MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_NF_UNTAG,
+ MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_UNTAG,
MVPP22_CLS_HEK_IP6_5T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_NF_UNTAG,
+ MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_UNTAG,
MVPP22_CLS_HEK_IP6_5T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
/* UDP over IPv6 flows, not fragmented, with vlan tag */
- MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_NF_TAG,
- MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_TAG,
+ MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_NF_TAG,
- MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_TAG,
+ MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
/* UDP over IPv6 flows, fragmented, no vlan tag */
- MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_FRAG_UNTAG,
+ MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP6_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_FRAG_UNTAG,
+ MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP6_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
/* UDP over IPv6 flows, fragmented, with vlan tag */
- MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_FRAG_TAG,
- MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE |
MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_FRAG_TAG,
- MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE |
MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
/* IPv4 flows, no vlan tag */
- MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_UNTAG,
+ MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4,
MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_UNTAG,
+ MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT,
MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_UNTAG,
+ MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER,
MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
/* IPv4 flows, with vlan tag */
- MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_TAG,
- MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4,
MVPP2_PRS_RI_L3_PROTO_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_TAG,
- MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4_OPT,
MVPP2_PRS_RI_L3_PROTO_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_TAG,
- MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4_OTHER,
MVPP2_PRS_RI_L3_PROTO_MASK),
/* IPv6 flows, no vlan tag */
- MVPP2_DEF_FLOW(MVPP22_FLOW_IP6, MVPP2_FL_IP6_UNTAG,
+ MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_UNTAG,
MVPP22_CLS_HEK_IP6_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6,
MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_IP6, MVPP2_FL_IP6_UNTAG,
+ MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_UNTAG,
MVPP22_CLS_HEK_IP6_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6,
MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
/* IPv6 flows, with vlan tag */
- MVPP2_DEF_FLOW(MVPP22_FLOW_IP6, MVPP2_FL_IP6_TAG,
- MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_TAG,
+ MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP6,
MVPP2_PRS_RI_L3_PROTO_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_IP6, MVPP2_FL_IP6_TAG,
- MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_TAG,
+ MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP6,
MVPP2_PRS_RI_L3_PROTO_MASK),
/* Non IP flow, no vlan tag */
- MVPP2_DEF_FLOW(MVPP22_FLOW_ETHERNET, MVPP2_FL_NON_IP_UNTAG,
+ MVPP2_DEF_FLOW(ETHER_FLOW, MVPP2_FL_NON_IP_UNTAG,
0,
MVPP2_PRS_RI_VLAN_NONE,
MVPP2_PRS_RI_VLAN_MASK),
/* Non IP flow, with vlan tag */
- MVPP2_DEF_FLOW(MVPP22_FLOW_ETHERNET, MVPP2_FL_NON_IP_TAG,
+ MVPP2_DEF_FLOW(ETHER_FLOW, MVPP2_FL_NON_IP_TAG,
MVPP22_CLS_HEK_OPT_VLAN,
0, 0),
};
@@ -344,9 +344,9 @@ static void mvpp2_cls_flow_write(struct mvpp2 *priv,
struct mvpp2_cls_flow_entry *fe)
{
mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
- mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
- mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
- mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
+ mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
+ mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
+ mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
}
u32 mvpp2_cls_lookup_hits(struct mvpp2 *priv, int index)
@@ -436,6 +436,19 @@ static void mvpp2_cls_flow_last_set(struct mvpp2_cls_flow_entry *fe,
fe->data[0] |= !!is_last;
}
+static bool mvpp2_cls_flow_last_get(struct mvpp2_cls_flow_entry *fe)
+{
+ return (fe->data[0] & MVPP2_CLS_FLOW_TBL0_LAST);
+}
+
+static void mvpp2_cls_flow_lkp_type_set(struct mvpp2_cls_flow_entry *fe,
+ int lkp_type)
+{
+ fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_LKP_TYPE(
+ MVPP2_CLS_FLOW_TBL1_LKP_TYPE_MASK);
+ fe->data[1] |= MVPP2_CLS_FLOW_TBL1_LKP_TYPE(lkp_type);
+}
+
static void mvpp2_cls_flow_pri_set(struct mvpp2_cls_flow_entry *fe, int prio)
{
fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_PRIO(MVPP2_CLS_FLOW_TBL1_PRIO_MASK);
@@ -448,22 +461,14 @@ static void mvpp2_cls_flow_port_add(struct mvpp2_cls_flow_entry *fe,
fe->data[0] |= MVPP2_CLS_FLOW_TBL0_PORT_ID(port);
}
-static void mvpp2_cls_flow_port_remove(struct mvpp2_cls_flow_entry *fe,
- u32 port)
-{
- fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_PORT_ID(port);
-}
-
-static void mvpp2_cls_flow_lu_type_set(struct mvpp2_cls_flow_entry *fe,
- u8 lu_type)
+static int mvpp2_cls_flow_port_get(struct mvpp2_cls_flow_entry *fe)
{
- fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_LU_TYPE(MVPP2_CLS_LU_TYPE_MASK);
- fe->data[1] |= MVPP2_CLS_FLOW_TBL1_LU_TYPE(lu_type);
+ return ((fe->data[0] >> 4) & MVPP2_CLS_FLOW_TBL0_PORT_ID_MASK);
}
/* Initialize the parser entry for the given flow */
static void mvpp2_cls_flow_prs_init(struct mvpp2 *priv,
- const struct mvpp2_cls_flow *flow)
+ struct mvpp2_cls_flow *flow)
{
mvpp2_prs_add_flow(priv, flow->flow_id, flow->prs_ri.ri,
flow->prs_ri.ri_mask);
@@ -471,7 +476,7 @@ static void mvpp2_cls_flow_prs_init(struct mvpp2 *priv,
/* Initialize the Lookup Id table entry for the given flow */
static void mvpp2_cls_flow_lkp_init(struct mvpp2 *priv,
- const struct mvpp2_cls_flow *flow)
+ struct mvpp2_cls_flow *flow)
{
struct mvpp2_cls_lookup_entry le;
@@ -484,7 +489,7 @@ static void mvpp2_cls_flow_lkp_init(struct mvpp2 *priv,
/* We point on the first lookup in the sequence for the flow, that is
* the C2 lookup.
*/
- le.data |= MVPP2_CLS_LKP_FLOW_PTR(MVPP2_CLS_FLT_FIRST(flow->flow_id));
+ le.data |= MVPP2_CLS_LKP_FLOW_PTR(MVPP2_FLOW_C2_ENTRY(flow->flow_id));
/* CLS is always enabled, RSS is enabled/disabled in C2 lookup */
le.data |= MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
@@ -492,113 +497,21 @@ static void mvpp2_cls_flow_lkp_init(struct mvpp2 *priv,
mvpp2_cls_lookup_write(priv, &le);
}
-static void mvpp2_cls_c2_write(struct mvpp2 *priv,
- struct mvpp2_cls_c2_entry *c2)
-{
- u32 val;
- mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, c2->index);
-
- val = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_INV);
- if (c2->valid)
- val &= ~MVPP22_CLS_C2_TCAM_INV_BIT;
- else
- val |= MVPP22_CLS_C2_TCAM_INV_BIT;
- mvpp2_write(priv, MVPP22_CLS_C2_TCAM_INV, val);
-
- mvpp2_write(priv, MVPP22_CLS_C2_ACT, c2->act);
-
- mvpp2_write(priv, MVPP22_CLS_C2_ATTR0, c2->attr[0]);
- mvpp2_write(priv, MVPP22_CLS_C2_ATTR1, c2->attr[1]);
- mvpp2_write(priv, MVPP22_CLS_C2_ATTR2, c2->attr[2]);
- mvpp2_write(priv, MVPP22_CLS_C2_ATTR3, c2->attr[3]);
-
- mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA0, c2->tcam[0]);
- mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA1, c2->tcam[1]);
- mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA2, c2->tcam[2]);
- mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA3, c2->tcam[3]);
- /* Writing TCAM_DATA4 flushes writes to TCAM_DATA0-4 and INV to HW */
- mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA4, c2->tcam[4]);
-}
-
-void mvpp2_cls_c2_read(struct mvpp2 *priv, int index,
- struct mvpp2_cls_c2_entry *c2)
-{
- u32 val;
- mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, index);
-
- c2->index = index;
-
- c2->tcam[0] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA0);
- c2->tcam[1] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA1);
- c2->tcam[2] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA2);
- c2->tcam[3] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA3);
- c2->tcam[4] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA4);
-
- c2->act = mvpp2_read(priv, MVPP22_CLS_C2_ACT);
-
- c2->attr[0] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR0);
- c2->attr[1] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR1);
- c2->attr[2] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR2);
- c2->attr[3] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR3);
-
- val = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_INV);
- c2->valid = !(val & MVPP22_CLS_C2_TCAM_INV_BIT);
-}
-
-static int mvpp2_cls_ethtool_flow_to_type(int flow_type)
-{
- switch (flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS)) {
- case ETHER_FLOW:
- return MVPP22_FLOW_ETHERNET;
- case TCP_V4_FLOW:
- return MVPP22_FLOW_TCP4;
- case TCP_V6_FLOW:
- return MVPP22_FLOW_TCP6;
- case UDP_V4_FLOW:
- return MVPP22_FLOW_UDP4;
- case UDP_V6_FLOW:
- return MVPP22_FLOW_UDP6;
- case IPV4_FLOW:
- return MVPP22_FLOW_IP4;
- case IPV6_FLOW:
- return MVPP22_FLOW_IP6;
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static int mvpp2_cls_c2_port_flow_index(struct mvpp2_port *port, int loc)
-{
- return MVPP22_CLS_C2_RFS_LOC(port->id, loc);
-}
-
/* Initialize the flow table entries for the given flow */
-static void mvpp2_cls_flow_init(struct mvpp2 *priv,
- const struct mvpp2_cls_flow *flow)
+static void mvpp2_cls_flow_init(struct mvpp2 *priv, struct mvpp2_cls_flow *flow)
{
struct mvpp2_cls_flow_entry fe;
- int i, pri = 0;
-
- /* Assign default values to all entries in the flow */
- for (i = MVPP2_CLS_FLT_FIRST(flow->flow_id);
- i <= MVPP2_CLS_FLT_LAST(flow->flow_id); i++) {
- memset(&fe, 0, sizeof(fe));
- fe.index = i;
- mvpp2_cls_flow_pri_set(&fe, pri++);
-
- if (i == MVPP2_CLS_FLT_LAST(flow->flow_id))
- mvpp2_cls_flow_last_set(&fe, 1);
-
- mvpp2_cls_flow_write(priv, &fe);
- }
+ int i;
- /* RSS config C2 lookup */
- mvpp2_cls_flow_read(priv, MVPP2_CLS_FLT_C2_RSS_ENTRY(flow->flow_id),
- &fe);
+ /* C2 lookup */
+ memset(&fe, 0, sizeof(fe));
+ fe.index = MVPP2_FLOW_C2_ENTRY(flow->flow_id);
mvpp2_cls_flow_eng_set(&fe, MVPP22_CLS_ENGINE_C2);
mvpp2_cls_flow_port_id_sel(&fe, true);
- mvpp2_cls_flow_lu_type_set(&fe, MVPP22_CLS_LU_TYPE_ALL);
+ mvpp2_cls_flow_last_set(&fe, 0);
+ mvpp2_cls_flow_pri_set(&fe, 0);
+ mvpp2_cls_flow_lkp_type_set(&fe, MVPP2_CLS_LKP_DEFAULT);
/* Add all ports */
for (i = 0; i < MVPP2_MAX_PORTS; i++)
@@ -608,19 +521,22 @@ static void mvpp2_cls_flow_init(struct mvpp2 *priv,
/* C3Hx lookups */
for (i = 0; i < MVPP2_MAX_PORTS; i++) {
- mvpp2_cls_flow_read(priv,
- MVPP2_CLS_FLT_HASH_ENTRY(i, flow->flow_id),
- &fe);
+ memset(&fe, 0, sizeof(fe));
+ fe.index = MVPP2_PORT_FLOW_INDEX(i, flow->flow_id);
- /* Set a default engine. Will be overwritten when setting the
- * real HEK parameters
- */
mvpp2_cls_flow_eng_set(&fe, MVPP22_CLS_ENGINE_C3HA);
mvpp2_cls_flow_port_id_sel(&fe, true);
+ mvpp2_cls_flow_pri_set(&fe, i + 1);
mvpp2_cls_flow_port_add(&fe, BIT(i));
+ mvpp2_cls_flow_lkp_type_set(&fe, MVPP2_CLS_LKP_HASH);
mvpp2_cls_flow_write(priv, &fe);
}
+
+ /* Update the last entry */
+ mvpp2_cls_flow_last_set(&fe, 1);
+
+ mvpp2_cls_flow_write(priv, &fe);
}
/* Adds a field to the Header Extracted Key generation parameters*/
@@ -639,6 +555,20 @@ static int mvpp2_flow_add_hek_field(struct mvpp2_cls_flow_entry *fe,
return 0;
}
+static void mvpp2_cls_c2_inv_set(struct mvpp2 *priv,
+ int index)
+{
+ /* write index reg */
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, index);
+
+ /* set invalid bit */
+ mvpp2_write(priv, MVPP2_CLS2_TCAM_INV_REG,
+ (1 << MVPP2_CLS2_TCAM_INV_INVALID));
+
+ /* trigger */
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA4, 0);
+}
+
static int mvpp2_flow_set_hek_fields(struct mvpp2_cls_flow_entry *fe,
unsigned long hash_opts)
{
@@ -651,15 +581,9 @@ static int mvpp2_flow_set_hek_fields(struct mvpp2_cls_flow_entry *fe,
for_each_set_bit(i, &hash_opts, MVPP22_CLS_HEK_N_FIELDS) {
switch (BIT(i)) {
- case MVPP22_CLS_HEK_OPT_MAC_DA:
- field_id = MVPP22_CLS_FIELD_MAC_DA;
- break;
case MVPP22_CLS_HEK_OPT_VLAN:
field_id = MVPP22_CLS_FIELD_VLAN;
break;
- case MVPP22_CLS_HEK_OPT_VLAN_PRI:
- field_id = MVPP22_CLS_FIELD_VLAN_PRI;
- break;
case MVPP22_CLS_HEK_OPT_IP4SA:
field_id = MVPP22_CLS_FIELD_IP4SA;
break;
@@ -688,36 +612,42 @@ static int mvpp2_flow_set_hek_fields(struct mvpp2_cls_flow_entry *fe,
return 0;
}
-/* Returns the size, in bits, of the corresponding HEK field */
-static int mvpp2_cls_hek_field_size(u32 field)
+struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow)
{
- switch (field) {
- case MVPP22_CLS_HEK_OPT_MAC_DA:
- return 48;
- case MVPP22_CLS_HEK_OPT_VLAN:
- return 12;
- case MVPP22_CLS_HEK_OPT_VLAN_PRI:
- return 3;
- case MVPP22_CLS_HEK_OPT_IP4SA:
- case MVPP22_CLS_HEK_OPT_IP4DA:
- return 32;
- case MVPP22_CLS_HEK_OPT_IP6SA:
- case MVPP22_CLS_HEK_OPT_IP6DA:
- return 128;
- case MVPP22_CLS_HEK_OPT_L4SIP:
- case MVPP22_CLS_HEK_OPT_L4DIP:
- return 16;
- default:
- return -1;
- }
+ if (flow >= MVPP2_N_FLOWS)
+ return NULL;
+
+ return &cls_flows[flow];
}
-const struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow)
+int mvpp2_cls_flow_hash_find(struct mvpp2_port *port,
+ struct mvpp2_cls_flow *flow,
+ struct mvpp2_cls_flow_entry *fe,
+ int *flow_index)
{
- if (flow >= MVPP2_N_PRS_FLOWS)
- return NULL;
+ int engine, flow_offset, port_bm, idx = 0, is_last = 0;
- return &cls_flows[flow];
+ flow_offset = 0;
+ do {
+ idx = MVPP2_PORT_FLOW_INDEX(flow_offset, flow->flow_id);
+ if (idx >= MVPP2_CLS_FLOWS_TBL_SIZE)
+ break;
+ mvpp2_cls_flow_read(port->priv, idx, fe);
+ engine = mvpp2_cls_flow_eng_get(fe);
+ port_bm = mvpp2_cls_flow_port_get(fe);
+ is_last = mvpp2_cls_flow_last_get(fe);
+ if ((engine == MVPP22_CLS_ENGINE_C3HA ||
+ engine == MVPP22_CLS_ENGINE_C3HB) &&
+ (port_bm & BIT(port->id)))
+ break;
+ flow_offset++;
+ } while (!is_last);
+
+ *flow_index = idx;
+ if (is_last)
+ return -EINVAL;
+
+ return 0;
}
/* Set the hash generation options for the given traffic flow.
@@ -734,17 +664,21 @@ const struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow)
static int mvpp2_port_rss_hash_opts_set(struct mvpp2_port *port, int flow_type,
u16 requested_opts)
{
- const struct mvpp2_cls_flow *flow;
struct mvpp2_cls_flow_entry fe;
+ struct mvpp2_cls_flow *flow;
int i, engine, flow_index;
u16 hash_opts;
- for_each_cls_flow_id_with_type(i, flow_type) {
+ for (i = 0; i < MVPP2_N_FLOWS; i++) {
flow = mvpp2_cls_flow_get(i);
if (!flow)
return -EINVAL;
- flow_index = MVPP2_CLS_FLT_HASH_ENTRY(port->id, flow->flow_id);
+ if (flow->flow_type != flow_type)
+ continue;
+
+ if (mvpp2_cls_flow_hash_find(port, flow, &fe, &flow_index))
+ return -EINVAL;
mvpp2_cls_flow_read(port->priv, flow_index, &fe);
@@ -786,9 +720,6 @@ u16 mvpp2_flow_get_hek_fields(struct mvpp2_cls_flow_entry *fe)
case MVPP22_CLS_FIELD_VLAN:
hash_opts |= MVPP22_CLS_HEK_OPT_VLAN;
break;
- case MVPP22_CLS_FIELD_VLAN_PRI:
- hash_opts |= MVPP22_CLS_HEK_OPT_VLAN_PRI;
- break;
case MVPP22_CLS_FIELD_L3_PROTO:
hash_opts |= MVPP22_CLS_HEK_OPT_L3_PROTO;
break;
@@ -822,17 +753,21 @@ u16 mvpp2_flow_get_hek_fields(struct mvpp2_cls_flow_entry *fe)
*/
static u16 mvpp2_port_rss_hash_opts_get(struct mvpp2_port *port, int flow_type)
{
- const struct mvpp2_cls_flow *flow;
struct mvpp2_cls_flow_entry fe;
+ struct mvpp2_cls_flow *flow;
int i, flow_index;
u16 hash_opts = 0;
- for_each_cls_flow_id_with_type(i, flow_type) {
+ for (i = 0; i < MVPP2_N_FLOWS; i++) {
flow = mvpp2_cls_flow_get(i);
if (!flow)
return 0;
- flow_index = MVPP2_CLS_FLT_HASH_ENTRY(port->id, flow->flow_id);
+ if (flow->flow_type != flow_type)
+ continue;
+
+ if (mvpp2_cls_flow_hash_find(port, flow, &fe, &flow_index))
+ return 0;
mvpp2_cls_flow_read(port->priv, flow_index, &fe);
@@ -844,10 +779,10 @@ static u16 mvpp2_port_rss_hash_opts_get(struct mvpp2_port *port, int flow_type)
static void mvpp2_cls_port_init_flows(struct mvpp2 *priv)
{
- const struct mvpp2_cls_flow *flow;
+ struct mvpp2_cls_flow *flow;
int i;
- for (i = 0; i < MVPP2_N_PRS_FLOWS; i++) {
+ for (i = 0; i < MVPP2_N_FLOWS; i++) {
flow = mvpp2_cls_flow_get(i);
if (!flow)
break;
@@ -858,6 +793,51 @@ static void mvpp2_cls_port_init_flows(struct mvpp2 *priv)
}
}
+static void mvpp2_cls_c2_write(struct mvpp2 *priv,
+ struct mvpp2_cls_c2_entry *c2)
+{
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, c2->index);
+
+ mvpp2_write(priv, MVPP22_CLS_C2_ACT, c2->act);
+
+ mvpp2_write(priv, MVPP22_CLS_C2_ATTR0, c2->attr[0]);
+ mvpp2_write(priv, MVPP22_CLS_C2_ATTR1, c2->attr[1]);
+ mvpp2_write(priv, MVPP22_CLS_C2_ATTR2, c2->attr[2]);
+ mvpp2_write(priv, MVPP22_CLS_C2_ATTR3, c2->attr[3]);
+
+ /* Clear the invalid bit to mark the entry as valid */
+ mvpp2_write(priv, MVPP2_CLS2_TCAM_INV_REG,
+ (0 << MVPP2_CLS2_TCAM_INV_INVALID));
+
+ /* Write TCAM */
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA0, c2->tcam[0]);
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA1, c2->tcam[1]);
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA2, c2->tcam[2]);
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA3, c2->tcam[3]);
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA4, c2->tcam[4]);
+}
+
+void mvpp2_cls_c2_read(struct mvpp2 *priv, int index,
+ struct mvpp2_cls_c2_entry *c2)
+{
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, index);
+
+ c2->index = index;
+
+ c2->tcam[0] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA0);
+ c2->tcam[1] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA1);
+ c2->tcam[2] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA2);
+ c2->tcam[3] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA3);
+ c2->tcam[4] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA4);
+
+ c2->act = mvpp2_read(priv, MVPP22_CLS_C2_ACT);
+
+ c2->attr[0] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR0);
+ c2->attr[1] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR1);
+ c2->attr[2] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR2);
+ c2->attr[3] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR3);
+}
+
static void mvpp2_port_c2_cls_init(struct mvpp2_port *port)
{
struct mvpp2_cls_c2_entry c2;
@@ -871,9 +851,9 @@ static void mvpp2_port_c2_cls_init(struct mvpp2_port *port)
c2.tcam[4] = MVPP22_CLS_C2_PORT_ID(pmap);
c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_PORT_ID(pmap));
- /* Match on Lookup Type */
- c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_LU_TYPE(MVPP2_CLS_LU_TYPE_MASK));
- c2.tcam[4] |= MVPP22_CLS_C2_LU_TYPE(MVPP22_CLS_LU_TYPE_ALL);
+ /* Set lkp_type */
+ c2.tcam[4] |= MVPP22_CLS_C2_LKP_TYPE(MVPP2_CLS_LKP_DEFAULT);
+ c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_LKP_TYPE_MASK);
/* Update RSS status after matching this entry */
c2.act = MVPP22_CLS_C2_ACT_RSS_EN(MVPP22_C2_UPD_LOCK);
@@ -893,17 +873,27 @@ static void mvpp2_port_c2_cls_init(struct mvpp2_port *port)
c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) |
MVPP22_CLS_C2_ATTR0_QLOW(ql);
- c2.valid = true;
-
mvpp2_cls_c2_write(port->priv, &c2);
}
+static void mvpp2_cls_c2_init(struct mvpp2 *priv)
+{
+ int index;
+
+ /* Toggle C2 from Built-In Self-Test mode to Functional mode */
+ mvpp2_write(priv, MVPP2_CLS2_TCAM_CTRL_REG,
+ MVPP2_CLS2_TCAM_CTRL_BYPASS_FIFO_STAGES);
+
+ /* Invalidate all C2 entries */
+ for (index = 0; index < MVPP22_CLS_C2_MAX_ENTRIES; index++)
+ mvpp2_cls_c2_inv_set(priv, index);
+}
+
/* Classifier default initialization */
void mvpp2_cls_init(struct mvpp2 *priv)
{
struct mvpp2_cls_lookup_entry le;
struct mvpp2_cls_flow_entry fe;
- struct mvpp2_cls_c2_entry c2;
int index;
/* Enable classifier */
@@ -927,21 +917,15 @@ void mvpp2_cls_init(struct mvpp2 *priv)
mvpp2_cls_lookup_write(priv, &le);
}
- /* Clear C2 TCAM engine table */
- memset(&c2, 0, sizeof(c2));
- c2.valid = false;
- for (index = 0; index < MVPP22_CLS_C2_N_ENTRIES; index++) {
- c2.index = index;
- mvpp2_cls_c2_write(priv, &c2);
- }
-
- /* Disable the FIFO stages in C2 engine, which are only used in BIST
- * mode
+ /* Clear the CLS_SWFWD_PCTRL register so that the QueueHigh value is
+ * defined by the Classifier
+ */
- mvpp2_write(priv, MVPP22_CLS_C2_TCAM_CTRL,
- MVPP22_CLS_C2_TCAM_BYPASS_FIFO);
+ mvpp2_write(priv, MVPP2_CLS_SWFWD_PCTRL_REG, 0);
mvpp2_cls_port_init_flows(priv);
+
+ /* Initialize C2 */
+ mvpp2_cls_c2_init(priv);
}
void mvpp2_cls_port_config(struct mvpp2_port *port)
@@ -981,22 +965,12 @@ u32 mvpp2_cls_c2_hit_count(struct mvpp2 *priv, int c2_index)
return mvpp2_read(priv, MVPP22_CLS_C2_HIT_CTR);
}
-static void mvpp2_rss_port_c2_enable(struct mvpp2_port *port, u32 ctx)
+static void mvpp2_rss_port_c2_enable(struct mvpp2_port *port)
{
struct mvpp2_cls_c2_entry c2;
- u8 qh, ql;
mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);
- /* The RxQ number is used to select the RSS table. It that case, we set
- * it to be the ctx number.
- */
- qh = (ctx >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
- ql = ctx & MVPP22_CLS_C2_ATTR0_QLOW_MASK;
-
- c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) |
- MVPP22_CLS_C2_ATTR0_QLOW(ql);
-
c2.attr[2] |= MVPP22_CLS_C2_ATTR2_RSS_EN;
mvpp2_cls_c2_write(port->priv, &c2);
@@ -1005,446 +979,29 @@ static void mvpp2_rss_port_c2_enable(struct mvpp2_port *port, u32 ctx)
static void mvpp2_rss_port_c2_disable(struct mvpp2_port *port)
{
struct mvpp2_cls_c2_entry c2;
- u8 qh, ql;
mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);
- /* Reset the default destination RxQ to the port's first rx queue. */
- qh = (port->first_rxq >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
- ql = port->first_rxq & MVPP22_CLS_C2_ATTR0_QLOW_MASK;
-
- c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) |
- MVPP22_CLS_C2_ATTR0_QLOW(ql);
-
c2.attr[2] &= ~MVPP22_CLS_C2_ATTR2_RSS_EN;
mvpp2_cls_c2_write(port->priv, &c2);
}
-static inline int mvpp22_rss_ctx(struct mvpp2_port *port, int port_rss_ctx)
-{
- return port->rss_ctx[port_rss_ctx];
-}
-
-int mvpp22_port_rss_enable(struct mvpp2_port *port)
+void mvpp22_rss_enable(struct mvpp2_port *port)
{
- if (mvpp22_rss_ctx(port, 0) < 0)
- return -EINVAL;
-
- mvpp2_rss_port_c2_enable(port, mvpp22_rss_ctx(port, 0));
-
- return 0;
+ mvpp2_rss_port_c2_enable(port);
}
-int mvpp22_port_rss_disable(struct mvpp2_port *port)
+void mvpp22_rss_disable(struct mvpp2_port *port)
{
- if (mvpp22_rss_ctx(port, 0) < 0)
- return -EINVAL;
-
mvpp2_rss_port_c2_disable(port);
-
- return 0;
-}
-
-static void mvpp22_port_c2_lookup_disable(struct mvpp2_port *port, int entry)
-{
- struct mvpp2_cls_c2_entry c2;
-
- mvpp2_cls_c2_read(port->priv, entry, &c2);
-
- /* Clear the port map so that the entry doesn't match anymore */
- c2.tcam[4] &= ~(MVPP22_CLS_C2_PORT_ID(BIT(port->id)));
-
- mvpp2_cls_c2_write(port->priv, &c2);
}
/* Set CPU queue number for oversize packets */
void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
{
- u32 val;
-
mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);
-
- mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
- (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));
-
- val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
- val &= ~MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
- mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
-}
-
-static int mvpp2_port_c2_tcam_rule_add(struct mvpp2_port *port,
- struct mvpp2_rfs_rule *rule)
-{
- struct flow_action_entry *act;
- struct mvpp2_cls_c2_entry c2;
- u8 qh, ql, pmap;
- int index, ctx;
-
- if (!flow_action_basic_hw_stats_check(&rule->flow->action, NULL))
- return -EOPNOTSUPP;
-
- memset(&c2, 0, sizeof(c2));
-
- index = mvpp2_cls_c2_port_flow_index(port, rule->loc);
- if (index < 0)
- return -EINVAL;
- c2.index = index;
-
- act = &rule->flow->action.entries[0];
-
- rule->c2_index = c2.index;
-
- c2.tcam[3] = (rule->c2_tcam & 0xffff) |
- ((rule->c2_tcam_mask & 0xffff) << 16);
- c2.tcam[2] = ((rule->c2_tcam >> 16) & 0xffff) |
- (((rule->c2_tcam_mask >> 16) & 0xffff) << 16);
- c2.tcam[1] = ((rule->c2_tcam >> 32) & 0xffff) |
- (((rule->c2_tcam_mask >> 32) & 0xffff) << 16);
- c2.tcam[0] = ((rule->c2_tcam >> 48) & 0xffff) |
- (((rule->c2_tcam_mask >> 48) & 0xffff) << 16);
-
- pmap = BIT(port->id);
- c2.tcam[4] = MVPP22_CLS_C2_PORT_ID(pmap);
- c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_PORT_ID(pmap));
-
- /* Match on Lookup Type */
- c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_LU_TYPE(MVPP2_CLS_LU_TYPE_MASK));
- c2.tcam[4] |= MVPP22_CLS_C2_LU_TYPE(rule->loc);
-
- if (act->id == FLOW_ACTION_DROP) {
- c2.act = MVPP22_CLS_C2_ACT_COLOR(MVPP22_C2_COL_RED_LOCK);
- } else {
- /* We want to keep the default color derived from the Header
- * Parser drop entries, for VLAN and MAC filtering. This will
- * assign a default color of Green or Red, and we want matches
- * with a non-drop action to keep that color.
- */
- c2.act = MVPP22_CLS_C2_ACT_COLOR(MVPP22_C2_COL_NO_UPD_LOCK);
-
- /* Update RSS status after matching this entry */
- if (act->queue.ctx)
- c2.attr[2] |= MVPP22_CLS_C2_ATTR2_RSS_EN;
-
- /* Always lock the RSS_EN decision. We might have high prio
- * rules steering to an RXQ, and a lower one steering to RSS,
- * we don't want the low prio RSS rule overwriting this flag.
- */
- c2.act = MVPP22_CLS_C2_ACT_RSS_EN(MVPP22_C2_UPD_LOCK);
-
- /* Mark packet as "forwarded to software", needed for RSS */
- c2.act |= MVPP22_CLS_C2_ACT_FWD(MVPP22_C2_FWD_SW_LOCK);
-
- c2.act |= MVPP22_CLS_C2_ACT_QHIGH(MVPP22_C2_UPD_LOCK) |
- MVPP22_CLS_C2_ACT_QLOW(MVPP22_C2_UPD_LOCK);
-
- if (act->queue.ctx) {
- /* Get the global ctx number */
- ctx = mvpp22_rss_ctx(port, act->queue.ctx);
- if (ctx < 0)
- return -EINVAL;
-
- qh = (ctx >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
- ql = ctx & MVPP22_CLS_C2_ATTR0_QLOW_MASK;
- } else {
- qh = ((act->queue.index + port->first_rxq) >> 3) &
- MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
- ql = (act->queue.index + port->first_rxq) &
- MVPP22_CLS_C2_ATTR0_QLOW_MASK;
- }
-
- c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) |
- MVPP22_CLS_C2_ATTR0_QLOW(ql);
- }
-
- c2.valid = true;
-
- mvpp2_cls_c2_write(port->priv, &c2);
-
- return 0;
-}
-
-static int mvpp2_port_c2_rfs_rule_insert(struct mvpp2_port *port,
- struct mvpp2_rfs_rule *rule)
-{
- return mvpp2_port_c2_tcam_rule_add(port, rule);
-}
-
-static int mvpp2_port_cls_rfs_rule_remove(struct mvpp2_port *port,
- struct mvpp2_rfs_rule *rule)
-{
- const struct mvpp2_cls_flow *flow;
- struct mvpp2_cls_flow_entry fe;
- int index, i;
-
- for_each_cls_flow_id_containing_type(i, rule->flow_type) {
- flow = mvpp2_cls_flow_get(i);
- if (!flow)
- return 0;
-
- index = MVPP2_CLS_FLT_C2_RFS(port->id, flow->flow_id, rule->loc);
-
- mvpp2_cls_flow_read(port->priv, index, &fe);
- mvpp2_cls_flow_port_remove(&fe, BIT(port->id));
- mvpp2_cls_flow_write(port->priv, &fe);
- }
-
- if (rule->c2_index >= 0)
- mvpp22_port_c2_lookup_disable(port, rule->c2_index);
-
- return 0;
-}
-
-static int mvpp2_port_flt_rfs_rule_insert(struct mvpp2_port *port,
- struct mvpp2_rfs_rule *rule)
-{
- const struct mvpp2_cls_flow *flow;
- struct mvpp2 *priv = port->priv;
- struct mvpp2_cls_flow_entry fe;
- int index, ret, i;
-
- if (rule->engine != MVPP22_CLS_ENGINE_C2)
- return -EOPNOTSUPP;
-
- ret = mvpp2_port_c2_rfs_rule_insert(port, rule);
- if (ret)
- return ret;
-
- for_each_cls_flow_id_containing_type(i, rule->flow_type) {
- flow = mvpp2_cls_flow_get(i);
- if (!flow)
- return 0;
-
- if ((rule->hek_fields & flow->supported_hash_opts) != rule->hek_fields)
- continue;
-
- index = MVPP2_CLS_FLT_C2_RFS(port->id, flow->flow_id, rule->loc);
-
- mvpp2_cls_flow_read(priv, index, &fe);
- mvpp2_cls_flow_eng_set(&fe, rule->engine);
- mvpp2_cls_flow_port_id_sel(&fe, true);
- mvpp2_flow_set_hek_fields(&fe, rule->hek_fields);
- mvpp2_cls_flow_lu_type_set(&fe, rule->loc);
- mvpp2_cls_flow_port_add(&fe, 0xf);
-
- mvpp2_cls_flow_write(priv, &fe);
- }
-
- return 0;
-}
-
-static int mvpp2_cls_c2_build_match(struct mvpp2_rfs_rule *rule)
-{
- struct flow_rule *flow = rule->flow;
- int offs = 0;
-
- /* The order of insertion in C2 tcam must match the order in which
- * the fields are found in the header
- */
- if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
- struct flow_match_vlan match;
-
- flow_rule_match_vlan(flow, &match);
- if (match.mask->vlan_id) {
- rule->hek_fields |= MVPP22_CLS_HEK_OPT_VLAN;
-
- rule->c2_tcam |= ((u64)match.key->vlan_id) << offs;
- rule->c2_tcam_mask |= ((u64)match.mask->vlan_id) << offs;
-
- /* Don't update the offset yet */
- }
-
- if (match.mask->vlan_priority) {
- rule->hek_fields |= MVPP22_CLS_HEK_OPT_VLAN_PRI;
-
- /* VLAN pri is always at offset 13 relative to the
- * current offset
- */
- rule->c2_tcam |= ((u64)match.key->vlan_priority) <<
- (offs + 13);
- rule->c2_tcam_mask |= ((u64)match.mask->vlan_priority) <<
- (offs + 13);
- }
-
- if (match.mask->vlan_dei)
- return -EOPNOTSUPP;
-
- /* vlan id and prio always seem to take a full 16-bit slot in
- * the Header Extracted Key.
- */
- offs += 16;
- }
-
- if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
- struct flow_match_ports match;
-
- flow_rule_match_ports(flow, &match);
- if (match.mask->src) {
- rule->hek_fields |= MVPP22_CLS_HEK_OPT_L4SIP;
-
- rule->c2_tcam |= ((u64)ntohs(match.key->src)) << offs;
- rule->c2_tcam_mask |= ((u64)ntohs(match.mask->src)) << offs;
- offs += mvpp2_cls_hek_field_size(MVPP22_CLS_HEK_OPT_L4SIP);
- }
-
- if (match.mask->dst) {
- rule->hek_fields |= MVPP22_CLS_HEK_OPT_L4DIP;
-
- rule->c2_tcam |= ((u64)ntohs(match.key->dst)) << offs;
- rule->c2_tcam_mask |= ((u64)ntohs(match.mask->dst)) << offs;
- offs += mvpp2_cls_hek_field_size(MVPP22_CLS_HEK_OPT_L4DIP);
- }
- }
-
- if (hweight16(rule->hek_fields) > MVPP2_FLOW_N_FIELDS)
- return -EOPNOTSUPP;
-
- return 0;
-}
-
-static int mvpp2_cls_rfs_parse_rule(struct mvpp2_rfs_rule *rule)
-{
- struct flow_rule *flow = rule->flow;
- struct flow_action_entry *act;
-
- if (!flow_action_basic_hw_stats_check(&rule->flow->action, NULL))
- return -EOPNOTSUPP;
-
- act = &flow->action.entries[0];
- if (act->id != FLOW_ACTION_QUEUE && act->id != FLOW_ACTION_DROP)
- return -EOPNOTSUPP;
-
- /* When both an RSS context and an queue index are set, the index
- * is considered as an offset to be added to the indirection table
- * entries. We don't support this, so reject this rule.
- */
- if (act->queue.ctx && act->queue.index)
- return -EOPNOTSUPP;
-
- /* For now, only use the C2 engine which has a HEK size limited to 64
- * bits for TCAM matching.
- */
- rule->engine = MVPP22_CLS_ENGINE_C2;
-
- if (mvpp2_cls_c2_build_match(rule))
- return -EINVAL;
-
- return 0;
-}
-
-int mvpp2_ethtool_cls_rule_get(struct mvpp2_port *port,
- struct ethtool_rxnfc *rxnfc)
-{
- struct mvpp2_ethtool_fs *efs;
-
- if (rxnfc->fs.location >= MVPP2_N_RFS_ENTRIES_PER_FLOW)
- return -EINVAL;
-
- efs = port->rfs_rules[rxnfc->fs.location];
- if (!efs)
- return -ENOENT;
-
- memcpy(rxnfc, &efs->rxnfc, sizeof(efs->rxnfc));
-
- return 0;
-}
-
-int mvpp2_ethtool_cls_rule_ins(struct mvpp2_port *port,
- struct ethtool_rxnfc *info)
-{
- struct ethtool_rx_flow_spec_input input = {};
- struct ethtool_rx_flow_rule *ethtool_rule;
- struct mvpp2_ethtool_fs *efs, *old_efs;
- int ret = 0;
-
- if (info->fs.location >= MVPP2_N_RFS_ENTRIES_PER_FLOW)
- return -EINVAL;
-
- efs = kzalloc(sizeof(*efs), GFP_KERNEL);
- if (!efs)
- return -ENOMEM;
-
- input.fs = &info->fs;
-
- /* We need to manually set the rss_ctx, since this info isn't present
- * in info->fs
- */
- if (info->fs.flow_type & FLOW_RSS)
- input.rss_ctx = info->rss_context;
-
- ethtool_rule = ethtool_rx_flow_rule_create(&input);
- if (IS_ERR(ethtool_rule)) {
- ret = PTR_ERR(ethtool_rule);
- goto clean_rule;
- }
-
- efs->rule.flow = ethtool_rule->rule;
- efs->rule.flow_type = mvpp2_cls_ethtool_flow_to_type(info->fs.flow_type);
- if (efs->rule.flow_type < 0) {
- ret = efs->rule.flow_type;
- goto clean_rule;
- }
-
- ret = mvpp2_cls_rfs_parse_rule(&efs->rule);
- if (ret)
- goto clean_eth_rule;
-
- efs->rule.loc = info->fs.location;
-
- /* Replace an already existing rule */
- if (port->rfs_rules[efs->rule.loc]) {
- old_efs = port->rfs_rules[efs->rule.loc];
- ret = mvpp2_port_cls_rfs_rule_remove(port, &old_efs->rule);
- if (ret)
- goto clean_eth_rule;
- kfree(old_efs);
- port->n_rfs_rules--;
- }
-
- ret = mvpp2_port_flt_rfs_rule_insert(port, &efs->rule);
- if (ret)
- goto clean_eth_rule;
-
- ethtool_rx_flow_rule_destroy(ethtool_rule);
- efs->rule.flow = NULL;
-
- memcpy(&efs->rxnfc, info, sizeof(*info));
- port->rfs_rules[efs->rule.loc] = efs;
- port->n_rfs_rules++;
-
- return ret;
-
-clean_eth_rule:
- ethtool_rx_flow_rule_destroy(ethtool_rule);
-clean_rule:
- kfree(efs);
- return ret;
-}
-
-int mvpp2_ethtool_cls_rule_del(struct mvpp2_port *port,
- struct ethtool_rxnfc *info)
-{
- struct mvpp2_ethtool_fs *efs;
- int ret;
-
- if (info->fs.location >= MVPP2_N_RFS_ENTRIES_PER_FLOW)
- return -EINVAL;
-
- efs = port->rfs_rules[info->fs.location];
- if (!efs)
- return -EINVAL;
-
- /* Remove the rule from the engines. */
- ret = mvpp2_port_cls_rfs_rule_remove(port, &efs->rule);
- if (ret)
- return ret;
-
- port->n_rfs_rules--;
- port->rfs_rules[info->fs.location] = NULL;
- kfree(efs);
-
- return 0;
}
static inline u32 mvpp22_rxfh_indir(struct mvpp2_port *port, u32 rxq)
@@ -1466,181 +1023,37 @@ static inline u32 mvpp22_rxfh_indir(struct mvpp2_port *port, u32 rxq)
return port->first_rxq + ((rxq * nrxqs + rxq / cpus) % port->nrxqs);
}
-static void mvpp22_rss_fill_table(struct mvpp2_port *port,
- struct mvpp2_rss_table *table,
- u32 rss_ctx)
+void mvpp22_rss_fill_table(struct mvpp2_port *port, u32 table)
{
struct mvpp2 *priv = port->priv;
int i;
for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++) {
- u32 sel = MVPP22_RSS_INDEX_TABLE(rss_ctx) |
+ u32 sel = MVPP22_RSS_INDEX_TABLE(table) |
MVPP22_RSS_INDEX_TABLE_ENTRY(i);
mvpp2_write(priv, MVPP22_RSS_INDEX, sel);
mvpp2_write(priv, MVPP22_RSS_TABLE_ENTRY,
- mvpp22_rxfh_indir(port, table->indir[i]));
- }
-}
-
-static int mvpp22_rss_context_create(struct mvpp2_port *port, u32 *rss_ctx)
-{
- struct mvpp2 *priv = port->priv;
- u32 ctx;
-
- /* Find the first free RSS table */
- for (ctx = 0; ctx < MVPP22_N_RSS_TABLES; ctx++) {
- if (!priv->rss_tables[ctx])
- break;
- }
-
- if (ctx == MVPP22_N_RSS_TABLES)
- return -EINVAL;
-
- priv->rss_tables[ctx] = kzalloc(sizeof(*priv->rss_tables[ctx]),
- GFP_KERNEL);
- if (!priv->rss_tables[ctx])
- return -ENOMEM;
-
- *rss_ctx = ctx;
-
- /* Set the table width: replace the whole classifier Rx queue number
- * with the ones configured in RSS table entries.
- */
- mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(ctx));
- mvpp2_write(priv, MVPP22_RSS_WIDTH, 8);
-
- mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_QUEUE(ctx));
- mvpp2_write(priv, MVPP22_RXQ2RSS_TABLE, MVPP22_RSS_TABLE_POINTER(ctx));
-
- return 0;
-}
-
-int mvpp22_port_rss_ctx_create(struct mvpp2_port *port, u32 *port_ctx)
-{
- u32 rss_ctx;
- int ret, i;
-
- ret = mvpp22_rss_context_create(port, &rss_ctx);
- if (ret)
- return ret;
-
- /* Find the first available context number in the port, starting from 1.
- * Context 0 on each port is reserved for the default context.
- */
- for (i = 1; i < MVPP22_N_RSS_TABLES; i++) {
- if (port->rss_ctx[i] < 0)
- break;
- }
-
- if (i == MVPP22_N_RSS_TABLES)
- return -EINVAL;
-
- port->rss_ctx[i] = rss_ctx;
- *port_ctx = i;
-
- return 0;
-}
-
-static struct mvpp2_rss_table *mvpp22_rss_table_get(struct mvpp2 *priv,
- int rss_ctx)
-{
- if (rss_ctx < 0 || rss_ctx >= MVPP22_N_RSS_TABLES)
- return NULL;
-
- return priv->rss_tables[rss_ctx];
-}
-
-int mvpp22_port_rss_ctx_delete(struct mvpp2_port *port, u32 port_ctx)
-{
- struct mvpp2 *priv = port->priv;
- struct ethtool_rxnfc *rxnfc;
- int i, rss_ctx, ret;
-
- rss_ctx = mvpp22_rss_ctx(port, port_ctx);
-
- if (rss_ctx < 0 || rss_ctx >= MVPP22_N_RSS_TABLES)
- return -EINVAL;
-
- /* Invalidate any active classification rule that use this context */
- for (i = 0; i < MVPP2_N_RFS_ENTRIES_PER_FLOW; i++) {
- if (!port->rfs_rules[i])
- continue;
-
- rxnfc = &port->rfs_rules[i]->rxnfc;
- if (!(rxnfc->fs.flow_type & FLOW_RSS) ||
- rxnfc->rss_context != port_ctx)
- continue;
-
- ret = mvpp2_ethtool_cls_rule_del(port, rxnfc);
- if (ret) {
- netdev_warn(port->dev,
- "couldn't remove classification rule %d associated to this context",
- rxnfc->fs.location);
- }
+ mvpp22_rxfh_indir(port, port->indir[i]));
}
-
- kfree(priv->rss_tables[rss_ctx]);
-
- priv->rss_tables[rss_ctx] = NULL;
- port->rss_ctx[port_ctx] = -1;
-
- return 0;
-}
-
-int mvpp22_port_rss_ctx_indir_set(struct mvpp2_port *port, u32 port_ctx,
- const u32 *indir)
-{
- int rss_ctx = mvpp22_rss_ctx(port, port_ctx);
- struct mvpp2_rss_table *rss_table = mvpp22_rss_table_get(port->priv,
- rss_ctx);
-
- if (!rss_table)
- return -EINVAL;
-
- memcpy(rss_table->indir, indir,
- MVPP22_RSS_TABLE_ENTRIES * sizeof(rss_table->indir[0]));
-
- mvpp22_rss_fill_table(port, rss_table, rss_ctx);
-
- return 0;
-}
-
-int mvpp22_port_rss_ctx_indir_get(struct mvpp2_port *port, u32 port_ctx,
- u32 *indir)
-{
- int rss_ctx = mvpp22_rss_ctx(port, port_ctx);
- struct mvpp2_rss_table *rss_table = mvpp22_rss_table_get(port->priv,
- rss_ctx);
-
- if (!rss_table)
- return -EINVAL;
-
- memcpy(indir, rss_table->indir,
- MVPP22_RSS_TABLE_ENTRIES * sizeof(rss_table->indir[0]));
-
- return 0;
}
int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info)
{
u16 hash_opts = 0;
- u32 flow_type;
- flow_type = mvpp2_cls_ethtool_flow_to_type(info->flow_type);
-
- switch (flow_type) {
- case MVPP22_FLOW_TCP4:
- case MVPP22_FLOW_UDP4:
- case MVPP22_FLOW_TCP6:
- case MVPP22_FLOW_UDP6:
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
if (info->data & RXH_L4_B_0_1)
hash_opts |= MVPP22_CLS_HEK_OPT_L4SIP;
if (info->data & RXH_L4_B_2_3)
hash_opts |= MVPP22_CLS_HEK_OPT_L4DIP;
fallthrough;
- case MVPP22_FLOW_IP4:
- case MVPP22_FLOW_IP6:
+ case IPV4_FLOW:
+ case IPV6_FLOW:
if (info->data & RXH_L2DA)
hash_opts |= MVPP22_CLS_HEK_OPT_MAC_DA;
if (info->data & RXH_VLAN)
@@ -1657,18 +1070,15 @@ int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info)
default: return -EOPNOTSUPP;
}
- return mvpp2_port_rss_hash_opts_set(port, flow_type, hash_opts);
+ return mvpp2_port_rss_hash_opts_set(port, info->flow_type, hash_opts);
}
int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info)
{
unsigned long hash_opts;
- u32 flow_type;
int i;
- flow_type = mvpp2_cls_ethtool_flow_to_type(info->flow_type);
-
- hash_opts = mvpp2_port_rss_hash_opts_get(port, flow_type);
+ hash_opts = mvpp2_port_rss_hash_opts_get(port, info->flow_type);
info->data = 0;
for_each_set_bit(i, &hash_opts, MVPP22_CLS_HEK_N_FIELDS) {
@@ -1703,40 +1113,56 @@ int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info)
return 0;
}
-int mvpp22_port_rss_init(struct mvpp2_port *port)
+void mvpp22_rss_port_init(struct mvpp2_port *port)
{
- struct mvpp2_rss_table *table;
- u32 context = 0;
- int i, ret;
-
- for (i = 0; i < MVPP22_N_RSS_TABLES; i++)
- port->rss_ctx[i] = -1;
+ struct mvpp2 *priv = port->priv;
+ int i;
- ret = mvpp22_rss_context_create(port, &context);
- if (ret)
- return ret;
+ /* Set the table width: replace the whole classifier Rx queue number
+ * with the ones configured in RSS table entries.
+ */
+ mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(port->id));
+ mvpp2_write(priv, MVPP22_RSS_WIDTH, 8);
- table = mvpp22_rss_table_get(port->priv, context);
- if (!table)
- return -EINVAL;
+ if (port->num_tc_queues > 1) {
+ int rxq;
+ u32 tc_width;
+ int tc_mask;
- port->rss_ctx[0] = context;
+ tc_width = mvpp2_get_tc_width(port);
+ tc_mask = ((1 << tc_width) - 1);
+ for (rxq = 0; rxq < port->nrxqs; rxq++) {
+ mvpp2_write(priv, MVPP22_RSS_INDEX,
+ MVPP22_RSS_INDEX_QUEUE(port->rxqs[rxq]->id));
+ mvpp2_write(priv, MVPP22_RXQ2RSS_TABLE,
+ MVPP22_RSS_TABLE_POINTER(port->rxqs[rxq]->id & tc_mask));
+ }
+ } else {
+ /* The default RxQ is used as a key to select the RSS table to use.
+ * We use one RSS table per port.
+ */
+ mvpp2_write(priv, MVPP22_RSS_INDEX,
+ MVPP22_RSS_INDEX_QUEUE(port->first_rxq));
+ mvpp2_write(priv, MVPP22_RXQ2RSS_TABLE,
+ MVPP22_RSS_TABLE_POINTER(port->id));
+ }
/* Configure the first table to evenly distribute the packets across
* real Rx Queues. The table entries map a hash to a port Rx Queue.
*/
for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++)
- table->indir[i] = ethtool_rxfh_indir_default(i, port->nrxqs);
+ port->indir[i] = ethtool_rxfh_indir_default(i, port->nrxqs);
- mvpp22_rss_fill_table(port, table, mvpp22_rss_ctx(port, 0));
+ if (port->num_tc_queues > 1)
+ mvpp22_rss_fill_table_per_tc(port);
+ else
+ mvpp22_rss_fill_table(port, port->id);
/* Configure default flows */
- mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_IP4, MVPP22_CLS_HEK_IP4_2T);
- mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_IP6, MVPP22_CLS_HEK_IP6_2T);
- mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_TCP4, MVPP22_CLS_HEK_IP4_5T);
- mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_TCP6, MVPP22_CLS_HEK_IP6_5T);
- mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_UDP4, MVPP22_CLS_HEK_IP4_5T);
- mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_UDP6, MVPP22_CLS_HEK_IP6_5T);
-
- return 0;
+ mvpp2_port_rss_hash_opts_set(port, IPV4_FLOW, MVPP22_CLS_HEK_IP4_2T);
+ mvpp2_port_rss_hash_opts_set(port, IPV6_FLOW, MVPP22_CLS_HEK_IP6_2T);
+ mvpp2_port_rss_hash_opts_set(port, TCP_V4_FLOW, MVPP22_CLS_HEK_IP4_5T);
+ mvpp2_port_rss_hash_opts_set(port, TCP_V6_FLOW, MVPP22_CLS_HEK_IP6_5T);
+ mvpp2_port_rss_hash_opts_set(port, UDP_V4_FLOW, MVPP22_CLS_HEK_IP4_5T);
+ mvpp2_port_rss_hash_opts_set(port, UDP_V6_FLOW, MVPP22_CLS_HEK_IP6_5T);
}
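For reference, a tiny stand-alone sketch of the default indirection fill used above, assuming the usual ethtool_rxfh_indir_default() behaviour of spreading table entries round-robin over the Rx queues (illustrative only, not part of the patch; RSS_TABLE_ENTRIES stands in for MVPP22_RSS_TABLE_ENTRIES):

	#include <stdio.h>

	#define RSS_TABLE_ENTRIES	32

	/* Round-robin default, as done by ethtool_rxfh_indir_default() */
	static unsigned int indir_default(unsigned int index, unsigned int n_rx_rings)
	{
		return index % n_rx_rings;
	}

	int main(void)
	{
		unsigned int nrxqs = 4, i;

		/* Each hash bucket (table entry) maps to one of the port Rx queues */
		for (i = 0; i < RSS_TABLE_ENTRIES; i++)
			printf("entry %2u -> rxq %u\n", i, indir_default(i, nrxqs));
		return 0;
	}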
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
index 8867f25afab4..e5b7d28abc07 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
@@ -33,16 +33,15 @@ enum mvpp2_cls_engine {
};
#define MVPP22_CLS_HEK_OPT_MAC_DA BIT(0)
-#define MVPP22_CLS_HEK_OPT_VLAN_PRI BIT(1)
-#define MVPP22_CLS_HEK_OPT_VLAN BIT(2)
-#define MVPP22_CLS_HEK_OPT_L3_PROTO BIT(3)
-#define MVPP22_CLS_HEK_OPT_IP4SA BIT(4)
-#define MVPP22_CLS_HEK_OPT_IP4DA BIT(5)
-#define MVPP22_CLS_HEK_OPT_IP6SA BIT(6)
-#define MVPP22_CLS_HEK_OPT_IP6DA BIT(7)
-#define MVPP22_CLS_HEK_OPT_L4SIP BIT(8)
-#define MVPP22_CLS_HEK_OPT_L4DIP BIT(9)
-#define MVPP22_CLS_HEK_N_FIELDS 10
+#define MVPP22_CLS_HEK_OPT_VLAN BIT(1)
+#define MVPP22_CLS_HEK_OPT_L3_PROTO BIT(2)
+#define MVPP22_CLS_HEK_OPT_IP4SA BIT(3)
+#define MVPP22_CLS_HEK_OPT_IP4DA BIT(4)
+#define MVPP22_CLS_HEK_OPT_IP6SA BIT(5)
+#define MVPP22_CLS_HEK_OPT_IP6DA BIT(6)
+#define MVPP22_CLS_HEK_OPT_L4SIP BIT(7)
+#define MVPP22_CLS_HEK_OPT_L4DIP BIT(8)
+#define MVPP22_CLS_HEK_N_FIELDS 9
#define MVPP22_CLS_HEK_L4_OPTS (MVPP22_CLS_HEK_OPT_L4SIP | \
MVPP22_CLS_HEK_OPT_L4DIP)
@@ -60,12 +59,8 @@ enum mvpp2_cls_engine {
#define MVPP22_CLS_HEK_IP6_5T (MVPP22_CLS_HEK_IP6_2T | \
MVPP22_CLS_HEK_L4_OPTS)
-#define MVPP22_CLS_HEK_TAGGED (MVPP22_CLS_HEK_OPT_VLAN | \
- MVPP22_CLS_HEK_OPT_VLAN_PRI)
-
enum mvpp2_cls_field_id {
MVPP22_CLS_FIELD_MAC_DA = 0x03,
- MVPP22_CLS_FIELD_VLAN_PRI = 0x05,
MVPP22_CLS_FIELD_VLAN = 0x06,
MVPP22_CLS_FIELD_L3_PROTO = 0x0f,
MVPP22_CLS_FIELD_IP4SA = 0x10,
@@ -76,6 +71,19 @@ enum mvpp2_cls_field_id {
MVPP22_CLS_FIELD_L4DIP = 0x1e,
};
+enum mvpp2_cls_lkp_type {
+ MVPP2_CLS_LKP_HASH = 0,
+ MVPP2_CLS_LKP_DEFAULT = 3,
+ MVPP2_CLS_LKP_MAX,
+};
+enum mvpp2_cls_flow_seq {
+ MVPP2_CLS_FLOW_SEQ_NORMAL = 0,
+ MVPP2_CLS_FLOW_SEQ_FIRST1,
+ MVPP2_CLS_FLOW_SEQ_FIRST2,
+ MVPP2_CLS_FLOW_SEQ_LAST,
+ MVPP2_CLS_FLOW_SEQ_MIDDLE
+};
+
/* Classifier C2 engine constants */
#define MVPP22_CLS_C2_TCAM_EN(data) ((data) << 16)
@@ -97,62 +105,40 @@ enum mvpp22_cls_c2_fwd_action {
MVPP22_C2_FWD_HW_LOW_LAT_LOCK,
};
-enum mvpp22_cls_c2_color_action {
- MVPP22_C2_COL_NO_UPD = 0,
- MVPP22_C2_COL_NO_UPD_LOCK,
- MVPP22_C2_COL_GREEN,
- MVPP22_C2_COL_GREEN_LOCK,
- MVPP22_C2_COL_YELLOW,
- MVPP22_C2_COL_YELLOW_LOCK,
- MVPP22_C2_COL_RED, /* Drop */
- MVPP22_C2_COL_RED_LOCK, /* Drop */
-};
-
#define MVPP2_CLS_C2_TCAM_WORDS 5
#define MVPP2_CLS_C2_ATTR_WORDS 5
struct mvpp2_cls_c2_entry {
u32 index;
- /* TCAM lookup key */
u32 tcam[MVPP2_CLS_C2_TCAM_WORDS];
- /* Actions to perform upon TCAM match */
u32 act;
- /* Attributes relative to the actions to perform */
u32 attr[MVPP2_CLS_C2_ATTR_WORDS];
- /* Entry validity */
- u8 valid;
};
-#define MVPP22_FLOW_ETHER_BIT BIT(0)
-#define MVPP22_FLOW_IP4_BIT BIT(1)
-#define MVPP22_FLOW_IP6_BIT BIT(2)
-#define MVPP22_FLOW_TCP_BIT BIT(3)
-#define MVPP22_FLOW_UDP_BIT BIT(4)
-
-#define MVPP22_FLOW_TCP4 (MVPP22_FLOW_ETHER_BIT | MVPP22_FLOW_IP4_BIT | MVPP22_FLOW_TCP_BIT)
-#define MVPP22_FLOW_TCP6 (MVPP22_FLOW_ETHER_BIT | MVPP22_FLOW_IP6_BIT | MVPP22_FLOW_TCP_BIT)
-#define MVPP22_FLOW_UDP4 (MVPP22_FLOW_ETHER_BIT | MVPP22_FLOW_IP4_BIT | MVPP22_FLOW_UDP_BIT)
-#define MVPP22_FLOW_UDP6 (MVPP22_FLOW_ETHER_BIT | MVPP22_FLOW_IP6_BIT | MVPP22_FLOW_UDP_BIT)
-#define MVPP22_FLOW_IP4 (MVPP22_FLOW_ETHER_BIT | MVPP22_FLOW_IP4_BIT)
-#define MVPP22_FLOW_IP6 (MVPP22_FLOW_ETHER_BIT | MVPP22_FLOW_IP6_BIT)
-#define MVPP22_FLOW_ETHERNET (MVPP22_FLOW_ETHER_BIT)
-
/* Classifier C2 engine entries */
-#define MVPP22_CLS_C2_N_ENTRIES 256
-
-/* Number of per-port dedicated entries in the C2 TCAM */
-#define MVPP22_CLS_C2_PORT_N_FLOWS MVPP2_N_RFS_ENTRIES_PER_FLOW
+#define MVPP22_CLS_C2_MAX_ENTRIES 256
+#define MVPP22_CLS_C2_RSS_ENTRY(port) (port)
+#define MVPP22_CLS_C2_N_ENTRIES MVPP2_MAX_PORTS
-/* Each port has oen range per flow type + one entry controling the global RSS
- * setting and the default rx queue
+/* RSS flow entries in the flow table. We have 2 entries per port for RSS.
+ *
+ * The first performs a lookup using the C2 TCAM engine, to tag the
+ * packet for software forwarding (needed for RSS), enable or disable RSS, and
+ * assign the default rx queue.
+ *
+ * The second configures the hash generation, by specifying which fields of the
+ * packet header are used to generate the hash, and specifies the relevant hash
+ * engine to use.
*/
-#define MVPP22_CLS_C2_PORT_RANGE (MVPP22_CLS_C2_PORT_N_FLOWS + 1)
-#define MVPP22_CLS_C2_PORT_FIRST(p) ((p) * MVPP22_CLS_C2_PORT_RANGE)
-#define MVPP22_CLS_C2_RSS_ENTRY(p) (MVPP22_CLS_C2_PORT_FIRST((p) + 1) - 1)
-
-#define MVPP22_CLS_C2_PORT_FLOW_FIRST(p) (MVPP22_CLS_C2_PORT_FIRST(p))
+#define MVPP22_RSS_FLOW_C2_OFFS 0
+#define MVPP22_RSS_FLOW_HASH_OFFS 1
+#define MVPP22_RSS_FLOW_SIZE (MVPP22_RSS_FLOW_HASH_OFFS + 1)
-#define MVPP22_CLS_C2_RFS_LOC(p, loc) (MVPP22_CLS_C2_PORT_FLOW_FIRST(p) + (loc))
+#define MVPP22_RSS_FLOW_C2(port) ((port) * MVPP22_RSS_FLOW_SIZE + \
+ MVPP22_RSS_FLOW_C2_OFFS)
+#define MVPP22_RSS_FLOW_HASH(port) ((port) * MVPP22_RSS_FLOW_SIZE + \
+ MVPP22_RSS_FLOW_HASH_OFFS)
+#define MVPP22_RSS_FLOW_FIRST(port) MVPP22_RSS_FLOW_C2(port)
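For illustration, a minimal user-space sketch of how the two per-port RSS entries are laid out by the macros above (the macro values are copied from this hunk; the sketch is not part of the patch):

	#include <stdio.h>

	#define MVPP22_RSS_FLOW_C2_OFFS		0
	#define MVPP22_RSS_FLOW_HASH_OFFS	1
	#define MVPP22_RSS_FLOW_SIZE		(MVPP22_RSS_FLOW_HASH_OFFS + 1)
	#define MVPP22_RSS_FLOW_C2(port)	((port) * MVPP22_RSS_FLOW_SIZE + \
						 MVPP22_RSS_FLOW_C2_OFFS)
	#define MVPP22_RSS_FLOW_HASH(port)	((port) * MVPP22_RSS_FLOW_SIZE + \
						 MVPP22_RSS_FLOW_HASH_OFFS)

	int main(void)
	{
		int port;

		/* Two consecutive entries per port: C2 lookup first, then hash */
		for (port = 0; port < 4; port++)
			printf("port %d: C2 entry %d, hash entry %d\n", port,
			       MVPP22_RSS_FLOW_C2(port), MVPP22_RSS_FLOW_HASH(port));
		return 0;
	}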
/* Packet flow ID */
enum mvpp2_prs_flow {
@@ -182,16 +168,6 @@ enum mvpp2_prs_flow {
MVPP2_FL_LAST,
};
-/* LU Type defined for all engines, and specified in the flow table */
-#define MVPP2_CLS_LU_TYPE_MASK 0x3f
-
-enum mvpp2_cls_lu_type {
- /* rule->loc is used as a lu-type for the entries 0 - 62. */
- MVPP22_CLS_LU_TYPE_ALL = 63,
-};
-
-#define MVPP2_N_FLOWS (MVPP2_FL_LAST - MVPP2_FL_START)
-
struct mvpp2_cls_flow {
/* The L2-L4 traffic flow type */
int flow_type;
@@ -206,47 +182,13 @@ struct mvpp2_cls_flow {
struct mvpp2_prs_result_info prs_ri;
};
-#define MVPP2_CLS_FLT_ENTRIES_PER_FLOW (MVPP2_MAX_PORTS + 1 + 16)
-#define MVPP2_CLS_FLT_FIRST(id) (((id) - MVPP2_FL_START) * \
- MVPP2_CLS_FLT_ENTRIES_PER_FLOW)
-
-#define MVPP2_CLS_FLT_C2_RFS(port, id, rfs_n) (MVPP2_CLS_FLT_FIRST(id) + \
- ((port) * MVPP2_MAX_PORTS) + \
- (rfs_n))
+#define MVPP2_N_FLOWS 52
-#define MVPP2_CLS_FLT_C2_RSS_ENTRY(id) (MVPP2_CLS_FLT_C2_RFS(MVPP2_MAX_PORTS, id, 0))
-#define MVPP2_CLS_FLT_HASH_ENTRY(port, id) (MVPP2_CLS_FLT_C2_RSS_ENTRY(id) + 1 + (port))
-#define MVPP2_CLS_FLT_LAST(id) (MVPP2_CLS_FLT_FIRST(id) + \
- MVPP2_CLS_FLT_ENTRIES_PER_FLOW - 1)
-
-/* Iterate on each classifier flow id. Sets 'i' to be the index of the first
- * entry in the cls_flows table for each different flow_id.
- * This relies on entries having the same flow_id in the cls_flows table being
- * contiguous.
- */
-#define for_each_cls_flow_id(i) \
- for ((i) = 0; (i) < MVPP2_N_PRS_FLOWS; (i)++) \
- if ((i) > 0 && \
- cls_flows[(i)].flow_id == cls_flows[(i) - 1].flow_id) \
- continue; \
- else
-
-/* Iterate on each classifier flow that has a given flow_type. Sets 'i' to be
- * the index of the first entry in the cls_flow table for each different flow_id
- * that has the given flow_type. This allows to operate on all flows that
- * matches a given ethtool flow type.
- */
-#define for_each_cls_flow_id_with_type(i, type) \
- for_each_cls_flow_id((i)) \
- if (cls_flows[(i)].flow_type != (type)) \
- continue; \
- else
-
-#define for_each_cls_flow_id_containing_type(i, type) \
- for_each_cls_flow_id((i)) \
- if ((cls_flows[(i)].flow_type & (type)) != (type)) \
- continue; \
- else
+#define MVPP2_ENTRIES_PER_FLOW (MVPP2_MAX_PORTS + 1)
+#define MVPP2_FLOW_C2_ENTRY(id) ((((id) - MVPP2_FL_START) * \
+ MVPP2_ENTRIES_PER_FLOW) + 1)
+#define MVPP2_PORT_FLOW_INDEX(offset, id) (MVPP2_FLOW_C2_ENTRY(id) + \
+ 1 + (offset))
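As a rough illustration of this indexing scheme, a small sketch follows; MVPP2_MAX_PORTS and MVPP2_FL_START are not visible in this hunk, so the values below are placeholders chosen only for the example:

	#include <stdio.h>

	/* Placeholder values, only for illustration */
	#define MVPP2_MAX_PORTS			4
	#define MVPP2_FL_START			8

	#define MVPP2_ENTRIES_PER_FLOW		(MVPP2_MAX_PORTS + 1)
	#define MVPP2_FLOW_C2_ENTRY(id)		((((id) - MVPP2_FL_START) * \
						  MVPP2_ENTRIES_PER_FLOW) + 1)
	#define MVPP2_PORT_FLOW_INDEX(offset, id)	(MVPP2_FLOW_C2_ENTRY(id) + \
							 1 + (offset))

	int main(void)
	{
		int id, offset;

		/* Each flow id owns a block of MVPP2_ENTRIES_PER_FLOW entries:
		 * one C2 entry followed by the hash entries.
		 */
		for (id = MVPP2_FL_START; id < MVPP2_FL_START + 3; id++) {
			printf("flow %2d: C2 entry %2d, hash entries", id,
			       MVPP2_FLOW_C2_ENTRY(id));
			for (offset = 0; offset < MVPP2_MAX_PORTS - 1; offset++)
				printf(" %2d", MVPP2_PORT_FLOW_INDEX(offset, id));
			printf("\n");
		}
		return 0;
	}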
struct mvpp2_cls_flow_entry {
u32 index;
@@ -259,18 +201,12 @@ struct mvpp2_cls_lookup_entry {
u32 data;
};
-int mvpp22_port_rss_init(struct mvpp2_port *port);
-
-int mvpp22_port_rss_enable(struct mvpp2_port *port);
-int mvpp22_port_rss_disable(struct mvpp2_port *port);
+void mvpp22_rss_fill_table(struct mvpp2_port *port, u32 table);
-int mvpp22_port_rss_ctx_create(struct mvpp2_port *port, u32 *rss_ctx);
-int mvpp22_port_rss_ctx_delete(struct mvpp2_port *port, u32 rss_ctx);
+void mvpp22_rss_port_init(struct mvpp2_port *port);
-int mvpp22_port_rss_ctx_indir_set(struct mvpp2_port *port, u32 rss_ctx,
- const u32 *indir);
-int mvpp22_port_rss_ctx_indir_get(struct mvpp2_port *port, u32 rss_ctx,
- u32 *indir);
+void mvpp22_rss_enable(struct mvpp2_port *port);
+void mvpp22_rss_disable(struct mvpp2_port *port);
int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info);
int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info);
@@ -285,7 +221,7 @@ int mvpp2_cls_flow_eng_get(struct mvpp2_cls_flow_entry *fe);
u16 mvpp2_flow_get_hek_fields(struct mvpp2_cls_flow_entry *fe);
-const struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow);
+struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow);
u32 mvpp2_cls_flow_hits(struct mvpp2 *priv, int index);
@@ -302,13 +238,9 @@ u32 mvpp2_cls_c2_hit_count(struct mvpp2 *priv, int c2_index);
void mvpp2_cls_c2_read(struct mvpp2 *priv, int index,
struct mvpp2_cls_c2_entry *c2);
-int mvpp2_ethtool_cls_rule_get(struct mvpp2_port *port,
- struct ethtool_rxnfc *rxnfc);
-
-int mvpp2_ethtool_cls_rule_ins(struct mvpp2_port *port,
- struct ethtool_rxnfc *info);
-
-int mvpp2_ethtool_cls_rule_del(struct mvpp2_port *port,
- struct ethtool_rxnfc *info);
+int mvpp2_cls_flow_hash_find(struct mvpp2_port *port,
+ struct mvpp2_cls_flow *flow,
+ struct mvpp2_cls_flow_entry *fe,
+ int *flow_index);
#endif
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
index 4a3baa7e0142..1e614771f3a1 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
@@ -13,53 +13,12 @@
#include "mvpp2_prs.h"
#include "mvpp2_cls.h"
-struct mvpp2_dbgfs_prs_entry {
- int tid;
- struct mvpp2 *priv;
-};
-
-struct mvpp2_dbgfs_c2_entry {
- int id;
- struct mvpp2 *priv;
-};
-
-struct mvpp2_dbgfs_flow_entry {
- int flow;
- struct mvpp2 *priv;
-};
-
-struct mvpp2_dbgfs_flow_tbl_entry {
- int id;
- struct mvpp2 *priv;
-};
-
-struct mvpp2_dbgfs_port_flow_entry {
- struct mvpp2_port *port;
- struct mvpp2_dbgfs_flow_entry *dbg_fe;
-};
-
-struct mvpp2_dbgfs_entries {
- /* Entries for Header Parser debug info */
- struct mvpp2_dbgfs_prs_entry prs_entries[MVPP2_PRS_TCAM_SRAM_SIZE];
-
- /* Entries for Classifier C2 engine debug info */
- struct mvpp2_dbgfs_c2_entry c2_entries[MVPP22_CLS_C2_N_ENTRIES];
-
- /* Entries for Classifier Flow Table debug info */
- struct mvpp2_dbgfs_flow_tbl_entry flt_entries[MVPP2_CLS_FLOWS_TBL_SIZE];
-
- /* Entries for Classifier flows debug info */
- struct mvpp2_dbgfs_flow_entry flow_entries[MVPP2_N_PRS_FLOWS];
-
- /* Entries for per-port flows debug info */
- struct mvpp2_dbgfs_port_flow_entry port_flow_entries[MVPP2_MAX_PORTS];
-};
-
static int mvpp2_dbgfs_flow_flt_hits_show(struct seq_file *s, void *unused)
{
- struct mvpp2_dbgfs_flow_tbl_entry *entry = s->private;
+ struct mvpp2_dbgfs_flow_entry *entry = s->private;
+ int id = MVPP2_FLOW_C2_ENTRY(entry->flow);
- u32 hits = mvpp2_cls_flow_hits(entry->priv, entry->id);
+ u32 hits = mvpp2_cls_flow_hits(entry->priv, id);
seq_printf(s, "%u\n", hits);
@@ -84,7 +43,7 @@ DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_dec_hits);
static int mvpp2_dbgfs_flow_type_show(struct seq_file *s, void *unused)
{
struct mvpp2_dbgfs_flow_entry *entry = s->private;
- const struct mvpp2_cls_flow *f;
+ struct mvpp2_cls_flow *f;
const char *flow_name;
f = mvpp2_cls_flow_get(entry->flow);
@@ -119,12 +78,21 @@ static int mvpp2_dbgfs_flow_type_show(struct seq_file *s, void *unused)
return 0;
}
-DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_type);
+static int mvpp2_dbgfs_flow_type_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mvpp2_dbgfs_flow_type_show, inode->i_private);
+}
+
+static const struct file_operations mvpp2_dbgfs_flow_type_fops = {
+ .open = mvpp2_dbgfs_flow_type_open,
+ .read = seq_read,
+ .release = single_release,
+};
static int mvpp2_dbgfs_flow_id_show(struct seq_file *s, void *unused)
{
- const struct mvpp2_dbgfs_flow_entry *entry = s->private;
- const struct mvpp2_cls_flow *f;
+ struct mvpp2_dbgfs_flow_entry *entry = s->private;
+ struct mvpp2_cls_flow *f;
f = mvpp2_cls_flow_get(entry->flow);
if (!f)
@@ -142,7 +110,7 @@ static int mvpp2_dbgfs_port_flow_hash_opt_show(struct seq_file *s, void *unused)
struct mvpp2_dbgfs_port_flow_entry *entry = s->private;
struct mvpp2_port *port = entry->port;
struct mvpp2_cls_flow_entry fe;
- const struct mvpp2_cls_flow *f;
+ struct mvpp2_cls_flow *f;
int flow_index;
u16 hash_opts;
@@ -150,7 +118,8 @@ static int mvpp2_dbgfs_port_flow_hash_opt_show(struct seq_file *s, void *unused)
if (!f)
return -EINVAL;
- flow_index = MVPP2_CLS_FLT_HASH_ENTRY(entry->port->id, f->flow_id);
+ if (mvpp2_cls_flow_hash_find(port, f, &fe, &flow_index))
+ return -EINVAL;
mvpp2_cls_flow_read(port->priv, flow_index, &fe);
@@ -161,21 +130,33 @@ static int mvpp2_dbgfs_port_flow_hash_opt_show(struct seq_file *s, void *unused)
return 0;
}
-DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_port_flow_hash_opt);
+static int mvpp2_dbgfs_port_flow_hash_opt_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, mvpp2_dbgfs_port_flow_hash_opt_show,
+ inode->i_private);
+}
+
+static const struct file_operations mvpp2_dbgfs_port_flow_hash_opt_fops = {
+ .open = mvpp2_dbgfs_port_flow_hash_opt_open,
+ .read = seq_read,
+ .release = single_release,
+};
static int mvpp2_dbgfs_port_flow_engine_show(struct seq_file *s, void *unused)
{
struct mvpp2_dbgfs_port_flow_entry *entry = s->private;
struct mvpp2_port *port = entry->port;
struct mvpp2_cls_flow_entry fe;
- const struct mvpp2_cls_flow *f;
+ struct mvpp2_cls_flow *f;
int flow_index, engine;
f = mvpp2_cls_flow_get(entry->dbg_fe->flow);
if (!f)
return -EINVAL;
- flow_index = MVPP2_CLS_FLT_HASH_ENTRY(entry->port->id, f->flow_id);
+ if (mvpp2_cls_flow_hash_find(port, f, &fe, &flow_index))
+ return -EINVAL;
mvpp2_cls_flow_read(port->priv, flow_index, &fe);
@@ -190,10 +171,11 @@ DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_port_flow_engine);
static int mvpp2_dbgfs_flow_c2_hits_show(struct seq_file *s, void *unused)
{
- struct mvpp2_dbgfs_c2_entry *entry = s->private;
+ struct mvpp2_port *port = s->private;
u32 hits;
- hits = mvpp2_cls_c2_hit_count(entry->priv, entry->id);
+ hits = mvpp2_cls_c2_hit_count(port->priv,
+ MVPP22_CLS_C2_RSS_ENTRY(port->id));
seq_printf(s, "%u\n", hits);
@@ -204,11 +186,11 @@ DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_c2_hits);
static int mvpp2_dbgfs_flow_c2_rxq_show(struct seq_file *s, void *unused)
{
- struct mvpp2_dbgfs_c2_entry *entry = s->private;
+ struct mvpp2_port *port = s->private;
struct mvpp2_cls_c2_entry c2;
u8 qh, ql;
- mvpp2_cls_c2_read(entry->priv, entry->id, &c2);
+ mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);
qh = (c2.attr[0] >> MVPP22_CLS_C2_ATTR0_QHIGH_OFFS) &
MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
@@ -225,11 +207,11 @@ DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_c2_rxq);
static int mvpp2_dbgfs_flow_c2_enable_show(struct seq_file *s, void *unused)
{
- struct mvpp2_dbgfs_c2_entry *entry = s->private;
+ struct mvpp2_port *port = s->private;
struct mvpp2_cls_c2_entry c2;
int enabled;
- mvpp2_cls_c2_read(entry->priv, entry->id, &c2);
+ mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);
enabled = !!(c2.attr[2] & MVPP22_CLS_C2_ATTR2_RSS_EN);
@@ -275,6 +257,41 @@ static int mvpp2_dbgfs_port_vid_show(struct seq_file *s, void *unused)
DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_port_vid);
+static int mvpp2_prs_hw_hits_dump(struct seq_file *s,
+ struct mvpp2_prs_entry *pe)
+{
+ struct mvpp2 *priv = ((struct mvpp2_port *)s->private)->priv;
+ unsigned int cnt;
+
+ cnt = mvpp2_prs_hits(priv, pe->index);
+ if (cnt != 0)
+ seq_printf(s, "----- HITS: %u ------\n", cnt);
+ return 0;
+}
+
+static int mvpp2_dbgfs_port_parser_dump(struct seq_file *s,
+ struct mvpp2_prs_entry *pe)
+{
+ int i;
+
+ /* hw entry id */
+ seq_printf(s, " [%4d] ", pe->index);
+
+ i = MVPP2_PRS_TCAM_WORDS - 1;
+ seq_printf(s, "%1.1x ", pe->tcam[i--] & MVPP2_PRS_LU_MASK);
+
+ while (i >= 0)
+ seq_printf(s, "%4.4x ", (pe->tcam[i--]) & MVPP2_PRS_WORD_MASK);
+
+ seq_printf(s, "| %4.4x %8.8x %8.8x %8.8x\n",
+ pe->sram[3] & MVPP2_PRS_WORD_MASK,
+ pe->sram[2], pe->sram[1], pe->sram[0]);
+
+ mvpp2_prs_hw_hits_dump(s, pe);
+
+ return 0;
+}
+
static int mvpp2_dbgfs_port_parser_show(struct seq_file *s, void *unused)
{
struct mvpp2_port *port = s->private;
@@ -288,7 +305,7 @@ static int mvpp2_dbgfs_port_parser_show(struct seq_file *s, void *unused)
pmap = mvpp2_prs_tcam_port_map_get(&pe);
if (priv->prs_shadow[i].valid && test_bit(port->id, &pmap))
- seq_printf(s, "%03d\n", i);
+ mvpp2_dbgfs_port_parser_dump(s, &pe);
}
return 0;
@@ -442,7 +459,16 @@ static int mvpp2_dbgfs_prs_valid_show(struct seq_file *s, void *unused)
return 0;
}
-DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_valid);
+static int mvpp2_dbgfs_prs_valid_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mvpp2_dbgfs_prs_valid_show, inode->i_private);
+}
+
+static const struct file_operations mvpp2_dbgfs_prs_valid_fops = {
+ .open = mvpp2_dbgfs_prs_valid_open,
+ .read = seq_read,
+ .release = single_release,
+};
static int mvpp2_dbgfs_flow_port_init(struct dentry *parent,
struct mvpp2_port *port,
@@ -452,11 +478,16 @@ static int mvpp2_dbgfs_flow_port_init(struct dentry *parent,
struct dentry *port_dir;
port_dir = debugfs_create_dir(port->dev->name, parent);
+ if (IS_ERR(port_dir))
+ return PTR_ERR(port_dir);
- port_entry = &port->priv->dbgfs_entries->port_flow_entries[port->id];
+ port_entry = kmalloc(sizeof(*port_entry), GFP_KERNEL);
+ if (!port_entry)
+ return -ENOMEM;
port_entry->port = port;
port_entry->dbg_fe = entry;
+ port->dbgfs_port_flow_entry = port_entry;
debugfs_create_file("hash_opts", 0444, port_dir, port_entry,
&mvpp2_dbgfs_port_flow_hash_opt_fops);
@@ -478,11 +509,19 @@ static int mvpp2_dbgfs_flow_entry_init(struct dentry *parent,
sprintf(flow_entry_name, "%02d", flow);
flow_entry_dir = debugfs_create_dir(flow_entry_name, parent);
+ if (IS_ERR(flow_entry_dir))
+ return PTR_ERR(flow_entry_dir);
- entry = &priv->dbgfs_entries->flow_entries[flow];
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
entry->flow = flow;
entry->priv = priv;
+ priv->dbgfs_flow_entry[flow] = entry;
+
+ debugfs_create_file("flow_hits", 0444, flow_entry_dir, entry,
+ &mvpp2_dbgfs_flow_flt_hits_fops);
debugfs_create_file("dec_hits", 0444, flow_entry_dir, entry,
&mvpp2_dbgfs_flow_dec_hits_fops);
@@ -500,7 +539,6 @@ static int mvpp2_dbgfs_flow_entry_init(struct dentry *parent,
if (ret)
return ret;
}
-
return 0;
}
@@ -510,8 +548,10 @@ static int mvpp2_dbgfs_flow_init(struct dentry *parent, struct mvpp2 *priv)
int i, ret;
flow_dir = debugfs_create_dir("flows", parent);
+ if (IS_ERR(flow_dir))
+ return PTR_ERR(flow_dir);
- for (i = 0; i < MVPP2_N_PRS_FLOWS; i++) {
+ for (i = 0; i < MVPP2_N_FLOWS; i++) {
ret = mvpp2_dbgfs_flow_entry_init(flow_dir, priv, i);
if (ret)
return ret;
@@ -533,11 +573,16 @@ static int mvpp2_dbgfs_prs_entry_init(struct dentry *parent,
sprintf(prs_entry_name, "%03d", tid);
prs_entry_dir = debugfs_create_dir(prs_entry_name, parent);
+ if (IS_ERR(prs_entry_dir))
+ return PTR_ERR(prs_entry_dir);
- entry = &priv->dbgfs_entries->prs_entries[tid];
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
entry->tid = tid;
entry->priv = priv;
+ priv->dbgfs_prs_entry[tid] = entry;
/* Create each attr */
debugfs_create_file("sram", 0444, prs_entry_dir, entry,
@@ -558,9 +603,6 @@ static int mvpp2_dbgfs_prs_entry_init(struct dentry *parent,
debugfs_create_file("hits", 0444, prs_entry_dir, entry,
&mvpp2_dbgfs_prs_hits_fops);
- debugfs_create_file("pmap", 0444, prs_entry_dir, entry,
- &mvpp2_dbgfs_prs_pmap_fops);
-
return 0;
}
@@ -570,6 +612,8 @@ static int mvpp2_dbgfs_prs_init(struct dentry *parent, struct mvpp2 *priv)
int i, ret;
prs_dir = debugfs_create_dir("parser", parent);
+ if (IS_ERR(prs_dir))
+ return PTR_ERR(prs_dir);
for (i = 0; i < MVPP2_PRS_TCAM_SRAM_SIZE; i++) {
ret = mvpp2_dbgfs_prs_entry_init(prs_dir, priv, i);
@@ -580,104 +624,14 @@ static int mvpp2_dbgfs_prs_init(struct dentry *parent, struct mvpp2 *priv)
return 0;
}
-static int mvpp2_dbgfs_c2_entry_init(struct dentry *parent,
- struct mvpp2 *priv, int id)
-{
- struct mvpp2_dbgfs_c2_entry *entry;
- struct dentry *c2_entry_dir;
- char c2_entry_name[10];
-
- if (id >= MVPP22_CLS_C2_N_ENTRIES)
- return -EINVAL;
-
- sprintf(c2_entry_name, "%03d", id);
-
- c2_entry_dir = debugfs_create_dir(c2_entry_name, parent);
- if (!c2_entry_dir)
- return -ENOMEM;
-
- entry = &priv->dbgfs_entries->c2_entries[id];
-
- entry->id = id;
- entry->priv = priv;
-
- debugfs_create_file("hits", 0444, c2_entry_dir, entry,
- &mvpp2_dbgfs_flow_c2_hits_fops);
-
- debugfs_create_file("default_rxq", 0444, c2_entry_dir, entry,
- &mvpp2_dbgfs_flow_c2_rxq_fops);
-
- debugfs_create_file("rss_enable", 0444, c2_entry_dir, entry,
- &mvpp2_dbgfs_flow_c2_enable_fops);
-
- return 0;
-}
-
-static int mvpp2_dbgfs_flow_tbl_entry_init(struct dentry *parent,
- struct mvpp2 *priv, int id)
-{
- struct mvpp2_dbgfs_flow_tbl_entry *entry;
- struct dentry *flow_tbl_entry_dir;
- char flow_tbl_entry_name[10];
-
- if (id >= MVPP2_CLS_FLOWS_TBL_SIZE)
- return -EINVAL;
-
- sprintf(flow_tbl_entry_name, "%03d", id);
-
- flow_tbl_entry_dir = debugfs_create_dir(flow_tbl_entry_name, parent);
- if (!flow_tbl_entry_dir)
- return -ENOMEM;
-
- entry = &priv->dbgfs_entries->flt_entries[id];
-
- entry->id = id;
- entry->priv = priv;
-
- debugfs_create_file("hits", 0444, flow_tbl_entry_dir, entry,
- &mvpp2_dbgfs_flow_flt_hits_fops);
-
- return 0;
-}
-
-static int mvpp2_dbgfs_cls_init(struct dentry *parent, struct mvpp2 *priv)
-{
- struct dentry *cls_dir, *c2_dir, *flow_tbl_dir;
- int i, ret;
-
- cls_dir = debugfs_create_dir("classifier", parent);
- if (!cls_dir)
- return -ENOMEM;
-
- c2_dir = debugfs_create_dir("c2", cls_dir);
- if (!c2_dir)
- return -ENOMEM;
-
- for (i = 0; i < MVPP22_CLS_C2_N_ENTRIES; i++) {
- ret = mvpp2_dbgfs_c2_entry_init(c2_dir, priv, i);
- if (ret)
- return ret;
- }
-
- flow_tbl_dir = debugfs_create_dir("flow_table", cls_dir);
- if (!flow_tbl_dir)
- return -ENOMEM;
-
- for (i = 0; i < MVPP2_CLS_FLOWS_TBL_SIZE; i++) {
- ret = mvpp2_dbgfs_flow_tbl_entry_init(flow_tbl_dir, priv, i);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
static int mvpp2_dbgfs_port_init(struct dentry *parent,
struct mvpp2_port *port)
{
struct dentry *port_dir;
port_dir = debugfs_create_dir(port->dev->name, parent);
+ if (IS_ERR(port_dir))
+ return PTR_ERR(port_dir);
debugfs_create_file("parser_entries", 0444, port_dir, port,
&mvpp2_dbgfs_port_parser_fops);
@@ -688,14 +642,29 @@ static int mvpp2_dbgfs_port_init(struct dentry *parent,
debugfs_create_file("vid_filter", 0444, port_dir, port,
&mvpp2_dbgfs_port_vid_fops);
+ debugfs_create_file("c2_hits", 0444, port_dir, port,
+ &mvpp2_dbgfs_flow_c2_hits_fops);
+
+ debugfs_create_file("default_rxq", 0444, port_dir, port,
+ &mvpp2_dbgfs_flow_c2_rxq_fops);
+
+ debugfs_create_file("rss_enable", 0444, port_dir, port,
+ &mvpp2_dbgfs_flow_c2_enable_fops);
+
return 0;
}
void mvpp2_dbgfs_cleanup(struct mvpp2 *priv)
{
- debugfs_remove_recursive(priv->dbgfs_dir);
+ int i;
- kfree(priv->dbgfs_entries);
+ for (i = 0; i < MVPP2_PRS_TCAM_SRAM_SIZE; i++)
+ kfree(priv->dbgfs_prs_entry[i]);
+
+ for (i = 0; i < MVPP2_N_FLOWS; i++)
+ kfree(priv->dbgfs_flow_entry[i]);
+
+ debugfs_remove_recursive(priv->dbgfs_dir);
}
void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name)
@@ -704,24 +673,22 @@ void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name)
int ret, i;
mvpp2_root = debugfs_lookup(MVPP2_DRIVER_NAME, NULL);
- if (!mvpp2_root)
+ if (!mvpp2_root) {
mvpp2_root = debugfs_create_dir(MVPP2_DRIVER_NAME, NULL);
+ if (IS_ERR(mvpp2_root))
+ return;
+ }
mvpp2_dir = debugfs_create_dir(name, mvpp2_root);
+ if (IS_ERR(mvpp2_dir))
+ return;
priv->dbgfs_dir = mvpp2_dir;
- priv->dbgfs_entries = kzalloc(sizeof(*priv->dbgfs_entries), GFP_KERNEL);
- if (!priv->dbgfs_entries)
- goto err;
ret = mvpp2_dbgfs_prs_init(mvpp2_dir, priv);
if (ret)
goto err;
- ret = mvpp2_dbgfs_cls_init(mvpp2_dir, priv);
- if (ret)
- goto err;
-
for (i = 0; i < priv->port_count; i++) {
ret = mvpp2_dbgfs_port_init(mvpp2_dir, priv->port_list[i]);
if (ret)
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 542cd6f2c9bd..ad63ac5604f9 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -8,6 +8,7 @@
*/
#include <linux/acpi.h>
+#include <linux/dma-direct.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -17,6 +18,7 @@
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/mfd/syscon.h>
+#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/of.h>
@@ -25,6 +27,8 @@
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/genalloc.h>
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/phy/phy.h>
@@ -34,62 +38,117 @@
#include <linux/ktime.h>
#include <linux/regmap.h>
#include <uapi/linux/ppp_defs.h>
+#include <net/dsa.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tso.h>
#include <linux/bpf_trace.h>
+#include <net/busy_poll.h>
#include "mvpp2.h"
#include "mvpp2_prs.h"
#include "mvpp2_cls.h"
-enum mvpp2_bm_pool_log_num {
- MVPP2_BM_SHORT,
- MVPP2_BM_LONG,
- MVPP2_BM_JUMBO,
- MVPP2_BM_POOLS_NUM
+/* RX-TX fast-forwarding path optimization */
+#define MVPP2_RXTX_HASH 0xbac0
+#define MVPP2_RXTX_HASH_CONST_MASK 0xfff0
+#define MVPP2_RXTX_HASH_BMID_MASK 0xf
+/* HashBits[31..16] contain skb->head[22..7]; the head is cache-aligned, so
+ * bits [6..0] are zero and skb->head is shifted left by (16 - 7) bits.
+ * This hash detects two non-recyclable cases:
+ * - a new skb carrying a stale hash
+ * - the same skb whose data buffer was replaced by the network stack
+ */
+#define MVPP2_HEAD_HASH_SHIFT (16 - 7)
+#define MVPP2_RXTX_HASH_GENER(skb, bm_pool_id) \
+ (((u32)(phys_addr_t)skb->head << MVPP2_HEAD_HASH_SHIFT) | \
+ MVPP2_RXTX_HASH | bm_pool_id)
+#define MVPP2_RXTX_HASH_IS_OK(skb, hash) \
+ (MVPP2_RXTX_HASH_GENER(skb, 0) == (hash & ~MVPP2_RXTX_HASH_BMID_MASK))
+#define MVPP2_RXTX_HASH_IS_OK_TX(skb, hash) \
+ (((((u32)(phys_addr_t)skb->head << MVPP2_HEAD_HASH_SHIFT) | \
+ MVPP2_RXTX_HASH) ^ hash) <= MVPP2_RXTX_HASH_BMID_MASK)
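A small stand-alone sketch of this recycle-hash scheme (a plain aligned buffer stands in for skb->head; the constants are copied from above, the helper names are hypothetical and used only here, not part of the patch):

	#include <stdint.h>
	#include <stdio.h>

	#define MVPP2_RXTX_HASH			0xbac0
	#define MVPP2_RXTX_HASH_BMID_MASK	0xf
	#define MVPP2_HEAD_HASH_SHIFT		(16 - 7)

	/* Same construction as MVPP2_RXTX_HASH_GENER(), with a plain pointer
	 * standing in for skb->head.
	 */
	static uint32_t rxtx_hash_gener(const void *head, uint32_t bm_pool_id)
	{
		return ((uint32_t)(uintptr_t)head << MVPP2_HEAD_HASH_SHIFT) |
		       MVPP2_RXTX_HASH | bm_pool_id;
	}

	/* TX-side check: the XOR cancels the head and signature bits, so any
	 * result not larger than the BMID mask means "same buffer, only the
	 * pool id may differ".
	 */
	static int rxtx_hash_is_ok_tx(const void *head, uint32_t hash)
	{
		return ((((uint32_t)(uintptr_t)head << MVPP2_HEAD_HASH_SHIFT) |
			 MVPP2_RXTX_HASH) ^ hash) <= MVPP2_RXTX_HASH_BMID_MASK;
	}

	int main(void)
	{
		static char buf[256] __attribute__((aligned(128)));
		uint32_t hash = rxtx_hash_gener(buf, 2);

		/* Same head: recyclable; moved head: the hash no longer matches */
		printf("hash=0x%08x same=%d moved=%d\n", (unsigned int)hash,
		       rxtx_hash_is_ok_tx(buf, hash),
		       rxtx_hash_is_ok_tx(buf + 128, hash));
		return 0;
	}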
+
+/* The recycle pool should be effectively large but still bounded, to avoid
+ * wasting memory on TX pick. It must be larger than 8 (the net-stack
+ * forwarding budget) and larger than the packet-coalescing threshold; to be
+ * effective it should be >= NAPI_POLL_WEIGHT.
+ * With 4 ports more buffers are needed, but not 4x: statistically 3x is
+ * enough. The SKB pool is shared between Small/Large/Jumbo buffers, so more
+ * SKBs are needed: statistically 5x is enough (with the usual
+ * NAPI_POLL_WEIGHT of 64 this gives 192 and 320 entries respectively).
+ */
+#define MVPP2_RECYCLE_FULL (NAPI_POLL_WEIGHT * 3)
+#define MVPP2_RECYCLE_FULL_SKB (NAPI_POLL_WEIGHT * 5)
+
+#define MVPP2_NUM_OF_TC 1
+
+struct mvpp2_recycle_pool {
+ void *pbuf[MVPP2_RECYCLE_FULL_SKB];
};
-static struct {
- int pkt_size;
- int buf_num;
-} mvpp2_pools[MVPP2_BM_POOLS_NUM];
+struct mvpp2_recycle_pcpu {
+ /* All pool indexes fit in a single cache line */
+ short int idx[MVPP2_BM_POOLS_NUM_MAX];
+ /* BM/SKB-buffer pools */
+ struct mvpp2_recycle_pool pool[MVPP2_BM_POOLS_NUM_MAX];
+} __aligned(L1_CACHE_BYTES);
+
+struct mvpp2_share {
+ struct mvpp2_recycle_pcpu *recycle;
+ void *recycle_base;
+
+ /* Counters set by Probe/Init/Open */
+ int num_open_ports;
+} __aligned(L1_CACHE_BYTES);
+
+/* Normal RSS entry */
+struct mvpp2_rss_tbl_entry {
+ u8 tbl_id;
+ u8 tbl_line;
+ u8 width;
+ u8 rxq;
+};
+
+struct mvpp2_share mvpp2_share;
+
+#ifndef MODULE
+static inline void mvpp2_recycle_put(struct mvpp2_port *port,
+ struct mvpp2_txq_pcpu *txq_pcpu,
+ struct mvpp2_txq_pcpu_buf *tx_buf);
+#endif
+
+static void mvpp2_tx_done_guard_force_irq(struct mvpp2_port *port,
+ int sw_thread, u8 to_zero_map);
+static inline void mvpp2_tx_done_guard_timer_set(struct mvpp2_port *port,
+ int sw_thread);
+static u32 mvpp2_tx_done_guard_get_stats(struct mvpp2_port *port, int cpu);
/* The prototype is added here to be used in start_dev when using ACPI. This
* will be removed once phylink is used for all modes (dt+ACPI).
*/
static void mvpp2_acpi_start(struct mvpp2_port *port);
+/* Branch prediction switches */
+DEFINE_STATIC_KEY_FALSE(mvpp21_variant);
+DEFINE_STATIC_KEY_FALSE(mvpp2_recycle_ena);
+
/* Queue modes */
#define MVPP2_QDIST_SINGLE_MODE 0
#define MVPP2_QDIST_MULTI_MODE 1
static int queue_mode = MVPP2_QDIST_MULTI_MODE;
+static int tx_fifo_protection;
+static int bm_underrun_protect = 1;
+static int recycle;
+static u32 tx_fifo_map;
module_param(queue_mode, int, 0444);
MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)");
-/* Utility/helper methods */
-
-void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
-{
- writel(data, priv->swth_base[0] + offset);
-}
+module_param(tx_fifo_protection, int, 0444);
+MODULE_PARM_DESC(tx_fifo_protection, "Set tx_fifo_protection (off=0, on=1)");
-u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
-{
- return readl(priv->swth_base[0] + offset);
-}
-
-static u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset)
-{
- return readl_relaxed(priv->swth_base[0] + offset);
-}
-
-static inline u32 mvpp2_cpu_to_thread(struct mvpp2 *priv, int cpu)
-{
- return cpu % priv->nthreads;
-}
+module_param(bm_underrun_protect, int, 0444);
+MODULE_PARM_DESC(bm_underrun_protect, "Set BM underrun protect feature (0-1), def=1");
static struct page_pool *
mvpp2_create_page_pool(struct device *dev, int num, int len,
@@ -109,70 +168,16 @@ mvpp2_create_page_pool(struct device *dev, int num, int len,
return page_pool_create(&pp_params);
}
-/* These accessors should be used to access:
- *
- * - per-thread registers, where each thread has its own copy of the
- * register.
- *
- * MVPP2_BM_VIRT_ALLOC_REG
- * MVPP2_BM_ADDR_HIGH_ALLOC
- * MVPP22_BM_ADDR_HIGH_RLS_REG
- * MVPP2_BM_VIRT_RLS_REG
- * MVPP2_ISR_RX_TX_CAUSE_REG
- * MVPP2_ISR_RX_TX_MASK_REG
- * MVPP2_TXQ_NUM_REG
- * MVPP2_AGGR_TXQ_UPDATE_REG
- * MVPP2_TXQ_RSVD_REQ_REG
- * MVPP2_TXQ_RSVD_RSLT_REG
- * MVPP2_TXQ_SENT_REG
- * MVPP2_RXQ_NUM_REG
- *
- * - global registers that must be accessed through a specific thread
- * window, because they are related to an access to a per-thread
- * register
- *
- * MVPP2_BM_PHY_ALLOC_REG (related to MVPP2_BM_VIRT_ALLOC_REG)
- * MVPP2_BM_PHY_RLS_REG (related to MVPP2_BM_VIRT_RLS_REG)
- * MVPP2_RXQ_THRESH_REG (related to MVPP2_RXQ_NUM_REG)
- * MVPP2_RXQ_DESC_ADDR_REG (related to MVPP2_RXQ_NUM_REG)
- * MVPP2_RXQ_DESC_SIZE_REG (related to MVPP2_RXQ_NUM_REG)
- * MVPP2_RXQ_INDEX_REG (related to MVPP2_RXQ_NUM_REG)
- * MVPP2_TXQ_PENDING_REG (related to MVPP2_TXQ_NUM_REG)
- * MVPP2_TXQ_DESC_ADDR_REG (related to MVPP2_TXQ_NUM_REG)
- * MVPP2_TXQ_DESC_SIZE_REG (related to MVPP2_TXQ_NUM_REG)
- * MVPP2_TXQ_INDEX_REG (related to MVPP2_TXQ_NUM_REG)
- * MVPP2_TXQ_PENDING_REG (related to MVPP2_TXQ_NUM_REG)
- * MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG)
- * MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG)
- */
-static void mvpp2_thread_write(struct mvpp2 *priv, unsigned int thread,
- u32 offset, u32 data)
-{
- writel(data, priv->swth_base[thread] + offset);
-}
+module_param(recycle, int, 0444);
+MODULE_PARM_DESC(recycle, "Recycle: 0:disable(default), >=1:enable");
-static u32 mvpp2_thread_read(struct mvpp2 *priv, unsigned int thread,
- u32 offset)
-{
- return readl(priv->swth_base[thread] + offset);
-}
-
-static void mvpp2_thread_write_relaxed(struct mvpp2 *priv, unsigned int thread,
- u32 offset, u32 data)
-{
- writel_relaxed(data, priv->swth_base[thread] + offset);
-}
-
-static u32 mvpp2_thread_read_relaxed(struct mvpp2 *priv, unsigned int thread,
- u32 offset)
-{
- return readl_relaxed(priv->swth_base[thread] + offset);
-}
+module_param(tx_fifo_map, uint, 0444);
+MODULE_PARM_DESC(tx_fifo_map, "Set PPv2 TX FIFO ports map");
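All four new knobs are registered with 0444 permissions, so they are read-only through sysfs and can only be set at module load time, for instance via a kernel command-line option such as mvpp2.recycle=1 (an illustrative invocation, not taken from this patch).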
static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
struct mvpp2_tx_desc *tx_desc)
{
- if (port->priv->hw_version == MVPP21)
+ if (static_branch_unlikely(&mvpp21_variant))
return le32_to_cpu(tx_desc->pp21.buf_dma_addr);
else
return le64_to_cpu(tx_desc->pp22.buf_dma_addr_ptp) &
@@ -188,7 +193,7 @@ static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
addr = dma_addr & ~MVPP2_TX_DESC_ALIGN;
offset = dma_addr & MVPP2_TX_DESC_ALIGN;
- if (port->priv->hw_version == MVPP21) {
+ if (static_branch_unlikely(&mvpp21_variant)) {
tx_desc->pp21.buf_dma_addr = cpu_to_le32(addr);
tx_desc->pp21.packet_offset = offset;
} else {
@@ -203,7 +208,7 @@ static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
struct mvpp2_tx_desc *tx_desc)
{
- if (port->priv->hw_version == MVPP21)
+ if (static_branch_unlikely(&mvpp21_variant))
return le16_to_cpu(tx_desc->pp21.data_size);
else
return le16_to_cpu(tx_desc->pp22.data_size);
@@ -213,7 +218,7 @@ static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
struct mvpp2_tx_desc *tx_desc,
size_t size)
{
- if (port->priv->hw_version == MVPP21)
+ if (static_branch_unlikely(&mvpp21_variant))
tx_desc->pp21.data_size = cpu_to_le16(size);
else
tx_desc->pp22.data_size = cpu_to_le16(size);
@@ -223,7 +228,7 @@ static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
struct mvpp2_tx_desc *tx_desc,
unsigned int txq)
{
- if (port->priv->hw_version == MVPP21)
+ if (static_branch_unlikely(&mvpp21_variant))
tx_desc->pp21.phys_txq = txq;
else
tx_desc->pp22.phys_txq = txq;
@@ -233,7 +238,7 @@ static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
struct mvpp2_tx_desc *tx_desc,
unsigned int command)
{
- if (port->priv->hw_version == MVPP21)
+ if (static_branch_unlikely(&mvpp21_variant))
tx_desc->pp21.command = cpu_to_le32(command);
else
tx_desc->pp22.command = cpu_to_le32(command);
@@ -242,7 +247,7 @@ static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
struct mvpp2_tx_desc *tx_desc)
{
- if (port->priv->hw_version == MVPP21)
+ if (static_branch_unlikely(&mvpp21_variant))
return tx_desc->pp21.packet_offset;
else
return tx_desc->pp22.packet_offset;
@@ -251,27 +256,17 @@ static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
struct mvpp2_rx_desc *rx_desc)
{
- if (port->priv->hw_version == MVPP21)
+ if (static_branch_unlikely(&mvpp21_variant))
return le32_to_cpu(rx_desc->pp21.buf_dma_addr);
else
return le64_to_cpu(rx_desc->pp22.buf_dma_addr_key_hash) &
MVPP2_DESC_DMA_MASK;
}
-static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
- struct mvpp2_rx_desc *rx_desc)
-{
- if (port->priv->hw_version == MVPP21)
- return le32_to_cpu(rx_desc->pp21.buf_cookie);
- else
- return le64_to_cpu(rx_desc->pp22.buf_cookie_misc) &
- MVPP2_DESC_DMA_MASK;
-}
-
static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
struct mvpp2_rx_desc *rx_desc)
{
- if (port->priv->hw_version == MVPP21)
+ if (static_branch_unlikely(&mvpp21_variant))
return le16_to_cpu(rx_desc->pp21.data_size);
else
return le16_to_cpu(rx_desc->pp22.data_size);
@@ -280,7 +275,7 @@ static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
struct mvpp2_rx_desc *rx_desc)
{
- if (port->priv->hw_version == MVPP21)
+ if (static_branch_unlikely(&mvpp21_variant))
return le32_to_cpu(rx_desc->pp21.status);
else
return le32_to_cpu(rx_desc->pp22.status);
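The hunks above swap the per-call port->priv->hw_version == MVPP21 comparison for the mvpp21_variant static key declared earlier, so the hot descriptor accessors carry no runtime load/compare once the key is patched. A minimal sketch of the pattern, assuming a hypothetical probe-time helper (the patch's actual enable site is outside this excerpt):

#include <linux/jump_label.h>

/* Sketch only: the key name mirrors the patch, the helpers are hypothetical. */
DEFINE_STATIC_KEY_FALSE(example_mvpp21_variant);

static void example_set_variant(bool is_pp21)
{
	/* Done once at probe time; patches every branch that tests the key. */
	if (is_pp21)
		static_branch_enable(&example_mvpp21_variant);
}

static bool example_is_pp21(void)
{
	/* Compiles to a runtime-patched jump rather than a load + compare. */
	return static_branch_unlikely(&example_mvpp21_variant);
}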
@@ -314,26 +309,6 @@ static void mvpp2_txq_inc_put(struct mvpp2_port *port,
txq_pcpu->txq_put_index = 0;
}
-/* Get number of maximum RXQ */
-static int mvpp2_get_nrxqs(struct mvpp2 *priv)
-{
- unsigned int nrxqs;
-
- if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_SINGLE_MODE)
- return 1;
-
- /* According to the PPv2.2 datasheet and our experiments on
- * PPv2.1, RX queues have an allocation granularity of 4 (when
- * more than a single one on PPv2.2).
- * Round up to nearest multiple of 4.
- */
- nrxqs = (num_possible_cpus() + 3) & ~0x3;
- if (nrxqs > MVPP2_PORT_MAX_RXQ)
- nrxqs = MVPP2_PORT_MAX_RXQ;
-
- return nrxqs;
-}
-
/* Get number of physical egress port */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
@@ -372,8 +347,85 @@ static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool,
/* Buffer Manager configuration routines */
+/* Get default packet size for given BM pool type */
+static int mvpp2_bm_pool_default_pkt_size(enum mvpp2_bm_pool_type bm_pool_type)
+{
+ switch (bm_pool_type) {
+ case MVPP2_BM_SHORT:
+ return MVPP2_BM_SHORT_PKT_SIZE;
+ case MVPP2_BM_JUMBO:
+ return MVPP2_BM_JUMBO_PKT_SIZE;
+ case MVPP2_BM_LONG:
+ return MVPP2_BM_LONG_PKT_SIZE;
+ default:
+ return -EINVAL;
+ }
+}
+
+/* Get default buffer count for given BM pool type */
+static int mvpp2_bm_pool_default_buf_num(enum mvpp2_bm_pool_type bm_pool_type)
+{
+ switch (bm_pool_type) {
+ case MVPP2_BM_SHORT:
+ return MVPP2_BM_SHORT_BUF_NUM;
+ case MVPP2_BM_JUMBO:
+ return MVPP2_BM_JUMBO_BUF_NUM;
+ case MVPP2_BM_LONG:
+ return MVPP2_BM_LONG_BUF_NUM;
+ default:
+ return -EINVAL;
+ }
+}
+
+/* Get BM pool type mapping - return the hardware Buffer Manager pool
+ * type according to its ID:
+ * POOL#0 - short packets
+ * POOL#1 - jumbo packets
+ * POOL#2 - long packets
+ * If the kernel-space (KS) recycling feature is enabled, ID 2 is
+ * the first (CPU#0) of the per-CPU pools for long packets.
+ */
+static enum mvpp2_bm_pool_type mvpp2_bm_pool_get_type(int id)
+{
+ switch (id) {
+ case 0:
+ return MVPP2_BM_SHORT;
+ case 1:
+ return MVPP2_BM_JUMBO;
+ case 2:
+ return MVPP2_BM_LONG;
+ default:
+ if (recycle)
+ return MVPP2_BM_LONG;
+ return -EINVAL;
+ }
+}
+
+/* Get BM pool ID mapping - return the hardware Buffer Manager pool
+ * ID according to its type:
+ * Short packets - POOL#0
+ * Jumbo packets - POOL#1
+ * Long packets - POOL#2
+ * If the kernel-space (KS) recycling feature is enabled, ID 2 is
+ * the first (CPU#0) of the per-CPU pools for long packets.
+ */
+static int mvpp2_bm_pool_get_id(enum mvpp2_bm_pool_type bm_pool_type)
+{
+ switch (bm_pool_type) {
+ case MVPP2_BM_SHORT:
+ return 0;
+ case MVPP2_BM_JUMBO:
+ return 1;
+ case MVPP2_BM_LONG:
+ return 2;
+ default:
+ return -EINVAL;
+ }
+}
+
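For the fixed pools the two helpers above are inverses of one another; a hypothetical self-check, not part of the patch, illustrating the round trip:

/* Hypothetical: IDs 0..2 round-trip through type and back; IDs above 2
 * only resolve to MVPP2_BM_LONG when the recycle parameter is set.
 */
static void mvpp2_bm_pool_mapping_check(void)
{
	int id;

	for (id = 0; id <= 2; id++)
		WARN_ON(mvpp2_bm_pool_get_id(mvpp2_bm_pool_get_type(id)) != id);
}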
/* Create pool */
-static int mvpp2_bm_pool_create(struct device *dev, struct mvpp2 *priv,
+static int mvpp2_bm_pool_create(struct platform_device *pdev,
+ struct mvpp2 *priv,
struct mvpp2_bm_pool *bm_pool, int size)
{
u32 val;
@@ -384,7 +436,7 @@ static int mvpp2_bm_pool_create(struct device *dev, struct mvpp2 *priv,
if (!IS_ALIGNED(size, 16))
return -EINVAL;
- /* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 needs 16
+ /* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 and PPv2.3 need 16
* bytes per buffer pointer
*/
if (priv->hw_version == MVPP21)
@@ -392,7 +444,7 @@ static int mvpp2_bm_pool_create(struct device *dev, struct mvpp2 *priv,
else
bm_pool->size_bytes = 2 * sizeof(u64) * size;
- bm_pool->virt_addr = dma_alloc_coherent(dev, bm_pool->size_bytes,
+ bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, bm_pool->size_bytes,
&bm_pool->dma_addr,
GFP_KERNEL);
if (!bm_pool->virt_addr)
@@ -400,9 +452,9 @@ static int mvpp2_bm_pool_create(struct device *dev, struct mvpp2 *priv,
if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
MVPP2_BM_POOL_PTR_ALIGN)) {
- dma_free_coherent(dev, bm_pool->size_bytes,
+ dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
bm_pool->virt_addr, bm_pool->dma_addr);
- dev_err(dev, "BM pool %d is not %d bytes aligned\n",
+ dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
return -ENOMEM;
}
@@ -413,11 +465,27 @@ static int mvpp2_bm_pool_create(struct device *dev, struct mvpp2 *priv,
val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
val |= MVPP2_BM_START_MASK;
+
+ val &= ~MVPP2_BM_LOW_THRESH_MASK;
+ val &= ~MVPP2_BM_HIGH_THRESH_MASK;
+
+ /* Set the 8-pool BPPI thresholds if the BM underrun protection
+ * feature is enabled
+ */
+ if (priv->hw_version == MVPP23 && bm_underrun_protect) {
+ val |= MVPP2_BM_LOW_THRESH_VALUE(MVPP23_BM_BPPI_LOW_THRESH);
+ val |= MVPP2_BM_HIGH_THRESH_VALUE(MVPP23_BM_BPPI_HIGH_THRESH);
+ } else {
+ val |= MVPP2_BM_LOW_THRESH_VALUE(MVPP2_BM_BPPI_LOW_THRESH);
+ val |= MVPP2_BM_HIGH_THRESH_VALUE(MVPP2_BM_BPPI_HIGH_THRESH);
+ }
+
mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
bm_pool->size = size;
bm_pool->pkt_size = 0;
bm_pool->buf_num = 0;
+ bm_pool->type = mvpp2_bm_pool_get_type(bm_pool->id);
return 0;
}
@@ -444,23 +512,16 @@ static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
*dma_addr = mvpp2_thread_read(priv, thread,
MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
- *phys_addr = mvpp2_thread_read(priv, thread, MVPP2_BM_VIRT_ALLOC_REG);
- if (priv->hw_version == MVPP22) {
+ if (priv->hw_version != MVPP21 && sizeof(dma_addr_t) == 8) {
u32 val;
- u32 dma_addr_highbits, phys_addr_highbits;
+ u32 dma_addr_highbits;
val = mvpp2_thread_read(priv, thread, MVPP22_BM_ADDR_HIGH_ALLOC);
dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK);
- phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >>
- MVPP22_BM_ADDR_HIGH_VIRT_SHIFT;
-
- if (sizeof(dma_addr_t) == 8)
- *dma_addr |= (u64)dma_addr_highbits << 32;
-
- if (sizeof(phys_addr_t) == 8)
- *phys_addr |= (u64)phys_addr_highbits << 32;
+ *dma_addr |= (u64)dma_addr_highbits << 32;
}
+ *phys_addr = dma_to_phys(dev, *dma_addr);
put_cpu();
}
@@ -522,14 +583,15 @@ static int mvpp2_check_hw_buf_num(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_p
}
/* Cleanup pool */
-static int mvpp2_bm_pool_destroy(struct device *dev, struct mvpp2 *priv,
+static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
+ struct mvpp2 *priv,
struct mvpp2_bm_pool *bm_pool)
{
int buf_num;
u32 val;
buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
- mvpp2_bm_bufs_free(dev, priv, bm_pool, buf_num);
+ mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool, buf_num);
/* Check buffer counters after free */
buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
@@ -548,26 +610,37 @@ static int mvpp2_bm_pool_destroy(struct device *dev, struct mvpp2 *priv,
priv->page_pool[bm_pool->id] = NULL;
}
- dma_free_coherent(dev, bm_pool->size_bytes,
+ dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
bm_pool->virt_addr,
bm_pool->dma_addr);
return 0;
}
-static int mvpp2_bm_pools_init(struct device *dev, struct mvpp2 *priv)
+static int mvpp2_bm_pools_init(struct platform_device *pdev,
+ struct mvpp2 *priv)
{
- int i, err, size, poolnum = MVPP2_BM_POOLS_NUM;
+ int i, err, size, cpu;
struct mvpp2_bm_pool *bm_pool;
- if (priv->percpu_pools)
- poolnum = mvpp2_get_nrxqs(priv) * 2;
+ if (recycle) {
+ /* Allocate per-CPU long pools array */
+ priv->pools_pcpu = devm_kcalloc(&pdev->dev, num_present_cpus(),
+ sizeof(*priv->pools_pcpu),
+ GFP_KERNEL);
+ if (!priv->pools_pcpu)
+ return -ENOMEM;
+ }
+
+ /* Initialize the virtual-address release register with 0x0 */
+ for_each_present_cpu(cpu)
+ mvpp2_thread_write(priv, cpu, MVPP2_BM_VIRT_RLS_REG, 0x0);
/* Create all pools with maximum size */
size = MVPP2_BM_POOL_SIZE_MAX;
- for (i = 0; i < poolnum; i++) {
+ for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
bm_pool = &priv->bm_pools[i];
bm_pool->id = i;
- err = mvpp2_bm_pool_create(dev, priv, bm_pool, size);
+ err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size);
if (err)
goto err_unroll_pools;
mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
@@ -575,86 +648,69 @@ static int mvpp2_bm_pools_init(struct device *dev, struct mvpp2 *priv)
return 0;
err_unroll_pools:
- dev_err(dev, "failed to create BM pool %d, size %d\n", i, size);
+ dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size);
for (i = i - 1; i >= 0; i--)
- mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]);
+ mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]);
return err;
}
-static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv)
+/* Enable PPv2.3 8-pool mode */
+static void mvpp23_bm_set_8pool_mode(struct mvpp2 *priv)
{
- enum dma_data_direction dma_dir = DMA_FROM_DEVICE;
- int i, err, poolnum = MVPP2_BM_POOLS_NUM;
- struct mvpp2_port *port;
+ int val;
- if (priv->percpu_pools) {
- for (i = 0; i < priv->port_count; i++) {
- port = priv->port_list[i];
- if (port->xdp_prog) {
- dma_dir = DMA_BIDIRECTIONAL;
- break;
- }
- }
+ val = mvpp2_read(priv, MVPP22_BM_POOL_BASE_ADDR_HIGH_REG);
+ val |= MVPP23_BM_8POOL_MODE;
+ mvpp2_write(priv, MVPP22_BM_POOL_BASE_ADDR_HIGH_REG, val);
+}
- poolnum = mvpp2_get_nrxqs(priv) * 2;
- for (i = 0; i < poolnum; i++) {
- /* the pool in use */
- int pn = i / (poolnum / 2);
-
- priv->page_pool[i] =
- mvpp2_create_page_pool(dev,
- mvpp2_pools[pn].buf_num,
- mvpp2_pools[pn].pkt_size,
- dma_dir);
- if (IS_ERR(priv->page_pool[i])) {
- int j;
-
- for (j = 0; j < i; j++) {
- page_pool_destroy(priv->page_pool[j]);
- priv->page_pool[j] = NULL;
- }
- return PTR_ERR(priv->page_pool[i]);
- }
- }
- }
+/* Cleanup pool before actual initialization in the OS */
+static void mvpp2_bm_pool_cleanup(struct mvpp2 *priv, int pool_id)
+{
+ u32 val;
+ int i;
- dev_info(dev, "using %d %s buffers\n", poolnum,
- priv->percpu_pools ? "per-cpu" : "shared");
+ /* Drain the BM from all possible residues left by firmware */
+ for (i = 0; i < MVPP2_BM_POOL_SIZE_MAX; i++)
+ mvpp2_read(priv, MVPP2_BM_PHY_ALLOC_REG(pool_id));
+
+ /* Stop the BM pool */
+ val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(pool_id));
+ val |= MVPP2_BM_STOP_MASK;
+ mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(pool_id), val);
- for (i = 0; i < poolnum; i++) {
- /* Mask BM all interrupts */
- mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
- /* Clear BM cause register */
- mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
+ /* Mask BM all interrupts */
+ mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(pool_id), 0);
+ /* Clear BM cause register */
+ mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(pool_id), 0);
+}
+
+static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
+{
+ int i, err;
+
+ for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
+ /* Make sure the pool is in a clean state in case it was
+ * used by firmware.
+ */
+ mvpp2_bm_pool_cleanup(priv, i);
}
/* Allocate and initialize BM pools */
- priv->bm_pools = devm_kcalloc(dev, poolnum,
+ priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM,
sizeof(*priv->bm_pools), GFP_KERNEL);
if (!priv->bm_pools)
return -ENOMEM;
- err = mvpp2_bm_pools_init(dev, priv);
+ if (priv->hw_version == MVPP23 && bm_underrun_protect)
+ mvpp23_bm_set_8pool_mode(priv);
+
+ err = mvpp2_bm_pools_init(pdev, priv);
if (err < 0)
return err;
return 0;
}
-static void mvpp2_setup_bm_pool(void)
-{
- /* Short pool */
- mvpp2_pools[MVPP2_BM_SHORT].buf_num = MVPP2_BM_SHORT_BUF_NUM;
- mvpp2_pools[MVPP2_BM_SHORT].pkt_size = MVPP2_BM_SHORT_PKT_SIZE;
-
- /* Long pool */
- mvpp2_pools[MVPP2_BM_LONG].buf_num = MVPP2_BM_LONG_BUF_NUM;
- mvpp2_pools[MVPP2_BM_LONG].pkt_size = MVPP2_BM_LONG_PKT_SIZE;
-
- /* Jumbo pool */
- mvpp2_pools[MVPP2_BM_JUMBO].buf_num = MVPP2_BM_JUMBO_BUF_NUM;
- mvpp2_pools[MVPP2_BM_JUMBO].pkt_size = MVPP2_BM_JUMBO_PKT_SIZE;
-}
-
/* Attach long pool to rxq */
static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
int lrxq, int long_pool)
@@ -697,12 +753,11 @@ static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}
-static void *mvpp2_buf_alloc(struct mvpp2_port *port,
- struct mvpp2_bm_pool *bm_pool,
- struct page_pool *page_pool,
- dma_addr_t *buf_dma_addr,
- phys_addr_t *buf_phys_addr,
- gfp_t gfp_mask)
+
+static dma_addr_t mvpp2_buf_alloc(struct mvpp2_port *port,
+ struct mvpp2_bm_pool *bm_pool,
+ struct page_pool *page_pool,
+ gfp_t gfp_mask)
{
dma_addr_t dma_addr;
struct page *page;
@@ -710,7 +765,7 @@ static void *mvpp2_buf_alloc(struct mvpp2_port *port,
data = mvpp2_frag_alloc(bm_pool, page_pool);
if (!data)
- return NULL;
+ return (dma_addr_t)data;
if (page_pool) {
page = (struct page *)data;
@@ -718,23 +773,214 @@ static void *mvpp2_buf_alloc(struct mvpp2_port *port,
data = page_to_virt(page);
} else {
dma_addr = dma_map_single(port->dev->dev.parent, data,
- MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
- DMA_FROM_DEVICE);
+ bm_pool->buf_size, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
mvpp2_frag_free(bm_pool, NULL, data);
- return NULL;
+ dma_addr = 0;
+ }
+ return dma_addr;
+}
+
+/* Routine calculate single queue shares address space */
+static int mvpp22_calc_shared_addr_space(struct mvpp2_port *port)
+{
+ /* If the number of CPUs is greater than or equal to the number
+ * of threads, return the last address space
+ */
+ if (num_active_cpus() >= MVPP2_MAX_THREADS)
+ return MVPP2_MAX_THREADS - 1;
+
+ return num_active_cpus();
+}
+
+/* Enable flow control for the port's RXQs */
+void mvpp2_rxq_enable_fc(struct mvpp2_port *port)
+{
+ int val, cm3_state, host_id, q;
+ int fq = port->first_rxq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->priv->mss_spinlock, flags);
+
+ /* Clear the Flow Control enable bit to prevent a race between FW
+ * and the kernel. If Flow Control was enabled, it is re-enabled below.
+ */
+ val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
+ cm3_state = (val & FLOW_CONTROL_ENABLE_BIT);
+ val &= ~FLOW_CONTROL_ENABLE_BIT;
+ mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
+
+ /* Set same Flow control for all RXQs */
+ for (q = 0; q < port->nrxqs; q++) {
+ /* Set stop and start Flow control RXQ thresholds */
+ val = MSS_THRESHOLD_START;
+ val |= (MSS_THRESHOLD_STOP << MSS_RXQ_TRESH_STOP_OFFS);
+ mvpp2_cm3_write(port->priv, MSS_RXQ_TRESH_REG(q, fq), val);
+
+ val = mvpp2_cm3_read(port->priv, MSS_RXQ_ASS_REG(q, fq));
+ /* Set RXQ port ID */
+ val &= ~(MSS_RXQ_ASS_PORTID_MASK << MSS_RXQ_ASS_Q_BASE(q, fq));
+ val |= (port->id << MSS_RXQ_ASS_Q_BASE(q, fq));
+ val &= ~(MSS_RXQ_ASS_HOSTID_MASK << (MSS_RXQ_ASS_Q_BASE(q, fq)
+ + MSS_RXQ_ASS_HOSTID_OFFS));
+
+ /* Calculate the RXQ host ID:
+ * Single queue mode: host ID equals the one used for the
+ * shared RX interrupt
+ * Multi queue mode: host ID equals RXQ index / number of
+ * TC queues
+ * Single resource mode: host ID is always 0
+ */
+ if (queue_mode == MVPP2_QDIST_SINGLE_MODE)
+ host_id = mvpp22_calc_shared_addr_space(port);
+ else if (queue_mode == MVPP2_QDIST_MULTI_MODE)
+ host_id = q / port->num_tc_queues;
+ else
+ host_id = 0;
+
+ /* Set RXQ host ID */
+ val |= (host_id << (MSS_RXQ_ASS_Q_BASE(q, fq)
+ + MSS_RXQ_ASS_HOSTID_OFFS));
+
+ mvpp2_cm3_write(port->priv, MSS_RXQ_ASS_REG(q, fq), val);
+ }
+
+ /* Notify firmware that the Flow Control config space is ready for update */
+ val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
+ val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
+ val |= cm3_state;
+ mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
+
+ spin_unlock_irqrestore(&port->priv->mss_spinlock, flags);
+}
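As a concrete reading of the host-ID rule above (illustrative numbers only): in multi-queue mode with num_tc_queues = 4, RXQs 0-3 land on host ID 0 and RXQs 4-7 on host ID 1, while in single-queue mode every RXQ shares the single value returned by mvpp22_calc_shared_addr_space().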
+
+/* Routine disable flow control for RXQs conditon */
+void mvpp2_rxq_disable_fc(struct mvpp2_port *port)
+{
+ int val, cm3_state, q;
+ unsigned long flags;
+ int fq = port->first_rxq;
+
+ spin_lock_irqsave(&port->priv->mss_spinlock, flags);
+
+ /* Clear the Flow Control enable bit to prevent a race between FW
+ * and the kernel. If Flow Control was enabled, it is re-enabled below.
+ */
+ val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
+ cm3_state = (val & FLOW_CONTROL_ENABLE_BIT);
+ val &= ~FLOW_CONTROL_ENABLE_BIT;
+ mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
+
+ /* Disable Flow control for all RXQs */
+ for (q = 0; q < port->nrxqs; q++) {
+ /* Set threshold 0 to disable Flow control */
+ val = 0;
+ val |= (0 << MSS_RXQ_TRESH_STOP_OFFS);
+ mvpp2_cm3_write(port->priv, MSS_RXQ_TRESH_REG(q, fq), val);
+
+ val = mvpp2_cm3_read(port->priv, MSS_RXQ_ASS_REG(q, fq));
+
+ val &= ~(MSS_RXQ_ASS_PORTID_MASK << MSS_RXQ_ASS_Q_BASE(q, fq));
+
+ val &= ~(MSS_RXQ_ASS_HOSTID_MASK << (MSS_RXQ_ASS_Q_BASE(q, fq)
+ + MSS_RXQ_ASS_HOSTID_OFFS));
+
+ mvpp2_cm3_write(port->priv, MSS_RXQ_ASS_REG(q, fq), val);
+ }
+
+ /* Notify firmware that the Flow Control config space is ready for update */
+ val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
+ val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
+ val |= cm3_state;
+ mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
+
+ spin_unlock_irqrestore(&port->priv->mss_spinlock, flags);
+}
+
+/* Enable/disable flow control for a BM pool */
+void mvpp2_bm_pool_update_fc(struct mvpp2_port *port,
+ struct mvpp2_bm_pool *pool,
+ bool en)
+{
+ int val, cm3_state;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->priv->mss_spinlock, flags);
+
+ /* Clear the Flow Control enable bit to prevent a race between FW
+ * and the kernel. If Flow Control was enabled, it is re-enabled below.
+ */
+ val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
+ cm3_state = (val & FLOW_CONTROL_ENABLE_BIT);
+ val &= ~FLOW_CONTROL_ENABLE_BIT;
+ mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
+
+ /* Check whether the BM pool should be enabled or disabled */
+ if (en) {
+ /* Set BM pool start and stop thresholds per port */
+ val = mvpp2_cm3_read(port->priv, MSS_BUF_POOL_REG(pool->id));
+ val |= MSS_BUF_POOL_PORT_OFFS(port->id);
+ val &= ~MSS_BUF_POOL_START_MASK;
+ val |= (MSS_THRESHOLD_START << MSS_BUF_POOL_START_OFFS);
+ val &= ~MSS_BUF_POOL_STOP_MASK;
+ val |= MSS_THRESHOLD_STOP;
+ mvpp2_cm3_write(port->priv, MSS_BUF_POOL_REG(pool->id), val);
+ } else {
+ /* Remove BM pool from the port */
+ val = mvpp2_cm3_read(port->priv, MSS_BUF_POOL_REG(pool->id));
+ val &= ~MSS_BUF_POOL_PORT_OFFS(port->id);
+
+ /* Zero BM pool start and stop thresholds to disable pool
+ * flow control if pool empty (not used by any port)
+ */
+ if (!pool->buf_num) {
+ val &= ~MSS_BUF_POOL_START_MASK;
+ val &= ~MSS_BUF_POOL_STOP_MASK;
}
+
+ mvpp2_cm3_write(port->priv, MSS_BUF_POOL_REG(pool->id), val);
+ }
+
+ /* Notify firmware that the Flow Control config space is ready for update */
+ val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
+ val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
+ val |= cm3_state;
+ mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
+
+ spin_unlock_irqrestore(&port->priv->mss_spinlock, flags);
+}
+
+static int mvpp2_enable_global_fc(struct mvpp2 *priv)
+{
+ int val, timeout = 0;
+
+ /* Enable global flow control. At this stage global flow control
+ * is enabled, but it is still disabled per port.
+ */
+ val = mvpp2_cm3_read(priv, MSS_FC_COM_REG);
+ val |= FLOW_CONTROL_ENABLE_BIT;
+ mvpp2_cm3_write(priv, MSS_FC_COM_REG, val);
+
+ /* Check if the firmware is running and disable FC if not */
+ val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
+ mvpp2_cm3_write(priv, MSS_FC_COM_REG, val);
+
+ while (timeout < MSS_FC_MAX_TIMEOUT) {
+ val = mvpp2_cm3_read(priv, MSS_FC_COM_REG);
+
+ if (!(val & FLOW_CONTROL_UPDATE_COMMAND_BIT))
+ return 0;
+ usleep_range(10, 20);
+ timeout++;
}
- *buf_dma_addr = dma_addr;
- *buf_phys_addr = virt_to_phys(data);
- return data;
+ priv->global_tx_fc = false;
+ return -ENOTSUPP;
}
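The handshake above works by setting FLOW_CONTROL_UPDATE_COMMAND_BIT and polling until the CM3 firmware clears it; if the bit is still set after MSS_FC_MAX_TIMEOUT iterations, the firmware is assumed not to be running, global_tx_fc is cleared and the function returns an error, so the per-port flow-control configuration is expected to stay disabled.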
/* Release buffer to BM */
static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
- dma_addr_t buf_dma_addr,
- phys_addr_t buf_phys_addr)
+ dma_addr_t buf_dma_addr)
{
unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
unsigned long flags = 0;
@@ -742,29 +988,21 @@ static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
if (test_bit(thread, &port->priv->lock_map))
spin_lock_irqsave(&port->bm_lock[thread], flags);
- if (port->priv->hw_version == MVPP22) {
- u32 val = 0;
-
- if (sizeof(dma_addr_t) == 8)
- val |= upper_32_bits(buf_dma_addr) &
+ /* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW; it is simply
+ * returned in the "cookie" field of the RX descriptor.
+ * For performance reasons, don't store the VA/PA and don't use the
+ * "cookie": the VA/PA are recovered faster via dma_to_phys() and
+ * phys_to_virt().
+ */
+#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT) && defined(CONFIG_PHYS_ADDR_T_64BIT)
+ if (!static_branch_unlikely(&mvpp21_variant)) {
+ u32 val = upper_32_bits(buf_dma_addr) &
MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;
- if (sizeof(phys_addr_t) == 8)
- val |= (upper_32_bits(buf_phys_addr)
- << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
- MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;
-
mvpp2_thread_write_relaxed(port->priv, thread,
MVPP22_BM_ADDR_HIGH_RLS_REG, val);
}
+#endif
- /* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
- * returned in the "cookie" field of the RX
- * descriptor. Instead of storing the virtual address, we
- * store the physical address
- */
- mvpp2_thread_write_relaxed(port->priv, thread,
- MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
mvpp2_thread_write_relaxed(port->priv, thread,
MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
@@ -805,13 +1043,11 @@ static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
if (port->priv->percpu_pools)
pp = port->priv->page_pool[bm_pool->id];
for (i = 0; i < buf_num; i++) {
- buf = mvpp2_buf_alloc(port, bm_pool, pp, &dma_addr,
- &phys_addr, GFP_KERNEL);
- if (!buf)
+ dma_addr = mvpp2_buf_alloc(port, bm_pool, pp, GFP_KERNEL);
+ if (!dma_addr)
break;
- mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
- phys_addr);
+ mvpp2_bm_pool_put(port, bm_pool->id, dma_addr);
}
/* Update BM driver with number of buffers added to pool */
@@ -819,7 +1055,9 @@ static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
netdev_dbg(port->dev,
"pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
- bm_pool->id, bm_pool->pkt_size, buf_size, total_size);
+ bm_pool->id, bm_pool->pkt_size,
+ MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
+ bm_pool->frag_size);
netdev_dbg(port->dev,
"pool %d: %d of %d buffers added\n",
@@ -834,10 +1072,10 @@ static struct mvpp2_bm_pool *
mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned pool, int pkt_size)
{
struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
+ enum mvpp2_bm_pool_type pool_type = mvpp2_bm_pool_get_type(pool);
int num;
- if ((port->priv->percpu_pools && pool > mvpp2_get_nrxqs(port->priv) * 2) ||
- (!port->priv->percpu_pools && pool >= MVPP2_BM_POOLS_NUM)) {
+ if (pool >= MVPP2_BM_POOLS_NUM) {
netdev_err(port->dev, "Invalid pool %d\n", pool);
return NULL;
}
@@ -853,14 +1091,9 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned pool, int pkt_size)
*/
pkts_num = new_pool->buf_num;
if (pkts_num == 0) {
- if (port->priv->percpu_pools) {
- if (pool < port->nrxqs)
- pkts_num = mvpp2_pools[MVPP2_BM_SHORT].buf_num;
- else
- pkts_num = mvpp2_pools[MVPP2_BM_LONG].buf_num;
- } else {
- pkts_num = mvpp2_pools[pool].buf_num;
- }
+ pkts_num = mvpp2_bm_pool_default_buf_num(pool_type);
+ if (pkts_num < 0)
+ return NULL;
} else {
mvpp2_bm_bufs_free(port->dev->dev.parent,
port->priv, new_pool, pkts_num);
@@ -886,76 +1119,57 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned pool, int pkt_size)
return new_pool;
}
-static struct mvpp2_bm_pool *
-mvpp2_bm_pool_use_percpu(struct mvpp2_port *port, int type,
- unsigned int pool, int pkt_size)
+/* Create the per-CPU long pool on the current CPU */
+static void mvpp2_bm_pool_pcpu_use(void *arg)
{
- struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
- int num;
-
- if (pool > port->nrxqs * 2) {
- netdev_err(port->dev, "Invalid pool %d\n", pool);
- return NULL;
- }
-
- /* Allocate buffers in case BM pool is used as long pool, but packet
- * size doesn't match MTU or BM pool hasn't being used yet
- */
- if (new_pool->pkt_size == 0) {
- int pkts_num;
-
- /* Set default buffer number or free all the buffers in case
- * the pool is not empty
- */
- pkts_num = new_pool->buf_num;
- if (pkts_num == 0)
- pkts_num = mvpp2_pools[type].buf_num;
- else
- mvpp2_bm_bufs_free(port->dev->dev.parent,
- port->priv, new_pool, pkts_num);
-
- new_pool->pkt_size = pkt_size;
- new_pool->frag_size =
- SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
- MVPP2_SKB_SHINFO_SIZE;
+ struct mvpp2_port *port = arg;
+ struct mvpp2_bm_pool **pools_pcpu = port->priv->pools_pcpu;
+ int cpu = smp_processor_id();
+ int pool_id, pkt_size;
- /* Allocate buffers for this pool */
- num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
- if (num != pkts_num) {
- WARN(1, "pool %d: %d of %d allocated\n",
- new_pool->id, num, pkts_num);
- return NULL;
- }
- }
+ if (pools_pcpu[cpu])
+ return;
- mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
- MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
+ pool_id = mvpp2_bm_pool_get_id(MVPP2_BM_LONG) + cpu;
+ pkt_size = mvpp2_bm_pool_default_pkt_size(MVPP2_BM_LONG);
- return new_pool;
+ pools_pcpu[cpu] = mvpp2_bm_pool_use(port, pool_id, pkt_size);
}
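mvpp2_bm_pool_pcpu_use() is only invoked through on_each_cpu() (see mvpp2_swf_bm_pool_pcpu_init() below), so it runs once on every online CPU with interrupts disabled, which is what makes the bare smp_processor_id() call above safe.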
-/* Initialize pools for swf, shared buffers variant */
-static int mvpp2_swf_bm_pool_init_shared(struct mvpp2_port *port)
+/* Initialize pools for software forwarding (per-CPU recycling variant) */
+static int mvpp2_swf_bm_pool_pcpu_init(struct mvpp2_port *port)
{
- enum mvpp2_bm_pool_log_num long_log_pool, short_log_pool;
- int rxq;
+ enum mvpp2_bm_pool_type long_pool_type, short_pool_type;
+ int rxq, pkt_size, pool_id, cpu;
/* If port pkt_size is higher than 1518B:
* HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
* else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
*/
if (port->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
- long_log_pool = MVPP2_BM_JUMBO;
- short_log_pool = MVPP2_BM_LONG;
+ long_pool_type = MVPP2_BM_JUMBO;
+ short_pool_type = MVPP2_BM_LONG;
} else {
- long_log_pool = MVPP2_BM_LONG;
- short_log_pool = MVPP2_BM_SHORT;
+ long_pool_type = MVPP2_BM_LONG;
+ short_pool_type = MVPP2_BM_SHORT;
}
- if (!port->pool_long) {
- port->pool_long =
- mvpp2_bm_pool_use(port, long_log_pool,
- mvpp2_pools[long_log_pool].pkt_size);
+ /* First handle the per-CPU long pools,
+ * as they are used in both cases.
+ */
+ on_each_cpu(mvpp2_bm_pool_pcpu_use, port, 1);
+ /* Sanity check */
+ for_each_present_cpu(cpu) {
+ if (!port->priv->pools_pcpu[cpu])
+ return -ENOMEM;
+ }
+
+ if (!port->pool_long && long_pool_type == MVPP2_BM_JUMBO) {
+ /* HW Long pool - SW Jumbo pool */
+ pool_id = mvpp2_bm_pool_get_id(long_pool_type);
+ pkt_size = mvpp2_bm_pool_default_pkt_size(long_pool_type);
+
+ port->pool_long = mvpp2_bm_pool_use(port, pool_id, pkt_size);
if (!port->pool_long)
return -ENOMEM;
@@ -963,12 +1177,27 @@ static int mvpp2_swf_bm_pool_init_shared(struct mvpp2_port *port)
for (rxq = 0; rxq < port->nrxqs; rxq++)
mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
+
+ /* HW Short pool - SW Long pool (per-CPU) */
+ port->pool_short = port->priv->pools_pcpu[0];
+ for (rxq = 0; rxq < port->nrxqs; rxq++)
+ mvpp2_rxq_short_pool_set(port, rxq,
+ port->pool_short->id + rxq);
+
+ } else if (!port->pool_long) {
+ /* HW Long pool - SW Long pool (per-CPU) */
+ port->pool_long = port->priv->pools_pcpu[0];
+ for (rxq = 0; rxq < port->nrxqs; rxq++)
+ mvpp2_rxq_long_pool_set(port, rxq,
+ port->pool_long->id + rxq);
}
if (!port->pool_short) {
- port->pool_short =
- mvpp2_bm_pool_use(port, short_log_pool,
- mvpp2_pools[short_log_pool].pkt_size);
+ /* HW Short pool - SW Short pool */
+ pool_id = mvpp2_bm_pool_get_id(short_pool_type);
+ pkt_size = mvpp2_bm_pool_default_pkt_size(short_pool_type);
+
+ port->pool_short = mvpp2_bm_pool_use(port, pool_id, pkt_size);
if (!port->pool_short)
return -ENOMEM;
@@ -979,108 +1208,141 @@ static int mvpp2_swf_bm_pool_init_shared(struct mvpp2_port *port)
port->pool_short->id);
}
+ /* Fill per-CPU Long pools' port map */
+ for_each_present_cpu(cpu)
+ port->priv->pools_pcpu[cpu]->port_map |= BIT(port->id);
+
return 0;
}
-/* Initialize pools for swf, percpu buffers variant */
-static int mvpp2_swf_bm_pool_init_percpu(struct mvpp2_port *port)
+/* Initialize pools for software forwarding */
+static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
{
- struct mvpp2_bm_pool *bm_pool;
- int i;
-
- for (i = 0; i < port->nrxqs; i++) {
- bm_pool = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_SHORT, i,
- mvpp2_pools[MVPP2_BM_SHORT].pkt_size);
- if (!bm_pool)
- return -ENOMEM;
+ enum mvpp2_bm_pool_type long_pool_type, short_pool_type;
+ int rxq;
- bm_pool->port_map |= BIT(port->id);
- mvpp2_rxq_short_pool_set(port, i, bm_pool->id);
+ /* If port pkt_size is higher than 1518B:
+ * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
+ * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
+ */
+ if (port->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
+ long_pool_type = MVPP2_BM_JUMBO;
+ short_pool_type = MVPP2_BM_LONG;
+ } else {
+ long_pool_type = MVPP2_BM_LONG;
+ short_pool_type = MVPP2_BM_SHORT;
}
- for (i = 0; i < port->nrxqs; i++) {
- bm_pool = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_LONG, i + port->nrxqs,
- mvpp2_pools[MVPP2_BM_LONG].pkt_size);
- if (!bm_pool)
+ if (!port->pool_long) {
+ port->pool_long =
+ mvpp2_bm_pool_use(port,
+ mvpp2_bm_pool_get_id(long_pool_type),
+ mvpp2_bm_pool_default_pkt_size(long_pool_type));
+ if (!port->pool_long)
return -ENOMEM;
- bm_pool->port_map |= BIT(port->id);
- mvpp2_rxq_long_pool_set(port, i, bm_pool->id);
- }
-
- port->pool_long = NULL;
- port->pool_short = NULL;
+ port->pool_long->port_map |= BIT(port->id);
- return 0;
-}
+ for (rxq = 0; rxq < port->nrxqs; rxq++)
+ mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
+ }
-static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
-{
- if (port->priv->percpu_pools)
- return mvpp2_swf_bm_pool_init_percpu(port);
- else
- return mvpp2_swf_bm_pool_init_shared(port);
-}
+ if (!port->pool_short) {
+ port->pool_short =
+ mvpp2_bm_pool_use(port,
+ mvpp2_bm_pool_get_id(short_pool_type),
+ mvpp2_bm_pool_default_pkt_size(short_pool_type));
+ if (!port->pool_short)
+ return -ENOMEM;
-static void mvpp2_set_hw_csum(struct mvpp2_port *port,
- enum mvpp2_bm_pool_log_num new_long_pool)
-{
- const netdev_features_t csums = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ port->pool_short->port_map |= BIT(port->id);
- /* Update L4 checksum when jumbo enable/disable on port.
- * Only port 0 supports hardware checksum offload due to
- * the Tx FIFO size limitation.
- * Also, don't set NETIF_F_HW_CSUM because L3_offset in TX descriptor
- * has 7 bits, so the maximum L3 offset is 128.
- */
- if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
- port->dev->features &= ~csums;
- port->dev->hw_features &= ~csums;
- } else {
- port->dev->features |= csums;
- port->dev->hw_features |= csums;
+ for (rxq = 0; rxq < port->nrxqs; rxq++)
+ mvpp2_rxq_short_pool_set(port, rxq,
+ port->pool_short->id);
}
+
+ return 0;
}
static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
{
struct mvpp2_port *port = netdev_priv(dev);
- enum mvpp2_bm_pool_log_num new_long_pool;
+ enum mvpp2_bm_pool_type new_long_pool_type;
+ struct mvpp2_bm_pool **pools_pcpu = port->priv->pools_pcpu;
int pkt_size = MVPP2_RX_PKT_SIZE(mtu);
-
- if (port->priv->percpu_pools)
- goto out_set;
+ int err, cpu;
/* If port MTU is higher than 1518B:
* HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
* else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
*/
if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
- new_long_pool = MVPP2_BM_JUMBO;
+ new_long_pool_type = MVPP2_BM_JUMBO;
else
- new_long_pool = MVPP2_BM_LONG;
+ new_long_pool_type = MVPP2_BM_LONG;
+
+ if (new_long_pool_type != port->pool_long->type) {
+ if (port->tx_fc) {
+ if (recycle) {
+ for_each_present_cpu(cpu)
+ mvpp2_bm_pool_update_fc(port,
+ pools_pcpu[cpu],
+ false);
+ } else if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
+ mvpp2_bm_pool_update_fc(port,
+ port->pool_short,
+ false);
+ else
+ mvpp2_bm_pool_update_fc(port, port->pool_long,
+ false);
+ }
- if (new_long_pool != port->pool_long->id) {
/* Remove port from old short & long pool */
- port->pool_long = mvpp2_bm_pool_use(port, port->pool_long->id,
- port->pool_long->pkt_size);
port->pool_long->port_map &= ~BIT(port->id);
port->pool_long = NULL;
- port->pool_short = mvpp2_bm_pool_use(port, port->pool_short->id,
- port->pool_short->pkt_size);
port->pool_short->port_map &= ~BIT(port->id);
port->pool_short = NULL;
port->pkt_size = pkt_size;
/* Add port to new short & long pool */
- mvpp2_swf_bm_pool_init(port);
+ if (recycle) {
+ for_each_present_cpu(cpu)
+ pools_pcpu[cpu]->port_map &= ~BIT(port->id);
+ err = mvpp2_swf_bm_pool_pcpu_init(port);
+ } else {
+ err = mvpp2_swf_bm_pool_init(port);
+ }
+ if (err)
+ return err;
- mvpp2_set_hw_csum(port, new_long_pool);
+ if (port->tx_fc) {
+ if (recycle) {
+ for_each_present_cpu(cpu)
+ mvpp2_bm_pool_update_fc(port,
+ pools_pcpu[cpu],
+ false);
+ } else if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
+ mvpp2_bm_pool_update_fc(port, port->pool_long,
+ true);
+ else
+ mvpp2_bm_pool_update_fc(port, port->pool_short,
+ true);
+ }
+
+ /* Update L4 checksum offload when jumbo is enabled/disabled on the port */
+ if (new_long_pool_type == MVPP2_BM_JUMBO && port->id != 0) {
+ dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+ dev->hw_features &= ~(NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM);
+ } else {
+ dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ }
}
-out_set:
dev->mtu = mtu;
dev->wanted_features = dev->features;
@@ -1141,6 +1403,9 @@ static void mvpp2_interrupts_mask(void *arg)
mvpp2_thread_write(port->priv,
mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
+ mvpp2_thread_write(port->priv,
+ mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
+ MVPP2_ISR_RX_ERR_CAUSE_REG(port->id), 0);
}
/* Unmask the current thread's Rx/Tx interrupts.
@@ -1156,14 +1421,20 @@ static void mvpp2_interrupts_unmask(void *arg)
if (smp_processor_id() >= port->priv->nthreads)
return;
- val = MVPP2_CAUSE_MISC_SUM_MASK |
- MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version);
+ if (port->flags & MVPP22_F_IF_MUSDK)
+ return;
+
+ val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(mvpp21_variant);
if (port->has_tx_irqs)
val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
mvpp2_thread_write(port->priv,
mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
+ mvpp2_thread_write(port->priv,
+ mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
+ MVPP2_ISR_RX_ERR_CAUSE_REG(port->id),
+ MVPP2_ISR_RX_ERR_CAUSE_NONOCC_MASK);
}
static void
@@ -1172,13 +1443,13 @@ mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
u32 val;
int i;
- if (port->priv->hw_version != MVPP22)
+ if (port->priv->hw_version == MVPP21)
return;
if (mask)
val = 0;
else
- val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(MVPP22);
+ val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(mvpp21_variant);
for (i = 0; i < port->nqvecs; i++) {
struct mvpp2_queue_vector *v = port->qvecs + i;
@@ -1188,6 +1459,9 @@ mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
mvpp2_thread_write(port->priv, v->sw_thread_id,
MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
+ mvpp2_thread_write(port->priv, v->sw_thread_id,
+ MVPP2_ISR_RX_ERR_CAUSE_REG(port->id),
+ MVPP2_ISR_RX_ERR_CAUSE_NONOCC_MASK);
}
}
@@ -1203,12 +1477,6 @@ static bool mvpp2_port_supports_rgmii(struct mvpp2_port *port)
}
/* Port configuration routines */
-static bool mvpp2_is_xlg(phy_interface_t interface)
-{
- return interface == PHY_INTERFACE_MODE_10GBASER ||
- interface == PHY_INTERFACE_MODE_XAUI;
-}
-
static void mvpp2_modify(void __iomem *ptr, u32 mask, u32 set)
{
u32 old, val;
@@ -1237,6 +1505,21 @@ static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
}
+static void mvpp22_gop_init_mii(struct mvpp2_port *port)
+{
+ struct mvpp2 *priv = port->priv;
+ u32 val;
+
+ regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
+ val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT;
+ regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);
+
+ regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
+ val |= GENCONF_CTRL0_PORT1_RGMII_MII;
+ val &= ~GENCONF_CTRL0_PORT1_RGMII;
+ regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
+}
+
static void mvpp22_gop_init_sgmii(struct mvpp2_port *port)
{
struct mvpp2 *priv = port->priv;
@@ -1257,27 +1540,92 @@ static void mvpp22_gop_init_sgmii(struct mvpp2_port *port)
}
}
-static void mvpp22_gop_init_10gkr(struct mvpp2_port *port)
+static void mvpp22_gop_init_xpcs(struct mvpp2_port *port)
{
struct mvpp2 *priv = port->priv;
- void __iomem *mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
void __iomem *xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
u32 val;
+ /* Reset the XPCS when reconfiguring the lanes */
+ val = readl(xpcs + MVPP22_XPCS_CFG0);
+ writel(val & ~MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0);
+
+ /* XPCS */
val = readl(xpcs + MVPP22_XPCS_CFG0);
val &= ~(MVPP22_XPCS_CFG0_PCS_MODE(0x3) |
MVPP22_XPCS_CFG0_ACTIVE_LANE(0x3));
val |= MVPP22_XPCS_CFG0_ACTIVE_LANE(2);
writel(val, xpcs + MVPP22_XPCS_CFG0);
+ /* Release lanes from reset */
+ val = readl(xpcs + MVPP22_XPCS_CFG0);
+ writel(val | MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0);
+
+}
+
+static void mvpp22_gop_init_mpcs(struct mvpp2_port *port)
+{
+ struct mvpp2 *priv = port->priv;
+ void __iomem *mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
+ u32 val;
+
+ /* MPCS */
val = readl(mpcs + MVPP22_MPCS_CTRL);
val &= ~MVPP22_MPCS_CTRL_FWD_ERR_CONN;
writel(val, mpcs + MVPP22_MPCS_CTRL);
val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
- val &= ~MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7);
+ val &= ~(MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7) | MAC_CLK_RESET_MAC |
+ MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX);
val |= MVPP22_MPCS_CLK_RESET_DIV_RATIO(1);
writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
+
+ val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET;
+ val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX;
+ writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
+}
+
+static void mvpp22_gop_fca_enable_periodic(struct mvpp2_port *port, bool en)
+{
+ struct mvpp2 *priv = port->priv;
+ void __iomem *fca = priv->iface_base + MVPP22_FCA_BASE(port->gop_id);
+ u32 val;
+
+ val = readl(fca + MVPP22_FCA_CONTROL_REG);
+ val &= ~MVPP22_FCA_ENABLE_PERIODIC;
+ if (en)
+ val |= MVPP22_FCA_ENABLE_PERIODIC;
+ writel(val, fca + MVPP22_FCA_CONTROL_REG);
+}
+
+static void mvpp22_gop_fca_set_timer(struct mvpp2_port *port, u32 timer)
+{
+ struct mvpp2 *priv = port->priv;
+ void __iomem *fca = priv->iface_base + MVPP22_FCA_BASE(port->gop_id);
+ u32 lsb, msb;
+
+ lsb = timer & MVPP22_FCA_REG_MASK;
+ msb = timer >> MVPP22_FCA_REG_SIZE;
+
+ writel(lsb, fca + MVPP22_PERIODIC_COUNTER_LSB_REG);
+ writel(msb, fca + MVPP22_PERIODIC_COUNTER_MSB_REG);
+}
+
+/* Set the Flow Control timer x140 faster than the pause quanta to ensure
+ * that the link partner won't send traffic while the port is in XOFF mode.
+ */
+static void mvpp22_gop_fca_set_periodic_timer(struct mvpp2_port *port)
+{
+ u32 timer;
+
+ timer = (port->priv->tclk / (USEC_PER_SEC * FC_CLK_DIVIDER))
+ * FC_QUANTA;
+
+ mvpp22_gop_fca_enable_periodic(port, false);
+
+ mvpp22_gop_fca_set_timer(port, timer);
+
+ mvpp22_gop_fca_enable_periodic(port, true);
}
static int mvpp22_gop_init(struct mvpp2_port *port)
@@ -1289,6 +1637,11 @@ static int mvpp22_gop_init(struct mvpp2_port *port)
return 0;
switch (port->phy_interface) {
+ case PHY_INTERFACE_MODE_MII:
+ if (port->gop_id == 0 || port->gop_id == 2)
+ goto invalid_conf;
+ mvpp22_gop_init_mii(port);
+ break;
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_RXID:
@@ -1300,13 +1653,22 @@ static int mvpp22_gop_init(struct mvpp2_port *port)
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_1000BASEX:
case PHY_INTERFACE_MODE_2500BASEX:
+ case PHY_INTERFACE_MODE_2500BASET:
mvpp22_gop_init_sgmii(port);
break;
+ case PHY_INTERFACE_MODE_RXAUI:
+ if (port->gop_id != 0)
+ goto invalid_conf;
+ mvpp22_gop_init_xpcs(port);
+ break;
case PHY_INTERFACE_MODE_10GBASER:
- if (!mvpp2_port_supports_xlg(port))
+ case PHY_INTERFACE_MODE_5GKR:
+ if (!port->has_xlg_mac)
goto invalid_conf;
- mvpp22_gop_init_10gkr(port);
+ mvpp22_gop_init_mpcs(port);
break;
+ case PHY_INTERFACE_MODE_INTERNAL:
+ return 0;
default:
goto unsupported_conf;
}
@@ -1324,6 +1686,8 @@ static int mvpp22_gop_init(struct mvpp2_port *port)
val |= GENCONF_SOFT_RESET1_GOP;
regmap_write(priv->sysctrl_base, GENCONF_SOFT_RESET1, val);
+ mvpp22_gop_fca_set_periodic_timer(port);
+
unsupported_conf:
return 0;
@@ -1338,17 +1702,21 @@ static void mvpp22_gop_unmask_irq(struct mvpp2_port *port)
if (phy_interface_mode_is_rgmii(port->phy_interface) ||
phy_interface_mode_is_8023z(port->phy_interface) ||
- port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
+ port->phy_interface == PHY_INTERFACE_MODE_MII ||
+ port->phy_interface == PHY_INTERFACE_MODE_SGMII ||
+ port->phy_interface == PHY_INTERFACE_MODE_2500BASET) {
/* Enable the GMAC link status irq for this port */
val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
val |= MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
}
- if (mvpp2_port_supports_xlg(port)) {
+ if (port->has_xlg_mac) {
/* Enable the XLG/GIG irqs for this port */
val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
- if (mvpp2_is_xlg(port->phy_interface))
+ if (port->phy_interface == PHY_INTERFACE_MODE_10GKR ||
+ port->phy_interface == PHY_INTERFACE_MODE_5GKR ||
+ port->phy_interface == PHY_INTERFACE_MODE_INTERNAL)
val |= MVPP22_XLG_EXT_INT_MASK_XLG;
else
val |= MVPP22_XLG_EXT_INT_MASK_GIG;
@@ -1360,7 +1728,7 @@ static void mvpp22_gop_mask_irq(struct mvpp2_port *port)
{
u32 val;
- if (mvpp2_port_supports_xlg(port)) {
+ if (port->has_xlg_mac) {
val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
val &= ~(MVPP22_XLG_EXT_INT_MASK_XLG |
MVPP22_XLG_EXT_INT_MASK_GIG);
@@ -1369,7 +1737,9 @@ static void mvpp22_gop_mask_irq(struct mvpp2_port *port)
if (phy_interface_mode_is_rgmii(port->phy_interface) ||
phy_interface_mode_is_8023z(port->phy_interface) ||
- port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
+ port->phy_interface == PHY_INTERFACE_MODE_MII ||
+ port->phy_interface == PHY_INTERFACE_MODE_SGMII ||
+ port->phy_interface == PHY_INTERFACE_MODE_2500BASET) {
val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
val &= ~MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
@@ -1387,13 +1757,14 @@ static void mvpp22_gop_setup_irq(struct mvpp2_port *port)
if (port->phylink ||
phy_interface_mode_is_rgmii(port->phy_interface) ||
phy_interface_mode_is_8023z(port->phy_interface) ||
+ port->phy_interface == PHY_INTERFACE_MODE_MII ||
port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
val = readl(port->base + MVPP22_GMAC_INT_MASK);
val |= MVPP22_GMAC_INT_MASK_LINK_STAT;
writel(val, port->base + MVPP22_GMAC_INT_MASK);
}
- if (mvpp2_port_supports_xlg(port)) {
+ if (port->has_xlg_mac) {
val = readl(port->base + MVPP22_XLG_INT_MASK);
val |= MVPP22_XLG_INT_MASK_LINK;
writel(val, port->base + MVPP22_XLG_INT_MASK);
@@ -1435,10 +1806,16 @@ static void mvpp2_port_enable(struct mvpp2_port *port)
{
u32 val;
- if (mvpp2_port_supports_xlg(port) &&
- mvpp2_is_xlg(port->phy_interface)) {
+ if (port->phy_interface == PHY_INTERFACE_MODE_INTERNAL)
+ return;
+
+ if (port->has_xlg_mac &&
+ (port->phy_interface == PHY_INTERFACE_MODE_RXAUI ||
+ port->phy_interface == PHY_INTERFACE_MODE_10GKR ||
+ port->phy_interface == PHY_INTERFACE_MODE_5GKR)) {
val = readl(port->base + MVPP22_XLG_CTRL0_REG);
- val |= MVPP22_XLG_CTRL0_PORT_EN;
+ val |= MVPP22_XLG_CTRL0_PORT_EN |
+ MVPP22_XLG_CTRL0_MAC_RESET_DIS;
val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS;
writel(val, port->base + MVPP22_XLG_CTRL0_REG);
} else {
@@ -1453,16 +1830,20 @@ static void mvpp2_port_disable(struct mvpp2_port *port)
{
u32 val;
- if (mvpp2_port_supports_xlg(port) &&
- mvpp2_is_xlg(port->phy_interface)) {
+ if (port->phy_interface == PHY_INTERFACE_MODE_INTERNAL)
+ return;
+
+ if (port->has_xlg_mac &&
+ (port->phy_interface == PHY_INTERFACE_MODE_RXAUI ||
+ port->phy_interface == PHY_INTERFACE_MODE_10GKR ||
+ port->phy_interface == PHY_INTERFACE_MODE_5GKR)) {
val = readl(port->base + MVPP22_XLG_CTRL0_REG);
val &= ~MVPP22_XLG_CTRL0_PORT_EN;
writel(val, port->base + MVPP22_XLG_CTRL0_REG);
- }
-
- val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
- val &= ~(MVPP2_GMAC_PORT_EN_MASK);
- writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
+ } else {
+ val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
+ val &= ~(MVPP2_GMAC_PORT_EN_MASK);
+ writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}
/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
@@ -1525,17 +1906,6 @@ static u64 mvpp2_read_count(struct mvpp2_port *port,
return val;
}
-/* Some counters are accessed indirectly by first writing an index to
- * MVPP2_CTRS_IDX. The index can represent various resources depending on the
- * register we access, it can be a hit counter for some classification tables,
- * a counter specific to a rxq, a txq or a buffer pool.
- */
-static u32 mvpp2_read_index(struct mvpp2 *priv, u32 index, u32 reg)
-{
- mvpp2_write(priv, MVPP2_CTRS_IDX, index);
- return mvpp2_read(priv, reg);
-}
-
/* Due to the fact that software statistics and hardware statistics are, by
* design, incremented at different moments in the chain of packet processing,
* it is very likely that incoming packets could have been dropped after being
@@ -1545,7 +1915,7 @@ static u32 mvpp2_read_index(struct mvpp2 *priv, u32 index, u32 reg)
* Hence, statistics gathered from userspace with ifconfig (software) and
* ethtool (hardware) cannot be compared.
*/
-static const struct mvpp2_ethtool_counter mvpp2_ethtool_mib_regs[] = {
+static const struct mvpp2_ethtool_counter mvpp2_ethtool_regs[] = {
{ MVPP2_MIB_GOOD_OCTETS_RCVD, "good_octets_received", true },
{ MVPP2_MIB_BAD_OCTETS_RCVD, "bad_octets_received" },
{ MVPP2_MIB_CRC_ERRORS_SENT, "crc_errors_sent" },
@@ -1566,38 +1936,47 @@ static const struct mvpp2_ethtool_counter mvpp2_ethtool_mib_regs[] = {
{ MVPP2_MIB_FC_RCVD, "fc_received" },
{ MVPP2_MIB_RX_FIFO_OVERRUN, "rx_fifo_overrun" },
{ MVPP2_MIB_UNDERSIZE_RCVD, "undersize_received" },
- { MVPP2_MIB_FRAGMENTS_RCVD, "fragments_received" },
+ { MVPP2_MIB_FRAGMENTS_ERR_RCVD, "fragments_err_received" },
{ MVPP2_MIB_OVERSIZE_RCVD, "oversize_received" },
{ MVPP2_MIB_JABBER_RCVD, "jabber_received" },
{ MVPP2_MIB_MAC_RCV_ERROR, "mac_receive_error" },
{ MVPP2_MIB_BAD_CRC_EVENT, "bad_crc_event" },
{ MVPP2_MIB_COLLISION, "collision" },
{ MVPP2_MIB_LATE_COLLISION, "late_collision" },
+#define MVPP2_LAST_MIB MVPP2_MIB_LATE_COLLISION
+
+ /* Extend counters */
+ { MVPP2_OVERRUN_DROP_REG(0), "rx_ppv2_overrun" },
+ { MVPP2_CLS_DROP_REG(0), "rx_cls_drop" },
+ { MVPP2_RX_PKT_FULLQ_DROP_REG, "rx_fullq_drop" },
+ { MVPP2_RX_PKT_EARLY_DROP_REG, "rx_early_drop" },
+ { MVPP2_RX_PKT_BM_DROP_REG, "rx_bm_drop" },
+
+ /* Extend SW counters (not registers) */
+#define MVPP2_FIRST_CNT_SW 0xf000
+#define MVPP2_TX_GUARD_CNT(cpu) (MVPP2_FIRST_CNT_SW + cpu)
+ { MVPP2_TX_GUARD_CNT(0), "tx-guard-cpu0" },
+ { MVPP2_TX_GUARD_CNT(1), "tx-guard-cpu1" },
+ { MVPP2_TX_GUARD_CNT(2), "tx-guard-cpu2" },
+ { MVPP2_TX_GUARD_CNT(3), "tx-guard-cpu3" },
};
-static const struct mvpp2_ethtool_counter mvpp2_ethtool_port_regs[] = {
- { MVPP2_OVERRUN_ETH_DROP, "rx_fifo_or_parser_overrun_drops" },
- { MVPP2_CLS_ETH_DROP, "rx_classifier_drops" },
+static const char mvpp22_priv_flags_strings[][ETH_GSTRING_LEN] = {
+ "musdk",
};
-static const struct mvpp2_ethtool_counter mvpp2_ethtool_txq_regs[] = {
- { MVPP2_TX_DESC_ENQ_CTR, "txq_%d_desc_enqueue" },
- { MVPP2_TX_DESC_ENQ_TO_DDR_CTR, "txq_%d_desc_enqueue_to_ddr" },
- { MVPP2_TX_BUFF_ENQ_TO_DDR_CTR, "txq_%d_buff_euqueue_to_ddr" },
- { MVPP2_TX_DESC_ENQ_HW_FWD_CTR, "txq_%d_desc_hardware_forwarded" },
- { MVPP2_TX_PKTS_DEQ_CTR, "txq_%d_packets_dequeued" },
- { MVPP2_TX_PKTS_FULL_QUEUE_DROP_CTR, "txq_%d_queue_full_drops" },
- { MVPP2_TX_PKTS_EARLY_DROP_CTR, "txq_%d_packets_early_drops" },
- { MVPP2_TX_PKTS_BM_DROP_CTR, "txq_%d_packets_bm_drops" },
- { MVPP2_TX_PKTS_BM_MC_DROP_CTR, "txq_%d_packets_rep_bm_drops" },
-};
+#define MVPP22_F_IF_MUSDK_PRIV BIT(0)
-static const struct mvpp2_ethtool_counter mvpp2_ethtool_rxq_regs[] = {
- { MVPP2_RX_DESC_ENQ_CTR, "rxq_%d_desc_enqueue" },
- { MVPP2_RX_PKTS_FULL_QUEUE_DROP_CTR, "rxq_%d_queue_full_drops" },
- { MVPP2_RX_PKTS_EARLY_DROP_CTR, "rxq_%d_packets_early_drops" },
- { MVPP2_RX_PKTS_BM_DROP_CTR, "rxq_%d_packets_bm_drops" },
-};
+static int mvpp2_ethtool_get_mib_cntr_size(void)
+{
+ int i = 0;
+
+ while (i < ARRAY_SIZE(mvpp2_ethtool_regs)) {
+ if (mvpp2_ethtool_regs[i++].offset == MVPP2_LAST_MIB)
+ break;
+ }
+ return i; /* mib_size */
+}
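The consolidated mvpp2_ethtool_regs[] array is ordered as: the MIB counters up to the MVPP2_LAST_MIB sentinel, then the extended per-port/per-queue hardware drop counters, then the software-only counters starting at the MVPP2_FIRST_CNT_SW pseudo-offset. mvpp2_hw_get_stats() and mvpp2_hw_clear_stats() below both rely on exactly this ordering.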
static const struct mvpp2_ethtool_counter mvpp2_ethtool_xdp[] = {
{ ETHTOOL_XDP_REDIRECT, "rx_xdp_redirect", },
@@ -1609,55 +1988,63 @@ static const struct mvpp2_ethtool_counter mvpp2_ethtool_xdp[] = {
{ ETHTOOL_XDP_XMIT_ERR, "tx_xdp_xmit_errors", },
};
-#define MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs) (ARRAY_SIZE(mvpp2_ethtool_mib_regs) + \
- ARRAY_SIZE(mvpp2_ethtool_port_regs) + \
- (ARRAY_SIZE(mvpp2_ethtool_txq_regs) * (ntxqs)) + \
- (ARRAY_SIZE(mvpp2_ethtool_rxq_regs) * (nrxqs)) + \
- ARRAY_SIZE(mvpp2_ethtool_xdp))
-
-static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset,
- u8 *data)
+static int mvpp2_ethtool_get_cntr_index(u32 offset)
{
- struct mvpp2_port *port = netdev_priv(netdev);
- int i, q;
-
- if (sset != ETH_SS_STATS)
- return;
+ int i = 0;
- for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++) {
- strscpy(data, mvpp2_ethtool_mib_regs[i].string,
- ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
+ while (i < ARRAY_SIZE(mvpp2_ethtool_regs)) {
+ if (mvpp2_ethtool_regs[i].offset == offset)
+ break;
+ i++;
}
+ return i;
+}
- for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++) {
- strscpy(data, mvpp2_ethtool_port_regs[i].string,
- ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
- }
+/* hw_get_stats - update the ethtool_stats accumulator from HW-registers
+ * The HW-registers/counters are cleared on read.
+ */
+static void mvpp2_hw_get_stats(struct mvpp2_port *port, u64 *pstats)
+{
+ int i, mib_size, queue, cpu;
+ unsigned int reg_offs;
+ u32 val, cls_drops;
+ u64 *ptmp;
- for (q = 0; q < port->ntxqs; q++) {
- for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++) {
- snprintf(data, ETH_GSTRING_LEN,
- mvpp2_ethtool_txq_regs[i].string, q);
- data += ETH_GSTRING_LEN;
- }
- }
+ mib_size = mvpp2_ethtool_get_mib_cntr_size();
- for (q = 0; q < port->nrxqs; q++) {
- for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++) {
- snprintf(data, ETH_GSTRING_LEN,
- mvpp2_ethtool_rxq_regs[i].string,
- q);
- data += ETH_GSTRING_LEN;
+ cls_drops = mvpp2_read(port->priv, MVPP2_OVERRUN_DROP_REG(port->id));
+
+ for (i = 0; i < mib_size; i++) {
+ if (mvpp2_ethtool_regs[i].offset == MVPP2_MIB_COLLISION) {
+ val = mvpp2_read_count(port, &mvpp2_ethtool_regs[i]);
+ port->dev->stats.collisions += val;
+ *pstats++ += val;
+ continue;
+ }
+ *pstats++ += mvpp2_read_count(port, &mvpp2_ethtool_regs[i]);
+ }
+
+ /* Extend HW counters */
+ *pstats++ += cls_drops;
+ *pstats++ += mvpp2_read(port->priv, MVPP2_CLS_DROP_REG(port->id));
+ ptmp = pstats;
+ queue = port->first_rxq;
+ while (queue < (port->first_rxq + port->nrxqs)) {
+ mvpp2_write(port->priv, MVPP2_CNT_IDX_REG, queue++);
+ pstats = ptmp;
+ i = mib_size + 2;
+ while (i < ARRAY_SIZE(mvpp2_ethtool_regs)) {
+ reg_offs = mvpp2_ethtool_regs[i++].offset;
+ if (reg_offs == MVPP2_FIRST_CNT_SW)
+ break;
+ *pstats++ += mvpp2_read(port->priv, reg_offs);
}
}
- for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_xdp); i++) {
- strscpy(data, mvpp2_ethtool_xdp[i].string,
- ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
- }
+
+ /* Extended SW counters (starting at i == MVPP2_FIRST_CNT_SW) */
+ for_each_present_cpu(cpu)
+ *pstats++ = mvpp2_tx_done_guard_get_stats(port, cpu);
}
static void
@@ -1699,68 +2086,47 @@ mvpp2_get_xdp_stats(struct mvpp2_port *port, struct mvpp2_pcpu_stats *xdp_stats)
}
}
-static void mvpp2_read_stats(struct mvpp2_port *port)
+static void mvpp2_hw_clear_stats(struct mvpp2_port *port)
{
- struct mvpp2_pcpu_stats xdp_stats = {};
- const struct mvpp2_ethtool_counter *s;
- u64 *pstats;
- int i, q;
-
- pstats = port->ethtool_stats;
+ int i, mib_size, queue;
+ unsigned int reg_offs;
- for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++)
- *pstats++ += mvpp2_read_count(port, &mvpp2_ethtool_mib_regs[i]);
+ mib_size = mvpp2_ethtool_get_mib_cntr_size();
- for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++)
- *pstats++ += mvpp2_read(port->priv,
- mvpp2_ethtool_port_regs[i].offset +
- 4 * port->id);
+ for (i = 0; i < mib_size; i++)
+ mvpp2_read_count(port, &mvpp2_ethtool_regs[i]);
- for (q = 0; q < port->ntxqs; q++)
- for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++)
- *pstats++ += mvpp2_read_index(port->priv,
- MVPP22_CTRS_TX_CTR(port->id, q),
- mvpp2_ethtool_txq_regs[i].offset);
-
- /* Rxqs are numbered from 0 from the user standpoint, but not from the
- * driver's. We need to add the port->first_rxq offset.
- */
- for (q = 0; q < port->nrxqs; q++)
- for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++)
- *pstats++ += mvpp2_read_index(port->priv,
- port->first_rxq + q,
- mvpp2_ethtool_rxq_regs[i].offset);
-
- /* Gather XDP Statistics */
- mvpp2_get_xdp_stats(port, &xdp_stats);
-
- for (i = 0, s = mvpp2_ethtool_xdp;
- s < mvpp2_ethtool_xdp + ARRAY_SIZE(mvpp2_ethtool_xdp);
- s++, i++) {
- switch (s->offset) {
- case ETHTOOL_XDP_REDIRECT:
- *pstats++ = xdp_stats.xdp_redirect;
- break;
- case ETHTOOL_XDP_PASS:
- *pstats++ = xdp_stats.xdp_pass;
- break;
- case ETHTOOL_XDP_DROP:
- *pstats++ = xdp_stats.xdp_drop;
- break;
- case ETHTOOL_XDP_TX:
- *pstats++ = xdp_stats.xdp_tx;
- break;
- case ETHTOOL_XDP_TX_ERR:
- *pstats++ = xdp_stats.xdp_tx_err;
- break;
- case ETHTOOL_XDP_XMIT:
- *pstats++ = xdp_stats.xdp_xmit;
- break;
- case ETHTOOL_XDP_XMIT_ERR:
- *pstats++ = xdp_stats.xdp_xmit_err;
- break;
+ /* Extend counters */
+ mvpp2_read(port->priv, MVPP2_OVERRUN_DROP_REG(port->id));
+ mvpp2_read(port->priv, MVPP2_CLS_DROP_REG(port->id));
+ queue = port->first_rxq;
+ while (queue < (port->first_rxq + port->nrxqs)) {
+ mvpp2_write(port->priv, MVPP2_CNT_IDX_REG, queue++);
+ i = mib_size + 2;
+ while (i < ARRAY_SIZE(mvpp2_ethtool_regs)) {
+ reg_offs = mvpp2_ethtool_regs[i++].offset;
+ if (reg_offs == MVPP2_FIRST_CNT_SW)
+ break;
+ mvpp2_read(port->priv, reg_offs);
}
}
+	/* Extended SW counters (i = MVPP2_FIRST_CNT_SW) are not cleared */
+}
+
+static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset,
+ u8 *data)
+{
+	int i;
+
+	switch (sset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
+ memcpy(data + i * ETH_GSTRING_LEN,
+ &mvpp2_ethtool_regs[i].string, ETH_GSTRING_LEN);
+ break;
+ case ETH_SS_PRIV_FLAGS:
+ memcpy(data, mvpp22_priv_flags_strings,
+ ARRAY_SIZE(mvpp22_priv_flags_strings) * ETH_GSTRING_LEN);
+ }
}
static void mvpp2_gather_hw_statistics(struct work_struct *work)
@@ -1769,109 +2135,69 @@ static void mvpp2_gather_hw_statistics(struct work_struct *work)
struct mvpp2_port *port = container_of(del_work, struct mvpp2_port,
stats_work);
+	/* Update the statistics buffer from queued work only, not from ethtool -S */
mutex_lock(&port->gather_stats_lock);
-
- mvpp2_read_stats(port);
-
- /* No need to read again the counters right after this function if it
- * was called asynchronously by the user (ie. use of ethtool).
- */
- cancel_delayed_work(&port->stats_work);
+ mvpp2_hw_get_stats(port, port->ethtool_stats);
+ mutex_unlock(&port->gather_stats_lock);
queue_delayed_work(port->priv->stats_queue, &port->stats_work,
MVPP2_MIB_COUNTERS_STATS_DELAY);
-
- mutex_unlock(&port->gather_stats_lock);
}
static void mvpp2_ethtool_get_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct mvpp2_port *port = netdev_priv(dev);
+ int cls_drp, fc_rcv;
- /* Update statistics for the given port, then take the lock to avoid
- * concurrent accesses on the ethtool_stats structure during its copy.
+	/* Use the statistics already accumulated in ethtool_stats by the queued
+	 * work and copy them, under the mutex, into the given ethtool buffer.
*/
- mvpp2_gather_hw_statistics(&port->stats_work.work);
mutex_lock(&port->gather_stats_lock);
memcpy(data, port->ethtool_stats,
- sizeof(u64) * MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs));
+ sizeof(u64) * ARRAY_SIZE(mvpp2_ethtool_regs));
mutex_unlock(&port->gather_stats_lock);
+
+ /* Do not count flow control receive frames as classifier drops */
+ cls_drp = mvpp2_ethtool_get_cntr_index(MVPP2_CLS_DROP_REG(0));
+ fc_rcv = mvpp2_ethtool_get_cntr_index(MVPP2_MIB_FC_RCVD);
+ data[cls_drp] =
+ data[fc_rcv] > data[cls_drp] ? 0 : data[cls_drp] - data[fc_rcv];
}
static int mvpp2_ethtool_get_sset_count(struct net_device *dev, int sset)
{
struct mvpp2_port *port = netdev_priv(dev);
- if (sset == ETH_SS_STATS)
- return MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs);
-
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(mvpp2_ethtool_regs);
+ case ETH_SS_PRIV_FLAGS:
+ return (port->priv->hw_version == MVPP21) ?
+ 0 : ARRAY_SIZE(mvpp22_priv_flags_strings);
+ }
return -EOPNOTSUPP;
}
-static void mvpp2_mac_reset_assert(struct mvpp2_port *port)
+static void mvpp2_port_reset(struct mvpp2_port *port)
{
u32 val;
+ /* Read the GOP statistics to reset the hardware counters */
+ mvpp2_hw_clear_stats(port);
+
val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) |
MVPP2_GMAC_PORT_RESET_MASK;
writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
- if (port->priv->hw_version == MVPP22 && port->gop_id == 0) {
+ if (port->has_xlg_mac) {
+ /* Set the XLG MAC in reset */
val = readl(port->base + MVPP22_XLG_CTRL0_REG) &
~MVPP22_XLG_CTRL0_MAC_RESET_DIS;
writel(val, port->base + MVPP22_XLG_CTRL0_REG);
- }
-}
-
-static void mvpp22_pcs_reset_assert(struct mvpp2_port *port)
-{
- struct mvpp2 *priv = port->priv;
- void __iomem *mpcs, *xpcs;
- u32 val;
-
- if (port->priv->hw_version != MVPP22 || port->gop_id != 0)
- return;
-
- mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
- xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
-
- val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
- val &= ~(MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX);
- val |= MVPP22_MPCS_CLK_RESET_DIV_SET;
- writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
-
- val = readl(xpcs + MVPP22_XPCS_CFG0);
- writel(val & ~MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0);
-}
-
-static void mvpp22_pcs_reset_deassert(struct mvpp2_port *port)
-{
- struct mvpp2 *priv = port->priv;
- void __iomem *mpcs, *xpcs;
- u32 val;
-
- if (port->priv->hw_version != MVPP22 || port->gop_id != 0)
- return;
-
- mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
- xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
-
- switch (port->phy_interface) {
- case PHY_INTERFACE_MODE_10GBASER:
- val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
- val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX |
- MAC_CLK_RESET_SD_TX;
- val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET;
- writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
- break;
- case PHY_INTERFACE_MODE_XAUI:
- case PHY_INTERFACE_MODE_RXAUI:
- val = readl(xpcs + MVPP22_XPCS_CFG0);
- writel(val | MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0);
- break;
- default:
- break;
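+		/* Busy-wait until the MAC reset-disable bit reads back as
+		 * cleared, i.e. the XLG MAC is really in reset.
+		 */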
+ while (readl(port->base + MVPP22_XLG_CTRL0_REG) &
+ MVPP22_XLG_CTRL0_MAC_RESET_DIS)
+ continue;
}
}
@@ -1880,6 +2206,9 @@ static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
{
u32 val;
+ if (port->flags & MVPP22_F_IF_MUSDK)
+ return;
+
val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
@@ -1892,6 +2221,9 @@ static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port)
{
u32 val;
+ if (port->flags & MVPP22_F_IF_MUSDK)
+ return;
+
val = readl(port->base + MVPP22_XLG_CTRL1_REG);
val &= ~MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK;
val |= ((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
@@ -1899,19 +2231,42 @@ static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port)
writel(val, port->base + MVPP22_XLG_CTRL1_REG);
}
+static void mvpp2_gmac_tx_fifo_configure(struct mvpp2_port *port)
+{
+ u32 val, tx_fifo_min_th;
+ u8 low_wm, hi_wm;
+
+ tx_fifo_min_th = MVPP2_GMAC_TX_FIFO_MIN_TH;
+ low_wm = MVPP2_GMAC_TX_FIFO_LOW_WM;
+ hi_wm = MVPP2_GMAC_TX_FIFO_HI_WM;
+
+ /* Update TX FIFO MIN Threshold */
+ val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
+ val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
+ val |= tx_fifo_min_th;
+ writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
+
+ /* Update TX FIFO levels of assertion/deassertion
+ * of p2mem_ready_signal, which indicates readiness
+ * for fetching the data from DRAM.
+ */
+ val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_0_REG);
+ val &= ~MVPP2_GMAC_TX_FIFO_WM_MASK;
+ val |= (low_wm << MVPP2_GMAC_TX_FIFO_WM_LOW_OFFSET) | hi_wm;
+ writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_0_REG);
+}
+
/* Set defaults to the MVPP2 port */
static void mvpp2_defaults_set(struct mvpp2_port *port)
{
- int tx_port_num, val, queue, lrxq;
+ int tx_port_num, val, queue, ptxq, lrxq;
- if (port->priv->hw_version == MVPP21) {
- /* Update TX FIFO MIN Threshold */
- val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
- val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
- /* Min. TX threshold must be less than minimal packet length */
- val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
- writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
- }
+ if (phy_interface_mode_is_rgmii(port->phy_interface) ||
+ port->phy_interface == PHY_INTERFACE_MODE_MII ||
+ port->phy_interface == PHY_INTERFACE_MODE_SGMII ||
+ port->phy_interface == PHY_INTERFACE_MODE_1000BASEX ||
+ port->phy_interface == PHY_INTERFACE_MODE_2500BASEX)
+ mvpp2_gmac_tx_fifo_configure(port);
/* Disable Legacy WRR, Disable EJP, Release from reset */
tx_port_num = mvpp2_egress_port(port);
@@ -1923,9 +2278,11 @@ static void mvpp2_defaults_set(struct mvpp2_port *port)
mvpp2_write(port->priv, MVPP2_TXP_SCHED_FIXED_PRIO_REG, 0);
/* Close bandwidth for all queues */
- for (queue = 0; queue < MVPP2_MAX_TXQ; queue++)
+ for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
+ ptxq = mvpp2_txq_phys(port->id, queue);
mvpp2_write(port->priv,
- MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(queue), 0);
+ MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
+ }
/* Set refill period to 1 usec, refill tokens
* and bucket size to maximum
@@ -1994,6 +2351,9 @@ static void mvpp2_egress_enable(struct mvpp2_port *port)
int queue;
int tx_port_num = mvpp2_egress_port(port);
+ if (port->flags & MVPP22_F_IF_MUSDK)
+ return;
+
/* Enable all initialized TXs. */
qmap = 0;
for (queue = 0; queue < port->ntxqs; queue++) {
@@ -2016,6 +2376,9 @@ static void mvpp2_egress_disable(struct mvpp2_port *port)
int delay;
int tx_port_num = mvpp2_egress_port(port);
+ if (port->flags & MVPP22_F_IF_MUSDK)
+ return;
+
/* Issue stop command for active channels only */
mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
@@ -2118,9 +2481,13 @@ mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
*/
static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
{
+ int cpu = smp_processor_id();
+
+ mvpp2_tx_done_guard_timer_set(port, cpu);
+
/* aggregated access - relevant TXQ number is written in TX desc */
mvpp2_thread_write(port->priv,
- mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
+ mvpp2_cpu_to_thread(port->priv, cpu),
MVPP2_AGGR_TXQ_UPDATE_REG, pending);
}
@@ -2177,8 +2544,9 @@ static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2_port *port,
struct mvpp2_txq_pcpu *txq_pcpu,
int num)
{
- int req, desc_count;
unsigned int thread;
+ int req, desc_count;
+ struct mvpp2_txq_pcpu *txq_pcpu_aux;
if (txq_pcpu->reserved_num >= num)
return 0;
@@ -2186,27 +2554,24 @@ static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2_port *port,
/* Not enough descriptors reserved! Update the reserved descriptor
* count and check again.
*/
-
- desc_count = 0;
- /* Compute total of used descriptors */
- for (thread = 0; thread < port->priv->nthreads; thread++) {
- struct mvpp2_txq_pcpu *txq_pcpu_aux;
-
- txq_pcpu_aux = per_cpu_ptr(txq->pcpu, thread);
- desc_count += txq_pcpu_aux->count;
- desc_count += txq_pcpu_aux->reserved_num;
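+	/* A request of up to MAX_SKB_FRAGS descriptors always fits in a single
+	 * per-CPU chunk, so the global descriptor accounting can be skipped.
+	 */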
+ if (num <= MAX_SKB_FRAGS) {
+ req = MVPP2_CPU_DESC_CHUNK;
+ } else {
+ /* Compute total of used descriptors */
+ desc_count = 0;
+ for (thread = 0; thread < port->priv->nthreads; thread++) {
+ txq_pcpu_aux = per_cpu_ptr(txq->pcpu, thread);
+ desc_count += txq_pcpu_aux->reserved_num;
+ }
+ req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
+ /* Check the reservation is possible */
+ if ((desc_count + req) > txq->size)
+ return -ENOMEM;
}
- req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
- desc_count += req;
-
- if (desc_count >
- (txq->size - (MVPP2_MAX_THREADS * MVPP2_CPU_DESC_CHUNK)))
- return -ENOMEM;
-
txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(port, txq, req);
- /* OK, the descriptor could have been updated: check again. */
+ /* Check the resulting reservation is enough */
if (txq_pcpu->reserved_num < num)
return -ENOMEM;
return 0;
@@ -2299,6 +2664,107 @@ static void mvpp2_txq_sent_counter_clear(void *arg)
}
}
+/* The MVPP2_F_IF_TX_ON flag prevents a stray tx_done from calling
+ * netif_tx_wake while dev-stop or link-down processing is in progress.
+ * Set/clear it on each cpu.
+ */
+static inline bool mvpp2_tx_stopped(struct mvpp2_port *port)
+{
+ return !(port->flags & MVPP2_F_IF_TX_ON);
+}
+
+static void mvpp2_txqs_on(void *arg)
+{
+ ((struct mvpp2_port *)arg)->flags |= MVPP2_F_IF_TX_ON;
+}
+
+static void mvpp2_txqs_off(void *arg)
+{
+ ((struct mvpp2_port *)arg)->flags &= ~MVPP2_F_IF_TX_ON;
+}
+
+static void mvpp2_txqs_on_tasklet_cb(unsigned long data)
+{
+ /* Activated/runs on 1 cpu only (with link_status_irq)
+ * to update/guarantee TX_ON coherency on other cpus
+ */
+ struct mvpp2_port *port = (struct mvpp2_port *)data;
+
+ if (mvpp2_tx_stopped(port))
+ on_each_cpu(mvpp2_txqs_off, port, 1);
+ else
+ on_each_cpu(mvpp2_txqs_on, port, 1);
+}
+
+static void mvpp2_txqs_on_tasklet_init(struct mvpp2_port *port)
+{
+ /* Init called only for port with link_status_isr */
+ tasklet_init(&port->txqs_on_tasklet,
+ mvpp2_txqs_on_tasklet_cb,
+ (unsigned long)port);
+}
+
+static void mvpp2_txqs_on_tasklet_kill(struct mvpp2_port *port)
+{
+ if (port->txqs_on_tasklet.func)
+ tasklet_kill(&port->txqs_on_tasklet);
+}
+
+/* Use mvpp2 APIs instead of netif_TX_ALL:
+ * netif_tx_start_all_queues -> mvpp2_tx_start_all_queues
+ * netif_tx_wake_all_queues -> mvpp2_tx_wake_all_queues
+ * netif_tx_stop_all_queues -> mvpp2_tx_stop_all_queues
+ * But keep using per-queue APIs netif_tx_wake_queue,
+ * netif_tx_stop_queue and netif_tx_queue_stopped.
+ */
+static void mvpp2_tx_start_all_queues(struct net_device *dev)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ if (port->flags & MVPP22_F_IF_MUSDK)
+ return;
+
+ /* Never called from IRQ. Update all cpus directly */
+ on_each_cpu(mvpp2_txqs_on, port, 1);
+ netif_tx_start_all_queues(dev);
+}
+
+static void mvpp2_tx_wake_all_queues(struct net_device *dev)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ if (port->flags & MVPP22_F_IF_MUSDK)
+ return;
+
+ if (irqs_disabled()) {
+ /* Link-status IRQ context (also ACPI).
+ * Set for THIS cpu, update other cpus over tasklet
+ */
+ mvpp2_txqs_on((void *)port);
+ tasklet_schedule(&port->txqs_on_tasklet);
+ } else {
+ on_each_cpu(mvpp2_txqs_on, port, 1);
+ }
+ netif_tx_wake_all_queues(dev);
+}
+
+static void mvpp2_tx_stop_all_queues(struct net_device *dev)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ if (port->flags & MVPP22_F_IF_MUSDK)
+ return;
+
+ if (irqs_disabled()) {
+ /* IRQ context. Set for THIS, update other cpus over tasklet */
+ mvpp2_txqs_off((void *)port);
+ tasklet_schedule(&port->txqs_on_tasklet);
+ } else {
+ on_each_cpu(mvpp2_txqs_off, port, 1);
+ }
+ netif_tx_stop_all_queues(dev);
+}
+
/* Set max sizes for Tx queues */
static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
{
@@ -2348,6 +2814,22 @@ static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
}
}
+/* Set the threshold of non-occupied descriptors that changes the interrupt
+ * error cause polled by the FW flow control.
+ */
+void mvpp2_set_rxq_free_tresh(struct mvpp2_port *port,
+ struct mvpp2_rx_queue *rxq)
+{
+ u32 val;
+
+ mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
+
+ val = mvpp2_read(port->priv, MVPP2_RXQ_THRESH_REG);
+ val &= ~MVPP2_RXQ_NON_OCCUPIED_MASK;
+ val |= MSS_THRESHOLD_STOP << MVPP2_RXQ_NON_OCCUPIED_OFFSET;
+ mvpp2_write(port->priv, MVPP2_RXQ_THRESH_REG, val);
+}
+
/* Set the number of packets that will be received before Rx interrupt
* will be generated by HW.
*/
@@ -2366,24 +2848,44 @@ static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
put_cpu();
}
-/* For some reason in the LSP this is done on each CPU. Why ? */
-static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port,
- struct mvpp2_tx_queue *txq)
+/* Program the HW packet-coalescing threshold with ZERO or the configured value.
+ * The same value should be set for all TXQs and for all CPUs.
+ * Setting ZERO forces an immediate flush into the tx-done handler.
+ */
+static inline void mvpp2_tx_pkts_coal_set_txqs(struct mvpp2_port *port,
+ int cpu, u32 val)
{
- unsigned int thread;
- u32 val;
+ struct mvpp2_tx_queue *txq;
+ int queue;
- if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK)
- txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK;
+ val <<= MVPP2_TXQ_THRESH_OFFSET;
- val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET);
- /* PKT-coalescing registers are per-queue + per-thread */
- for (thread = 0; thread < MVPP2_MAX_THREADS; thread++) {
- mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
- mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val);
+ for (queue = 0; queue < port->ntxqs; queue++) {
+ txq = port->txqs[queue];
+ mvpp2_thread_write(port->priv, cpu, MVPP2_TXQ_NUM_REG,
+ txq->id);
+ mvpp2_thread_write(port->priv, cpu, MVPP2_TXQ_THRESH_REG, val);
}
}
+static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port)
+{
+ struct mvpp2_tx_queue *txq = port->txqs[0];
+ u32 cfg_val = txq->done_pkts_coal;
+ int cpu;
+
+ for_each_present_cpu(cpu)
+ mvpp2_tx_pkts_coal_set_txqs(port, cpu, cfg_val);
+}
+
+/* Set the ZERO value from on_each_cpu() IRQ context, on the local CPU only */
+static void mvpp2_tx_pkts_coal_set_zero_pcpu(void *arg)
+{
+ struct mvpp2_port *port = arg;
+
+ mvpp2_tx_pkts_coal_set_txqs(port, smp_processor_id(), 0);
+}
+
static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
{
u64 tmp = (u64)clk_hz * usec;
@@ -2447,16 +2949,29 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
struct mvpp2_txq_pcpu_buf *tx_buf =
txq_pcpu->buffs + txq_pcpu->txq_get_index;
- if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma) &&
+ if (!tx_buf->skb &&
tx_buf->type != MVPP2_TYPE_XDP_TX)
dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
tx_buf->size, DMA_TO_DEVICE);
- if (tx_buf->type == MVPP2_TYPE_SKB && tx_buf->skb)
- dev_kfree_skb_any(tx_buf->skb);
+ else if (tx_buf->skb != TSO_HEADER_MARK &&
+ tx_buf->type != MVPP2_TYPE_XDP_TX) {
+ dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
+ tx_buf->size, DMA_TO_DEVICE);
+#ifndef MODULE
+ if (static_branch_unlikely(&mvpp2_recycle_ena)) {
+ mvpp2_recycle_put(port, txq_pcpu, tx_buf);
+ /* sets tx_buf->skb=NULL if put to recycle */
+ if (tx_buf->skb)
+ dev_kfree_skb_any(tx_buf->skb);
+ } else
+#endif
+ dev_kfree_skb_any(tx_buf->skb);
+ }
else if (tx_buf->type == MVPP2_TYPE_XDP_TX ||
tx_buf->type == MVPP2_TYPE_XDP_NDO)
xdp_return_frame(tx_buf->xdpf);
+ /* else: no action, tx_buf->skb always overwritten in xmit */
mvpp2_txq_inc_get(txq_pcpu);
}
}
@@ -2494,9 +3009,15 @@ static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
txq_pcpu->count -= tx_done;
- if (netif_tx_queue_stopped(nq))
- if (txq_pcpu->count <= txq_pcpu->wake_threshold)
+ if (netif_tx_queue_stopped(nq) && !mvpp2_tx_stopped(port)) {
+ /* Wake if netif_tx_queue_stopped on same txq->log_id */
+ if (txq_pcpu->stopped_on_txq_id == txq->log_id &&
+ txq_pcpu->count <= txq_pcpu->wake_threshold) {
+ txq_pcpu->stopped_on_txq_id = MVPP2_MAX_TXQ;
+ nq = netdev_get_tx_queue(port->dev, txq->log_id);
netif_tx_wake_queue(nq);
+ }
+ }
}
static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause,
@@ -2506,6 +3027,9 @@ static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause,
struct mvpp2_txq_pcpu *txq_pcpu;
unsigned int tx_todo = 0;
+	/* Restore the configured coalescing ("no-force" state) */
+ mvpp2_tx_done_guard_force_irq(port, thread, 0);
+
while (cause) {
txq = mvpp2_get_tx_queue(port, cause);
if (!txq)
@@ -2534,8 +3058,8 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev,
/* Allocate memory for TX descriptors */
aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
- MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
- &aggr_txq->descs_dma, GFP_KERNEL);
+ MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
+ &aggr_txq->descs_dma, GFP_KERNEL);
if (!aggr_txq->descs)
return -ENOMEM;
@@ -2580,6 +3104,7 @@ static int mvpp2_rxq_init(struct mvpp2_port *port,
return -ENOMEM;
rxq->last_desc = rxq->size - 1;
+ rxq->rx_pending = 0;
/* Zero occupied and non-occupied counters - direct access */
mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
@@ -2603,6 +3128,9 @@ static int mvpp2_rxq_init(struct mvpp2_port *port,
mvpp2_rx_pkts_coal_set(port, rxq);
mvpp2_rx_time_coal_set(port, rxq);
+ /* Set the number of non occupied descriptors threshold */
+ mvpp2_set_rxq_free_tresh(port, rxq);
+
/* Add number of descriptors ready for receiving packets */
mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
@@ -2651,6 +3179,7 @@ static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
{
int rx_received, i;
+ rxq->rx_pending = 0;
rx_received = mvpp2_rxq_received(port, rxq->id);
if (!rx_received)
return;
@@ -2664,8 +3193,7 @@ static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
MVPP2_RXD_BM_POOL_ID_OFFS;
mvpp2_bm_pool_put(port, pool,
- mvpp2_rxdesc_dma_addr_get(port, rx_desc),
- mvpp2_rxdesc_cookie_get(port, rx_desc));
+ mvpp2_rxdesc_dma_addr_get(port, rx_desc));
}
mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
}
@@ -2706,6 +3234,19 @@ static void mvpp2_rxq_deinit(struct mvpp2_port *port,
put_cpu();
}
+/* Disable all rx/ingress queues, called by mvpp2_init */
+static void mvpp2_rxq_disable_all(struct mvpp2 *priv)
+{
+ int i;
+ u32 val;
+
+ for (i = 0; i < MVPP2_RXQ_MAX_NUM; i++) {
+ val = mvpp2_read(priv, MVPP2_RXQ_CONFIG_REG(i));
+ val |= MVPP2_RXQ_DISABLE_MASK;
+ mvpp2_write(priv, MVPP2_RXQ_CONFIG_REG(i), val);
+ }
+}
+
/* Create and initialize a Tx queue */
static int mvpp2_txq_init(struct mvpp2_port *port,
struct mvpp2_tx_queue *txq)
@@ -2783,8 +3324,11 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
txq_pcpu->txq_get_index = 0;
txq_pcpu->tso_headers = NULL;
- txq_pcpu->stop_threshold = txq->size - MVPP2_MAX_SKB_DESCS;
- txq_pcpu->wake_threshold = txq_pcpu->stop_threshold / 2;
+ txq_pcpu->stop_threshold = txq->size -
+ MVPP2_MAX_SKB_DESCS(num_present_cpus());
+ txq_pcpu->wake_threshold = txq_pcpu->stop_threshold -
+ MVPP2_TX_PAUSE_HYSTERESIS;
+ txq_pcpu->stopped_on_txq_id = MVPP2_MAX_TXQ;
txq_pcpu->tso_headers =
dma_alloc_coherent(port->dev->dev.parent,
@@ -2829,7 +3373,7 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
txq->descs_dma = 0;
/* Set minimum bandwidth for disabled TXQs */
- mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->log_id), 0);
+ mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
/* Set Tx descriptors queue starting address and size */
thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
@@ -2852,6 +3396,11 @@ static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
val |= MVPP2_TXQ_DRAIN_EN_MASK;
mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val);
+ /* Temporarily enable egress for the port.
+ * It is required for releasing all remaining packets.
+ */
+ mvpp2_egress_enable(port);
+
/* The napi queue has been stopped so wait for all packets
* to be transmitted.
*/
@@ -2871,6 +3420,8 @@ static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
pending &= MVPP2_TXQ_PENDING_MASK;
} while (pending);
+ mvpp2_egress_disable(port);
+
val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val);
put_cpu();
@@ -2920,6 +3471,9 @@ static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
for (queue = 0; queue < port->nrxqs; queue++)
mvpp2_rxq_deinit(port, port->rxqs[queue]);
+
+ if (port->tx_fc)
+ mvpp2_rxq_disable_fc(port);
}
/* Init all Rx queues for port */
@@ -2932,6 +3486,10 @@ static int mvpp2_setup_rxqs(struct mvpp2_port *port)
if (err)
goto err_cleanup;
}
+
+ if (port->tx_fc)
+ mvpp2_rxq_enable_fc(port);
+
return 0;
err_cleanup:
@@ -2950,18 +3508,11 @@ static int mvpp2_setup_txqs(struct mvpp2_port *port)
err = mvpp2_txq_init(port, txq);
if (err)
goto err_cleanup;
-
- /* Assign this queue to a CPU */
- if (queue < num_possible_cpus())
- netif_set_xps_queue(port->dev, cpumask_of(queue), queue);
}
if (port->has_tx_irqs) {
+ /* Download time-coal. The pkts-coal done in start_dev */
mvpp2_tx_time_coal_set(port);
- for (queue = 0; queue < port->ntxqs; queue++) {
- txq = port->txqs[queue];
- mvpp2_tx_pkts_coal_set(port, txq);
- }
}
on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
@@ -3038,23 +3589,23 @@ static void mvpp2_isr_handle_link(struct mvpp2_port *port, bool link)
{
struct net_device *dev = port->dev;
+ if (!netif_running(dev))
+ return;
+
if (port->phylink) {
phylink_mac_change(port->phylink, link);
return;
}
- if (!netif_running(dev))
- return;
-
if (link) {
mvpp2_interrupts_enable(port);
mvpp2_egress_enable(port);
mvpp2_ingress_enable(port);
netif_carrier_on(dev);
- netif_tx_wake_all_queues(dev);
+ mvpp2_tx_wake_all_queues(dev);
} else {
- netif_tx_stop_all_queues(dev);
+ mvpp2_tx_stop_all_queues(dev);
netif_carrier_off(dev);
mvpp2_ingress_disable(port);
mvpp2_egress_disable(port);
@@ -3083,6 +3634,7 @@ static void mvpp2_isr_handle_gmac_internal(struct mvpp2_port *port)
if (phy_interface_mode_is_rgmii(port->phy_interface) ||
phy_interface_mode_is_8023z(port->phy_interface) ||
+ port->phy_interface == PHY_INTERFACE_MODE_MII ||
port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
val = readl(port->base + MVPP22_GMAC_INT_STAT);
if (val & MVPP22_GMAC_INT_STAT_LINK) {
@@ -3101,8 +3653,11 @@ static irqreturn_t mvpp2_port_isr(int irq, void *dev_id)
mvpp22_gop_mask_irq(port);
- if (mvpp2_port_supports_xlg(port) &&
- mvpp2_is_xlg(port->phy_interface)) {
+ if (port->has_xlg_mac &&
+ (port->phy_interface == PHY_INTERFACE_MODE_RXAUI ||
+ port->phy_interface == PHY_INTERFACE_MODE_10GKR ||
+ port->phy_interface == PHY_INTERFACE_MODE_5GKR ||
+ port->phy_interface == PHY_INTERFACE_MODE_INTERNAL)) {
/* Check the external status register */
val = readl(port->base + MVPP22_XLG_EXT_INT_STAT);
if (val & MVPP22_XLG_EXT_INT_STAT_XLG)
@@ -3124,21 +3679,31 @@ static irqreturn_t mvpp2_port_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
+static void mvpp2_tx_done_timer_set(struct mvpp2_port_pcpu *port_pcpu)
{
- struct net_device *dev;
- struct mvpp2_port *port;
+ ktime_t interval;
+
+ if (!port_pcpu->tx_done_timer_scheduled) {
+ port_pcpu->tx_done_timer_scheduled = true;
+ interval = MVPP2_TXDONE_HRTIMER_PERIOD_NS;
+ hrtimer_start(&port_pcpu->tx_done_timer, interval,
+ HRTIMER_MODE_REL_PINNED);
+ }
+}
+
+static void mvpp2_tx_done_proc_cb(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct mvpp2_port *port = netdev_priv(dev);
struct mvpp2_port_pcpu *port_pcpu;
unsigned int tx_todo, cause;
- port_pcpu = container_of(timer, struct mvpp2_port_pcpu, tx_done_timer);
- dev = port_pcpu->dev;
+ port_pcpu = per_cpu_ptr(port->pcpu,
+ mvpp2_cpu_to_thread(port->priv, smp_processor_id()));
if (!netif_running(dev))
- return HRTIMER_NORESTART;
-
- port_pcpu->timer_scheduled = false;
- port = netdev_priv(dev);
+ return;
+ port_pcpu->tx_done_timer_scheduled = false;
/* Process all the Tx queues */
cause = (1 << port->ntxqs) - 1;
@@ -3146,16 +3711,318 @@ static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
mvpp2_cpu_to_thread(port->priv, smp_processor_id()));
/* Set the timer in case not all the packets were processed */
- if (tx_todo && !port_pcpu->timer_scheduled) {
- port_pcpu->timer_scheduled = true;
- hrtimer_forward_now(&port_pcpu->tx_done_timer,
- MVPP2_TXDONE_HRTIMER_PERIOD_NS);
+ if (tx_todo)
+ mvpp2_tx_done_timer_set(port_pcpu);
+}
+
+static enum hrtimer_restart mvpp2_tx_done_timer_cb(struct hrtimer *timer)
+{
+ struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
+ struct mvpp2_port_pcpu,
+ tx_done_timer);
+
+ tasklet_schedule(&port_pcpu->tx_done_tasklet);
+ return HRTIMER_NORESTART;
+}
+
+/* The bulk timer may be started/restarted by XMIT, the timer callback or the
+ * tasklet. XMIT calls bulk_timer_restart(), which is conditional (start vs
+ * restart request). The timer callback has its own condition logic and calls
+ * hrtimer_forward(); the tasklet has its own condition logic and calls the
+ * unconditional bulk_timer_start(). The scheduled and restart_req flags drive
+ * this state logic.
+ */
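+/* Rough state summary (scheduled, restart_req), assuming each CPU touches
+ * only its own per-CPU flags:
+ *   xmit:    (false, *)     -> start timer, scheduled = true
+ *   xmit:    (true,  *)     -> restart_req = true
+ *   timer:   (false, *)     -> nothing, already flushed by xmit
+ *   timer:   (true,  true)  -> forward timer, restart_req = false
+ *   timer:   (true,  false) -> schedule the flush tasklet
+ *   tasklet: (true,  true)  -> scheduled = false, then restart the timer
+ *   tasklet: (true,  false) -> scheduled = false, flush pending descriptors
+ */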
+static inline void mvpp2_bulk_timer_restart(struct mvpp2_port_pcpu *port_pcpu)
+{
+ if (!port_pcpu->bulk_timer_scheduled) {
+ port_pcpu->bulk_timer_scheduled = true;
+ hrtimer_start(&port_pcpu->bulk_timer, MVPP2_TX_BULK_TIME,
+ HRTIMER_MODE_REL_PINNED);
+ } else {
+ port_pcpu->bulk_timer_restart_req = true;
+ }
+}
+
+static void mvpp2_bulk_timer_start(struct mvpp2_port_pcpu *port_pcpu)
+{
+ port_pcpu->bulk_timer_scheduled = true;
+ port_pcpu->bulk_timer_restart_req = false;
+ hrtimer_start(&port_pcpu->bulk_timer, MVPP2_TX_BULK_TIME,
+ HRTIMER_MODE_REL_PINNED);
+}
+
+static enum hrtimer_restart mvpp2_bulk_timer_cb(struct hrtimer *timer)
+{
+ /* ISR context */
+ struct mvpp2_port_pcpu *port_pcpu =
+ container_of(timer, struct mvpp2_port_pcpu, bulk_timer);
+
+ if (!port_pcpu->bulk_timer_scheduled) {
+ /* All pending are already flushed by xmit */
+ return HRTIMER_NORESTART;
+ }
+ if (port_pcpu->bulk_timer_restart_req) {
+ /* Not flushed but restart requested by xmit */
+ port_pcpu->bulk_timer_scheduled = true;
+ port_pcpu->bulk_timer_restart_req = false;
+ hrtimer_forward_now(timer, MVPP2_TX_BULK_TIME);
return HRTIMER_RESTART;
}
+ /* Expired and need the flush for pending */
+ tasklet_schedule(&port_pcpu->bulk_tasklet);
return HRTIMER_NORESTART;
}
+static void mvpp2_bulk_tasklet_cb(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct mvpp2_port *port = netdev_priv(dev);
+ struct mvpp2_port_pcpu *port_pcpu;
+ struct mvpp2_tx_queue *aggr_txq;
+ int frags;
+ int cpu = smp_processor_id();
+
+ port_pcpu = per_cpu_ptr(port->pcpu, cpu);
+
+ if (!port_pcpu->bulk_timer_scheduled) {
+ /* Flushed by xmit-softirq since timer-irq */
+ return;
+ }
+ port_pcpu->bulk_timer_scheduled = false;
+ if (port_pcpu->bulk_timer_restart_req) {
+ /* Restart requested by xmit-softirq since timer-irq */
+ mvpp2_bulk_timer_start(port_pcpu);
+ return;
+ }
+
+ /* Full time expired. Flush pending packets here */
+ aggr_txq = &port->priv->aggr_txqs[cpu];
+ frags = aggr_txq->pending;
+ if (!frags)
+ return; /* Flushed by xmit */
+ aggr_txq->pending -= frags;
+ mvpp2_aggr_txq_pend_desc_add(port, frags);
+}
+
+/* Guard timer, tasklet, fixer utilities */
+
+/* The guard "fixer", called for two opposite actions:
+ * Activate the fix by setting the frame-coalescing threshold to zero
+ *  (according to to_zero_map), which forces the tx-done IRQ. Called by the
+ *  guard tasklet.
+ * Deactivate the fixer, i.e. restore the coalescing configuration
+ *  (to_zero_map = 0), once tx-done has run again.
+ */
+static void mvpp2_tx_done_guard_force_irq(struct mvpp2_port *port,
+ int sw_thread, u8 to_zero_map)
+{
+ int q;
+ u32 val, coal, qmask, xor;
+ struct mvpp2_port_pcpu *port_pcpu = per_cpu_ptr(port->pcpu, sw_thread);
+
+ if (port_pcpu->txq_coal_is_zero_map == to_zero_map)
+ return; /* all current & requested are already the same */
+
+ xor = port_pcpu->txq_coal_is_zero_map ^ to_zero_map;
+	/* The configured num-of-frames coalescing value is the same for all queues */
+ coal = port->txqs[0]->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET;
+
+ for (q = 0; q < port->ntxqs; q++) {
+ qmask = 1 << q;
+ if (!(xor & qmask))
+ continue;
+ if (to_zero_map & qmask)
+ val = 0; /* Set ZERO forcing the Interrupt */
+ else
+ val = coal; /* Set/restore configured threshold */
+ mvpp2_thread_write(port->priv, sw_thread,
+ MVPP2_TXQ_NUM_REG, port->txqs[q]->id);
+ mvpp2_thread_write(port->priv, sw_thread,
+ MVPP2_TXQ_THRESH_REG, val);
+ }
+ port_pcpu->txq_coal_is_zero_map = to_zero_map;
+}
+
+static inline void mvpp2_tx_done_guard_timer_set(struct mvpp2_port *port,
+ int sw_thread)
+{
+ struct mvpp2_port_pcpu *port_pcpu = per_cpu_ptr(port->pcpu,
+ sw_thread);
+
+ if (!port_pcpu->guard_timer_scheduled) {
+ port_pcpu->guard_timer_scheduled = true;
+ hrtimer_start(&port_pcpu->tx_done_timer,
+ MVPP2_GUARD_TXDONE_HRTIMER_NS,
+ HRTIMER_MODE_REL_PINNED);
+ }
+}
+
+/* The guard timer and tasklet callbacks base their checks on the flags
+ * guard_timer_scheduled, tx_done_passed,
+ * txq_coal_is_zero_map and txq_busy_suspect_map.
+ */
+static enum hrtimer_restart mvpp2_guard_timer_cb(struct hrtimer *timer)
+{
+ struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
+ struct mvpp2_port_pcpu, tx_done_timer);
+ struct mvpp2_port *port = port_pcpu->port;
+ struct mvpp2_tx_queue *txq;
+ struct mvpp2_txq_pcpu *txq_pcpu;
+ u8 txq_nonempty_map = 0;
+ int q, cpu;
+ ktime_t time;
+
+ if (port_pcpu->tx_done_passed) {
+ /* ok, tx-done was active since last checking */
+ port_pcpu->tx_done_passed = false;
+ time = MVPP2_GUARD_TXDONE_HRTIMER_NS; /* regular long */
+ goto timer_restart;
+ }
+
+ cpu = smp_processor_id(); /* timer is per-cpu */
+
+ for (q = 0; q < port->ntxqs; q++) {
+ txq = port->txqs[q];
+ txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
+ if (txq_pcpu->count)
+ txq_nonempty_map |= 1 << q;
+ }
+
+ if (!txq_nonempty_map || mvpp2_tx_stopped(port)) {
+		/* All queues are empty; the guard timer may be stopped now.
+		 * It will be started again on the next transmit.
+ */
+ port_pcpu->guard_timer_scheduled = false;
+ return HRTIMER_NORESTART;
+ }
+
+ if (port_pcpu->txq_busy_suspect_map) {
+		/* Second hit: tx-done is really stalled.
+		 * Activate the tasklet to fix it.
+		 * Keep guard_timer_scheduled = true.
+		 */
+ tasklet_schedule(&port_pcpu->tx_done_tasklet);
+ return HRTIMER_NORESTART;
+ }
+
+	/* First hit: tx-done seems stalled. Schedule a re-check after a SHORT
+	 * period slightly longer than the HW coalescing time in usec
+	 * (the shift by 10, i.e. x1024, approximates NSEC_PER_USEC)
+ */
+ time = ktime_set(0, port->tx_time_coal << 10);
+ port_pcpu->txq_busy_suspect_map |= txq_nonempty_map;
+
+timer_restart:
+ /* Keep guard_timer_scheduled=TRUE but set new expiration time */
+ hrtimer_forward_now(timer, time);
+ return HRTIMER_RESTART;
+}
+
+static void mvpp2_tx_done_guard_tasklet_cb(unsigned long data)
+{
+ struct mvpp2_port *port = (void *)data;
+ struct mvpp2_port_pcpu *port_pcpu;
+ int cpu;
+
+ /* stop_dev() has permanent setting for coal=0 */
+ if (mvpp2_tx_stopped(port))
+ return;
+
+ cpu = get_cpu();
+ port_pcpu = per_cpu_ptr(port->pcpu, cpu); /* tasklet is per-cpu */
+
+ if (port_pcpu->tx_done_passed) {
+ port_pcpu->tx_done_passed = false;
+ } else { /* Force IRQ */
+ mvpp2_tx_done_guard_force_irq(port, cpu,
+ port_pcpu->txq_busy_suspect_map);
+ port_pcpu->tx_guard_cntr++;
+ }
+ port_pcpu->txq_busy_suspect_map = 0;
+
+ /* guard_timer_scheduled is already TRUE, just start the timer */
+ hrtimer_start(&port_pcpu->tx_done_timer,
+ MVPP2_GUARD_TXDONE_HRTIMER_NS,
+ HRTIMER_MODE_REL_PINNED);
+ put_cpu();
+}
+
+static u32 mvpp2_tx_done_guard_get_stats(struct mvpp2_port *port, int cpu)
+{
+ return per_cpu_ptr(port->pcpu, cpu)->tx_guard_cntr;
+}
+
+static void mvpp2_tx_done_init_on_open(struct mvpp2_port *port, bool open)
+{
+ struct mvpp2_port_pcpu *port_pcpu;
+ int cpu;
+
+ if (port->flags & MVPP2_F_LOOPBACK)
+ return;
+
+ if (!open)
+ goto close;
+
+ /* Init tx-done tasklets and variables */
+ for_each_present_cpu(cpu) {
+ port_pcpu = per_cpu_ptr(port->pcpu, cpu);
+
+		/* The timer works in either tx-done or guard mode. To avoid a
+		 * per-packet mode check, each mode has its own "_scheduled"
+		 * flag: set scheduled = false for the active mode and true for
+		 * the inactive one, so its timer is never started.
+ */
+ if (port->has_tx_irqs) { /* guard-mode */
+ port_pcpu->txq_coal_is_zero_map = 0;
+ port_pcpu->txq_busy_suspect_map = 0;
+ port_pcpu->tx_done_passed = false;
+
+			/* "true" so the inactive tx-done timer is never started */
+ port_pcpu->tx_done_timer_scheduled = true;
+ port_pcpu->guard_timer_scheduled = false;
+ tasklet_init(&port_pcpu->tx_done_tasklet,
+ mvpp2_tx_done_guard_tasklet_cb,
+ (unsigned long)port);
+ } else {
+ port_pcpu->tx_done_timer_scheduled = false;
+			/* "true" so the inactive guard timer is never started */
+ port_pcpu->guard_timer_scheduled = true;
+ tasklet_init(&port_pcpu->tx_done_tasklet,
+ mvpp2_tx_done_proc_cb,
+ (unsigned long)port->dev);
+ }
+ }
+ return;
+close:
+ /* Kill tx-done timers and tasklets */
+ for_each_present_cpu(cpu) {
+ port_pcpu = per_cpu_ptr(port->pcpu, cpu);
+		/* Mark "scheduled = true" so neither timer is restarted from XMIT */
+ port_pcpu->tx_done_timer_scheduled = true;
+ port_pcpu->guard_timer_scheduled = true;
+ hrtimer_cancel(&port_pcpu->tx_done_timer);
+ tasklet_kill(&port_pcpu->tx_done_tasklet);
+ }
+}
+
+static void mvpp2_tx_done_init_on_probe(struct platform_device *pdev,
+ struct mvpp2_port *port)
+{
+ struct mvpp2_port_pcpu *port_pcpu;
+ int cpu;
+ bool guard_mode = port->has_tx_irqs;
+
+ if (port->flags & MVPP2_F_LOOPBACK)
+ return;
+
+ for_each_present_cpu(cpu) {
+ port_pcpu = per_cpu_ptr(port->pcpu, cpu);
+ port_pcpu->port = port;
+ hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
+ port_pcpu->tx_done_timer.function = (guard_mode) ?
+ mvpp2_guard_timer_cb : mvpp2_tx_done_timer_cb;
+ }
+}
+
/* Main RX/TX processing routines */
/* Display more error info */
@@ -3167,8 +4034,8 @@ static void mvpp2_rx_error(struct mvpp2_port *port,
char *err_str = NULL;
switch (status & MVPP2_RXD_ERR_CODE_MASK) {
- case MVPP2_RXD_ERR_CRC:
- err_str = "crc";
+ case MVPP2_RXD_ERR_MAC:
+ err_str = "MAC";
break;
case MVPP2_RXD_ERR_OVERRUN:
err_str = "overrun";
@@ -3178,7 +4045,7 @@ static void mvpp2_rx_error(struct mvpp2_port *port,
break;
}
if (err_str && net_ratelimit())
- netdev_err(port->dev,
+ netdev_dbg(port->dev,
"bad rx status %08x (%s error), size=%zu\n",
status, err_str, sz);
}
@@ -3201,6 +4068,356 @@ static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
skb->ip_summed = CHECKSUM_NONE;
}
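+/* Walk the chained buffer headers of a multi-buffer frame and return every
+ * buffer in the chain to its BM pool.
+ */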
+static void mvpp2_buff_hdr_pool_put(struct mvpp2_port *port, struct mvpp2_rx_desc *rx_desc,
+ int pool, u32 rx_status)
+{
+ dma_addr_t dma_addr, dma_addr_next;
+ struct mvpp2_buff_hdr *buff_hdr;
+ phys_addr_t phys_addr;
+
+ dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
+ phys_addr = dma_to_phys(port->dev->dev.parent, dma_addr);
+
+ do {
+ buff_hdr = (struct mvpp2_buff_hdr *)phys_to_virt(phys_addr);
+
+ dma_addr_next = le32_to_cpu(buff_hdr->next_dma_addr);
+
+ if (port->priv->hw_version >= MVPP22)
+ dma_addr_next |= ((u64)buff_hdr->next_dma_addr_high << 32);
+
+ mvpp2_bm_pool_put(port, pool, dma_addr);
+
+ dma_addr = dma_addr_next;
+ phys_addr = dma_to_phys(port->dev->dev.parent, dma_addr);
+
+ } while (!MVPP2_B_HDR_INFO_IS_LAST(le16_to_cpu(buff_hdr->info)));
+}
+
+#ifndef MODULE
+void mvpp2_recycle_stats(void)
+{
+ int cpu;
+ int pl_id;
+ struct mvpp2_recycle_pcpu *pcpu;
+
+ pr_info("Recycle-stats: %d open ports (on all CP110s)\n",
+ mvpp2_share.num_open_ports);
+ if (!mvpp2_share.recycle_base)
+ return;
+ pcpu = mvpp2_share.recycle;
+ for_each_online_cpu(cpu) {
+ for (pl_id = 0; pl_id < MVPP2_BM_POOLS_NUM; pl_id++) {
+ pr_info("| cpu[%d].pool_%d: idx=%d\n",
+ cpu, pl_id, pcpu->idx[pl_id]);
+ }
+ pr_info("| ___[%d].skb_____idx=%d__\n",
+ cpu, pcpu->idx[MVPP2_BM_POOLS_NUM]);
+ pcpu++;
+ }
+}
+
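+/* Called when a port is opened. The recycle cache is shared by all ports on
+ * all CP110s and is allocated once, when the first port comes up.
+ */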
+static int mvpp2_recycle_open(void)
+{
+ int cpu, pl_id, size;
+ struct mvpp2_recycle_pcpu *pcpu;
+ phys_addr_t addr;
+
+ mvpp2_share.num_open_ports++;
+ wmb(); /* for num_open_ports */
+
+ if (mvpp2_share.recycle_base)
+ return 0;
+
+	/* Allocate the per-CPU recycle pool array */
+ size = sizeof(*pcpu) * num_online_cpus() + L1_CACHE_BYTES;
+ mvpp2_share.recycle_base = kzalloc(size, GFP_KERNEL);
+ if (!mvpp2_share.recycle_base)
+ goto err;
+ /* Use Address aligned to L1_CACHE_BYTES */
+ addr = (phys_addr_t)mvpp2_share.recycle_base + (L1_CACHE_BYTES - 1);
+ addr &= ~(L1_CACHE_BYTES - 1);
+ mvpp2_share.recycle = (void *)addr;
+
+ pcpu = mvpp2_share.recycle;
+ for_each_online_cpu(cpu) {
+ for (pl_id = 0; pl_id <= MVPP2_BM_POOLS_NUM; pl_id++)
+ pcpu->idx[pl_id] = -1;
+ pcpu++;
+ }
+ return 0;
+err:
+ pr_err("mvpp2 error: cannot allocate recycle pool\n");
+ return -ENOMEM;
+}
+
+static void mvpp2_recycle_close(void)
+{
+ int cpu, pl_id, i;
+ struct mvpp2_recycle_pcpu *pcpu;
+ struct mvpp2_recycle_pool *pool;
+
+ mvpp2_share.num_open_ports--;
+ wmb(); /* for num_open_ports */
+
+ /* Do nothing if recycle is not used at all or in use by port/ports */
+ if (mvpp2_share.num_open_ports || !mvpp2_share.recycle_base)
+ return;
+
+ /* Usable (recycle_base!=NULL), but last port gone down
+ * Let's free all accumulated buffers.
+ */
+ pcpu = mvpp2_share.recycle;
+ for_each_online_cpu(cpu) {
+ for (pl_id = 0; pl_id <= MVPP2_BM_POOLS_NUM; pl_id++) {
+ pool = &pcpu->pool[pl_id];
+ for (i = 0; i <= pcpu->idx[pl_id]; i++) {
+ if (!pool->pbuf[i])
+ continue;
+ if (pl_id < MVPP2_BM_POOLS_NUM)
+ kfree(pool->pbuf[i]);
+ else
+ kmem_cache_free(skbuff_head_cache,
+ pool->pbuf[i]);
+ }
+ }
+ pcpu++;
+ }
+ kfree(mvpp2_share.recycle_base);
+ mvpp2_share.recycle_base = NULL;
+}
+
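+/* Decide whether the skb's data buffer may be recycled; if so, return the BM
+ * pool id that the RX path encoded into skb->hash (see mvpp2_skb_set_extra()),
+ * otherwise return -1.
+ */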
+static int mvpp2_recycle_get_bm_id(struct sk_buff *skb)
+{
+ u32 hash;
+
+	/* Keep this ordering of checks for performance */
+ hash = skb_get_hash_raw(skb);
+ /* Check hash */
+ if (!MVPP2_RXTX_HASH_IS_OK(skb, hash))
+ return -1;
+	/* Check whether the skb could be freed */
+ /* Use skb->cloned but not skb_cloned(), skb_header_cloned() */
+ if (skb_shared(skb) || skb->cloned)
+ return -1;
+ /* ipsec: sp/secpath, _skb_refdst ... */
+ if (!skb_irq_freeable(skb))
+ return -1;
+ if (skb_shinfo(skb)->tx_flags & SKBTX_ZEROCOPY_FRAG)
+ return -1;
+
+ /* Get bm-pool-id */
+ hash &= MVPP2_RXTX_HASH_BMID_MASK;
+ if (hash >= MVPP2_BM_POOLS_NUM)
+ return -1;
+
+ return (int)hash;
+}
+
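+/* Stash the transmitted skb's data buffer in the per-CPU recycle cache of its
+ * BM pool and, when there is room, the skb struct itself in the extra cache at
+ * index MVPP2_BM_POOLS_NUM, instead of freeing them.
+ */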
+static inline void mvpp2_recycle_put(struct mvpp2_port *port,
+ struct mvpp2_txq_pcpu *txq_pcpu,
+ struct mvpp2_txq_pcpu_buf *tx_buf)
+{
+ struct mvpp2_recycle_pcpu *pcpu;
+ struct mvpp2_recycle_pool *pool;
+ short int idx, pool_id;
+ struct sk_buff *skb = tx_buf->skb;
+ struct mvpp2_bm_pool *bm_pool;
+
+ /* tx_buf->skb is not NULL */
+ pool_id = mvpp2_recycle_get_bm_id(skb);
+ if (pool_id < 0)
+ return; /* non-recyclable */
+
+ bm_pool = &port->priv->bm_pools[pool_id];
+ if (skb_end_offset(skb) < (bm_pool->frag_size - MVPP2_SKB_SHINFO_SIZE))
+ return; /* shrank -> non-recyclable */
+
+ /* This skb could be destroyed. Put into recycle */
+ pcpu = mvpp2_share.recycle + txq_pcpu->thread;
+ idx = pcpu->idx[pool_id];
+ if (idx < (MVPP2_RECYCLE_FULL - 1)) {
+ pool = &pcpu->pool[pool_id];
+ pool->pbuf[++idx] = skb->head; /* pre-increment */
+ pcpu->idx[pool_id] = idx;
+ skb->head = NULL;
+ }
+ idx = pcpu->idx[MVPP2_BM_POOLS_NUM];
+ if (idx < (MVPP2_RECYCLE_FULL_SKB - 1)) {
+ pool = &pcpu->pool[MVPP2_BM_POOLS_NUM];
+ pool->pbuf[++idx] = skb;
+ pcpu->idx[MVPP2_BM_POOLS_NUM] = idx;
+ if (skb->head) {
+ if (bm_pool->frag_size <= PAGE_SIZE)
+ skb_free_frag(skb->head);
+ else
+ kfree(skb->head);
+ }
+ tx_buf->skb = NULL;
+ }
+}
+
+/* Get an skb and a data buffer, from the per-CPU recycle cache when possible,
+ * and refill the BM pool with the data buffer.
+ */
+static struct sk_buff *mvpp2_recycle_get(struct mvpp2_port *port,
+ struct mvpp2_bm_pool *bm_pool)
+{
+ int cpu;
+ struct mvpp2_recycle_pcpu *pcpu;
+ struct mvpp2_recycle_pool *pool;
+ short int idx;
+ void *frag;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+
+	cpu = smp_processor_id();
+ pcpu = mvpp2_share.recycle + cpu;
+
+ /* GET bm buffer */
+ idx = pcpu->idx[bm_pool->id];
+ pool = &pcpu->pool[bm_pool->id];
+
+ if (idx >= 0) {
+ frag = pool->pbuf[idx];
+ pcpu->idx[bm_pool->id]--; /* post-decrement */
+ } else {
+ /* Allocate 2 buffers, put 1, use another now */
+ pcpu->idx[bm_pool->id] = 0;
+ pool->pbuf[0] = mvpp2_frag_alloc(bm_pool);
+ frag = NULL;
+ }
+ if (!frag)
+ frag = mvpp2_frag_alloc(bm_pool);
+
+ /* refill the buffer into BM */
+ dma_addr = dma_map_single(port->dev->dev.parent, frag,
+ bm_pool->buf_size, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
+ pcpu->idx[bm_pool->id]++; /* Return back to recycle */
+ netdev_err(port->dev, "failed to refill BM pool-%d (%d:%p)\n",
+ bm_pool->id, pcpu->idx[bm_pool->id], frag);
+ return NULL;
+ }
+
+ /* GET skb buffer */
+ idx = pcpu->idx[MVPP2_BM_POOLS_NUM];
+ if (idx >= 0) {
+ pool = &pcpu->pool[MVPP2_BM_POOLS_NUM];
+ skb = pool->pbuf[idx];
+ pcpu->idx[MVPP2_BM_POOLS_NUM]--;
+ } else {
+ skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
+ }
+
+ if (unlikely(!skb)) {
+ dma_unmap_single(port->dev->dev.parent, dma_addr,
+ bm_pool->buf_size, DMA_FROM_DEVICE);
+ mvpp2_frag_free(bm_pool, frag);
+ return NULL;
+ }
+ mvpp2_bm_pool_put(port, bm_pool->id, dma_addr);
+ return skb;
+}
+
+/* SKB and BM-buff alloc/refill like mvpp2_recycle_get but without recycle */
+static inline
+struct sk_buff *mvpp2_bm_refill_skb_get(struct mvpp2_port *port,
+ struct mvpp2_bm_pool *bm_pool)
+{
+ void *frag;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+
+ /* GET bm buffer, refill into BM */
+ frag = mvpp2_frag_alloc(bm_pool);
+ dma_addr = dma_map_single(port->dev->dev.parent, frag,
+ bm_pool->buf_size, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
+ netdev_err(port->dev, "failed to refill BM pool-%d\n",
+ bm_pool->id);
+ return NULL;
+ }
+
+ /* GET skb buffer */
+ skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
+ if (unlikely(!skb)) {
+ dma_unmap_single(port->dev->dev.parent, dma_addr,
+ bm_pool->buf_size, DMA_FROM_DEVICE);
+ mvpp2_frag_free(bm_pool, frag);
+ return NULL;
+ }
+ mvpp2_bm_pool_put(port, bm_pool->id, dma_addr);
+ return skb;
+}
+
+static inline void mvpp2_skb_set_extra(struct sk_buff *skb,
+ struct napi_struct *napi,
+ u32 status,
+ u8 rxq_id,
+ struct mvpp2_bm_pool *bm_pool)
+{
+ u32 hash;
+ enum pkt_hash_types hash_type;
+
+ /* Improve performance and set identification for RX-TX fast-forward */
+ hash = MVPP2_RXTX_HASH_GENER(skb, bm_pool->id);
+ hash_type = (status & (MVPP2_RXD_L4_UDP | MVPP2_RXD_L4_TCP)) ?
+ PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
+ skb_set_hash(skb, hash, hash_type);
+ skb_mark_napi_id(skb, napi);
+ skb_record_rx_queue(skb, (u16)rxq_id);
+}
+
+/* Fast inline clone of __build_skb()/build_skb() that also sets the
+ * Marvell-specific extra information (hash, napi id, rx queue).
+ */
+static inline
+struct sk_buff *mvpp2_build_skb(void *data, unsigned int frag_size,
+ struct napi_struct *napi,
+ struct mvpp2_port *port,
+ u32 rx_status,
+ u8 rxq_id,
+ struct mvpp2_bm_pool *bm_pool)
+{
+ struct skb_shared_info *shinfo;
+ struct sk_buff *skb;
+ unsigned int size = frag_size ? : ksize(data);
+
+ if (static_branch_unlikely(&mvpp2_recycle_ena))
+ skb = mvpp2_recycle_get(port, bm_pool);
+ else
+ skb = mvpp2_bm_refill_skb_get(port, bm_pool);
+ if (unlikely(!skb))
+ return NULL;
+
+ size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+ memset(skb, 0, offsetof(struct sk_buff, tail));
+ skb->truesize = SKB_TRUESIZE(size);
+ refcount_set(&skb->users, 1);
+ skb->head = data;
+ skb->data = data;
+ skb_reset_tail_pointer(skb);
+ skb->end = skb->tail + size;
+ skb->mac_header = (typeof(skb->mac_header))~0U;
+ skb->transport_header = (typeof(skb->transport_header))~0U;
+
+ /* make sure we initialize shinfo sequentially */
+ shinfo = skb_shinfo(skb);
+ memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
+ atomic_set(&shinfo->dataref, 1);
+
+ /* From build_skb wrapper */
+ if (frag_size) {
+ skb->head_frag = 1;
+ if (page_is_pfmemalloc(virt_to_head_page(data)))
+ skb->pfmemalloc = 1;
+ }
+
+ mvpp2_skb_set_extra(skb, napi, rx_status, rxq_id, bm_pool);
+
+ return skb;
+}
+#endif
+
/* Allocate a new skb and add it to BM pool */
static int mvpp2_rx_refill(struct mvpp2_port *port,
struct mvpp2_bm_pool *bm_pool,
@@ -3521,16 +4738,26 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
struct xdp_buff xdp;
int rx_received;
int rx_done = 0;
- u32 xdp_ret = 0;
+ u32 xdp_ret = 0, i = 0;
+ struct sk_buff *skb_all[64];
rcu_read_lock();
xdp_prog = READ_ONCE(port->xdp_prog);
- /* Get number of received packets and clamp the to-do */
- rx_received = mvpp2_rxq_received(port, rxq->id);
- if (rx_todo > rx_received)
- rx_todo = rx_received;
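+	/* rxq->rx_pending caches the HW occupancy left over from a previous
+	 * read, so the status register is re-read only when the cache runs out.
+	 */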
+ if (rxq->rx_pending >= rx_todo) {
+ rx_received = rx_todo;
+ rxq->rx_pending -= rx_todo;
+ } else {
+ /* Get number of received packets and clamp the to-do */
+ rx_received = mvpp2_rxq_received(port, rxq->id);
+ if (rx_received < rx_todo) {
+ rx_todo = rx_received;
+ rxq->rx_pending = 0;
+ } else {
+ rxq->rx_pending = rx_received - rx_todo;
+ }
+ }
while (rx_done < rx_todo) {
struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
@@ -3541,7 +4768,7 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
dma_addr_t dma_addr;
phys_addr_t phys_addr;
u32 rx_status, timestamp;
- int pool, rx_bytes, err, ret;
+ int pool, rx_bytes, ret;
void *data;
rx_done++;
@@ -3549,7 +4776,7 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
rx_bytes -= MVPP2_MH_SIZE;
dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
- phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
+ phys_addr = dma_to_phys(port->dev->dev.parent, dma_addr);
data = (void *)phys_to_virt(phys_addr);
pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
@@ -3616,7 +4843,29 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
}
}
+		/* _sync_ for coherency (_unmap_ is asynchronous).
+		 * _sync_ must be done for the SAME size as in map/unmap.
+		 * The prefetch is for the CPU and must come after the unmap,
+		 * i.e. after the buffer is mapped back to the CPU.
+		 */
+ if (rx_todo == 1)
+ dma_sync_single_for_cpu(dev->dev.parent, dma_addr,
+ bm_pool->buf_size,
+ DMA_FROM_DEVICE);
+ dma_unmap_single(dev->dev.parent, dma_addr,
+ bm_pool->buf_size, DMA_FROM_DEVICE);
+
+ /* Buffer header not supported */
+ if (rx_status & MVPP2_RXD_BUF_HDR)
+ goto err_drop_frame;
+
+ prefetch(data + NET_SKB_PAD); /* packet header */
+
+#ifdef MODULE
skb = build_skb(data, frag_size);
+#else
+ skb = mvpp2_build_skb(data, frag_size,
+ napi, port, rx_status, rxq->id, bm_pool);
+#endif
if (!skb) {
netdev_warn(port->dev, "skb build failed\n");
goto err_drop_frame;
@@ -3631,29 +4880,13 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
skb_hwtstamps(skb));
}
- err = mvpp2_rx_refill(port, bm_pool, pp, pool);
- if (err) {
- netdev_err(port->dev, "failed to refill BM pools\n");
- dev_kfree_skb_any(skb);
- goto err_drop_frame;
- }
-
- if (pp)
- page_pool_release_page(pp, virt_to_page(data));
- else
- dma_unmap_single_attrs(dev->dev.parent, dma_addr,
- bm_pool->buf_size, DMA_FROM_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC);
-
- ps.rx_packets++;
- ps.rx_bytes += rx_bytes;
-
skb_reserve(skb, MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM);
skb_put(skb, rx_bytes);
skb->protocol = eth_type_trans(skb, dev);
mvpp2_rx_csum(port, rx_status, skb);
- napi_gro_receive(napi, skb);
+ skb_all[rcvd_pkts++] = skb;
+ rcvd_bytes += rx_bytes;
continue;
err_drop_frame:
@@ -3663,9 +4896,12 @@ err_drop_frame:
if (rx_status & MVPP2_RXD_BUF_HDR)
mvpp2_buff_hdr_pool_put(port, rx_desc, pool, rx_status);
else
- mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
+ mvpp2_bm_pool_put(port, pool, dma_addr);
}
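+	/* Hand the batched skbs to GRO only after the whole RX burst has been
+	 * processed.
+	 */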
+ while (i < rcvd_pkts)
+ napi_gro_receive(napi, skb_all[i++]);
+
rcu_read_unlock();
if (xdp_ret & MVPP2_XDP_REDIR)
@@ -3684,8 +4920,7 @@ err_drop_frame:
u64_stats_update_end(&stats->syncp);
}
- /* Update Rx queue management counters */
- wmb();
+ /* Update HW Rx queue management counters with RX-done */
mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);
return rx_todo;
@@ -3817,8 +5052,7 @@ static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
mvpp2_txdesc_size_set(port, tx_desc, skb_frag_size(frag));
buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
- skb_frag_size(frag),
- DMA_TO_DEVICE);
+ skb_frag_size(frag), DMA_TO_DEVICE);
if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
mvpp2_txq_desc_put(txq);
goto cleanup;
@@ -3874,6 +5108,7 @@ static inline void mvpp2_tso_put_hdr(struct sk_buff *skb,
MVPP2_TXD_F_DESC |
MVPP2_TXD_PADDING_DISABLE);
mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB);
+ mvpp2_txq_inc_put(port, txq_pcpu, TSO_HEADER_MARK, tx_desc, MVPP2_TYPE_SKB);
}
static inline int mvpp2_tso_put_data(struct sk_buff *skb,
@@ -3920,15 +5155,18 @@ static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev,
struct mvpp2_txq_pcpu *txq_pcpu)
{
struct mvpp2_port *port = netdev_priv(dev);
- int hdr_sz, i, len, descs = 0;
+ int hdr_sz, i, len, descs = tso_count_descs(skb);
struct tso_t tso;
- /* Check number of available descriptors */
- if (mvpp2_aggr_desc_num_check(port, aggr_txq, tso_count_descs(skb)) ||
- mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu,
- tso_count_descs(skb)))
+	/* Check for enough free space in the txq and for enough available
+	 * aggregated/reserved descriptors
+	 */
+ if (((txq_pcpu->size - txq_pcpu->count) < descs) ||
+ mvpp2_aggr_desc_num_check(port, aggr_txq, descs) ||
+ mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, descs))
return 0;
+ descs = 0; /* real descs <= tso_count_descs() */
hdr_sz = tso_start(skb, &tso);
len = skb->len - hdr_sz;
@@ -3995,8 +5233,11 @@ static netdev_tx_t mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
}
frags = skb_shinfo(skb)->nr_frags + 1;
- /* Check number of available descriptors */
- if (mvpp2_aggr_desc_num_check(port, aggr_txq, frags) ||
+	/* Check for enough free space in the txq and for enough available
+	 * aggregated/reserved descriptors
+	 */
+ if (((txq_pcpu->size - txq_pcpu->count) < frags) ||
+ mvpp2_aggr_desc_num_check(port, aggr_txq, frags) ||
mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, frags)) {
frags = 0;
goto out;
@@ -4043,19 +5284,41 @@ static netdev_tx_t mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
out:
if (frags > 0) {
struct mvpp2_pcpu_stats *stats = per_cpu_ptr(port->stats, thread);
- struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
+ struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
+ struct netdev_queue *nq;
+ bool deferred_tx;
txq_pcpu->reserved_num -= frags;
txq_pcpu->count += frags;
aggr_txq->count += frags;
- /* Enable transmit */
- wmb();
- mvpp2_aggr_txq_pend_desc_add(port, frags);
+ /* Enable transmit; RX-to-TX may be deferred with Bulk-timer */
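+		/* Defer the TX doorbell only for single-descriptor frames whose
+		 * hash marks them as RX-forwarded, and only while the pending
+		 * count stays below the bulk cap; the bulk timer (or the next
+		 * non-deferred xmit) flushes the accumulated descriptors.
+		 */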
+ deferred_tx = (frags == 1) &&
+ MVPP2_RXTX_HASH_IS_OK_TX(skb, skb_get_hash_raw(skb)) &&
+ (aggr_txq->pending < min(MVPP2_TX_BULK_MAX_PACKETS,
+ (int)(txq->done_pkts_coal / 2)));
- if (txq_pcpu->count >= txq_pcpu->stop_threshold)
- netif_tx_stop_queue(nq);
+ if (deferred_tx) {
+ aggr_txq->pending += frags;
+ mvpp2_bulk_timer_restart(port_pcpu);
+ } else {
+ port_pcpu->bulk_timer_scheduled = false;
+ port_pcpu->bulk_timer_restart_req = false;
+ frags += aggr_txq->pending;
+ aggr_txq->pending = 0;
+ mvpp2_aggr_txq_pend_desc_add(port, frags);
+ }
+ if (unlikely(txq_pcpu->count >= txq_pcpu->stop_threshold)) {
+ nq = netdev_get_tx_queue(dev, txq_id);
+ /* txq_id may differ from thread/cpu and come from more
+ * than one txq_pcpu. Save only the first for wakeup.
+ */
+ if (unlikely(!netif_tx_queue_stopped(nq))) {
+ txq_pcpu->stopped_on_txq_id = txq_id;
+ netif_tx_stop_queue(nq);
+ }
+ }
u64_stats_update_begin(&stats->syncp);
stats->tx_packets++;
stats->tx_bytes += skb->len;
@@ -4074,12 +5337,7 @@ out:
txq_pcpu->count > 0) {
struct mvpp2_port_pcpu *port_pcpu = per_cpu_ptr(port->pcpu, thread);
- if (!port_pcpu->timer_scheduled) {
- port_pcpu->timer_scheduled = true;
- hrtimer_start(&port_pcpu->tx_done_timer,
- MVPP2_TXDONE_HRTIMER_PERIOD_NS,
- HRTIMER_MODE_REL_PINNED_SOFT);
- }
+ mvpp2_tx_done_timer_set(port_pcpu);
}
if (test_bit(thread, &port->priv->lock_map))
@@ -4088,23 +5346,12 @@ out:
return NETDEV_TX_OK;
}
-static inline void mvpp2_cause_error(struct net_device *dev, int cause)
-{
- if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
- netdev_err(dev, "FCS error\n");
- if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
- netdev_err(dev, "rx fifo overrun error\n");
- if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
- netdev_err(dev, "tx fifo underrun error\n");
-}
-
static int mvpp2_poll(struct napi_struct *napi, int budget)
{
- u32 cause_rx_tx, cause_rx, cause_tx, cause_misc;
+ u32 cause_rx_tx, cause_rx, cause_tx;
int rx_done = 0;
struct mvpp2_port *port = netdev_priv(napi->dev);
struct mvpp2_queue_vector *qv;
- unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
qv = container_of(napi, struct mvpp2_queue_vector, napi);
@@ -4121,20 +5368,11 @@ static int mvpp2_poll(struct napi_struct *napi, int budget)
cause_rx_tx = mvpp2_thread_read_relaxed(port->priv, qv->sw_thread_id,
MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
- cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
- if (cause_misc) {
- mvpp2_cause_error(port->dev, cause_misc);
-
- /* Clear the cause register */
- mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
- mvpp2_thread_write(port->priv, thread,
- MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
- cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
- }
-
if (port->has_tx_irqs) {
cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
if (cause_tx) {
+ per_cpu_ptr(port->pcpu,
+ qv->sw_thread_id)->tx_done_passed = true;
cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
}
@@ -4142,7 +5380,7 @@ static int mvpp2_poll(struct napi_struct *napi, int budget)
/* Process RX packets */
cause_rx = cause_rx_tx &
- MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version);
+ MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(mvpp21_variant);
cause_rx <<= qv->first_rxq;
cause_rx |= qv->pending_cause_rx;
while (cause_rx && budget > 0) {
@@ -4179,25 +5417,22 @@ static void mvpp22_mode_reconfigure(struct mvpp2_port *port)
{
u32 ctrl3;
- /* Set the GMAC & XLG MAC in reset */
- mvpp2_mac_reset_assert(port);
-
- /* Set the MPCS and XPCS in reset */
- mvpp22_pcs_reset_assert(port);
-
/* comphy reconfiguration */
mvpp22_comphy_init(port);
/* gop reconfiguration */
mvpp22_gop_init(port);
- mvpp22_pcs_reset_deassert(port);
+ if (port->phy_interface == PHY_INTERFACE_MODE_INTERNAL)
+ return;
- if (mvpp2_port_supports_xlg(port)) {
+ if (port->has_xlg_mac) {
ctrl3 = readl(port->base + MVPP22_XLG_CTRL3_REG);
ctrl3 &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
- if (mvpp2_is_xlg(port->phy_interface))
+ if (port->phy_interface == PHY_INTERFACE_MODE_RXAUI ||
+ port->phy_interface == PHY_INTERFACE_MODE_10GKR ||
+ port->phy_interface == PHY_INTERFACE_MODE_5GKR)
ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_10G;
else
ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
@@ -4205,7 +5440,10 @@ static void mvpp22_mode_reconfigure(struct mvpp2_port *port)
writel(ctrl3, port->base + MVPP22_XLG_CTRL3_REG);
}
- if (mvpp2_port_supports_xlg(port) && mvpp2_is_xlg(port->phy_interface))
+ if (port->has_xlg_mac &&
+ (port->phy_interface == PHY_INTERFACE_MODE_RXAUI ||
+ port->phy_interface == PHY_INTERFACE_MODE_10GKR ||
+ port->phy_interface == PHY_INTERFACE_MODE_5GKR))
mvpp2_xlg_max_rx_size_set(port);
else
mvpp2_gmac_max_rx_size_set(port);
@@ -4218,13 +5456,17 @@ static void mvpp2_start_dev(struct mvpp2_port *port)
mvpp2_txp_max_tx_size_set(port);
+ /* stop_dev() zeroes the pkts-coalescing; restore it here */
+ if (port->has_tx_irqs)
+ mvpp2_tx_pkts_coal_set(port);
+
for (i = 0; i < port->nqvecs; i++)
napi_enable(&port->qvecs[i].napi);
/* Enable interrupts on all threads */
mvpp2_interrupts_enable(port);
- if (port->priv->hw_version == MVPP22)
+ if (port->priv->hw_version != MVPP21)
mvpp22_mode_reconfigure(port);
if (port->phylink) {
@@ -4233,7 +5475,7 @@ static void mvpp2_start_dev(struct mvpp2_port *port)
mvpp2_acpi_start(port);
}
- netif_tx_start_all_queues(port->dev);
+ mvpp2_tx_start_all_queues(port->dev);
clear_bit(0, &port->state);
}
@@ -4243,6 +5485,21 @@ static void mvpp2_stop_dev(struct mvpp2_port *port)
{
int i;
+ /* Stop-dev is called by ifconfig but also by ethtool-features.
+ * Under active traffic the BM/RX and TX PP2-HW could be non-empty.
+ * Stop new packets arriving from both RX and TX directions asap,
+ * but do NOT disable the egress free/send-out and tx-done interrupts;
+ * yield and msleep this context for graceful finishing.
+ * Flush all tx-done by forcing pkts-coal to ZERO.
+ */
+ mvpp2_tx_stop_all_queues(port->dev);
+ mvpp2_ingress_disable(port);
+ if (port->has_tx_irqs)
+ on_each_cpu(mvpp2_tx_pkts_coal_set_zero_pcpu, port, 1);
+
+ msleep(40);
+ mvpp2_egress_disable(port);
+
set_bit(0, &port->state);
/* Disable interrupts on all threads */
@@ -4275,11 +5532,8 @@ static int mvpp2_check_ringparam_valid(struct net_device *dev,
else if (!IS_ALIGNED(ring->tx_pending, 32))
new_tx_pending = ALIGN(ring->tx_pending, 32);
- /* The Tx ring size cannot be smaller than the minimum number of
- * descriptors needed for TSO.
- */
- if (new_tx_pending < MVPP2_MAX_SKB_DESCS)
- new_tx_pending = ALIGN(MVPP2_MAX_SKB_DESCS, 32);
+ if (new_tx_pending < MVPP2_MIN_TXD(num_present_cpus()))
+ new_tx_pending = MVPP2_MIN_TXD(num_present_cpus());
if (ring->rx_pending != new_rx_pending) {
netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
@@ -4374,30 +5628,38 @@ static void mvpp2_irqs_deinit(struct mvpp2_port *port)
}
}
-static bool mvpp22_rss_is_supported(void)
+static bool mvpp22_rss_is_supported(struct mvpp2_port *port)
{
- return queue_mode == MVPP2_QDIST_MULTI_MODE;
+ return (queue_mode == MVPP2_QDIST_MULTI_MODE) &&
+ !(port->flags & MVPP2_F_LOOPBACK) &&
+ !(port->flags & MVPP22_F_IF_MUSDK);
}
static int mvpp2_open(struct net_device *dev)
{
struct mvpp2_port *port = netdev_priv(dev);
struct mvpp2 *priv = port->priv;
+ struct mvpp2_port_pcpu *port_pcpu;
unsigned char mac_bcast[ETH_ALEN] = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
bool valid = false;
- int err;
+ int err, cpu;
err = mvpp2_prs_mac_da_accept(port, mac_bcast, true);
if (err) {
netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
return err;
}
+
err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, true);
if (err) {
netdev_err(dev, "mvpp2_prs_mac_da_accept own addr failed\n");
return err;
}
+
+ if (port->flags & MVPP22_F_IF_MUSDK)
+ goto skip_musdk_parser;
+
err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
if (err) {
netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
@@ -4409,6 +5671,7 @@ static int mvpp2_open(struct net_device *dev)
return err;
}
+skip_musdk_parser:
/* Allocate the Rx/Tx queues */
err = mvpp2_setup_rxqs(port);
if (err) {
@@ -4422,6 +5685,11 @@ static int mvpp2_open(struct net_device *dev)
goto err_cleanup_rxqs;
}
+#ifndef MODULE
+ /* Recycle buffer pool for performance optimization */
+ mvpp2_recycle_open();
+#endif
+
err = mvpp2_irqs_init(port);
if (err) {
netdev_err(port->dev, "cannot init IRQs\n");
@@ -4440,7 +5708,9 @@ static int mvpp2_open(struct net_device *dev)
valid = true;
}
- if (priv->hw_version == MVPP22 && port->port_irq) {
+ if (priv->hw_version != MVPP21 && port->port_irq &&
+ (!port->phylink || !port->has_phy)) {
+ mvpp2_txqs_on_tasklet_init(port);
err = request_irq(port->port_irq, mvpp2_port_isr, 0,
dev->name, port);
if (err) {
@@ -4467,10 +5737,19 @@ static int mvpp2_open(struct net_device *dev)
goto err_free_irq;
}
+ /* Init bulk-transmit timer */
+ for_each_present_cpu(cpu) {
+ port_pcpu = per_cpu_ptr(port->pcpu, cpu);
+ port_pcpu->bulk_timer_scheduled = false;
+ port_pcpu->bulk_timer_restart_req = false;
+ }
+
/* Unmask interrupts on all CPUs */
on_each_cpu(mvpp2_interrupts_unmask, port, 1);
mvpp2_shared_interrupt_mask_unmask(port, false);
+ mvpp2_tx_done_init_on_open(port, true);
+
mvpp2_start_dev(port);
/* Start hardware statistics gathering */
@@ -4493,6 +5772,7 @@ static int mvpp2_stop(struct net_device *dev)
struct mvpp2_port *port = netdev_priv(dev);
struct mvpp2_port_pcpu *port_pcpu;
unsigned int thread;
+ int cpu;
mvpp2_stop_dev(port);
@@ -4511,16 +5791,25 @@ static int mvpp2_stop(struct net_device *dev)
port_pcpu = per_cpu_ptr(port->pcpu, thread);
hrtimer_cancel(&port_pcpu->tx_done_timer);
- port_pcpu->timer_scheduled = false;
+ port_pcpu->tx_done_timer_scheduled = false;
+ tasklet_kill(&port_pcpu->tx_done_tasklet);
}
}
+ /* Cancel bulk tasklet and timer */
+ for_each_present_cpu(cpu) {
+ port_pcpu = per_cpu_ptr(port->pcpu, cpu);
+ hrtimer_cancel(&port_pcpu->bulk_timer);
+ tasklet_kill(&port_pcpu->bulk_tasklet);
+ }
+ mvpp2_tx_done_init_on_open(port, false);
+ mvpp2_txqs_on_tasklet_kill(port);
mvpp2_cleanup_rxqs(port);
mvpp2_cleanup_txqs(port);
cancel_delayed_work_sync(&port->stats_work);
-
- mvpp2_mac_reset_assert(port);
- mvpp22_pcs_reset_assert(port);
+#ifndef MODULE
+ mvpp2_recycle_close();
+#endif
return 0;
}
@@ -4602,104 +5891,54 @@ static int mvpp2_set_mac_address(struct net_device *dev, void *p)
return err;
}
-/* Shut down all the ports, reconfigure the pools as percpu or shared,
- * then bring up again all ports.
- */
-static int mvpp2_bm_switch_buffers(struct mvpp2 *priv, bool percpu)
-{
- int numbufs = MVPP2_BM_POOLS_NUM, i;
- struct mvpp2_port *port = NULL;
- bool status[MVPP2_MAX_PORTS];
-
- for (i = 0; i < priv->port_count; i++) {
- port = priv->port_list[i];
- status[i] = netif_running(port->dev);
- if (status[i])
- mvpp2_stop(port->dev);
- }
-
- /* nrxqs is the same for all ports */
- if (priv->percpu_pools)
- numbufs = port->nrxqs * 2;
-
- for (i = 0; i < numbufs; i++)
- mvpp2_bm_pool_destroy(port->dev->dev.parent, priv, &priv->bm_pools[i]);
-
- devm_kfree(port->dev->dev.parent, priv->bm_pools);
- priv->percpu_pools = percpu;
- mvpp2_bm_init(port->dev->dev.parent, priv);
-
- for (i = 0; i < priv->port_count; i++) {
- port = priv->port_list[i];
- mvpp2_swf_bm_pool_init(port);
- if (status[i])
- mvpp2_open(port->dev);
- }
-
- return 0;
-}
-
static int mvpp2_change_mtu(struct net_device *dev, int mtu)
{
struct mvpp2_port *port = netdev_priv(dev);
- bool running = netif_running(dev);
- struct mvpp2 *priv = port->priv;
int err;
- if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
- netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
- ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
- mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
+ if (port->flags & MVPP22_F_IF_MUSDK) {
+ netdev_err(dev, "MTU cannot be modified in MUSDK mode\n");
+ return -EPERM;
}
- if (port->xdp_prog && mtu > MVPP2_MAX_RX_BUF_SIZE) {
- netdev_err(dev, "Illegal MTU value %d (> %d) for XDP mode\n",
- mtu, (int)MVPP2_MAX_RX_BUF_SIZE);
+ if (MVPP2_RX_PKT_SIZE(mtu) > MVPP2_BM_LONG_PKT_SIZE && port->xdp_prog) {
+ netdev_err(dev, "Jumbo frames are not supported with XDP\n");
return -EINVAL;
}
- if (MVPP2_RX_PKT_SIZE(mtu) > MVPP2_BM_LONG_PKT_SIZE) {
- if (priv->percpu_pools) {
- netdev_warn(dev, "mtu %d too high, switching to shared buffers", mtu);
- mvpp2_bm_switch_buffers(priv, false);
- }
- } else {
- bool jumbo = false;
- int i;
-
- for (i = 0; i < priv->port_count; i++)
- if (priv->port_list[i] != port &&
- MVPP2_RX_PKT_SIZE(priv->port_list[i]->dev->mtu) >
- MVPP2_BM_LONG_PKT_SIZE) {
- jumbo = true;
- break;
- }
-
- /* No port is using jumbo frames */
- if (!jumbo) {
- dev_info(port->dev->dev.parent,
- "all ports have a low MTU, switching to per-cpu buffers");
- mvpp2_bm_switch_buffers(priv, true);
+ if (!netif_running(dev)) {
+ err = mvpp2_bm_update_mtu(dev, mtu);
+ if (!err) {
+ port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
+ return 0;
}
+ /* Reconfigure BM to the original MTU */
+ err = mvpp2_bm_update_mtu(dev, dev->mtu);
+ if (err)
+ goto log_error;
}
- if (running)
- mvpp2_stop_dev(port);
+ mvpp2_stop_dev(port);
err = mvpp2_bm_update_mtu(dev, mtu);
- if (err) {
- netdev_err(dev, "failed to change MTU\n");
- /* Reconfigure BM to the original MTU */
- mvpp2_bm_update_mtu(dev, dev->mtu);
- } else {
+ if (!err) {
port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
+ goto out_start;
}
- if (running) {
- mvpp2_start_dev(port);
- mvpp2_egress_enable(port);
- mvpp2_ingress_enable(port);
- }
+ /* Reconfigure BM to the original MTU */
+ err = mvpp2_bm_update_mtu(dev, dev->mtu);
+ if (err)
+ goto log_error;
+
+out_start:
+ mvpp2_start_dev(port);
+ mvpp2_egress_enable(port);
+ mvpp2_ingress_enable(port);
+
+ return 0;
+log_error:
+ netdev_err(dev, "failed to change MTU\n");
return err;
}
@@ -4763,6 +6002,7 @@ mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->rx_errors = dev->stats.rx_errors;
stats->rx_dropped = dev->stats.rx_dropped;
stats->tx_dropped = dev->stats.tx_dropped;
+ stats->collisions = dev->stats.collisions;
}
static int mvpp2_set_ts_config(struct mvpp2_port *port, struct ifreq *ifr)
@@ -4930,9 +6170,9 @@ static int mvpp2_set_features(struct net_device *dev,
if (changed & NETIF_F_RXHASH) {
if (features & NETIF_F_RXHASH)
- mvpp22_port_rss_enable(port);
+ mvpp22_rss_enable(port);
else
- mvpp22_port_rss_disable(port);
+ mvpp22_rss_disable(port);
}
return 0;
@@ -5010,6 +6250,7 @@ static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
struct ethtool_coalesce *c)
{
struct mvpp2_port *port = netdev_priv(dev);
+ struct mvpp2_tx_queue *txq;
int queue;
for (queue = 0; queue < port->nrxqs; queue++) {
@@ -5021,18 +6262,22 @@ static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
mvpp2_rx_time_coal_set(port, rxq);
}
- if (port->has_tx_irqs) {
+ /* Set TX time and pkts coalescing configuration */
+ if (port->has_tx_irqs)
port->tx_time_coal = c->tx_coalesce_usecs;
- mvpp2_tx_time_coal_set(port);
- }
for (queue = 0; queue < port->ntxqs; queue++) {
- struct mvpp2_tx_queue *txq = port->txqs[queue];
-
+ txq = port->txqs[queue];
txq->done_pkts_coal = c->tx_max_coalesced_frames;
+ if (port->has_tx_irqs &&
+ txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK)
+ txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK;
+ }
- if (port->has_tx_irqs)
- mvpp2_tx_pkts_coal_set(port, txq);
+ if (port->has_tx_irqs) {
+ /* Download configured values into MVPP2 HW */
+ mvpp2_tx_time_coal_set(port);
+ mvpp2_tx_pkts_coal_set(port);
}
return 0;
@@ -5054,12 +6299,16 @@ static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
+ struct mvpp2_port *port = netdev_priv(dev);
+
strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
sizeof(drvinfo->driver));
strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
sizeof(drvinfo->version));
strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
sizeof(drvinfo->bus_info));
+ drvinfo->n_priv_flags = (port->priv->hw_version == MVPP21) ?
+ 0 : ARRAY_SIZE(mvpp22_priv_flags_strings);
}
static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
@@ -5085,6 +6334,15 @@ static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
if (err)
return err;
+ if (ring->rx_pending < MSS_THRESHOLD_START && port->tx_fc) {
+ netdev_warn(dev, "TX FC disabled. Ring size is less than %d\n",
+ MSS_THRESHOLD_START);
+ port->tx_fc = false;
+ mvpp2_rxq_disable_fc(port);
+ if (port->priv->hw_version == MVPP23)
+ mvpp23_rx_fifo_fc_en(port->priv, port->id, false);
+ }
+
if (!netif_running(dev)) {
port->rx_ring_size = ring->rx_pending;
port->tx_ring_size = ring->tx_pending;
@@ -5144,11 +6402,52 @@ static void mvpp2_ethtool_get_pause_param(struct net_device *dev,
phylink_ethtool_get_pauseparam(port->phylink, pause);
}
+static void mvpp2_reconfigure_fc(struct mvpp2_port *port)
+{
+ struct mvpp2_bm_pool **pools_pcpu = port->priv->pools_pcpu;
+ int cpu;
+
+ if (recycle) {
+ for_each_present_cpu(cpu)
+ mvpp2_bm_pool_update_fc(port, pools_pcpu[cpu],
+ port->tx_fc);
+ if (port->pool_long->type == MVPP2_BM_JUMBO)
+ mvpp2_bm_pool_update_fc(port,
+ port->pool_long, port->tx_fc);
+ else
+ mvpp2_bm_pool_update_fc(port,
+ port->pool_short, port->tx_fc);
+ } else {
+ mvpp2_bm_pool_update_fc(port, port->pool_long, port->tx_fc);
+ mvpp2_bm_pool_update_fc(port, port->pool_short, port->tx_fc);
+ }
+ if (port->priv->hw_version == MVPP23)
+ mvpp23_rx_fifo_fc_en(port->priv, port->id, port->tx_fc);
+}
+
static int mvpp2_ethtool_set_pause_param(struct net_device *dev,
struct ethtool_pauseparam *pause)
{
struct mvpp2_port *port = netdev_priv(dev);
+ if (pause->tx_pause && port->priv->global_tx_fc &&
+ bm_underrun_protect) {
+ if (port->rx_ring_size < MSS_THRESHOLD_START) {
+ netdev_err(dev, "TX FC cannot be supported.");
+ netdev_err(dev, "Ring size is less than %d\n",
+ MSS_THRESHOLD_START);
+ return -EINVAL;
+ }
+
+ port->tx_fc = true;
+ mvpp2_rxq_enable_fc(port);
+ mvpp2_reconfigure_fc(port);
+ } else if (port->priv->global_tx_fc) {
+ port->tx_fc = false;
+ mvpp2_rxq_disable_fc(port);
+ mvpp2_reconfigure_fc(port);
+ }
+
if (!port->phylink)
return -ENOTSUPP;
@@ -5181,9 +6480,9 @@ static int mvpp2_ethtool_get_rxnfc(struct net_device *dev,
struct ethtool_rxnfc *info, u32 *rules)
{
struct mvpp2_port *port = netdev_priv(dev);
- int ret = 0, i, loc = 0;
+ int ret = 0;
- if (!mvpp22_rss_is_supported())
+ if (!mvpp22_rss_is_supported(port))
return -EOPNOTSUPP;
switch (info->cmd) {
@@ -5193,18 +6492,6 @@ static int mvpp2_ethtool_get_rxnfc(struct net_device *dev,
case ETHTOOL_GRXRINGS:
info->data = port->nrxqs;
break;
- case ETHTOOL_GRXCLSRLCNT:
- info->rule_cnt = port->n_rfs_rules;
- break;
- case ETHTOOL_GRXCLSRULE:
- ret = mvpp2_ethtool_cls_rule_get(port, info);
- break;
- case ETHTOOL_GRXCLSRLALL:
- for (i = 0; i < MVPP2_N_RFS_ENTRIES_PER_FLOW; i++) {
- if (port->rfs_rules[i])
- rules[loc++] = i;
- }
- break;
default:
return -ENOTSUPP;
}
@@ -5218,19 +6505,13 @@ static int mvpp2_ethtool_set_rxnfc(struct net_device *dev,
struct mvpp2_port *port = netdev_priv(dev);
int ret = 0;
- if (!mvpp22_rss_is_supported())
+ if (!mvpp22_rss_is_supported(port))
return -EOPNOTSUPP;
switch (info->cmd) {
case ETHTOOL_SRXFH:
ret = mvpp2_ethtool_rxfh_set(port, info);
break;
- case ETHTOOL_SRXCLSRLINS:
- ret = mvpp2_ethtool_cls_rule_ins(port, info);
- break;
- case ETHTOOL_SRXCLSRLDEL:
- ret = mvpp2_ethtool_cls_rule_del(port, info);
- break;
default:
return -EOPNOTSUPP;
}
@@ -5239,34 +6520,136 @@ static int mvpp2_ethtool_set_rxnfc(struct net_device *dev,
static u32 mvpp2_ethtool_get_rxfh_indir_size(struct net_device *dev)
{
- return mvpp22_rss_is_supported() ? MVPP22_RSS_TABLE_ENTRIES : 0;
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ return mvpp22_rss_is_supported(port) ? MVPP22_RSS_TABLE_ENTRIES : 0;
}
static int mvpp2_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
u8 *hfunc)
{
struct mvpp2_port *port = netdev_priv(dev);
- int ret = 0;
- if (!mvpp22_rss_is_supported())
+ if (!mvpp22_rss_is_supported(port))
return -EOPNOTSUPP;
if (indir)
- ret = mvpp22_port_rss_ctx_indir_get(port, 0, indir);
+ memcpy(indir, port->indir,
+ ARRAY_SIZE(port->indir) * sizeof(port->indir[0]));
if (hfunc)
*hfunc = ETH_RSS_HASH_CRC32;
- return ret;
+ return 0;
+}
+
+/* RSS API */
+
+/* Translate CPU sequence number to real CPU ID */
+static int mvpp22_cpu_id_from_indir_tbl_get(struct mvpp2 *pp2,
+ int cpu_seq, u32 *cpu_id)
+{
+ int i;
+ int seq = 0;
+
+ if (!pp2 || !cpu_id || cpu_seq >= 16)
+ return -EINVAL;
+
+ for (i = 0; i < 16; i++) {
+ if (pp2->cpu_map & (1 << i)) {
+ if (seq == cpu_seq) {
+ *cpu_id = i;
+ return 0;
+ }
+ seq++;
+ }
+ }
+
+ return -1;
+}
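/* Editor's sketch (not part of the patch): usage of the sequence-to-CPU-id
 * translation above.  The n-th set bit of cpu_map (counting from bit 0) is
 * the physical CPU id for indirection-table value n.
 */
#include <stdio.h>

static int cpu_id_from_seq(unsigned int cpu_map, int cpu_seq)
{
	int i, seq = 0;

	for (i = 0; i < 16; i++) {
		if (cpu_map & (1u << i)) {
			if (seq == cpu_seq)
				return i;
			seq++;
		}
	}
	return -1;
}

int main(void)
{
	/* cpu_map 0xD == 0b1101: CPUs 0, 2 and 3 are online, so sequence
	 * numbers 0, 1, 2 translate to CPU ids 0, 2, 3.
	 */
	printf("%d %d %d\n", cpu_id_from_seq(0xD, 0),
	       cpu_id_from_seq(0xD, 1), cpu_id_from_seq(0xD, 2));
	return 0;
}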
+
+/* RSS */
+/* Set an RSS table entry */
+int mvpp22_rss_tbl_entry_set(struct mvpp2 *hw, struct mvpp2_rss_tbl_entry entry)
+{
+ unsigned int reg_val = 0;
+
+ if (entry.tbl_id >= MVPP22_RSS_TBL_NUM ||
+ entry.tbl_line >= MVPP22_RSS_TABLE_ENTRIES ||
+ entry.width >= MVPP22_RSS_WIDTH_MAX)
+ return -EINVAL;
+ /* Write index */
+ reg_val |= (entry.tbl_line << MVPP22_RSS_IDX_ENTRY_NUM_OFF |
+ entry.tbl_id << MVPP22_RSS_IDX_TBL_NUM_OFF);
+ mvpp2_write(hw, MVPP22_RSS_INDEX, reg_val);
+ /* Write entry */
+ reg_val &= (~MVPP22_RSS_TBL_ENTRY_MASK);
+ reg_val |= (entry.rxq << MVPP22_RSS_TBL_ENTRY_OFF);
+ mvpp2_write(hw, MVPP22_RSS_TABLE_ENTRY, reg_val);
+ reg_val &= (~MVPP22_RSS_WIDTH_MASK);
+ reg_val |= (entry.width << MVPP22_RSS_WIDTH_OFF);
+ mvpp2_write(hw, MVPP22_RSS_WIDTH, reg_val);
+
+ return 0;
+}
+
+static u32 mvpp2_get_cpu_width(struct mvpp2_port *port)
+{
+ return ilog2(roundup_pow_of_two(num_online_cpus()));
+}
+
+u32 mvpp2_get_tc_width(struct mvpp2_port *port)
+{
+ return ilog2(roundup_pow_of_two(port->num_tc_queues));
+}
+
+int mvpp22_rss_fill_table_per_tc(struct mvpp2_port *port)
+{
+ struct mvpp2_rss_tbl_entry rss_entry;
+ int rss_tbl, entry_idx;
+ u32 tc_width = 0, cpu_width = 0, cpu_id = 0;
+ int rss_tbl_needed = port->num_tc_queues;
+
+ if (queue_mode == MVPP2_QDIST_SINGLE_MODE)
+ return -1;
+
+ memset(&rss_entry, 0, sizeof(rss_entry));
+
+ if (!port->priv->cpu_map)
+ return -1;
+
+ /* Calculate cpu and tc width */
+ cpu_width = mvpp2_get_cpu_width(port);
+ tc_width = mvpp2_get_tc_width(port);
+
+ rss_entry.width = tc_width + cpu_width;
+
+ for (rss_tbl = 0; rss_tbl < rss_tbl_needed; rss_tbl++) {
+ for (entry_idx = 0; entry_idx < MVPP22_RSS_TABLE_ENTRIES;
+ entry_idx++) {
+ rss_entry.tbl_id = rss_tbl;
+ rss_entry.tbl_line = entry_idx;
+ if (mvpp22_cpu_id_from_indir_tbl_get(port->priv,
+ port->indir[entry_idx],
+ &cpu_id))
+ return -1;
+ /* The rss_tbl index equals the tc queue */
+ rss_entry.rxq = (cpu_id << tc_width) |
+ rss_tbl;
+ if (mvpp22_rss_tbl_entry_set(port->priv, rss_entry))
+ return -1;
+ }
+ }
+
+ return 0;
}
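/* Editor's sketch (not part of the patch): the per-TC rxq encoding used in
 * mvpp22_rss_fill_table_per_tc() above.  Each RSS table serves one traffic
 * class; the physical rxq is the CPU id shifted left by the TC width, with
 * the TC (== table index) in the low bits.
 */
#include <stdio.h>

/* Equivalent of ilog2(roundup_pow_of_two(n)) for n >= 1, i.e. ceil(log2(n)) */
static unsigned int field_width(unsigned int n)
{
	unsigned int w = 0;

	while ((1u << w) < n)
		w++;
	return w;
}

int main(void)
{
	unsigned int num_tc = 4, tc_width = field_width(num_tc);
	unsigned int cpu_id = 3, tc = 2;

	/* With 4 TCs the width is 2 bits, so CPU 3 / TC 2 maps to
	 * rxq (3 << 2) | 2 = 14.
	 */
	printf("rxq = %u\n", (cpu_id << tc_width) | tc);
	return 0;
}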
static int mvpp2_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
const u8 *key, const u8 hfunc)
{
struct mvpp2_port *port = netdev_priv(dev);
- int ret = 0;
- if (!mvpp22_rss_is_supported())
+ if (!mvpp22_rss_is_supported(port))
return -EOPNOTSUPP;
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32)
@@ -5275,60 +6658,142 @@ static int mvpp2_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
if (key)
return -EOPNOTSUPP;
- if (indir)
- ret = mvpp22_port_rss_ctx_indir_set(port, 0, indir);
+ if (indir) {
+ memcpy(port->indir, indir,
+ ARRAY_SIZE(port->indir) * sizeof(port->indir[0]));
+ if (port->num_tc_queues > 1)
+ mvpp22_rss_fill_table_per_tc(port);
+ else
+ mvpp22_rss_fill_table(port, port->id);
+ }
- return ret;
+ return 0;
}
-static int mvpp2_ethtool_get_rxfh_context(struct net_device *dev, u32 *indir,
- u8 *key, u8 *hfunc, u32 rss_context)
+static u32 mvpp22_get_priv_flags(struct net_device *dev)
{
struct mvpp2_port *port = netdev_priv(dev);
- int ret = 0;
+ u32 priv_flags = 0;
- if (!mvpp22_rss_is_supported())
- return -EOPNOTSUPP;
- if (rss_context >= MVPP22_N_RSS_TABLES)
- return -EINVAL;
+ if (port->flags & MVPP22_F_IF_MUSDK)
+ priv_flags |= MVPP22_F_IF_MUSDK_PRIV;
+ return priv_flags;
+}
- if (hfunc)
- *hfunc = ETH_RSS_HASH_CRC32;
+static int mvpp2_port_musdk_cfg(struct net_device *dev, bool ena)
+{
+ struct mvpp2_port_us_cfg {
+ unsigned int nqvecs;
+ unsigned int nrxqs;
+ unsigned int ntxqs;
+ int mtu;
+ bool rxhash_en;
+ u8 rss_en;
+ } *us;
- if (indir)
- ret = mvpp22_port_rss_ctx_indir_get(port, rss_context, indir);
+ struct mvpp2_port *port = netdev_priv(dev);
+ int rxq;
- return ret;
+ if (ena) {
+ /* Disable Queues and IntVec allocations for MUSDK,
+ * but save original values.
+ */
+ us = kzalloc(sizeof(*us), GFP_KERNEL);
+ if (!us)
+ return -ENOMEM;
+ port->us_cfg = (void *)us;
+ us->nqvecs = port->nqvecs;
+ us->nrxqs = port->nrxqs;
+ us->ntxqs = port->ntxqs;
+ us->mtu = dev->mtu;
+ us->rxhash_en = !!(dev->hw_features & NETIF_F_RXHASH);
+
+ port->nqvecs = 0;
+ port->nrxqs = 0;
+ port->ntxqs = 0;
+ if (us->rxhash_en) {
+ dev->hw_features &= ~NETIF_F_RXHASH;
+ netdev_update_features(dev);
+ }
+ } else {
+ /* Back to Kernel mode */
+ us = port->us_cfg;
+ port->nqvecs = us->nqvecs;
+ port->nrxqs = us->nrxqs;
+ port->ntxqs = us->ntxqs;
+ if (us->rxhash_en) {
+ dev->hw_features |= NETIF_F_RXHASH;
+ netdev_update_features(dev);
+ }
+ kfree(us);
+ port->us_cfg = NULL;
+
+ /* Restore RxQ/pool association */
+ for (rxq = 0; rxq < port->nrxqs; rxq++) {
+ mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
+ mvpp2_rxq_short_pool_set(port, rxq,
+ port->pool_short->id);
+ }
+ }
+ return 0;
}
-static int mvpp2_ethtool_set_rxfh_context(struct net_device *dev,
- const u32 *indir, const u8 *key,
- const u8 hfunc, u32 *rss_context,
- bool delete)
+static int mvpp2_port_musdk_set(struct net_device *dev, bool ena)
{
struct mvpp2_port *port = netdev_priv(dev);
- int ret;
+ bool running = netif_running(dev);
+ int err;
- if (!mvpp22_rss_is_supported())
- return -EOPNOTSUPP;
+ /* This procedure is called on an ethtool change or on module remove.
+ * For "remove", act only if we are in MUSDK mode and toggling back
+ * to kernel mode is really required.
+ */
+ if (!ena && !port->us_cfg)
+ return 0;
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32)
- return -EOPNOTSUPP;
+ if (running)
+ mvpp2_stop(dev);
- if (key)
- return -EOPNOTSUPP;
+ if (ena) {
+ err = mvpp2_port_musdk_cfg(dev, ena);
+ port->flags |= MVPP22_F_IF_MUSDK;
+ } else {
+ err = mvpp2_port_musdk_cfg(dev, ena);
+ port->flags &= ~MVPP22_F_IF_MUSDK;
+ }
- if (delete)
- return mvpp22_port_rss_ctx_delete(port, *rss_context);
+ if (err) {
+ netdev_err(dev, "musdk set=%d: error=%d\n", ena, err);
+ return err;
+ }
- if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
- ret = mvpp22_port_rss_ctx_create(port, rss_context);
- if (ret)
- return ret;
+ if (running)
+ mvpp2_open(dev);
+
+ return 0;
+}
+
+static int mvpp22_set_priv_flags(struct net_device *dev, u32 priv_flags)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ bool f_old, f_new;
+ int err = 0;
+
+ if (recycle && (priv_flags & MVPP22_F_IF_MUSDK_PRIV)) {
+ WARN(1, "Failed to enable MUSDK: KS recycling feature is enabled.");
+ return -EOPNOTSUPP;
}
- return mvpp22_port_rss_ctx_indir_set(port, *rss_context, indir);
+ f_old = port->flags & MVPP22_F_IF_MUSDK;
+ f_new = priv_flags & MVPP22_F_IF_MUSDK_PRIV;
+ if (f_old != f_new)
+ err = mvpp2_port_musdk_set(dev, f_new);
+
+ return err;
}
+
/* Device ops */
static const struct net_device_ops mvpp2_netdev_ops = {
@@ -5370,8 +6835,8 @@ static const struct ethtool_ops mvpp2_eth_tool_ops = {
.get_rxfh_indir_size = mvpp2_ethtool_get_rxfh_indir_size,
.get_rxfh = mvpp2_ethtool_get_rxfh,
.set_rxfh = mvpp2_ethtool_set_rxfh,
- .get_rxfh_context = mvpp2_ethtool_get_rxfh_context,
- .set_rxfh_context = mvpp2_ethtool_set_rxfh_context,
+ .get_priv_flags = mvpp22_get_priv_flags,
+ .set_priv_flags = mvpp22_set_priv_flags,
};
/* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
@@ -5431,8 +6896,8 @@ static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
snprintf(irqname, sizeof(irqname), "hif%d", i);
if (queue_mode == MVPP2_QDIST_MULTI_MODE) {
- v->first_rxq = i;
- v->nrxqs = 1;
+ v->first_rxq = port->num_tc_queues * i;
+ v->nrxqs = port->num_tc_queues;
} else if (queue_mode == MVPP2_QDIST_SINGLE_MODE &&
i == (port->nqvecs - 1)) {
v->first_rxq = 0;
@@ -5494,7 +6959,7 @@ static void mvpp2_rx_irqs_setup(struct mvpp2_port *port)
return;
}
- /* Handle the more complicated PPv2.2 case */
+ /* Handle the more complicated PPv2.2 and PPv2.3 case */
for (i = 0; i < port->nqvecs; i++) {
struct mvpp2_queue_vector *qv = port->qvecs + i;
@@ -5518,7 +6983,7 @@ static int mvpp2_port_init(struct mvpp2_port *port)
struct mvpp2 *priv = port->priv;
struct mvpp2_txq_pcpu *txq_pcpu;
unsigned int thread;
- int queue, err, val;
+ int queue, err;
/* Checks for hardware constraints */
if (port->first_rxq + port->nrxqs >
@@ -5532,18 +6997,6 @@ static int mvpp2_port_init(struct mvpp2_port *port)
mvpp2_egress_disable(port);
mvpp2_port_disable(port);
- if (mvpp2_is_xlg(port->phy_interface)) {
- val = readl(port->base + MVPP22_XLG_CTRL0_REG);
- val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
- val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN;
- writel(val, port->base + MVPP22_XLG_CTRL0_REG);
- } else {
- val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
- val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
- val |= MVPP2_GMAC_FORCE_LINK_DOWN;
- writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
- }
-
port->tx_time_coal = MVPP2_TXDONE_COAL_USEC;
port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs),
@@ -5601,7 +7054,7 @@ static int mvpp2_port_init(struct mvpp2_port *port)
/* Map this Rx queue to a physical queue */
rxq->id = port->first_rxq + queue;
rxq->port = port->id;
- rxq->logic_rxq = queue;
+ rxq->logic_rxq = (u8)queue;
port->rxqs[queue] = rxq;
}
@@ -5626,22 +7079,20 @@ static int mvpp2_port_init(struct mvpp2_port *port)
mvpp2_cls_oversize_rxq_set(port);
mvpp2_cls_port_config(port);
- if (mvpp22_rss_is_supported())
- mvpp22_port_rss_init(port);
+ if (mvpp22_rss_is_supported(port))
+ mvpp22_rss_port_init(port);
/* Provide an initial Rx packet size */
port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);
/* Initialize pools for swf */
- err = mvpp2_swf_bm_pool_init(port);
+ if (recycle)
+ err = mvpp2_swf_bm_pool_pcpu_init(port);
+ else
+ err = mvpp2_swf_bm_pool_init(port);
if (err)
goto err_free_percpu;
- /* Clear all port stats */
- mvpp2_read_stats(port);
- memset(port->ethtool_stats, 0,
- MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs) * sizeof(u64));
-
return 0;
err_free_percpu:
@@ -5671,7 +7122,7 @@ static bool mvpp22_port_has_legacy_tx_irqs(struct device_node *port_node,
/* Checks if the port dt description has the required Tx interrupts:
* - PPv2.1: there are no such interrupts.
- * - PPv2.2:
+ * - PPv2.2 and PPv2.3:
* - The old DTs have: "rx-shared", "tx-cpuX" with X in [0...3]
* - The new ones have: "hifX" with X in [0..8]
*
@@ -5736,6 +7187,18 @@ static struct mvpp2_port *mvpp2_phylink_to_port(struct phylink_config *config)
return container_of(config, struct mvpp2_port, phylink_config);
}
+static void mvpp2_get_interface_by_speed(struct phylink_link_state *state)
+{
+ if (state->speed == SPEED_1000)
+ state->interface = PHY_INTERFACE_MODE_1000BASEX;
+ else if (state->speed == SPEED_2500)
+ state->interface = PHY_INTERFACE_MODE_2500BASEX;
+ else if (state->speed == SPEED_5000)
+ state->interface = PHY_INTERFACE_MODE_5GKR;
+ else if (state->speed == SPEED_10000)
+ state->interface = PHY_INTERFACE_MODE_10GKR;
+}
+
static struct mvpp2_port *mvpp2_pcs_to_port(struct phylink_pcs *pcs)
{
return container_of(pcs, struct mvpp2_port, phylink_pcs);
@@ -5747,7 +7210,11 @@ static void mvpp2_xlg_pcs_get_state(struct phylink_pcs *pcs,
struct mvpp2_port *port = mvpp2_pcs_to_port(pcs);
u32 val;
- state->speed = SPEED_10000;
+ if (state->interface == PHY_INTERFACE_MODE_5GKR)
+ state->speed = SPEED_5000;
+ else
+ state->speed = SPEED_10000;
+
state->duplex = 1;
state->an_complete = 1;
@@ -5793,6 +7260,7 @@ static void mvpp2_gmac_pcs_get_state(struct phylink_pcs *pcs,
state->speed = SPEED_1000;
break;
case PHY_INTERFACE_MODE_2500BASEX:
+ case PHY_INTERFACE_MODE_2500BASET:
state->speed = SPEED_2500;
break;
default:
@@ -5870,7 +7338,8 @@ static int mvpp2_gmac_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
static void mvpp2_gmac_pcs_an_restart(struct phylink_pcs *pcs)
{
- struct mvpp2_port *port = mvpp2_pcs_to_port(pcs);
+ struct net_device *dev = to_net_dev(config->dev);
+ struct mvpp2_port *port = netdev_priv(dev);
u32 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
writel(val | MVPP2_GMAC_IN_BAND_RESTART_AN,
@@ -5889,21 +7358,35 @@ static void mvpp2_phylink_validate(struct phylink_config *config,
unsigned long *supported,
struct phylink_link_state *state)
{
- struct mvpp2_port *port = mvpp2_phylink_to_port(config);
+ struct net_device *dev = to_net_dev(config->dev);
+ struct mvpp2_port *port = netdev_priv(dev);
__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+ if (!port->phy_exist)
+ mvpp2_get_interface_by_speed(state);
+
/* Invalid combinations */
switch (state->interface) {
case PHY_INTERFACE_MODE_10GBASER:
- case PHY_INTERFACE_MODE_XAUI:
+ case PHY_INTERFACE_MODE_5GKR:
+ case PHY_INTERFACE_MODE_INTERNAL:
+ if (!port->has_xlg_mac)
+ goto empty_set;
+ break;
+ case PHY_INTERFACE_MODE_RXAUI:
if (!mvpp2_port_supports_xlg(port))
goto empty_set;
break;
+ case PHY_INTERFACE_MODE_MII:
+ if (port->gop_id == 2)
+ goto empty_set;
+ /* Fall-through */
+ case PHY_INTERFACE_MODE_GMII:
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_TXID:
- if (!mvpp2_port_supports_rgmii(port))
+ if (port->priv->hw_version != MVPP21 && port->gop_id == 0)
goto empty_set;
break;
default:
@@ -5913,11 +7396,15 @@ static void mvpp2_phylink_validate(struct phylink_config *config,
phylink_set(mask, Autoneg);
phylink_set_port_modes(mask);
- switch (state->interface) {
+ phylink_set(mask, Pause);
+ phylink_set(mask, Asym_Pause);
+
+ switch (port->of_phy_interface) {
case PHY_INTERFACE_MODE_10GBASER:
- case PHY_INTERFACE_MODE_XAUI:
+ case PHY_INTERFACE_MODE_RXAUI:
case PHY_INTERFACE_MODE_NA:
- if (mvpp2_port_supports_xlg(port)) {
+ case PHY_INTERFACE_MODE_INTERNAL:
+ if (port->has_xlg_mac) {
phylink_set(mask, 10000baseT_Full);
phylink_set(mask, 10000baseCR_Full);
phylink_set(mask, 10000baseSR_Full);
@@ -5929,38 +7416,50 @@ static void mvpp2_phylink_validate(struct phylink_config *config,
if (state->interface != PHY_INTERFACE_MODE_NA)
break;
fallthrough;
+ case PHY_INTERFACE_MODE_5GKR:
+ if (port->has_xlg_mac)
+ phylink_set(mask, 5000baseT_Full);
+ if (!port->phy_exist) {
+ phylink_set(mask, 2500baseX_Full);
+ phylink_set(mask, 1000baseX_Full);
+ break;
+ }
+ /* Fall-through */
+ case PHY_INTERFACE_MODE_2500BASET:
+ phylink_set(mask, 2500baseT_Full);
+ /* Fall-through */
+ case PHY_INTERFACE_MODE_GMII:
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_TXID:
case PHY_INTERFACE_MODE_SGMII:
+ phylink_set(mask, 1000baseT_Full);
+ /* Fall-through */
+ case PHY_INTERFACE_MODE_MII:
phylink_set(mask, 10baseT_Half);
phylink_set(mask, 10baseT_Full);
phylink_set(mask, 100baseT_Half);
phylink_set(mask, 100baseT_Full);
- phylink_set(mask, 1000baseT_Full);
phylink_set(mask, 1000baseX_Full);
if (state->interface != PHY_INTERFACE_MODE_NA)
break;
- fallthrough;
- case PHY_INTERFACE_MODE_1000BASEX:
+ phylink_set(mask, 1000baseT_Full);
+ break;
case PHY_INTERFACE_MODE_2500BASEX:
if (port->comphy ||
- state->interface != PHY_INTERFACE_MODE_2500BASEX) {
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
- }
- if (port->comphy ||
state->interface == PHY_INTERFACE_MODE_2500BASEX) {
- phylink_set(mask, 2500baseT_Full);
phylink_set(mask, 2500baseX_Full);
}
+ /* Fall-through */
+ case PHY_INTERFACE_MODE_1000BASEX:
+ phylink_set(mask, 1000baseX_Full);
break;
default:
goto empty_set;
}
- bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ bitmap_copy(supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
bitmap_and(state->advertising, state->advertising, mask,
__ETHTOOL_LINK_MODE_MASK_NBITS);
@@ -5979,11 +7478,11 @@ static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode,
mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
MVPP22_XLG_CTRL0_MAC_RESET_DIS,
MVPP22_XLG_CTRL0_MAC_RESET_DIS);
- mvpp2_modify(port->base + MVPP22_XLG_CTRL4_REG,
- MVPP22_XLG_CTRL4_MACMODSELECT_GMAC |
- MVPP22_XLG_CTRL4_EN_IDLE_CHECK |
- MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC,
- MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC);
+
+ if (state->interface == PHY_INTERFACE_MODE_RXAUI)
+ mvpp2_modify(port->base + MVPP22_XLG_CTRL4_REG,
+ MVPP22_XLG_CTRL4_MACMODSELECT_GMAC,
+ MVPP22_XLG_CTRL4_USE_XPCS);
/* Wait for reset to deassert */
do {
@@ -6012,19 +7511,23 @@ static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS |
MVPP22_CTRL4_DP_CLK_SEL |
MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
- } else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
+ } else if (state->interface == PHY_INTERFACE_MODE_SGMII ||
+ state->interface == PHY_INTERFACE_MODE_2500BASET) {
ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK | MVPP2_GMAC_INBAND_AN_MASK;
ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS |
MVPP22_CTRL4_DP_CLK_SEL |
MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
- } else if (phy_interface_mode_is_rgmii(state->interface)) {
+ } else if ((phy_interface_mode_is_rgmii(state->interface)) ||
+ (state->interface == PHY_INTERFACE_MODE_MII)) {
ctrl4 &= ~MVPP22_CTRL4_DP_CLK_SEL;
ctrl4 |= MVPP22_CTRL4_EXT_PIN_GMII_SEL |
MVPP22_CTRL4_SYNC_BYPASS_DIS |
MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
}
+ ctrl4 &= ~(MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN);
+
/* Configure negotiation style */
if (!phylink_autoneg_inband(mode)) {
/* Phy or fixed speed - no in-band AN, nothing to do, leave the
@@ -6054,12 +7557,27 @@ static int mvpp2__mac_prepare(struct phylink_config *config, unsigned int mode,
phy_interface_t interface)
{
struct mvpp2_port *port = mvpp2_phylink_to_port(config);
+ struct net_device *dev = port->dev;
/* Check for invalid configuration */
- if (mvpp2_is_xlg(interface) && port->gop_id != 0) {
- netdev_err(port->dev, "Invalid mode on %s\n", port->dev->name);
- return -EINVAL;
- }
+ switch (interface) {
+ case PHY_INTERFACE_MODE_10GKR:
+ case PHY_INTERFACE_MODE_5GKR:
+ if (!port->has_xlg_mac) {
+ netdev_err(dev, "Invalid mode %s on %s\n",
+ phy_modes(interface), dev->name);
+ return -EINVAL;
+ }
+ break;
+ case PHY_INTERFACE_MODE_RXAUI:
+ if (port->id != 0) {
+ netdev_err(dev, "Invalid mode %s on %s\n",
+ phy_modes(interface), dev->name);
+ return -EINVAL;
+ }
+ break;
+ default:
+ break;
+ }
if (port->phy_interface != interface ||
phylink_autoneg_inband(mode)) {
@@ -6082,7 +7600,10 @@ static int mvpp2__mac_prepare(struct phylink_config *config, unsigned int mode,
}
/* Make sure the port is disabled when reconfiguring the mode */
- mvpp2_port_disable(port);
+ if (port->priv->hw_version != MVPP21 && change_interface) {
+ /* Make sure the port is disabled when reconfiguring the mode */
+ mvpp2_tx_stop_all_queues(port->dev);
+ mvpp2_port_disable(port);
if (port->phy_interface != interface) {
/* Place GMAC into reset */
@@ -6090,11 +7611,12 @@ static int mvpp2__mac_prepare(struct phylink_config *config, unsigned int mode,
MVPP2_GMAC_PORT_RESET_MASK,
MVPP2_GMAC_PORT_RESET_MASK);
- if (port->priv->hw_version == MVPP22) {
mvpp22_gop_mask_irq(port);
phy_power_off(port->comphy);
- }
+
+ mvpp2_tx_wake_all_queues(dev);
+ mvpp2_port_enable(port);
}
/* Select the appropriate PCS operations depending on the
@@ -6128,12 +7650,14 @@ static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode,
struct mvpp2_port *port = mvpp2_phylink_to_port(config);
/* mac (re)configuration */
- if (mvpp2_is_xlg(state->interface))
+ if (state->interface == PHY_INTERFACE_MODE_RXAUI ||
+ state->interface == PHY_INTERFACE_MODE_10GKR ||
+ state->interface == PHY_INTERFACE_MODE_5GKR) {
mvpp2_xlg_config(port, mode, state);
- else if (phy_interface_mode_is_rgmii(state->interface) ||
- phy_interface_mode_is_8023z(state->interface) ||
- state->interface == PHY_INTERFACE_MODE_SGMII)
+ } else {
mvpp2_gmac_config(port, mode, state);
+ mvpp2_gmac_tx_fifo_configure(port);
+ }
if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK)
mvpp2_port_loopback_set(port, state);
@@ -6193,75 +7717,60 @@ static void mvpp2_mac_link_up(struct phylink_config *config,
struct mvpp2_port *port = mvpp2_phylink_to_port(config);
u32 val;
- if (mvpp2_is_xlg(interface)) {
- if (!phylink_autoneg_inband(mode)) {
- val = MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
- if (tx_pause)
- val |= MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN;
- if (rx_pause)
- val |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;
+ if (!phylink_autoneg_inband(mode) &&
+ interface != PHY_INTERFACE_MODE_RXAUI &&
+ interface != PHY_INTERFACE_MODE_10GBASER &&
+ interface != PHY_INTERFACE_MODE_5GKR) {
+ val = MVPP2_GMAC_FORCE_LINK_PASS;
- mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
- MVPP22_XLG_CTRL0_FORCE_LINK_DOWN |
- MVPP22_XLG_CTRL0_FORCE_LINK_PASS |
- MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN |
- MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN, val);
- }
- } else {
- if (!phylink_autoneg_inband(mode)) {
- val = MVPP2_GMAC_FORCE_LINK_PASS;
+ if (speed == SPEED_1000 || speed == SPEED_2500)
+ val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
+ else if (speed == SPEED_100)
+ val |= MVPP2_GMAC_CONFIG_MII_SPEED;
- if (speed == SPEED_1000 || speed == SPEED_2500)
- val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
- else if (speed == SPEED_100)
- val |= MVPP2_GMAC_CONFIG_MII_SPEED;
+ if (duplex == DUPLEX_FULL)
+ val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
- if (duplex == DUPLEX_FULL)
- val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
-
- mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG,
- MVPP2_GMAC_FORCE_LINK_DOWN |
- MVPP2_GMAC_FORCE_LINK_PASS |
- MVPP2_GMAC_CONFIG_MII_SPEED |
- MVPP2_GMAC_CONFIG_GMII_SPEED |
- MVPP2_GMAC_CONFIG_FULL_DUPLEX, val);
- }
+ mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG,
+ MVPP2_GMAC_FORCE_LINK_DOWN |
+ MVPP2_GMAC_FORCE_LINK_PASS |
+ MVPP2_GMAC_CONFIG_MII_SPEED |
+ MVPP2_GMAC_CONFIG_GMII_SPEED |
+ MVPP2_GMAC_CONFIG_FULL_DUPLEX, val);
+ }
- /* We can always update the flow control enable bits;
- * these will only be effective if flow control AN
- * (MVPP2_GMAC_FLOW_CTRL_AUTONEG) is disabled.
- */
- val = 0;
- if (tx_pause)
- val |= MVPP22_CTRL4_TX_FC_EN;
- if (rx_pause)
- val |= MVPP22_CTRL4_RX_FC_EN;
+ /* We can always update the flow control enable bits;
+ * these will only be effective if flow control AN
+ * (MVPP2_GMAC_FLOW_CTRL_AUTONEG) is disabled.
+ */
+ val = 0;
+ if (tx_pause)
+ val |= MVPP22_CTRL4_TX_FC_EN;
+ if (rx_pause)
+ val |= MVPP22_CTRL4_RX_FC_EN;
- mvpp2_modify(port->base + MVPP22_GMAC_CTRL_4_REG,
- MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN,
- val);
+ mvpp2_modify(port->base + MVPP22_GMAC_CTRL_4_REG,
+ MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN,
+ val);
}
mvpp2_port_enable(port);
mvpp2_egress_enable(port);
mvpp2_ingress_enable(port);
- netif_tx_wake_all_queues(port->dev);
+ mvpp2_tx_wake_all_queues(port->dev);
}
-static void mvpp2_mac_link_down(struct phylink_config *config,
- unsigned int mode, phy_interface_t interface)
+static void mvpp2_mac_link_down(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
{
struct mvpp2_port *port = mvpp2_phylink_to_port(config);
u32 val;
- if (!phylink_autoneg_inband(mode)) {
- if (mvpp2_is_xlg(interface)) {
- val = readl(port->base + MVPP22_XLG_CTRL0_REG);
- val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
- val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN;
- writel(val, port->base + MVPP22_XLG_CTRL0_REG);
- } else {
+ if (!phylink_autoneg_inband(mode) &&
+ interface != PHY_INTERFACE_MODE_RXAUI &&
+ interface != PHY_INTERFACE_MODE_10GKR &&
+ interface != PHY_INTERFACE_MODE_5GKR) {
val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
val |= MVPP2_GMAC_FORCE_LINK_DOWN;
@@ -6269,7 +7778,7 @@ static void mvpp2_mac_link_down(struct phylink_config *config,
}
}
- netif_tx_stop_all_queues(port->dev);
+ mvpp2_tx_stop_all_queues(port->dev);
mvpp2_egress_disable(port);
mvpp2_ingress_disable(port);
@@ -6308,6 +7817,36 @@ static void mvpp2_acpi_start(struct mvpp2_port *port)
SPEED_UNKNOWN, DUPLEX_UNKNOWN, false, false);
}
+#if IS_ENABLED(CONFIG_NET_DSA)
+/* DSA notifier */
+static void mvpp2_dsa_port_register(struct net_device *dev)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ struct mvpp2 *priv = port->priv;
+ u32 reg;
+
+ /* For a switch port, enable non-extended DSA tags and make sure
+ * extended DSA tag usage is disabled, as the two options
+ * cannot coexist.
+ */
+ reg = mvpp2_read(priv, MVPP2_MH_REG(port->id));
+ reg &= ~MVPP2_DSA_EXTENDED;
+ reg |= MVPP2_DSA_NON_EXTENDED;
+ mvpp2_write(priv, MVPP2_MH_REG(port->id), reg);
+}
+
+static int mvpp2_dsa_notifier(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct dsa_notifier_register_info *info = ptr;
+
+ if (event == DSA_PORT_REGISTER)
+ mvpp2_dsa_port_register(info->master);
+
+ return NOTIFY_DONE;
+}
+#endif
+
/* Ports initialization */
static int mvpp2_port_probe(struct platform_device *pdev,
struct fwnode_handle *port_fwnode,
@@ -6317,16 +7856,21 @@ static int mvpp2_port_probe(struct platform_device *pdev,
struct mvpp2_port *port;
struct mvpp2_port_pcpu *port_pcpu;
struct device_node *port_node = to_of_node(port_fwnode);
- netdev_features_t features;
struct net_device *dev;
+ struct resource *res;
struct phylink *phylink;
char *mac_from = "";
- unsigned int ntxqs, nrxqs, thread;
+ unsigned int ntxqs, nrxqs;
unsigned long flags = 0;
+ u32 cpu_nrxqs;
+ u16 cpu_map = 0;
bool has_tx_irqs;
+ dma_addr_t p;
u32 id;
+ int features;
int phy_mode;
int err, i;
+ int cpu;
has_tx_irqs = mvpp2_port_has_irqs(priv, port_node, &flags);
if (!has_tx_irqs && queue_mode == MVPP2_QDIST_MULTI_MODE) {
@@ -6336,12 +7880,34 @@ static int mvpp2_port_probe(struct platform_device *pdev,
}
ntxqs = MVPP2_MAX_TXQ;
- nrxqs = mvpp2_get_nrxqs(priv);
+ cpu_nrxqs = MVPP2_NUM_OF_TC;
+ if (priv->hw_version != MVPP21 && queue_mode ==
+ MVPP2_QDIST_SINGLE_MODE) {
+ nrxqs = 1;
+ } else {
+ /* According to the PPv2.2 datasheet and our experiments on
+ * PPv2.1, RX queues have an allocation granularity of 4 (when
+ * more than a single one on PPv2.2).
+ * Round up to nearest multiple of 4.
+ */
+ nrxqs = (num_possible_cpus() * cpu_nrxqs + 3) & ~0x3;
+ if (nrxqs > MVPP2_PORT_MAX_RXQ) {
+ nrxqs = MVPP2_PORT_MAX_RXQ;
+ cpu_nrxqs = MVPP2_PORT_MAX_RXQ / num_possible_cpus();
+ dev_warn(&pdev->dev, "cpu_nrxqs too big, capped to %d\n", cpu_nrxqs);
+ }
+ }
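/* Editor's sketch (not part of the patch): the rxq-count round-up above.
 * num_possible_cpus() * cpu_nrxqs is rounded up to a multiple of 4, the RX
 * queue allocation granularity, before being capped at MVPP2_PORT_MAX_RXQ.
 */
#include <stdio.h>

static unsigned int round_up4(unsigned int n)
{
	return (n + 3) & ~0x3u;
}

int main(void)
{
	/* 4 CPUs x 1 queue and 4 CPUs x 3 queues are already multiples of 4;
	 * 6 CPUs x 1 queue rounds up to 8.
	 */
	printf("%u %u %u\n", round_up4(4 * 1), round_up4(6 * 1), round_up4(4 * 3));
	return 0;
}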
dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs);
if (!dev)
return -ENOMEM;
+ /* Map XPS queues onto CPUs 0..N (which may be fewer than ntxqs) */
+ for_each_online_cpu(cpu) {
+ cpu_map |= (1 << cpu);
+ netif_set_xps_queue(dev, cpumask_of(cpu), cpu);
+ }
+ priv->cpu_map = cpu_map;
phy_mode = fwnode_get_phy_mode(port_fwnode);
if (phy_mode < 0) {
dev_err(&pdev->dev, "incorrect phy mode\n");
@@ -6384,6 +7950,13 @@ static int mvpp2_port_probe(struct platform_device *pdev,
port->dev = dev;
port->fwnode = port_fwnode;
port->has_phy = !!of_find_property(port_node, "phy", NULL);
+ port->num_tc_queues = cpu_nrxqs;
+ if (port->has_phy && phy_mode == PHY_INTERFACE_MODE_INTERNAL) {
+ err = -EINVAL;
+ dev_err(&pdev->dev, "internal mode doesn't work with phy\n");
+ goto err_free_netdev;
+ }
+
port->ntxqs = ntxqs;
port->nrxqs = nrxqs;
port->priv = priv;
@@ -6417,10 +7990,21 @@ static int mvpp2_port_probe(struct platform_device *pdev,
port->of_node = port_node;
port->phy_interface = phy_mode;
+ port->of_phy_interface = phy_mode;
port->comphy = comphy;
+ if (of_phy_find_device(port_node))
+ port->phy_exist = true;
+ else
+ port->phy_exist = false;
+
+ if ((port->id == 0 && port->priv->hw_version != MVPP21) ||
+ (port->id == 1 && port->priv->hw_version == MVPP23))
+ port->has_xlg_mac = true;
+
if (priv->hw_version == MVPP21) {
- port->base = devm_platform_ioremap_resource(pdev, 2 + id);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2 + id);
+ port->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(port->base)) {
err = PTR_ERR(port->base);
goto err_free_irq;
@@ -6456,13 +8040,16 @@ static int mvpp2_port_probe(struct platform_device *pdev,
goto err_free_irq;
}
- port->ethtool_stats = devm_kcalloc(&pdev->dev,
- MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs),
- sizeof(u64), GFP_KERNEL);
- if (!port->ethtool_stats) {
+ p = (dma_addr_t)devm_kcalloc(&pdev->dev,
+ ARRAY_SIZE(mvpp2_ethtool_regs) +
+ L1_CACHE_BYTES,
+ sizeof(u64), GFP_KERNEL);
+ if (!p) {
err = -ENOMEM;
goto err_free_stats;
}
+ p = (p + ~CACHE_LINE_MASK) & CACHE_LINE_MASK;
+ port->ethtool_stats = (void *)p;
mutex_init(&port->gather_stats_lock);
INIT_DELAYED_WORK(&port->stats_work, mvpp2_gather_hw_statistics);
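/* Editor's sketch (not part of the patch): the pointer round-up used above
 * for ethtool_stats (and again later for the aggregated TXQs), assuming
 * CACHE_LINE_MASK == ~(L1_CACHE_BYTES - 1).  The buffer is over-allocated
 * by one cache line so the aligned pointer still fits.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define L1_CACHE_BYTES	((uintptr_t)64)
#define CACHE_LINE_MASK	(~(L1_CACHE_BYTES - 1))

int main(void)
{
	size_t want = 512;
	void *raw = malloc(want + L1_CACHE_BYTES);
	uintptr_t p = (uintptr_t)raw;

	/* Round p up to the next cache-line boundary:
	 * (p + ~MASK) & MASK == (p + 63) & ~63 for 64-byte lines.
	 */
	p = (p + ~CACHE_LINE_MASK) & CACHE_LINE_MASK;

	printf("raw=%p aligned=%p offset=%lu\n", raw, (void *)p,
	       (unsigned long)(p - (uintptr_t)raw));
	free(raw);
	return 0;
}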
@@ -6481,8 +8068,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
mvpp2_port_periodic_xon_disable(port);
- mvpp2_mac_reset_assert(port);
- mvpp22_pcs_reset_assert(port);
+ mvpp2_port_reset(port);
port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
if (!port->pcpu) {
@@ -6490,31 +8076,31 @@ static int mvpp2_port_probe(struct platform_device *pdev,
goto err_free_txq_pcpu;
}
- if (!port->has_tx_irqs) {
- for (thread = 0; thread < priv->nthreads; thread++) {
- port_pcpu = per_cpu_ptr(port->pcpu, thread);
+ /* Init tx-done/guard timer and tasklet */
+ mvpp2_tx_done_init_on_probe(pdev, port);
- hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_PINNED_SOFT);
- port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
- port_pcpu->timer_scheduled = false;
- port_pcpu->dev = dev;
- }
+ /* Init bulk timer and tasklet */
+ for_each_present_cpu(cpu) {
+ port_pcpu = per_cpu_ptr(port->pcpu, cpu);
+ hrtimer_init(&port_pcpu->bulk_timer,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
+ port_pcpu->bulk_timer.function = mvpp2_bulk_timer_cb;
+ tasklet_init(&port_pcpu->bulk_tasklet,
+ mvpp2_bulk_tasklet_cb, (unsigned long)dev);
}
features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_TSO;
+ NETIF_F_TSO | NETIF_F_HW_VLAN_CTAG_FILTER;
dev->features = features | NETIF_F_RXCSUM;
- dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO |
- NETIF_F_HW_VLAN_CTAG_FILTER;
+ dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO;
- if (mvpp22_rss_is_supported()) {
+ if (mvpp22_rss_is_supported(port))
dev->hw_features |= NETIF_F_RXHASH;
- dev->features |= NETIF_F_NTUPLE;
- }
- if (!port->priv->percpu_pools)
- mvpp2_set_hw_csum(port, port->pool_long->id);
+ if (port->pool_long->id == MVPP2_BM_JUMBO && port->id != 0) {
+ dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+ dev->hw_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+ }
dev->vlan_features |= features;
dev->gso_max_segs = MVPP2_MAX_TSO_SEGS;
@@ -6522,8 +8108,8 @@ static int mvpp2_port_probe(struct platform_device *pdev,
/* MTU range: 68 - 9704 */
dev->min_mtu = ETH_MIN_MTU;
- /* 9704 == 9728 - 20 and rounding to 8 */
- dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;
+ /* 9704 == 9728 - 24 (the MTU itself is not rounded, only frag_size is) */
+ dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE - MVPP2_MTU_OVERHEAD_SIZE;
dev->dev.of_node = port_node;
/* Phylink isn't used w/ ACPI as of now */
@@ -6561,6 +8147,38 @@ static int mvpp2_port_probe(struct platform_device *pdev,
priv->port_list[priv->port_count++] = port;
+ /* The port may be configured by U-Boot to transmit IDLE, so the remote
+ * side sees the link as UP. Stop TX in the same way as mvpp2_open/stop.
+ */
+ if (port->of_node && port->phylink) {
+ if (rtnl_is_locked()) {
+ if (!phylink_of_phy_connect(port->phylink,
+ port->of_node, 0))
+ phylink_disconnect_phy(port->phylink);
+ } else {
+ rtnl_lock();
+ if (!phylink_of_phy_connect(port->phylink,
+ port->of_node, 0))
+ phylink_disconnect_phy(port->phylink);
+ rtnl_unlock();
+ }
+ }
+
+ /* Init TX locks and bm locks */
+ for (i = 0; i < MVPP2_MAX_THREADS; i++) {
+ spin_lock_init(&port->bm_lock[i]);
+ spin_lock_init(&port->tx_lock[i]);
+ }
+
+#if IS_ENABLED(CONFIG_NET_DSA)
+ /* Register DSA notifier */
+ port->dsa_notifier.notifier_call = mvpp2_dsa_notifier;
+ err = register_dsa_notifier(&port->dsa_notifier);
+ if (err) {
+ dev_err(&pdev->dev, "failed to register DSA notifier\n");
+ goto err_phylink;
+ }
+#endif
return 0;
err_phylink:
@@ -6588,7 +8206,12 @@ static void mvpp2_port_remove(struct mvpp2_port *port)
{
int i;
+ mvpp2_port_musdk_set(port->dev, false);
+ kfree(port->dbgfs_port_flow_entry);
unregister_netdev(port->dev);
+#if IS_ENABLED(CONFIG_NET_DSA)
+ unregister_dsa_notifier(&port->dsa_notifier);
+#endif
if (port->phylink)
phylink_destroy(port->phylink);
free_percpu(port->pcpu);
@@ -6651,32 +8274,56 @@ static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}
-static void mvpp22_rx_fifo_init(struct mvpp2 *priv)
+static void mvpp22_rx_fifo_set_hw(struct mvpp2 *priv, int port, int data_size)
{
- int port;
+ int attr_size = MVPP2_RX_FIFO_PORT_ATTR_SIZE(data_size);
- /* The FIFO size parameters are set depending on the maximum speed a
- * given port can handle:
- * - Port 0: 10Gbps
- * - Port 1: 2.5Gbps
- * - Ports 2 and 3: 1Gbps
- */
+ mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port), data_size);
+ mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port), attr_size);
+}
- mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(0),
- MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB);
- mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(0),
- MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB);
+/* Initialize RX FIFOs: the total FIFO size is 48kB on PPv2.2 and PPv2.3.
+ * 4kB of fixed space must be assigned to the loopback port.
+ * Redistribute the remaining available 44kB among all active ports.
+ * Guarantee a minimum of 32kB for the 10G port and 8kB for port 1, which
+ * is capable of a 2.5G SGMII link.
+ */
+static void mvpp22_rx_fifo_init(struct mvpp2 *priv)
+{
+ int port, size;
+ unsigned long port_map;
+ int remaining_ports_count;
+ int size_remainder;
+
+ /* The loopback requires fixed 4kB of the FIFO space assignment. */
+ mvpp22_rx_fifo_set_hw(priv, MVPP2_LOOPBACK_PORT_INDEX,
+ MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
+ port_map = priv->port_map & ~BIT(MVPP2_LOOPBACK_PORT_INDEX);
+
+ /* Set RX FIFO size to 0 for inactive ports. */
+ for_each_clear_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX)
+ mvpp22_rx_fifo_set_hw(priv, port, 0);
+
+ /* Assign remaining RX FIFO space among all active ports. */
+ size_remainder = MVPP2_RX_FIFO_PORT_DATA_SIZE_44KB;
+ remaining_ports_count = hweight_long(port_map);
+
+ for_each_set_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) {
+ if (remaining_ports_count == 1)
+ size = size_remainder;
+ else if (port == 0)
+ size = max(size_remainder / remaining_ports_count,
+ MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB);
+ else if (port == 1)
+ size = max(size_remainder / remaining_ports_count,
+ MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB);
+ else
+ size = size_remainder / remaining_ports_count;
- mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(1),
- MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB);
- mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(1),
- MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB);
+ size_remainder -= size;
+ remaining_ports_count--;
- for (port = 2; port < MVPP2_MAX_PORTS; port++) {
- mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
- MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
- mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
- MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
+ mvpp22_rx_fifo_set_hw(priv, port, size);
}
mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
@@ -6684,27 +8331,152 @@ static void mvpp22_rx_fifo_init(struct mvpp2 *priv)
mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}
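/* Editor's sketch (not part of the patch): the 44kB RX FIFO redistribution
 * performed above, for the usual three physical ports.  Sizes are in kB;
 * the real code writes the MVPP2_RX_FIFO_PORT_DATA_SIZE_* register values.
 */
#include <stdio.h>

int main(void)
{
	int active[3] = { 1, 1, 1 };	/* ports 0..2 present in port_map */
	int remaining = 44, count = 3;
	int port, size;

	for (port = 0; port < 3; port++) {
		if (!active[port])
			continue;
		if (count == 1)
			size = remaining;	/* last active port takes the rest */
		else if (port == 0)
			size = remaining / count > 32 ? remaining / count : 32;
		else if (port == 1)
			size = remaining / count > 8 ? remaining / count : 8;
		else
			size = remaining / count;
		remaining -= size;
		count--;
		printf("port %d: %d kB\n", port, size);
	}
	/* Prints 32, 8 and 4 kB for ports 0, 1 and 2 respectively. */
	return 0;
}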
-/* Initialize Tx FIFO's: the total FIFO size is 19kB on PPv2.2 and 10G
- * interfaces must have a Tx FIFO size of 10kB. As only port 0 can do 10G,
- * configure its Tx FIFO size to 10kB and the others ports Tx FIFO size to 3kB.
- */
-static void mvpp22_tx_fifo_init(struct mvpp2 *priv)
+/* Configure Rx FIFO Flow control thresholds */
+static void mvpp23_rx_fifo_fc_set_tresh(struct mvpp2 *priv)
{
- int port, size, thrs;
+ int port, val;
- for (port = 0; port < MVPP2_MAX_PORTS; port++) {
+ /* Port 0: maximum speed 10Gb/s, the spec requires a 9KB RX FIFO threshold
+ * Port 1: maximum speed 5Gb/s, the spec requires a 4KB RX FIFO threshold
+ * Port 2: maximum speed 1Gb/s, the spec requires a 2KB RX FIFO threshold
+ */
+
+ /* Without loopback port */
+ for (port = 0; port < (MVPP2_MAX_PORTS - 1); port++) {
if (port == 0) {
- size = MVPP22_TX_FIFO_DATA_SIZE_10KB;
- thrs = MVPP2_TX_FIFO_THRESHOLD_10KB;
+ val = (MVPP23_PORT0_FIFO_TRSH / MVPP2_RX_FC_TRSH_UNIT)
+ << MVPP2_RX_FC_TRSH_OFFS;
+ val &= MVPP2_RX_FC_TRSH_MASK;
+ mvpp2_write(priv, MVPP2_RX_FC_REG(port), val);
+ } else if (port == 1) {
+ val = (MVPP23_PORT1_FIFO_TRSH / MVPP2_RX_FC_TRSH_UNIT)
+ << MVPP2_RX_FC_TRSH_OFFS;
+ val &= MVPP2_RX_FC_TRSH_MASK;
+ mvpp2_write(priv, MVPP2_RX_FC_REG(port), val);
} else {
- size = MVPP22_TX_FIFO_DATA_SIZE_3KB;
- thrs = MVPP2_TX_FIFO_THRESHOLD_3KB;
+ val = (MVPP23_PORT2_FIFO_TRSH / MVPP2_RX_FC_TRSH_UNIT)
+ << MVPP2_RX_FC_TRSH_OFFS;
+ val &= MVPP2_RX_FC_TRSH_MASK;
+ mvpp2_write(priv, MVPP2_RX_FC_REG(port), val);
}
- mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), size);
- mvpp2_write(priv, MVPP22_TX_FIFO_THRESH_REG(port), thrs);
}
}
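/* Editor's sketch (not part of the patch): how a threshold in bytes becomes
 * the RX_FC register field written above.  UNIT, OFFS and MASK are
 * placeholders; the driver's MVPP2_RX_FC_TRSH_UNIT/_OFFS/_MASK define the
 * real granularity, bit position and field width.
 */
#include <stdio.h>

#define TRSH_UNIT	256u	/* assumed threshold granularity in bytes */
#define TRSH_OFFS	16u	/* assumed field offset within the register */
#define TRSH_MASK	(0xffffu << TRSH_OFFS)

static unsigned int fc_thresh_field(unsigned int thresh_bytes)
{
	return ((thresh_bytes / TRSH_UNIT) << TRSH_OFFS) & TRSH_MASK;
}

int main(void)
{
	/* A 9KB threshold becomes 36 units placed into the field. */
	printf("0x%08x\n", fc_thresh_field(9 * 1024));
	return 0;
}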
+/* Enable or disable Rx FIFO flow control for a port */
+void mvpp23_rx_fifo_fc_en(struct mvpp2 *priv, int port, bool en)
+{
+ int val;
+
+ val = mvpp2_read(priv, MVPP2_RX_FC_REG(port));
+
+ if (en)
+ val |= MVPP2_RX_FC_EN;
+ else
+ val &= ~MVPP2_RX_FC_EN;
+
+ mvpp2_write(priv, MVPP2_RX_FC_REG(port), val);
+}
+
+static void mvpp22_tx_fifo_set_hw(struct mvpp2 *priv, int port, int size)
+{
+ int threshold = MVPP2_TX_FIFO_THRESHOLD(size);
+
+ mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), size);
+ mvpp2_write(priv, MVPP22_TX_FIFO_THRESH_REG(port), threshold);
+}
+
+/* Initialize TX FIFOs: the total FIFO size is 19kB on PPv2.2 and PPv2.3.
+ * 1kB of fixed space must be assigned to the loopback port.
+ * Redistribute the remaining available 18kB among all active ports.
+ * The 10G interface should use 10kB (which is maximum possible size
+ * per single port).
+ */
+static void mvpp22_tx_fifo_init_default(struct mvpp2 *priv)
+{
+ int port, size;
+ unsigned long port_map;
+ int remaining_ports_count;
+ int size_remainder;
+
+ /* The loopback requires fixed 1kB of the FIFO space assignment. */
+ mvpp22_tx_fifo_set_hw(priv, MVPP2_LOOPBACK_PORT_INDEX,
+ MVPP22_TX_FIFO_DATA_SIZE_1KB);
+ port_map = priv->port_map & ~BIT(MVPP2_LOOPBACK_PORT_INDEX);
+
+ /* Set TX FIFO size to 0 for inactive ports. */
+ for_each_clear_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX)
+ mvpp22_tx_fifo_set_hw(priv, port, 0);
+
+ /* Assign remaining TX FIFO space among all active ports. */
+ size_remainder = MVPP22_TX_FIFO_DATA_SIZE_18KB;
+ remaining_ports_count = hweight_long(port_map);
+
+ for_each_set_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) {
+ if (remaining_ports_count == 1)
+ size = min(size_remainder,
+ MVPP22_TX_FIFO_DATA_SIZE_10KB);
+ else if (port == 0)
+ size = MVPP22_TX_FIFO_DATA_SIZE_10KB;
+ else
+ size = size_remainder / remaining_ports_count;
+
+ size_remainder -= size;
+ remaining_ports_count--;
+
+ mvpp22_tx_fifo_set_hw(priv, port, size);
+ }
+}
+
+static void mvpp22_tx_fifo_init_param(struct platform_device *pdev,
+ struct mvpp2 *priv)
+{
+ unsigned long port_map;
+ int size_remainder;
+ int port, size;
+
+ /* The loopback requires fixed 1kB of the FIFO space assignment. */
+ mvpp22_tx_fifo_set_hw(priv, MVPP2_LOOPBACK_PORT_INDEX,
+ MVPP22_TX_FIFO_DATA_SIZE_1KB);
+ port_map = priv->port_map & ~BIT(MVPP2_LOOPBACK_PORT_INDEX);
+
+ /* Set TX FIFO size to 0 for inactive ports. */
+ for_each_clear_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) {
+ mvpp22_tx_fifo_set_hw(priv, port, 0);
+ if (MVPP22_TX_FIFO_EXTRA_PARAM_SIZE(port, tx_fifo_map))
+ goto error;
+ }
+
+ /* Each physical port requires a minimum of 3kB */
+ for_each_set_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) {
+ size = MVPP22_TX_FIFO_EXTRA_PARAM_SIZE(port, tx_fifo_map);
+ if (size < MVPP22_TX_FIFO_DATA_SIZE_MIN ||
+ size > MVPP22_TX_FIFO_DATA_SIZE_MAX)
+ goto error;
+ }
+
+ /* Assign remaining TX FIFO space among all active ports. */
+ size_remainder = MVPP22_TX_FIFO_DATA_SIZE_18KB;
+ for (port = 0; port < MVPP2_LOOPBACK_PORT_INDEX; port++) {
+ size = MVPP22_TX_FIFO_EXTRA_PARAM_SIZE(port, tx_fifo_map);
+ if (!size)
+ continue;
+ size_remainder -= size;
+ mvpp22_tx_fifo_set_hw(priv, port, size);
+ }
+
+ if (size_remainder)
+ goto error;
+
+ return;
+
+error:
+ dev_warn(&pdev->dev, "Failed to set TX FIFO from module_param, falling back to default\n");
+ mvpp22_tx_fifo_init_default(priv);
+}
+
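How MVPP22_TX_FIFO_EXTRA_PARAM_SIZE() unpacks tx_fifo_map is not visible in this hunk; one plausible layout is one byte of kB per port, which the hedged sketch below assumes purely for illustration (the real macro may differ):

#include <stdint.h>
#include <stdio.h>

/* assumed layout: 8 bits of kB per port, standing in for a hypothetical
 * MVPP22_TX_FIFO_EXTRA_PARAM_SIZE(port, map)
 */
static unsigned int tx_fifo_param_size(int port, uint32_t map)
{
	return (map >> (8 * port)) & 0xff;
}

int main(void)
{
	uint32_t map = 0x04040a;	/* port0=10kB, port1=4kB, port2=4kB */
	int total = 0;

	for (int port = 0; port < 3; port++) {
		unsigned int kb = tx_fifo_param_size(port, map);

		total += kb;
		printf("port %d -> %u kB\n", port, kb);
	}
	printf("total = %d kB (must be exactly 18)\n", total);
	return 0;
}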
static void mvpp2_axi_init(struct mvpp2 *priv)
{
u32 val, rdval, wrval;
@@ -6734,6 +8506,10 @@ static void mvpp2_axi_init(struct mvpp2 *priv)
mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);
/* Buffer Data */
+ /* Force TX FIFO transaction priority on the AXI QoS bus */
+ if (tx_fifo_protection)
+ rdval |= MVPP22_AXI_TX_DATA_RD_QOS_ATTRIBUTE;
+
mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);
@@ -6765,13 +8541,14 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
const struct mbus_dram_target_info *dram_target_info;
int err, i;
u32 val;
+ dma_addr_t p;
/* MBUS windows configuration */
dram_target_info = mv_mbus_dram_info();
if (dram_target_info)
mvpp2_conf_mbus_windows(dram_target_info, priv);
- if (priv->hw_version == MVPP22)
+ if (priv->hw_version != MVPP21)
mvpp2_axi_init(priv);
/* Disable HW PHY polling */
@@ -6785,12 +8562,16 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
}
- /* Allocate and initialize aggregated TXQs */
- priv->aggr_txqs = devm_kcalloc(&pdev->dev, MVPP2_MAX_THREADS,
- sizeof(*priv->aggr_txqs),
- GFP_KERNEL);
- if (!priv->aggr_txqs)
+ /* Allocate and initialize aggregated TXQs
+ * The aggr_txqs[per-cpu] entries should be cache-line aligned,
+ * so allocate more than needed and round up the pointer.
+ */
+ val = sizeof(*priv->aggr_txqs) * MVPP2_MAX_THREADS + L1_CACHE_BYTES;
+ p = (dma_addr_t)devm_kzalloc(&pdev->dev, val, GFP_KERNEL);
+ if (!p)
return -ENOMEM;
+ p = (p + ~CACHE_LINE_MASK) & CACHE_LINE_MASK;
+ priv->aggr_txqs = (struct mvpp2_tx_queue *)p;
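The pointer round-up above is worth a worked example. Assuming CACHE_LINE_MASK is ~(L1_CACHE_BYTES - 1) (its definition is not shown in this hunk), adding the inverted mask and AND-ing with the mask rounds the allocation up to the next cache-line boundary:

#include <stdint.h>
#include <stdio.h>

#define L1_CACHE_BYTES	64UL
#define CACHE_LINE_MASK	(~(L1_CACHE_BYTES - 1))	/* assumed definition */

int main(void)
{
	uintptr_t p = 0x1009;	/* unaligned allocation address */
	uintptr_t aligned = (p + ~CACHE_LINE_MASK) & CACHE_LINE_MASK;

	/* prints 0x1009 -> 0x1040 for a 64-byte cache line */
	printf("0x%lx -> 0x%lx\n", (unsigned long)p, (unsigned long)aligned);
	return 0;
}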
for (i = 0; i < MVPP2_MAX_THREADS; i++) {
priv->aggr_txqs[i].id = i;
@@ -6805,7 +8586,12 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
mvpp2_rx_fifo_init(priv);
} else {
mvpp22_rx_fifo_init(priv);
- mvpp22_tx_fifo_init(priv);
+ if (tx_fifo_map)
+ mvpp22_tx_fifo_init_param(pdev, priv);
+ else
+ mvpp22_tx_fifo_init_default(priv);
+ if (priv->hw_version == MVPP23)
+ mvpp23_rx_fifo_fc_set_tresh(priv);
}
if (priv->hw_version == MVPP21)
@@ -6816,7 +8602,7 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);
/* Buffer Manager initialization */
- err = mvpp2_bm_init(&pdev->dev, priv);
+ err = mvpp2_bm_init(pdev, priv);
if (err < 0)
return err;
@@ -6828,6 +8614,38 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
/* Classifier default initialization */
mvpp2_cls_init(priv);
+ /* Disable all ingress queues */
+ mvpp2_rxq_disable_all(priv);
+
+ return 0;
+}
+
+static int mvpp2_get_sram(struct platform_device *pdev,
+ struct mvpp2 *priv)
+{
+ struct device_node *dn = pdev->dev.of_node;
+ struct resource *res;
+
+ if (has_acpi_companion(&pdev->dev)) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ if (!res) {
+ dev_warn(&pdev->dev, "ACPI is too old, TX FC disabled\n");
+ return 0;
+ }
+ priv->cm3_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->cm3_base))
+ return PTR_ERR(priv->cm3_base);
+ } else {
+ priv->sram_pool = of_gen_pool_get(dn, "cm3-mem", 0);
+ if (!priv->sram_pool) {
+ dev_warn(&pdev->dev, "DT is too old, TX FC disabled\n");
+ return 0;
+ }
+ priv->cm3_base = (void __iomem *)gen_pool_alloc(priv->sram_pool,
+ MSS_SRAM_SIZE);
+ if (!priv->cm3_base)
+ return -ENOMEM;
+ }
return 0;
}
@@ -6849,8 +8667,6 @@ static int mvpp2_probe(struct platform_device *pdev)
if (has_acpi_companion(&pdev->dev)) {
acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
&pdev->dev);
- if (!acpi_id)
- return -EINVAL;
priv->hw_version = (unsigned long)acpi_id->driver_data;
} else {
priv->hw_version =
@@ -6863,12 +8679,14 @@ static int mvpp2_probe(struct platform_device *pdev)
if (priv->hw_version == MVPP21)
queue_mode = MVPP2_QDIST_SINGLE_MODE;
- base = devm_platform_ioremap_resource(pdev, 0);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(base))
return PTR_ERR(base);
if (priv->hw_version == MVPP21) {
- priv->lms_base = devm_platform_ioremap_resource(pdev, 1);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(priv->lms_base))
return PTR_ERR(priv->lms_base);
} else {
@@ -6891,9 +8709,18 @@ static int mvpp2_probe(struct platform_device *pdev)
priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(priv->iface_base))
return PTR_ERR(priv->iface_base);
+
+ /* Map CM3 SRAM */
+ err = mvpp2_get_sram(pdev, priv);
+ if (err)
+ dev_warn(&pdev->dev, "Failed to allocate CM3 SRAM\n");
+
+ /* Enable global Flow Control only if the handle to SRAM is not NULL */
+ if (priv->cm3_base)
+ priv->global_tx_fc = true;
}
- if (priv->hw_version == MVPP22 && dev_of_node(&pdev->dev)) {
+ if (priv->hw_version != MVPP21 && dev_of_node(&pdev->dev)) {
priv->sysctrl_base =
syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
"marvell,system-controller");
@@ -6906,13 +8733,6 @@ static int mvpp2_probe(struct platform_device *pdev)
priv->sysctrl_base = NULL;
}
- if (priv->hw_version == MVPP22 &&
- mvpp2_get_nrxqs(priv) * 2 <= MVPP2_BM_MAX_POOLS)
- priv->percpu_pools = 1;
-
- mvpp2_setup_bm_pool();
-
-
priv->nthreads = min_t(unsigned int, num_present_cpus(),
MVPP2_MAX_THREADS);
@@ -6929,18 +8749,15 @@ static int mvpp2_probe(struct platform_device *pdev)
priv->swth_base[i] = base + i * addr_space_sz;
}
- if (priv->hw_version == MVPP21)
- priv->max_port_rxqs = 8;
- else
- priv->max_port_rxqs = 32;
-
if (dev_of_node(&pdev->dev)) {
priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
- if (IS_ERR(priv->pp_clk))
- return PTR_ERR(priv->pp_clk);
+ if (IS_ERR(priv->pp_clk)) {
+ err = PTR_ERR(priv->pp_clk);
+ goto err_cm3;
+ }
err = clk_prepare_enable(priv->pp_clk);
if (err < 0)
- return err;
+ goto err_cm3;
priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
if (IS_ERR(priv->gop_clk)) {
@@ -6951,7 +8768,7 @@ static int mvpp2_probe(struct platform_device *pdev)
if (err < 0)
goto err_pp_clk;
- if (priv->hw_version == MVPP22) {
+ if (priv->hw_version != MVPP21) {
priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
if (IS_ERR(priv->mg_clk)) {
err = PTR_ERR(priv->mg_clk);
@@ -6992,10 +8809,39 @@ static int mvpp2_probe(struct platform_device *pdev)
return -EINVAL;
}
- if (priv->hw_version == MVPP22) {
+ if (priv->hw_version != MVPP21) {
+ if (mvpp2_read(priv, MVPP2_VER_ID_REG) == MVPP2_VER_PP23)
+ priv->hw_version = MVPP23;
+ }
+
+ if (priv->hw_version == MVPP21)
+ priv->max_port_rxqs = 8;
+ else
+ priv->max_port_rxqs = 32;
+
+ priv->custom_dma_mask = false;
+ if (priv->hw_version != MVPP21) {
+ /* If dma_mask points to coherent_dma_mask, setting both will
+ * override the value of the other. This is problematic as the
+ * PPv2 driver uses a 32-bit-mask for coherent accesses (txq,
+ * rxq, bm) and a 40-bit mask for all other accesses.
+ */
+ if (pdev->dev.dma_mask == &pdev->dev.coherent_dma_mask) {
+ pdev->dev.dma_mask =
+ kzalloc(sizeof(*pdev->dev.dma_mask),
+ GFP_KERNEL);
+ if (!pdev->dev.dma_mask) {
+ err = -ENOMEM;
+ goto err_mg_clk;
+ }
+
+ priv->custom_dma_mask = true;
+ }
+
err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK);
if (err)
- goto err_axi_clk;
+ goto err_dma_mask;
+
/* Sadly, the BM pools all share the same register to
* store the high 32 bits of their address. So they
* must all have the same high 32 bits, which forces
@@ -7003,9 +8849,35 @@ static int mvpp2_probe(struct platform_device *pdev)
*/
err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err)
- goto err_axi_clk;
+ goto err_dma_mask;
+ }
+
+ /* Assign the reserved memory region to the device for DMA allocations,
+ * if a memory-region phandle is found.
+ */
+ if (dev_of_node(&pdev->dev))
+ of_reserved_mem_device_init_by_idx(&pdev->dev,
+ pdev->dev.of_node, 0);
+
+ /* Configure branch prediction switch */
+ if (priv->hw_version == MVPP21)
+ static_branch_enable(&mvpp21_variant);
+ if (recycle) {
+ dev_info(&pdev->dev,
+ "kernel space packet recycling feature enabled\n");
+ static_branch_enable(&mvpp2_recycle_ena);
+ }
+ /* else - keep the DEFINE_STATIC_KEY_FALSE */
+
+ /* Map DTS-active ports. Must be done before the FIFO setup in mvpp2_init */
+ fwnode_for_each_available_child_node(fwnode, port_fwnode) {
+ if (!fwnode_property_read_u32(port_fwnode, "port-id", &i))
+ priv->port_map |= BIT(i);
}
+ /* Init mss lock */
+ spin_lock_init(&priv->mss_spinlock);
+
/* Initialize network controller */
err = mvpp2_init(pdev, priv);
if (err < 0) {
@@ -7045,6 +8917,12 @@ static int mvpp2_probe(struct platform_device *pdev)
goto err_port_probe;
}
+ if (priv->global_tx_fc && priv->hw_version != MVPP21) {
+ err = mvpp2_enable_global_fc(priv);
+ if (err)
+ dev_warn(&pdev->dev, "CM3 firmware not running, TX FC disabled\n");
+ }
+
mvpp2_dbgfs_init(priv, pdev->name);
platform_set_drvdata(pdev, priv);
@@ -7059,19 +8937,29 @@ err_port_probe:
mvpp2_port_remove(priv->port_list[i]);
i++;
}
+err_dma_mask:
+ if (priv->custom_dma_mask) {
+ kfree(pdev->dev.dma_mask);
+ pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+ }
err_axi_clk:
clk_disable_unprepare(priv->axi_clk);
err_mg_core_clk:
- if (priv->hw_version == MVPP22)
+ if (priv->hw_version != MVPP21)
clk_disable_unprepare(priv->mg_core_clk);
err_mg_clk:
- if (priv->hw_version == MVPP22)
+ if (priv->hw_version != MVPP21)
clk_disable_unprepare(priv->mg_clk);
err_gop_clk:
clk_disable_unprepare(priv->gop_clk);
err_pp_clk:
clk_disable_unprepare(priv->pp_clk);
+err_cm3:
+ if (!has_acpi_companion(&pdev->dev) && priv->cm3_base)
+ gen_pool_free(priv->sram_pool, (unsigned long)priv->cm3_base,
+ MSS_SRAM_SIZE);
+
return err;
}
@@ -7079,11 +8967,14 @@ static int mvpp2_remove(struct platform_device *pdev)
{
struct mvpp2 *priv = platform_get_drvdata(pdev);
struct fwnode_handle *fwnode = pdev->dev.fwnode;
- int i = 0, poolnum = MVPP2_BM_POOLS_NUM;
struct fwnode_handle *port_fwnode;
+ int i = 0;
mvpp2_dbgfs_cleanup(priv);
+ flush_workqueue(priv->stats_queue);
+ destroy_workqueue(priv->stats_queue);
+
fwnode_for_each_available_child_node(fwnode, port_fwnode) {
if (priv->port_list[i]) {
mutex_destroy(&priv->port_list[i]->gather_stats_lock);
@@ -7092,15 +8983,10 @@ static int mvpp2_remove(struct platform_device *pdev)
i++;
}
- destroy_workqueue(priv->stats_queue);
-
- if (priv->percpu_pools)
- poolnum = mvpp2_get_nrxqs(priv) * 2;
-
- for (i = 0; i < poolnum; i++) {
+ for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];
- mvpp2_bm_pool_destroy(&pdev->dev, priv, bm_pool);
+ mvpp2_bm_pool_destroy(pdev, priv, bm_pool);
}
for (i = 0; i < MVPP2_MAX_THREADS; i++) {
@@ -7112,6 +8998,17 @@ static int mvpp2_remove(struct platform_device *pdev)
aggr_txq->descs_dma);
}
+ if (priv->custom_dma_mask) {
+ kfree(pdev->dev.dma_mask);
+ pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+ }
+
+ if (!has_acpi_companion(&pdev->dev)) {
+ gen_pool_free(priv->sram_pool, (unsigned long)priv->cm3_base,
+ MSS_SRAM_SIZE);
+ gen_pool_destroy(priv->sram_pool);
+ }
+
if (is_acpi_node(port_fwnode))
return 0;
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
index dd590086fe6a..1e1d70fffcdc 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
@@ -11,6 +11,7 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
+#include <linux/mpls.h>
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>
@@ -198,6 +199,19 @@ static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
}
+/* Set u32 in tcam sw entry */
+static void mvpp2_prs_tcam_data_u32_set(struct mvpp2_prs_entry *pe,
+ u32 val, u32 mask)
+{
+ int i;
+
+ for (i = sizeof(u32) - 1; i >= 0; --i) {
+ mvpp2_prs_tcam_data_byte_set(pe, i, val & 0xff, mask & 0xff);
+ mask >>= 8;
+ val >>= 8;
+ }
+}
+
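mvpp2_prs_tcam_data_u32_set() above writes a 32-bit value/mask pair most-significant byte first, matching the on-wire order of the MPLS header it is later used to match. A standalone trace of the same loop:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t val  = 0x00000100;	/* e.g. the bottom-of-stack bit */
	uint32_t mask = 0x00000100;

	for (int i = (int)sizeof(uint32_t) - 1; i >= 0; --i) {
		/* byte 0 receives the most significant byte of val/mask */
		printf("byte %d: val=0x%02x mask=0x%02x\n",
		       i, val & 0xff, mask & 0xff);
		val >>= 8;
		mask >>= 8;
	}
	return 0;
}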
/* Set vid in tcam sw entry */
static void mvpp2_prs_match_vid(struct mvpp2_prs_entry *pe, int offset,
unsigned short vid)
@@ -406,11 +420,12 @@ static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
}
/* Drop flow control pause frames */
-static void mvpp2_prs_drop_fc(struct mvpp2 *priv)
+static void mv_pp2x_prs_drop_fc(struct mvpp2 *priv)
{
- unsigned char da[ETH_ALEN] = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x01 };
struct mvpp2_prs_entry pe;
unsigned int len;
+ unsigned char da[ETH_ALEN] = {
+ 0x01, 0x80, 0xC2, 0x00, 0x00, 0x01 };
memset(&pe, 0, sizeof(pe));
@@ -559,12 +574,8 @@ static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
/* Set ai bits for next iteration */
- if (extend)
- mvpp2_prs_sram_ai_update(&pe, 1,
- MVPP2_PRS_SRAM_AI_MASK);
- else
- mvpp2_prs_sram_ai_update(&pe, 0,
- MVPP2_PRS_SRAM_AI_MASK);
+ mvpp2_prs_sram_ai_update(&pe, extend,
+ MVPP2_PRS_SRAM_AI_MASK);
/* Set result info bits to 'single vlan' */
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE,
@@ -914,15 +925,14 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
pe.index = tid;
- /* Set next lu to IPv4 */
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
- mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
- /* Set L4 offset */
- mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
- sizeof(struct iphdr) - 4,
+ /* Finished: go to flowid generation */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+
+ /* Set L3 offset */
+ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, -4,
MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
- mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
- MVPP2_PRS_IPV4_DIP_AI_BIT);
+ mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
mvpp2_prs_sram_ri_update(&pe, ri, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00,
@@ -931,7 +941,8 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
MVPP2_PRS_TCAM_PROTO_MASK);
mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
- mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
+ mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
+ MVPP2_PRS_IPV4_DIP_AI_BIT);
/* Unmask all ports */
mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
@@ -999,12 +1010,17 @@ static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
return -EINVAL;
}
- /* Finished: go to flowid generation */
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
- mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ /* Go again to ipv4 */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
- mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
+ mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
MVPP2_PRS_IPV4_DIP_AI_BIT);
+
+ /* Shift back to IPv4 proto */
+ mvpp2_prs_sram_shift_set(&pe, -12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+ mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
+
/* Unmask all ports */
mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
@@ -1166,6 +1182,21 @@ static void mvpp2_prs_mh_init(struct mvpp2 *priv)
/* Update shadow table and hw entry */
mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
mvpp2_prs_hw_write(priv, &pe);
+
+ /* Set MH entry that skips the parser */
+ pe.index = MVPP2_PE_MH_SKIP_PRS;
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+
+ /* Mask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, 0);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
+ mvpp2_prs_hw_write(priv, &pe);
}
/* Set default entires (place holder) for promiscuous, non-promiscuous and
@@ -1194,7 +1225,7 @@ static void mvpp2_prs_mac_init(struct mvpp2 *priv)
mvpp2_prs_hw_write(priv, &pe);
/* Create dummy entries for drop all and promiscuous modes */
- mvpp2_prs_drop_fc(priv);
+ mv_pp2x_prs_drop_fc(priv);
mvpp2_prs_mac_drop_all_set(priv, 0, false);
mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false);
mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false);
@@ -1314,7 +1345,8 @@ static void mvpp2_prs_vid_init(struct mvpp2 *priv)
static int mvpp2_prs_etype_init(struct mvpp2 *priv)
{
struct mvpp2_prs_entry pe;
- int tid;
+ unsigned short ethertype;
+ int tid, ihl;
/* Ethertype: PPPoE */
tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
@@ -1406,66 +1438,43 @@ static int mvpp2_prs_etype_init(struct mvpp2 *priv)
MVPP2_PRS_RI_UDF3_MASK);
mvpp2_prs_hw_write(priv, &pe);
- /* Ethertype: IPv4 without options */
- tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
- MVPP2_PE_LAST_FREE_TID);
- if (tid < 0)
- return tid;
-
- memset(&pe, 0, sizeof(pe));
- mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
- pe.index = tid;
-
- mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
- mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
- MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
- MVPP2_PRS_IPV4_HEAD_MASK |
- MVPP2_PRS_IPV4_IHL_MASK);
-
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
- mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
- MVPP2_PRS_RI_L3_PROTO_MASK);
- /* Skip eth_type + 4 bytes of IP header */
- mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
- MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
- /* Set L3 offset */
- mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
- MVPP2_ETH_TYPE_LEN,
- MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
-
- /* Update shadow table and hw entry */
- mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
- priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
- priv->prs_shadow[pe.index].finish = false;
- mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
- MVPP2_PRS_RI_L3_PROTO_MASK);
- mvpp2_prs_hw_write(priv, &pe);
-
- /* Ethertype: IPv4 with options */
- tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
- MVPP2_PE_LAST_FREE_TID);
- if (tid < 0)
- return tid;
-
- pe.index = tid;
+ /* Ethertype: IPv4 with header length >= 5 */
+ for (ihl = MVPP2_PRS_IPV4_IHL_MIN; ihl <= MVPP2_PRS_IPV4_IHL_MAX; ihl++) {
+ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
- mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
- MVPP2_PRS_IPV4_HEAD,
- MVPP2_PRS_IPV4_HEAD_MASK);
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
+ pe.index = tid;
- /* Clear ri before updating */
- pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
- pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
- mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
- MVPP2_PRS_RI_L3_PROTO_MASK);
+ mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
+ mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
+ MVPP2_PRS_IPV4_HEAD | ihl,
+ MVPP2_PRS_IPV4_HEAD_MASK |
+ MVPP2_PRS_IPV4_IHL_MASK);
+
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
+ MVPP2_PRS_RI_L3_PROTO_MASK);
+ /* goto ipv4 dst-address (skip eth_type + IP-header-size - 4) */
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN +
+ sizeof(struct iphdr) - 4,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ /* Set L4 offset */
+ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
+ MVPP2_ETH_TYPE_LEN + (ihl * 4),
+ MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
- /* Update shadow table and hw entry */
- mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
- priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
- priv->prs_shadow[pe.index].finish = false;
- mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
- MVPP2_PRS_RI_L3_PROTO_MASK);
- mvpp2_prs_hw_write(priv, &pe);
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
+ priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+ priv->prs_shadow[pe.index].finish = false;
+ mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
+ MVPP2_PRS_RI_L3_PROTO_MASK);
+ mvpp2_prs_hw_write(priv, &pe);
+ }
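For the per-IHL entries above, the L4 offset grows with the IPv4 header length: the ethertype length plus IHL*4 bytes of IP header. A quick check, assuming MVPP2_ETH_TYPE_LEN is the 2-byte ethertype:

#include <stdio.h>

int main(void)
{
	const int eth_type_len = 2;	/* assumed value of MVPP2_ETH_TYPE_LEN */

	/* legal IPv4 header lengths: 5..15 32-bit words */
	for (int ihl = 5; ihl <= 15; ihl++)
		printf("IHL=%2d -> L4 offset = %d bytes\n",
		       ihl, eth_type_len + ihl * 4);
	return 0;
}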
/* Ethertype: IPv6 without options */
tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
@@ -1498,6 +1507,34 @@ static int mvpp2_prs_etype_init(struct mvpp2 *priv)
MVPP2_PRS_RI_L3_PROTO_MASK);
mvpp2_prs_hw_write(priv, &pe);
+ for (ethertype = ETH_P_MPLS_UC; ethertype <= ETH_P_MPLS_MC; ++ethertype) {
+ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
+ pe.index = tid;
+
+ mvpp2_prs_match_etype(&pe, 0, ethertype);
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MPLS);
+
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+ /* Set L3 offset */
+ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+ MVPP2_ETH_TYPE_LEN,
+ MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
+ priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+ priv->prs_shadow[pe.index].finish = false;
+ mvpp2_prs_hw_write(priv, &pe);
+ }
+
/* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
@@ -1609,6 +1646,104 @@ static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
return 0;
}
+/* Set entries for MPLS ethertype */
+static int mvpp2_prs_mpls_init(struct mvpp2 *priv)
+{
+ struct mvpp2_prs_entry pe;
+ int tid;
+
+ /* Add multiple MPLS TCAM entry */
+ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MPLS);
+ pe.index = tid;
+
+ mvpp2_prs_tcam_data_u32_set(&pe, 0, MPLS_LS_S_MASK);
+
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_MPLS_HEADER_LEN,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+ /* Set L3 offset */
+ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+ MVPP2_MPLS_HEADER_LEN,
+ MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+ /* If this isn't the last MPLS label, jump to the next MPLS entry */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MPLS);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MPLS);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ /* Add ipv4 MPLS TCAM entry */
+ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MPLS);
+ pe.index = tid;
+
+ mvpp2_prs_tcam_data_u32_set(&pe, MPLS_LABEL_IPV4NULL << MPLS_LS_LABEL_SHIFT,
+ MPLS_LS_LABEL_MASK);
+
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
+
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
+ MVPP2_PRS_RI_L3_PROTO_MASK);
+
+ /* goto ipv4 dest-address (skip eth_type + IP-header-size - 4) */
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_MPLS_HEADER_LEN +
+ sizeof(struct iphdr) - 4,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ /* Set L3 offset */
+ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+ MVPP2_MPLS_HEADER_LEN,
+ MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MPLS);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ /* Add ipv6 MPLS TCAM entry */
+ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MPLS);
+ pe.index = tid;
+
+ mvpp2_prs_tcam_data_u32_set(&pe, MPLS_LABEL_IPV6NULL << MPLS_LS_LABEL_SHIFT,
+ MPLS_LS_LABEL_MASK);
+
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
+
+ /* Skip DIP of IPV6 header */
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_MPLS_HEADER_LEN + 8 +
+ MVPP2_MAX_L3_ADDR_SIZE,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
+ MVPP2_PRS_RI_L3_PROTO_MASK);
+ /* Set L3 offset */
+ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+ MVPP2_MPLS_HEADER_LEN,
+ MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MPLS);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ return 0;
+}
+
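The three TCAM matches above can be sanity-checked against the MPLS label-stack-entry layout from <uapi/linux/mpls.h> (label in bits 31:12, bottom-of-stack bit 8):

#include <stdint.h>
#include <stdio.h>

#define MPLS_LS_LABEL_MASK	0xFFFFF000u
#define MPLS_LS_LABEL_SHIFT	12
#define MPLS_LS_S_MASK		0x00000100u
#define MPLS_LABEL_IPV4NULL	0u
#define MPLS_LABEL_IPV6NULL	2u

int main(void)
{
	/* "not bottom of stack" entry: S bit must be 0 */
	printf("multi-label MPLS  : val=0x%08x mask=0x%08x\n",
	       0u, MPLS_LS_S_MASK);
	/* explicit-null labels select the next lookup (IPv4 or IPv6) */
	printf("IPv4 explicit null: val=0x%08x mask=0x%08x\n",
	       MPLS_LABEL_IPV4NULL << MPLS_LS_LABEL_SHIFT, MPLS_LS_LABEL_MASK);
	printf("IPv6 explicit null: val=0x%08x mask=0x%08x\n",
	       MPLS_LABEL_IPV6NULL << MPLS_LS_LABEL_SHIFT, MPLS_LS_LABEL_MASK);
	return 0;
}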
/* Set entries for PPPoE ethertype */
static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
{
@@ -1630,8 +1765,9 @@ static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
MVPP2_PRS_RI_L3_PROTO_MASK);
- /* Skip eth_type + 4 bytes of IP header */
- mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
+ /* goto ipv4 dest-address (skip eth_type + IP-header-size - 4) */
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN +
+ sizeof(struct iphdr) - 4,
MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
/* Set L3 offset */
mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
@@ -1651,7 +1787,8 @@ static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
pe.index = tid;
mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
- MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
+ MVPP2_PRS_IPV4_HEAD |
+ MVPP2_PRS_IPV4_IHL_MIN,
MVPP2_PRS_IPV4_HEAD_MASK |
MVPP2_PRS_IPV4_IHL_MASK);
@@ -1761,19 +1898,19 @@ static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
pe.index = MVPP2_PE_IP4_PROTO_UN;
- /* Set next lu to IPv4 */
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
- mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
- /* Set L4 offset */
- mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
- sizeof(struct iphdr) - 4,
+ /* Finished: go to flowid generation */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+
+ /* Set L3 offset */
+ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, -4,
MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
- mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
- MVPP2_PRS_IPV4_DIP_AI_BIT);
+ mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
MVPP2_PRS_RI_L4_PROTO_MASK);
- mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
+ mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
+ MVPP2_PRS_IPV4_DIP_AI_BIT);
/* Unmask all ports */
mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
@@ -1786,14 +1923,19 @@ static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
pe.index = MVPP2_PE_IP4_ADDR_UN;
- /* Finished: go to flowid generation */
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
- mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ /* Go again to ipv4 */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
+
+ mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
+ MVPP2_PRS_IPV4_DIP_AI_BIT);
+
+ /* Shift back to IPv4 proto */
+ mvpp2_prs_sram_shift_set(&pe, -12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
MVPP2_PRS_RI_L3_ADDR_MASK);
+ mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
- mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
- MVPP2_PRS_IPV4_DIP_AI_BIT);
/* Unmask all ports */
mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
@@ -1831,14 +1973,6 @@ static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
if (err)
return err;
- /* IPv4 is the last header. This is similar case as 6-TCP or 17-UDP */
- /* Result Info: UDF7=1, DS lite */
- err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
- MVPP2_PRS_RI_UDF7_IP6_LITE,
- MVPP2_PRS_RI_UDF7_MASK);
- if (err)
- return err;
-
/* IPv6 multicast */
err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
if (err)
@@ -1940,7 +2074,8 @@ static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
}
/* Find tcam entry with matched pair <vid,port> */
-static int mvpp2_prs_vid_range_find(struct mvpp2_port *port, u16 vid, u16 mask)
+static int mvpp2_prs_vid_range_find(struct mvpp2 *priv, int port_id, u16 vid,
+ u16 mask)
{
unsigned char byte[2], enable[2];
struct mvpp2_prs_entry pe;
@@ -1948,13 +2083,13 @@ static int mvpp2_prs_vid_range_find(struct mvpp2_port *port, u16 vid, u16 mask)
int tid;
/* Go through the all entries with MVPP2_PRS_LU_VID */
- for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
- tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
- if (!port->priv->prs_shadow[tid].valid ||
- port->priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID)
+ for (tid = MVPP2_PRS_VID_PORT_FIRST(port_id);
+ tid <= MVPP2_PRS_VID_PORT_LAST(port_id); tid++) {
+ if (!priv->prs_shadow[tid].valid ||
+ priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID)
continue;
- mvpp2_prs_init_from_hw(port->priv, &pe, tid);
+ mvpp2_prs_init_from_hw(priv, &pe, tid);
mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]);
mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]);
@@ -1984,7 +2119,7 @@ int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid)
memset(&pe, 0, sizeof(pe));
/* Scan TCAM and see if entry with this <vid,port> already exist */
- tid = mvpp2_prs_vid_range_find(port, vid, mask);
+ tid = mvpp2_prs_vid_range_find(priv, port->id, vid, mask);
reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
if (reg_val & MVPP2_DSA_EXTENDED)
@@ -2042,7 +2177,7 @@ void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid)
int tid;
/* Scan TCAM and see if entry with this <vid,port> already exist */
- tid = mvpp2_prs_vid_range_find(port, vid, 0xfff);
+ tid = mvpp2_prs_vid_range_find(priv, port->id, vid, 0xfff);
/* No such entry */
if (tid < 0)
@@ -2060,7 +2195,8 @@ void mvpp2_prs_vid_remove_all(struct mvpp2_port *port)
for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
- if (priv->prs_shadow[tid].valid) {
+ if (priv->prs_shadow[tid].valid &&
+ priv->prs_shadow[tid].lu == MVPP2_PRS_LU_VID) {
mvpp2_prs_hw_inv(priv, tid);
priv->prs_shadow[tid].valid = false;
}
@@ -2190,6 +2326,10 @@ int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv)
if (err)
return err;
+ err = mvpp2_prs_mpls_init(priv);
+ if (err)
+ return err;
+
return 0;
}
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h
index 4b68dd374733..c0da6d645076 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h
@@ -28,7 +28,8 @@
#define MVPP2_PRS_IPV4_MC 0xe0
#define MVPP2_PRS_IPV4_MC_MASK 0xf0
#define MVPP2_PRS_IPV4_BC_MASK 0xff
-#define MVPP2_PRS_IPV4_IHL 0x5
+#define MVPP2_PRS_IPV4_IHL_MIN 0x5
+#define MVPP2_PRS_IPV4_IHL_MAX 0xf
#define MVPP2_PRS_IPV4_IHL_MASK 0xf
#define MVPP2_PRS_IPV6_MC 0xff
#define MVPP2_PRS_IPV6_MC_MASK 0xff
@@ -51,6 +52,7 @@
#define MVPP2_PRS_AI_MASK 0xff
#define MVPP2_PRS_PORT_MASK 0xff
#define MVPP2_PRS_LU_MASK 0xf
+#define MVPP2_PRS_WORD_MASK 0xffff
/* TCAM entries in registers are accessed using 16 data bits + 16 enable bits */
#define MVPP2_PRS_BYTE_TO_WORD(byte) ((byte) / 2)
@@ -103,10 +105,11 @@
#define MVPP2_PE_MAC_RANGE_START (MVPP2_PE_MAC_RANGE_END - \
MVPP2_PRS_MAC_RANGE_SIZE + 1)
/* VLAN filtering range */
-#define MVPP2_PE_VID_FILT_RANGE_END (MVPP2_PRS_TCAM_SRAM_SIZE - 31)
+#define MVPP2_PE_VID_FILT_RANGE_END (MVPP2_PRS_TCAM_SRAM_SIZE - 32)
#define MVPP2_PE_VID_FILT_RANGE_START (MVPP2_PE_VID_FILT_RANGE_END - \
MVPP2_PRS_VLAN_FILT_RANGE_SIZE + 1)
-#define MVPP2_PE_LAST_FREE_TID (MVPP2_PE_MAC_RANGE_START - 1)
+#define MVPP2_PE_LAST_FREE_TID (MVPP2_PE_MAC_RANGE_START - 1)
+#define MVPP2_PE_MH_SKIP_PRS (MVPP2_PRS_TCAM_SRAM_SIZE - 31)
#define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30)
#define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 29)
#define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28)
@@ -263,6 +266,7 @@ enum mvpp2_prs_lookup {
MVPP2_PRS_LU_IP4,
MVPP2_PRS_LU_IP6,
MVPP2_PRS_LU_FLOWS,
+ MVPP2_PRS_LU_MPLS,
MVPP2_PRS_LU_LAST,
};
diff --git a/drivers/net/ethernet/marvell/octeontx2/Kconfig b/drivers/net/ethernet/marvell/octeontx2/Kconfig
index 543a1d047567..c6acb318f4bc 100644
--- a/drivers/net/ethernet/marvell/octeontx2/Kconfig
+++ b/drivers/net/ethernet/marvell/octeontx2/Kconfig
@@ -9,8 +9,8 @@ config OCTEONTX2_MBOX
config OCTEONTX2_AF
tristate "Marvell OcteonTX2 RVU Admin Function driver"
select OCTEONTX2_MBOX
+ select NET_DEVLINK
depends on (64BIT && COMPILE_TEST) || ARM64
- depends on PCI
help
This driver supports Marvell's OcteonTX2 Resource Virtualization
Unit's admin function manager which manages all RVU HW resources
@@ -29,6 +29,7 @@ config NDC_DIS_DYNAMIC_CACHING
config OCTEONTX2_PF
tristate "Marvell OcteonTX2 NIC Physical Function driver"
select OCTEONTX2_MBOX
+ select NET_DEVLINK
depends on (64BIT && COMPILE_TEST) || ARM64
depends on PCI
help
@@ -39,3 +40,11 @@ config OCTEONTX2_VF
depends on OCTEONTX2_PF
help
This driver supports Marvell's OcteonTX2 NIC virtual function.
+
+config OCTEONTX2_BPHY_RFOE_NETDEV
+ tristate "OcteonTX2 BPHY RFOE netdev driver"
+ depends on ARM64
+ help
+ This driver provides support for processing packets received/sent by
+ BPHY RFOE MHAB, such as eCPRI control, PTP and other Ethernet packets,
+ in the Linux kernel. The rest of the packets are processed by the ODP
+ application.
diff --git a/drivers/net/ethernet/marvell/octeontx2/Makefile b/drivers/net/ethernet/marvell/octeontx2/Makefile
index 0064a69e0f72..53743791546c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/Makefile
@@ -6,3 +6,4 @@
obj-$(CONFIG_OCTEONTX2_MBOX) += af/
obj-$(CONFIG_OCTEONTX2_AF) += af/
obj-$(CONFIG_OCTEONTX2_PF) += nic/
+obj-$(CONFIG_OCTEONTX2_BPHY_RFOE_NETDEV) += bphy/
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
index 2f7a861d0c7b..e79230603c0c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
@@ -1,12 +1,16 @@
# SPDX-License-Identifier: GPL-2.0
#
-# Makefile for Marvell's OcteonTX2 RVU Admin Function driver
+# Makefile for Marvell's RVU Admin Function driver
#
ccflags-y += -I$(src)
-obj-$(CONFIG_OCTEONTX2_MBOX) += octeontx2_mbox.o
-obj-$(CONFIG_OCTEONTX2_AF) += octeontx2_af.o
+obj-$(CONFIG_OCTEONTX2_MBOX) += rvu_mbox.o
+obj-$(CONFIG_OCTEONTX2_AF) += rvu_af.o
-octeontx2_mbox-y := mbox.o rvu_trace.o
-octeontx2_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
- rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o
+rvu_mbox-y := mbox.o rvu_trace.o
+rvu_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
+ rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o \
+ rvu_validation.o rvu_sso.o rvu_tim.o \
+ rvu_cpt.o rvu_npc_fs.o rvu_fixes.o \
+ rvu_sdp.o rvu_ree.o rvu_cn10k.o rpm.o rvu_devlink.o \
+ rvu_switch.o
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index fc27a40202c6..48f2a36c90c3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -1,11 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 CGX driver
*
- * Copyright (C) 2018 Marvell International Ltd.
+ * Copyright (C) 2018 Marvell.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/acpi.h>
@@ -14,61 +11,53 @@
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include "cgx.h"
+#include "rvu.h"
+#include "lmac_common.h"
-#define DRV_NAME "octeontx2-cgx"
-#define DRV_STRING "Marvell OcteonTX2 CGX/MAC Driver"
-
-/**
- * struct lmac
- * @wq_cmd_cmplt: waitq to keep the process blocked until cmd completion
- * @cmd_lock: Lock to serialize the command interface
- * @resp: command response
- * @link_info: link related information
- * @event_cb: callback for linkchange events
- * @event_cb_lock: lock for serializing callback with unregister
- * @cmd_pend: flag set before new command is started
- * flag cleared after command response is received
- * @cgx: parent cgx port
- * @lmac_id: lmac port id
- * @name: lmac port name
- */
-struct lmac {
- wait_queue_head_t wq_cmd_cmplt;
- struct mutex cmd_lock;
- u64 resp;
- struct cgx_link_user_info link_info;
- struct cgx_event_cb event_cb;
- spinlock_t event_cb_lock;
- bool cmd_pend;
- struct cgx *cgx;
- u8 lmac_id;
- char *name;
-};
+#define DRV_NAME "Marvell-CGX/RPM"
+#define DRV_STRING "Marvell CGX/RPM Driver"
-struct cgx {
- void __iomem *reg_base;
- struct pci_dev *pdev;
- u8 cgx_id;
- u8 lmac_count;
- struct lmac *lmac_idmap[MAX_LMAC_PER_CGX];
- struct work_struct cgx_cmd_work;
- struct workqueue_struct *cgx_cmd_workq;
- struct list_head cgx_list;
-};
+#define CGX_RX_STAT_GLOBAL_INDEX 9
static LIST_HEAD(cgx_list);
/* Convert firmware speed encoding to user format(Mbps) */
-static u32 cgx_speed_mbps[CGX_LINK_SPEED_MAX];
+static const u32 cgx_speed_mbps[CGX_LINK_SPEED_MAX] = {
+ [CGX_LINK_NONE] = 0,
+ [CGX_LINK_10M] = 10,
+ [CGX_LINK_100M] = 100,
+ [CGX_LINK_1G] = 1000,
+ [CGX_LINK_2HG] = 2500,
+ [CGX_LINK_5G] = 5000,
+ [CGX_LINK_10G] = 10000,
+ [CGX_LINK_20G] = 20000,
+ [CGX_LINK_25G] = 25000,
+ [CGX_LINK_40G] = 40000,
+ [CGX_LINK_50G] = 50000,
+ [CGX_LINK_80G] = 80000,
+ [CGX_LINK_100G] = 100000,
+};
/* Convert firmware lmac type encoding to string */
-static char *cgx_lmactype_string[LMAC_MODE_MAX];
+static const char *cgx_lmactype_string[LMAC_MODE_MAX] = {
+ [LMAC_MODE_SGMII] = "SGMII",
+ [LMAC_MODE_XAUI] = "XAUI",
+ [LMAC_MODE_RXAUI] = "RXAUI",
+ [LMAC_MODE_10G_R] = "10G_R",
+ [LMAC_MODE_40G_R] = "40G_R",
+ [LMAC_MODE_QSGMII] = "QSGMII",
+ [LMAC_MODE_25G_R] = "25G_R",
+ [LMAC_MODE_50G_R] = "50G_R",
+ [LMAC_MODE_100G_R] = "100G_R",
+ [LMAC_MODE_USXGMII] = "USXGMII",
+};
/* CGX PHY management internal APIs */
static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en);
@@ -76,22 +65,63 @@ static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en);
/* Supported devices */
static const struct pci_device_id cgx_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) },
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM) },
{ 0, } /* end of table */
};
MODULE_DEVICE_TABLE(pci, cgx_id_table);
-static void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val)
+static bool is_dev_rpm(void *cgxd)
+{
+ struct cgx *cgx = cgxd;
+
+ return (cgx->pdev->device == PCI_DEVID_CN10K_RPM);
+}
+
+bool is_lmac_valid(struct cgx *cgx, int lmac_id)
+{
+ if (!cgx || lmac_id < 0 || lmac_id >= MAX_LMAC_PER_CGX)
+ return false;
+ return test_bit(lmac_id, &cgx->lmac_bmap);
+}
+
+/* Helper function to get the sequential index
+ * of an enabled LMAC within a CGX
+ */
+static int get_sequence_id_of_lmac(struct cgx *cgx, int lmac_id)
+{
+ int tmp, id = 0;
+
+ for_each_set_bit(tmp, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) {
+ if (tmp == lmac_id)
+ break;
+ id++;
+ }
+
+ return id;
+}
+
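get_sequence_id_of_lmac() above, combined with the per-LMAC DMAC CAM block used by the address helpers further down, gives index = sequence_id * max_entries_per_lmac + slot. A small sketch; the 8-entries-per-LMAC figure and the bitmap value are assumptions for illustration:

#include <stdio.h>

/* counts how many enabled LMACs precede lmac_id, like the helper above */
static int sequence_id(unsigned long lmac_bmap, int lmac_id)
{
	int id = 0;

	for (int bit = 0; bit < lmac_id; bit++)
		if (lmac_bmap & (1UL << bit))
			id++;
	return id;
}

int main(void)
{
	unsigned long lmac_bmap = 0x5;	/* LMACs 0 and 2 enabled */
	int max_per_lmac = 8;		/* assumed CAM entries per LMAC */

	/* second filter slot of LMAC 2, the 2nd enabled LMAC on this CGX */
	int cam_index = sequence_id(lmac_bmap, 2) * max_per_lmac + 1;

	printf("CAM index = %d\n", cam_index);	/* 1*8 + 1 = 9 */
	return 0;
}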
+struct mac_ops *get_mac_ops(void *cgxd)
+{
+ if (!cgxd)
+ return cgxd;
+
+ return ((struct cgx *)cgxd)->mac_ops;
+}
+
+void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val)
{
- writeq(val, cgx->reg_base + (lmac << 18) + offset);
+ writeq(val, cgx->reg_base + (lmac << cgx->mac_ops->lmac_offset) +
+ offset);
}
-static u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset)
+u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset)
{
- return readq(cgx->reg_base + (lmac << 18) + offset);
+ return readq(cgx->reg_base + (lmac << cgx->mac_ops->lmac_offset) +
+ offset);
}
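The reworked cgx_read()/cgx_write() above compute the CSR address as base + (lmac << lmac_offset) + offset, with the shift now taken from mac_ops instead of the hard-coded 18. A worked example using the CGX stride (the register offset value is illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int lmac_offset = 18;	/* CGX stride per the removed code; RPM may differ */
	uint64_t lmac = 2, reg = 0x1f8;	/* illustrative register offset */

	/* 2 << 18 = 0x80000, i.e. 256KB of CSR space per LMAC */
	printf("csr address offset = 0x%llx\n",
	       (unsigned long long)((lmac << lmac_offset) + reg));
	return 0;
}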
-static inline struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx)
+struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx)
{
if (!cgx || lmac_id >= MAX_LMAC_PER_CGX)
return NULL;
@@ -135,6 +165,26 @@ void *cgx_get_pdata(int cgx_id)
return NULL;
}
+void cgx_lmac_write(int cgx_id, int lmac_id, u64 offset, u64 val)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+
+ /* Software must not access disabled LMAC registers */
+ if (!is_lmac_valid(cgx_dev, lmac_id))
+ return;
+ cgx_write(cgx_dev, lmac_id, offset, val);
+}
+
+u64 cgx_lmac_read(int cgx_id, int lmac_id, u64 offset)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+
+ /* Software must not access disabled LMAC registers */
+ if (!is_lmac_valid(cgx_dev, lmac_id))
+ return 0;
+ return cgx_read(cgx_dev, lmac_id, offset);
+}
+
int cgx_get_cgxid(void *cgxd)
{
struct cgx *cgx = cgxd;
@@ -145,6 +195,16 @@ int cgx_get_cgxid(void *cgxd)
return cgx->cgx_id;
}
+u8 cgx_lmac_get_p2x(int cgx_id, int lmac_id)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ u64 cfg;
+
+ cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_CFG);
+
+ return (cfg & CMR_P2X_SEL_MASK) >> CMR_P2X_SEL_SHIFT;
+}
+
/* Ensure the required lock for event queue(where asynchronous events are
* posted) is acquired before calling this API. Else an asynchronous event(with
* latest link status) can reach the destination before this function returns
@@ -172,32 +232,261 @@ static u64 mac2u64 (u8 *mac_addr)
return mac;
}
+static void cfg2mac(u64 cfg, u8 *mac_addr)
+{
+ int i, index = 0;
+
+ for (i = ETH_ALEN - 1; i >= 0; i--, index++)
+ mac_addr[i] = (cfg >> (8 * index)) & 0xFF;
+}
+
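cfg2mac() above is the inverse of mac2u64(), whose body is not shown in this hunk; the sketch below reconstructs a matching MSB-first packing purely for illustration and round-trips a sample address:

#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

/* assumed packing: mac[0] lands in the most significant of the six used bytes */
static uint64_t mac2u64(const uint8_t *mac)
{
	uint64_t cfg = 0;

	for (int i = 0; i < ETH_ALEN; i++)
		cfg = (cfg << 8) | mac[i];
	return cfg;
}

/* mirrors cfg2mac() above */
static void cfg2mac(uint64_t cfg, uint8_t *mac)
{
	for (int i = ETH_ALEN - 1, index = 0; i >= 0; i--, index++)
		mac[i] = (cfg >> (8 * index)) & 0xFF;
}

int main(void)
{
	uint8_t mac[ETH_ALEN] = { 0x02, 0x00, 0xc0, 0xa8, 0x01, 0x10 };
	uint8_t out[ETH_ALEN];
	uint64_t cfg = mac2u64(mac);

	cfg2mac(cfg, out);
	printf("cfg = 0x%012llx, round-trip ok = %d\n",
	       (unsigned long long)cfg,
	       out[0] == mac[0] && out[5] == mac[5]);
	return 0;
}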
int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
{
struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
+ struct mac_ops *mac_ops;
+ int index, id;
u64 cfg;
+ /* access mac_ops to know csr_offset */
+ mac_ops = cgx_dev->mac_ops;
+
/* copy 6bytes from macaddr */
/* memcpy(&cfg, mac_addr, 6); */
cfg = mac2u64 (mac_addr);
- cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (lmac_id * 0x8)),
+ id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+ index = id * lmac->mac_to_index_bmap.max;
+
+ cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)),
cfg | CGX_DMAC_CAM_ADDR_ENABLE | ((u64)lmac_id << 49));
cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
- cfg |= CGX_DMAC_CTL0_CAM_ENABLE;
+ cfg |= (CGX_DMAC_CTL0_CAM_ENABLE | CGX_DMAC_BCAST_MODE |
+ CGX_DMAC_MCAST_MODE);
+ cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
+
+ return 0;
+}
+
+u64 cgx_read_dmac_ctrl(void *cgxd, int lmac_id)
+{
+ struct mac_ops *mac_ops;
+ struct cgx *cgx = cgxd;
+
+ if (!cgxd || !is_lmac_valid(cgxd, lmac_id))
+ return 0;
+
+ cgx = cgxd;
+ /* Get mac_ops to know csr offset */
+ mac_ops = cgx->mac_ops;
+
+ return cgx_read(cgxd, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+}
+
+u64 cgx_read_dmac_entry(void *cgxd, int index)
+{
+ struct mac_ops *mac_ops;
+ struct cgx *cgx;
+
+ if (!cgxd)
+ return 0;
+
+ cgx = cgxd;
+ mac_ops = cgx->mac_ops;
+ return cgx_read(cgx, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 8)));
+}
+
+int cgx_lmac_addr_add(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
+ struct mac_ops *mac_ops;
+ int index, idx;
+ u64 cfg = 0;
+ int id;
+
+ if (!lmac)
+ return -ENODEV;
+
+ mac_ops = cgx_dev->mac_ops;
+ /* Get available index where entry is to be installed */
+ idx = rvu_alloc_rsrc(&lmac->mac_to_index_bmap);
+ if (idx < 0)
+ return idx;
+
+ id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+ index = id * lmac->mac_to_index_bmap.max + idx;
+
+ cfg = mac2u64 (mac_addr);
+ cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
+ cfg |= ((u64)lmac_id << 49);
+ cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg);
+
+ cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+ cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_CAM_ACCEPT);
+
+ if (is_multicast_ether_addr(mac_addr)) {
+ cfg &= ~GENMASK_ULL(2, 1);
+ cfg |= CGX_DMAC_MCAST_MODE_CAM;
+ lmac->mcast_filters_count++;
+ } else if (!lmac->mcast_filters_count) {
+ cfg |= CGX_DMAC_MCAST_MODE;
+ }
+
+ cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
+
+ return idx;
+}
+EXPORT_SYMBOL(cgx_lmac_addr_add);
+
+int cgx_lmac_addr_reset(u8 cgx_id, u8 lmac_id)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
+ struct mac_ops *mac_ops;
+ u8 index = 0, id;
+ u64 cfg;
+
+ if (!lmac)
+ return -ENODEV;
+
+ mac_ops = cgx_dev->mac_ops;
+ /* Restore index 0 to its default init value as done during
+ * cgx_lmac_init
+ */
+ set_bit(0, lmac->mac_to_index_bmap.bmap);
+
+ id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+ index = id * lmac->mac_to_index_bmap.max + index;
+ cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), 0);
+
+ /* Reset CGXX_CMRX_RX_DMAC_CTL0 register to default state */
+ cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+ cfg &= ~CGX_DMAC_CAM_ACCEPT;
+ cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE);
cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
return 0;
}
+EXPORT_SYMBOL(cgx_lmac_addr_reset);
+
+/* Allows the caller to change the MAC address associated with an index
+ * in the DMAC filter table, including index 0, which is reserved for the
+ * interface MAC address
+ */
+int cgx_lmac_addr_update(u8 cgx_id, u8 lmac_id, u8 *mac_addr, u8 index)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct mac_ops *mac_ops;
+ struct lmac *lmac;
+ u64 cfg;
+ int id;
+
+ lmac = lmac_pdata(lmac_id, cgx_dev);
+ if (!lmac)
+ return -ENODEV;
+
+ mac_ops = cgx_dev->mac_ops;
+ /* Validate the index */
+ if (index >= lmac->mac_to_index_bmap.max)
+ return -EINVAL;
+
+ /* ensure index is already set */
+ if (!test_bit(index, lmac->mac_to_index_bmap.bmap))
+ return -EINVAL;
+
+ id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+ index = id * lmac->mac_to_index_bmap.max + index;
+
+ cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)));
+ cfg &= ~CGX_RX_DMAC_ADR_MASK;
+ cfg |= mac2u64 (mac_addr);
+
+ cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg);
+ return 0;
+}
+
+int cgx_lmac_addr_del(u8 cgx_id, u8 lmac_id, u8 index)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
+ struct mac_ops *mac_ops;
+ u8 mac[ETH_ALEN];
+ u64 cfg;
+ int id;
+
+ if (!lmac)
+ return -ENODEV;
+
+ mac_ops = cgx_dev->mac_ops;
+ /* Validate the index */
+ if (index >= lmac->mac_to_index_bmap.max)
+ return -EINVAL;
+
+ /* Skip deletion for reserved index i.e. index 0 */
+ if (index == 0)
+ return 0;
+
+ rvu_free_rsrc(&lmac->mac_to_index_bmap, index);
+
+ id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+ index = id * lmac->mac_to_index_bmap.max + index;
+
+ /* Read MAC address to check whether it is ucast or mcast */
+ cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)));
+
+ cfg2mac(cfg, mac);
+ if (is_multicast_ether_addr(mac))
+ lmac->mcast_filters_count--;
+
+ if (!lmac->mcast_filters_count) {
+ cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+ cfg &= ~GENMASK_ULL(2, 1);
+ cfg |= CGX_DMAC_MCAST_MODE;
+ cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
+ }
+
+ cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), 0);
+
+ return 0;
+}
+EXPORT_SYMBOL(cgx_lmac_addr_del);
+
+int cgx_lmac_addr_max_entries_get(u8 cgx_id, u8 lmac_id)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
+
+ if (lmac)
+ return lmac->mac_to_index_bmap.max;
+
+ return 0;
+}
+EXPORT_SYMBOL(cgx_lmac_addr_max_entries_get);
u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id)
{
struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
+ struct mac_ops *mac_ops;
+ int index;
u64 cfg;
+ int id;
- cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8);
+ mac_ops = cgx_dev->mac_ops;
+
+ id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+ index = id * lmac->mac_to_index_bmap.max;
+
+ cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8);
return cfg & CGX_RX_DMAC_ADR_MASK;
}
@@ -205,15 +494,28 @@ int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind)
{
struct cgx *cgx = cgxd;
- if (!cgx || lmac_id >= cgx->lmac_count)
+ if (!is_lmac_valid(cgx, lmac_id))
return -ENODEV;
cgx_write(cgx, lmac_id, CGXX_CMRX_RX_ID_MAP, (pkind & 0x3F));
return 0;
}
-static inline u8 cgx_get_lmac_type(struct cgx *cgx, int lmac_id)
+int cgx_get_pkind(void *cgxd, u8 lmac_id, int *pkind)
+{
+ struct cgx *cgx = cgxd;
+
+ if (!is_lmac_valid(cgx, lmac_id))
+ return -ENODEV;
+
+ *pkind = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_ID_MAP);
+ *pkind = *pkind & 0x3F;
+ return 0;
+}
+
+u8 cgx_get_lmac_type(void *cgxd, int lmac_id)
{
+ struct cgx *cgx = cgxd;
u64 cfg;
cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
@@ -227,10 +529,10 @@ int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable)
u8 lmac_type;
u64 cfg;
- if (!cgx || lmac_id >= cgx->lmac_count)
+ if (!is_lmac_valid(cgx, lmac_id))
return -ENODEV;
- lmac_type = cgx_get_lmac_type(cgx, lmac_id);
+ lmac_type = cgx->mac_ops->get_lmac_type(cgx, lmac_id);
if (lmac_type == LMAC_MODE_SGMII || lmac_type == LMAC_MODE_QSGMII) {
cfg = cgx_read(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL);
if (enable)
@@ -252,33 +554,50 @@ int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable)
void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
{
struct cgx *cgx = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx);
+ u16 max_dmac = lmac->mac_to_index_bmap.max;
+ struct mac_ops *mac_ops;
+ int index, i;
u64 cfg = 0;
+ int id;
if (!cgx)
return;
+ id = get_sequence_id_of_lmac(cgx, lmac_id);
+
+ mac_ops = cgx->mac_ops;
if (enable) {
/* Enable promiscuous mode on LMAC */
cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
- cfg &= ~(CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE);
- cfg |= CGX_DMAC_BCAST_MODE;
+ cfg &= ~CGX_DMAC_CAM_ACCEPT;
+ cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE);
cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
- cfg = cgx_read(cgx, 0,
- (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8));
- cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE;
- cgx_write(cgx, 0,
- (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg);
+ for (i = 0; i < max_dmac; i++) {
+ index = id * max_dmac + i;
+ cfg = cgx_read(cgx, 0,
+ (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8));
+ cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE;
+ cgx_write(cgx, 0,
+ (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8), cfg);
+ }
} else {
/* Disable promiscuous mode */
cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
cfg |= CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE;
cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
- cfg = cgx_read(cgx, 0,
- (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8));
- cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
- cgx_write(cgx, 0,
- (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg);
+ for (i = 0; i < max_dmac; i++) {
+ index = id * max_dmac + i;
+ cfg = cgx_read(cgx, 0,
+ (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8));
+ if ((cfg & CGX_RX_DMAC_ADR_MASK) != 0) {
+ cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
+ cgx_write(cgx, 0,
+ (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8),
+ cfg);
+ }
+ }
}
}
@@ -286,27 +605,54 @@ void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable)
{
struct cgx *cgx = cgxd;
+ u8 rx_pause, tx_pause;
+ bool is_pfc_enabled;
+ struct lmac *lmac;
u64 cfg;
if (!cgx)
return;
- if (enable) {
- cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
- cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
- cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+ lmac = lmac_pdata(lmac_id, cgx);
+ if (!lmac)
+ return;
- cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
- cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
- cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ /* Pause frames are not enabled, just return */
+ if (!bitmap_weight(lmac->rx_fc_pfvf_bmap.bmap, lmac->rx_fc_pfvf_bmap.max))
+ return;
+
+ cgx_lmac_get_pause_frm_status(cgx, lmac_id, &rx_pause, &tx_pause);
+ is_pfc_enabled = rx_pause ? false : true;
+
+ if (enable) {
+ if (!is_pfc_enabled) {
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ } else {
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
+ cfg |= CGXX_SMUX_CBFC_CTL_BCK_EN;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
+ }
} else {
- cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
- cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
- cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
- cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
- cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
- cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ if (!is_pfc_enabled) {
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ } else {
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
+ cfg &= ~CGXX_SMUX_CBFC_CTL_BCK_EN;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
+ }
}
}
@@ -314,8 +660,13 @@ int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat)
{
struct cgx *cgx = cgxd;
- if (!cgx || lmac_id >= cgx->lmac_count)
+ if (!is_lmac_valid(cgx, lmac_id))
return -ENODEV;
+
+ /* pass lmac as 0 for CGX_CMR_RX_STAT9-12 */
+ if (idx >= CGX_RX_STAT_GLOBAL_INDEX)
+ lmac_id = 0;
+
*rx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_STAT0 + (idx * 8));
return 0;
}
@@ -324,25 +675,119 @@ int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat)
{
struct cgx *cgx = cgxd;
- if (!cgx || lmac_id >= cgx->lmac_count)
+ if (!is_lmac_valid(cgx, lmac_id))
return -ENODEV;
+
*tx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_TX_STAT0 + (idx * 8));
return 0;
}
+int cgx_stats_rst(void *cgxd, int lmac_id)
+{
+ struct cgx *cgx = cgxd;
+ int stat_id;
+
+ if (!is_lmac_valid(cgx, lmac_id))
+ return -ENODEV;
+
+ for (stat_id = 0 ; stat_id < CGX_RX_STATS_COUNT; stat_id++) {
+ if (stat_id >= CGX_RX_STAT_GLOBAL_INDEX)
+ /* pass lmac as 0 for CGX_CMR_RX_STAT9-12 */
+ cgx_write(cgx, 0,
+ (CGXX_CMRX_RX_STAT0 + (stat_id * 8)), 0);
+ else
+ cgx_write(cgx, lmac_id,
+ (CGXX_CMRX_RX_STAT0 + (stat_id * 8)), 0);
+ }
+
+ for (stat_id = 0 ; stat_id < CGX_TX_STATS_COUNT; stat_id++)
+ cgx_write(cgx, lmac_id, CGXX_CMRX_TX_STAT0 + (stat_id * 8), 0);
+
+ return 0;
+}
+
+u64 cgx_features_get(void *cgxd)
+{
+ return ((struct cgx *)cgxd)->hw_features;
+}
+
+static int cgx_set_fec_stats_count(struct cgx_link_user_info *linfo)
+{
+ if (!linfo->fec)
+ return 0;
+
+ switch (linfo->lmac_type_id) {
+ case LMAC_MODE_SGMII:
+ case LMAC_MODE_XAUI:
+ case LMAC_MODE_RXAUI:
+ case LMAC_MODE_QSGMII:
+ return 0;
+ case LMAC_MODE_10G_R:
+ case LMAC_MODE_25G_R:
+ case LMAC_MODE_100G_R:
+ case LMAC_MODE_USXGMII:
+ return 1;
+ case LMAC_MODE_40G_R:
+ return 4;
+ case LMAC_MODE_50G_R:
+ if (linfo->fec == OTX2_FEC_BASER)
+ return 2;
+ else
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+int cgx_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp)
+{
+ int stats, fec_stats_count = 0;
+ int corr_reg, uncorr_reg;
+ struct cgx *cgx = cgxd;
+
+ if (!is_lmac_valid(cgx, lmac_id))
+ return -ENODEV;
+ fec_stats_count =
+ cgx_set_fec_stats_count(&cgx->lmac_idmap[lmac_id]->link_info);
+ if (cgx->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_BASER) {
+ corr_reg = CGXX_SPUX_LNX_FEC_CORR_BLOCKS;
+ uncorr_reg = CGXX_SPUX_LNX_FEC_UNCORR_BLOCKS;
+ } else {
+ corr_reg = CGXX_SPUX_RSFEC_CORR;
+ uncorr_reg = CGXX_SPUX_RSFEC_UNCORR;
+ }
+ for (stats = 0; stats < fec_stats_count; stats++) {
+ rsp->fec_corr_blks +=
+ cgx_read(cgx, lmac_id, corr_reg + (stats * 8));
+ rsp->fec_uncorr_blks +=
+ cgx_read(cgx, lmac_id, uncorr_reg + (stats * 8));
+ }
+ return 0;
+}
+
+u64 cgx_get_lmac_tx_fifo_status(void *cgxd, int lmac_id)
+{
+ struct cgx *cgx = cgxd;
+
+ if (!is_lmac_valid(cgx, lmac_id))
+ return 0;
+ return cgx_read(cgx, lmac_id, CGXX_CMRX_TX_FIFO_LEN);
+}
+EXPORT_SYMBOL(cgx_get_lmac_tx_fifo_status);
+
int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable)
{
struct cgx *cgx = cgxd;
u64 cfg;
- if (!cgx || lmac_id >= cgx->lmac_count)
+ if (!is_lmac_valid(cgx, lmac_id))
return -ENODEV;
cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
if (enable)
- cfg |= CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN;
+ cfg |= DATA_PKT_RX_EN | DATA_PKT_TX_EN;
else
- cfg &= ~(CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN);
+ cfg &= ~(DATA_PKT_RX_EN | DATA_PKT_TX_EN);
cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
return 0;
}
@@ -352,7 +797,7 @@ int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable)
struct cgx *cgx = cgxd;
u64 cfg, last;
- if (!cgx || lmac_id >= cgx->lmac_count)
+ if (!is_lmac_valid(cgx, lmac_id))
return -ENODEV;
cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
@@ -367,15 +812,32 @@ int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable)
return !!(last & DATA_PKT_TX_EN);
}
-int cgx_lmac_get_pause_frm(void *cgxd, int lmac_id,
- u8 *tx_pause, u8 *rx_pause)
+static int cgx_lmac_get_higig2_pause_frm_status(void *cgxd, int lmac_id,
+ u8 *tx_pause, u8 *rx_pause)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_HG2_CONTROL);
+
+ *rx_pause = !!(cfg & CGXX_SMUX_HG2_CONTROL_RX_ENABLE);
+ *tx_pause = !!(cfg & CGXX_SMUX_HG2_CONTROL_TX_ENABLE);
+ return 0;
+}
+
+int cgx_lmac_get_pause_frm_status(void *cgxd, int lmac_id,
+ u8 *tx_pause, u8 *rx_pause)
{
struct cgx *cgx = cgxd;
u64 cfg;
- if (!cgx || lmac_id >= cgx->lmac_count)
+ if (!is_lmac_valid(cgx, lmac_id))
return -ENODEV;
+ if (is_higig2_enabled(cgxd, lmac_id))
+ return cgx_lmac_get_higig2_pause_frm_status(cgxd, lmac_id,
+ tx_pause, rx_pause);
+
cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
*rx_pause = !!(cfg & CGX_SMUX_RX_FRM_CTL_CTL_BCK);
@@ -384,14 +846,51 @@ int cgx_lmac_get_pause_frm(void *cgxd, int lmac_id,
return 0;
}
-int cgx_lmac_set_pause_frm(void *cgxd, int lmac_id,
- u8 tx_pause, u8 rx_pause)
+static int cgx_lmac_enadis_higig2_pause_frm(void *cgxd, int lmac_id,
+ u8 tx_pause, u8 rx_pause)
{
struct cgx *cgx = cgxd;
u64 cfg;
- if (!cgx || lmac_id >= cgx->lmac_count)
- return -ENODEV;
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_HG2_CONTROL);
+ cfg &= ~CGXX_SMUX_HG2_CONTROL_RX_ENABLE;
+ cfg |= rx_pause ? CGXX_SMUX_HG2_CONTROL_RX_ENABLE : 0x0;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_HG2_CONTROL, cfg);
+
+ /* Forward PAUSE information to TX block */
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
+ cfg |= rx_pause ? CGX_SMUX_RX_FRM_CTL_CTL_BCK : 0x0;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_HG2_CONTROL);
+ cfg &= ~CGXX_SMUX_HG2_CONTROL_TX_ENABLE;
+ cfg |= tx_pause ? CGXX_SMUX_HG2_CONTROL_TX_ENABLE : 0x0;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_HG2_CONTROL, cfg);
+
+ /* allow intra packet hg2 generation */
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL);
+ cfg &= ~CGXX_SMUX_TX_PAUSE_PKT_HG2_INTRA_EN;
+ cfg |= tx_pause ? CGXX_SMUX_TX_PAUSE_PKT_HG2_INTRA_EN : 0x0;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL, cfg);
+
+ cfg = cgx_read(cgx, 0, CGXX_CMR_RX_OVR_BP);
+ if (tx_pause) {
+ cfg &= ~CGX_CMR_RX_OVR_BP_EN(lmac_id);
+ } else {
+ cfg |= CGX_CMR_RX_OVR_BP_EN(lmac_id);
+ cfg &= ~CGX_CMR_RX_OVR_BP_BP(lmac_id);
+ }
+ cgx_write(cgx, 0, CGXX_CMR_RX_OVR_BP, cfg);
+
+ return 0;
+}
+
+static int cgx_lmac_enadis_8023_pause_frm(void *cgxd, int lmac_id,
+ u8 tx_pause, u8 rx_pause)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
@@ -411,30 +910,37 @@ int cgx_lmac_set_pause_frm(void *cgxd, int lmac_id,
cfg &= ~CGX_CMR_RX_OVR_BP_BP(lmac_id);
}
cgx_write(cgx, 0, CGXX_CMR_RX_OVR_BP, cfg);
+
return 0;
}
-static void cgx_lmac_pause_frm_config(struct cgx *cgx, int lmac_id, bool enable)
+int cgx_lmac_enadis_pause_frm(void *cgxd, int lmac_id,
+ u8 tx_pause, u8 rx_pause)
{
- u64 cfg;
+ struct cgx *cgx = cgxd;
- if (!cgx || lmac_id >= cgx->lmac_count)
- return;
- if (enable) {
- /* Enable receive pause frames */
- cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
- cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
- cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ if (!is_lmac_valid(cgx, lmac_id))
+ return -ENODEV;
- cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
- cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
- cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+ if (is_higig2_enabled(cgxd, lmac_id))
+ return cgx_lmac_enadis_higig2_pause_frm(cgxd, lmac_id,
+ tx_pause, rx_pause);
+ else
+ return cgx_lmac_enadis_8023_pause_frm(cgxd, lmac_id,
+ tx_pause, rx_pause);
+ return 0;
+}
+EXPORT_SYMBOL(cgx_lmac_enadis_pause_frm);
- /* Enable pause frames transmission */
- cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
- cfg |= CGX_SMUX_TX_CTL_L2P_BP_CONV;
- cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);
+void cgx_lmac_pause_frm_config(void *cgxd, int lmac_id, bool enable)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+ if (!is_lmac_valid(cgx, lmac_id))
+ return;
+
+ if (enable) {
/* Set pause time and interval */
cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_TIME,
DEFAULT_PAUSE_TIME);
@@ -443,6 +949,12 @@ static void cgx_lmac_pause_frm_config(struct cgx *cgx, int lmac_id, bool enable)
cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL,
cfg | (DEFAULT_PAUSE_TIME / 2));
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL);
+ cfg = FIELD_SET(HG2_INTRA_INTERVAL, (DEFAULT_PAUSE_TIME / 2),
+ cfg);
+ cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL,
+ cfg);
+
cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_TIME,
DEFAULT_PAUSE_TIME);
@@ -451,21 +963,128 @@ static void cgx_lmac_pause_frm_config(struct cgx *cgx, int lmac_id, bool enable)
cfg &= ~0xFFFFULL;
cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL,
cfg | (DEFAULT_PAUSE_TIME / 2));
- } else {
- /* ALL pause frames received are completely ignored */
- cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
- cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
- cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ }
- cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
- cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
- cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+ /* ALL pause frames received are completely ignored */
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_HG2_CONTROL);
+ cfg &= ~CGXX_SMUX_HG2_CONTROL_RX_ENABLE;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_HG2_CONTROL, cfg);
+
+ /* Disable pause frames transmission */
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
+ cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_HG2_CONTROL);
+ cfg &= ~CGXX_SMUX_HG2_CONTROL_TX_ENABLE;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_HG2_CONTROL, cfg);
+
+ cfg = cgx_read(cgx, 0, CGXX_CMR_RX_OVR_BP);
+ cfg |= CGX_CMR_RX_OVR_BP_EN(lmac_id);
+ cfg &= ~CGX_CMR_RX_OVR_BP_BP(lmac_id);
+ cgx_write(cgx, 0, CGXX_CMR_RX_OVR_BP, cfg);
+}
+
+int verify_lmac_fc_cfg(void *cgxd, int lmac_id, u8 tx_pause, u8 rx_pause,
+ int pfvf_idx)
+{
+ struct cgx *cgx = cgxd;
+ struct lmac *lmac;
+
+ lmac = lmac_pdata(lmac_id, cgx);
+ if (!lmac)
+ return -ENODEV;
+
+ if (!rx_pause)
+ clear_bit(pfvf_idx, lmac->rx_fc_pfvf_bmap.bmap);
+ else
+ set_bit(pfvf_idx, lmac->rx_fc_pfvf_bmap.bmap);
+
+ if (!tx_pause)
+ clear_bit(pfvf_idx, lmac->tx_fc_pfvf_bmap.bmap);
+ else
+ set_bit(pfvf_idx, lmac->tx_fc_pfvf_bmap.bmap);
+
+ /* check if other pfvfs are using flow control */
+ if (!rx_pause && bitmap_weight(lmac->rx_fc_pfvf_bmap.bmap, lmac->rx_fc_pfvf_bmap.max)) {
+ dev_warn(&cgx->pdev->dev,
+ "Receive Flow control disable not permitted as its used by other PFVFs\n");
+ return -EPERM;
+ }
+
+ if (!tx_pause && bitmap_weight(lmac->tx_fc_pfvf_bmap.bmap, lmac->tx_fc_pfvf_bmap.max)) {
+ dev_warn(&cgx->pdev->dev,
+ "Transmit Flow control disable not permitted as its used by other PFVFs\n");
+ return -EPERM;
+ }
+
+ return 0;
+}
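
A minimal standalone sketch of the rule verify_lmac_fc_cfg() enforces, not part of the patch itself: it models the per-LMAC rx_fc_pfvf_bmap with a plain 64-bit word (the driver uses a 128-entry rsrc_bmap), but the policy is the same, a PF/VF may only turn pause frames off once no other PF/VF sharing the LMAC still has its bit set.

#include <stdbool.h>
#include <stdio.h>

static unsigned long long rx_fc_bmap;	/* one bit per PF/VF, stands in for rx_fc_pfvf_bmap */

static int fc_cfg_request(int pfvf_idx, bool rx_pause)
{
	if (rx_pause)
		rx_fc_bmap |= 1ULL << pfvf_idx;
	else
		rx_fc_bmap &= ~(1ULL << pfvf_idx);

	/* refuse the disable while any other PF/VF still relies on flow control */
	if (!rx_pause && rx_fc_bmap)
		return -1;	/* the driver returns -EPERM here */
	return 0;
}

int main(void)
{
	fc_cfg_request(3, true);			/* PF/VF 3 enables flow control */
	fc_cfg_request(7, true);			/* PF/VF 7 enables flow control */
	printf("%d\n", fc_cfg_request(3, false));	/* -1: PF/VF 7 still uses it   */
	printf("%d\n", fc_cfg_request(7, false));	/*  0: last user, allowed      */
	return 0;
}
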
+
+int cgx_lmac_pfc_config(void *cgxd, int lmac_id, u8 tx_pause,
+ u8 rx_pause, u16 pfc_en)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ if (!is_lmac_valid(cgx, lmac_id))
+ return -ENODEV;
+
+ /* Return as no traffic classes are requested */
+ if (tx_pause && !pfc_en)
+ return 0;
- /* Disable pause frames transmission */
- cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
- cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
- cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
+
+ if (rx_pause) {
+ cfg |= (CGXX_SMUX_CBFC_CTL_RX_EN |
+ CGXX_SMUX_CBFC_CTL_BCK_EN |
+ CGXX_SMUX_CBFC_CTL_DRP_EN);
+ } else {
+ cfg &= ~(CGXX_SMUX_CBFC_CTL_RX_EN |
+ CGXX_SMUX_CBFC_CTL_BCK_EN |
+ CGXX_SMUX_CBFC_CTL_DRP_EN);
}
+
+ if (tx_pause)
+ cfg |= CGXX_SMUX_CBFC_CTL_TX_EN;
+ else
+ cfg &= ~CGXX_SMUX_CBFC_CTL_TX_EN;
+
+ cfg = FIELD_SET(CGX_PFC_CLASS_MASK, pfc_en, cfg);
+
+ cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
+
+ /* Write source MAC address which will be filled into PFC packet */
+ cfg = cgx_lmac_addr_get(cgx->cgx_id, lmac_id);
+ cgx_write(cgx, lmac_id, CGXX_SMUX_SMAC, cfg);
+
+ return 0;
+}
+
+int cgx_lmac_get_pfc_frm_cfg(void *cgxd, int lmac_id, u8 *tx_pause,
+ u8 *rx_pause)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ if (!is_lmac_valid(cgx, lmac_id))
+ return -ENODEV;
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
+
+ *rx_pause = !!(cfg & CGXX_SMUX_CBFC_CTL_RX_EN);
+ *tx_pause = !!(cfg & CGXX_SMUX_CBFC_CTL_TX_EN);
+
+ return 0;
}
void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable)
@@ -498,7 +1117,7 @@ void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable)
}
/* CGX Firmware interface low level support */
-static int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
+int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
{
struct cgx *cgx = lmac->cgx;
struct device *dev;
@@ -530,9 +1149,9 @@ static int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
if (!wait_event_timeout(lmac->wq_cmd_cmplt, !lmac->cmd_pend,
msecs_to_jiffies(CGX_CMD_TIMEOUT))) {
dev = &cgx->pdev->dev;
- dev_err(dev, "cgx port %d:%d cmd timeout\n",
- cgx->cgx_id, lmac->lmac_id);
- err = -EIO;
+ dev_err(dev, "cgx port %d:%d cmd %lld timeout\n",
+ cgx->cgx_id, lmac->lmac_id, FIELD_GET(CMDREG_ID, req));
+ err = LMAC_AF_ERR_CMD_TIMEOUT;
goto unlock;
}
@@ -546,8 +1165,7 @@ unlock:
return err;
}
-static inline int cgx_fwi_cmd_generic(u64 req, u64 *resp,
- struct cgx *cgx, int lmac_id)
+int cgx_fwi_cmd_generic(u64 req, u64 *resp, struct cgx *cgx, int lmac_id)
{
struct lmac *lmac;
int err;
@@ -569,43 +1187,229 @@ static inline int cgx_fwi_cmd_generic(u64 req, u64 *resp,
return err;
}
-static inline void cgx_link_usertable_init(void)
-{
- cgx_speed_mbps[CGX_LINK_NONE] = 0;
- cgx_speed_mbps[CGX_LINK_10M] = 10;
- cgx_speed_mbps[CGX_LINK_100M] = 100;
- cgx_speed_mbps[CGX_LINK_1G] = 1000;
- cgx_speed_mbps[CGX_LINK_2HG] = 2500;
- cgx_speed_mbps[CGX_LINK_5G] = 5000;
- cgx_speed_mbps[CGX_LINK_10G] = 10000;
- cgx_speed_mbps[CGX_LINK_20G] = 20000;
- cgx_speed_mbps[CGX_LINK_25G] = 25000;
- cgx_speed_mbps[CGX_LINK_40G] = 40000;
- cgx_speed_mbps[CGX_LINK_50G] = 50000;
- cgx_speed_mbps[CGX_LINK_100G] = 100000;
-
- cgx_lmactype_string[LMAC_MODE_SGMII] = "SGMII";
- cgx_lmactype_string[LMAC_MODE_XAUI] = "XAUI";
- cgx_lmactype_string[LMAC_MODE_RXAUI] = "RXAUI";
- cgx_lmactype_string[LMAC_MODE_10G_R] = "10G_R";
- cgx_lmactype_string[LMAC_MODE_40G_R] = "40G_R";
- cgx_lmactype_string[LMAC_MODE_QSGMII] = "QSGMII";
- cgx_lmactype_string[LMAC_MODE_25G_R] = "25G_R";
- cgx_lmactype_string[LMAC_MODE_50G_R] = "50G_R";
- cgx_lmactype_string[LMAC_MODE_100G_R] = "100G_R";
- cgx_lmactype_string[LMAC_MODE_USXGMII] = "USXGMII";
+static int cgx_link_usertable_index_map(int speed)
+{
+ switch (speed) {
+ case SPEED_10:
+ return CGX_LINK_10M;
+ case SPEED_100:
+ return CGX_LINK_100M;
+ case SPEED_1000:
+ return CGX_LINK_1G;
+ case SPEED_2500:
+ return CGX_LINK_2HG;
+ case SPEED_5000:
+ return CGX_LINK_5G;
+ case SPEED_10000:
+ return CGX_LINK_10G;
+ case SPEED_20000:
+ return CGX_LINK_20G;
+ case SPEED_25000:
+ return CGX_LINK_25G;
+ case SPEED_40000:
+ return CGX_LINK_40G;
+ case SPEED_50000:
+ return CGX_LINK_50G;
+ case 80000:
+ return CGX_LINK_80G;
+ case SPEED_100000:
+ return CGX_LINK_100G;
+ case SPEED_UNKNOWN:
+ return CGX_LINK_NONE;
+ }
+ return CGX_LINK_NONE;
}
+static void set_mod_args(struct cgx_set_link_mode_args *args,
+ u32 speed, u8 duplex, u8 autoneg, u64 mode)
+{
+ int mode_baseidx;
+ u8 cgx_mode;
+
+ /* Fill in default values in case the user did not pass
+ * valid parameters
+ */
+ if (args->duplex == DUPLEX_UNKNOWN)
+ args->duplex = duplex;
+ if (args->speed == SPEED_UNKNOWN)
+ args->speed = speed;
+ if (args->an == AUTONEG_UNKNOWN)
+ args->an = autoneg;
+
+ /* Derive the mode_baseidx and mode fields based
+ * on the cgx_mode value
+ */
+ cgx_mode = find_first_bit((unsigned long *)&mode,
+ CGX_MODE_MAX);
+ args->mode = mode;
+ mode_baseidx = cgx_mode - 41;
+ if (mode_baseidx > 0) {
+ args->mode_baseidx = 1;
+ args->mode = BIT_ULL(mode_baseidx);
+ }
+}
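
A standalone sketch of the BASEIDX/FLAGS split performed by set_mod_args() above, not driver code: mode IDs up to 41 are sent as a flag bit with mode_baseidx 0, higher IDs as (mode_baseidx = 1, flag bit = id - 41). The mode ID 45 below is an arbitrary example.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int cgx_mode = 45;	/* arbitrary CGX_MODE_* id from the upper range */
	uint64_t flags;
	int mode_baseidx = 0;

	if ((int)cgx_mode - 41 > 0) {		/* same test as set_mod_args() */
		mode_baseidx = 1;
		flags = 1ULL << (cgx_mode - 41);
	} else {
		flags = 1ULL << cgx_mode;
	}

	printf("mode_baseidx=%d flags=0x%llx\n",
	       mode_baseidx, (unsigned long long)flags);
	return 0;
}
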
+
+static void otx2_map_ethtool_link_modes(u64 bitmask,
+ struct cgx_set_link_mode_args *args)
+{
+ switch (bitmask) {
+ case ETHTOOL_LINK_MODE_10baseT_Half_BIT:
+ set_mod_args(args, 10, 1, 1, BIT_ULL(CGX_MODE_SGMII));
+ break;
+ case ETHTOOL_LINK_MODE_10baseT_Full_BIT:
+ set_mod_args(args, 10, 0, 1, BIT_ULL(CGX_MODE_SGMII));
+ break;
+ case ETHTOOL_LINK_MODE_100baseT_Half_BIT:
+ set_mod_args(args, 100, 1, 1, BIT_ULL(CGX_MODE_SGMII));
+ break;
+ case ETHTOOL_LINK_MODE_100baseT_Full_BIT:
+ set_mod_args(args, 100, 0, 1, BIT_ULL(CGX_MODE_SGMII));
+ break;
+ case ETHTOOL_LINK_MODE_1000baseT_Half_BIT:
+ set_mod_args(args, 1000, 1, 1, BIT_ULL(CGX_MODE_SGMII));
+ break;
+ case ETHTOOL_LINK_MODE_1000baseT_Full_BIT:
+ set_mod_args(args, 1000, 0, 1, BIT_ULL(CGX_MODE_SGMII));
+ break;
+ case ETHTOOL_LINK_MODE_1000baseX_Full_BIT:
+ set_mod_args(args, 1000, 0, 0, BIT_ULL(CGX_MODE_1000_BASEX));
+ break;
+ case ETHTOOL_LINK_MODE_10000baseT_Full_BIT:
+ set_mod_args(args, 1000, 0, 1, BIT_ULL(CGX_MODE_QSGMII));
+ break;
+ case ETHTOOL_LINK_MODE_10000baseSR_Full_BIT:
+ set_mod_args(args, 10000, 0, 0, BIT_ULL(CGX_MODE_10G_C2C));
+ break;
+ case ETHTOOL_LINK_MODE_10000baseLR_Full_BIT:
+ set_mod_args(args, 10000, 0, 0, BIT_ULL(CGX_MODE_10G_C2M));
+ break;
+ case ETHTOOL_LINK_MODE_10000baseKR_Full_BIT:
+ set_mod_args(args, 10000, 0, 1, BIT_ULL(CGX_MODE_10G_KR));
+ break;
+ case ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT:
+ set_mod_args(args, 20000, 0, 0, BIT_ULL(CGX_MODE_20G_C2C));
+ break;
+ case ETHTOOL_LINK_MODE_25000baseSR_Full_BIT:
+ set_mod_args(args, 25000, 0, 0, BIT_ULL(CGX_MODE_25G_C2C));
+ break;
+ case ETHTOOL_LINK_MODE_10000baseR_FEC_BIT:
+ set_mod_args(args, 25000, 0, 0, BIT_ULL(CGX_MODE_25G_C2M));
+ break;
+ case ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT:
+ set_mod_args(args, 25000, 0, 0, BIT_ULL(CGX_MODE_25G_2_C2C));
+ break;
+ case ETHTOOL_LINK_MODE_25000baseCR_Full_BIT:
+ set_mod_args(args, 25000, 0, 1, BIT_ULL(CGX_MODE_25G_CR));
+ break;
+ case ETHTOOL_LINK_MODE_25000baseKR_Full_BIT:
+ set_mod_args(args, 25000, 0, 1, BIT_ULL(CGX_MODE_25G_KR));
+ break;
+ case ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT:
+ set_mod_args(args, 40000, 0, 0, BIT_ULL(CGX_MODE_40G_C2C));
+ break;
+ case ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT:
+ set_mod_args(args, 40000, 0, 0, BIT_ULL(CGX_MODE_40G_C2M));
+ break;
+ case ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT:
+ set_mod_args(args, 40000, 0, 1, BIT_ULL(CGX_MODE_40G_CR4));
+ break;
+ case ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT:
+ set_mod_args(args, 40000, 0, 1, BIT_ULL(CGX_MODE_40G_KR4));
+ break;
+ case ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT:
+ set_mod_args(args, 40000, 0, 0, BIT_ULL(CGX_MODE_40GAUI_C2C));
+ break;
+ case ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT:
+ set_mod_args(args, 50000, 0, 0, BIT_ULL(CGX_MODE_50G_C2C));
+ break;
+ case ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT:
+ set_mod_args(args, 50000, 0, 0, BIT_ULL(CGX_MODE_50G_4_C2C));
+ break;
+ case ETHTOOL_LINK_MODE_50000baseDR_Full_BIT:
+ set_mod_args(args, 50000, 0, 0, BIT_ULL(CGX_MODE_50G_C2M));
+ break;
+ case ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT:
+ set_mod_args(args, 50000, 0, 1, BIT_ULL(CGX_MODE_50G_CR));
+ break;
+ case ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT:
+ set_mod_args(args, 50000, 0, 1, BIT_ULL(CGX_MODE_50G_KR));
+ break;
+ case ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT:
+ set_mod_args(args, 80000, 0, 0, BIT_ULL(CGX_MODE_80GAUI_C2C));
+ break;
+ case ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT:
+ set_mod_args(args, 100000, 0, 0, BIT_ULL(CGX_MODE_100G_C2C));
+ break;
+ case ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT:
+ set_mod_args(args, 100000, 0, 0, BIT_ULL(CGX_MODE_100G_C2M));
+ break;
+ case ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT:
+ set_mod_args(args, 100000, 0, 1, BIT_ULL(CGX_MODE_100G_CR4));
+ break;
+ case ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT:
+ set_mod_args(args, 100000, 0, 1, BIT_ULL(CGX_MODE_100G_KR4));
+ break;
+ case ETHTOOL_LINK_MODE_50000baseSR_Full_BIT:
+ set_mod_args(args, 50000, 0, 0,
+ BIT_ULL(CGX_MODE_LAUI_2_C2C_BIT));
+ break;
+ case ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT:
+ set_mod_args(args, 50000, 0, 0,
+ BIT_ULL(CGX_MODE_LAUI_2_C2M_BIT));
+ break;
+ case ETHTOOL_LINK_MODE_50000baseCR_Full_BIT:
+ set_mod_args(args, 50000, 0, 1,
+ BIT_ULL(CGX_MODE_50GBASE_CR2_C_BIT));
+ break;
+ case ETHTOOL_LINK_MODE_50000baseKR_Full_BIT:
+ set_mod_args(args, 50000, 0, 1,
+ BIT_ULL(CGX_MODE_50GBASE_KR2_C_BIT));
+ break;
+ case ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT:
+ set_mod_args(args, 100000, 0, 0,
+ BIT_ULL(CGX_MODE_100GAUI_2_C2C_BIT));
+ break;
+ case ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT:
+ set_mod_args(args, 100000, 0, 0,
+ BIT_ULL(CGX_MODE_100GAUI_2_C2M_BIT));
+ break;
+ case ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT:
+ set_mod_args(args, 100000, 0, 1,
+ BIT_ULL(CGX_MODE_100GBASE_CR2_BIT));
+ break;
+ case ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT:
+ set_mod_args(args, 100000, 0, 1,
+ BIT_ULL(CGX_MODE_100GBASE_KR2_BIT));
+ break;
+ case ETHTOOL_LINK_MODE_1000baseKX_Full_BIT:
+ set_mod_args(args, 1000, 0, 0,
+ BIT_ULL(CGX_MODE_SFI_1G_BIT));
+ break;
+ case ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT:
+ set_mod_args(args, 25000, 0, 1,
+ BIT_ULL(CGX_MODE_25GBASE_CR_C_BIT));
+ break;
+ case ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT:
+ set_mod_args(args, 25000, 0, 1,
+ BIT_ULL(CGX_MODE_25GBASE_KR_C_BIT));
+ break;
+ default:
+ set_mod_args(args, 0, 1, 0, BIT_ULL(CGX_MODE_MAX));
+ break;
+ }
+}
static inline void link_status_user_format(u64 lstat,
struct cgx_link_user_info *linfo,
struct cgx *cgx, u8 lmac_id)
{
- char *lmac_string;
+ const char *lmac_string;
linfo->link_up = FIELD_GET(RESP_LINKSTAT_UP, lstat);
linfo->full_duplex = FIELD_GET(RESP_LINKSTAT_FDUPLEX, lstat);
linfo->speed = cgx_speed_mbps[FIELD_GET(RESP_LINKSTAT_SPEED, lstat)];
- linfo->lmac_type_id = cgx_get_lmac_type(cgx, lmac_id);
+ linfo->an = FIELD_GET(RESP_LINKSTAT_AN, lstat);
+ linfo->fec = FIELD_GET(RESP_LINKSTAT_FEC, lstat);
+ linfo->lmac_type_id = FIELD_GET(RESP_LINKSTAT_LMAC_TYPE, lstat);
lmac_string = cgx_lmactype_string[linfo->lmac_type_id];
strncpy(linfo->lmac_type, lmac_string, LMACTYPE_STR_LEN - 1);
}
@@ -632,6 +1436,8 @@ static inline void cgx_link_change_handler(u64 lstat,
lmac->link_info = event.link_uinfo;
linfo = &lmac->link_info;
+ if (err_type == CGX_ERR_SPEED_CHANGE_INVALID)
+ return;
/* Ensure callback doesn't get unregistered until we finish it */
spin_lock(&lmac->event_cb_lock);
@@ -660,7 +1466,8 @@ static inline bool cgx_cmdresp_is_linkevent(u64 event)
id = FIELD_GET(EVTREG_ID, event);
if (id == CGX_CMD_LINK_BRING_UP ||
- id == CGX_CMD_LINK_BRING_DOWN)
+ id == CGX_CMD_LINK_BRING_DOWN ||
+ id == CGX_CMD_MODE_CHANGE)
return true;
else
return false;
@@ -676,12 +1483,16 @@ static inline bool cgx_event_is_linkevent(u64 event)
static irqreturn_t cgx_fwi_event_handler(int irq, void *data)
{
+ u64 event, offset, clear_bit;
struct lmac *lmac = data;
struct cgx *cgx;
- u64 event;
cgx = lmac->cgx;
+ /* Clear SW_INT for RPM and CMR_INT for CGX */
+ offset = cgx->mac_ops->int_register;
+ clear_bit = cgx->mac_ops->int_ena_bit;
+
event = cgx_read(cgx, lmac->lmac_id, CGX_EVENT_REG);
if (!FIELD_GET(EVTREG_ACK, event))
@@ -704,7 +1515,7 @@ static irqreturn_t cgx_fwi_event_handler(int irq, void *data)
/* Release thread waiting for completion */
lmac->cmd_pend = false;
- wake_up_interruptible(&lmac->wq_cmd_cmplt);
+ wake_up(&lmac->wq_cmd_cmplt);
break;
case CGX_EVT_ASYNC:
if (cgx_event_is_linkevent(event))
@@ -717,7 +1528,7 @@ static irqreturn_t cgx_fwi_event_handler(int irq, void *data)
* Ack the interrupt register as well.
*/
cgx_write(lmac->cgx, lmac->lmac_id, CGX_EVENT_REG, 0);
- cgx_write(lmac->cgx, lmac->lmac_id, CGXX_CMRX_INT, FW_CGX_INT);
+ cgx_write(lmac->cgx, lmac->lmac_id, offset, clear_bit);
return IRQ_HANDLED;
}
@@ -761,20 +1572,108 @@ int cgx_get_fwdata_base(u64 *base)
{
u64 req = 0, resp;
struct cgx *cgx;
+ int first_lmac;
int err;
cgx = list_first_entry_or_null(&cgx_list, struct cgx, cgx_list);
if (!cgx)
return -ENXIO;
+ first_lmac = find_first_bit(&cgx->lmac_bmap, MAX_LMAC_PER_CGX);
req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FWD_BASE, req);
- err = cgx_fwi_cmd_generic(req, &resp, cgx, 0);
+ err = cgx_fwi_cmd_generic(req, &resp, cgx, first_lmac);
if (!err)
*base = FIELD_GET(RESP_FWD_BASE, resp);
return err;
}
+int cgx_set_link_mode(void *cgxd, struct cgx_set_link_mode_args args,
+ int cgx_id, int lmac_id)
+{
+ struct cgx *cgx = cgxd;
+ u64 req = 0, resp;
+
+ if (!cgx)
+ return -ENODEV;
+
+ otx2_map_ethtool_link_modes(args.mode, &args);
+ if (!args.speed && args.duplex && !args.an)
+ return -EINVAL;
+
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_MODE_CHANGE, req);
+ req = FIELD_SET(CMDMODECHANGE_SPEED,
+ cgx_link_usertable_index_map(args.speed), req);
+ req = FIELD_SET(CMDMODECHANGE_DUPLEX, args.duplex, req);
+ req = FIELD_SET(CMDMODECHANGE_AN, args.an, req);
+ req = FIELD_SET(CMDMODECHANGE_MODE_BASEIDX, args.mode_baseidx, req);
+ req = FIELD_SET(CMDMODECHANGE_FLAGS, args.mode, req);
+
+ return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
+}
+
+int cgx_set_fec(u64 fec, int cgx_id, int lmac_id)
+{
+ u64 req = 0, resp;
+ struct cgx *cgx;
+ int err = 0;
+
+ cgx = cgx_get_pdata(cgx_id);
+ if (!cgx)
+ return -ENXIO;
+
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_SET_FEC, req);
+ req = FIELD_SET(CMDSETFEC, fec, req);
+ err = cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
+ if (err)
+ return err;
+
+ cgx->lmac_idmap[lmac_id]->link_info.fec =
+ FIELD_GET(RESP_LINKSTAT_FEC, resp);
+ return cgx->lmac_idmap[lmac_id]->link_info.fec;
+}
+
+int cgx_get_phy_fec_stats(void *cgxd, int lmac_id)
+{
+ struct cgx *cgx = cgxd;
+ u64 req = 0, resp;
+
+ if (!cgx)
+ return -ENODEV;
+
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_PHY_FEC_STATS, req);
+ return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
+}
+
+int cgx_set_phy_mod_type(int mod, void *cgxd, int lmac_id)
+{
+ struct cgx *cgx = cgxd;
+ u64 req = 0, resp;
+
+ if (!cgx)
+ return -ENODEV;
+
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_SET_PHY_MOD_TYPE, req);
+ req = FIELD_SET(CMDSETPHYMODTYPE, mod, req);
+ return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
+}
+
+int cgx_get_phy_mod_type(void *cgxd, int lmac_id)
+{
+ struct cgx *cgx = cgxd;
+ u64 req = 0, resp;
+ int err;
+
+ if (!cgx)
+ return -ENODEV;
+
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_PHY_MOD_TYPE, req);
+ err = cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
+ if (!err)
+ return FIELD_GET(RESP_GETPHYMODTYPE, resp);
+ return err;
+}
+
static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable)
{
u64 req = 0;
@@ -790,10 +1689,11 @@ static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable)
static inline int cgx_fwi_read_version(u64 *resp, struct cgx *cgx)
{
+ int first_lmac = find_first_bit(&cgx->lmac_bmap, MAX_LMAC_PER_CGX);
u64 req = 0;
req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req);
- return cgx_fwi_cmd_generic(req, resp, cgx, 0);
+ return cgx_fwi_cmd_generic(req, resp, cgx, first_lmac);
}
static int cgx_lmac_verify_fwi_version(struct cgx *cgx)
@@ -814,8 +1714,7 @@ static int cgx_lmac_verify_fwi_version(struct cgx *cgx)
minor_ver = FIELD_GET(RESP_MINOR_VER, resp);
dev_dbg(dev, "Firmware command interface version = %d.%d\n",
major_ver, minor_ver);
- if (major_ver != CGX_FIRMWARE_MAJOR_VER ||
- minor_ver != CGX_FIRMWARE_MINOR_VER)
+ if (major_ver != CGX_FIRMWARE_MAJOR_VER)
return -EIO;
else
return 0;
@@ -827,8 +1726,8 @@ static void cgx_lmac_linkup_work(struct work_struct *work)
struct device *dev = &cgx->pdev->dev;
int i, err;
- /* Do Link up for all the lmacs */
- for (i = 0; i < cgx->lmac_count; i++) {
+ /* Do Link up for all the enabled lmacs */
+ for_each_set_bit(i, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) {
err = cgx_fwi_link_change(cgx, i, true);
if (err)
dev_info(dev, "cgx port %d:%d Link up command failed\n",
@@ -836,6 +1735,17 @@ static void cgx_lmac_linkup_work(struct work_struct *work)
}
}
+int cgx_set_link_state(void *cgxd, int lmac_id, bool enable)
+{
+ struct cgx *cgx = cgxd;
+
+ if (!cgx)
+ return -ENODEV;
+
+ return cgx_fwi_link_change(cgx, lmac_id, enable);
+}
+EXPORT_SYMBOL(cgx_set_link_state);
+
int cgx_lmac_linkup_start(void *cgxd)
{
struct cgx *cgx = cgxd;
@@ -848,17 +1758,112 @@ int cgx_lmac_linkup_start(void *cgxd)
return 0;
}
+void cgx_lmac_enadis_higig2(void *cgxd, int lmac_id, bool enable)
+{
+ struct cgx *cgx = cgxd;
+ u64 req = 0, resp;
+
+ /* disable 802.3 pause frames before enabling higig2 */
+ if (enable) {
+ cgx_lmac_enadis_8023_pause_frm(cgxd, lmac_id, false, false);
+ cgx_lmac_enadis_higig2_pause_frm(cgxd, lmac_id, true, true);
+ }
+
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_HIGIG, req);
+ req = FIELD_SET(CMDREG_ENABLE, enable, req);
+ cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
+
+ /* enable 802.3 pause frames as higig2 is disabled */
+ if (!enable) {
+ cgx_lmac_enadis_higig2_pause_frm(cgxd, lmac_id, false, false);
+ cgx_lmac_enadis_8023_pause_frm(cgxd, lmac_id, true, true);
+ }
+}
+
+bool is_higig2_enabled(void *cgxd, int lmac_id)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
+ return (cfg & CGXX_SMUX_TX_CTL_HIGIG_EN);
+}
+
+static void cgx_lmac_get_fifolen(struct cgx *cgx)
+{
+ u64 cfg;
+
+ cfg = cgx_read(cgx, 0, CGX_CONST);
+ cgx->mac_ops->fifo_len = FIELD_GET(CGX_CONST_RXFIFO_SIZE, cfg);
+}
+
+static int cgx_configure_interrupt(struct cgx *cgx, struct lmac *lmac,
+ int cnt, bool req_free)
+{
+ struct mac_ops *mac_ops = cgx->mac_ops;
+ u64 offset, ena_bit;
+ unsigned int irq;
+ int err;
+
+ irq = pci_irq_vector(cgx->pdev, mac_ops->lmac_fwi +
+ cnt * mac_ops->irq_offset);
+ offset = mac_ops->int_set_reg;
+ ena_bit = mac_ops->int_ena_bit;
+
+ if (req_free) {
+ free_irq(irq, lmac);
+ return 0;
+ }
+
+ err = request_irq(irq, cgx_fwi_event_handler, 0, lmac->name, lmac);
+ if (err)
+ return err;
+
+ /* Enable interrupt */
+ cgx_write(cgx, lmac->lmac_id, offset, ena_bit);
+ return 0;
+}
+
+int cgx_get_nr_lmacs(void *cgxd)
+{
+ struct cgx *cgx = cgxd;
+
+ return cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0x7ULL;
+}
+
+u8 cgx_get_lmacid(void *cgxd, u8 lmac_index)
+{
+ struct cgx *cgx = cgxd;
+
+ return cgx->lmac_idmap[lmac_index]->lmac_id;
+}
+
+unsigned long cgx_get_lmac_bmap(void *cgxd)
+{
+ struct cgx *cgx = cgxd;
+
+ return cgx->lmac_bmap;
+}
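
A standalone illustration of why callers walk cgx->lmac_bmap rather than looping over 0..lmac_count, not part of the patch: with non-contiguous serdes lanes only the set bits correspond to real LMACs. The loop below visits set bits the way for_each_set_bit() does, using a compiler builtin so it compiles on its own.

#include <stdio.h>

int main(void)
{
	unsigned long lmac_bmap = (1UL << 0) | (1UL << 2);	/* only LMAC0 and LMAC2 enabled */

	for (unsigned long tmp = lmac_bmap; tmp; tmp &= tmp - 1)
		printf("servicing LMAC %d\n", __builtin_ctzl(tmp));	/* index of lowest set bit */

	return 0;
}
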
+
static int cgx_lmac_init(struct cgx *cgx)
{
struct lmac *lmac;
+ u64 lmac_list;
int i, err;
- cgx->lmac_count = cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0x7;
+ cgx_lmac_get_fifolen(cgx);
+
+ /* lmac_list specifies which lmacs are enabled:
+ * when bit n is set to 1, LMAC[n] is enabled
+ */
+ if (cgx->mac_ops->non_contiguous_serdes_lane)
+ lmac_list = cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0xFULL;
+
if (cgx->lmac_count > MAX_LMAC_PER_CGX)
cgx->lmac_count = MAX_LMAC_PER_CGX;
for (i = 0; i < cgx->lmac_count; i++) {
- lmac = kcalloc(1, sizeof(struct lmac), GFP_KERNEL);
+ lmac = kzalloc(sizeof(struct lmac), GFP_KERNEL);
if (!lmac)
return -ENOMEM;
lmac->name = kcalloc(1, sizeof("cgx_fwi_xxx_yyy"), GFP_KERNEL);
@@ -867,29 +1872,56 @@ static int cgx_lmac_init(struct cgx *cgx)
goto err_lmac_free;
}
sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i);
- lmac->lmac_id = i;
+ if (cgx->mac_ops->non_contiguous_serdes_lane) {
+ lmac->lmac_id = __ffs64(lmac_list);
+ lmac_list &= ~BIT_ULL(lmac->lmac_id);
+ } else {
+ lmac->lmac_id = i;
+ }
+
lmac->cgx = cgx;
+ lmac->mac_to_index_bmap.max =
+ MAX_DMAC_ENTRIES_PER_CGX / cgx->lmac_count;
+ err = rvu_alloc_bitmap(&lmac->mac_to_index_bmap);
+ if (err)
+ goto err_name_free;
+
+ /* Reserve first entry for default MAC address */
+ set_bit(0, lmac->mac_to_index_bmap.bmap);
+
+ lmac->rx_fc_pfvf_bmap.max = 128;
+ err = rvu_alloc_bitmap(&lmac->rx_fc_pfvf_bmap);
+ if (err)
+ goto err_dmac_bmap_free;
+
+ lmac->tx_fc_pfvf_bmap.max = 128;
+ err = rvu_alloc_bitmap(&lmac->tx_fc_pfvf_bmap);
+ if (err)
+ goto err_rx_fc_bmap_free;
+
init_waitqueue_head(&lmac->wq_cmd_cmplt);
mutex_init(&lmac->cmd_lock);
spin_lock_init(&lmac->event_cb_lock);
- err = request_irq(pci_irq_vector(cgx->pdev,
- CGX_LMAC_FWI + i * 9),
- cgx_fwi_event_handler, 0, lmac->name, lmac);
- if (err)
- goto err_irq;
- /* Enable interrupt */
- cgx_write(cgx, lmac->lmac_id, CGXX_CMRX_INT_ENA_W1S,
- FW_CGX_INT);
+ err = cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, false);
+ if (err)
+ goto err_bitmap_free;
/* Add reference */
- cgx->lmac_idmap[i] = lmac;
- cgx_lmac_pause_frm_config(cgx, i, true);
+ cgx->lmac_idmap[lmac->lmac_id] = lmac;
+ set_bit(lmac->lmac_id, &cgx->lmac_bmap);
+ cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, true);
}
return cgx_lmac_verify_fwi_version(cgx);
-err_irq:
+err_bitmap_free:
+ rvu_free_bitmap(&lmac->tx_fc_pfvf_bmap);
+err_rx_fc_bmap_free:
+ rvu_free_bitmap(&lmac->rx_fc_pfvf_bmap);
+err_dmac_bmap_free:
+ rvu_free_bitmap(&lmac->mac_to_index_bmap);
+err_name_free:
kfree(lmac->name);
err_lmac_free:
kfree(lmac);
@@ -908,12 +1940,13 @@ static int cgx_lmac_exit(struct cgx *cgx)
}
/* Free all lmac related resources */
- for (i = 0; i < cgx->lmac_count; i++) {
- cgx_lmac_pause_frm_config(cgx, i, false);
+ for_each_set_bit(i, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) {
lmac = cgx->lmac_idmap[i];
if (!lmac)
continue;
- free_irq(pci_irq_vector(cgx->pdev, CGX_LMAC_FWI + i * 9), lmac);
+ cgx->mac_ops->mac_pause_frm_config(cgx, i, false);
+ cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, true);
+ kfree(lmac->mac_to_index_bmap.bmap);
kfree(lmac->name);
kfree(lmac);
}
@@ -921,6 +1954,44 @@ static int cgx_lmac_exit(struct cgx *cgx)
return 0;
}
+static void cgx_populate_features(struct cgx *cgx)
+{
+ if (is_dev_rpm(cgx))
+ cgx->hw_features = (RVU_LMAC_FEAT_DMACF | RVU_MAC_RPM |
+ RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_PTP);
+ else
+ cgx->hw_features = (RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_HIGIG2 |
+ RVU_LMAC_FEAT_PTP | RVU_LMAC_FEAT_DMACF);
+}
+
+struct mac_ops cgx_mac_ops = {
+ .name = "cgx",
+ .csr_offset = 0,
+ .lmac_offset = 18,
+ .int_register = CGXX_CMRX_INT,
+ .int_set_reg = CGXX_CMRX_INT_ENA_W1S,
+ .irq_offset = 9,
+ .int_ena_bit = FW_CGX_INT,
+ .lmac_fwi = CGX_LMAC_FWI,
+ .non_contiguous_serdes_lane = false,
+ .rx_stats_cnt = 9,
+ .tx_stats_cnt = 18,
+ .get_nr_lmacs = cgx_get_nr_lmacs,
+ .get_lmac_type = cgx_get_lmac_type,
+ .mac_lmac_intl_lbk = cgx_lmac_internal_loopback,
+ .mac_get_rx_stats = cgx_get_rx_stats,
+ .mac_get_tx_stats = cgx_get_tx_stats,
+ .mac_enadis_rx_pause_fwding = cgx_lmac_enadis_rx_pause_fwding,
+ .mac_get_pause_frm_status = cgx_lmac_get_pause_frm_status,
+ .mac_enadis_pause_frm = cgx_lmac_enadis_pause_frm,
+ .mac_pause_frm_config = cgx_lmac_pause_frm_config,
+ .mac_enadis_ptp_config = cgx_lmac_ptp_config,
+ .mac_rx_tx_enable = cgx_lmac_rx_tx_enable,
+ .mac_tx_enable = cgx_lmac_tx_enable,
+ .pfc_config = cgx_lmac_pfc_config,
+ .mac_get_pfc_frm_cfg = cgx_lmac_get_pfc_frm_cfg,
+};
+
static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct device *dev = &pdev->dev;
@@ -934,6 +2005,12 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_drvdata(pdev, cgx);
+ /* Use mac_ops to get MAC specific features */
+ if (pdev->device == PCI_DEVID_CN10K_RPM)
+ cgx->mac_ops = rpm_get_mac_ops();
+ else
+ cgx->mac_ops = &cgx_mac_ops;
+
err = pci_enable_device(pdev);
if (err) {
dev_err(dev, "Failed to enable PCI device\n");
@@ -955,7 +2032,26 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_release_regions;
}
- nvec = CGX_NVEC;
+
+ cgx->cgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24)
+ & CGX_ID_MASK;
+
+ /* Skip probe if CGX is not mapped to NIX */
+ if (!is_cgx_mapped_to_nix(pdev->subsystem_device, cgx->cgx_id)) {
+ dev_notice(dev, "CGX %d not mapped to NIX, skipping probe\n", cgx->cgx_id);
+ err = -ENOMEM;
+ goto err_release_regions;
+ }
+
+ cgx->lmac_count = cgx->mac_ops->get_nr_lmacs(cgx);
+ if (!cgx->lmac_count) {
+ dev_notice(dev, "CGX %d LMAC count is zero, skipping probe\n", cgx->cgx_id);
+ err = -EOPNOTSUPP;
+ goto err_release_regions;
+ }
+
+ nvec = pci_msix_vec_count(cgx->pdev);
+
err = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
if (err < 0 || err != nvec) {
dev_err(dev, "Request for %d msix vectors failed, err %d\n",
@@ -963,9 +2059,6 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_release_regions;
}
- cgx->cgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24)
- & CGX_ID_MASK;
-
/* init wq for processing linkup requests */
INIT_WORK(&cgx->cgx_cmd_work, cgx_lmac_linkup_work);
cgx->cgx_cmd_workq = alloc_workqueue("cgx_cmd_workq", 0, 0);
@@ -977,7 +2070,10 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
list_add(&cgx->cgx_list, &cgx_list);
- cgx_link_usertable_init();
+
+ cgx_populate_features(cgx);
+
+ mutex_init(&cgx->lock);
err = cgx_lmac_init(cgx);
if (err)
@@ -1002,8 +2098,11 @@ static void cgx_remove(struct pci_dev *pdev)
{
struct cgx *cgx = pci_get_drvdata(pdev);
- cgx_lmac_exit(cgx);
- list_del(&cgx->cgx_list);
+ if (cgx) {
+ cgx_lmac_exit(cgx);
+ list_del(&cgx->cgx_list);
+ }
+
pci_free_irq_vectors(pdev);
pci_release_regions(pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
index 27ca3291682b..ff8fee22473f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -1,11 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Marvell OcteonTx2 CGX driver
+/* Marvell OcteonTx2 CGX driver
*
- * Copyright (C) 2018 Marvell International Ltd.
+ * Copyright (C) 2018 Marvell.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef CGX_H
@@ -13,6 +10,7 @@
#include "mbox.h"
#include "cgx_fw_if.h"
+#include "rpm.h"
/* PCI device IDs */
#define PCI_DEVID_OCTEONTX2_CGX 0xA059
@@ -22,11 +20,15 @@
#define CGX_ID_MASK 0x7
#define MAX_LMAC_PER_CGX 4
-#define CGX_FIFO_LEN 65536 /* 64K for both Rx & Tx */
+#define MAX_DMAC_ENTRIES_PER_CGX 32
#define CGX_OFFSET(x) ((x) * MAX_LMAC_PER_CGX)
/* Registers */
#define CGXX_CMRX_CFG 0x00
+#define CMR_P2X_SEL_MASK GENMASK_ULL(61, 59)
+#define CMR_P2X_SEL_SHIFT 59ULL
+#define CMR_P2X_SEL_NIX0 1ULL
+#define CMR_P2X_SEL_NIX1 2ULL
#define CMR_EN BIT_ULL(55)
#define DATA_PKT_TX_EN BIT_ULL(53)
#define DATA_PKT_RX_EN BIT_ULL(54)
@@ -38,20 +40,31 @@
#define CGXX_CMRX_RX_ID_MAP 0x060
#define CGXX_CMRX_RX_STAT0 0x070
#define CGXX_CMRX_RX_LMACS 0x128
-#define CGXX_CMRX_RX_DMAC_CTL0 0x1F8
+#define CGXX_CMRX_RX_DMAC_CTL0 (0x1F8 + mac_ops->csr_offset)
#define CGX_DMAC_CTL0_CAM_ENABLE BIT_ULL(3)
#define CGX_DMAC_CAM_ACCEPT BIT_ULL(3)
+#define CGX_DMAC_MCAST_MODE_CAM BIT_ULL(2)
#define CGX_DMAC_MCAST_MODE BIT_ULL(1)
#define CGX_DMAC_BCAST_MODE BIT_ULL(0)
-#define CGXX_CMRX_RX_DMAC_CAM0 0x200
+#define CGXX_CMRX_RX_DMAC_CAM0 (0x200 + mac_ops->csr_offset)
#define CGX_DMAC_CAM_ADDR_ENABLE BIT_ULL(48)
+#define CGX_DMAC_CAM_ENTRY_LMACID GENMASK_ULL(50, 49)
#define CGXX_CMRX_RX_DMAC_CAM1 0x400
#define CGX_RX_DMAC_ADR_MASK GENMASK_ULL(47, 0)
+#define CGXX_CMRX_TX_FIFO_LEN 0x618
+#define CGXX_CMRX_TX_LMAC_IDLE BIT_ULL(14)
+#define CGXX_CMRX_TX_LMAC_E_IDLE BIT_ULL(29)
#define CGXX_CMRX_TX_STAT0 0x700
#define CGXX_SCRATCH0_REG 0x1050
#define CGXX_SCRATCH1_REG 0x1058
#define CGX_CONST 0x2000
+#define CGX_CONST_RXFIFO_SIZE GENMASK_ULL(23, 0)
#define CGXX_SPUX_CONTROL1 0x10000
+#define CGXX_SPUX_LNX_FEC_CORR_BLOCKS 0x10700
+#define CGXX_SPUX_LNX_FEC_UNCORR_BLOCKS 0x10800
+#define CGXX_SPUX_RSFEC_CORR 0x10088
+#define CGXX_SPUX_RSFEC_UNCORR 0x10090
+
#define CGXX_SPUX_CONTROL1_LBK BIT_ULL(14)
#define CGXX_GMP_PCS_MRX_CTL 0x30000
#define CGXX_GMP_PCS_MRX_CTL_LBK BIT_ULL(14)
@@ -63,22 +76,34 @@
#define CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK BIT_ULL(3)
#define CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE BIT_ULL(12)
#define CGXX_SMUX_TX_CTL 0x20178
+#define CGXX_SMUX_TX_CTL_HIGIG_EN BIT_ULL(8)
#define CGXX_SMUX_TX_PAUSE_PKT_TIME 0x20110
#define CGXX_SMUX_TX_PAUSE_PKT_INTERVAL 0x20120
+#define CGXX_SMUX_SMAC 0x20108
+#define CGXX_SMUX_CBFC_CTL 0x20218
+#define CGXX_SMUX_CBFC_CTL_RX_EN BIT_ULL(0)
+#define CGXX_SMUX_CBFC_CTL_TX_EN BIT_ULL(1)
+#define CGXX_SMUX_CBFC_CTL_DRP_EN BIT_ULL(2)
+#define CGXX_SMUX_CBFC_CTL_BCK_EN BIT_ULL(3)
+#define CGX_PFC_CLASS_MASK GENMASK_ULL(47, 32)
+#define CGXX_SMUX_TX_PAUSE_PKT_HG2_INTRA_EN BIT_ULL(32)
+#define HG2_INTRA_INTERVAL GENMASK_ULL(31, 16)
#define CGXX_GMP_GMI_TX_PAUSE_PKT_TIME 0x38230
#define CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL 0x38248
#define CGX_SMUX_TX_CTL_L2P_BP_CONV BIT_ULL(7)
#define CGXX_CMR_RX_OVR_BP 0x130
#define CGX_CMR_RX_OVR_BP_EN(X) BIT_ULL(((X) + 8))
#define CGX_CMR_RX_OVR_BP_BP(X) BIT_ULL(((X) + 4))
+#define CGXX_SMUX_HG2_CONTROL 0x20210
+#define CGXX_SMUX_HG2_CONTROL_TX_ENABLE BIT_ULL(18)
+#define CGXX_SMUX_HG2_CONTROL_RX_ENABLE BIT_ULL(17)
#define CGX_COMMAND_REG CGXX_SCRATCH1_REG
#define CGX_EVENT_REG CGXX_SCRATCH0_REG
#define CGX_CMD_TIMEOUT 2200 /* msecs */
#define DEFAULT_PAUSE_TIME 0x7FF
-#define CGX_NVEC 37
-#define CGX_LMAC_FWI 0
+#define CGX_LMAC_FWI 0
enum cgx_nix_stat_type {
NIX_STATS_RX,
@@ -126,10 +151,16 @@ int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id);
int cgx_lmac_evh_unregister(void *cgxd, int lmac_id);
int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat);
int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat);
+int cgx_stats_rst(void *cgxd, int lmac_id);
+u64 cgx_get_lmac_tx_fifo_status(void *cgxd, int lmac_id);
int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable);
int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable);
int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr);
+int cgx_lmac_addr_reset(u8 cgx_id, u8 lmac_id);
u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id);
+int cgx_lmac_addr_add(u8 cgx_id, u8 lmac_id, u8 *mac_addr);
+int cgx_lmac_addr_del(u8 cgx_id, u8 lmac_id, u8 index);
+int cgx_lmac_addr_max_entries_get(u8 cgx_id, u8 lmac_id);
void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable);
void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable);
int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable);
@@ -137,10 +168,39 @@ int cgx_get_link_info(void *cgxd, int lmac_id,
struct cgx_link_user_info *linfo);
int cgx_lmac_linkup_start(void *cgxd);
int cgx_get_fwdata_base(u64 *base);
-int cgx_lmac_get_pause_frm(void *cgxd, int lmac_id,
- u8 *tx_pause, u8 *rx_pause);
-int cgx_lmac_set_pause_frm(void *cgxd, int lmac_id,
- u8 tx_pause, u8 rx_pause);
+int cgx_lmac_get_pause_frm_status(void *cgxd, int lmac_id,
+ u8 *tx_pause, u8 *rx_pause);
+int cgx_lmac_enadis_pause_frm(void *cgxd, int lmac_id,
+ u8 tx_pause, u8 rx_pause);
+void cgx_lmac_pause_frm_config(void *cgxd, int lmac_id, bool enable);
void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable);
-
+int cgx_set_link_state(void *cgxd, int lmac_id, bool enable);
+int cgx_set_phy_mod_type(int mod, void *cgxd, int lmac_id);
+int cgx_get_phy_mod_type(void *cgxd, int lmac_id);
+void cgx_lmac_enadis_higig2(void *cgxd, int lmac_id, bool enable);
+bool is_higig2_enabled(void *cgxd, int lmac_id);
+int cgx_get_pkind(void *cgxd, u8 lmac_id, int *pkind);
+u8 cgx_lmac_get_p2x(int cgx_id, int lmac_id);
+int cgx_set_fec(u64 fec, int cgx_id, int lmac_id);
+int cgx_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp);
+int cgx_get_phy_fec_stats(void *cgxd, int lmac_id);
+int cgx_set_link_mode(void *cgxd, struct cgx_set_link_mode_args args,
+ int cgx_id, int lmac_id);
+u64 cgx_features_get(void *cgxd);
+struct mac_ops *get_mac_ops(void *cgxd);
+int cgx_get_nr_lmacs(void *cgxd);
+void cgx_lmac_write(int cgx_id, int lmac_id, u64 offset, u64 val);
+u64 cgx_lmac_read(int cgx_id, int lmac_id, u64 offset);
+u8 cgx_get_lmac_type(void *cgx, int lmac_id);
+u8 cgx_get_lmacid(void *cgxd, u8 lmac_index);
+unsigned long cgx_get_lmac_bmap(void *cgxd);
+u64 cgx_read_dmac_ctrl(void *cgxd, int lmac_id);
+u64 cgx_read_dmac_entry(void *cgxd, int index);
+int cgx_lmac_addr_update(u8 cgx_id, u8 lmac_id, u8 *mac_addr, u8 index);
+int cgx_lmac_pfc_config(void *cgxd, int lmac_id, u8 tx_pause, u8 rx_pause,
+ u16 pfc_en);
+int cgx_lmac_get_pfc_frm_cfg(void *cgxd, int lmac_id, u8 *tx_pause,
+ u8 *rx_pause);
+int verify_lmac_fc_cfg(void *cgxd, int lmac_id, u8 tx_pause, u8 rx_pause,
+ int pfvf_idx);
#endif /* CGX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
index c3702fa58b6b..fd7bda8024bc 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
@@ -1,11 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Marvell OcteonTx2 CGX driver
+/* Marvell OcteonTx2 CGX driver
*
- * Copyright (C) 2018 Marvell International Ltd.
+ * Copyright (C) 2018 Marvell.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __CGX_FW_INTF_H__
@@ -14,6 +11,10 @@
#include <linux/bitops.h>
#include <linux/bitfield.h>
+/* Major version would change only if there is a structural change in
+ * existing commands that impacts functionality.
+ * Minor version would change with new command/structure additions.
+ */
#define CGX_FIRMWARE_MAJOR_VER 1
#define CGX_FIRMWARE_MINOR_VER 0
@@ -43,7 +44,13 @@ enum cgx_error_type {
CGX_ERR_TRAINING_FAIL,
CGX_ERR_RX_EQU_FAIL,
CGX_ERR_SPUX_BER_FAIL,
- CGX_ERR_SPUX_RSFEC_ALGN_FAIL, /* = 22 */
+ CGX_ERR_SPUX_RSFEC_ALGN_FAIL,
+ CGX_ERR_SPUX_MARKER_LOCK_FAIL,
+ CGX_ERR_SET_FEC_INVALID,
+ CGX_ERR_SET_FEC_FAIL,
+ CGX_ERR_MODULE_INVALID,
+ CGX_ERR_MODULE_NOT_PRESENT,
+ CGX_ERR_SPEED_CHANGE_INVALID,
};
/* LINK speed types */
@@ -59,10 +66,53 @@ enum cgx_link_speed {
CGX_LINK_25G,
CGX_LINK_40G,
CGX_LINK_50G,
+ CGX_LINK_80G,
CGX_LINK_100G,
CGX_LINK_SPEED_MAX,
};
+enum CGX_MODE_ {
+ CGX_MODE_SGMII,
+ CGX_MODE_1000_BASEX,
+ CGX_MODE_QSGMII,
+ CGX_MODE_10G_C2C,
+ CGX_MODE_10G_C2M,
+ CGX_MODE_10G_KR,
+ CGX_MODE_20G_C2C,
+ CGX_MODE_25G_C2C,
+ CGX_MODE_25G_C2M,
+ CGX_MODE_25G_2_C2C,
+ CGX_MODE_25G_CR,
+ CGX_MODE_25G_KR,
+ CGX_MODE_40G_C2C,
+ CGX_MODE_40G_C2M,
+ CGX_MODE_40G_CR4,
+ CGX_MODE_40G_KR4,
+ CGX_MODE_40GAUI_C2C,
+ CGX_MODE_50G_C2C,
+ CGX_MODE_50G_C2M,
+ CGX_MODE_50G_4_C2C,
+ CGX_MODE_50G_CR,
+ CGX_MODE_50G_KR,
+ CGX_MODE_80GAUI_C2C,
+ CGX_MODE_100G_C2C,
+ CGX_MODE_100G_C2M,
+ CGX_MODE_100G_CR4,
+ CGX_MODE_100G_KR4,
+ CGX_MODE_LAUI_2_C2C_BIT,
+ CGX_MODE_LAUI_2_C2M_BIT,
+ CGX_MODE_50GBASE_CR2_C_BIT,
+ CGX_MODE_50GBASE_KR2_C_BIT, /* = 30 */
+ CGX_MODE_100GAUI_2_C2C_BIT,
+ CGX_MODE_100GAUI_2_C2M_BIT,
+ CGX_MODE_100GBASE_CR2_BIT,
+ CGX_MODE_100GBASE_KR2_BIT,
+ CGX_MODE_SFI_1G_BIT,
+ CGX_MODE_25GBASE_CR_C_BIT,
+ CGX_MODE_25GBASE_KR_C_BIT,
+ CGX_MODE_MAX /* = 38 */
+};
+
/* REQUEST ID types. Input to firmware */
enum cgx_cmd_id {
CGX_CMD_NONE,
@@ -75,12 +125,25 @@ enum cgx_cmd_id {
CGX_CMD_INTERNAL_LBK,
CGX_CMD_EXTERNAL_LBK,
CGX_CMD_HIGIG,
- CGX_CMD_LINK_STATE_CHANGE,
+ CGX_CMD_LINK_STAT_CHANGE,
CGX_CMD_MODE_CHANGE, /* hot plug support */
CGX_CMD_INTF_SHUTDOWN,
CGX_CMD_GET_MKEX_PRFL_SIZE,
CGX_CMD_GET_MKEX_PRFL_ADDR,
CGX_CMD_GET_FWD_BASE, /* get base address of shared FW data */
+ CGX_CMD_GET_LINK_MODES, /* Supported Link Modes */
+ CGX_CMD_SET_LINK_MODE,
+ CGX_CMD_GET_SUPPORTED_FEC,
+ CGX_CMD_SET_FEC,
+ CGX_CMD_GET_AN,
+ CGX_CMD_SET_AN,
+ CGX_CMD_GET_ADV_LINK_MODES,
+ CGX_CMD_GET_ADV_FEC,
+ CGX_CMD_GET_PHY_MOD_TYPE, /* line-side modulation type: NRZ or PAM4 */
+ CGX_CMD_SET_PHY_MOD_TYPE,
+ CGX_CMD_PRBS,
+ CGX_CMD_DISPLAY_EYE,
+ CGX_CMD_GET_PHY_FEC_STATS,
};
/* async event ids */
@@ -171,13 +234,20 @@ struct cgx_lnk_sts {
uint64_t full_duplex:1;
uint64_t speed:4; /* cgx_link_speed */
uint64_t err_type:10;
- uint64_t reserved2:39;
+ uint64_t an:1; /* AN supported or not */
+ uint64_t fec:2; /* FEC type if enabled, if not 0 */
+ uint64_t lmac_type:8;
+ uint64_t mode:8;
+ uint64_t reserved2:20;
};
#define RESP_LINKSTAT_UP GENMASK_ULL(9, 9)
#define RESP_LINKSTAT_FDUPLEX GENMASK_ULL(10, 10)
#define RESP_LINKSTAT_SPEED GENMASK_ULL(14, 11)
#define RESP_LINKSTAT_ERRTYPE GENMASK_ULL(24, 15)
+#define RESP_LINKSTAT_AN GENMASK_ULL(25, 25)
+#define RESP_LINKSTAT_FEC GENMASK_ULL(27, 26)
+#define RESP_LINKSTAT_LMAC_TYPE GENMASK_ULL(35, 28)
/* scratchx(1) CSR used for non-secure SW->ATF communication
* This CSR acts as a command register
@@ -199,4 +269,23 @@ struct cgx_lnk_sts {
#define CMDLINKCHANGE_FULLDPLX BIT_ULL(9)
#define CMDLINKCHANGE_SPEED GENMASK_ULL(13, 10)
+#define CMDSETFEC GENMASK_ULL(9, 8)
+/* command argument to be passed for cmd ID - CGX_CMD_MODE_CHANGE */
+#define CMDMODECHANGE_SPEED GENMASK_ULL(11, 8)
+#define CMDMODECHANGE_DUPLEX GENMASK_ULL(12, 12)
+#define CMDMODECHANGE_AN GENMASK_ULL(13, 13)
+/* This field categorizes the mode ID (FLAGS) range to accommodate
+ * more modes.
+ * To specify the mode ID range 0 - 41, this field will be 0.
+ * To specify the mode ID range 42 - 83, this field will be 1.
+ */
+#define CMDMODECHANGE_MODE_BASEIDX GENMASK_ULL(21, 20)
+#define CMDMODECHANGE_FLAGS GENMASK_ULL(63, 22)
+
+/* command argument to be passed for cmd ID - CGX_CMD_SET_PHY_MOD_TYPE */
+#define CMDSETPHYMODTYPE GENMASK_ULL(8, 8)
+
+/* response to cmd ID - RESP_GETPHYMODTYPE */
+#define RESP_GETPHYMODTYPE GENMASK_ULL(9, 9)
+
#endif /* __CGX_FW_INTF_H__ */
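
A standalone sketch of how a CGX_CMD_MODE_CHANGE request word is built from the field masks above, not part of the patch: the driver uses its FIELD_SET() helper on the GENMASK_ULL() masks; field_set() below is a local stand-in and the literal masks simply expand GENMASK_ULL(11, 8) etc. The full request also carries CMDREG_ID, MODE_BASEIDX and FLAGS, which this sketch omits, and the speed index 8 is assumed to be CGX_LINK_25G going by the cgx_link_speed enum.

#include <stdint.h>
#include <stdio.h>

/* local stand-in for the driver's FIELD_SET(mask, val, reg) helper */
static uint64_t field_set(uint64_t mask, uint64_t val, uint64_t reg)
{
	int shift = __builtin_ctzll(mask);

	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	const uint64_t CMDMODECHANGE_SPEED  = 0xFULL << 8;	/* GENMASK_ULL(11, 8)  */
	const uint64_t CMDMODECHANGE_DUPLEX = 1ULL << 12;	/* GENMASK_ULL(12, 12) */
	const uint64_t CMDMODECHANGE_AN     = 1ULL << 13;	/* GENMASK_ULL(13, 13) */
	uint64_t req = 0;

	req = field_set(CMDMODECHANGE_SPEED, 8, req);	/* assumed CGX_LINK_25G          */
	req = field_set(CMDMODECHANGE_DUPLEX, 0, req);	/* 0 = full duplex per mapping   */
	req = field_set(CMDMODECHANGE_AN, 1, req);	/* autoneg requested             */

	printf("mode-change request word: 0x%016llx\n", (unsigned long long)req);
	return 0;
}
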
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h
index f48eb66ed021..8931864ee110 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h
@@ -1,11 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Marvell OcteonTx2 RVU Admin Function driver
+/* Marvell RVU Admin Function driver
*
- * Copyright (C) 2018 Marvell International Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * Copyright (C) 2018 Marvell.
*/
#ifndef COMMON_H
@@ -64,8 +60,8 @@ static inline int qmem_alloc(struct device *dev, struct qmem **q,
qmem->entry_sz = entry_sz;
qmem->alloc_sz = (qsize * entry_sz) + OTX2_ALIGN;
- qmem->base = dma_alloc_coherent(dev, qmem->alloc_sz,
- &qmem->iova, GFP_KERNEL);
+ qmem->base = dma_alloc_attrs(dev, qmem->alloc_sz, &qmem->iova,
+ GFP_KERNEL, DMA_ATTR_FORCE_CONTIGUOUS);
if (!qmem->base)
return -ENOMEM;
@@ -84,9 +80,10 @@ static inline void qmem_free(struct device *dev, struct qmem *qmem)
return;
if (qmem->base)
- dma_free_coherent(dev, qmem->alloc_sz,
- qmem->base - qmem->align,
- qmem->iova - qmem->align);
+ dma_free_attrs(dev, qmem->alloc_sz,
+ qmem->base - qmem->align,
+ qmem->iova - qmem->align,
+ DMA_ATTR_FORCE_CONTIGUOUS);
devm_kfree(dev, qmem);
}
@@ -146,15 +143,14 @@ enum nix_scheduler {
#define TXSCH_RR_QTM_MAX ((1 << 24) - 1)
#define TXSCH_TL1_DFLT_RR_QTM TXSCH_RR_QTM_MAX
#define TXSCH_TL1_DFLT_RR_PRIO (0x1ull)
-#define MAX_SCHED_WEIGHT 0xFF
-#define DFLT_RR_WEIGHT 71
-#define DFLT_RR_QTM ((DFLT_RR_WEIGHT * TXSCH_RR_QTM_MAX) \
- / MAX_SCHED_WEIGHT)
+#define CN10K_MAX_DWRR_WEIGHT 16384 /* Weight is 14bit on CN10K */
/* Min/Max packet sizes, excluding FCS */
#define NIC_HW_MIN_FRS 40
#define NIC_HW_MAX_FRS 9212
#define SDP_HW_MAX_FRS 65535
+#define CN10K_LMAC_LINK_MAX_FRS 16380 /* 16k - FCS */
+#define CN10K_LBK_LINK_MAX_FRS 65535 /* 64k */
/* NIX RX action operation*/
#define NIX_RX_ACTIONOP_DROP (0x0ull)
@@ -162,6 +158,8 @@ enum nix_scheduler {
#define NIX_RX_ACTIONOP_UCAST_IPSEC (0x2ull)
#define NIX_RX_ACTIONOP_MCAST (0x3ull)
#define NIX_RX_ACTIONOP_RSS (0x4ull)
+/* Use the RX action set in the default unicast entry */
+#define NIX_RX_ACTION_DEFAULT (0xfull)
/* NIX TX action operation*/
#define NIX_TX_ACTIONOP_DROP (0x0ull)
@@ -174,17 +172,31 @@ enum nix_scheduler {
#define NPC_MCAM_KEY_X2 1
#define NPC_MCAM_KEY_X4 2
-#define NIX_INTF_RX 0
-#define NIX_INTF_TX 1
+#define NIX_INTFX_RX(a) (0x0ull | (a) << 1)
+#define NIX_INTFX_TX(a) (0x1ull | (a) << 1)
+
+/* Default interfaces are NIX0_RX and NIX0_TX */
+#define NIX_INTF_RX NIX_INTFX_RX(0)
+#define NIX_INTF_TX NIX_INTFX_TX(0)
#define NIX_INTF_TYPE_CGX 0
#define NIX_INTF_TYPE_LBK 1
+#define NIX_INTF_TYPE_SDP 2
#define MAX_LMAC_PKIND 12
#define NIX_LINK_CGX_LMAC(a, b) (0 + 4 * (a) + (b))
#define NIX_LINK_LBK(a) (12 + (a))
#define NIX_CHAN_CGX_LMAC_CHX(a, b, c) (0x800 + 0x100 * (a) + 0x10 * (b) + (c))
#define NIX_CHAN_LBK_CHX(a, b) (0 + 0x100 * (a) + (b))
+#define NIX_CHAN_SDP_CH_START (0x700ull)
+#define NIX_CHAN_SDP_CHX(a) (NIX_CHAN_SDP_CH_START + (a))
+#define NIX_CHAN_SDP_NUM_CHANS 256
+#define NIX_CHAN_CPT_CH_START (0x800ull)
+
+/* The mask is to extract lower 10-bits of channel number
+ * which CPT will pass to X2P.
+ */
+#define NIX_CHAN_CPT_X2P_MASK (0x3ffull)
/* NIX LSO format indices.
* As of now TSO is the only one using, so statically assigning indices.
@@ -206,6 +218,8 @@ enum ndc_idx_e {
NIX0_RX = 0x0,
NIX0_TX = 0x1,
NPA0_U = 0x2,
+ NIX1_RX = 0x4,
+ NIX1_TX = 0x5,
};
enum ndc_ctype_e {
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
new file mode 100644
index 000000000000..9fc73844d5c0
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell CN10K RPM driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include "rvu.h"
+#include "cgx.h"
+/**
+ * struct lmac - per lmac locks and properties
+ * @wq_cmd_cmplt: waitq to keep the process blocked until cmd completion
+ * @cmd_lock: Lock to serialize the command interface
+ * @resp: command response
+ * @link_info: link related information
+ * @mac_to_index_bmap: Mac address to CGX table index mapping
+ * @rx_fc_pfvf_bmap: Receive flow control enabled netdev mapping
+ * @tx_fc_pfvf_bmap: Transmit flow control enabled netdev mapping
+ * @event_cb: callback for linkchange events
+ * @event_cb_lock: lock for serializing callback with unregister
+ * @cgx: parent cgx port
+ * @mcast_filters_count: count of multicast address filters
+ * @lmac_id: lmac port id
+ * @cmd_pend: flag set before new command is started
+ * flag cleared after command response is received
+ * @name: lmac port name
+ */
+struct lmac {
+ wait_queue_head_t wq_cmd_cmplt;
+ /* Lock to serialize the command interface */
+ struct mutex cmd_lock;
+ u64 resp;
+ struct cgx_link_user_info link_info;
+ struct rsrc_bmap mac_to_index_bmap;
+ struct rsrc_bmap rx_fc_pfvf_bmap;
+ struct rsrc_bmap tx_fc_pfvf_bmap;
+ struct cgx_event_cb event_cb;
+ /* lock for serializing callback with unregister */
+ spinlock_t event_cb_lock;
+ struct cgx *cgx;
+ u8 mcast_filters_count;
+ u8 lmac_id;
+ bool cmd_pend;
+ char *name;
+};
+
+/* CGX & RPM has different feature set
+ * update the structure fields with different one
+ */
+struct mac_ops {
+ char *name;
+ /* Features like the DMAC FILTER CSRs differ by a fixed
+ * BAR offset, for example:
+ * CGX DMAC_CTL0 0x1f8
+ * RPM DMAC_CTL0 0x4ff8
+ */
+ u64 csr_offset;
+ /* For ATF to send events to the kernel there is no dedicated interrupt
+ * defined, hence CGX uses the OVERFLOW bit in CMR_INT. The RPM block
+ * supports SW_INT, so ATF triggers this interrupt after processing the
+ * requested command.
+ */
+ u64 int_register;
+ u64 int_set_reg;
+ /* lmac offset is different in RPM */
+ u8 lmac_offset;
+ u8 irq_offset;
+ u8 int_ena_bit;
+ u8 lmac_fwi;
+ u32 fifo_len;
+ bool non_contiguous_serdes_lane;
+ /* RPM & CGX differs in number of Receive/transmit stats */
+ u8 rx_stats_cnt;
+ u8 tx_stats_cnt;
+
+ /* In case of RPM, get the number of lmacs from RPMX_CMR_RX_LMACS[LMAC_EXIST];
+ * the number of set bits in LMAC_EXIST gives the number of lmacs
+ */
+ int (*get_nr_lmacs)(void *cgx);
+ u8 (*get_lmac_type)(void *cgx, int lmac_id);
+ int (*mac_lmac_intl_lbk)(void *cgx, int lmac_id,
+ bool enable);
+ /* Register Stats related functions */
+ int (*mac_get_rx_stats)(void *cgx, int lmac_id,
+ int idx, u64 *rx_stat);
+ int (*mac_get_tx_stats)(void *cgx, int lmac_id,
+ int idx, u64 *tx_stat);
+ /* Enable LMAC Pause Frame Configuration */
+ void (*mac_enadis_rx_pause_fwding)(void *cgxd,
+ int lmac_id,
+ bool enable);
+ int (*mac_get_pause_frm_status)(void *cgxd,
+ int lmac_id,
+ u8 *tx_pause,
+ u8 *rx_pause);
+ int (*mac_enadis_pause_frm)(void *cgxd,
+ int lmac_id,
+ u8 tx_pause,
+ u8 rx_pause);
+ void (*mac_pause_frm_config)(void *cgxd,
+ int lmac_id,
+ bool enable);
+ /* Enable/Disable Inbound PTP */
+ void (*mac_enadis_ptp_config)(void *cgxd,
+ int lmac_id,
+ bool enable);
+ int (*mac_rx_tx_enable)(void *cgxd, int lmac_id, bool enable);
+ int (*mac_tx_enable)(void *cgxd, int lmac_id, bool enable);
+
+ int (*pfc_config)(void *cgxd, int lmac_id,
+ u8 tx_pause, u8 rx_pause, u16 pfc_en);
+
+ int (*mac_get_pfc_frm_cfg)(void *cgxd, int lmac_id,
+ u8 *tx_pause, u8 *rx_pause);
+
+};
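
A standalone sketch of the dispatch pattern struct mac_ops enables, not part of the patch: common AF code calls through the ops table and never needs to know whether the MAC block behind it is CGX or RPM. The LMAC counts below are placeholders, not hardware values.

#include <stdio.h>

struct mac_ops_sketch {
	const char *name;
	int (*get_nr_lmacs)(void *mac);
};

static int cgx_nr_lmacs(void *mac) { (void)mac; return 4; }	/* placeholder */
static int rpm_nr_lmacs(void *mac) { (void)mac; return 2; }	/* placeholder */

static const struct mac_ops_sketch cgx_ops = { "cgx", cgx_nr_lmacs };
static const struct mac_ops_sketch rpm_ops = { "rpm", rpm_nr_lmacs };

static void probe_one(const struct mac_ops_sketch *ops)
{
	/* same caller code for both MAC blocks */
	printf("%s: %d lmacs\n", ops->name, ops->get_nr_lmacs(NULL));
}

int main(void)
{
	probe_one(&cgx_ops);
	probe_one(&rpm_ops);
	return 0;
}
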
+
+struct cgx {
+ void __iomem *reg_base;
+ struct pci_dev *pdev;
+ u8 cgx_id;
+ u8 lmac_count;
+ struct lmac *lmac_idmap[MAX_LMAC_PER_CGX];
+ struct work_struct cgx_cmd_work;
+ struct workqueue_struct *cgx_cmd_workq;
+ struct list_head cgx_list;
+ u64 hw_features;
+ struct mac_ops *mac_ops;
+ /* Lock to serialize read/write of global csrs like
+ * RPMX_MTI_STAT_DATA_HI_CDC etc
+ */
+ struct mutex lock;
+ unsigned long lmac_bmap; /* bitmap of enabled lmacs */
+};
+
+typedef struct cgx rpm_t;
+
+/* Function Declarations */
+void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val);
+u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset);
+struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx);
+int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac);
+int cgx_fwi_cmd_generic(u64 req, u64 *resp, struct cgx *cgx, int lmac_id);
+bool is_lmac_valid(struct cgx *cgx, int lmac_id);
+struct mac_ops *rpm_get_mac_ops(void);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
index bbabb8e64201..2898931d5260 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
@@ -1,11 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
-/* Marvell OcteonTx2 RVU Admin Function driver
+/* Marvell RVU Admin Function driver
*
- * Copyright (C) 2018 Marvell International Ltd.
+ * Copyright (C) 2018 Marvell.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/module.h>
@@ -20,9 +17,9 @@ static const u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
void __otx2_mbox_reset(struct otx2_mbox *mbox, int devid)
{
- void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE);
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
struct mbox_hdr *tx_hdr, *rx_hdr;
+ void *hw_mbase = mdev->hwbase;
tx_hdr = hw_mbase + mbox->tx_start;
rx_hdr = hw_mbase + mbox->rx_start;
@@ -56,12 +53,9 @@ void otx2_mbox_destroy(struct otx2_mbox *mbox)
}
EXPORT_SYMBOL(otx2_mbox_destroy);
-int otx2_mbox_init(struct otx2_mbox *mbox, void *hwbase, struct pci_dev *pdev,
- void *reg_base, int direction, int ndevs)
+static int otx2_mbox_setup(struct otx2_mbox *mbox, struct pci_dev *pdev,
+ void *reg_base, int direction, int ndevs)
{
- struct otx2_mbox_dev *mdev;
- int devid;
-
switch (direction) {
case MBOX_DIR_AFPF:
case MBOX_DIR_PFVF:
@@ -121,7 +115,6 @@ int otx2_mbox_init(struct otx2_mbox *mbox, void *hwbase, struct pci_dev *pdev,
}
mbox->reg_base = reg_base;
- mbox->hwbase = hwbase;
mbox->pdev = pdev;
mbox->dev = kcalloc(ndevs, sizeof(struct otx2_mbox_dev), GFP_KERNEL);
@@ -129,11 +122,27 @@ int otx2_mbox_init(struct otx2_mbox *mbox, void *hwbase, struct pci_dev *pdev,
otx2_mbox_destroy(mbox);
return -ENOMEM;
}
-
mbox->ndevs = ndevs;
+
+ return 0;
+}
+
+int otx2_mbox_init(struct otx2_mbox *mbox, void *hwbase, struct pci_dev *pdev,
+ void *reg_base, int direction, int ndevs)
+{
+ struct otx2_mbox_dev *mdev;
+ int devid, err;
+
+ err = otx2_mbox_setup(mbox, pdev, reg_base, direction, ndevs);
+ if (err)
+ return err;
+
+ mbox->hwbase = hwbase;
+
for (devid = 0; devid < ndevs; devid++) {
mdev = &mbox->dev[devid];
mdev->mbase = mbox->hwbase + (devid * MBOX_SIZE);
+ mdev->hwbase = mdev->mbase;
spin_lock_init(&mdev->mbox_lock);
/* Init header to reset value */
otx2_mbox_reset(mbox, devid);
@@ -143,6 +152,35 @@ int otx2_mbox_init(struct otx2_mbox *mbox, void *hwbase, struct pci_dev *pdev,
}
EXPORT_SYMBOL(otx2_mbox_init);
+/* Initialize mailbox with the set of mailbox region addresses
+ * in the array hwbase.
+ */
+int otx2_mbox_regions_init(struct otx2_mbox *mbox, void **hwbase,
+ struct pci_dev *pdev, void *reg_base,
+ int direction, int ndevs)
+{
+ struct otx2_mbox_dev *mdev;
+ int devid, err;
+
+ err = otx2_mbox_setup(mbox, pdev, reg_base, direction, ndevs);
+ if (err)
+ return err;
+
+ mbox->hwbase = hwbase[0];
+
+ for (devid = 0; devid < ndevs; devid++) {
+ mdev = &mbox->dev[devid];
+ mdev->mbase = hwbase[devid];
+ mdev->hwbase = hwbase[devid];
+ spin_lock_init(&mdev->mbox_lock);
+ /* Init header to reset value */
+ otx2_mbox_reset(mbox, devid);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(otx2_mbox_regions_init);
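A minimal caller-side sketch of the new helper (not taken from the patch; hwbases[], af_reg_base, ndevs and MAX_DEVS are illustrative): unlike otx2_mbox_init(), which slices one contiguous region into MBOX_SIZE chunks, otx2_mbox_regions_init() takes one already-iomapped base address per device.

	void *hwbases[MAX_DEVS];	/* MAX_DEVS: illustrative upper bound */
	int err;

	/* fill hwbases[i] with the iomapped mbox region of device i */
	err = otx2_mbox_regions_init(&mbox, hwbases, pdev, af_reg_base,
				     MBOX_DIR_AFPF, ndevs);
	if (err)
		return err;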
+
int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid)
{
unsigned long timeout = jiffies + msecs_to_jiffies(MBOX_RSP_TIMEOUT);
@@ -175,9 +213,9 @@ EXPORT_SYMBOL(otx2_mbox_busy_poll_for_rsp);
void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
{
- void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE);
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
struct mbox_hdr *tx_hdr, *rx_hdr;
+ void *hw_mbase = mdev->hwbase;
tx_hdr = hw_mbase + mbox->tx_start;
rx_hdr = hw_mbase + mbox->rx_start;
@@ -371,5 +409,5 @@ const char *otx2_mbox_id2name(u16 id)
}
EXPORT_SYMBOL(otx2_mbox_id2name);
-MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_AUTHOR("Marvell.");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 263a21129416..98dc16ab639e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -1,11 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Marvell OcteonTx2 RVU Admin Function driver
+/* Marvell RVU Admin Function driver
*
- * Copyright (C) 2018 Marvell International Ltd.
+ * Copyright (C) 2018 Marvell.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef MBOX_H
@@ -36,7 +33,7 @@
#define INTR_MASK(pfvfs) ((pfvfs < 64) ? (BIT_ULL(pfvfs) - 1) : (~0ull))
-#define MBOX_RSP_TIMEOUT 2000 /* Time(ms) to wait for mbox response */
+#define MBOX_RSP_TIMEOUT 3000 /* Time(ms) to wait for mbox response */
#define MBOX_MSG_ALIGN 16 /* Align mbox msg start to 16bytes */
@@ -52,6 +49,7 @@
struct otx2_mbox_dev {
void *mbase; /* This dev's mbox region */
+ void *hwbase;
spinlock_t mbox_lock;
u16 msg_size; /* Total msg size to be sent */
u16 rsp_size; /* Total rsp size to be sure the reply is ok */
@@ -86,7 +84,7 @@ struct mbox_msghdr {
#define OTX2_MBOX_REQ_SIG (0xdead)
#define OTX2_MBOX_RSP_SIG (0xbeef)
u16 sig; /* Signature, for validating corrupted msgs */
-#define OTX2_MBOX_VERSION (0x0001)
+#define OTX2_MBOX_VERSION (0x000b)
u16 ver; /* Version of msg's structure for this ID */
u16 next_msgoff; /* Offset of next msg within mailbox region */
int rc; /* Msg process'ed response code */
@@ -98,6 +96,9 @@ void otx2_mbox_destroy(struct otx2_mbox *mbox);
int otx2_mbox_init(struct otx2_mbox *mbox, void __force *hwbase,
struct pci_dev *pdev, void __force *reg_base,
int direction, int ndevs);
+int otx2_mbox_regions_init(struct otx2_mbox *mbox, void __force **hwbase,
+ struct pci_dev *pdev, void __force *reg_base,
+ int direction, int ndevs);
void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid);
int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid);
int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid);
@@ -126,10 +127,15 @@ static inline struct mbox_msghdr *otx2_mbox_alloc_msg(struct otx2_mbox *mbox,
M(READY, 0x001, ready, msg_req, ready_msg_rsp) \
M(ATTACH_RESOURCES, 0x002, attach_resources, rsrc_attach, msg_rsp) \
M(DETACH_RESOURCES, 0x003, detach_resources, rsrc_detach, msg_rsp) \
+M(FREE_RSRC_CNT, 0x004, free_rsrc_cnt, msg_req, free_rsrcs_rsp) \
M(MSIX_OFFSET, 0x005, msix_offset, msg_req, msix_offset_rsp) \
M(VF_FLR, 0x006, vf_flr, msg_req, msg_rsp) \
M(PTP_OP, 0x007, ptp_op, ptp_req, ptp_rsp) \
M(GET_HW_CAP, 0x008, get_hw_cap, msg_req, get_hw_cap_rsp) \
+M(NDC_SYNC_OP, 0x009, ndc_sync_op, ndc_sync_op, msg_rsp) \
+M(LMTST_TBL_SETUP, 0x00a, lmtst_tbl_setup, lmtst_tbl_setup_req, \
+ msg_rsp) \
+M(SET_VF_PERM, 0x00b, set_vf_perm, set_vf_perm, msg_rsp) \
/* CGX mbox IDs (range 0x200 - 0x3FF) */ \
M(CGX_START_RXTX, 0x200, cgx_start_rxtx, msg_req, msg_rsp) \
M(CGX_STOP_RXTX, 0x201, cgx_stop_rxtx, msg_req, msg_rsp) \
@@ -149,6 +155,33 @@ M(CGX_PTP_RX_ENABLE, 0x20C, cgx_ptp_rx_enable, msg_req, msg_rsp) \
M(CGX_PTP_RX_DISABLE, 0x20D, cgx_ptp_rx_disable, msg_req, msg_rsp) \
M(CGX_CFG_PAUSE_FRM, 0x20E, cgx_cfg_pause_frm, cgx_pause_frm_cfg, \
cgx_pause_frm_cfg) \
+M(CGX_FW_DATA_GET, 0x20F, cgx_get_aux_link_info, msg_req, cgx_fw_data) \
+M(CGX_FEC_SET, 0x210, cgx_set_fec_param, fec_mode, fec_mode) \
+M(CGX_MAC_ADDR_ADD, 0x211, cgx_mac_addr_add, cgx_mac_addr_add_req, \
+ cgx_mac_addr_add_rsp) \
+M(CGX_MAC_ADDR_DEL, 0x212, cgx_mac_addr_del, cgx_mac_addr_del_req, \
+ msg_rsp) \
+M(CGX_MAC_MAX_ENTRIES_GET, 0x213, cgx_mac_max_entries_get, msg_req, \
+ cgx_max_dmac_entries_get_rsp) \
+M(CGX_SET_LINK_STATE, 0x214, cgx_set_link_state, \
+ cgx_set_link_state_msg, msg_rsp) \
+M(CGX_GET_PHY_MOD_TYPE, 0x215, cgx_get_phy_mod_type, msg_req, \
+ cgx_phy_mod_type) \
+M(CGX_SET_PHY_MOD_TYPE, 0x216, cgx_set_phy_mod_type, cgx_phy_mod_type, \
+ msg_rsp) \
+M(CGX_FEC_STATS, 0x217, cgx_fec_stats, msg_req, cgx_fec_stats_rsp) \
+M(CGX_SET_LINK_MODE, 0x218, cgx_set_link_mode, cgx_set_link_mode_req,\
+ cgx_set_link_mode_rsp) \
+M(CGX_GET_PHY_FEC_STATS, 0x219, cgx_get_phy_fec_stats, msg_req, msg_rsp) \
+M(CGX_STATS_RST, 0x21A, cgx_stats_rst, msg_req, msg_rsp) \
+M(CGX_FEATURES_GET, 0x21B, cgx_features_get, msg_req, \
+ cgx_features_info_msg) \
+M(RPM_STATS, 0x21C, rpm_stats, msg_req, rpm_stats_rsp) \
+M(CGX_MAC_ADDR_RESET, 0x21D, cgx_mac_addr_reset, msg_req, msg_rsp) \
+M(CGX_MAC_ADDR_UPDATE, 0x21E, cgx_mac_addr_update, cgx_mac_addr_update_req, \
+ msg_rsp) \
+M(CGX_PRIO_FLOW_CTRL_CFG, 0x21F, cgx_prio_flow_ctrl_cfg, cgx_pfc_cfg, \
+ cgx_pfc_rsp) \
/* NPA mbox IDs (range 0x400 - 0x5FF) */ \
M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, \
npa_lf_alloc_req, npa_lf_alloc_rsp) \
@@ -156,8 +189,66 @@ M(NPA_LF_FREE, 0x401, npa_lf_free, msg_req, msg_rsp) \
M(NPA_AQ_ENQ, 0x402, npa_aq_enq, npa_aq_enq_req, npa_aq_enq_rsp) \
M(NPA_HWCTX_DISABLE, 0x403, npa_hwctx_disable, hwctx_disable_req, msg_rsp)\
/* SSO/SSOW mbox IDs (range 0x600 - 0x7FF) */ \
+M(SSO_LF_ALLOC, 0x600, sso_lf_alloc, \
+ sso_lf_alloc_req, sso_lf_alloc_rsp) \
+M(SSO_LF_FREE, 0x601, sso_lf_free, \
+ sso_lf_free_req, msg_rsp) \
+M(SSOW_LF_ALLOC, 0x602, ssow_lf_alloc, \
+ ssow_lf_alloc_req, msg_rsp) \
+M(SSOW_LF_FREE, 0x603, ssow_lf_free, \
+ ssow_lf_free_req, msg_rsp) \
+M(SSO_HW_SETCONFIG, 0x604, sso_hw_setconfig, \
+ sso_hw_setconfig, msg_rsp) \
+M(SSO_GRP_SET_PRIORITY, 0x605, sso_grp_set_priority, \
+ sso_grp_priority, msg_rsp) \
+M(SSO_GRP_GET_PRIORITY, 0x606, sso_grp_get_priority, \
+ sso_info_req, sso_grp_priority) \
+M(SSO_WS_CACHE_INV, 0x607, sso_ws_cache_inv, msg_req, msg_rsp) \
+M(SSO_GRP_QOS_CONFIG, 0x608, sso_grp_qos_config, sso_grp_qos_cfg, msg_rsp)\
+M(SSO_GRP_GET_STATS, 0x609, sso_grp_get_stats, sso_info_req, sso_grp_stats)\
+M(SSO_HWS_GET_STATS, 0x610, sso_hws_get_stats, sso_info_req, sso_hws_stats)\
+M(SSO_HW_RELEASE_XAQ, 0x611, sso_hw_release_xaq_aura, \
+ sso_release_xaq, msg_rsp) \
+M(SSO_CONFIG_LSW, 0x612, ssow_config_lsw, \
+ ssow_config_lsw, msg_rsp) \
+M(SSO_HWS_CHNG_MSHIP, 0x613, ssow_chng_mship, ssow_chng_mship, msg_rsp)\
/* TIM mbox IDs (range 0x800 - 0x9FF) */ \
+M(TIM_LF_ALLOC, 0x800, tim_lf_alloc, \
+ tim_lf_alloc_req, tim_lf_alloc_rsp) \
+M(TIM_LF_FREE, 0x801, tim_lf_free, tim_ring_req, msg_rsp) \
+M(TIM_CONFIG_RING, 0x802, tim_config_ring, tim_config_req, msg_rsp)\
+M(TIM_ENABLE_RING, 0x803, tim_enable_ring, tim_ring_req, tim_enable_rsp)\
+M(TIM_DISABLE_RING, 0x804, tim_disable_ring, tim_ring_req, msg_rsp) \
+M(TIM_GET_MIN_INTVL, 0x805, tim_get_min_intvl, tim_intvl_req, \
+ tim_intvl_rsp) \
/* CPT mbox IDs (range 0xA00 - 0xBFF) */ \
+M(CPT_LF_ALLOC, 0xA00, cpt_lf_alloc, cpt_lf_alloc_req_msg, \
+ msg_rsp) \
+M(CPT_LF_FREE, 0xA01, cpt_lf_free, msg_req, msg_rsp) \
+M(CPT_RD_WR_REGISTER, 0xA02, cpt_rd_wr_register, cpt_rd_wr_reg_msg, \
+ cpt_rd_wr_reg_msg) \
+M(CPT_INLINE_IPSEC_CFG, 0xA04, cpt_inline_ipsec_cfg, \
+ cpt_inline_ipsec_cfg_msg, msg_rsp) \
+M(CPT_STATS, 0xA05, cpt_sts, cpt_sts_req, cpt_sts_rsp) \
+M(CPT_RXC_TIME_CFG, 0xA06, cpt_rxc_time_cfg, cpt_rxc_time_cfg_req, \
+ msg_rsp) \
+M(CPT_CTX_CACHE_SYNC, 0xA07, cpt_ctx_cache_sync, msg_req, msg_rsp) \
+/* REE mbox IDs (range 0xE00 - 0xFFF) */ \
+M(REE_CONFIG_LF, 0xE01, ree_config_lf, ree_lf_req_msg, \
+ msg_rsp) \
+M(REE_RD_WR_REGISTER, 0xE02, ree_rd_wr_register, ree_rd_wr_reg_msg, \
+ ree_rd_wr_reg_msg) \
+M(REE_RULE_DB_PROG, 0xE03, ree_rule_db_prog, \
+ ree_rule_db_prog_req_msg, \
+ msg_rsp) \
+M(REE_RULE_DB_LEN_GET, 0xE04, ree_rule_db_len_get, ree_req_msg, \
+ ree_rule_db_len_rsp_msg) \
+M(REE_RULE_DB_GET, 0xE05, ree_rule_db_get, \
+ ree_rule_db_get_req_msg, \
+ ree_rule_db_get_rsp_msg) \
+/* SDP mbox IDs (range 0x1000 - 0x11FF) */ \
+M(SET_SDP_CHAN_INFO, 0x1000, set_sdp_chan_info, sdp_chan_info_msg, msg_rsp) \
+M(GET_SDP_CHAN_INFO, 0x1001, get_sdp_chan_info, msg_req, sdp_get_chan_info_msg) \
/* NPC mbox IDs (range 0x6000 - 0x7FFF) */ \
M(NPC_MCAM_ALLOC_ENTRY, 0x6000, npc_mcam_alloc_entry, npc_mcam_alloc_entry_req,\
npc_mcam_alloc_entry_rsp) \
@@ -188,19 +279,35 @@ M(NPC_MCAM_ALLOC_AND_WRITE_ENTRY, 0x600b, npc_mcam_alloc_and_write_entry, \
npc_mcam_alloc_and_write_entry_rsp) \
M(NPC_GET_KEX_CFG, 0x600c, npc_get_kex_cfg, \
msg_req, npc_get_kex_cfg_rsp) \
+M(NPC_INSTALL_FLOW, 0x600d, npc_install_flow, \
+ npc_install_flow_req, npc_install_flow_rsp) \
+M(NPC_DELETE_FLOW, 0x600e, npc_delete_flow, \
+ npc_delete_flow_req, msg_rsp) \
+M(NPC_MCAM_READ_ENTRY, 0x600f, npc_mcam_read_entry, \
+ npc_mcam_read_entry_req, \
+ npc_mcam_read_entry_rsp) \
+M(NPC_SET_PKIND, 0x6010, npc_set_pkind, \
+ npc_set_pkind, msg_rsp) \
+M(NPC_MCAM_READ_BASE_RULE, 0x6011, npc_read_base_steer_rule, \
+ msg_req, npc_mcam_read_base_rule_rsp) \
+M(NPC_MCAM_GET_STATS, 0x6012, npc_mcam_entry_stats, \
+ npc_mcam_get_stats_req, \
+ npc_mcam_get_stats_rsp) \
/* NIX mbox IDs (range 0x8000 - 0xFFFF) */ \
M(NIX_LF_ALLOC, 0x8000, nix_lf_alloc, \
nix_lf_alloc_req, nix_lf_alloc_rsp) \
-M(NIX_LF_FREE, 0x8001, nix_lf_free, msg_req, msg_rsp) \
+M(NIX_LF_FREE, 0x8001, nix_lf_free, nix_lf_free_req, msg_rsp) \
M(NIX_AQ_ENQ, 0x8002, nix_aq_enq, nix_aq_enq_req, nix_aq_enq_rsp) \
M(NIX_HWCTX_DISABLE, 0x8003, nix_hwctx_disable, \
hwctx_disable_req, msg_rsp) \
M(NIX_TXSCH_ALLOC, 0x8004, nix_txsch_alloc, \
nix_txsch_alloc_req, nix_txsch_alloc_rsp) \
M(NIX_TXSCH_FREE, 0x8005, nix_txsch_free, nix_txsch_free_req, msg_rsp) \
-M(NIX_TXSCHQ_CFG, 0x8006, nix_txschq_cfg, nix_txschq_config, msg_rsp) \
+M(NIX_TXSCHQ_CFG, 0x8006, nix_txschq_cfg, nix_txschq_config, \
+ nix_txschq_config) \
M(NIX_STATS_RST, 0x8007, nix_stats_rst, msg_req, msg_rsp) \
-M(NIX_VTAG_CFG, 0x8008, nix_vtag_cfg, nix_vtag_config, msg_rsp) \
+M(NIX_VTAG_CFG, 0x8008, nix_vtag_cfg, nix_vtag_config, \
+ nix_vtag_config_rsp) \
M(NIX_RSS_FLOWKEY_CFG, 0x8009, nix_rss_flowkey_cfg, \
nix_rss_flowkey_cfg, \
nix_rss_flowkey_cfg_rsp) \
@@ -216,22 +323,45 @@ M(NIX_SET_RX_CFG, 0x8010, nix_set_rx_cfg, nix_rx_cfg, msg_rsp) \
M(NIX_LSO_FORMAT_CFG, 0x8011, nix_lso_format_cfg, \
nix_lso_format_cfg, \
nix_lso_format_cfg_rsp) \
-M(NIX_RXVLAN_ALLOC, 0x8012, nix_rxvlan_alloc, msg_req, msg_rsp) \
M(NIX_LF_PTP_TX_ENABLE, 0x8013, nix_lf_ptp_tx_enable, msg_req, msg_rsp) \
M(NIX_LF_PTP_TX_DISABLE, 0x8014, nix_lf_ptp_tx_disable, msg_req, msg_rsp) \
+M(NIX_SET_VLAN_TPID, 0x8015, nix_set_vlan_tpid, nix_set_vlan_tpid, msg_rsp) \
M(NIX_BP_ENABLE, 0x8016, nix_bp_enable, nix_bp_cfg_req, \
nix_bp_cfg_rsp) \
M(NIX_BP_DISABLE, 0x8017, nix_bp_disable, nix_bp_cfg_req, msg_rsp) \
M(NIX_GET_MAC_ADDR, 0x8018, nix_get_mac_addr, msg_req, nix_get_mac_addr_rsp) \
+M(NIX_INLINE_IPSEC_CFG, 0x8019, nix_inline_ipsec_cfg, \
+ nix_inline_ipsec_cfg, msg_rsp) \
+M(NIX_INLINE_IPSEC_LF_CFG, 0x801a, nix_inline_ipsec_lf_cfg, \
+ nix_inline_ipsec_lf_cfg, msg_rsp) \
+M(NIX_CN10K_AQ_ENQ, 0x801b, nix_cn10k_aq_enq, nix_cn10k_aq_enq_req, \
+ nix_cn10k_aq_enq_rsp) \
+M(NIX_GET_HW_INFO, 0x801c, nix_get_hw_info, msg_req, nix_hw_info) \
+M(NIX_BANDPROF_ALLOC, 0x801d, nix_bandprof_alloc, nix_bandprof_alloc_req, \
+ nix_bandprof_alloc_rsp) \
+M(NIX_BANDPROF_FREE, 0x801e, nix_bandprof_free, nix_bandprof_free_req, \
+ msg_rsp) \
+M(NIX_BANDPROF_GET_HWINFO, 0x801f, nix_bandprof_get_hwinfo, msg_req, \
+ nix_bandprof_get_hwinfo_rsp) \
+M(NIX_CPT_BP_ENABLE, 0x8020, nix_cpt_bp_enable, nix_bp_cfg_req, \
+ nix_bp_cfg_rsp) \
+M(NIX_CPT_BP_DISABLE, 0x8021, nix_cpt_bp_disable, nix_bp_cfg_req, \
+ msg_rsp) \
+M(NIX_RX_SW_SYNC, 0x8022, nix_rx_sw_sync, msg_req, msg_rsp)
/* Messages initiated by AF (range 0xC00 - 0xDFF) */
#define MBOX_UP_CGX_MESSAGES \
-M(CGX_LINK_EVENT, 0xC00, cgx_link_event, cgx_link_info_msg, msg_rsp)
+M(CGX_LINK_EVENT, 0xC00, cgx_link_event, cgx_link_info_msg, msg_rsp) \
+M(CGX_PTP_RX_INFO, 0xC01, cgx_ptp_rx_info, cgx_ptp_rx_info_msg, msg_rsp)
+
+#define MBOX_UP_CPT_MESSAGES \
+M(CPT_INST_LMTST, 0xD00, cpt_inst_lmtst, cpt_inst_lmtst_req, msg_rsp)
enum {
#define M(_name, _id, _1, _2, _3) MBOX_MSG_ ## _name = _id,
MBOX_MESSAGES
MBOX_UP_CGX_MESSAGES
+MBOX_UP_CPT_MESSAGES
#undef M
};
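For reference, the temporary M() definition above turns each list entry into a plain enumerator, for example:

	M(READY, 0x001, ready, msg_req, ready_msg_rsp)
	/* expands to */
	MBOX_MSG_READY = 0x001,

The same MBOX_MESSAGES/MBOX_UP_* lists can be re-expanded elsewhere with a different M() definition, e.g. to declare one handler per message ID.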
@@ -271,6 +401,17 @@ struct ready_msg_rsp {
 * or to detach part of a certain resource type.
 * The rest of the fields specify how many of each type
 * are to be attached.
+ * To request LFs from two blocks of the same type, this mailbox
+ * can be sent twice as below:
+ * struct rsrc_attach *attach;
+ * .. Allocate memory for message ..
+ * attach->cptlfs = 3; <3 LFs from CPT0>
+ * .. Send message ..
+ * .. Allocate memory for message ..
+ * attach->modify = 1;
+ * attach->cpt_blkaddr = BLKADDR_CPT1;
+ * attach->cptlfs = 2; <2 LFs from CPT1>
+ * .. Send message ..
*/
struct rsrc_attach {
struct mbox_msghdr hdr;
@@ -281,6 +422,9 @@ struct rsrc_attach {
u16 ssow;
u16 timlfs;
u16 cptlfs;
+ u16 reelfs;
+ int cpt_blkaddr; /* BLKADDR_CPT0/BLKADDR_CPT1 or 0 for BLKADDR_CPT0 */
+ int ree_blkaddr; /* BLKADDR_REE0/BLKADDR_REE1 or 0 for BLKADDR_REE0 */
};
/* Structure for relinquishing resources.
@@ -297,6 +441,27 @@ struct rsrc_detach {
u8 ssow:1;
u8 timlfs:1;
u8 cptlfs:1;
+ u8 reelfs:1;
+};
+
+/*
+ * Number of resources available to the caller.
+ * In reply to MBOX_MSG_FREE_RSRC_CNT.
+ */
+struct free_rsrcs_rsp {
+ struct mbox_msghdr hdr;
+ u16 schq[NIX_TXSCH_LVL_CNT];
+ u16 sso;
+ u16 tim;
+ u16 ssow;
+ u16 cpt;
+ u8 npa;
+ u8 nix;
+ u16 schq_nix1[NIX_TXSCH_LVL_CNT];
+ u8 nix1;
+ u8 cpt1;
+ u8 ree0;
+ u8 ree1;
};
#define MSIX_VECTOR_INVALID 0xFFFF
@@ -306,14 +471,20 @@ struct msix_offset_rsp {
struct mbox_msghdr hdr;
u16 npa_msixoff;
u16 nix_msixoff;
- u8 sso;
- u8 ssow;
- u8 timlfs;
- u8 cptlfs;
+ u16 sso;
+ u16 ssow;
+ u16 timlfs;
+ u16 cptlfs;
u16 sso_msixoff[MAX_RVU_BLKLF_CNT];
u16 ssow_msixoff[MAX_RVU_BLKLF_CNT];
u16 timlf_msixoff[MAX_RVU_BLKLF_CNT];
u16 cptlf_msixoff[MAX_RVU_BLKLF_CNT];
+ u16 cpt1_lfs;
+ u16 ree0_lfs;
+ u16 ree1_lfs;
+ u16 cpt1_lf_msixoff[MAX_RVU_BLKLF_CNT];
+ u16 ree0_lf_msixoff[MAX_RVU_BLKLF_CNT];
+ u16 ree1_lf_msixoff[MAX_RVU_BLKLF_CNT];
};
struct get_hw_cap_rsp {
@@ -322,16 +493,48 @@ struct get_hw_cap_rsp {
u8 nix_shaping; /* Is shaping and coloring supported */
};
+struct ndc_sync_op {
+ struct mbox_msghdr hdr;
+ u8 nix_lf_tx_sync;
+ u8 nix_lf_rx_sync;
+ u8 npa_lf_sync;
+};
+
+struct lmtst_tbl_setup_req {
+ struct mbox_msghdr hdr;
+ u64 dis_sched_early_comp :1;
+ u64 sch_ena :1;
+ u64 dis_line_pref :1;
+ u64 ssow_pf_func :13;
+ u16 base_pcifunc;
+ u8 use_local_lmt_region;
+ u64 lmt_iova;
+ u64 rsvd[4];
+};
+
+struct set_vf_perm {
+ struct mbox_msghdr hdr;
+ u16 vf;
+#define RESET_VF_PERM BIT_ULL(0)
+#define VF_TRUSTED BIT_ULL(1)
+ u64 flags;
+};
+
/* CGX mbox message formats */
struct cgx_stats_rsp {
struct mbox_msghdr hdr;
-#define CGX_RX_STATS_COUNT 13
-#define CGX_TX_STATS_COUNT 18
+#define CGX_RX_STATS_COUNT 9
+#define CGX_TX_STATS_COUNT 18
u64 rx_stats[CGX_RX_STATS_COUNT];
u64 tx_stats[CGX_TX_STATS_COUNT];
};
+struct cgx_fec_stats_rsp {
+ struct mbox_msghdr hdr;
+ u64 fec_corr_blks;
+ u64 fec_uncorr_blks;
+};
/* Structure for requesting the operation for
* setting/getting mac address in the CGX interface
*/
@@ -340,11 +543,45 @@ struct cgx_mac_addr_set_or_get {
u8 mac_addr[ETH_ALEN];
};
+/* Structure for requesting the operation to
+ * add DMAC filter entry into CGX interface
+ */
+struct cgx_mac_addr_add_req {
+ struct mbox_msghdr hdr;
+ u8 mac_addr[ETH_ALEN];
+};
+
+/* Structure for response against the operation to
+ * add DMAC filter entry into CGX interface
+ */
+struct cgx_mac_addr_add_rsp {
+ struct mbox_msghdr hdr;
+ u8 index;
+};
+
+/* Structure for requesting the operation to
+ * delete DMAC filter entry from CGX interface
+ */
+struct cgx_mac_addr_del_req {
+ struct mbox_msghdr hdr;
+ u8 index;
+};
+
+/* Structure for response against the operation to
+ * get maximum supported DMAC filter entries
+ */
+struct cgx_max_dmac_entries_get_rsp {
+ struct mbox_msghdr hdr;
+ u8 max_dmac_filters;
+};
+
struct cgx_link_user_info {
uint64_t link_up:1;
uint64_t full_duplex:1;
uint64_t lmac_type_id:4;
uint64_t speed:20; /* speed in Mbps */
+ uint64_t an:1; /* AN supported or not */
+ uint64_t fec:2; /* FEC type if enabled else 0 */
#define LMACTYPE_STR_LEN 16
char lmac_type[LMACTYPE_STR_LEN];
};
@@ -354,6 +591,11 @@ struct cgx_link_info_msg {
struct cgx_link_user_info link_info;
};
+struct cgx_ptp_rx_info_msg {
+ struct mbox_msghdr hdr;
+ u8 ptp_en;
+};
+
struct cgx_pause_frm_cfg {
struct mbox_msghdr hdr;
u8 set;
@@ -363,6 +605,151 @@ struct cgx_pause_frm_cfg {
u8 tx_pause;
};
+enum fec_type {
+ OTX2_FEC_NONE,
+ OTX2_FEC_BASER,
+ OTX2_FEC_RS,
+ OTX2_FEC_STATS_CNT = 2,
+ OTX2_FEC_OFF,
+};
+
+struct fec_mode {
+ struct mbox_msghdr hdr;
+ int fec;
+};
+
+struct sfp_eeprom_s {
+#define SFP_EEPROM_SIZE 256
+ u16 sff_id;
+ u8 buf[SFP_EEPROM_SIZE];
+ u64 reserved;
+};
+
+struct phy_s {
+ struct {
+ u64 can_change_mod_type:1;
+ u64 mod_type:1;
+ u64 has_fec_stats:1;
+ } misc;
+ struct fec_stats_s {
+ u32 rsfec_corr_cws;
+ u32 rsfec_uncorr_cws;
+ u32 brfec_corr_blks;
+ u32 brfec_uncorr_blks;
+ } fec_stats;
+};
+
+struct cgx_lmac_fwdata_s {
+ u16 rw_valid;
+ u64 supported_fec;
+ u64 supported_an;
+ u64 supported_link_modes;
+ /* only applicable if AN is supported */
+ u64 advertised_fec;
+ u64 advertised_link_modes;
+ /* Only applicable if SFP/QSFP slot is present */
+ struct sfp_eeprom_s sfp_eeprom;
+ struct phy_s phy;
+#define LMAC_FWDATA_RESERVED_MEM 1021
+ u64 reserved[LMAC_FWDATA_RESERVED_MEM];
+};
+
+struct cgx_fw_data {
+ struct mbox_msghdr hdr;
+ struct cgx_lmac_fwdata_s fwdata;
+};
+
+struct cgx_set_link_mode_args {
+ u32 speed;
+ u8 duplex;
+ u8 an;
+ u8 mode_baseidx;
+ u64 mode;
+};
+
+struct cgx_set_link_mode_req {
+#define AUTONEG_UNKNOWN 0xff
+ struct mbox_msghdr hdr;
+ struct cgx_set_link_mode_args args;
+};
+
+struct cgx_set_link_mode_rsp {
+ struct mbox_msghdr hdr;
+ int status;
+};
+
+struct cgx_set_link_state_msg {
+ struct mbox_msghdr hdr;
+ u8 enable; /* '1' for link up, '0' for link down */
+};
+
+struct cgx_phy_mod_type {
+ struct mbox_msghdr hdr;
+ int mod;
+};
+
+struct cgx_mac_addr_update_req {
+ struct mbox_msghdr hdr;
+ u8 mac_addr[ETH_ALEN];
+ u8 index;
+};
+
+#define RVU_LMAC_FEAT_FC BIT_ULL(0) /* pause frames */
+#define RVU_LMAC_FEAT_HIGIG2 BIT_ULL(1)
+ /* flow control from physical link higig2 messages */
+#define RVU_LMAC_FEAT_PTP BIT_ULL(2) /* precision time protocol */
+#define RVU_LMAC_FEAT_DMACF BIT_ULL(3) /* DMAC FILTER */
+#define RVU_MAC_VERSION BIT_ULL(4)
+#define RVU_MAC_CGX BIT_ULL(5)
+#define RVU_MAC_RPM BIT_ULL(6)
+
+struct cgx_features_info_msg {
+ struct mbox_msghdr hdr;
+ u64 lmac_features;
+};
+
+struct rpm_stats_rsp {
+ struct mbox_msghdr hdr;
+#define RPM_RX_STATS_COUNT 43
+#define RPM_TX_STATS_COUNT 34
+ u64 rx_stats[RPM_RX_STATS_COUNT];
+ u64 tx_stats[RPM_TX_STATS_COUNT];
+};
+
+struct cgx_pfc_cfg {
+ struct mbox_msghdr hdr;
+ u8 rx_pause;
+ u8 tx_pause;
+ u16 pfc_en; /* bitmap indicating pfc enabled traffic classes */
+};
+
+struct cgx_pfc_rsp {
+ struct mbox_msghdr hdr;
+ u8 rx_pause;
+ u8 tx_pause;
+};
+
+ /* NPC mbox message formats */
+
+struct npc_set_pkind {
+ struct mbox_msghdr hdr;
+#define OTX2_PRIV_FLAGS_DEFAULT BIT_ULL(0)
+#define OTX2_PRIV_FLAGS_EDSA BIT_ULL(1)
+#define OTX2_PRIV_FLAGS_HIGIG BIT_ULL(2)
+#define OTX2_PRIV_FLAGS_FDSA BIT_ULL(3)
+#define OTX2_PRIV_FLAGS_CUSTOM BIT_ULL(63)
+ u64 mode;
+#define PKIND_TX BIT_ULL(0)
+#define PKIND_RX BIT_ULL(1)
+ u8 dir;
+ u8 pkind; /* valid only if the custom flag is set */
+ u8 var_len_off; /* Offset of custom header length field.
+ * Valid only for pkind NPC_RX_CUSTOM_PRE_L2_PKIND
+ */
+ u8 var_len_off_mask; /* Mask for length within the offset */
+ u8 shift_dir; /* shift direction to get length of the header at var_len_off */
+};
+
/* NPA mbox message formats */
/* NPA mailbox error codes
@@ -391,6 +778,7 @@ struct npa_lf_alloc_rsp {
u32 stack_pg_ptrs; /* No of ptrs per stack page */
u32 stack_pg_bytes; /* Size of stack page */
u16 qints; /* NPA_AF_CONST::QINTS */
+ u8 cache_lines; /* BATCH ALLOC DMA */
};
/* NPA AQ enqueue msg */
@@ -459,6 +847,29 @@ enum nix_af_status {
NIX_AF_ERR_LSO_CFG_FAIL = -418,
NIX_AF_INVAL_NPA_PF_FUNC = -419,
NIX_AF_INVAL_SSO_PF_FUNC = -420,
+ NIX_AF_ERR_TX_VTAG_NOSPC = -421,
+ NIX_AF_ERR_RX_VTAG_INUSE = -422,
+ NIX_AF_ERR_PTP_CONFIG_FAIL = -423,
+ NIX_AF_ERR_NPC_KEY_NOT_SUPP = -424,
+ NIX_AF_ERR_INVALID_NIXBLK = -425,
+ NIX_AF_ERR_INVALID_BANDPROF = -426,
+ NIX_AF_ERR_IPOLICER_NOTSUPP = -427,
+ NIX_AF_ERR_BANDPROF_INVAL_REQ = -428,
+ NIX_AF_ERR_CQ_CTX_WRITE_ERR = -429,
+ NIX_AF_ERR_AQ_CTX_RETRY_WRITE = -430,
+ NIX_AF_ERR_LINK_CREDITS = -431,
+};
+
+/* For NIX RX vtag action */
+enum nix_rx_vtag0_type {
+ NIX_AF_LFX_RX_VTAG_TYPE0, /* reserved for rx vlan offload */
+ NIX_AF_LFX_RX_VTAG_TYPE1,
+ NIX_AF_LFX_RX_VTAG_TYPE2,
+ NIX_AF_LFX_RX_VTAG_TYPE3,
+ NIX_AF_LFX_RX_VTAG_TYPE4,
+ NIX_AF_LFX_RX_VTAG_TYPE5,
+ NIX_AF_LFX_RX_VTAG_TYPE6,
+ NIX_AF_LFX_RX_VTAG_TYPE7,
};
/* For NIX LF context alloc and init */
@@ -475,6 +886,9 @@ struct nix_lf_alloc_req {
u16 sso_func;
u64 rx_cfg; /* See NIX_AF_LF(0..127)_RX_CFG */
u64 way_mask;
+#define NIX_LF_RSS_TAG_LSB_AS_ADDER BIT_ULL(0)
+#define NIX_LF_LBK_BLK_SEL BIT_ULL(1)
+ u64 flags;
};
struct nix_lf_alloc_rsp {
@@ -491,6 +905,54 @@ struct nix_lf_alloc_rsp {
u8 lf_tx_stats; /* NIX_AF_CONST1::LF_TX_STATS */
u16 cints; /* NIX_AF_CONST2::CINTS */
u16 qints; /* NIX_AF_CONST2::QINTS */
+ u8 hw_rx_tstamp_en;
+ u8 cgx_links; /* No. of CGX links present in HW */
+ u8 lbk_links; /* No. of LBK links present in HW */
+ u8 sdp_links; /* No. of SDP links present in HW */
+ u8 tx_link; /* Transmit channel link number */
+};
+
+struct nix_lf_free_req {
+ struct mbox_msghdr hdr;
+#define NIX_LF_DISABLE_FLOWS BIT_ULL(0)
+#define NIX_LF_DONT_FREE_TX_VTAG BIT_ULL(1)
+ u64 flags;
+};
+
+/* CN10K NIX AQ enqueue msg */
+struct nix_cn10k_aq_enq_req {
+ struct mbox_msghdr hdr;
+ u32 qidx;
+ u8 ctype;
+ u8 op;
+ union {
+ struct nix_cn10k_rq_ctx_s rq;
+ struct nix_cn10k_sq_ctx_s sq;
+ struct nix_cq_ctx_s cq;
+ struct nix_rsse_s rss;
+ struct nix_rx_mce_s mce;
+ struct nix_bandprof_s prof;
+ };
+ union {
+ struct nix_cn10k_rq_ctx_s rq_mask;
+ struct nix_cn10k_sq_ctx_s sq_mask;
+ struct nix_cq_ctx_s cq_mask;
+ struct nix_rsse_s rss_mask;
+ struct nix_rx_mce_s mce_mask;
+ struct nix_bandprof_s prof_mask;
+ };
+};
+
+struct nix_cn10k_aq_enq_rsp {
+ struct mbox_msghdr hdr;
+ union {
+ struct nix_cn10k_rq_ctx_s rq;
+ struct nix_cn10k_sq_ctx_s sq;
+ struct nix_cq_ctx_s cq;
+ struct nix_rsse_s rss;
+ struct nix_rx_mce_s mce;
+ struct nix_bandprof_s prof;
+ };
};
/* NIX AQ enqueue msg */
@@ -505,6 +967,7 @@ struct nix_aq_enq_req {
struct nix_cq_ctx_s cq;
struct nix_rsse_s rss;
struct nix_rx_mce_s mce;
+ u64 prof;
};
union {
struct nix_rq_ctx_s rq_mask;
@@ -512,6 +975,7 @@ struct nix_aq_enq_req {
struct nix_cq_ctx_s cq_mask;
struct nix_rsse_s rss_mask;
struct nix_rx_mce_s mce_mask;
+ u64 prof_mask;
};
};
@@ -523,6 +987,7 @@ struct nix_aq_enq_rsp {
struct nix_cq_ctx_s cq;
struct nix_rsse_s rss;
struct nix_rx_mce_s mce;
+ struct nix_bandprof_s prof;
};
};
@@ -563,6 +1028,7 @@ struct nix_txsch_free_req {
struct nix_txschq_config {
struct mbox_msghdr hdr;
u8 lvl; /* SMQ/MDQ/TL4/TL3/TL2/TL1 */
+ u8 read;
#define TXSCHQ_IDX_SHIFT 16
#define TXSCHQ_IDX_MASK (BIT_ULL(10) - 1)
#define TXSCHQ_IDX(reg, shift) (((reg) >> (shift)) & TXSCHQ_IDX_MASK)
@@ -570,6 +1036,8 @@ struct nix_txschq_config {
#define MAX_REGS_PER_MBOX_MSG 20
u64 reg[MAX_REGS_PER_MBOX_MSG];
u64 regval[MAX_REGS_PER_MBOX_MSG];
+ /* All 0's => overwrite with new value */
+ u64 regval_mask[MAX_REGS_PER_MBOX_MSG];
};
struct nix_vtag_config {
@@ -583,14 +1051,40 @@ struct nix_vtag_config {
union {
/* valid when cfg_type is '0' */
struct {
- /* tx vlan0 tag(C-VLAN) */
- u64 vlan0;
- /* tx vlan1 tag(S-VLAN) */
- u64 vlan1;
- /* insert tx vlan tag */
- u8 insert_vlan :1;
- /* insert tx double vlan tag */
- u8 double_vlan :1;
+ u64 vtag0;
+ u64 vtag1;
+
+ /* cfg_vtag0 & cfg_vtag1 fields are valid
+ * when free_vtag0 & free_vtag1 are '0's.
+ */
+ /* cfg_vtag0 = 1 to configure vtag0 */
+ u8 cfg_vtag0 :1;
+ /* cfg_vtag1 = 1 to configure vtag1 */
+ u8 cfg_vtag1 :1;
+
+ /* vtag0_idx & vtag1_idx are only valid when
+ * both cfg_vtag0 & cfg_vtag1 are '0's. These fields
+ * are used along with free_vtag0 & free_vtag1 to
+ * free the NIX LF's tx_vlan configuration.
+ *
+ * They denote the indices of the tx_vtag def
+ * registers that need to be cleared and freed.
+ */
+ int vtag0_idx;
+ int vtag1_idx;
+
+ /* free_vtag0 & free_vtag1 fields are valid
+ * when cfg_vtag0 & cfg_vtag1 are '0's.
+ */
+ /* free_vtag0 = 1 clears vtag0 configuration
+ * vtag0_idx denotes the index to be cleared.
+ */
+ u8 free_vtag0 :1;
+ /* free_vtag1 = 1 clears vtag1 configuration
+ * vtag1_idx denotes the index to be cleared.
+ */
+ u8 free_vtag1 :1;
} tx;
/* valid when cfg_type is '1' */
@@ -605,6 +1099,19 @@ struct nix_vtag_config {
};
};
+struct nix_vtag_config_rsp {
+ struct mbox_msghdr hdr;
+ int vtag0_idx;
+ int vtag1_idx;
+ /* Indices of the tx_vtag def registers used to configure
+ * tx vtag0 & vtag1 headers. These indices are valid only
+ * when the nix_vtag_config mbox requested vtag0 and/or
+ * vtag1 configuration.
+ */
+};
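A hedged sketch of the TX flow described in the comments above (req, rsp and saved_vtag0_idx are illustrative; the mailbox alloc/send calls are omitted): configure vtag0 first and remember the index returned in nix_vtag_config_rsp, then free that definition later using the same index.

	/* configure: cfg_type '0' selects the tx union */
	req->cfg_type = 0;
	req->tx.vtag0 = vtag0_value;		/* illustrative tag value */
	req->tx.cfg_vtag0 = 1;
	/* ... send; the response carries rsp->vtag0_idx ... */

	/* later: free the same tx_vtag def entry */
	req->cfg_type = 0;
	req->tx.vtag0_idx = saved_vtag0_idx;
	req->tx.free_vtag0 = 1;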
+
+#define NIX_FLOW_KEY_TYPE_L3_L4_MASK (~(0xf << 28))
+
struct nix_rss_flowkey_cfg {
struct mbox_msghdr hdr;
int mcam_index; /* MCAM entry index to modify */
@@ -626,7 +1133,16 @@ struct nix_rss_flowkey_cfg {
#define NIX_FLOW_KEY_TYPE_INNR_UDP BIT(15)
#define NIX_FLOW_KEY_TYPE_INNR_SCTP BIT(16)
#define NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC BIT(17)
+#define NIX_FLOW_KEY_TYPE_CH_LEN_90B BIT(18)
+#define NIX_FLOW_KEY_TYPE_CUSTOM0 BIT(19)
#define NIX_FLOW_KEY_TYPE_VLAN BIT(20)
+#define NIX_FLOW_KEY_TYPE_IPV4_PROTO BIT(21)
+#define NIX_FLOW_KEY_TYPE_AH BIT(22)
+#define NIX_FLOW_KEY_TYPE_ESP BIT(23)
+#define NIX_FLOW_KEY_TYPE_L4_DST_ONLY BIT(28)
+#define NIX_FLOW_KEY_TYPE_L4_SRC_ONLY BIT(29)
+#define NIX_FLOW_KEY_TYPE_L3_DST_ONLY BIT(30)
+#define NIX_FLOW_KEY_TYPE_L3_SRC_ONLY BIT(31)
u32 flowkey_cfg; /* Flowkey types selected */
u8 group; /* RSS context or group */
};
@@ -665,6 +1181,7 @@ struct nix_rx_mode {
#define NIX_RX_MODE_UCAST BIT(0)
#define NIX_RX_MODE_PROMISC BIT(1)
#define NIX_RX_MODE_ALLMULTI BIT(2)
+#define NIX_RX_MODE_USE_MCE BIT(3)
u16 mode;
};
@@ -672,6 +1189,7 @@ struct nix_rx_cfg {
struct mbox_msghdr hdr;
#define NIX_RX_OL3_VERIFY BIT(0)
#define NIX_RX_OL4_VERIFY BIT(1)
+#define NIX_RX_DROP_RE BIT(2)
u8 len_verify; /* Outer L3/L4 len check */
#define NIX_RX_CSUM_OL4_VERIFY BIT(0)
u8 csum_verify; /* Outer L4 checksum verification */
@@ -698,6 +1216,14 @@ struct nix_lso_format_cfg_rsp {
u8 lso_format_idx;
};
+struct nix_set_vlan_tpid {
+ struct mbox_msghdr hdr;
+#define NIX_VLAN_TYPE_INNER 0
+#define NIX_VLAN_TYPE_OUTER 1
+ u8 vlan_type;
+ u16 tpid;
+};
+
struct nix_bp_cfg_req {
struct mbox_msghdr hdr;
u16 chan_base; /* Starting channel number */
@@ -717,6 +1243,209 @@ struct nix_bp_cfg_rsp {
u8 chan_cnt; /* Number of channel for which bpids are assigned */
};
+/* Global NIX inline IPSec configuration */
+struct nix_inline_ipsec_cfg {
+ struct mbox_msghdr hdr;
+ u32 cpt_credit;
+ struct {
+ u8 egrp;
+ u8 opcode;
+ u16 param1;
+ u16 param2;
+ } gen_cfg;
+ struct {
+ u16 cpt_pf_func;
+ u8 cpt_slot;
+ } inst_qsel;
+ u8 enable;
+};
+
+/* Per NIX LF inline IPSec configuration */
+struct nix_inline_ipsec_lf_cfg {
+ struct mbox_msghdr hdr;
+ u64 sa_base_addr;
+ struct {
+ u32 tag_const;
+ u16 lenm1_max;
+ u8 sa_pow2_size;
+ u8 tt;
+ } ipsec_cfg0;
+ struct {
+ u32 sa_idx_max;
+ u8 sa_idx_w;
+ } ipsec_cfg1;
+ u8 enable;
+};
+
+struct nix_hw_info {
+ struct mbox_msghdr hdr;
+ u16 vwqe_delay;
+ u16 max_mtu;
+ u16 min_mtu;
+ u32 rpm_dwrr_mtu;
+ u32 sdp_dwrr_mtu;
+ u64 rsvd[16]; /* Reserved fields for future expansion */
+};
+
+struct nix_bandprof_alloc_req {
+ struct mbox_msghdr hdr;
+ /* Count of profiles needed per layer */
+ u16 prof_count[BAND_PROF_NUM_LAYERS];
+};
+
+struct nix_bandprof_alloc_rsp {
+ struct mbox_msghdr hdr;
+ u16 prof_count[BAND_PROF_NUM_LAYERS];
+
+ /* There is no need to allocate more than 1 bandwidth profile
+ * per RQ of a PF_FUNC's NIXLF. So limit the maximum
+ * profiles to 64 per PF_FUNC.
+ */
+#define MAX_BANDPROF_PER_PFFUNC 64
+ u16 prof_idx[BAND_PROF_NUM_LAYERS][MAX_BANDPROF_PER_PFFUNC];
+};
+
+struct nix_bandprof_free_req {
+ struct mbox_msghdr hdr;
+ u8 free_all;
+ u16 prof_count[BAND_PROF_NUM_LAYERS];
+ u16 prof_idx[BAND_PROF_NUM_LAYERS][MAX_BANDPROF_PER_PFFUNC];
+};
+
+struct nix_bandprof_get_hwinfo_rsp {
+ struct mbox_msghdr hdr;
+ u16 prof_count[BAND_PROF_NUM_LAYERS];
+ u32 policer_timeunit;
+};
+
+/* SSO mailbox error codes
+ * Range 501 - 600.
+ */
+enum sso_af_status {
+ SSO_AF_ERR_PARAM = -501,
+ SSO_AF_ERR_LF_INVALID = -502,
+ SSO_AF_ERR_AF_LF_ALLOC = -503,
+ SSO_AF_ERR_GRP_EBUSY = -504,
+ SSO_AF_INVAL_NPA_PF_FUNC = -505,
+};
+
+struct sso_lf_alloc_req {
+ struct mbox_msghdr hdr;
+ int node;
+ u16 hwgrps;
+};
+
+struct sso_lf_alloc_rsp {
+ struct mbox_msghdr hdr;
+ u32 xaq_buf_size;
+ u32 xaq_wq_entries;
+ u32 in_unit_entries;
+ u16 hwgrps;
+};
+
+struct sso_lf_free_req {
+ struct mbox_msghdr hdr;
+ int node;
+ u16 hwgrps;
+};
+
+struct sso_hw_setconfig {
+ struct mbox_msghdr hdr;
+ u32 npa_aura_id;
+ u16 npa_pf_func;
+ u16 hwgrps;
+};
+
+struct sso_release_xaq {
+ struct mbox_msghdr hdr;
+ u16 hwgrps;
+};
+
+struct sso_info_req {
+ struct mbox_msghdr hdr;
+ union {
+ u16 grp;
+ u16 hws;
+ };
+};
+
+struct sso_grp_priority {
+ struct mbox_msghdr hdr;
+ u16 grp;
+ u8 priority;
+ u8 affinity;
+ u8 weight;
+};
+
+/* SSOW mailbox error codes
+ * Range 601 - 700.
+ */
+enum ssow_af_status {
+ SSOW_AF_ERR_PARAM = -601,
+ SSOW_AF_ERR_LF_INVALID = -602,
+ SSOW_AF_ERR_AF_LF_ALLOC = -603,
+ SSOW_AF_ERR_INVALID_CFG = -604,
+};
+
+struct ssow_lf_alloc_req {
+ struct mbox_msghdr hdr;
+ int node;
+ u16 hws;
+};
+
+struct ssow_lf_free_req {
+ struct mbox_msghdr hdr;
+ int node;
+ u16 hws;
+};
+
+struct ssow_config_lsw {
+ struct mbox_msghdr hdr;
+#define SSOW_LSW_DIS 0
+#define SSOW_LSW_GW_WAIT 1
+#define SSOW_LSW_GW_IMM 2
+ u8 lsw_mode;
+#define SSOW_WQE_REL_LSW_WAIT 0
+#define SSOW_WQE_REL_IMM 1
+ u8 wqe_release;
+};
+
+struct ssow_chng_mship {
+ struct mbox_msghdr hdr;
+ u8 set;
+ u8 enable;
+ u8 hws;
+ u16 nb_hwgrps;
+ u16 hwgrps[MAX_RVU_BLKLF_CNT];
+};
+
+struct sso_grp_qos_cfg {
+ struct mbox_msghdr hdr;
+ u16 grp;
+ u32 xaq_limit;
+ u16 taq_thr;
+ u16 iaq_thr;
+};
+
+struct sso_grp_stats {
+ struct mbox_msghdr hdr;
+ u16 grp;
+ u64 ws_pc;
+ u64 ext_pc;
+ u64 wa_pc;
+ u64 ts_pc;
+ u64 ds_pc;
+ u64 dq_pc;
+ u64 aw_status;
+ u64 page_cnt;
+};
+
+struct sso_hws_stats {
+ struct mbox_msghdr hdr;
+ u16 hws;
+ u64 arbitration;
+};
+
/* NPC mbox message structs */
#define NPC_MCAM_ENTRY_INVALID 0xFFFF
@@ -730,6 +1459,15 @@ enum npc_af_status {
NPC_MCAM_ALLOC_DENIED = -702,
NPC_MCAM_ALLOC_FAILED = -703,
NPC_MCAM_PERM_DENIED = -704,
+ NPC_AF_ERR_HIGIG_CONFIG_FAIL = -705,
+ NPC_AF_ERR_HIGIG_NOT_SUPPORTED = -706,
+ NPC_FLOW_INTF_INVALID = -707,
+ NPC_FLOW_CHAN_INVALID = -708,
+ NPC_FLOW_NO_NIXLF = -709,
+ NPC_FLOW_NOT_SUPPORTED = -710,
+ NPC_FLOW_VF_PERM_DENIED = -711,
+ NPC_FLOW_VF_NOT_INIT = -712,
+ NPC_FLOW_VF_OVERLAP = -713,
};
struct npc_mcam_alloc_entry_req {
@@ -865,20 +1603,453 @@ struct npc_get_kex_cfg_rsp {
u8 mkex_pfl_name[MKEX_NAME_LEN];
};
+struct flow_msg {
+ unsigned char dmac[6];
+ unsigned char smac[6];
+ __be16 etype;
+ __be16 vlan_etype;
+ __be16 vlan_tci;
+ union {
+ __be32 ip4src;
+ __be32 ip6src[4];
+ };
+ union {
+ __be32 ip4dst;
+ __be32 ip6dst[4];
+ };
+ u8 tos;
+ u8 ip_ver;
+ u8 ip_proto;
+ u8 tc;
+ __be16 sport;
+ __be16 dport;
+};
+
+struct npc_install_flow_req {
+ struct mbox_msghdr hdr;
+ struct flow_msg packet;
+ struct flow_msg mask;
+ u64 features;
+ u16 entry;
+ u16 channel;
+ u16 chan_mask;
+ u8 intf;
+ u8 set_cntr; /* If a counter is available, set it for this entry? */
+ u8 default_rule;
+ u8 append; /* overwrite(0) or append(1) flow to default rule? */
+ u16 vf;
+ /* action */
+ u32 index;
+ u16 match_id;
+ u8 flow_key_alg;
+ u8 op;
+ /* vtag rx action */
+ u8 vtag0_type;
+ u8 vtag0_valid;
+ u8 vtag1_type;
+ u8 vtag1_valid;
+ /* vtag tx action */
+ u16 vtag0_def;
+ u8 vtag0_op;
+ u16 vtag1_def;
+ u8 vtag1_op;
+};
+
+struct npc_install_flow_rsp {
+ struct mbox_msghdr hdr;
+ int counter; /* negative if no counter else counter number */
+};
+
+struct npc_delete_flow_req {
+ struct mbox_msghdr hdr;
+ u16 entry;
+ u16 start; /* Disable range of entries */
+ u16 end;
+ u8 all; /* PF + VFs */
+};
+
+struct npc_mcam_read_entry_req {
+ struct mbox_msghdr hdr;
+ u16 entry; /* MCAM entry to read */
+};
+
+struct npc_mcam_read_entry_rsp {
+ struct mbox_msghdr hdr;
+ struct mcam_entry entry_data;
+ u8 intf;
+ u8 enable;
+};
+
+struct npc_mcam_read_base_rule_rsp {
+ struct mbox_msghdr hdr;
+ struct mcam_entry entry;
+};
+
+struct npc_mcam_get_stats_req {
+ struct mbox_msghdr hdr;
+ u16 entry; /* mcam entry */
+};
+
+struct npc_mcam_get_stats_rsp {
+ struct mbox_msghdr hdr;
+ u64 stat; /* counter stats */
+ u8 stat_ena; /* enabled */
+};
+
+/* TIM mailbox error codes
+ * Range 801 - 900.
+ */
+enum tim_af_status {
+ TIM_AF_NO_RINGS_LEFT = -801,
+ TIM_AF_INVAL_NPA_PF_FUNC = -802,
+ TIM_AF_INVAL_SSO_PF_FUNC = -803,
+ TIM_AF_RING_STILL_RUNNING = -804,
+ TIM_AF_LF_INVALID = -805,
+ TIM_AF_CSIZE_NOT_ALIGNED = -806,
+ TIM_AF_CSIZE_TOO_SMALL = -807,
+ TIM_AF_CSIZE_TOO_BIG = -808,
+ TIM_AF_INTERVAL_TOO_SMALL = -809,
+ TIM_AF_INVALID_BIG_ENDIAN_VALUE = -810,
+ TIM_AF_INVALID_CLOCK_SOURCE = -811,
+ TIM_AF_GPIO_CLK_SRC_NOT_ENABLED = -812,
+ TIM_AF_INVALID_BSIZE = -813,
+ TIM_AF_INVALID_ENABLE_PERIODIC = -814,
+ TIM_AF_INVALID_ENABLE_DONTFREE = -815,
+ TIM_AF_ENA_DONTFRE_NSET_PERIODIC = -816,
+ TIM_AF_RING_ALREADY_DISABLED = -817,
+};
+
+enum tim_clk_srcs {
+ TIM_CLK_SRCS_TENNS = 0,
+ TIM_CLK_SRCS_GPIO = 1,
+ TIM_CLK_SRCS_GTI = 2,
+ TIM_CLK_SRCS_PTP = 3,
+ TIM_CLK_SRCS_SYNCE = 4,
+ TIM_CLK_SRCS_BTS = 5,
+ TIM_CLK_SRSC_INVALID,
+};
+
+enum tim_gpio_edge {
+ TIM_GPIO_NO_EDGE = 0,
+ TIM_GPIO_LTOH_TRANS = 1,
+ TIM_GPIO_HTOL_TRANS = 2,
+ TIM_GPIO_BOTH_TRANS = 3,
+ TIM_GPIO_INVALID,
+};
+
+struct tim_lf_alloc_req {
+ struct mbox_msghdr hdr;
+ u16 ring;
+ u16 npa_pf_func;
+ u16 sso_pf_func;
+};
+
+struct tim_ring_req {
+ struct mbox_msghdr hdr;
+ u16 ring;
+};
+
+struct tim_config_req {
+ struct mbox_msghdr hdr;
+ u16 ring;
+ u8 bigendian;
+ u8 clocksource;
+ u8 enableperiodic;
+ u8 enabledontfreebuffer;
+ u32 bucketsize;
+ u32 chunksize;
+ u32 interval; /* Cycles between traversal */
+ u8 gpioedge;
+ u8 rsvd[7];
+ u64 intervalns; /* Nanoseconds between traversal */
+ u64 clockfreq;
+};
+
+struct tim_lf_alloc_rsp {
+ struct mbox_msghdr hdr;
+ u64 tenns_clk;
+};
+
+struct tim_enable_rsp {
+ struct mbox_msghdr hdr;
+ u64 timestarted;
+ u32 currentbucket;
+};
+
+struct tim_intvl_req {
+ struct mbox_msghdr hdr;
+ u8 clocksource;
+ u64 clockfreq;
+};
+
+struct tim_intvl_rsp {
+ struct mbox_msghdr hdr;
+ u64 intvl_cyc;
+ u64 intvl_ns;
+};
+
+/* CPT mailbox error codes
+ * Range 901 - 1000.
+ */
+enum cpt_af_status {
+ CPT_AF_ERR_PARAM = -901,
+ CPT_AF_ERR_GRP_INVALID = -902,
+ CPT_AF_ERR_LF_INVALID = -903,
+ CPT_AF_ERR_ACCESS_DENIED = -904,
+ CPT_AF_ERR_SSO_PF_FUNC_INVALID = -905,
+ CPT_AF_ERR_NIX_PF_FUNC_INVALID = -906,
+ CPT_AF_ERR_INLINE_IPSEC_INB_ENA = -907,
+ CPT_AF_ERR_INLINE_IPSEC_OUT_ENA = -908
+};
+
+/* CPT mbox message formats */
+
+struct cpt_rd_wr_reg_msg {
+ struct mbox_msghdr hdr;
+ u64 reg_offset;
+ u64 *ret_val;
+ u64 val;
+ u8 is_write;
+ u8 blkaddr; /* BLKADDR_CPT0/BLKADDR_CPT1 or 0 for BLKADDR_CPT0 */
+};
+
+struct cpt_lf_alloc_req_msg {
+ struct mbox_msghdr hdr;
+ u16 nix_pf_func;
+ u16 sso_pf_func;
+ u16 eng_grpmsk;
+ u8 blkaddr; /* BLKADDR_CPT0/BLKADDR_CPT1 or 0 for BLKADDR_CPT0 */
+};
+
+#define CPT_INLINE_INBOUND 0
+#define CPT_INLINE_OUTBOUND 1
+struct cpt_inline_ipsec_cfg_msg {
+ struct mbox_msghdr hdr;
+ u8 enable;
+ u8 slot;
+ u8 dir;
+ u8 sso_pf_func_ovrd;
+ u16 sso_pf_func; /* inbound path SSO_PF_FUNC */
+ u16 nix_pf_func; /* outbound path NIX_PF_FUNC */
+};
+
+/* Mailbox message request and response format for CPT stats. */
+struct cpt_sts_req {
+ struct mbox_msghdr hdr;
+ u8 blkaddr;
+};
+
+struct cpt_sts_rsp {
+ struct mbox_msghdr hdr;
+ u64 inst_req_pc;
+ u64 inst_lat_pc;
+ u64 rd_req_pc;
+ u64 rd_lat_pc;
+ u64 rd_uc_pc;
+ u64 active_cycles_pc;
+ u64 ctx_mis_pc;
+ u64 ctx_hit_pc;
+ u64 ctx_aop_pc;
+ u64 ctx_aop_lat_pc;
+ u64 ctx_ifetch_pc;
+ u64 ctx_ifetch_lat_pc;
+ u64 ctx_ffetch_pc;
+ u64 ctx_ffetch_lat_pc;
+ u64 ctx_wback_pc;
+ u64 ctx_wback_lat_pc;
+ u64 ctx_psh_pc;
+ u64 ctx_psh_lat_pc;
+ u64 ctx_err;
+ u64 ctx_enc_id;
+ u64 ctx_flush_timer;
+ u64 rxc_time;
+ u64 rxc_time_cfg;
+ u64 rxc_active_sts;
+ u64 rxc_zombie_sts;
+ u64 busy_sts_ae;
+ u64 free_sts_ae;
+ u64 busy_sts_se;
+ u64 free_sts_se;
+ u64 busy_sts_ie;
+ u64 free_sts_ie;
+ u64 exe_err_info;
+ u64 cptclk_cnt;
+ u64 diag;
+ u64 rxc_dfrg;
+ u64 x2p_link_cfg0;
+ u64 x2p_link_cfg1;
+};
+
+/* Mailbox message request format to configure reassembly timeout. */
+struct cpt_rxc_time_cfg_req {
+ struct mbox_msghdr hdr;
+ int blkaddr;
+ u32 step;
+ u16 zombie_thres;
+ u16 zombie_limit;
+ u16 active_thres;
+ u16 active_limit;
+};
+
+/* Mailbox message request format for CPT_INST_S lmtst. */
+struct cpt_inst_lmtst_req {
+ struct mbox_msghdr hdr;
+ u64 inst[8];
+ u64 rsvd;
+};
+
+/* REE mailbox error codes
+ * Range 1001 - 1100.
+ */
+enum ree_af_status {
+ REE_AF_ERR_RULE_UNKNOWN_VALUE = -1001,
+ REE_AF_ERR_LF_NO_MORE_RESOURCES = -1002,
+ REE_AF_ERR_LF_INVALID = -1003,
+ REE_AF_ERR_ACCESS_DENIED = -1004,
+ REE_AF_ERR_RULE_DB_PARTIAL = -1005,
+ REE_AF_ERR_RULE_DB_EQ_BAD_VALUE = -1006,
+ REE_AF_ERR_RULE_DB_BLOCK_ALLOC_FAILED = -1007,
+ REE_AF_ERR_BLOCK_NOT_IMPLEMENTED = -1008,
+ REE_AF_ERR_RULE_DB_INC_OFFSET_TOO_BIG = -1009,
+ REE_AF_ERR_RULE_DB_OFFSET_TOO_BIG = -1010,
+ REE_AF_ERR_Q_IS_GRACEFUL_DIS = -1011,
+ REE_AF_ERR_Q_NOT_GRACEFUL_DIS = -1012,
+ REE_AF_ERR_RULE_DB_ALLOC_FAILED = -1013,
+ REE_AF_ERR_RULE_DB_TOO_BIG = -1014,
+ REE_AF_ERR_RULE_DB_GEQ_BAD_VALUE = -1015,
+ REE_AF_ERR_RULE_DB_LEQ_BAD_VALUE = -1016,
+ REE_AF_ERR_RULE_DB_WRONG_LENGTH = -1017,
+ REE_AF_ERR_RULE_DB_WRONG_OFFSET = -1018,
+ REE_AF_ERR_RULE_DB_BLOCK_TOO_BIG = -1019,
+ REE_AF_ERR_RULE_DB_SHOULD_FILL_REQUEST = -1020,
+ REE_AF_ERR_RULE_DBI_ALLOC_FAILED = -1021,
+ REE_AF_ERR_LF_WRONG_PRIORITY = -1022,
+ REE_AF_ERR_LF_SIZE_TOO_BIG = -1023,
+ REE_AF_ERR_GRAPH_ADDRESS_TOO_BIG = -1024,
+ REE_AF_ERR_BAD_RULE_TYPE = -1025,
+};
+
+/* REE mbox message formats */
+
+struct ree_req_msg {
+ struct mbox_msghdr hdr;
+ u32 blkaddr;
+};
+
+struct ree_lf_req_msg {
+ struct mbox_msghdr hdr;
+ u32 blkaddr;
+ u32 size;
+ u8 lf;
+ u8 pri;
+};
+
+struct ree_rule_db_prog_req_msg {
+ struct mbox_msghdr hdr;
+/* The rule DB is passed in the mbox and copied to the internal REE DB.
+ * This size should be a power of 2 to fit into the rule DB internal blocks.
+ */
+#define REE_RULE_DB_REQ_BLOCK_SIZE (MBOX_SIZE >> 1)
+ u8 rule_db[REE_RULE_DB_REQ_BLOCK_SIZE];
+ u32 blkaddr; /* REE0 or REE1 */
+ u32 total_len; /* Total len of rule db */
+ u32 offset; /* Offset of current rule db block */
+ u16 len; /* Length of rule db block */
+ u8 is_last; /* Is this the last block */
+ u8 is_incremental; /* Is incremental flow */
+ u8 is_dbi; /* Is rule db incremental */
+};
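A hedged sketch of programming a rule database larger than one mailbox block in REE_RULE_DB_REQ_BLOCK_SIZE chunks, using the offset/len/is_last fields (alloc_ree_prog_msg() and send_msg() are hypothetical placeholders, not APIs from this patch):

	struct ree_rule_db_prog_req_msg *req;
	u32 offset;

	for (offset = 0; offset < total_len;
	     offset += REE_RULE_DB_REQ_BLOCK_SIZE) {
		req = alloc_ree_prog_msg(mbox);		/* hypothetical */
		req->blkaddr = blkaddr;			/* REE0 or REE1 */
		req->total_len = total_len;
		req->offset = offset;
		req->len = min_t(u32, total_len - offset,
				 REE_RULE_DB_REQ_BLOCK_SIZE);
		memcpy(req->rule_db, db + offset, req->len);
		req->is_last = (offset + req->len >= total_len);
		send_msg(mbox);				/* hypothetical */
	}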
+
+struct ree_rule_db_get_req_msg {
+ struct mbox_msghdr hdr;
+ u32 blkaddr;
+ u32 offset; /* Retrieve db from this offset */
+ u8 is_dbi; /* Is request for rule db incremental */
+};
+
+struct ree_rd_wr_reg_msg {
+ struct mbox_msghdr hdr;
+ u64 reg_offset;
+ u64 *ret_val;
+ u64 val;
+ u32 blkaddr;
+ u8 is_write;
+};
+
+struct ree_rule_db_len_rsp_msg {
+ struct mbox_msghdr hdr;
+ u32 blkaddr;
+ u32 len;
+ u32 inc_len;
+};
+
+struct ree_rule_db_get_rsp_msg {
+ struct mbox_msghdr hdr;
+#define REE_RULE_DB_RSP_BLOCK_SIZE (MBOX_DOWN_TX_SIZE - SZ_1K)
+ u8 rule_db[REE_RULE_DB_RSP_BLOCK_SIZE];
+ u32 total_len; /* Total len of rule db */
+ u32 offset; /* Offset of current rule db block */
+ u16 len; /* Length of rule db block */
+ u8 is_last; /* Is this the last block */
+};
+
enum ptp_op {
PTP_OP_ADJFINE = 0,
PTP_OP_GET_CLOCK = 1,
+ PTP_OP_GET_TSTMP = 2,
+ PTP_OP_SET_THRESH = 3,
+ PTP_OP_SET_CLOCK = 4,
+ PTP_OP_ADJ_CLOCK = 5,
};
struct ptp_req {
struct mbox_msghdr hdr;
u8 op;
s64 scaled_ppm;
+ u8 is_pmu;
+ u64 thresh;
+ u64 nsec;
+ s64 delta;
};
struct ptp_rsp {
struct mbox_msghdr hdr;
u64 clk;
+ u64 tsc;
+};
+
+struct sdp_node_info {
+ /* Node to which this PF belongs */
+ u8 node_id;
+ u8 max_vfs;
+ u8 num_pf_rings;
+ u8 pf_srn;
+#define SDP_MAX_VFS 128
+ u8 vf_rings[SDP_MAX_VFS];
+};
+
+struct sdp_chan_info_msg {
+ struct mbox_msghdr hdr;
+ struct sdp_node_info info;
+};
+
+struct sdp_get_chan_info_msg {
+ struct mbox_msghdr hdr;
+ u16 chan_base;
+ u16 num_chan;
+};
+
+/* CGX mailbox error codes
+ * Range 1101 - 1200.
+ */
+enum cgx_af_status {
+ LMAC_AF_ERR_INVALID_PARAM = -1101,
+ LMAC_AF_ERR_PF_NOT_MAPPED = -1102,
+ LMAC_AF_ERR_PERM_DENIED = -1103,
+ LMAC_AF_ERR_PFC_ENADIS_PERM_DENIED = -1104,
+ LMAC_AF_ERR_8023PAUSE_ENADIS_PERM_DENIED = -1105,
+ LMAC_AF_ERR_CMD_TIMEOUT = -1106,
+ LMAC_AF_ERR_FIRMWARE_DATA_NOT_MAPPED = -1107,
};
#endif /* MBOX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
index 407b9477da24..b7ab87c8df92 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -1,16 +1,15 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Marvell OcteonTx2 RVU Admin Function driver
+/* Marvell RVU Admin Function driver
*
- * Copyright (C) 2018 Marvell International Ltd.
+ * Copyright (C) 2018 Marvell.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef NPC_H
#define NPC_H
+#define NPC_KEX_CHAN_MASK 0xFFFULL
+
enum NPC_LID_E {
NPC_LID_LA = 0,
NPC_LID_LB,
@@ -28,11 +27,12 @@ enum npc_kpu_la_ltype {
NPC_LT_LA_8023 = 1,
NPC_LT_LA_ETHER,
NPC_LT_LA_IH_NIX_ETHER,
- NPC_LT_LA_IH_8_ETHER,
- NPC_LT_LA_IH_4_ETHER,
- NPC_LT_LA_IH_2_ETHER,
- NPC_LT_LA_HIGIG2_ETHER,
+ NPC_LT_LA_HIGIG2_ETHER = 7,
NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ NPC_LT_LA_CPT_HDR,
+ NPC_LT_LA_CUSTOM_L2_24B_ETHER,
+ NPC_LT_LA_CUSTOM_PRE_L2_ETHER,
NPC_LT_LA_CUSTOM0 = 0xE,
NPC_LT_LA_CUSTOM1 = 0xF,
};
@@ -42,7 +42,7 @@ enum npc_kpu_lb_ltype {
NPC_LT_LB_CTAG,
NPC_LT_LB_STAG_QINQ,
NPC_LT_LB_BTAG,
- NPC_LT_LB_ITAG,
+ NPC_LT_LB_PPPOE,
NPC_LT_LB_DSA,
NPC_LT_LB_DSA_VLAN,
NPC_LT_LB_EDSA,
@@ -50,12 +50,18 @@ enum npc_kpu_lb_ltype {
NPC_LT_LB_EXDSA,
NPC_LT_LB_EXDSA_VLAN,
NPC_LT_LB_FDSA,
+ NPC_LT_LB_VLAN_EXDSA,
NPC_LT_LB_CUSTOM0 = 0xE,
NPC_LT_LB_CUSTOM1 = 0xF,
};
+/* Don't modify ltypes up to IP6_EXT, otherwise length and checksum of IP
+ * headers may not be checked correctly. IPv4 ltypes and IPv6 ltypes must
+ * differ only at bit 0 so mask 0xE can be used to detect extended headers.
+ */
enum npc_kpu_lc_ltype {
- NPC_LT_LC_IP = 1,
+ NPC_LT_LC_PTP = 1,
+ NPC_LT_LC_IP,
NPC_LT_LC_IP_OPT,
NPC_LT_LC_IP6,
NPC_LT_LC_IP6_EXT,
@@ -63,8 +69,8 @@ enum npc_kpu_lc_ltype {
NPC_LT_LC_RARP,
NPC_LT_LC_MPLS,
NPC_LT_LC_NSH,
- NPC_LT_LC_PTP,
NPC_LT_LC_FCOE,
+ NPC_LT_LC_NGIO,
NPC_LT_LC_CUSTOM0 = 0xE,
NPC_LT_LC_CUSTOM1 = 0xF,
};
@@ -145,8 +151,84 @@ enum npc_kpu_lh_ltype {
* Software assigns pkind for each incoming port such as CGX
* Ethernet interfaces, LBK interfaces, etc.
*/
+#define NPC_UNRESERVED_PKIND_COUNT NPC_RX_CUSTOM_PRE_L2_PKIND
+
enum npc_pkind_type {
- NPC_TX_DEF_PKIND = 63ULL, /* NIX-TX PKIND */
+ NPC_RX_LBK_PKIND = 0ULL,
+ NPC_RX_CUSTOM_PRE_L2_PKIND = 55ULL,
+ NPC_RX_VLAN_EXDSA_PKIND = 56ULL,
+ NPC_RX_CHLEN24B_PKIND = 57ULL,
+ NPC_RX_CPT_HDR_PKIND,
+ NPC_RX_CHLEN90B_PKIND,
+ NPC_TX_HIGIG_PKIND,
+ NPC_RX_HIGIG_PKIND,
+ NPC_RX_EDSA_PKIND,
+ NPC_TX_DEF_PKIND, /* NIX-TX PKIND */
+};
+
+enum npc_interface_type {
+ NPC_INTF_MODE_DEF,
+ NPC_INTF_MODE_EDSA,
+ NPC_INTF_MODE_HIGIG,
+ NPC_INTF_MODE_FDSA,
+};
+
+/* list of known and supported fields in packet header and
+ * fields present in key structure.
+ */
+enum key_fields {
+ NPC_DMAC,
+ NPC_SMAC,
+ NPC_ETYPE,
+ NPC_VLAN_ETYPE_CTAG, /* 0x8100 */
+ NPC_VLAN_ETYPE_STAG, /* 0x88A8 */
+ NPC_OUTER_VID,
+ NPC_TOS,
+ NPC_SIP_IPV4,
+ NPC_DIP_IPV4,
+ NPC_SIP_IPV6,
+ NPC_DIP_IPV6,
+ NPC_IPPROTO_TCP,
+ NPC_IPPROTO_UDP,
+ NPC_IPPROTO_SCTP,
+ NPC_IPPROTO_AH,
+ NPC_IPPROTO_ESP,
+ NPC_IPPROTO_ICMP,
+ NPC_IPPROTO_ICMP6,
+ NPC_SPORT_TCP,
+ NPC_DPORT_TCP,
+ NPC_SPORT_UDP,
+ NPC_DPORT_UDP,
+ NPC_SPORT_SCTP,
+ NPC_DPORT_SCTP,
+ NPC_FDSA_VAL,
+ NPC_HEADER_FIELDS_MAX,
+ NPC_CHAN = NPC_HEADER_FIELDS_MAX, /* Valid when Rx */
+ NPC_PF_FUNC, /* Valid when Tx */
+ NPC_ERRLEV,
+ NPC_ERRCODE,
+ NPC_LXMB,
+ NPC_LA,
+ NPC_LB,
+ NPC_LC,
+ NPC_LD,
+ NPC_LE,
+ NPC_LF,
+ NPC_LG,
+ NPC_LH,
+ /* Ethertype for untagged frame */
+ NPC_ETYPE_ETHER,
+ /* Ethertype for single tagged frame */
+ NPC_ETYPE_TAG1,
+ /* Ethertype for double tagged frame */
+ NPC_ETYPE_TAG2,
+ /* outer vlan tci for single tagged frame */
+ NPC_VLAN_TAG1,
+ /* outer vlan tci for double tagged frame */
+ NPC_VLAN_TAG2,
+ /* other header fields programmed for extraction but not of interest */
+ NPC_UNKNOWN,
+ NPC_KEY_FIELDS_MAX,
};
struct npc_kpu_profile_cam {
@@ -158,7 +240,7 @@ struct npc_kpu_profile_cam {
u16 dp1_mask;
u16 dp2;
u16 dp2_mask;
-};
+} __packed;
struct npc_kpu_profile_action {
u8 errlev;
@@ -178,13 +260,13 @@ struct npc_kpu_profile_action {
u8 mask;
u8 right;
u8 shift;
-};
+} __packed;
struct npc_kpu_profile {
int cam_entries;
int action_entries;
- const struct npc_kpu_profile_cam *cam;
- const struct npc_kpu_profile_action *action;
+ struct npc_kpu_profile_cam *cam;
+ struct npc_kpu_profile_action *action;
};
/* NPC KPU register formats */
@@ -306,6 +388,22 @@ struct nix_rx_action {
#endif
};
+struct nix_tx_action {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 rsvd_63_48 :16;
+ u64 match_id :16;
+ u64 index :20;
+ u64 rsvd_11_8 :8;
+ u64 op :4;
+#else
+ u64 op :4;
+ u64 rsvd_11_8 :8;
+ u64 index :20;
+ u64 match_id :16;
+ u64 rsvd_63_48 :16;
+#endif
+};
+
/* NPC_AF_INTFX_KEX_CFG field masks */
#define NPC_PARSE_NIBBLE GENMASK_ULL(30, 0)
@@ -332,10 +430,41 @@ struct nix_rx_action {
#define NPC_PARSE_NIBBLE_LH_LTYPE BIT_ULL(30)
/* NIX Receive Vtag Action Structure */
-#define VTAG0_VALID_BIT BIT_ULL(15)
-#define VTAG0_TYPE_MASK GENMASK_ULL(14, 12)
-#define VTAG0_LID_MASK GENMASK_ULL(10, 8)
-#define VTAG0_RELPTR_MASK GENMASK_ULL(7, 0)
+#define RX_VTAG0_VALID_BIT BIT_ULL(15)
+#define RX_VTAG0_TYPE_MASK GENMASK_ULL(14, 12)
+#define RX_VTAG0_LID_MASK GENMASK_ULL(10, 8)
+#define RX_VTAG0_RELPTR_MASK GENMASK_ULL(7, 0)
+#define RX_VTAG1_VALID_BIT BIT_ULL(47)
+#define RX_VTAG1_TYPE_MASK GENMASK_ULL(46, 44)
+#define RX_VTAG1_LID_MASK GENMASK_ULL(42, 40)
+#define RX_VTAG1_RELPTR_MASK GENMASK_ULL(39, 32)
+
+/* NIX Transmit Vtag Action Structure */
+#define TX_VTAG0_DEF_MASK GENMASK_ULL(25, 16)
+#define TX_VTAG0_OP_MASK GENMASK_ULL(13, 12)
+#define TX_VTAG0_LID_MASK GENMASK_ULL(10, 8)
+#define TX_VTAG0_RELPTR_MASK GENMASK_ULL(7, 0)
+#define TX_VTAG1_DEF_MASK GENMASK_ULL(57, 48)
+#define TX_VTAG1_OP_MASK GENMASK_ULL(45, 44)
+#define TX_VTAG1_LID_MASK GENMASK_ULL(42, 40)
+#define TX_VTAG1_RELPTR_MASK GENMASK_ULL(39, 32)
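A hedged illustration of composing an RX vtag action from the masks above with FIELD_PREP() from <linux/bitfield.h>; the chosen type/lid/relptr values are examples only, not mandated by the patch:

	u64 vtag_action = RX_VTAG0_VALID_BIT;

	vtag_action |= FIELD_PREP(RX_VTAG0_TYPE_MASK, NIX_AF_LFX_RX_VTAG_TYPE0);
	vtag_action |= FIELD_PREP(RX_VTAG0_LID_MASK, NPC_LID_LB);
	vtag_action |= FIELD_PREP(RX_VTAG0_RELPTR_MASK, 0);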
+
+/* NPC MCAM reserved entry index per nixlf */
+#define NIXLF_UCAST_ENTRY 0
+#define NIXLF_BCAST_ENTRY 1
+#define NIXLF_ALLMULTI_ENTRY 2
+#define NIXLF_PROMISC_ENTRY 3
+
+struct npc_coalesced_kpu_prfl {
+#define NPC_SIGN 0x00666f727063706e
+#define NPC_PRFL_NAME "npc_prfls_array"
+#define NPC_NAME_LEN 32
+ u64 signature; /* "npcprof\0" (8 bytes/ASCII characters) */
+ u8 name[NPC_NAME_LEN]; /* KPU Profile name */
+ u64 version; /* KPU firmware/profile version */
+ u8 num_prfl; /* No of NPC profiles. */
+ u16 prfl_sz[0];
+};
struct npc_mcam_kex {
 /* MKEX Profile Header */
@@ -355,11 +484,42 @@ struct npc_mcam_kex {
u64 intf_ld_flags[NPC_MAX_INTF][NPC_MAX_LD][NPC_MAX_LFL];
} __packed;
+struct npc_kpu_fwdata {
+ int entries;
+ /* What follows is:
+ * struct npc_kpu_profile_cam[entries];
+ * struct npc_kpu_profile_action[entries];
+ */
+ u8 data[0];
+} __packed;
+
+struct rvu_npc_mcam_rule {
+ struct flow_msg packet;
+ struct flow_msg mask;
+ u8 intf;
+ union {
+ struct nix_tx_action tx_action;
+ struct nix_rx_action rx_action;
+ };
+ u64 vtag_action;
+ struct list_head list;
+ u64 features;
+ u16 owner;
+ u16 entry;
+ u16 cntr;
+ bool has_cntr;
+ u8 default_rule;
+ bool enable;
+ bool vfvlan_cfg;
+ u16 chan;
+ u16 chan_mask;
+};
+
struct npc_lt_def {
u8 ltype_mask;
u8 ltype_match;
u8 lid;
-};
+} __packed;
struct npc_lt_def_ipsec {
u8 ltype_mask;
@@ -367,7 +527,30 @@ struct npc_lt_def_ipsec {
u8 lid;
u8 spi_offset;
u8 spi_nz;
-};
+} __packed;
+
+struct npc_lt_def_apad {
+ u8 ltype_mask;
+ u8 ltype_match;
+ u8 lid;
+ u8 valid;
+} __packed;
+
+struct npc_lt_def_color {
+ u8 ltype_mask;
+ u8 ltype_match;
+ u8 lid;
+ u8 noffset;
+ u8 offset;
+} __packed;
+
+struct npc_lt_def_et {
+ u8 ltype_mask;
+ u8 ltype_match;
+ u8 lid;
+ u8 valid;
+ u8 offset;
+} __packed;
struct npc_lt_def_cfg {
struct npc_lt_def rx_ol2;
@@ -386,6 +569,40 @@ struct npc_lt_def_cfg {
struct npc_lt_def pck_oip4;
struct npc_lt_def pck_oip6;
struct npc_lt_def pck_iip4;
-};
+ struct npc_lt_def_apad rx_apad0;
+ struct npc_lt_def_apad rx_apad1;
+ struct npc_lt_def_color ovlan;
+ struct npc_lt_def_color ivlan;
+ struct npc_lt_def_color rx_gen0_color;
+ struct npc_lt_def_color rx_gen1_color;
+ struct npc_lt_def_et rx_et[2];
+} __packed;
+
+/* Loadable KPU profile firmware data */
+struct npc_kpu_profile_fwdata {
+#define KPU_SIGN 0x00666f727075706b
+#define KPU_NAME_LEN 32
+/** Maximum number of custom KPU entries supported by the built-in profile. */
+#define KPU_MAX_CST_ENT 6
+ /* KPU Profile Header */
+ u64 signature; /* "kpuprof\0" (8 bytes/ASCII characters) */
+ u8 name[KPU_NAME_LEN]; /* KPU Profile name */
+ u64 version; /* KPU profile version */
+ u8 kpus;
+ u8 reserved[7];
+
+ /* Default MKEX profile to be used with this KPU profile. May be
+ * overridden with mkex_profile module parameter. Format is same as for
+ * the MKEX profile to streamline processing.
+ */
+ struct npc_mcam_kex mkex;
+ /* LTYPE values for specific HW offloaded protocols. */
+ struct npc_lt_def_cfg lt_def;
+ /* Dynamically sized data:
+ * Custom KPU CAM and ACTION configuration entries.
+ * struct npc_kpu_fwdata kpu[kpus];
+ */
+ u8 data[0];
+} __packed;
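A hedged sketch of walking the variable-length tail (profile is an assumed pointer to a loaded struct npc_kpu_profile_fwdata): data[] holds 'kpus' consecutive blocks, each an npc_kpu_fwdata header followed by its CAM entries and then its ACTION entries.

	struct npc_kpu_fwdata *fw = (struct npc_kpu_fwdata *)profile->data;
	int i;

	for (i = 0; i < profile->kpus; i++) {
		size_t blk = sizeof(*fw) + fw->entries *
			     (sizeof(struct npc_kpu_profile_cam) +
			      sizeof(struct npc_kpu_profile_action));
		/* fw->data: CAM entries first, then ACTION entries */
		fw = (struct npc_kpu_fwdata *)((u8 *)fw + blk);
	}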
#endif /* NPC_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
index 0e4af93be0fb..5b712d0aa327 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
@@ -1,17 +1,17 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Marvell OcteonTx2 RVU Admin Function driver
+/* Marvell RVU Admin Function driver
*
- * Copyright (C) 2018 Marvell International Ltd.
+ * Copyright (C) 2018 Marvell.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef NPC_PROFILE_H
#define NPC_PROFILE_H
-#define NPC_KPU_PROFILE_VER 0x0000000100050000
+#define NPC_KPU_PROFILE_VER 0x0000000100070000
+#define NPC_KPU_VER_MAJ(ver) (u16)(((ver) >> 32) & 0xFFFF)
+#define NPC_KPU_VER_MIN(ver) (u16)(((ver) >> 16) & 0xFFFF)
+#define NPC_KPU_VER_PATCH(ver) (u16)((ver) & 0xFFFF)
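For example, NPC_KPU_PROFILE_VER 0x0000000100070000 decodes as NPC_KPU_VER_MAJ() = 1, NPC_KPU_VER_MIN() = 7 and NPC_KPU_VER_PATCH() = 0, i.e. KPU profile version 1.7.0.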
#define NPC_IH_W 0x8000
#define NPC_IH_UTAG 0x2000
@@ -20,6 +20,7 @@
#define NPC_ETYPE_IP6 0x86dd
#define NPC_ETYPE_ARP 0x0806
#define NPC_ETYPE_RARP 0x8035
+#define NPC_ETYPE_NGIO 0x8842
#define NPC_ETYPE_MPLSU 0x8847
#define NPC_ETYPE_MPLSM 0x8848
#define NPC_ETYPE_ETAG 0x893f
@@ -33,6 +34,10 @@
#define NPC_ETYPE_PPP 0x880b
#define NPC_ETYPE_NSH 0x894f
#define NPC_ETYPE_DSA 0xdada
+#define NPC_ETYPE_PPPOE 0x8864
+
+#define NPC_PPP_IP 0x0021
+#define NPC_PPP_IP6 0x0057
#define NPC_IPNH_HOP 0
#define NPC_IPNH_ICMP 1
@@ -142,7 +147,7 @@
#define NPC_DSA_EDSA 0x8000
#define NPC_DSA_FDSA 0xc000
-#define NPC_KEXOF_DMAC 8
+#define NPC_KEXOF_DMAC 9
#define MKEX_SIGN 0x19bbfdbd15f /* strtoull of "mkexprof" with base:36 */
#define KEX_LD_CFG(bytesm1, hdr_ofs, ena, flags_ena, key_ofs) \
(((bytesm1) << 16) | ((hdr_ofs) << 8) | ((ena) << 7) | \
@@ -150,6 +155,7 @@
/* Rx parse key extract nibble enable */
#define NPC_PARSE_NIBBLE_INTF_RX (NPC_PARSE_NIBBLE_CHAN | \
+ NPC_PARSE_NIBBLE_ERRCODE | \
NPC_PARSE_NIBBLE_LA_LTYPE | \
NPC_PARSE_NIBBLE_LB_LTYPE | \
NPC_PARSE_NIBBLE_LC_LTYPE | \
@@ -170,25 +176,30 @@ enum npc_kpu_parser_state {
NPC_S_KPU1_EXDSA,
NPC_S_KPU1_HIGIG2,
NPC_S_KPU1_IH_NIX_HIGIG2,
+ NPC_S_KPU1_CUSTOM_PRE_L2,
+ NPC_S_KPU1_CPT_HDR,
+ NPC_S_KPU1_VLAN_EXDSA,
NPC_S_KPU2_CTAG,
NPC_S_KPU2_CTAG2,
NPC_S_KPU2_SBTAG,
NPC_S_KPU2_QINQ,
NPC_S_KPU2_ETAG,
- NPC_S_KPU2_ITAG,
- NPC_S_KPU2_PREHEADER,
NPC_S_KPU2_EXDSA,
+ NPC_S_KPU2_CPT_CTAG,
+ NPC_S_KPU2_CPT_QINQ,
NPC_S_KPU3_CTAG,
NPC_S_KPU3_STAG,
NPC_S_KPU3_QINQ,
- NPC_S_KPU3_ITAG,
NPC_S_KPU3_CTAG_C,
NPC_S_KPU3_STAG_C,
NPC_S_KPU3_QINQ_C,
NPC_S_KPU3_DSA,
+ NPC_S_KPU3_VLAN_EXDSA,
NPC_S_KPU4_MPLS,
NPC_S_KPU4_NSH,
NPC_S_KPU4_FDSA,
+ NPC_S_KPU4_VLAN_EXDSA,
+ NPC_S_KPU4_PPPOE,
NPC_S_KPU5_IP,
NPC_S_KPU5_IP6,
NPC_S_KPU5_ARP,
@@ -198,13 +209,20 @@ enum npc_kpu_parser_state {
NPC_S_KPU5_MPLS,
NPC_S_KPU5_MPLS_PL,
NPC_S_KPU5_NSH,
+ NPC_S_KPU5_CPT_IP,
+ NPC_S_KPU5_CPT_IP6,
+ NPC_S_KPU5_NGIO,
NPC_S_KPU6_IP6_EXT,
NPC_S_KPU6_IP6_HOP_DEST,
NPC_S_KPU6_IP6_ROUT,
NPC_S_KPU6_IP6_FRAG,
+ NPC_S_KPU6_IP6_CPT_FRAG,
+ NPC_S_KPU6_IP6_CPT_HOP_DEST,
+ NPC_S_KPU6_IP6_CPT_ROUT,
NPC_S_KPU7_IP6_EXT,
NPC_S_KPU7_IP6_ROUT,
NPC_S_KPU7_IP6_FRAG,
+ NPC_S_KPU7_CPT_IP6_FRAG,
NPC_S_KPU8_TCP,
NPC_S_KPU8_UDP,
NPC_S_KPU8_SCTP,
@@ -265,7 +283,6 @@ enum npc_kpu_la_lflag {
NPC_F_LA_L_UNK_ETYPE = 1,
NPC_F_LA_L_WITH_VLAN,
NPC_F_LA_L_WITH_ETAG,
- NPC_F_LA_L_WITH_ITAG,
NPC_F_LA_L_WITH_MPLS,
NPC_F_LA_L_WITH_NSH,
};
@@ -442,7 +459,28 @@ enum NPC_ERRLEV_E {
NPC_ERRLEV_ENUM_LAST = 16,
};
-static const struct npc_kpu_profile_action ikpu_action_entries[] = {
+#define NPC_KPU_NOP_CAM \
+ { \
+ NPC_S_NA, 0xff, \
+ 0x0000, \
+ 0x0000, \
+ 0x0000, \
+ 0x0000, \
+ 0x0000, \
+ 0x0000, \
+ }
+
+#define NPC_KPU_NOP_ACTION \
+ { \
+ NPC_ERRLEV_RE, NPC_EC_NOERR, \
+ 0, 0, 0, 0, 0, \
+ NPC_S_NA, 0, 0, \
+ NPC_LID_LA, NPC_LT_NA, \
+ 0, \
+ 0, 0, 0, 0, \
+ }
+
+static struct npc_kpu_profile_action ikpu_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
12, 16, 20, 0, 0,
@@ -941,8 +979,8 @@ static const struct npc_kpu_profile_action ikpu_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
12, 16, 20, 0, 0,
- NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_PRE_L2_ETHER,
0,
0, 0, 0, 0,
@@ -950,7 +988,7 @@ static const struct npc_kpu_profile_action ikpu_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
12, 16, 20, 0, 0,
- NPC_S_KPU1_ETHER, 0, 0,
+ NPC_S_KPU1_VLAN_EXDSA, 0, 0,
NPC_LID_LA, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -959,8 +997,8 @@ static const struct npc_kpu_profile_action ikpu_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
12, 16, 20, 0, 0,
- NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 24, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
0,
0, 0, 0, 0,
@@ -968,17 +1006,17 @@ static const struct npc_kpu_profile_action ikpu_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
12, 16, 20, 0, 0,
- NPC_S_KPU1_ETHER, 0, 0,
+ NPC_S_KPU1_CPT_HDR, 40, 0,
NPC_LID_LA, NPC_LT_NA,
0,
- 0, 0, 0, 0,
+ 0, 7, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
12, 16, 20, 0, 0,
- NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 90, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
0,
0, 0, 0, 0,
@@ -1021,7 +1059,13 @@ static const struct npc_kpu_profile_action ikpu_action_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU1_ETHER, 0xff,
NPC_ETYPE_IP,
@@ -1123,7 +1167,7 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_ETHER, 0xff,
- NPC_ETYPE_ITAG,
+ NPC_ETYPE_MPLSU,
0xffff,
0x0000,
0x0000,
@@ -1132,7 +1176,7 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_ETHER, 0xff,
- NPC_ETYPE_MPLSU,
+ NPC_ETYPE_MPLSM,
0xffff,
0x0000,
0x0000,
@@ -1141,7 +1185,7 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_ETHER, 0xff,
- NPC_ETYPE_MPLSM,
+ NPC_ETYPE_NSH,
0xffff,
0x0000,
0x0000,
@@ -1150,7 +1194,7 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_ETHER, 0xff,
- NPC_ETYPE_NSH,
+ NPC_ETYPE_DSA,
0xffff,
0x0000,
0x0000,
@@ -1159,7 +1203,7 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_ETHER, 0xff,
- NPC_ETYPE_DSA,
+ NPC_ETYPE_PPPOE,
0xffff,
0x0000,
0x0000,
@@ -1294,15 +1338,6 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_IH_NIX, 0xff,
- NPC_ETYPE_ITAG,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_IH_NIX, 0xff,
NPC_ETYPE_MPLSU,
0xffff,
0x0000,
@@ -1339,33 +1374,6 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_IH, 0xff,
- NPC_IH_W|NPC_IH_UTAG,
- NPC_IH_W|NPC_IH_UTAG,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_IH, 0xff,
- NPC_IH_W,
- NPC_IH_W|NPC_IH_UTAG,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_IH, 0xff,
- 0x0000,
- NPC_IH_W|NPC_IH_UTAG,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_IH, 0xff,
0x0000,
0x0000,
0x0000,
@@ -1501,15 +1509,6 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_HIGIG2, 0xff,
- NPC_ETYPE_ITAG,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_HIGIG2, 0xff,
NPC_ETYPE_MPLSU,
0xffff,
0x0000,
@@ -1645,7 +1644,7 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
- NPC_ETYPE_ITAG,
+ NPC_ETYPE_MPLSU,
0xffff,
0x0000,
0x0000,
@@ -1654,7 +1653,7 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
- NPC_ETYPE_MPLSU,
+ NPC_ETYPE_MPLSM,
0xffff,
0x0000,
0x0000,
@@ -1663,7 +1662,7 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
- NPC_ETYPE_MPLSM,
+ NPC_ETYPE_NSH,
0xffff,
0x0000,
0x0000,
@@ -1672,6 +1671,132 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
+ NPC_ETYPE_ETAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
NPC_ETYPE_NSH,
0xffff,
0x0000,
@@ -1680,7 +1805,7 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
0x0000,
0x0000,
0x0000,
@@ -1689,6 +1814,51 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU1_CPT_HDR, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CPT_HDR, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CPT_HDR, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CPT_HDR, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_VLAN_EXDSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -1699,7 +1869,13 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu2_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu2_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU2_CTAG, 0xff,
NPC_ETYPE_IP,
@@ -1783,6 +1959,33 @@ static const struct npc_kpu_profile_cam kpu2_cam_entries[] = {
},
{
NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_NGIO,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_PPPOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_PPP_IP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_PPPOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_PPP_IP6,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
0x0000,
0x0000,
0x0000,
@@ -2226,15 +2429,6 @@ static const struct npc_kpu_profile_cam kpu2_cam_entries[] = {
NPC_S_KPU2_ETAG, 0xff,
NPC_ETYPE_SBTAG,
0xffff,
- NPC_ETYPE_ITAG,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ETAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
0x0000,
0x0000,
0x0000,
@@ -2313,159 +2507,6 @@ static const struct npc_kpu_profile_cam kpu2_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_IP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_IP6,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_ARP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_RARP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_IP,
- 0xffff,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_IP6,
- 0xffff,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_ARP,
- 0xffff,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_CTAG,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_IP,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_IP6,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_ARP,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_IP,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_IP6,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_ARP,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_CTAG,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
NPC_S_KPU2_CTAG2, 0xff,
NPC_ETYPE_IP,
0xffff,
@@ -2565,114 +2606,6 @@ static const struct npc_kpu_profile_cam kpu2_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU2_PREHEADER, 0xff,
- NPC_ETYPE_IP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_PREHEADER, 0xff,
- NPC_ETYPE_IP6,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_PREHEADER, 0xff,
- NPC_ETYPE_ARP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_PREHEADER, 0xff,
- NPC_ETYPE_RARP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_PREHEADER, 0xff,
- NPC_ETYPE_PTP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_PREHEADER, 0xff,
- NPC_ETYPE_FCOE,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_PREHEADER, 0xff,
- NPC_ETYPE_CTAG,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_PREHEADER, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_PREHEADER, 0xff,
- NPC_ETYPE_QINQ,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_PREHEADER, 0xff,
- NPC_ETYPE_MPLSU,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_PREHEADER, 0xff,
- NPC_ETYPE_MPLSM,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_PREHEADER, 0xff,
- NPC_ETYPE_NSH,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
NPC_S_KPU2_EXDSA, 0xff,
NPC_DSA_EDSA,
NPC_DSA_EDSA,
@@ -2817,6 +2750,42 @@ static const struct npc_kpu_profile_cam kpu2_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU2_CPT_CTAG, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CPT_CTAG, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CPT_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CPT_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -2827,7 +2796,13 @@ static const struct npc_kpu_profile_cam kpu2_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu3_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu3_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU3_CTAG, 0xff,
NPC_ETYPE_IP,
@@ -3243,159 +3218,6 @@ static const struct npc_kpu_profile_cam kpu3_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_IP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_IP6,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_ARP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_RARP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_IP,
- 0xffff,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_IP6,
- 0xffff,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_ARP,
- 0xffff,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_IP,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_IP6,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_ARP,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_CTAG,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_IP,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_IP6,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_ARP,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_CTAG,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
NPC_S_KPU3_CTAG_C, 0xff,
NPC_ETYPE_IP,
0xffff,
@@ -3936,6 +3758,15 @@ static const struct npc_kpu_profile_cam kpu3_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU3_VLAN_EXDSA, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -3946,7 +3777,13 @@ static const struct npc_kpu_profile_cam kpu3_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu4_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu4_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU4_MPLS, 0xff,
NPC_MPLS_S,
@@ -4084,6 +3921,78 @@ static const struct npc_kpu_profile_cam kpu4_cam_entries[] = {
},
{
NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_PPPOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_PPP_IP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_PPPOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_PPP_IP6,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
0x0000,
NPC_DSA_FDSA,
0x0000,
@@ -4092,6 +4001,87 @@ static const struct npc_kpu_profile_cam kpu4_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU4_VLAN_EXDSA, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_VLAN_EXDSA, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_VLAN_EXDSA, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_VLAN_EXDSA, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_VLAN_EXDSA, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_VLAN_EXDSA, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_VLAN_EXDSA, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_PPPOE, 0xff,
+ NPC_PPP_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_PPPOE, 0xff,
+ NPC_PPP_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -4102,7 +4092,13 @@ static const struct npc_kpu_profile_cam kpu4_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu5_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu5_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU5_IP, 0xff,
0x0000,
@@ -4662,6 +4658,438 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU5_CPT_IP, 0xff,
+ 0x0000,
+ NPC_IP_TTL_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0001,
+ NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_TCP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_UDP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_SCTP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_ICMP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_IGMP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_ESP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_AH,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_GRE,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_IP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_IP6,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_MPLS,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_TCP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_UDP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_SCTP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_ICMP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_IGMP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_ESP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_AH,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_GRE,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_IP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_IP6,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_MPLS,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ 0x0000,
+ NPC_IP6_HOP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_HOP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_DEST << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_ROUT << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_FRAG << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_MOBILITY << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_HOSTID << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_SHIM6 << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_NGIO, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -4672,7 +5100,13 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu6_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu6_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU6_IP6_EXT, 0xff,
0x0000,
@@ -5007,6 +5441,330 @@ static const struct npc_kpu_profile_cam kpu6_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_ROUT << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_FRAG << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_FRAG << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -5017,7 +5775,13 @@ static const struct npc_kpu_profile_cam kpu6_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu7_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu7_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU7_IP6_EXT, 0xff,
0x0000,
@@ -5226,6 +5990,105 @@ static const struct npc_kpu_profile_cam kpu7_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -5236,7 +6099,13 @@ static const struct npc_kpu_profile_cam kpu7_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu8_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU8_TCP, 0xff,
0x0000,
@@ -5977,7 +6846,13 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu9_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu9_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU9_TU_MPLS_IN_GRE, 0xff,
NPC_MPLS_S,
@@ -6378,17 +7253,8 @@ static const struct npc_kpu_profile_cam kpu9_cam_entries[] = {
NPC_S_KPU9_GTPU, 0xff,
0x0000,
0x0000,
- NPC_GTP_PT_GTP | NPC_GTP_VER1 | NPC_GTP_MT_G_PDU,
- NPC_GTP_PT_MASK | NPC_GTP_VER_MASK | NPC_GTP_MT_MASK,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU9_GTPU, 0xff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
+ NPC_GTP_PT_GTP | NPC_GTP_VER1,
+ NPC_GTP_PT_MASK | NPC_GTP_VER_MASK,
0x0000,
0x0000,
},
@@ -6448,7 +7314,13 @@ static const struct npc_kpu_profile_cam kpu9_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu10_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu10_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU10_TU_MPLS, 0xff,
NPC_MPLS_S,
@@ -6613,7 +7485,13 @@ static const struct npc_kpu_profile_cam kpu10_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu11_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu11_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU11_TU_ETHER, 0xff,
NPC_ETYPE_IP,
@@ -6922,7 +7800,13 @@ static const struct npc_kpu_profile_cam kpu11_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu12_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu12_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU12_TU_IP, 0xff,
NPC_IPNH_TCP,
@@ -7177,7 +8061,13 @@ static const struct npc_kpu_profile_cam kpu12_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu13_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu13_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU13_TU_IP6_EXT, 0xff,
0x0000,
@@ -7189,7 +8079,13 @@ static const struct npc_kpu_profile_cam kpu13_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu14_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu14_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU14_TU_IP6_EXT, 0xff,
0x0000,
@@ -7201,7 +8097,13 @@ static const struct npc_kpu_profile_cam kpu14_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu15_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu15_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU15_TU_TCP, 0xff,
0x0000,
@@ -7402,7 +8304,13 @@ static const struct npc_kpu_profile_cam kpu15_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu16_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu16_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU16_TCP_DATA, 0xff,
0x0000,
@@ -7459,7 +8367,13 @@ static const struct npc_kpu_profile_cam kpu16_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu1_action_entries[] = {
+static struct npc_kpu_profile_action kpu1_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 3, 0,
@@ -7518,7 +8432,7 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 4, 8, 0, 0, 0,
+ 4, 8, 12, 0, 0,
NPC_S_KPU2_CTAG, 12, 1,
NPC_LID_LA, NPC_LT_LA_ETHER,
NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
@@ -7550,14 +8464,6 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 18, 22, 26, 0, 0,
- NPC_S_KPU2_ITAG, 12, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER,
- NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ITAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 6, 10, 2, 0,
NPC_S_KPU4_MPLS, 14, 1,
NPC_LID_LA, NPC_LT_LA_ETHER,
@@ -7590,6 +8496,14 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 0, 2, 0,
+ NPC_S_KPU4_PPPOE, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 1,
NPC_LID_LA, NPC_LT_LA_8023,
@@ -7701,17 +8615,7 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = {
8, 12, 26, 0, 0,
NPC_S_KPU2_ETAG, 20, 1,
NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
- NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_TAG
- | NPC_F_LA_L_WITH_ETAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 18, 22, 26, 0, 0,
- NPC_S_KPU2_ITAG, 20, 1,
- NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
- NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_TAG
- | NPC_F_LA_L_WITH_ITAG,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ETAG,
0, 0, 0, 0,
},
{
@@ -7747,30 +8651,6 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = {
0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 12, 14, 16, 0, 0,
- NPC_S_KPU2_PREHEADER, 8, 1,
- NPC_LID_LA, NPC_LT_LA_IH_8_ETHER,
- 0,
- 1, 0xff, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 12, 14, 16, 0, 0,
- NPC_S_KPU2_PREHEADER, 4, 1,
- NPC_LID_LA, NPC_LT_LA_IH_4_ETHER,
- 0,
- 1, 0xff, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 12, 14, 16, 0, 0,
- NPC_S_KPU2_PREHEADER, 2, 1,
- NPC_LID_LA, NPC_LT_LA_IH_2_ETHER,
- 0,
- 1, 0xff, 0, 0,
- },
- {
NPC_ERRLEV_LA, NPC_EC_IH_LENGTH,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 1,
@@ -7788,7 +8668,7 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 4, 8, 16, 2, 0,
+ 4, 8, 12, 2, 0,
NPC_S_KPU4_FDSA, 12, 1,
NPC_LID_LA, NPC_LT_LA_ETHER,
0,
@@ -7891,17 +8771,7 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = {
8, 12, 26, 0, 0,
NPC_S_KPU2_ETAG, 28, 1,
NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
- NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_U_HAS_TAG
- | NPC_F_LA_L_WITH_ETAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 18, 22, 26, 0, 0,
- NPC_S_KPU2_ITAG, 28, 1,
- NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
- NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_U_HAS_TAG
- | NPC_F_LA_L_WITH_ITAG,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ETAG,
0, 0, 0, 0,
},
{
@@ -8025,17 +8895,7 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = {
8, 12, 26, 0, 0,
NPC_S_KPU2_ETAG, 36, 1,
NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
- NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
- | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ETAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 18, 22, 26, 0, 0,
- NPC_S_KPU2_ITAG, 36, 1,
- NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
- NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
- | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ITAG,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ETAG,
0, 0, 0, 0,
},
{
@@ -8075,6 +8935,166 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU5_IP, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU5_IP6, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_ARP, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_RARP, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_PTP, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_FCOE, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 0, 0, 0,
+ NPC_S_KPU2_CTAG2, 12, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_CTAG, 12, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 22, 0, 0,
+ NPC_S_KPU2_SBTAG, 12, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_QINQ, 12, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 26, 0, 0,
+ NPC_S_KPU2_ETAG, 12, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU4_NSH, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU5_CPT_IP, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_CPT_HDR,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU5_CPT_IP6, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_CPT_HDR,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_CPT_CTAG, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_CPT_HDR,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_CPT_QINQ, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_CPT_HDR,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 0, 0, 1, 0,
+ NPC_S_KPU3_VLAN_EXDSA, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LA, NPC_EC_L2_K1,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -8084,7 +9104,13 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu2_action_entries[] = {
+static struct npc_kpu_profile_action kpu2_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 2, 0,
@@ -8159,6 +9185,30 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_NGIO, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_PPPOE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_PPPOE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 1,
NPC_LID_LB, NPC_LT_LB_CTAG,
@@ -8551,14 +9601,6 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 16, 20, 24, 0, 0,
- NPC_S_KPU3_ITAG, 14, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_BTAG_ITAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 6, 0, 0, 0,
NPC_S_KPU3_STAG, 10, 1,
NPC_LID_LB, NPC_LT_LB_ETAG,
@@ -8632,142 +9674,6 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 2, 0,
- NPC_S_KPU5_IP, 20, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
- NPC_S_KPU5_IP6, 20, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU5_ARP, 20, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU5_RARP, 20, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 2, 0,
- NPC_S_KPU5_IP, 28, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG_CTAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
- NPC_S_KPU5_IP6, 28, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG_CTAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU5_ARP, 28, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG_CTAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 2, 0,
- NPC_S_KPU5_IP, 24, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
- NPC_S_KPU5_IP6, 24, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU5_ARP, 24, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 2, 0,
- NPC_S_KPU5_IP, 24, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
- NPC_S_KPU5_IP6, 24, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU5_ARP, 24, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 2, 0,
NPC_S_KPU5_IP, 10, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
0,
@@ -8856,102 +9762,6 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 2, 0,
- NPC_S_KPU5_IP, 14, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
- NPC_S_KPU5_IP6, 14, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU5_ARP, 14, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU5_RARP, 14, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU5_PTP, 14, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU5_FCOE, 14, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 6, 0, 0, 0,
- NPC_S_KPU3_CTAG_C, 14, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 6, 20, 0, 0,
- NPC_S_KPU3_STAG_C, 14, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 6, 0, 0, 0,
- NPC_S_KPU3_QINQ_C, 14, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 6, 10, 1, 0,
- NPC_S_KPU4_MPLS, 14, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 6, 10, 1, 0,
- NPC_S_KPU4_MPLS, 14, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 0, 0, 1, 0,
- NPC_S_KPU4_NSH, 14, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 2, 0,
NPC_S_KPU5_IP, 18, 1,
NPC_LID_LB, NPC_LT_LB_EDSA,
NPC_F_LB_L_EDSA,
@@ -9078,6 +9888,38 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_CPT_IP, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_CPT_IP6, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_CPT_IP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_CPT_IP6, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LB, NPC_EC_L2_K3,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -9087,11 +9929,17 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu3_action_entries[] = {
+static struct npc_kpu_profile_action kpu3_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 1, 0,
- NPC_S_KPU5_IP, 4, 0,
+ NPC_S_KPU5_IP, 6, 0,
NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -9099,7 +9947,7 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
6, 0, 0, 1, 0,
- NPC_S_KPU5_IP6, 4, 0,
+ NPC_S_KPU5_IP6, 6, 0,
NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -9107,7 +9955,7 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 1, 0,
- NPC_S_KPU5_ARP, 4, 0,
+ NPC_S_KPU5_ARP, 6, 0,
NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -9115,7 +9963,7 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 1, 0,
- NPC_S_KPU5_RARP, 4, 0,
+ NPC_S_KPU5_RARP, 6, 0,
NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -9123,7 +9971,7 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 1, 0,
- NPC_S_KPU5_PTP, 4, 0,
+ NPC_S_KPU5_PTP, 6, 0,
NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -9131,7 +9979,7 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 1, 0,
- NPC_S_KPU5_FCOE, 4, 0,
+ NPC_S_KPU5_FCOE, 6, 0,
NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -9139,7 +9987,7 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 6, 10, 0, 0,
- NPC_S_KPU4_MPLS, 4, 0,
+ NPC_S_KPU4_MPLS, 6, 0,
NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -9147,7 +9995,7 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 6, 10, 0, 0,
- NPC_S_KPU4_MPLS, 4, 0,
+ NPC_S_KPU4_MPLS, 6, 0,
NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -9155,7 +10003,7 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 0, 0, 0, 0,
- NPC_S_KPU4_NSH, 4, 0,
+ NPC_S_KPU4_NSH, 6, 0,
NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -9458,142 +10306,6 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 2, 0,
- NPC_S_KPU5_IP, 18, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
- NPC_S_KPU5_IP6, 18, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU5_ARP, 18, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU5_RARP, 18, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 1, 0,
- NPC_S_KPU5_IP, 26, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
- NPC_S_KPU5_IP6, 26, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 1, 0,
- NPC_S_KPU5_ARP, 26, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 1, 0,
- NPC_S_KPU5_IP, 22, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
- NPC_S_KPU5_IP6, 22, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 1, 0,
- NPC_S_KPU5_ARP, 22, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 1, 0,
- NPC_S_KPU5_IP, 22, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
- NPC_S_KPU5_IP6, 22, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 1, 0,
- NPC_S_KPU5_ARP, 22, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 1, 0,
NPC_S_KPU5_IP, 4, 1,
NPC_LID_LB, NPC_LT_LB_CTAG,
@@ -10073,6 +10785,14 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU4_VLAN_EXDSA, 12, 1,
+ NPC_LID_LB, NPC_LT_LB_VLAN_EXDSA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LB, NPC_EC_L2_K3,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -10082,7 +10802,13 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu4_action_entries[] = {
+static struct npc_kpu_profile_action kpu4_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 0,
@@ -10205,6 +10931,70 @@ static const struct npc_kpu_profile_action kpu4_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU5_IP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU5_IP6, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU5_ARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU5_RARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU5_PTP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU5_FCOE, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU5_IP, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_PPPOE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU5_IP6, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_PPPOE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 1,
NPC_LID_LB, NPC_LT_LB_FDSA,
@@ -10212,6 +11002,78 @@ static const struct npc_kpu_profile_action kpu4_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU5_IP, 2, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU5_IP6, 2, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU5_ARP, 2, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU5_RARP, 2, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU5_PTP, 2, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU5_FCOE, 2, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU5_IP, 10, 0,
+ NPC_LID_LB, NPC_LT_LB_PPPOE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU5_IP6, 10, 0,
+ NPC_LID_LB, NPC_LT_LB_PPPOE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LB, NPC_EC_L2_K4,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -10221,7 +11083,13 @@ static const struct npc_kpu_profile_action kpu4_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu5_action_entries[] = {
+static struct npc_kpu_profile_action kpu5_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_LC, NPC_EC_IP_TTL_0,
0, 0, 0, 0, 1,
@@ -10719,6 +11587,390 @@ static const struct npc_kpu_profile_action kpu5_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_LC, NPC_EC_IP_TTL_0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LC, NPC_EC_IP_FRAG_OFFSET_1,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_U_IP_FRAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 2, 0,
+ NPC_S_KPU8_TCP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU8_UDP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_SCTP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_ICMP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_IGMP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU9_ESP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_AH, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU8_GRE, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 6, 0,
+ NPC_S_KPU12_TU_IP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_L_IP_IN_IP,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 6, 0,
+ NPC_S_KPU12_TU_IP6, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_L_6TO4,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 3, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_L_MPLS_IN_IP,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_U_UNK_PROTO,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 2, 0,
+ NPC_S_KPU8_TCP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 2, 0,
+ NPC_S_KPU8_UDP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_SCTP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_ICMP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_IGMP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU9_ESP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_AH, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU8_GRE, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 6, 0,
+ NPC_S_KPU12_TU_IP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ NPC_F_LC_L_IP_IN_IP,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 6, 0,
+ NPC_S_KPU12_TU_IP6, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ NPC_F_LC_L_6TO4,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 3, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ NPC_F_LC_L_MPLS_IN_IP,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ NPC_F_LC_U_UNK_PROTO,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LC, NPC_EC_IP_VER,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LC, NPC_EC_IP6_HOP_0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 2, 0,
+ NPC_S_KPU8_TCP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU8_UDP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_SCTP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_ICMP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_ICMP6, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_GRE, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 6, 0,
+ NPC_S_KPU12_TU_IP6, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ NPC_F_LC_L_IP6_TUN_IP6,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 3, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ NPC_F_LC_L_IP6_MPLS_IN_IP,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_HOP,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_DEST,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU6_IP6_CPT_ROUT, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_ROUT,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 2, 0, 0, 0,
+ NPC_S_KPU6_IP6_CPT_FRAG, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_U_IP6_FRAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU9_ESP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_AH, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_MOBILITY,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_HOSTID,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_SHIM6,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ NPC_F_LC_U_UNK_PROTO,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LC, NPC_EC_IP6_VER,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_NGIO,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LC, NPC_EC_UNK,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -10728,7 +11980,13 @@ static const struct npc_kpu_profile_action kpu5_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu6_action_entries[] = {
+static struct npc_kpu_profile_action kpu6_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
@@ -11026,6 +12284,294 @@ static const struct npc_kpu_profile_action kpu6_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 1, 0,
+ NPC_S_KPU8_TCP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 1, 0,
+ NPC_S_KPU8_UDP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_SCTP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU9_ESP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_AH, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_GRE, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 5, 0,
+ NPC_S_KPU12_TU_IP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 1, 0,
+ NPC_S_KPU8_TCP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 1, 0,
+ NPC_S_KPU8_UDP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_SCTP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU9_ESP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_AH, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_GRE, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 5, 0,
+ NPC_S_KPU12_TU_IP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU7_IP6_ROUT, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 2, 0, 0, 0,
+ NPC_S_KPU7_CPT_IP6_FRAG, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 1, 0,
+ NPC_S_KPU8_TCP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 1, 0,
+ NPC_S_KPU8_UDP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_SCTP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU9_ESP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_AH, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_GRE, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 5, 0,
+ NPC_S_KPU12_TU_IP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 2, 0, 0, 0,
+ NPC_S_KPU7_CPT_IP6_FRAG, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LC, NPC_EC_UNK,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -11035,7 +12581,13 @@ static const struct npc_kpu_profile_action kpu6_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu7_action_entries[] = {
+static struct npc_kpu_profile_action kpu7_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
@@ -11221,6 +12773,94 @@ static const struct npc_kpu_profile_action kpu7_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 0, 0,
+ NPC_S_KPU8_TCP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 0, 0,
+ NPC_S_KPU8_UDP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_SCTP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_ICMP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_ICMP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU9_ESP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_AH, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_GRE, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 4, 0,
+ NPC_S_KPU12_TU_IP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LC, NPC_EC_UNK,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -11230,7 +12870,13 @@ static const struct npc_kpu_profile_action kpu7_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu8_action_entries[] = {
+static struct npc_kpu_profile_action kpu8_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_LD, NPC_EC_TCP_FLAGS_FIN_ONLY,
0, 0, 0, 0, 1,
@@ -11889,7 +13535,13 @@ static const struct npc_kpu_profile_action kpu8_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu9_action_entries[] = {
+static struct npc_kpu_profile_action kpu9_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 0,
@@ -12244,18 +13896,10 @@ static const struct npc_kpu_profile_action kpu9_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 2, 0,
- NPC_S_KPU12_TU_IP, 8, 1,
- NPC_LID_LE, NPC_LT_LE_GTPU,
- NPC_F_LE_L_GTPU_G_PDU,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 0, 1,
+ 8, 0, 6, 2, 1,
NPC_S_NA, 0, 1,
NPC_LID_LE, NPC_LT_LE_GTPU,
- NPC_F_LE_L_GTPU_UNK,
+ 0,
0, 0, 0, 0,
},
{
@@ -12308,7 +13952,13 @@ static const struct npc_kpu_profile_action kpu9_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu10_action_entries[] = {
+static struct npc_kpu_profile_action kpu10_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 1, 0,
@@ -12455,7 +14105,13 @@ static const struct npc_kpu_profile_action kpu10_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu11_action_entries[] = {
+static struct npc_kpu_profile_action kpu11_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 0, 0,
@@ -12730,7 +14386,13 @@ static const struct npc_kpu_profile_action kpu11_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu12_action_entries[] = {
+static struct npc_kpu_profile_action kpu12_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 12, 0, 2, 0,
@@ -12957,7 +14619,13 @@ static const struct npc_kpu_profile_action kpu12_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu13_action_entries[] = {
+static struct npc_kpu_profile_action kpu13_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
@@ -12968,7 +14636,13 @@ static const struct npc_kpu_profile_action kpu13_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu14_action_entries[] = {
+static struct npc_kpu_profile_action kpu14_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
@@ -12979,7 +14653,13 @@ static const struct npc_kpu_profile_action kpu14_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu15_action_entries[] = {
+static struct npc_kpu_profile_action kpu15_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_LG, NPC_EC_TCP_FLAGS_FIN_ONLY,
0, 0, 0, 0, 1,
@@ -13158,7 +14838,13 @@ static const struct npc_kpu_profile_action kpu15_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu16_action_entries[] = {
+static struct npc_kpu_profile_action kpu16_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
@@ -13209,7 +14895,7 @@ static const struct npc_kpu_profile_action kpu16_action_entries[] = {
},
};
-static const struct npc_kpu_profile npc_kpu_profiles[] = {
+static struct npc_kpu_profile npc_kpu_profiles[] = {
{
ARRAY_SIZE(kpu1_cam_entries),
ARRAY_SIZE(kpu1_action_entries),
@@ -13314,6 +15000,16 @@ static const struct npc_lt_def_cfg npc_lt_defaults = {
.ltype_match = NPC_LT_LA_ETHER,
.ltype_mask = 0x0F,
},
+ .ovlan = {
+ .lid = NPC_LID_LB,
+ .ltype_match = NPC_LT_LB_CTAG,
+ .ltype_mask = 0x0F,
+ },
+ .ivlan = {
+ .lid = NPC_LID_LB,
+ .ltype_match = NPC_LT_LB_STAG_QINQ,
+ .ltype_mask = 0x0F,
+ },
.rx_oip4 = {
.lid = NPC_LID_LC,
.ltype_match = NPC_LT_LC_IP,
@@ -13392,9 +15088,35 @@ static const struct npc_lt_def_cfg npc_lt_defaults = {
.ltype_match = NPC_LT_LG_TU_IP,
.ltype_mask = 0x0F,
},
+ .rx_apad0 = {
+ .valid = 0,
+ .lid = NPC_LID_LC,
+ .ltype_match = NPC_LT_LC_IP6,
+ .ltype_mask = 0x0F,
+ },
+ .rx_apad1 = {
+ .valid = 0,
+ .lid = NPC_LID_LC,
+ .ltype_match = NPC_LT_LC_IP6,
+ .ltype_mask = 0x0F,
+ },
+ .rx_et = {
+ {
+ .offset = -2,
+ .valid = 1,
+ .lid = NPC_LID_LC,
+ .ltype_match = NPC_LT_NA,
+ .ltype_mask = 0x0,
+ },
+ {
+ .lid = NPC_LID_LB,
+ .ltype_match = NPC_LT_NA,
+ .ltype_mask = 0x0,
+ },
+ },
};
-static const struct npc_mcam_kex npc_mkex_default = {
+static struct npc_mcam_kex npc_mkex_default = {
.mkex_sign = MKEX_SIGN,
.name = "default",
.kpu_version = NPC_KPU_PROFILE_VER,
@@ -13410,30 +15132,40 @@ static const struct npc_mcam_kex npc_mkex_default = {
[NPC_LID_LA] = {
/* Layer A: Ethernet: */
[NPC_LT_LA_ETHER] = {
- /* DMAC: 6 bytes, KW1[47:0] */
+ /* DMAC: 6 bytes, KW1[55:8] */
KEX_LD_CFG(0x05, 0x0, 0x1, 0x0, NPC_KEXOF_DMAC),
- /* Ethertype: 2 bytes, KW0[47:32] */
- KEX_LD_CFG(0x01, 0xc, 0x1, 0x0, 0x4),
+ /* Ethertype: 2 bytes, KW0[55:40] */
+ KEX_LD_CFG(0x01, 0xc, 0x1, 0x0, 0x5),
+ },
+ /* Layer A: HiGig2: */
+ [NPC_LT_LA_HIGIG2_ETHER] = {
+ /* Classification: 2 bytes, KW1[23:8] */
+ KEX_LD_CFG(0x01, 0x8, 0x1, 0x0, NPC_KEXOF_DMAC),
+ /* VID: 2 bytes, KW1[39:24] */
+ KEX_LD_CFG(0x01, 0xc, 0x1, 0x0,
+ NPC_KEXOF_DMAC + 2),
},
},
[NPC_LID_LB] = {
/* Layer B: Single VLAN (CTAG) */
- /* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */
[NPC_LT_LB_CTAG] = {
- KEX_LD_CFG(0x03, 0x2, 0x1, 0x0, 0x4),
+ /* CTAG VLAN: 2 bytes, KW1[7:0], KW0[63:56] */
+ KEX_LD_CFG(0x01, 0x2, 0x1, 0x0, 0x7),
+ /* Ethertype: 2 bytes, KW0[55:40] */
+ KEX_LD_CFG(0x01, 0x4, 0x1, 0x0, 0x5),
},
/* Layer B: Stacked VLAN (STAG|QinQ) */
[NPC_LT_LB_STAG_QINQ] = {
- /* Outer VLAN: 2 bytes, KW0[63:48] */
- KEX_LD_CFG(0x01, 0x2, 0x1, 0x0, 0x6),
- /* Ethertype: 2 bytes, KW0[47:32] */
- KEX_LD_CFG(0x01, 0x8, 0x1, 0x0, 0x4),
+ /* Outer VLAN: 2 bytes, KW1[7:0], KW0[63:56] */
+ KEX_LD_CFG(0x01, 0x2, 0x1, 0x0, 0x7),
+ /* Ethertype: 2 bytes, KW0[55:40] */
+ KEX_LD_CFG(0x01, 0x8, 0x1, 0x0, 0x5),
},
[NPC_LT_LB_FDSA] = {
- /* SWITCH PORT: 1 byte, KW0[63:48] */
- KEX_LD_CFG(0x0, 0x1, 0x1, 0x0, 0x6),
- /* Ethertype: 2 bytes, KW0[47:32] */
- KEX_LD_CFG(0x01, 0x4, 0x1, 0x0, 0x4),
+ /* SWITCH PORT: 1 byte, KW0[63:56] */
+ KEX_LD_CFG(0x0, 0x1, 0x1, 0x0, 0x7),
+ /* Ethertype: 2 bytes, KW0[55:40] */
+ KEX_LD_CFG(0x01, 0x4, 0x1, 0x0, 0x5),
},
},
[NPC_LID_LC] = {
@@ -13477,6 +15209,13 @@ static const struct npc_mcam_kex npc_mkex_default = {
/* DMAC: 6 bytes, KW1[63:16] */
KEX_LD_CFG(0x05, 0x8, 0x1, 0x0, 0xa),
},
+ /* Layer A: HiGig2: */
+ [NPC_LT_LA_IH_NIX_HIGIG2_ETHER] = {
+ /* PF_FUNC: 2B , KW0 [47:32] */
+ KEX_LD_CFG(0x01, 0x0, 0x1, 0x0, 0x4),
+ /* VID: 2 bytes, KW1[31:16] */
+ KEX_LD_CFG(0x01, 0x10, 0x1, 0x0, 0xa),
+ },
},
[NPC_LID_LB] = {
/* Layer B: Single VLAN (CTAG) */
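For reference, the key-extraction comments above can be cross-checked by expanding one entry. This assumes KEX_LD_CFG() takes its arguments as (bytesm1, hdr_ofs, ena, flags_ena, key_ofs), as declared in the driver's npc.h; the decode below is illustrative, not part of the patch:

	/* KEX_LD_CFG(0x01, 0xc, 0x1, 0x0, 0x5) for NPC_LT_LA_ETHER:
	 *   bytesm1 = 0x01 -> extract bytesm1 + 1 = 2 bytes
	 *   hdr_ofs = 0x0c -> byte 12 of the Ethernet header, the Ethertype
	 *   ena     = 0x1  -> extraction enabled
	 *   key_ofs = 0x05 -> MCAM key bytes 5..6, i.e. KW0[55:40]
	 * which matches the "Ethertype: 2 bytes, KW0[55:40]" comment.
	 */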
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
index f69f4f35ae48..b500b165732b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Marvell PTP driver
*
- * Copyright (C) 2020 Marvell International Ltd.
+ * Copyright (C) 2020 Marvell.
+ *
*/
#include <linux/bitfield.h>
@@ -19,75 +20,147 @@
#define PCI_SUBSYS_DEVID_OCTX2_98xx_PTP 0xB100
#define PCI_SUBSYS_DEVID_OCTX2_96XX_PTP 0xB200
#define PCI_SUBSYS_DEVID_OCTX2_95XX_PTP 0xB300
-#define PCI_SUBSYS_DEVID_OCTX2_LOKI_PTP 0xB400
+#define PCI_SUBSYS_DEVID_OCTX2_95XXN_PTP 0xB400
#define PCI_SUBSYS_DEVID_OCTX2_95MM_PTP 0xB500
+#define PCI_SUBSYS_DEVID_OCTX2_95XXO_PTP 0xB600
#define PCI_DEVID_OCTEONTX2_RST 0xA085
+#define PCI_DEVID_CN10K_PTP 0xA09E
+#define PCI_SUBSYS_DEVID_CN10K_A_PTP 0xB900
+#define PCI_SUBSYS_DEVID_CNF10K_A_PTP 0xBA00
+#define PCI_SUBSYS_DEVID_CNF10K_B_PTP 0xBC00
#define PCI_PTP_BAR_NO 0
-#define PCI_RST_BAR_NO 0
#define PTP_CLOCK_CFG 0xF00ULL
#define PTP_CLOCK_CFG_PTP_EN BIT_ULL(0)
+#define PTP_CLOCK_CFG_EXT_CLK_EN BIT_ULL(1)
+#define PTP_CLOCK_CFG_EXT_CLK_IN_MASK GENMASK_ULL(7, 2)
+#define PTP_CLOCK_CFG_TSTMP_EDGE BIT_ULL(9)
+#define PTP_CLOCK_CFG_TSTMP_EN BIT_ULL(8)
+#define PTP_CLOCK_CFG_TSTMP_IN_MASK GENMASK_ULL(15, 10)
+#define PTP_CLOCK_CFG_PPS_EN BIT_ULL(30)
+#define PTP_CLOCK_CFG_PPS_INV BIT_ULL(31)
+
+#define PTP_PPS_HI_INCR 0xF60ULL
+#define PTP_PPS_LO_INCR 0xF68ULL
+#define PTP_PPS_THRESH_HI 0xF58ULL
+
#define PTP_CLOCK_LO 0xF08ULL
#define PTP_CLOCK_HI 0xF10ULL
#define PTP_CLOCK_COMP 0xF18ULL
+#define PTP_TIMESTAMP 0xF20ULL
+#define PTP_CLOCK_SEC 0xFD0ULL
-#define RST_BOOT 0x1600ULL
-#define RST_MUL_BITS GENMASK_ULL(38, 33)
-#define CLOCK_BASE_RATE 50000000ULL
+#define CYCLE_MULT 1000
-static u64 get_clock_rate(void)
+static struct ptp *first_ptp_block;
+static const struct pci_device_id ptp_id_table[];
+
+static bool cn10k_ptp_errata(struct ptp *ptp)
{
- u64 cfg, ret = CLOCK_BASE_RATE * 16;
- struct pci_dev *pdev;
- void __iomem *base;
+ if (ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_PTP ||
+ ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_A_PTP)
+ return true;
+ return false;
+}
- /* To get the input clock frequency with which PTP co-processor
- * block is running the base frequency(50 MHz) needs to be multiplied
- * with multiplier bits present in RST_BOOT register of RESET block.
- * Hence below code gets the multiplier bits from the RESET PCI
- * device present in the system.
- */
- pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
- PCI_DEVID_OCTEONTX2_RST, NULL);
- if (!pdev)
- goto error;
+static bool is_ptp_tsfmt_sec_nsec(struct ptp *ptp)
+{
+ if (ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_PTP ||
+ ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_A_PTP)
+ return true;
+ return false;
+}
- base = pci_ioremap_bar(pdev, PCI_RST_BAR_NO);
- if (!base)
- goto error_put_pdev;
+static u64 read_ptp_tstmp_sec_nsec(struct ptp *ptp)
+{
+ u64 sec, sec1, nsec;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ptp->ptp_lock, flags);
+ sec = readq(ptp->reg_base + PTP_CLOCK_SEC) & 0xFFFFFFFFUL;
+ nsec = readq(ptp->reg_base + PTP_CLOCK_HI);
+ sec1 = readq(ptp->reg_base + PTP_CLOCK_SEC) & 0xFFFFFFFFUL;
+ /* check nsec rollover */
+ if (sec1 > sec) {
+ nsec = readq(ptp->reg_base + PTP_CLOCK_HI);
+ sec = sec1;
+ }
+ spin_unlock_irqrestore(&ptp->ptp_lock, flags);
- cfg = readq(base + RST_BOOT);
- ret = CLOCK_BASE_RATE * FIELD_GET(RST_MUL_BITS, cfg);
+ return sec * NSEC_PER_SEC + nsec;
+}
- iounmap(base);
+static u64 read_ptp_tstmp_nsec(struct ptp *ptp)
+{
+ return readq(ptp->reg_base + PTP_CLOCK_HI);
+}
-error_put_pdev:
- pci_dev_put(pdev);
+static u64 ptp_calc_adjusted_comp(u64 ptp_clock_freq)
+{
+ u64 comp, adj = 0, cycles_per_sec, ns_drift = 0;
+ u32 ptp_clock_nsec, cycle_time;
+ int cycle;
+
+ /* Errata:
+ * Issue #1: At the time of 1 sec rollover of the nano-second counter,
+ * the nano-second counter is set to 0. However, it should be set to
+ * (existing counter_value - 10^9).
+ *
+ * Issue #2: The nano-second counter rolls over at 0x3B9A_C9FF.
+ * It should roll over at 0x3B9A_CA00.
+ */
-error:
- return ret;
+ /* calculate ptp_clock_comp value */
+ comp = ((u64)1000000000ULL << 32) / ptp_clock_freq;
+ /* use CYCLE_MULT to avoid accuracy loss due to integer arithmetic */
+ cycle_time = NSEC_PER_SEC * CYCLE_MULT / ptp_clock_freq;
+ /* cycles per sec */
+ cycles_per_sec = ptp_clock_freq;
+
+ /* check whether ptp nanosecond counter rolls over early */
+ cycle = cycles_per_sec - 1;
+ ptp_clock_nsec = (cycle * comp) >> 32;
+ while (ptp_clock_nsec < NSEC_PER_SEC) {
+ if (ptp_clock_nsec == 0x3B9AC9FF)
+ goto calc_adj_comp;
+ cycle++;
+ ptp_clock_nsec = (cycle * comp) >> 32;
+ }
+ /* compute nanoseconds lost per second when nsec counter rolls over */
+ ns_drift = ptp_clock_nsec - NSEC_PER_SEC;
+ /* calculate ptp_clock_comp adjustment */
+ if (ns_drift > 0) {
+ adj = comp * ns_drift;
+ adj = adj / 1000000000ULL;
+ }
+ /* speed up the ptp clock to account for nanoseconds lost */
+ comp += adj;
+ return comp;
+
+calc_adj_comp:
+ /* slow down the ptp clock to not rollover early */
+ adj = comp * cycle_time;
+ adj = adj / 1000000000ULL;
+ adj = adj / CYCLE_MULT;
+ comp -= adj;
+
+ return comp;
}
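The value written to PTP_CLOCK_COMP is a Q32.32 fixed-point "nanoseconds per input-clock cycle". Below is a minimal user-space sketch of the base computation that ptp_calc_adjusted_comp() starts from; the 900 MHz frequency is only an assumed example, not a value taken from this patch:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t freq = 900000000ULL;	/* assumed 900 MHz PTP clock */
		/* Q32.32 nanoseconds added per clock cycle */
		uint64_t comp = (1000000000ULL << 32) / freq;
		/* nanosecond counter value on the last cycle of a second */
		uint64_t last_nsec = ((freq - 1) * comp) >> 32;

		printf("comp=%#llx last_nsec=%llu\n",
		       (unsigned long long)comp, (unsigned long long)last_nsec);
		return 0;
	}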
struct ptp *ptp_get(void)
{
- struct pci_dev *pdev;
- struct ptp *ptp;
+ struct ptp *ptp = first_ptp_block;
- /* If the PTP pci device is found on the system and ptp
- * driver is bound to it then the PTP pci device is returned
- * to the caller(rvu driver).
- */
- pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
- PCI_DEVID_OCTEONTX2_PTP, NULL);
- if (!pdev)
+ /* Check PTP block is present in hardware */
+ if (!pci_dev_present(ptp_id_table))
return ERR_PTR(-ENODEV);
- ptp = pci_get_drvdata(pdev);
+ /* Check driver is bound to PTP block */
if (!ptp)
ptp = ERR_PTR(-EPROBE_DEFER);
- if (IS_ERR(ptp))
- pci_dev_put(pdev);
+ else
+ pci_dev_get(ptp->pdev);
return ptp;
}
@@ -103,8 +176,8 @@ void ptp_put(struct ptp *ptp)
static int ptp_adjfine(struct ptp *ptp, long scaled_ppm)
{
bool neg_adj = false;
- u64 comp;
- u64 adj;
+ u32 freq, freq_adj;
+ u64 comp, adj;
s64 ppb;
if (scaled_ppm < 0) {
@@ -126,24 +199,150 @@ static int ptp_adjfine(struct ptp *ptp, long scaled_ppm)
* where tbase is the basic compensation value calculated
 * initially in the probe function.
*/
- comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
/* convert scaled_ppm to ppb */
ppb = 1 + scaled_ppm;
ppb *= 125;
ppb >>= 13;
- adj = comp * ppb;
- adj = div_u64(adj, 1000000000ull);
- comp = neg_adj ? comp - adj : comp + adj;
+ if (cn10k_ptp_errata(ptp)) {
+ /* calculate the new frequency based on ppb */
+ freq_adj = (ptp->clock_rate * ppb) / 1000000000ULL;
+ freq = neg_adj ? ptp->clock_rate + freq_adj : ptp->clock_rate - freq_adj;
+ comp = ptp_calc_adjusted_comp(freq);
+ } else {
+ comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
+ adj = comp * ppb;
+ adj = div_u64(adj, 1000000000ull);
+ comp = neg_adj ? comp - adj : comp + adj;
+ }
writeq(comp, ptp->reg_base + PTP_CLOCK_COMP);
return 0;
}
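The scaled_ppm argument carries 16 fractional bits, so the adjustment in parts-per-billion is scaled_ppm * 1000 / 2^16, which the "*= 125; >>= 13" sequence above computes in integer arithmetic (125 / 2^13 == 1000 / 2^16). A quick stand-alone check (the driver additionally adds 1 before scaling):

	#include <stdio.h>

	int main(void)
	{
		long scaled_ppm = 65536;	/* exactly 1 ppm */
		long long ppb = scaled_ppm;

		ppb *= 125;
		ppb >>= 13;			/* -> 1000 ppb */
		printf("%ld scaled_ppm -> %lld ppb\n", scaled_ppm, ppb);
		return 0;
	}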
-static int ptp_get_clock(struct ptp *ptp, u64 *clk)
+static inline u64 get_tsc(bool is_pmu)
+{
+#if defined(CONFIG_ARM64)
+ return is_pmu ? read_sysreg(pmccntr_el0) : read_sysreg(cntvct_el0);
+#else
+ return 0;
+#endif
+}
+
+static int ptp_get_clock(struct ptp *ptp, bool is_pmu, u64 *clk, u64 *tsc)
{
- /* Return the current PTP clock */
- *clk = readq(ptp->reg_base + PTP_CLOCK_HI);
+ u64 end, start;
+ u8 retries = 0;
+
+ do {
+ start = get_tsc(0);
+ *tsc = get_tsc(is_pmu);
+ *clk = ptp->read_ptp_tstmp(ptp);
+ end = get_tsc(0);
+ retries++;
+ } while (((end - start) > 50) && retries < 5);
+
+ return 0;
+}
+
+/* On CN10K the PTP time is kept in a pair of registers, one for seconds and
+ * one for nanoseconds, whereas on 96xx it is a single register. The
+ * nanosecond register on CN10K rolls over every second.
+ */
+static int ptp_set_clock(struct ptp *ptp, u64 nsec)
+{
+ if (is_ptp_tsfmt_sec_nsec(ptp)) {
+ writeq(nsec / NSEC_PER_SEC, ptp->reg_base + PTP_CLOCK_SEC);
+ writeq(nsec % NSEC_PER_SEC, ptp->reg_base + PTP_CLOCK_HI);
+ } else {
+ writeq(nsec, ptp->reg_base + PTP_CLOCK_HI);
+ }
+
+ return 0;
+}
+
+static int ptp_adj_clock(struct ptp *ptp, s64 delta)
+{
+ u64 regval, sec;
+
+ regval = readq(ptp->reg_base + PTP_CLOCK_HI);
+ regval += delta;
+
+ if (is_ptp_tsfmt_sec_nsec(ptp)) {
+ sec = readq(ptp->reg_base + PTP_CLOCK_SEC) & 0xFFFFFFFFUL;
+ sec += regval / NSEC_PER_SEC;
+ writeq(sec, ptp->reg_base + PTP_CLOCK_SEC);
+ writeq(regval % NSEC_PER_SEC, ptp->reg_base + PTP_CLOCK_HI);
+ } else {
+ writeq(regval, ptp->reg_base + PTP_CLOCK_HI);
+ }
+
+ return 0;
+}
+
+void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts)
+{
+ struct pci_dev *pdev;
+ u64 clock_comp;
+ u64 clock_cfg;
+
+ if (!ptp)
+ return;
+
+ pdev = ptp->pdev;
+
+ if (!sclk) {
+ dev_err(&pdev->dev, "PTP input clock cannot be zero\n");
+ return;
+ }
+
+ /* sclk is in MHz */
+ ptp->clock_rate = sclk * 1000000;
+
+ /* Enable PTP clock */
+ clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
+
+ if (ext_clk_freq) {
+ ptp->clock_rate = ext_clk_freq;
+ /* Set GPIO as PTP clock source */
+ clock_cfg &= ~PTP_CLOCK_CFG_EXT_CLK_IN_MASK;
+ clock_cfg |= PTP_CLOCK_CFG_EXT_CLK_EN;
+ }
+
+ if (extts) {
+ clock_cfg |= PTP_CLOCK_CFG_TSTMP_EDGE;
+ /* Set GPIO as timestamping source */
+ clock_cfg &= ~PTP_CLOCK_CFG_TSTMP_IN_MASK;
+ clock_cfg |= PTP_CLOCK_CFG_TSTMP_EN;
+ }
+
+ clock_cfg |= PTP_CLOCK_CFG_PTP_EN;
+ clock_cfg |= PTP_CLOCK_CFG_PPS_EN | PTP_CLOCK_CFG_PPS_INV;
+ writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
+
+ /* Set 50% duty cycle for 1Hz output */
+ writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_HI_INCR);
+ writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_LO_INCR);
+
+ if (cn10k_ptp_errata(ptp))
+ clock_comp = ptp_calc_adjusted_comp(ptp->clock_rate);
+ else
+ clock_comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
+
+ /* Initial compensation value to start the nanosecs counter */
+ writeq(clock_comp, ptp->reg_base + PTP_CLOCK_COMP);
+}
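On the 1 Hz output: 0x1dcd650000000000 >> 32 = 0x1DCD6500 = 500,000,000, so writing this value to both PTP_PPS_HI_INCR and PTP_PPS_LO_INCR programs half a second high and half a second low per period, i.e. the 50% duty cycle at 1 Hz noted in the comment. This reads the increment registers as Q32.32 nanosecond values, which is an inference from the constant rather than something stated in the patch.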
+
+static int ptp_get_tstmp(struct ptp *ptp, u64 *clk)
+{
+ *clk = readq(ptp->reg_base + PTP_TIMESTAMP);
+
+ return 0;
+}
+
+static int ptp_set_thresh(struct ptp *ptp, u64 thresh)
+{
+ writeq(thresh, ptp->reg_base + PTP_PPS_THRESH_HI);
return 0;
}
@@ -153,8 +352,6 @@ static int ptp_probe(struct pci_dev *pdev,
{
struct device *dev = &pdev->dev;
struct ptp *ptp;
- u64 clock_comp;
- u64 clock_cfg;
int err;
ptp = devm_kzalloc(dev, sizeof(*ptp), GFP_KERNEL);
@@ -175,18 +372,15 @@ static int ptp_probe(struct pci_dev *pdev,
ptp->reg_base = pcim_iomap_table(pdev)[PCI_PTP_BAR_NO];
- ptp->clock_rate = get_clock_rate();
-
- /* Enable PTP clock */
- clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
- clock_cfg |= PTP_CLOCK_CFG_PTP_EN;
- writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
-
- clock_comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
- /* Initial compensation value to start the nanosecs counter */
- writeq(clock_comp, ptp->reg_base + PTP_CLOCK_COMP);
-
pci_set_drvdata(pdev, ptp);
+ if (!first_ptp_block)
+ first_ptp_block = ptp;
+
+ spin_lock_init(&ptp->ptp_lock);
+ if (is_ptp_tsfmt_sec_nsec(ptp))
+ ptp->read_ptp_tstmp = &read_ptp_tstmp_sec_nsec;
+ else
+ ptp->read_ptp_tstmp = &read_ptp_tstmp_nsec;
return 0;
@@ -201,6 +395,9 @@ error:
* `dev->driver_data`.
*/
pci_set_drvdata(pdev, ERR_PTR(err));
+ if (!first_ptp_block)
+ first_ptp_block = ERR_PTR(err);
+
return 0;
}
@@ -230,10 +427,14 @@ static const struct pci_device_id ptp_id_table[] = {
PCI_SUBSYS_DEVID_OCTX2_95XX_PTP) },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
PCI_VENDOR_ID_CAVIUM,
- PCI_SUBSYS_DEVID_OCTX2_LOKI_PTP) },
+ PCI_SUBSYS_DEVID_OCTX2_95XXN_PTP) },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
PCI_VENDOR_ID_CAVIUM,
PCI_SUBSYS_DEVID_OCTX2_95MM_PTP) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_OCTX2_95XXO_PTP) },
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_PTP) },
{ 0, }
};
@@ -264,7 +465,20 @@ int rvu_mbox_handler_ptp_op(struct rvu *rvu, struct ptp_req *req,
err = ptp_adjfine(rvu->ptp, req->scaled_ppm);
break;
case PTP_OP_GET_CLOCK:
- err = ptp_get_clock(rvu->ptp, &rsp->clk);
+ err = ptp_get_clock(rvu->ptp, req->is_pmu, &rsp->clk,
+ &rsp->tsc);
+ break;
+ case PTP_OP_GET_TSTMP:
+ err = ptp_get_tstmp(rvu->ptp, &rsp->clk);
+ break;
+ case PTP_OP_SET_THRESH:
+ err = ptp_set_thresh(rvu->ptp, req->thresh);
+ break;
+ case PTP_OP_SET_CLOCK:
+ err = ptp_set_clock(rvu->ptp, req->nsec);
+ break;
+ case PTP_OP_ADJ_CLOCK:
+ err = ptp_adj_clock(rvu->ptp, req->delta);
break;
default:
err = -EINVAL;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.h b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
index 878bc395d28f..95a955159f40 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
@@ -1,7 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell PTP driver
*
- * Copyright (C) 2020 Marvell International Ltd.
+ * Copyright (C) 2020 Marvell.
+ *
*/
#ifndef PTP_H
@@ -14,11 +15,14 @@
struct ptp {
struct pci_dev *pdev;
void __iomem *reg_base;
+ u64 (*read_ptp_tstmp)(struct ptp *ptp);
+ spinlock_t ptp_lock; /* serializes PTP_CLOCK_SEC/HI register reads */
u32 clock_rate;
};
struct ptp *ptp_get(void);
void ptp_put(struct ptp *ptp);
+void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts);
extern struct pci_driver ptp_driver;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
new file mode 100644
index 000000000000..42669432c438
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
@@ -0,0 +1,464 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell CN10K RPM driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include "cgx.h"
+#include "lmac_common.h"
+
+static struct mac_ops rpm_mac_ops = {
+ .name = "rpm",
+ .csr_offset = 0x4e00,
+ .lmac_offset = 20,
+ .int_register = RPMX_CMRX_SW_INT,
+ .int_set_reg = RPMX_CMRX_SW_INT_ENA_W1S,
+ .irq_offset = 1,
+ .int_ena_bit = BIT_ULL(0),
+ .lmac_fwi = RPM_LMAC_FWI,
+ .non_contiguous_serdes_lane = true,
+ .rx_stats_cnt = 43,
+ .tx_stats_cnt = 34,
+ .get_nr_lmacs = rpm_get_nr_lmacs,
+ .get_lmac_type = rpm_get_lmac_type,
+ .mac_lmac_intl_lbk = rpm_lmac_internal_loopback,
+ .mac_get_rx_stats = rpm_get_rx_stats,
+ .mac_get_tx_stats = rpm_get_tx_stats,
+ .mac_enadis_rx_pause_fwding = rpm_lmac_enadis_rx_pause_fwding,
+ .mac_get_pause_frm_status = rpm_lmac_get_pause_frm_status,
+ .mac_enadis_pause_frm = rpm_lmac_enadis_pause_frm,
+ .mac_pause_frm_config = rpm_lmac_pause_frm_config,
+ .mac_enadis_ptp_config = rpm_lmac_ptp_config,
+ .mac_rx_tx_enable = rpm_lmac_rx_tx_enable,
+ .mac_tx_enable = rpm_lmac_tx_enable,
+ .pfc_config = rpm_lmac_pfc_config,
+ .mac_get_pfc_frm_cfg = rpm_lmac_get_pfc_frm_cfg,
+};
+
+struct mac_ops *rpm_get_mac_ops(void)
+{
+ return &rpm_mac_ops;
+}
+
+static void rpm_write(rpm_t *rpm, u64 lmac, u64 offset, u64 val)
+{
+ cgx_write(rpm, lmac, offset, val);
+}
+
+static u64 rpm_read(rpm_t *rpm, u64 lmac, u64 offset)
+{
+ return cgx_read(rpm, lmac, offset);
+}
+
+int rpm_get_nr_lmacs(void *rpmd)
+{
+ rpm_t *rpm = rpmd;
+
+ return hweight8(rpm_read(rpm, 0, CGXX_CMRX_RX_LMACS) & 0xFULL);
+}
+
+int rpm_lmac_tx_enable(void *rpmd, int lmac_id, bool enable)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg, last;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ last = cfg;
+ if (enable)
+ cfg |= RPM_TX_EN;
+ else
+ cfg &= ~(RPM_TX_EN);
+
+ if (cfg != last)
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+ return !!(last & RPM_TX_EN);
+}
+
+int rpm_lmac_rx_tx_enable(void *rpmd, int lmac_id, bool enable)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ if (enable)
+ cfg |= RPM_RX_EN | RPM_TX_EN;
+ else
+ cfg &= ~(RPM_RX_EN | RPM_TX_EN);
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+ return 0;
+}
+
+void rpm_lmac_enadis_rx_pause_fwding(void *rpmd, int lmac_id, bool enable)
+{
+ rpm_t *rpm = rpmd;
+ struct lmac *lmac;
+ u64 cfg;
+
+ if (!rpm)
+ return;
+
+ lmac = lmac_pdata(lmac_id, rpm);
+ if (!lmac)
+ return;
+
+ /* Pause frames are not enabled, just return */
+ if (!bitmap_weight(lmac->rx_fc_pfvf_bmap.bmap, lmac->rx_fc_pfvf_bmap.max))
+ return;
+
+ if (enable) {
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+ } else {
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+ }
+}
+
+int rpm_lmac_get_pause_frm_status(void *rpmd, int lmac_id,
+ u8 *tx_pause, u8 *rx_pause)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ if (!(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE)) {
+ *rx_pause = !(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE);
+ *tx_pause = !(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE);
+ }
+
+ return 0;
+}
+
+static void rpm_cfg_pfc_quanta_thresh(rpm_t *rpm, int lmac_id, u16 pfc_en,
+ bool enable)
+{
+ u64 quanta_offset = 0, quanta_thresh = 0, cfg;
+ int i, shift;
+
+ /* Set pause time and interval */
+ for_each_set_bit(i, (unsigned long *)&pfc_en, 16) {
+ switch (i) {
+ case 0:
+ case 1:
+ quanta_offset = RPMX_MTI_MAC100X_CL01_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL01_QUANTA_THRESH;
+ break;
+ case 2:
+ case 3:
+ quanta_offset = RPMX_MTI_MAC100X_CL23_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL23_QUANTA_THRESH;
+ break;
+ case 4:
+ case 5:
+ quanta_offset = RPMX_MTI_MAC100X_CL45_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL45_QUANTA_THRESH;
+ break;
+ case 6:
+ case 7:
+ quanta_offset = RPMX_MTI_MAC100X_CL67_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL67_QUANTA_THRESH;
+ break;
+ case 8:
+ case 9:
+ quanta_offset = RPMX_MTI_MAC100X_CL89_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL89_QUANTA_THRESH;
+ break;
+ case 10:
+ case 11:
+ quanta_offset = RPMX_MTI_MAC100X_CL1011_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL1011_QUANTA_THRESH;
+ break;
+ case 12:
+ case 13:
+ quanta_offset = RPMX_MTI_MAC100X_CL1213_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL1213_QUANTA_THRESH;
+ break;
+ case 14:
+ case 15:
+ quanta_offset = RPMX_MTI_MAC100X_CL1415_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL1415_QUANTA_THRESH;
+ break;
+ }
+
+ if (!quanta_offset || !quanta_thresh)
+ continue;
+
+ shift = (i % 2) ? 1 : 0;
+ cfg = rpm_read(rpm, lmac_id, quanta_offset);
+ if (enable) {
+ cfg |= ((u64)RPM_DEFAULT_PAUSE_TIME << shift * 16);
+ } else {
+ if (!shift)
+ cfg &= ~GENMASK_ULL(15, 0);
+ else
+ cfg &= ~GENMASK_ULL(31, 16);
+ }
+ rpm_write(rpm, lmac_id, quanta_offset, cfg);
+
+ cfg = rpm_read(rpm, lmac_id, quanta_thresh);
+ if (enable) {
+ cfg |= ((u64)(RPM_DEFAULT_PAUSE_TIME / 2) << shift * 16);
+ } else {
+ if (!shift)
+ cfg &= ~GENMASK_ULL(15, 0);
+ else
+ cfg &= ~GENMASK_ULL(31, 16);
+ }
+ rpm_write(rpm, lmac_id, quanta_thresh, cfg);
+ }
+}
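Each CLxy pause-quanta/threshold register packs two 16-bit class fields, so the switch above only selects the register pair while the low bit of the class index selects the 16-bit lane. A small stand-alone illustration of that mapping (not part of the driver):

	#include <stdio.h>

	int main(void)
	{
		unsigned int class_id;

		/* Mirrors the shift logic in rpm_cfg_pfc_quanta_thresh():
		 * two PFC classes share one register, odd classes use the
		 * upper 16 bits.
		 */
		for (class_id = 0; class_id < 16; class_id++)
			printf("class %2u -> register pair %u, bits %s\n",
			       class_id, class_id / 2,
			       (class_id % 2) ? "31:16" : "15:0");
		return 0;
	}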
+
+int rpm_lmac_enadis_pause_frm(void *rpmd, int lmac_id, u8 tx_pause,
+ u8 rx_pause)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE;
+ cfg |= rx_pause ? 0x0 : RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE;
+ cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE;
+ cfg |= rx_pause ? 0x0 : RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
+ cfg |= tx_pause ? 0x0 : RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+
+ cfg = rpm_read(rpm, 0, RPMX_CMR_RX_OVR_BP);
+ if (tx_pause) {
+ /* Configure CL0 Pause Quanta & threshold for 802.3X frames */
+ rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 1, true);
+ cfg &= ~RPMX_CMR_RX_OVR_BP_EN(lmac_id);
+ } else {
+ /* Disable all Pause Quanta & threshold values */
+ rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 0xffff, false);
+ cfg |= RPMX_CMR_RX_OVR_BP_EN(lmac_id);
+ cfg &= ~RPMX_CMR_RX_OVR_BP_BP(lmac_id);
+ }
+ rpm_write(rpm, 0, RPMX_CMR_RX_OVR_BP, cfg);
+ return 0;
+}
+
+void rpm_lmac_pause_frm_config(void *rpmd, int lmac_id, bool enable)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg;
+
+ /* ALL pause frames received are completely ignored */
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+
+ /* Disable forward pause to TX block */
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+
+ /* Disable pause frames transmission */
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+
+ /* Enable channel mask for all LMACS */
+ rpm_write(rpm, 0, RPMX_CMR_CHAN_MSK_OR, ~0ULL);
+}
+
+int rpm_get_rx_stats(void *rpmd, int lmac_id, int idx, u64 *rx_stat)
+{
+ rpm_t *rpm = rpmd;
+ u64 val_lo, val_hi;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ mutex_lock(&rpm->lock);
+
+ /* Update idx to point to this LMAC's Rx statistics page */
+ idx += lmac_id * rpm->mac_ops->rx_stats_cnt;
+
+ /* Read lower 32 bits of counter */
+ val_lo = rpm_read(rpm, 0, RPMX_MTI_STAT_RX_STAT_PAGES_COUNTERX +
+ (idx * 8));
+
+ /* Upon reading the lower 32 bits, the upper 32 bits of the counter
+ * are captured in RPMX_MTI_STAT_DATA_HI_CDC
+ */
+ val_hi = rpm_read(rpm, 0, RPMX_MTI_STAT_DATA_HI_CDC);
+
+ *rx_stat = (val_hi << 32 | val_lo);
+
+ mutex_unlock(&rpm->lock);
+ return 0;
+}
+
+int rpm_get_tx_stats(void *rpmd, int lmac_id, int idx, u64 *tx_stat)
+{
+ rpm_t *rpm = rpmd;
+ u64 val_lo, val_hi;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ mutex_lock(&rpm->lock);
+
+ /* Update idx to point to this LMAC's Tx statistics page */
+ idx += lmac_id * rpm->mac_ops->tx_stats_cnt;
+
+ val_lo = rpm_read(rpm, 0, RPMX_MTI_STAT_TX_STAT_PAGES_COUNTERX +
+ (idx * 8));
+ val_hi = rpm_read(rpm, 0, RPMX_MTI_STAT_DATA_HI_CDC);
+
+ *tx_stat = (val_hi << 32 | val_lo);
+
+ mutex_unlock(&rpm->lock);
+ return 0;
+}
+
+u8 rpm_get_lmac_type(void *rpmd, int lmac_id)
+{
+ rpm_t *rpm = rpmd;
+ u64 req = 0, resp;
+ int err;
+
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_LINK_STS, req);
+ err = cgx_fwi_cmd_generic(req, &resp, rpm, 0);
+ if (!err)
+ return FIELD_GET(RESP_LINKSTAT_LMAC_TYPE, resp);
+ return err;
+}
+
+int rpm_lmac_internal_loopback(void *rpmd, int lmac_id, bool enable)
+{
+ rpm_t *rpm = rpmd;
+ u8 lmac_type;
+ u64 cfg;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+ lmac_type = rpm->mac_ops->get_lmac_type(rpm, lmac_id);
+
+ if (lmac_type == LMAC_MODE_QSGMII || lmac_type == LMAC_MODE_SGMII) {
+ dev_err(&rpm->pdev->dev, "loopback not supported for LPC mode\n");
+ return 0;
+ }
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_PCS100X_CONTROL1);
+
+ if (enable)
+ cfg |= RPMX_MTI_PCS_LBK;
+ else
+ cfg &= ~RPMX_MTI_PCS_LBK;
+ rpm_write(rpm, lmac_id, RPMX_MTI_PCS100X_CONTROL1, cfg);
+
+ return 0;
+}
+
+void rpm_lmac_ptp_config(void *rpmd, int lmac_id, bool enable)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return;
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_CMRX_CFG);
+ if (enable) {
+ cfg |= RPMX_RX_TS_PREPEND;
+ cfg |= RPMX_TX_PTP_1S_SUPPORT;
+ } else {
+ cfg &= ~RPMX_RX_TS_PREPEND;
+ cfg &= ~RPMX_TX_PTP_1S_SUPPORT;
+ }
+
+ rpm_write(rpm, lmac_id, RPMX_CMRX_CFG, cfg);
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_XIF_MODE);
+
+ if (enable) {
+ cfg |= RPMX_ONESTEP_ENABLE;
+ cfg &= ~RPMX_TS_BINARY_MODE;
+ } else {
+ cfg &= ~RPMX_ONESTEP_ENABLE;
+ }
+
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_XIF_MODE, cfg);
+}
+
+int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause, u16 pfc_en)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ /* reset PFC class quanta and threshold */
+ rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 0xffff, false);
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+
+ if (rx_pause) {
+ cfg &= ~(RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE |
+ RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE |
+ RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD);
+ } else {
+ cfg |= (RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE |
+ RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE |
+ RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD);
+ }
+
+ if (tx_pause) {
+ rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, pfc_en, true);
+ cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
+ } else {
+ rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 0xfff, false);
+ cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
+ }
+
+ if (!rx_pause && !tx_pause)
+ cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE;
+ else
+ cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE;
+
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL);
+ cfg = FIELD_SET(RPM_PFC_CLASS_MASK, pfc_en, cfg);
+ rpm_write(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL, cfg);
+
+ return 0;
+}
+
+int rpm_lmac_get_pfc_frm_cfg(void *rpmd, int lmac_id, u8 *tx_pause, u8 *rx_pause)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ if (cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE) {
+ *rx_pause = !(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE);
+ *tx_pause = !(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE);
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
new file mode 100644
index 000000000000..398f3d1af499
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell CN10K RPM driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef RPM_H
+#define RPM_H
+
+#include <linux/bits.h>
+
+/* PCI device IDs */
+#define PCI_DEVID_CN10K_RPM 0xA060
+
+/* Registers */
+#define RPMX_CMRX_CFG 0x00
+#define RPMX_RX_TS_PREPEND BIT_ULL(22)
+#define RPMX_TX_PTP_1S_SUPPORT BIT_ULL(17)
+#define RPMX_CMRX_SW_INT 0x180
+#define RPMX_CMRX_SW_INT_W1S 0x188
+#define RPMX_CMRX_SW_INT_ENA_W1S 0x198
+#define RPMX_CMRX_LINK_CFG 0x1070
+#define RPMX_MTI_PCS100X_CONTROL1 0x20000
+#define RPMX_MTI_LPCSX_CONTROL(id) (0x30000 | ((id) * 0x100))
+#define RPMX_MTI_PCS_LBK BIT_ULL(14)
+
+#define RPMX_CMRX_LINK_RANGE_MASK GENMASK_ULL(19, 16)
+#define RPMX_CMRX_LINK_BASE_MASK GENMASK_ULL(11, 0)
+
+#define RPMX_MTI_MAC100X_COMMAND_CONFIG 0x8010
+#define RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE BIT_ULL(29)
+#define RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE BIT_ULL(28)
+#define RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE BIT_ULL(8)
+#define RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE BIT_ULL(19)
+#define RPMX_MTI_MAC100X_CL01_PAUSE_QUANTA 0x80A8
+#define RPMX_MTI_MAC100X_CL23_PAUSE_QUANTA 0x80B0
+#define RPMX_MTI_MAC100X_CL45_PAUSE_QUANTA 0x80B8
+#define RPMX_MTI_MAC100X_CL67_PAUSE_QUANTA 0x80C0
+#define RPMX_MTI_MAC100X_CL01_QUANTA_THRESH 0x80C8
+#define RPMX_MTI_MAC100X_CL23_QUANTA_THRESH 0x80D0
+#define RPMX_MTI_MAC100X_CL45_QUANTA_THRESH 0x80D8
+#define RPMX_MTI_MAC100X_CL67_QUANTA_THRESH 0x80E0
+#define RPMX_MTI_MAC100X_CL89_PAUSE_QUANTA 0x8108
+#define RPMX_MTI_MAC100X_CL1011_PAUSE_QUANTA 0x8110
+#define RPMX_MTI_MAC100X_CL1213_PAUSE_QUANTA 0x8118
+#define RPMX_MTI_MAC100X_CL1415_PAUSE_QUANTA 0x8120
+#define RPMX_MTI_MAC100X_CL89_QUANTA_THRESH 0x8128
+#define RPMX_MTI_MAC100X_CL1011_QUANTA_THRESH 0x8130
+#define RPMX_MTI_MAC100X_CL1213_QUANTA_THRESH 0x8138
+#define RPMX_MTI_MAC100X_CL1415_QUANTA_THRESH 0x8140
+#define RPM_DEFAULT_PAUSE_TIME 0xFFFF
+#define RPMX_CMR_RX_OVR_BP 0x4120
+#define RPMX_CMR_RX_OVR_BP_EN(x) BIT_ULL((x) + 8)
+#define RPMX_CMR_RX_OVR_BP_BP(x) BIT_ULL((x) + 4)
+#define RPMX_CMR_CHAN_MSK_OR 0x4118
+#define RPMX_MTI_STAT_RX_STAT_PAGES_COUNTERX 0x12000
+#define RPMX_MTI_STAT_TX_STAT_PAGES_COUNTERX 0x13000
+#define RPMX_MTI_STAT_DATA_HI_CDC 0x10038
+#define RPM_LMAC_FWI 0xa
+#define RPM_TX_EN BIT_ULL(0)
+#define RPM_RX_EN BIT_ULL(1)
+#define RPMX_CMRX_PRT_CBFC_CTL 0x5B08
+#define RPMX_CMRX_PRT_CBFC_CTL_LOGL_EN_RX_SHIFT 33
+#define RPMX_CMRX_PRT_CBFC_CTL_PHYS_BP_SHIFT 16
+#define RPMX_CMRX_PRT_CBFC_CTL_LOGL_EN_TX_SHIFT 0
+#define RPM_PFC_CLASS_MASK GENMASK_ULL(48, 33)
+#define RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_PAD_EN BIT_ULL(11)
+#define RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD BIT_ULL(7)
+
+#define RPMX_MTI_MAC100X_XIF_MODE 0x8100
+#define RPMX_ONESTEP_ENABLE BIT_ULL(5)
+#define RPMX_TS_BINARY_MODE BIT_ULL(11)
+
+/* Function Declarations */
+int rpm_get_nr_lmacs(void *rpmd);
+u8 rpm_get_lmac_type(void *rpmd, int lmac_id);
+int rpm_lmac_internal_loopback(void *rpmd, int lmac_id, bool enable);
+void rpm_lmac_enadis_rx_pause_fwding(void *rpmd, int lmac_id, bool enable);
+int rpm_lmac_get_pause_frm_status(void *cgxd, int lmac_id, u8 *tx_pause,
+ u8 *rx_pause);
+void rpm_lmac_pause_frm_config(void *rpmd, int lmac_id, bool enable);
+int rpm_lmac_enadis_pause_frm(void *rpmd, int lmac_id, u8 tx_pause,
+ u8 rx_pause);
+int rpm_get_tx_stats(void *rpmd, int lmac_id, int idx, u64 *tx_stat);
+int rpm_get_rx_stats(void *rpmd, int lmac_id, int idx, u64 *rx_stat);
+void rpm_lmac_ptp_config(void *rpmd, int lmac_id, bool enable);
+int rpm_lmac_rx_tx_enable(void *rpmd, int lmac_id, bool enable);
+int rpm_lmac_tx_enable(void *rpmd, int lmac_id, bool enable);
+int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause,
+ u16 pfc_en);
+int rpm_lmac_get_pfc_frm_cfg(void *rpmd, int lmac_id, u8 *tx_pause,
+ u8 *rx_pause);
+#endif /* RPM_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index c26652436c53..4049b616a9eb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -1,11 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
-/* Marvell OcteonTx2 RVU Admin Function driver
+/* Marvell RVU Admin Function driver
*
- * Copyright (C) 2018 Marvell International Ltd.
+ * Copyright (C) 2018 Marvell.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/module.h>
@@ -22,8 +19,8 @@
#include "rvu_trace.h"
-#define DRV_NAME "octeontx2-af"
-#define DRV_STRING "Marvell OcteonTX2 RVU Admin Function Driver"
+#define DRV_NAME "rvu_af"
+#define DRV_STRING "Marvell RVU Admin Function Driver"
static int rvu_get_hwvf(struct rvu *rvu, int pcifunc);
@@ -48,7 +45,7 @@ static const struct pci_device_id rvu_id_table[] = {
{ 0, } /* end of table */
};
-MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
+MODULE_AUTHOR("Marvell.");
MODULE_DESCRIPTION(DRV_STRING);
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, rvu_id_table);
@@ -57,6 +54,10 @@ static char *mkex_profile; /* MKEX profile name */
module_param(mkex_profile, charp, 0000);
MODULE_PARM_DESC(mkex_profile, "MKEX profile name string");
+static char *kpu_profile; /* KPU profile name */
+module_param(kpu_profile, charp, 0000);
+MODULE_PARM_DESC(kpu_profile, "KPU profile name string");
+
static void rvu_setup_hw_capabilities(struct rvu *rvu)
{
struct rvu_hwinfo *hw = rvu->hw;
@@ -66,8 +67,10 @@ static void rvu_setup_hw_capabilities(struct rvu *rvu)
hw->cap.nix_shaping = true;
hw->cap.nix_tx_link_bp = true;
hw->cap.nix_rx_multicast = true;
+ hw->cap.nix_shaper_toggle_wait = false;
+ hw->rvu = rvu;
- if (is_rvu_96xx_B0(rvu)) {
+ if (is_rvu_pre_96xx_C0(rvu)) {
hw->cap.nix_fixed_txschq_mapping = true;
hw->cap.nix_txsch_per_cgx_lmac = 4;
hw->cap.nix_txsch_per_lbk_lmac = 132;
@@ -77,6 +80,11 @@ static void rvu_setup_hw_capabilities(struct rvu *rvu)
if (is_rvu_96xx_A0(rvu))
hw->cap.nix_rx_multicast = false;
}
+ if (!is_rvu_pre_96xx_C0(rvu))
+ hw->cap.nix_shaper_toggle_wait = true;
+
+ if (!is_rvu_otx2(rvu))
+ hw->cap.per_pf_mbox_regs = true;
}
/* Poll a RVU block's register 'offset', for a 'zero'
@@ -186,6 +194,14 @@ int rvu_rsrc_free_count(struct rsrc_bmap *rsrc)
return (rsrc->max - used);
}
+bool is_rsrc_free(struct rsrc_bmap *rsrc, int id)
+{
+ if (!rsrc->bmap)
+ return false;
+
+ return !test_bit(id, rsrc->bmap);
+}
+
int rvu_alloc_bitmap(struct rsrc_bmap *rsrc)
{
rsrc->bmap = kcalloc(BITS_TO_LONGS(rsrc->max),
@@ -195,6 +211,11 @@ int rvu_alloc_bitmap(struct rsrc_bmap *rsrc)
return 0;
}
+void rvu_free_bitmap(struct rsrc_bmap *rsrc)
+{
+ kfree(rsrc->bmap);
+}
+
/* Get block LF's HW index from a PF_FUNC's block slot number */
int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot)
{
@@ -220,6 +241,9 @@ int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot)
* multiple blocks of same type.
*
* @pcifunc has to be zero when no LF is yet attached.
+ *
+ * If a pcifunc has LFs attached from multiple blocks of the same type,
+ * the blkaddr of the first encountered block is returned.
*/
int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc)
{
@@ -257,6 +281,12 @@ int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc)
goto exit;
}
break;
+ case BLKTYPE_REE:
+ if (!pcifunc) {
+ blkaddr = BLKADDR_REE0;
+ goto exit;
+ }
+ break;
}
/* Check if this is a RVU PF or VF */
@@ -268,20 +298,59 @@ int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc)
devnum = rvu_get_pf(pcifunc);
}
- /* Check if the 'pcifunc' has a NIX LF from 'BLKADDR_NIX0' */
+ /* Check if the 'pcifunc' has a NIX LF from 'BLKADDR_NIX0' or
+ * 'BLKADDR_NIX1'.
+ */
if (blktype == BLKTYPE_NIX) {
- reg = is_pf ? RVU_PRIV_PFX_NIX0_CFG : RVU_PRIV_HWVFX_NIX0_CFG;
+ reg = is_pf ? RVU_PRIV_PFX_NIXX_CFG(0) :
+ RVU_PRIV_HWVFX_NIXX_CFG(0);
cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
- if (cfg)
+ if (cfg) {
blkaddr = BLKADDR_NIX0;
+ goto exit;
+ }
+
+ reg = is_pf ? RVU_PRIV_PFX_NIXX_CFG(1) :
+ RVU_PRIV_HWVFX_NIXX_CFG(1);
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
+ if (cfg)
+ blkaddr = BLKADDR_NIX1;
}
- /* Check if the 'pcifunc' has a CPT LF from 'BLKADDR_CPT0' */
if (blktype == BLKTYPE_CPT) {
- reg = is_pf ? RVU_PRIV_PFX_CPT0_CFG : RVU_PRIV_HWVFX_CPT0_CFG;
+ reg = is_pf ? RVU_PRIV_PFX_CPTX_CFG(0) :
+ RVU_PRIV_HWVFX_CPTX_CFG(0);
cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
- if (cfg)
+ if (cfg) {
blkaddr = BLKADDR_CPT0;
+ goto exit;
+ }
+
+ reg = is_pf ? RVU_PRIV_PFX_CPTX_CFG(1) :
+ RVU_PRIV_HWVFX_CPTX_CFG(1);
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
+ if (cfg)
+ blkaddr = BLKADDR_CPT1;
+ }
+
+ /* Check if the 'pcifunc' has a REE LF from 'BLKADDR_REE0' or
+ * 'BLKADDR_REE1'. If pcifunc has REE LFs from both then only
+ * BLKADDR_REE0 is returned.
+ */
+ if (blktype == BLKTYPE_REE) {
+ reg = is_pf ? RVU_PRIV_PFX_REEX_CFG(0) :
+ RVU_PRIV_HWVFX_REEX_CFG(0);
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
+ if (cfg) {
+ blkaddr = BLKADDR_REE0;
+ goto exit;
+ }
+
+ reg = is_pf ? RVU_PRIV_PFX_REEX_CFG(1) :
+ RVU_PRIV_HWVFX_REEX_CFG(1);
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
+ if (cfg)
+ blkaddr = BLKADDR_REE1;
}
exit:
@@ -316,31 +385,44 @@ static void rvu_update_rsrc_map(struct rvu *rvu, struct rvu_pfvf *pfvf,
block->fn_map[lf] = attach ? pcifunc : 0;
- switch (block->type) {
- case BLKTYPE_NPA:
+ switch (block->addr) {
+ case BLKADDR_NPA:
pfvf->npalf = attach ? true : false;
num_lfs = pfvf->npalf;
break;
- case BLKTYPE_NIX:
+ case BLKADDR_NIX0:
+ case BLKADDR_NIX1:
pfvf->nixlf = attach ? true : false;
num_lfs = pfvf->nixlf;
break;
- case BLKTYPE_SSO:
+ case BLKADDR_SSO:
attach ? pfvf->sso++ : pfvf->sso--;
num_lfs = pfvf->sso;
break;
- case BLKTYPE_SSOW:
+ case BLKADDR_SSOW:
attach ? pfvf->ssow++ : pfvf->ssow--;
num_lfs = pfvf->ssow;
break;
- case BLKTYPE_TIM:
+ case BLKADDR_TIM:
attach ? pfvf->timlfs++ : pfvf->timlfs--;
num_lfs = pfvf->timlfs;
break;
- case BLKTYPE_CPT:
+ case BLKADDR_CPT0:
attach ? pfvf->cptlfs++ : pfvf->cptlfs--;
num_lfs = pfvf->cptlfs;
break;
+ case BLKADDR_CPT1:
+ attach ? pfvf->cpt1_lfs++ : pfvf->cpt1_lfs--;
+ num_lfs = pfvf->cpt1_lfs;
+ break;
+ case BLKADDR_REE0:
+ attach ? pfvf->ree0_lfs++ : pfvf->ree0_lfs--;
+ num_lfs = pfvf->ree0_lfs;
+ break;
+ case BLKADDR_REE1:
+ attach ? pfvf->ree1_lfs++ : pfvf->ree1_lfs--;
+ num_lfs = pfvf->ree1_lfs;
+ break;
}
reg = is_pf ? block->pf_lfcnt_reg : block->vf_lfcnt_reg;
@@ -463,12 +545,18 @@ int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf)
static void rvu_block_reset(struct rvu *rvu, int blkaddr, u64 rst_reg)
{
struct rvu_block *block = &rvu->hw->block[blkaddr];
+ int err;
if (!block->implemented)
return;
rvu_write64(rvu, blkaddr, rst_reg, BIT_ULL(0));
- rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true);
+ err = rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true);
+ if (err) {
+		dev_err(rvu->dev, "HW block:%d reset timed out, retrying\n", blkaddr);
+ while (rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true) == -EBUSY)
+ ;
+ }
}
static void rvu_reset_all_blocks(struct rvu *rvu)
@@ -476,13 +564,19 @@ static void rvu_reset_all_blocks(struct rvu *rvu)
/* Do a HW reset of all RVU blocks */
rvu_block_reset(rvu, BLKADDR_NPA, NPA_AF_BLK_RST);
rvu_block_reset(rvu, BLKADDR_NIX0, NIX_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_NIX1, NIX_AF_BLK_RST);
rvu_block_reset(rvu, BLKADDR_NPC, NPC_AF_BLK_RST);
rvu_block_reset(rvu, BLKADDR_SSO, SSO_AF_BLK_RST);
rvu_block_reset(rvu, BLKADDR_TIM, TIM_AF_BLK_RST);
rvu_block_reset(rvu, BLKADDR_CPT0, CPT_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_CPT1, CPT_AF_BLK_RST);
rvu_block_reset(rvu, BLKADDR_NDC_NIX0_RX, NDC_AF_BLK_RST);
rvu_block_reset(rvu, BLKADDR_NDC_NIX0_TX, NDC_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_NDC_NIX1_RX, NDC_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_NDC_NIX1_TX, NDC_AF_BLK_RST);
rvu_block_reset(rvu, BLKADDR_NDC_NPA0, NDC_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_REE0, REE_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_REE1, REE_AF_BLK_RST);
}
static void rvu_scan_block(struct rvu *rvu, struct rvu_block *block)
@@ -624,7 +718,7 @@ setup_vfmsix:
}
/* HW interprets RVU_AF_MSIXTR_BASE address as an IOVA, hence
- * create a IOMMU mapping for the physcial address configured by
+ * create an IOMMU mapping for the physical address configured by
* firmware and reconfig RVU_AF_MSIXTR_BASE with IOVA.
*/
cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
@@ -666,6 +760,8 @@ static void rvu_free_hw_resources(struct rvu *rvu)
rvu_npa_freemem(rvu);
rvu_npc_freemem(rvu);
rvu_nix_freemem(rvu);
+ rvu_sso_freemem(rvu);
+ rvu_ree_freemem(rvu);
/* Free block LF bitmaps */
for (id = 0; id < BLK_COUNT; id++) {
@@ -705,6 +801,10 @@ static void rvu_setup_pfvf_macaddress(struct rvu *rvu)
u64 *mac;
for (pf = 0; pf < hw->total_pfs; pf++) {
+		/* For PF0 (AF), assign MAC addresses only to its VFs (LBK VFs) */
+ if (!pf)
+ goto lbkvf;
+
if (!is_pf_cgxmapped(rvu, pf))
continue;
/* Assign MAC address to PF */
@@ -718,7 +818,9 @@ static void rvu_setup_pfvf_macaddress(struct rvu *rvu)
} else {
eth_random_addr(pfvf->mac_addr);
}
+ ether_addr_copy(pfvf->default_mac, pfvf->mac_addr);
+lbkvf:
/* Assign MAC address to VFs */
rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
for (vf = 0; vf < numvfs; vf++, hwvf++) {
@@ -732,6 +834,7 @@ static void rvu_setup_pfvf_macaddress(struct rvu *rvu)
} else {
eth_random_addr(pfvf->mac_addr);
}
+ ether_addr_copy(pfvf->default_mac, pfvf->mac_addr);
}
}
}
@@ -767,6 +870,121 @@ static void rvu_fwdata_exit(struct rvu *rvu)
iounmap(rvu->fwdata);
}
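+/* Set up the LF bitmap and AF register offsets for a NIX block (NIX0/NIX1).
+ * Returns 0 without doing anything if the block is not implemented.
+ */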
+static int rvu_setup_nix_hw_resource(struct rvu *rvu, int blkaddr)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int blkid;
+ u64 cfg;
+
+ /* Init NIX LF's bitmap */
+ block = &hw->block[blkaddr];
+ if (!block->implemented)
+ return 0;
+ blkid = (blkaddr == BLKADDR_NIX0) ? 0 : 1;
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
+ block->lf.max = cfg & 0xFFF;
+ block->addr = blkaddr;
+ block->type = BLKTYPE_NIX;
+ block->lfshift = 8;
+ block->lookup_reg = NIX_AF_RVU_LF_CFG_DEBUG;
+ block->pf_lfcnt_reg = RVU_PRIV_PFX_NIXX_CFG(blkid);
+ block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NIXX_CFG(blkid);
+ block->lfcfg_reg = NIX_PRIV_LFX_CFG;
+ block->msixcfg_reg = NIX_PRIV_LFX_INT_CFG;
+ block->lfreset_reg = NIX_AF_LF_RST;
+ block->rvu = rvu;
+ sprintf(block->name, "NIX%d", blkid);
+ rvu->nix_blkaddr[blkid] = blkaddr;
+ return rvu_alloc_bitmap(&block->lf);
+}
+
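+/* Set up the LF bitmap and AF register offsets for a CPT block (CPT0/CPT1) */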
+static int rvu_setup_cpt_hw_resource(struct rvu *rvu, int blkaddr)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int blkid;
+ u64 cfg;
+
+ /* Init CPT LF's bitmap */
+ block = &hw->block[blkaddr];
+ if (!block->implemented)
+ return 0;
+ blkid = (blkaddr == BLKADDR_CPT0) ? 0 : 1;
+ cfg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS0);
+ block->lf.max = cfg & 0xFF;
+ block->addr = blkaddr;
+ block->type = BLKTYPE_CPT;
+ block->multislot = true;
+ block->lfshift = 3;
+ block->lookup_reg = CPT_AF_RVU_LF_CFG_DEBUG;
+ block->pf_lfcnt_reg = RVU_PRIV_PFX_CPTX_CFG(blkid);
+ block->vf_lfcnt_reg = RVU_PRIV_HWVFX_CPTX_CFG(blkid);
+ block->lfcfg_reg = CPT_PRIV_LFX_CFG;
+ block->msixcfg_reg = CPT_PRIV_LFX_INT_CFG;
+ block->lfreset_reg = CPT_AF_LF_RST;
+ block->rvu = rvu;
+ sprintf(block->name, "CPT%d", blkid);
+ return rvu_alloc_bitmap(&block->lf);
+}
+
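+/* Set up the LF bitmap and AF register offsets for a REE block (REE0/REE1) */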
+static int rvu_setup_ree_hw_resource(struct rvu *rvu, int blkaddr, int blkid)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int err;
+ u64 cfg;
+
+ /* Init REE LF's bitmap */
+ block = &hw->block[blkaddr];
+ if (!block->implemented)
+ return 0;
+ cfg = rvu_read64(rvu, blkaddr, REE_AF_CONSTANTS);
+ block->lf.max = cfg & 0xFF;
+ block->addr = blkaddr;
+ block->type = BLKTYPE_REE;
+ block->multislot = true;
+ block->lfshift = 3;
+ block->lookup_reg = REE_AF_RVU_LF_CFG_DEBUG;
+ block->pf_lfcnt_reg = RVU_PRIV_PFX_REEX_CFG(blkid);
+ block->vf_lfcnt_reg = RVU_PRIV_HWVFX_REEX_CFG(blkid);
+ block->lfcfg_reg = REE_PRIV_LFX_CFG;
+ block->msixcfg_reg = REE_PRIV_LFX_INT_CFG;
+ block->lfreset_reg = REE_AF_LF_RST;
+ block->rvu = rvu;
+ sprintf(block->name, "REE%d", blkid);
+ err = rvu_alloc_bitmap(&block->lf);
+ if (err)
+ return err;
+ return 0;
+}
+
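+/* Read LBK_CONST from the LBK device's BAR0 and cache the loopback FIFO size */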
+static void rvu_get_lbk_bufsize(struct rvu *rvu)
+{
+ struct pci_dev *pdev = NULL;
+ void __iomem *base;
+ u64 lbk_const;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_LBK, pdev);
+ if (!pdev)
+ return;
+
+ base = pci_ioremap_bar(pdev, 0);
+ if (!base)
+ goto err_put;
+
+ lbk_const = readq(base + LBK_CONST);
+
+ /* cache fifo size */
+ rvu->hw->lbk_bufsize = FIELD_GET(LBK_CONST_BUF_SIZE, lbk_const);
+
+ iounmap(base);
+err_put:
+ pci_dev_put(pdev);
+}
+
+/* Discover implemented RVU HW blocks and initialize their resources */
static int rvu_setup_hw_resources(struct rvu *rvu)
{
struct rvu_hwinfo *hw = rvu->hw;
@@ -780,6 +998,9 @@ static int rvu_setup_hw_resources(struct rvu *rvu)
hw->total_vfs = (cfg >> 20) & 0xFFF;
hw->max_vfs_per_pf = (cfg >> 40) & 0xFF;
+ if (!is_rvu_otx2(rvu))
+ rvu_apr_block_cn10k_init(rvu);
+
/* Init NPA LF's bitmap */
block = &hw->block[BLKADDR_NPA];
if (!block->implemented)
@@ -795,33 +1016,30 @@ static int rvu_setup_hw_resources(struct rvu *rvu)
block->lfcfg_reg = NPA_PRIV_LFX_CFG;
block->msixcfg_reg = NPA_PRIV_LFX_INT_CFG;
block->lfreset_reg = NPA_AF_LF_RST;
+ block->rvu = rvu;
sprintf(block->name, "NPA");
err = rvu_alloc_bitmap(&block->lf);
- if (err)
+ if (err) {
+ dev_err(rvu->dev,
+ "%s: Failed to allocate NPA LF bitmap\n", __func__);
return err;
+ }
nix:
- /* Init NIX LF's bitmap */
- block = &hw->block[BLKADDR_NIX0];
- if (!block->implemented)
- goto sso;
- cfg = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_CONST2);
- block->lf.max = cfg & 0xFFF;
- block->addr = BLKADDR_NIX0;
- block->type = BLKTYPE_NIX;
- block->lfshift = 8;
- block->lookup_reg = NIX_AF_RVU_LF_CFG_DEBUG;
- block->pf_lfcnt_reg = RVU_PRIV_PFX_NIX0_CFG;
- block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NIX0_CFG;
- block->lfcfg_reg = NIX_PRIV_LFX_CFG;
- block->msixcfg_reg = NIX_PRIV_LFX_INT_CFG;
- block->lfreset_reg = NIX_AF_LF_RST;
- sprintf(block->name, "NIX");
- err = rvu_alloc_bitmap(&block->lf);
- if (err)
+ err = rvu_setup_nix_hw_resource(rvu, BLKADDR_NIX0);
+ if (err) {
+ dev_err(rvu->dev,
+ "%s: Failed to allocate NIX0 LFs bitmap\n", __func__);
return err;
+ }
+
+ err = rvu_setup_nix_hw_resource(rvu, BLKADDR_NIX1);
+ if (err) {
+ dev_err(rvu->dev,
+ "%s: Failed to allocate NIX1 LFs bitmap\n", __func__);
+ return err;
+ }
-sso:
/* Init SSO group's bitmap */
block = &hw->block[BLKADDR_SSO];
if (!block->implemented)
@@ -838,10 +1056,14 @@ sso:
block->lfcfg_reg = SSO_PRIV_LFX_HWGRP_CFG;
block->msixcfg_reg = SSO_PRIV_LFX_HWGRP_INT_CFG;
block->lfreset_reg = SSO_AF_LF_HWGRP_RST;
+ block->rvu = rvu;
sprintf(block->name, "SSO GROUP");
err = rvu_alloc_bitmap(&block->lf);
- if (err)
+ if (err) {
+ dev_err(rvu->dev,
+ "%s: Failed to allocate SSO LF bitmap\n", __func__);
return err;
+ }
ssow:
/* Init SSO workslot's bitmap */
@@ -859,10 +1081,14 @@ ssow:
block->lfcfg_reg = SSOW_PRIV_LFX_HWS_CFG;
block->msixcfg_reg = SSOW_PRIV_LFX_HWS_INT_CFG;
block->lfreset_reg = SSOW_AF_LF_HWS_RST;
+ block->rvu = rvu;
sprintf(block->name, "SSOWS");
err = rvu_alloc_bitmap(&block->lf);
- if (err)
+ if (err) {
+ dev_err(rvu->dev,
+ "%s: Failed to allocate SSOW LF bitmap\n", __func__);
return err;
+ }
tim:
/* Init TIM LF's bitmap */
@@ -881,52 +1107,66 @@ tim:
block->lfcfg_reg = TIM_PRIV_LFX_CFG;
block->msixcfg_reg = TIM_PRIV_LFX_INT_CFG;
block->lfreset_reg = TIM_AF_LF_RST;
+ block->rvu = rvu;
sprintf(block->name, "TIM");
err = rvu_alloc_bitmap(&block->lf);
- if (err)
+ if (err) {
+ dev_err(rvu->dev,
+ "%s: Failed to allocate TIM LF bitmap\n", __func__);
return err;
+ }
cpt:
- /* Init CPT LF's bitmap */
- block = &hw->block[BLKADDR_CPT0];
- if (!block->implemented)
- goto init;
- cfg = rvu_read64(rvu, BLKADDR_CPT0, CPT_AF_CONSTANTS0);
- block->lf.max = cfg & 0xFF;
- block->addr = BLKADDR_CPT0;
- block->type = BLKTYPE_CPT;
- block->multislot = true;
- block->lfshift = 3;
- block->lookup_reg = CPT_AF_RVU_LF_CFG_DEBUG;
- block->pf_lfcnt_reg = RVU_PRIV_PFX_CPT0_CFG;
- block->vf_lfcnt_reg = RVU_PRIV_HWVFX_CPT0_CFG;
- block->lfcfg_reg = CPT_PRIV_LFX_CFG;
- block->msixcfg_reg = CPT_PRIV_LFX_INT_CFG;
- block->lfreset_reg = CPT_AF_LF_RST;
- sprintf(block->name, "CPT");
- err = rvu_alloc_bitmap(&block->lf);
+ err = rvu_setup_cpt_hw_resource(rvu, BLKADDR_CPT0);
+ if (err) {
+ dev_err(rvu->dev,
+ "%s: Failed to allocate CPT0 LF bitmap\n", __func__);
+ return err;
+ }
+ err = rvu_setup_cpt_hw_resource(rvu, BLKADDR_CPT1);
+ if (err) {
+ dev_err(rvu->dev,
+ "%s: Failed to allocate CPT1 LF bitmap\n", __func__);
+ return err;
+ }
+
+ /* REE */
+ err = rvu_setup_ree_hw_resource(rvu, BLKADDR_REE0, 0);
+ if (err)
+ return err;
+ err = rvu_setup_ree_hw_resource(rvu, BLKADDR_REE1, 1);
if (err)
return err;
-init:
/* Allocate memory for PFVF data */
rvu->pf = devm_kcalloc(rvu->dev, hw->total_pfs,
sizeof(struct rvu_pfvf), GFP_KERNEL);
- if (!rvu->pf)
+ if (!rvu->pf) {
+ dev_err(rvu->dev,
+ "%s: Failed to allocate memory for PF's rvu_pfvf struct\n", __func__);
return -ENOMEM;
+ }
rvu->hwvf = devm_kcalloc(rvu->dev, hw->total_vfs,
sizeof(struct rvu_pfvf), GFP_KERNEL);
- if (!rvu->hwvf)
+ if (!rvu->hwvf) {
+ dev_err(rvu->dev,
+ "%s: Failed to allocate memory for VF's rvu_pfvf struct\n", __func__);
return -ENOMEM;
+ }
mutex_init(&rvu->rsrc_lock);
- rvu_fwdata_init(rvu);
+ err = rvu_fwdata_init(rvu);
+ if (err)
+ goto msix_err;
err = rvu_setup_msix_resources(rvu);
- if (err)
+ if (err) {
+ dev_err(rvu->dev,
+ "%s: Failed to setup MSIX resources\n", __func__);
return err;
+ }
for (blkid = 0; blkid < BLK_COUNT; blkid++) {
block = &hw->block[blkid];
@@ -947,24 +1187,70 @@ init:
rvu_scan_block(rvu, block);
}
- err = rvu_npc_init(rvu);
+ err = rvu_set_channels_base(rvu);
if (err)
+ goto msix_err;
+
+ err = rvu_npc_init(rvu);
+ if (err) {
+ dev_err(rvu->dev, "%s: Failed to initialize npc\n", __func__);
goto npc_err;
+ }
err = rvu_cgx_init(rvu);
- if (err)
+ if (err) {
+ dev_err(rvu->dev, "%s: Failed to initialize cgx\n", __func__);
goto cgx_err;
+ }
+
+ err = rvu_sso_init(rvu);
+ if (err) {
+ dev_err(rvu->dev, "%s: Failed to initialize sso\n", __func__);
+ goto sso_err;
+ }
+
+ err = rvu_tim_init(rvu);
+ if (err) {
+ dev_err(rvu->dev, "%s: Failed to initialize tim\n", __func__);
+ goto sso_err;
+ }
+
+ err = rvu_sdp_init(rvu);
+ if (err) {
+ dev_err(rvu->dev, "%s: Failed to initialize sdp\n", __func__);
+ goto sso_err;
+ }
/* Assign MACs for CGX mapped functions */
rvu_setup_pfvf_macaddress(rvu);
err = rvu_npa_init(rvu);
- if (err)
+ if (err) {
+ dev_err(rvu->dev, "%s: Failed to initialize npa\n", __func__);
goto npa_err;
+ }
+
+ rvu_get_lbk_bufsize(rvu);
err = rvu_nix_init(rvu);
- if (err)
+ if (err) {
+ dev_err(rvu->dev, "%s: Failed to initialize nix\n", __func__);
+ goto nix_err;
+ }
+
+ err = rvu_ree_init(rvu);
+ if (err) {
+ dev_err(rvu->dev, "%s: Failed to initialize ree\n", __func__);
goto nix_err;
+ }
+
+ err = rvu_cpt_init(rvu);
+ if (err) {
+ dev_err(rvu->dev, "%s: Failed to initialize cpt\n", __func__);
+ goto nix_err;
+ }
+
+ rvu_program_channels(rvu);
return 0;
@@ -972,6 +1258,8 @@ nix_err:
rvu_nix_freemem(rvu);
npa_err:
rvu_npa_freemem(rvu);
+sso_err:
+ rvu_sso_freemem(rvu);
cgx_err:
rvu_cgx_exit(rvu);
npc_err:
@@ -1035,7 +1323,34 @@ int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req,
/* Get current count of a RVU block's LF/slots
* provisioned to a given RVU func.
*/
-static u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blktype)
+u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blkaddr)
+{
+ switch (blkaddr) {
+ case BLKADDR_NPA:
+ return pfvf->npalf ? 1 : 0;
+ case BLKADDR_NIX0:
+ case BLKADDR_NIX1:
+ return pfvf->nixlf ? 1 : 0;
+ case BLKADDR_SSO:
+ return pfvf->sso;
+ case BLKADDR_SSOW:
+ return pfvf->ssow;
+ case BLKADDR_TIM:
+ return pfvf->timlfs;
+ case BLKADDR_CPT0:
+ return pfvf->cptlfs;
+ case BLKADDR_CPT1:
+ return pfvf->cpt1_lfs;
+ case BLKADDR_REE0:
+ return pfvf->ree0_lfs;
+ case BLKADDR_REE1:
+ return pfvf->ree1_lfs;
+ }
+ return 0;
+}
+
+/* Return true if LFs of block type are attached to pcifunc */
+static bool is_blktype_attached(struct rvu_pfvf *pfvf, int blktype)
{
switch (blktype) {
case BLKTYPE_NPA:
@@ -1043,15 +1358,18 @@ static u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blktype)
case BLKTYPE_NIX:
return pfvf->nixlf ? 1 : 0;
case BLKTYPE_SSO:
- return pfvf->sso;
+ return !!pfvf->sso;
case BLKTYPE_SSOW:
- return pfvf->ssow;
+ return !!pfvf->ssow;
case BLKTYPE_TIM:
- return pfvf->timlfs;
+ return !!pfvf->timlfs;
case BLKTYPE_CPT:
- return pfvf->cptlfs;
+ return pfvf->cptlfs || pfvf->cpt1_lfs;
+ case BLKTYPE_REE:
+ return pfvf->ree0_lfs || pfvf->ree1_lfs;
}
- return 0;
+
+ return false;
}
bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype)
@@ -1064,7 +1382,7 @@ bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype)
pfvf = rvu_get_pfvf(rvu, pcifunc);
/* Check if this PFFUNC has a LF of type blktype attached */
- if (!rvu_get_rsrc_mapcount(pfvf, blktype))
+ if (blktype != BLKTYPE_SSO && !is_blktype_attached(pfvf, blktype))
return false;
return true;
@@ -1075,6 +1393,9 @@ static int rvu_lookup_rsrc(struct rvu *rvu, struct rvu_block *block,
{
u64 val;
+ if (block->type == BLKTYPE_TIM)
+ return rvu_tim_lookup_rsrc(rvu, block, pcifunc, slot);
+
val = ((u64)pcifunc << 24) | (slot << 16) | (1ULL << 13);
rvu_write64(rvu, block->addr, block->lookup_reg, val);
/* Wait for the lookup to finish */
@@ -1091,6 +1412,60 @@ static int rvu_lookup_rsrc(struct rvu *rvu, struct rvu_block *block,
return (val & 0xFFF);
}
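+/* Translate a slot number that is global across all blocks of 'blktype'
+ * attached to 'pcifunc' into a block address and the slot within that block.
+ */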
+int rvu_get_blkaddr_from_slot(struct rvu *rvu, int blktype, u16 pcifunc,
+ u16 global_slot, u16 *slot_in_block)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ int numlfs, total_lfs = 0, nr_blocks = 0;
+ int i, num_blkaddr[BLK_COUNT] = { 0 };
+ struct rvu_block *block;
+ int blkaddr = -ENODEV;
+ u16 start_slot;
+
+ if (!is_blktype_attached(pfvf, blktype))
+ return -ENODEV;
+
+	/* Collect in num_blkaddr[] all block addresses from which LFs of
+	 * 'blktype' are attached to the given pcifunc.
+	 */
+ for (blkaddr = BLKADDR_RVUM; blkaddr < BLK_COUNT; blkaddr++) {
+ block = &rvu->hw->block[blkaddr];
+ if (block->type != blktype)
+ continue;
+ if (!is_block_implemented(rvu->hw, blkaddr))
+ continue;
+
+ numlfs = rvu_get_rsrc_mapcount(pfvf, blkaddr);
+ if (numlfs) {
+ total_lfs += numlfs;
+ num_blkaddr[nr_blocks] = blkaddr;
+ nr_blocks++;
+ }
+ }
+
+ if (global_slot >= total_lfs)
+ return -ENODEV;
+
+	/* From the given global slot number, derive the block address among
+	 * the attached blocks and the slot number within that block.
+	 */
+ total_lfs = 0;
+ blkaddr = -ENODEV;
+ for (i = 0; i < nr_blocks; i++) {
+ numlfs = rvu_get_rsrc_mapcount(pfvf, num_blkaddr[i]);
+ total_lfs += numlfs;
+ if (global_slot < total_lfs) {
+ blkaddr = num_blkaddr[i];
+ start_slot = total_lfs - numlfs;
+ *slot_in_block = global_slot - start_slot;
+ break;
+ }
+ }
+
+ return blkaddr;
+}
+
static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
@@ -1103,9 +1478,12 @@ static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype)
if (blkaddr < 0)
return;
+ if (blktype == BLKTYPE_NIX)
+ rvu_nix_reset_mac(pfvf, pcifunc);
+
block = &hw->block[blkaddr];
- num_lfs = rvu_get_rsrc_mapcount(pfvf, block->type);
+ num_lfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
if (!num_lfs)
return;
@@ -1156,6 +1534,8 @@ static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach,
continue;
else if ((blkid == BLKADDR_NIX0) && !detach->nixlf)
continue;
+ else if ((blkid == BLKADDR_NIX1) && !detach->nixlf)
+ continue;
else if ((blkid == BLKADDR_SSO) && !detach->sso)
continue;
else if ((blkid == BLKADDR_SSOW) && !detach->ssow)
@@ -1164,6 +1544,12 @@ static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach,
continue;
else if ((blkid == BLKADDR_CPT0) && !detach->cptlfs)
continue;
+ else if ((blkid == BLKADDR_CPT1) && !detach->cptlfs)
+ continue;
+ else if ((blkid == BLKADDR_REE0) && !detach->reelfs)
+ continue;
+ else if ((blkid == BLKADDR_REE1) && !detach->reelfs)
+ continue;
}
rvu_detach_block(rvu, pcifunc, block->type);
}
@@ -1179,8 +1565,84 @@ int rvu_mbox_handler_detach_resources(struct rvu *rvu,
return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc);
}
-static void rvu_attach_block(struct rvu *rvu, int pcifunc,
- int blktype, int num_lfs)
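+/* Pick the NIX block (NIX0/NIX1) for a pcifunc and cache its NPC RX/TX
+ * interface indices in the rvu_pfvf struct.
+ */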
+int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ int blkaddr = BLKADDR_NIX0, vf;
+ struct rvu_pfvf *pf;
+
+ pf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
+
+ /* All CGX mapped PFs are set with assigned NIX block during init */
+	/* All CGX mapped PFs are assigned a NIX block during init */
+ blkaddr = pf->nix_blkaddr;
+ } else if (is_afvf(pcifunc)) {
+ vf = pcifunc - 1;
+		/* Assign NIX based on VF number: even numbered VFs get
+		 * NIX0 and odd numbered VFs get NIX1.
+		 */
+ blkaddr = (vf & 1) ? BLKADDR_NIX1 : BLKADDR_NIX0;
+ /* NIX1 is not present on all silicons */
+ if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
+ blkaddr = BLKADDR_NIX0;
+ }
+
+ /* if SDP1 then the blkaddr is NIX1 */
+ if (is_sdp_pfvf(pcifunc) && pf->sdp_info->node_id == 1)
+ blkaddr = BLKADDR_NIX1;
+
+ switch (blkaddr) {
+ case BLKADDR_NIX1:
+ pfvf->nix_blkaddr = BLKADDR_NIX1;
+ pfvf->nix_rx_intf = NIX_INTFX_RX(1);
+ pfvf->nix_tx_intf = NIX_INTFX_TX(1);
+ break;
+ case BLKADDR_NIX0:
+ default:
+ pfvf->nix_blkaddr = BLKADDR_NIX0;
+ pfvf->nix_rx_intf = NIX_INTFX_RX(0);
+ pfvf->nix_tx_intf = NIX_INTFX_TX(0);
+ break;
+ }
+
+ return pfvf->nix_blkaddr;
+}
+
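+/* Resolve the block address from which LFs of 'blktype' should be attached,
+ * honouring the block address requested in the attach mailbox message.
+ */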
+static int rvu_get_attach_blkaddr(struct rvu *rvu, int blktype,
+ u16 pcifunc, struct rsrc_attach *attach)
+{
+ int blkaddr;
+
+ switch (blktype) {
+ case BLKTYPE_NIX:
+ blkaddr = rvu_get_nix_blkaddr(rvu, pcifunc);
+ break;
+ case BLKTYPE_CPT:
+ if (attach->hdr.ver < RVU_MULTI_BLK_VER)
+ return rvu_get_blkaddr(rvu, blktype, 0);
+ blkaddr = attach->cpt_blkaddr ? attach->cpt_blkaddr :
+ BLKADDR_CPT0;
+ if (blkaddr != BLKADDR_CPT0 && blkaddr != BLKADDR_CPT1)
+ return -ENODEV;
+ break;
+ case BLKTYPE_REE:
+ blkaddr = attach->ree_blkaddr ? attach->ree_blkaddr :
+ BLKADDR_REE0;
+ if (blkaddr != BLKADDR_REE0 && blkaddr != BLKADDR_REE1)
+ return -ENODEV;
+ break;
+ default:
+ return rvu_get_blkaddr(rvu, blktype, 0);
+ }
+
+ if (is_block_implemented(rvu->hw, blkaddr))
+ return blkaddr;
+
+ return -ENODEV;
+}
+
+static void rvu_attach_block(struct rvu *rvu, int pcifunc, int blktype,
+ int num_lfs, struct rsrc_attach *attach)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
struct rvu_hwinfo *hw = rvu->hw;
@@ -1192,7 +1654,7 @@ static void rvu_attach_block(struct rvu *rvu, int pcifunc,
if (!num_lfs)
return;
- blkaddr = rvu_get_blkaddr(rvu, blktype, 0);
+ blkaddr = rvu_get_attach_blkaddr(rvu, blktype, pcifunc, attach);
if (blkaddr < 0)
return;
@@ -1221,12 +1683,20 @@ static int rvu_check_rsrc_availability(struct rvu *rvu,
struct rsrc_attach *req, u16 pcifunc)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ int free_lfs, mappedlfs, blkaddr;
struct rvu_hwinfo *hw = rvu->hw;
struct rvu_block *block;
- int free_lfs, mappedlfs;
+ int ret;
+
+ ret = rvu_check_rsrc_policy(rvu, req, pcifunc);
+ if (ret) {
+ dev_err(rvu->dev, "Func 0x%x: Resource policy check failed\n",
+ pcifunc);
+ return ret;
+ }
/* Only one NPA LF can be attached */
- if (req->npalf && !rvu_get_rsrc_mapcount(pfvf, BLKTYPE_NPA)) {
+ if (req->npalf && !is_blktype_attached(pfvf, BLKTYPE_NPA)) {
block = &hw->block[BLKADDR_NPA];
free_lfs = rvu_rsrc_free_count(&block->lf);
if (!free_lfs)
@@ -1239,8 +1709,12 @@ static int rvu_check_rsrc_availability(struct rvu *rvu,
}
/* Only one NIX LF can be attached */
- if (req->nixlf && !rvu_get_rsrc_mapcount(pfvf, BLKTYPE_NIX)) {
- block = &hw->block[BLKADDR_NIX0];
+ if (req->nixlf && !is_blktype_attached(pfvf, BLKTYPE_NIX)) {
+ blkaddr = rvu_get_attach_blkaddr(rvu, BLKTYPE_NIX,
+ pcifunc, req);
+ if (blkaddr < 0)
+ return blkaddr;
+ block = &hw->block[blkaddr];
free_lfs = rvu_rsrc_free_count(&block->lf);
if (!free_lfs)
goto fail;
@@ -1260,7 +1734,7 @@ static int rvu_check_rsrc_availability(struct rvu *rvu,
pcifunc, req->sso, block->lf.max);
return -EINVAL;
}
- mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
+ mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
free_lfs = rvu_rsrc_free_count(&block->lf);
/* Check if additional resources are available */
if (req->sso > mappedlfs &&
@@ -1276,7 +1750,7 @@ static int rvu_check_rsrc_availability(struct rvu *rvu,
pcifunc, req->sso, block->lf.max);
return -EINVAL;
}
- mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
+ mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
free_lfs = rvu_rsrc_free_count(&block->lf);
if (req->ssow > mappedlfs &&
((req->ssow - mappedlfs) > free_lfs))
@@ -1291,7 +1765,7 @@ static int rvu_check_rsrc_availability(struct rvu *rvu,
pcifunc, req->timlfs, block->lf.max);
return -EINVAL;
}
- mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
+ mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
free_lfs = rvu_rsrc_free_count(&block->lf);
if (req->timlfs > mappedlfs &&
((req->timlfs - mappedlfs) > free_lfs))
@@ -1299,20 +1773,43 @@ static int rvu_check_rsrc_availability(struct rvu *rvu,
}
if (req->cptlfs) {
- block = &hw->block[BLKADDR_CPT0];
+ blkaddr = rvu_get_attach_blkaddr(rvu, BLKTYPE_CPT,
+ pcifunc, req);
+ if (blkaddr < 0)
+ return blkaddr;
+ block = &hw->block[blkaddr];
if (req->cptlfs > block->lf.max) {
dev_err(&rvu->pdev->dev,
"Func 0x%x: Invalid CPTLF req, %d > max %d\n",
pcifunc, req->cptlfs, block->lf.max);
return -EINVAL;
}
- mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
+ mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
free_lfs = rvu_rsrc_free_count(&block->lf);
if (req->cptlfs > mappedlfs &&
((req->cptlfs - mappedlfs) > free_lfs))
goto fail;
}
+ if (req->hdr.ver >= RVU_MULTI_BLK_VER && req->reelfs) {
+ blkaddr = rvu_get_attach_blkaddr(rvu, BLKTYPE_REE,
+ pcifunc, req);
+ if (blkaddr < 0)
+ return blkaddr;
+ block = &hw->block[blkaddr];
+ if (req->reelfs > block->lf.max) {
+ dev_err(&rvu->pdev->dev,
+ "Func 0x%x: Invalid REELF req, %d > max %d\n",
+ pcifunc, req->reelfs, block->lf.max);
+ return -EINVAL;
+ }
+ mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
+ free_lfs = rvu_rsrc_free_count(&block->lf);
+ if (req->reelfs > mappedlfs &&
+ ((req->reelfs - mappedlfs) > free_lfs))
+ goto fail;
+ }
+
return 0;
fail:
@@ -1320,6 +1817,22 @@ fail:
return -ENOSPC;
}
+static bool rvu_attach_from_same_block(struct rvu *rvu, int blktype,
+ struct rsrc_attach *attach)
+{
+ int blkaddr, num_lfs;
+
+ blkaddr = rvu_get_attach_blkaddr(rvu, blktype,
+ attach->hdr.pcifunc, attach);
+ if (blkaddr < 0)
+ return false;
+
+ num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, attach->hdr.pcifunc),
+ blkaddr);
+	/* Does the requester already have LFs from the given block? */
+ return !!num_lfs;
+}
+
int rvu_mbox_handler_attach_resources(struct rvu *rvu,
struct rsrc_attach *attach,
struct msg_rsp *rsp)
@@ -1340,10 +1853,10 @@ int rvu_mbox_handler_attach_resources(struct rvu *rvu,
/* Now attach the requested resources */
if (attach->npalf)
- rvu_attach_block(rvu, pcifunc, BLKTYPE_NPA, 1);
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_NPA, 1, attach);
if (attach->nixlf)
- rvu_attach_block(rvu, pcifunc, BLKTYPE_NIX, 1);
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_NIX, 1, attach);
if (attach->sso) {
/* RVU func doesn't know which exact LF or slot is attached
@@ -1353,25 +1866,38 @@ int rvu_mbox_handler_attach_resources(struct rvu *rvu,
*/
if (attach->modify)
rvu_detach_block(rvu, pcifunc, BLKTYPE_SSO);
- rvu_attach_block(rvu, pcifunc, BLKTYPE_SSO, attach->sso);
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_SSO,
+ attach->sso, attach);
}
if (attach->ssow) {
if (attach->modify)
rvu_detach_block(rvu, pcifunc, BLKTYPE_SSOW);
- rvu_attach_block(rvu, pcifunc, BLKTYPE_SSOW, attach->ssow);
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_SSOW,
+ attach->ssow, attach);
}
if (attach->timlfs) {
if (attach->modify)
rvu_detach_block(rvu, pcifunc, BLKTYPE_TIM);
- rvu_attach_block(rvu, pcifunc, BLKTYPE_TIM, attach->timlfs);
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_TIM,
+ attach->timlfs, attach);
}
if (attach->cptlfs) {
- if (attach->modify)
+ if (attach->modify &&
+ rvu_attach_from_same_block(rvu, BLKTYPE_CPT, attach))
rvu_detach_block(rvu, pcifunc, BLKTYPE_CPT);
- rvu_attach_block(rvu, pcifunc, BLKTYPE_CPT, attach->cptlfs);
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_CPT,
+ attach->cptlfs, attach);
+ }
+
+ if (attach->hdr.ver >= RVU_MULTI_BLK_VER && attach->reelfs) {
+ if (attach->modify &&
+ rvu_attach_from_same_block(rvu, BLKTYPE_REE, attach))
+ rvu_detach_block(rvu, pcifunc, BLKTYPE_REE);
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_REE,
+ attach->reelfs, attach);
}
exit:
@@ -1434,6 +1960,8 @@ static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
(lf << block->lfshift), cfg & ~0x7FFULL);
offset = rvu_get_msix_offset(rvu, pfvf, block->addr, lf);
+ if (offset == MSIX_VECTOR_INVALID)
+ return;
/* Update the mapping */
for (vec = 0; vec < nvecs; vec++)
@@ -1449,7 +1977,7 @@ int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req,
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
struct rvu_pfvf *pfvf;
- int lf, slot;
+ int lf, slot, blkaddr;
pfvf = rvu_get_pfvf(rvu, pcifunc);
if (!pfvf->msix.bmap)
@@ -1459,8 +1987,14 @@ int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req,
lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NPA], pcifunc, 0);
rsp->npa_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NPA, lf);
- lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NIX0], pcifunc, 0);
- rsp->nix_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NIX0, lf);
+ /* Get BLKADDR from which LFs are attached to pcifunc */
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0) {
+ rsp->nix_msixoff = MSIX_VECTOR_INVALID;
+ } else {
+ lf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+ rsp->nix_msixoff = rvu_get_msix_offset(rvu, pfvf, blkaddr, lf);
+ }
rsp->sso = pfvf->sso;
for (slot = 0; slot < rsp->sso; slot++) {
@@ -1489,6 +2023,28 @@ int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req,
rsp->cptlf_msixoff[slot] =
rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT0, lf);
}
+
+ rsp->cpt1_lfs = pfvf->cpt1_lfs;
+ for (slot = 0; slot < rsp->cpt1_lfs; slot++) {
+ lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT1], pcifunc, slot);
+ rsp->cpt1_lf_msixoff[slot] =
+ rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT1, lf);
+ }
+
+ rsp->ree0_lfs = pfvf->ree0_lfs;
+ for (slot = 0; slot < rsp->ree0_lfs; slot++) {
+ lf = rvu_get_lf(rvu, &hw->block[BLKADDR_REE0], pcifunc, slot);
+ rsp->ree0_lf_msixoff[slot] =
+ rvu_get_msix_offset(rvu, pfvf, BLKADDR_REE0, lf);
+ }
+
+ rsp->ree1_lfs = pfvf->ree1_lfs;
+ for (slot = 0; slot < rsp->ree1_lfs; slot++) {
+ lf = rvu_get_lf(rvu, &hw->block[BLKADDR_REE1], pcifunc, slot);
+ rsp->ree1_lf_msixoff[slot] =
+ rvu_get_msix_offset(rvu, pfvf, BLKADDR_REE1, lf);
+ }
+
return 0;
}
@@ -1512,6 +2068,13 @@ int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req,
return 0;
}
+int rvu_ndc_sync(struct rvu *rvu, int lfblkaddr, int lfidx, u64 lfoffset)
+{
+ /* Sync cached info for this LF in NDC to LLC/DRAM */
+ rvu_write64(rvu, lfblkaddr, lfoffset, BIT_ULL(12) | lfidx);
+ return rvu_poll_reg(rvu, lfblkaddr, lfoffset, BIT_ULL(12), true);
+}
+
int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req,
struct get_hw_cap_rsp *rsp)
{
@@ -1523,6 +2086,107 @@ int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req,
return 0;
}
+int rvu_mbox_handler_ndc_sync_op(struct rvu *rvu,
+ struct ndc_sync_op *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int err, lfidx, lfblkaddr;
+
+ if (req->npa_lf_sync) {
+ /* Get NPA LF data */
+ lfblkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
+ if (lfblkaddr < 0)
+ return NPA_AF_ERR_AF_LF_INVALID;
+
+ lfidx = rvu_get_lf(rvu, &hw->block[lfblkaddr], pcifunc, 0);
+ if (lfidx < 0)
+ return NPA_AF_ERR_AF_LF_INVALID;
+
+ /* Sync NPA NDC */
+ err = rvu_ndc_sync(rvu, lfblkaddr,
+ lfidx, NPA_AF_NDC_SYNC);
+ if (err)
+ dev_err(rvu->dev,
+ "NDC-NPA sync failed for LF %u\n", lfidx);
+ }
+
+ if (!req->nix_lf_tx_sync && !req->nix_lf_rx_sync)
+ return 0;
+
+ /* Get NIX LF data */
+ lfblkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (lfblkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ lfidx = rvu_get_lf(rvu, &hw->block[lfblkaddr], pcifunc, 0);
+ if (lfidx < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ if (req->nix_lf_tx_sync) {
+ /* Sync NIX TX NDC */
+ err = rvu_ndc_sync(rvu, lfblkaddr,
+ lfidx, NIX_AF_NDC_TX_SYNC);
+ if (err)
+ dev_err(rvu->dev,
+				"NDC-NIX-TX sync failed for LF %u\n", lfidx);
+ }
+
+ if (req->nix_lf_rx_sync) {
+ /* Sync NIX RX NDC */
+ err = rvu_ndc_sync(rvu, lfblkaddr,
+ lfidx, NIX_AF_NDC_RX_SYNC);
+ if (err)
+ dev_err(rvu->dev,
+ "NDC-NIX-RX sync failed for LF %u\n", lfidx);
+ }
+
+ return 0;
+}
+
+int rvu_mbox_handler_set_vf_perm(struct rvu *rvu, struct set_vf_perm *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
+ int blkaddr, nixlf;
+ u16 target;
+
+ /* Only PF can add VF permissions */
+ if ((pcifunc & RVU_PFVF_FUNC_MASK) || is_afvf(pcifunc))
+ return -EOPNOTSUPP;
+
+ target = (pcifunc & ~RVU_PFVF_FUNC_MASK) | (req->vf + 1);
+ pfvf = rvu_get_pfvf(rvu, target);
+
+ if (req->flags & RESET_VF_PERM) {
+ pfvf->flags &= RVU_CLEAR_VF_PERM;
+ } else if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) ^
+ (req->flags & VF_TRUSTED)) {
+ change_bit(PF_SET_VF_TRUSTED, &pfvf->flags);
+ /* disable multicast and promisc entries */
+ if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags)) {
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, target);
+ if (blkaddr < 0)
+ return 0;
+ nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
+ target, 0);
+ if (nixlf < 0)
+ return 0;
+ npc_enadis_default_mce_entry(rvu, target, nixlf,
+ NIXLF_ALLMULTI_ENTRY,
+ false);
+ npc_enadis_default_mce_entry(rvu, target, nixlf,
+ NIXLF_PROMISC_ENTRY,
+ false);
+ }
+ }
+
+ return 0;
+}
+
static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid,
struct mbox_msghdr *req)
{
@@ -1741,41 +2405,105 @@ static inline void rvu_afvf_mbox_up_handler(struct work_struct *work)
__rvu_mbox_up_handler(mwork, TYPE_AFVF);
}
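+/* Map the BAR4 mailbox regions of 'num' PFs or VFs; on failure unmap the
+ * regions mapped so far and return -ENOMEM.
+ */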
+static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
+ int num, int type)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int region;
+ u64 bar4;
+
+	/* On the CN10K platform the VF mailbox regions of a PF follow the
+	 * PF <-> AF mailbox region, whereas on OcteonTx2 their base address
+	 * is read from the RVU_PF_VF_BAR4_ADDR register.
+	 */
+ if (type == TYPE_AFVF) {
+ for (region = 0; region < num; region++) {
+ if (hw->cap.per_pf_mbox_regs) {
+ bar4 = rvu_read64(rvu, BLKADDR_RVUM,
+ RVU_AF_PFX_BAR4_ADDR(0)) +
+ MBOX_SIZE;
+ bar4 += region * MBOX_SIZE;
+ } else {
+ bar4 = rvupf_read64(rvu, RVU_PF_VF_BAR4_ADDR);
+ bar4 += region * MBOX_SIZE;
+ }
+ mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE);
+ if (!mbox_addr[region])
+ goto error;
+ }
+ return 0;
+ }
+
+	/* On the CN10K platform the AF <-> PF mailbox region of a PF is read
+	 * from per-PF registers, whereas on OcteonTx2 it is derived from the
+	 * common RVU_AF_PF_BAR4_ADDR register.
+	 */
+ for (region = 0; region < num; region++) {
+ if (hw->cap.per_pf_mbox_regs) {
+ bar4 = rvu_read64(rvu, BLKADDR_RVUM,
+ RVU_AF_PFX_BAR4_ADDR(region));
+ } else {
+ bar4 = rvu_read64(rvu, BLKADDR_RVUM,
+ RVU_AF_PF_BAR4_ADDR);
+ bar4 += region * MBOX_SIZE;
+ }
+ mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE);
+ if (!mbox_addr[region])
+ goto error;
+ }
+ return 0;
+
+error:
+ while (region--)
+ iounmap((void __iomem *)mbox_addr[region]);
+ return -ENOMEM;
+}
+
static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
int type, int num,
void (mbox_handler)(struct work_struct *),
void (mbox_up_handler)(struct work_struct *))
{
- void __iomem *hwbase = NULL, *reg_base;
- int err, i, dir, dir_up;
+ int err = -EINVAL, i, dir, dir_up;
+ void __iomem *reg_base;
struct rvu_work *mwork;
+ void **mbox_regions;
const char *name;
- u64 bar4_addr;
+
+ mbox_regions = kcalloc(num, sizeof(void *), GFP_KERNEL);
+ if (!mbox_regions)
+ return -ENOMEM;
switch (type) {
case TYPE_AFPF:
name = "rvu_afpf_mailbox";
- bar4_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PF_BAR4_ADDR);
dir = MBOX_DIR_AFPF;
dir_up = MBOX_DIR_AFPF_UP;
reg_base = rvu->afreg_base;
+ err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFPF);
+ if (err)
+ goto free_regions;
break;
case TYPE_AFVF:
name = "rvu_afvf_mailbox";
- bar4_addr = rvupf_read64(rvu, RVU_PF_VF_BAR4_ADDR);
dir = MBOX_DIR_PFVF;
dir_up = MBOX_DIR_PFVF_UP;
reg_base = rvu->pfreg_base;
+ err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFVF);
+ if (err)
+ goto free_regions;
break;
default:
- return -EINVAL;
+ return err;
}
mw->mbox_wq = alloc_workqueue(name,
WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
num);
- if (!mw->mbox_wq)
- return -ENOMEM;
+ if (!mw->mbox_wq) {
+ err = -ENOMEM;
+ goto unmap_regions;
+ }
mw->mbox_wrk = devm_kcalloc(rvu->dev, num,
sizeof(struct rvu_work), GFP_KERNEL);
@@ -1791,23 +2519,13 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
goto exit;
}
- /* Mailbox is a reserved memory (in RAM) region shared between
- * RVU devices, shouldn't be mapped as device memory to allow
- * unaligned accesses.
- */
- hwbase = ioremap_wc(bar4_addr, MBOX_SIZE * num);
- if (!hwbase) {
- dev_err(rvu->dev, "Unable to map mailbox region\n");
- err = -ENOMEM;
- goto exit;
- }
-
- err = otx2_mbox_init(&mw->mbox, hwbase, rvu->pdev, reg_base, dir, num);
+ err = otx2_mbox_regions_init(&mw->mbox, mbox_regions, rvu->pdev,
+ reg_base, dir, num);
if (err)
goto exit;
- err = otx2_mbox_init(&mw->mbox_up, hwbase, rvu->pdev,
- reg_base, dir_up, num);
+ err = otx2_mbox_regions_init(&mw->mbox_up, mbox_regions, rvu->pdev,
+ reg_base, dir_up, num);
if (err)
goto exit;
@@ -1820,25 +2538,36 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
mwork->rvu = rvu;
INIT_WORK(&mwork->work, mbox_up_handler);
}
-
+ kfree(mbox_regions);
return 0;
+
exit:
- if (hwbase)
- iounmap((void __iomem *)hwbase);
destroy_workqueue(mw->mbox_wq);
+unmap_regions:
+ while (num--)
+ iounmap((void __iomem *)mbox_regions[num]);
+free_regions:
+ kfree(mbox_regions);
return err;
}
static void rvu_mbox_destroy(struct mbox_wq_info *mw)
{
+ struct otx2_mbox *mbox = &mw->mbox;
+ struct otx2_mbox_dev *mdev;
+ int devid;
+
if (mw->mbox_wq) {
flush_workqueue(mw->mbox_wq);
destroy_workqueue(mw->mbox_wq);
mw->mbox_wq = NULL;
}
- if (mw->mbox.hwbase)
- iounmap((void __iomem *)mw->mbox.hwbase);
+ for (devid = 0; devid < mbox->ndevs; devid++) {
+ mdev = &mbox->dev[devid];
+ if (mdev->hwbase)
+ iounmap((void __iomem *)mdev->hwbase);
+ }
otx2_mbox_destroy(&mw->mbox);
otx2_mbox_destroy(&mw->mbox_up);
@@ -1934,6 +2663,125 @@ static void rvu_enable_mbox_intr(struct rvu *rvu)
INTR_MASK(hw->total_pfs) & ~1ULL);
}
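+/* Tear down and detach NIX LFs of other functions that are still backed by
+ * the NPA LF owned by 'pcifunc'.
+ */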
+static void rvu_npa_lf_mapped_nix_lf_teardown(struct rvu *rvu, u16 pcifunc)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *nix_block;
+	struct rsrc_detach detach = { 0 };
+ u16 nix_pcifunc;
+ int blkaddr, lf;
+ u64 regval;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return;
+
+ nix_block = &hw->block[blkaddr];
+ for (lf = 0; lf < nix_block->lf.max; lf++) {
+		/* Check whether this NIX LF is backed by the NPA LF
+		 * owned by 'pcifunc'.
+		 */
+ regval = rvu_read64(rvu, blkaddr, NIX_AF_LFX_CFG(lf));
+ if ((regval & 0xFFFF) != pcifunc)
+ continue;
+
+ nix_pcifunc = nix_block->fn_map[lf];
+
+ /* Skip NIX LF attached to the pcifunc as it is already
+ * quiesced.
+ */
+ if (nix_pcifunc == pcifunc)
+ continue;
+
+ detach.partial = true;
+ detach.nixlf = true;
+ /* Teardown the NIX LF. */
+ rvu_nix_lf_teardown(rvu, nix_pcifunc, blkaddr, lf);
+ rvu_lf_reset(rvu, nix_block, lf);
+ /* Detach the NIX LF. */
+ rvu_detach_rsrcs(rvu, &detach, nix_pcifunc);
+ }
+}
+
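+/* Drain, tear down and detach SSO LFs of other functions whose XAQ buffers
+ * come from auras of the NPA LF owned by 'pcifunc'.
+ */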
+static void rvu_npa_lf_mapped_sso_lf_teardown(struct rvu *rvu, u16 pcifunc)
+{
+ u16 sso_pcifunc, match_cnt = 0;
+ int npa_blkaddr, blkaddr, lf;
+ struct rvu_block *sso_block;
+	struct rsrc_detach detach = { 0 };
+ u16 *pcifunc_arr;
+ u64 regval;
+
+ pcifunc_arr = kcalloc(rvu->hw->total_pfs + rvu->hw->total_vfs,
+ sizeof(*pcifunc_arr), GFP_KERNEL);
+ if (!pcifunc_arr)
+ return;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+	if (blkaddr < 0) {
+		kfree(pcifunc_arr);
+		return;
+	}
+
+	npa_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+	if (npa_blkaddr < 0) {
+		kfree(pcifunc_arr);
+		return;
+	}
+
+ regval = BIT_ULL(16) | pcifunc;
+ rvu_write64(rvu, npa_blkaddr, NPA_AF_BAR2_SEL, regval);
+
+ sso_block = &rvu->hw->block[blkaddr];
+ for (lf = 0; lf < sso_block->lf.max; lf++) {
+ regval = rvu_read64(rvu, blkaddr, SSO_AF_XAQX_GMCTL(lf));
+ if ((regval & 0xFFFF) != pcifunc)
+ continue;
+
+		/* Drain queues on behalf of the SSO LF's owner */
+		sso_pcifunc = sso_block->fn_map[lf];
+		regval = rvu_read64(rvu, blkaddr, sso_block->lfcfg_reg |
+			(lf << sso_block->lfshift));
+		rvu_sso_lf_drain_queues(rvu, sso_pcifunc, lf, regval & 0xF);
+
+ regval = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_XAQ_AURA(lf));
+ rvu_sso_deinit_xaq_aura(rvu, sso_pcifunc, pcifunc, regval, lf);
+ }
+
+ for (lf = 0; lf < sso_block->lf.max; lf++) {
+ regval = rvu_read64(rvu, blkaddr, SSO_AF_XAQX_GMCTL(lf));
+ if ((regval & 0xFFFF) != pcifunc)
+ continue;
+
+ regval = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_XAQ_AURA(lf));
+ if (rvu_sso_poll_aura_cnt(rvu, npa_blkaddr, regval))
+ dev_err(rvu->dev,
+				"[%d] Failed to free XAQs to aura[%lld]\n",
+ __LINE__, regval);
+
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_XAQ_AURA(lf), 0);
+ rvu_write64(rvu, blkaddr, SSO_AF_XAQX_GMCTL(lf), 0);
+ }
+
+ for (lf = 0; lf < sso_block->lf.max; lf++) {
+ regval = rvu_read64(rvu, blkaddr, SSO_AF_XAQX_GMCTL(lf));
+ if ((regval & 0xFFFF) != pcifunc)
+ continue;
+
+ sso_pcifunc = sso_block->fn_map[lf];
+ regval = rvu_read64(rvu, blkaddr, sso_block->lfcfg_reg |
+ (lf << sso_block->lfshift));
+ /* Save SSO PF_FUNC info to detach all LFs of that PF_FUNC at
+ * once later.
+ */
+ rvu_sso_lf_teardown(rvu, sso_pcifunc, lf, regval & 0xF);
+ rvu_lf_reset(rvu, sso_block, lf);
+ pcifunc_arr[match_cnt] = sso_pcifunc;
+ match_cnt++;
+ }
+
+ detach.partial = true;
+ detach.sso = true;
+
+ for (sso_pcifunc = 0; sso_pcifunc < match_cnt; sso_pcifunc++)
+ rvu_detach_rsrcs(rvu, &detach, pcifunc_arr[sso_pcifunc]);
+ kfree(pcifunc_arr);
+}
+
static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr)
{
struct rvu_block *block;
@@ -1942,25 +2790,62 @@ static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr)
block = &rvu->hw->block[blkaddr];
num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
- block->type);
+ block->addr);
if (!num_lfs)
return;
+
+ if (block->addr == BLKADDR_SSO) {
+ for (slot = 0; slot < num_lfs; slot++) {
+ lf = rvu_get_lf(rvu, block, pcifunc, slot);
+ if (lf < 0)
+ continue;
+ rvu_sso_lf_drain_queues(rvu, pcifunc, lf, slot);
+ }
+ rvu_sso_cleanup_xaq_aura(rvu, pcifunc, num_lfs);
+ }
+
for (slot = 0; slot < num_lfs; slot++) {
lf = rvu_get_lf(rvu, block, pcifunc, slot);
if (lf < 0)
continue;
/* Cleanup LF and reset it */
- if (block->addr == BLKADDR_NIX0)
+ if (block->addr == BLKADDR_NIX0 || block->addr == BLKADDR_NIX1)
rvu_nix_lf_teardown(rvu, pcifunc, block->addr, lf);
- else if (block->addr == BLKADDR_NPA)
+ else if (block->addr == BLKADDR_NPA) {
+ rvu_npa_lf_mapped_nix_lf_teardown(rvu, pcifunc);
+ rvu_npa_lf_mapped_sso_lf_teardown(rvu, pcifunc);
rvu_npa_lf_teardown(rvu, pcifunc, lf);
+ } else if (block->addr == BLKADDR_SSO)
+ rvu_sso_lf_teardown(rvu, pcifunc, lf, slot);
+ else if (block->addr == BLKADDR_SSOW)
+ rvu_ssow_lf_teardown(rvu, pcifunc, lf, slot);
+ else if (block->addr == BLKADDR_TIM)
+ rvu_tim_lf_teardown(rvu, pcifunc, lf, slot);
+ else if ((block->addr == BLKADDR_CPT0) ||
+ (block->addr == BLKADDR_CPT1))
+ rvu_cpt_lf_teardown(rvu, pcifunc, block->addr, lf,
+ slot);
err = rvu_lf_reset(rvu, block, lf);
if (err) {
dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n",
block->addr, lf);
}
+
+ if (block->addr == BLKADDR_SSO)
+ rvu_sso_hwgrp_config_thresh(rvu, block->addr, lf);
+ }
+}
+
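+/* Free the unique SSO identifier allocated to this pcifunc, if any */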
+static void rvu_sso_pfvf_rst(struct rvu *rvu, u16 pcifunc)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ struct rvu_hwinfo *hw = rvu->hw;
+
+ if (pfvf->sso_uniq_ident) {
+ rvu_free_rsrc(&hw->sso.pfvf_ident, pfvf->sso_uniq_ident);
+ pfvf->sso_uniq_ident = 0;
}
}
@@ -1973,12 +2858,24 @@ static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
* 3. Cleanup pools (NPA)
*/
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX0);
+ rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX1);
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT0);
+ rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT1);
+ rvu_blklf_teardown(rvu, pcifunc, BLKADDR_REE0);
+ rvu_blklf_teardown(rvu, pcifunc, BLKADDR_REE1);
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_TIM);
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSOW);
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSO);
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NPA);
+ rvu_reset_lmt_map_tbl(rvu, pcifunc);
rvu_detach_rsrcs(rvu, NULL, pcifunc);
+ rvu_sso_pfvf_rst(rvu, pcifunc);
+	/* If the PF/VF driver detached its NIX LF without freeing its MCAM
+	 * entries, free them explicitly here to avoid a leak. Since the LF
+	 * is already detached, pass -1 as the LF number.
+	 */
+ rvu_npc_free_mcam_entries(rvu, pcifunc, -1);
+
mutex_unlock(&rvu->flr_lock);
}
@@ -2044,11 +2941,12 @@ static void rvu_afvf_queue_flr_work(struct rvu *rvu, int start_vf, int numvfs)
for (vf = 0; vf < numvfs; vf++) {
if (!(intr & BIT_ULL(vf)))
continue;
- dev = vf + start_vf + rvu->hw->total_pfs;
- queue_work(rvu->flr_wq, &rvu->flr_wrk[dev].work);
/* Clear and disable the interrupt */
rvupf_write64(rvu, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(reg), BIT_ULL(vf));
+
+ dev = vf + start_vf + rvu->hw->total_pfs;
+ queue_work(rvu->flr_wq, &rvu->flr_wrk[dev].work);
}
}
@@ -2064,14 +2962,14 @@ static irqreturn_t rvu_flr_intr_handler(int irq, void *rvu_irq)
for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
if (intr & (1ULL << pf)) {
- /* PF is already dead do only AF related operations */
- queue_work(rvu->flr_wq, &rvu->flr_wrk[pf].work);
/* clear interrupt */
rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT,
BIT_ULL(pf));
/* Disable the interrupt */
rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
BIT_ULL(pf));
+ /* PF is already dead do only AF related operations */
+ queue_work(rvu->flr_wq, &rvu->flr_wrk[pf].work);
}
}
@@ -2148,6 +3046,10 @@ static void rvu_unregister_interrupts(struct rvu *rvu)
{
int irq;
+ rvu_sso_unregister_interrupts(rvu);
+ rvu_cpt_unregister_interrupts(rvu);
+ rvu_ree_unregister_interrupts(rvu);
+
/* Disable the Mbox interrupt */
rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C,
INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
@@ -2357,8 +3259,20 @@ static int rvu_register_interrupts(struct rvu *rvu)
goto fail;
}
rvu->irq_allocated[offset] = true;
- return 0;
+ ret = rvu_sso_register_interrupts(rvu);
+ if (ret)
+ goto fail;
+
+ ret = rvu_cpt_register_interrupts(rvu);
+ if (ret)
+ goto fail;
+
+ ret = rvu_ree_register_interrupts(rvu);
+ if (ret)
+ goto fail;
+
+ return 0;
fail:
rvu_unregister_interrupts(rvu);
return ret;
@@ -2455,9 +3369,7 @@ static void rvu_enable_afvf_intr(struct rvu *rvu)
rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
}
-#define PCI_DEVID_OCTEONTX2_LBK 0xA061
-
-static int lbk_get_num_chans(void)
+int rvu_get_num_lbk_chans(void)
{
struct pci_dev *pdev;
void __iomem *base;
@@ -2492,7 +3404,7 @@ static int rvu_enable_sriov(struct rvu *rvu)
return 0;
}
- chans = lbk_get_num_chans();
+ chans = rvu_get_num_lbk_chans();
if (chans < 0)
return chans;
@@ -2505,6 +3417,12 @@ static int rvu_enable_sriov(struct rvu *rvu)
if (!vfs)
return 0;
+	/* LBK channel number 63 is used for switching packets between
+	 * CGX mapped VFs. Hence limit the number of LBK VF pairs to 62.
+	 */
+ if (vfs > 62)
+ vfs = 62;
+
/* Save VFs number for reference in VF interrupts handlers.
* Since interrupts might start arriving during SRIOV enablement
* ordinary API cannot be used to get number of enabled VFs.
@@ -2543,6 +3461,8 @@ static void rvu_update_module_params(struct rvu *rvu)
strscpy(rvu->mkex_pfl_name,
mkex_profile ? mkex_profile : default_pfl_name, MKEX_NAME_LEN);
+ strscpy(rvu->kpu_pfl_name,
+ kpu_profile ? kpu_profile : default_pfl_name, KPU_NAME_LEN);
}
static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -2588,8 +3508,11 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
rvu->ptp = ptp_get();
if (IS_ERR(rvu->ptp)) {
err = PTR_ERR(rvu->ptp);
- if (err == -EPROBE_DEFER)
+ if (err == -EPROBE_DEFER) {
+ dev_err(dev,
+ "PTP driver not loaded, deferring probe\n");
goto err_release_regions;
+ }
rvu->ptp = NULL;
}
@@ -2620,28 +3543,57 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = rvu_mbox_init(rvu, &rvu->afpf_wq_info, TYPE_AFPF,
rvu->hw->total_pfs, rvu_afpf_mbox_handler,
rvu_afpf_mbox_up_handler);
- if (err)
+ if (err) {
+ dev_err(dev, "%s: Failed to initialize mbox\n", __func__);
goto err_hwsetup;
+ }
err = rvu_flr_init(rvu);
- if (err)
+ if (err) {
+ dev_err(dev, "%s: Failed to initialize flr\n", __func__);
goto err_mbox;
+ }
err = rvu_register_interrupts(rvu);
- if (err)
+ if (err) {
+ dev_err(dev, "%s: Failed to register interrupts\n", __func__);
goto err_flr;
+ }
+
+ err = rvu_register_dl(rvu);
+ if (err) {
+ dev_err(dev, "%s: Failed to register devlink\n", __func__);
+ goto err_irq;
+ }
rvu_setup_rvum_blk_revid(rvu);
+ err = rvu_policy_init(rvu);
+ if (err)
+ goto err_dl;
+
/* Enable AF's VFs (if any) */
err = rvu_enable_sriov(rvu);
- if (err)
- goto err_irq;
+ if (err) {
+ dev_err(dev, "%s: Failed to enable sriov\n", __func__);
+ goto err_policy;
+ }
/* Initialize debugfs */
rvu_dbg_init(rvu);
+ mutex_init(&rvu->rswitch.switch_lock);
+
+ if (rvu->fwdata)
+ ptp_start(rvu->ptp, rvu->fwdata->sclk, rvu->fwdata->ptp_ext_clk_rate,
+ rvu->fwdata->ptp_ext_tstamp);
+
return 0;
+
+err_policy:
+ rvu_policy_destroy(rvu);
+err_dl:
+ rvu_unregister_dl(rvu);
err_irq:
rvu_unregister_interrupts(rvu);
err_flr:
@@ -2672,6 +3624,8 @@ static void rvu_remove(struct pci_dev *pdev)
struct rvu *rvu = pci_get_drvdata(pdev);
rvu_dbg_exit(rvu);
+ rvu_policy_destroy(rvu);
+ rvu_unregister_dl(rvu);
rvu_unregister_interrupts(rvu);
rvu_flr_wq_destroy(rvu);
rvu_cgx_exit(rvu);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 90eed3160915..8504d276bc57 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -1,26 +1,34 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Marvell OcteonTx2 RVU Admin Function driver
+/* Marvell RVU Admin Function driver
*
- * Copyright (C) 2018 Marvell International Ltd.
+ * Copyright (C) 2018 Marvell.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef RVU_H
#define RVU_H
#include <linux/pci.h>
+#include <net/devlink.h>
+
#include "rvu_struct.h"
+#include "rvu_devlink.h"
#include "common.h"
#include "mbox.h"
+#include "npc.h"
+#include "rvu_validation.h"
+#include "rvu_reg.h"
/* PCI device IDs */
#define PCI_DEVID_OCTEONTX2_RVU_AF 0xA065
+#define PCI_DEVID_OCTEONTX2_LBK 0xA061
/* Subsystem Device ID */
+#define PCI_SUBSYS_DEVID_98XX 0xB100
#define PCI_SUBSYS_DEVID_96XX 0xB200
+#define PCI_SUBSYS_DEVID_CN10K_A 0xB900
+#define PCI_SUBSYS_DEVID_CNF10K_A 0xBA00
+#define PCI_SUBSYS_DEVID_CNF10K_B 0xBC00
/* PCI BAR nos */
#define PCI_AF_REG_BAR_NUM 0
@@ -28,6 +36,7 @@
#define PCI_MBOX_BAR_NUM 4
#define NAME_SIZE 32
+#define MAX_NIX_BLKS 2
/* PF_FUNC */
#define RVU_PFVF_PF_SHIFT 10
@@ -42,6 +51,10 @@ struct dump_ctx {
bool all;
};
+struct cpt_dump_ctx {
+ char e_type[NAME_SIZE];
+};
+
struct rvu_debugfs {
struct dentry *root;
struct dentry *cgx_root;
@@ -50,11 +63,16 @@ struct rvu_debugfs {
struct dentry *npa;
struct dentry *nix;
struct dentry *npc;
+ struct dentry *sso;
+ struct dentry *sso_hwgrp;
+ struct dentry *sso_hws;
+ struct dentry *cpt;
struct dump_ctx npa_aura_ctx;
struct dump_ctx npa_pool_ctx;
struct dump_ctx nix_cq_ctx;
struct dump_ctx nix_rq_ctx;
struct dump_ctx nix_sq_ctx;
+ struct cpt_dump_ctx cpt_ctx;
int npa_qsize_id;
int nix_qsize_id;
};
@@ -88,6 +106,7 @@ struct rvu_block {
u64 msixcfg_reg;
u64 lfreset_reg;
unsigned char name[NAME_SIZE];
+ struct rvu *rvu;
};
struct nix_mcast {
@@ -104,6 +123,36 @@ struct nix_mce_list {
int max;
};
+/* layer metadata to uniquely identify a packet header field */
+struct npc_layer_mdata {
+ u8 lid;
+ u8 ltype;
+ u8 hdr;
+ u8 key;
+ u8 len;
+};
+
+/* Structure to represent a field present in the
+ * generated key. A key field may be present anywhere and can
+ * be of any size in the generated key. Once this structure
+ * is populated for the fields of interest, each field's presence
+ * and location (if present) can be determined.
+ */
+struct npc_key_field {
+ /* Masks where all set bits indicate position
+ * of a field in the key
+ */
+ u64 kw_mask[NPC_MAX_KWS_IN_KEY];
+ /* Number of words in the key a field spans. If a field is
+ * of 16 bytes and key offset is 4 then the field will use
+ * 4 bytes in KW0, 8 bytes in KW1 and 4 bytes in KW2 and
+	 * nr_kws will be 3 (KW0, KW1 and KW2).
+ */
+ int nr_kws;
+ /* used by packet header fields */
+ struct npc_layer_mdata layer_mdata;
+};
+
struct npc_mcam {
struct rsrc_bmap counters;
struct mutex lock; /* MCAM entries and counters update lock */
@@ -115,6 +164,7 @@ struct npc_mcam {
u16 *entry2cntr_map;
u16 *cntr2pfvf_map;
u16 *cntr_refcnt;
+ u16 *entry2target_pffunc;
u8 keysize; /* MCAM keysize 112/224/448 bits */
u8 banks; /* Number of MCAM banks */
u8 banks_per_entry;/* Number of keywords in key */
@@ -126,7 +176,49 @@ struct npc_mcam {
u16 lprio_start;
u16 hprio_count;
u16 hprio_end;
- u16 rx_miss_act_cntr; /* Counter for RX MISS action */
+ u16 rx_miss_act_cntr; /* Counter for RX MISS action */
+ /* fields present in the generated key */
+ struct npc_key_field tx_key_fields[NPC_KEY_FIELDS_MAX];
+ struct npc_key_field rx_key_fields[NPC_KEY_FIELDS_MAX];
+ u64 tx_features;
+ u64 rx_features;
+ struct list_head mcam_rules;
+};
+
+struct sso_rsrc {
+ u8 sso_hws;
+ u16 sso_hwgrps;
+ u16 sso_xaq_num_works;
+ u16 sso_xaq_buf_size;
+ u16 sso_iue;
+ u64 iaq_rsvd;
+ u64 iaq_max;
+ u64 taq_rsvd;
+ u64 taq_max;
+ struct rsrc_bmap pfvf_ident;
+};
+
+enum tim_ring_interval {
+ TIM_INTERVAL_1US = 0,
+ TIM_INTERVAL_10US,
+ TIM_INTERVAL_1MS,
+ TIM_INTERVAL_INVAL,
+};
+
+struct tim_rsrc {
+ u16 rings_per_intvl[TIM_INTERVAL_INVAL];
+ enum tim_ring_interval *ring_intvls;
+};
+
+struct ree_rsrc {
+ struct qmem *graph_ctx; /* Graph base address - used by HW */
+ struct qmem *prefix_ctx; /* Prefix blocks - used by HW */
+ void **ruledb; /* ROF file from application */
+ u8 *ruledbi; /* Incremental checksum instructions */
+ u32 aq_head; /* AF AQ head address */
+ u32 ruledb_len; /* Length of ruledb */
+ u32 ruledbi_len; /* Length of ruledbi */
+ u8 ruledb_blocks; /* Number of blocks pointed by ruledb */
};
/* Structure for per RVU func info ie PF/VF */
@@ -137,7 +229,11 @@ struct rvu_pfvf {
u16 ssow;
u16 cptlfs;
u16 timlfs;
+ u16 cpt1_lfs;
+ u16 ree0_lfs;
+ u16 ree1_lfs;
u8 cgx_lmac;
+ u8 sso_uniq_ident;
/* Block LF's MSIX vector info */
struct rsrc_bmap msix; /* Bitmap for MSIX vector alloc */
@@ -169,21 +265,47 @@ struct rvu_pfvf {
u16 maxlen;
u16 minlen;
+ bool hw_rx_tstamp_en; /* Is rx_tstamp enabled */
u8 mac_addr[ETH_ALEN]; /* MAC address of this PF/VF */
+ u8 default_mac[ETH_ALEN]; /* MAC address from FWdata */
- /* Broadcast pkt replication info */
+ /* Broadcast/Multicast/Promisc pkt replication info */
u16 bcast_mce_idx;
+ u16 mcast_mce_idx;
+ u16 promisc_mce_idx;
struct nix_mce_list bcast_mce_list;
+ struct nix_mce_list mcast_mce_list;
+ struct nix_mce_list promisc_mce_list;
+ bool use_mce_list;
- /* VLAN offload */
- struct mcam_entry entry;
- int rxvlan_index;
- bool rxvlan;
+ /* For resource limits */
+ struct pci_dev *pdev;
+ struct kobject *limits_kobj;
bool cgx_in_use; /* this PF/VF using CGX? */
int cgx_users; /* number of cgx users - used only by PFs */
+ int intf_mode;
+ u8 nix_blkaddr; /* BLKADDR_NIX0/1 assigned to this PF */
+ u8 nix_rx_intf; /* NIX0_RX/NIX1_RX interface to NPC */
+ u8 nix_tx_intf; /* NIX0_TX/NIX1_TX interface to NPC */
+ u8 lbkid; /* NIX0/1 lbk link ID */
+	u64 lmt_base_addr; /* Preserving the pcifunc's lmtst base addr */
+	u64 lmt_map_ent_w1; /* Preserving word1 of the lmtst map table entry */
+ unsigned long flags;
+ struct sdp_node_info *sdp_info;
+
+ struct rvu_npc_mcam_rule *def_ucast_rule;
+};
+
+enum rvu_pfvf_flags {
+ NIXLF_INITIALIZED = 0,
+ PF_SET_VF_MAC,
+ PF_SET_VF_CFG,
+ PF_SET_VF_TRUSTED,
};
+#define RVU_CLEAR_VF_PERM ~GENMASK(PF_SET_VF_TRUSTED, PF_SET_VF_MAC)
+
struct nix_txsch {
struct rsrc_bmap schq;
u8 lvl;
@@ -218,12 +340,32 @@ struct nix_lso {
u8 in_use;
};
+struct nix_txvlan {
+#define NIX_TX_VTAG_DEF_MAX 0x400
+ struct rsrc_bmap rsrc;
+ u16 *entry2pfvf_map;
+ struct mutex rsrc_lock; /* Serialize resource alloc/free */
+};
+
+struct nix_ipolicer {
+ struct rsrc_bmap band_prof;
+ u16 *pfvf_map;
+ u16 *match_id;
+ u16 *ref_count;
+};
+
struct nix_hw {
+ int blkaddr;
+ struct rvu *rvu;
struct nix_txsch txsch[NIX_TXSCH_LVL_CNT]; /* Tx schedulers */
struct nix_mcast mcast;
struct nix_flowkey flowkey;
struct nix_mark_format mark_format;
struct nix_lso lso;
+ struct nix_txvlan txvlan;
+ struct nix_ipolicer *ipolicer;
+ u64 *tx_credits;
+ void *tx_stall;
};
/* RVU block's capabilities or functionality,
@@ -237,8 +379,13 @@ struct hw_cap {
u16 nix_txsch_per_sdp_lmac; /* Max Q's transmitting to SDP LMAC */
bool nix_fixed_txschq_mapping; /* Schq mapping fixed or flexible */
bool nix_shaping; /* Is shaping and coloring supported */
+ bool nix_shaper_toggle_wait; /* Shaping toggle needs poll/wait */
bool nix_tx_link_bp; /* Can link backpressure TL queues ? */
bool nix_rx_multicast; /* Rx packet replication support */
+ bool nix_common_dwrr_mtu; /* Common DWRR MTU for quantum config */
+ bool per_pf_mbox_regs; /* PF mbox specified in per PF registers ? */
+ bool programmable_chans; /* Channels programmable ? */
+ bool ipolicer;
};
struct rvu_hwinfo {
@@ -247,16 +394,32 @@ struct rvu_hwinfo {
u16 max_vfs_per_pf; /* Max VFs that can be attached to a PF */
u8 cgx;
u8 lmac_per_cgx;
+ u16 cgx_chan_base; /* CGX base channel number */
+ u16 lbk_chan_base; /* LBK base channel number */
+ u16 sdp_chan_base; /* SDP base channel number */
+ u16 cpt_chan_base; /* CPT base channel number */
u8 cgx_links;
u8 lbk_links;
u8 sdp_links;
+ u8 cpt_links; /* Number of CPT links */
u8 npc_kpus; /* No of parser units */
+ u8 npc_pkinds; /* No of port kinds */
+ u8 npc_intfs; /* No of interfaces */
+ u8 npc_kpu_entries; /* No of KPU entries */
+ u16 npc_counters; /* No of match stats counters */
+ u32 lbk_bufsize; /* FIFO size supported by LBK */
+ bool npc_ext_set; /* Extended register set */
+ u64 npc_stat_ena; /* Match stats enable bit */
struct hw_cap cap;
struct rvu_block block[BLK_COUNT]; /* Block info */
- struct nix_hw *nix0;
+ struct nix_hw *nix;
+ struct rvu *rvu;
struct npc_pkind pkind;
struct npc_mcam mcam;
+ struct sso_rsrc sso;
+ struct tim_rsrc tim;
+ struct ree_rsrc *ree;
};
struct mbox_wq_info {
@@ -285,8 +448,14 @@ struct rvu_fwdata {
u64 mcam_addr;
u64 mcam_sz;
u64 msixtr_base;
-#define FWDATA_RESERVED_MEM 1023
+ u32 ptp_ext_clk_rate;
+ u32 ptp_ext_tstamp;
+#define FWDATA_RESERVED_MEM 1022
u64 reserved[FWDATA_RESERVED_MEM];
+#define CGX_MAX 5
+#define CGX_LMACS_MAX 4
+ struct cgx_lmac_fwdata_s cgx_fw_data[CGX_MAX][CGX_LMACS_MAX];
+ /* Do not add new fields below this line */
};
struct ptp;
@@ -300,11 +469,22 @@ struct npc_kpu_profile_adapter {
const struct npc_lt_def_cfg *lt_def;
const struct npc_kpu_profile_action *ikpu; /* array[pkinds] */
const struct npc_kpu_profile *kpu; /* array[kpus] */
- const struct npc_mcam_kex *mkex;
+ struct npc_mcam_kex *mkex;
+ bool custom;
size_t pkinds;
size_t kpus;
};
+#define RVU_SWITCH_LBK_CHAN 63
+
+struct rvu_switch {
+ struct mutex switch_lock; /* Serialize flow installation */
+ u32 used_entries;
+ u16 *entry2pcifunc;
+ u16 mode;
+ u16 start_entry;
+};
+
struct rvu {
void __iomem *afreg_base;
void __iomem *pfreg_base;
@@ -313,8 +493,10 @@ struct rvu {
struct rvu_hwinfo *hw;
struct rvu_pfvf *pf;
struct rvu_pfvf *hwvf;
+ struct rvu_limits pf_limits;
struct mutex rsrc_lock; /* Serialize resource alloc/free */
int vfs; /* Number of VFs attached to RVU */
+ int nix_blkaddr[MAX_NIX_BLKS];
/* Mbox */
struct mbox_wq_info afpf_wq_info;
@@ -334,6 +516,7 @@ struct rvu {
/* CGX */
#define PF_CGXMAP_BASE 1 /* PF 0 is reserved for RVU PF */
+ u16 cgx_mapped_vfs; /* maximum CGX mapped VFs */
u8 cgx_mapped_pfs;
u8 cgx_cnt_max; /* CGX port count max */
u8 *pf2cgxlmac_map; /* pf to cgx_lmac map */
@@ -349,18 +532,28 @@ struct rvu {
struct mutex cgx_cfg_lock; /* serialize cgx configuration */
char mkex_pfl_name[MKEX_NAME_LEN]; /* Configured MKEX profile name */
+ char kpu_pfl_name[KPU_NAME_LEN]; /* Configured KPU profile name */
/* Firmware data */
struct rvu_fwdata *fwdata;
+ void *kpu_fwdata;
+ size_t kpu_fwdata_sz;
+ void __iomem *kpu_prfl_addr;
/* NPC KPU data */
struct npc_kpu_profile_adapter kpu;
struct ptp *ptp;
+ int cpt_pf_num;
+
#ifdef CONFIG_DEBUG_FS
struct rvu_debugfs rvu_dbg;
#endif
+ struct rvu_devlink *rvu_dl;
+
+ /* RVU switch implementation over NPC with DMAC rules */
+ struct rvu_switch rswitch;
};
static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
@@ -384,30 +577,132 @@ static inline u64 rvupf_read64(struct rvu *rvu, u64 offset)
}
/* Silicon revisions */
+static inline bool is_rvu_pre_96xx_C0(struct rvu *rvu)
+{
+ struct pci_dev *pdev = rvu->pdev;
+ /* 96XX A0/B0, 95XX A0/A1/B0 chips */
+ return ((pdev->revision == 0x00) || (pdev->revision == 0x01) ||
+ (pdev->revision == 0x10) || (pdev->revision == 0x11) ||
+ (pdev->revision == 0x14));
+}
+
static inline bool is_rvu_96xx_A0(struct rvu *rvu)
{
struct pci_dev *pdev = rvu->pdev;
- return (pdev->revision == 0x00) &&
- (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX);
+ return (pdev->revision == 0x00);
}
static inline bool is_rvu_96xx_B0(struct rvu *rvu)
{
struct pci_dev *pdev = rvu->pdev;
- return ((pdev->revision == 0x00) || (pdev->revision == 0x01)) &&
- (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX);
+ return (pdev->revision == 0x00) || (pdev->revision == 0x01);
+}
+
+static inline bool is_rvu_95xx_A0(struct rvu *rvu)
+{
+ struct pci_dev *pdev = rvu->pdev;
+
+ return (pdev->revision == 0x10) || (pdev->revision == 0x11);
+}
+
+/* REVID for PCIe devices.
+ * Bits 1..0: minor pass, bits 3..2: major pass,
+ * bits 7..4: midr id.
+ */
+#define PCI_REVISION_ID_96XX 0x00
+#define PCI_REVISION_ID_95XX 0x10
+#define PCI_REVISION_ID_95XXN 0x20
+#define PCI_REVISION_ID_98XX 0x30
+#define PCI_REVISION_ID_95XXMM 0x40
+#define PCI_REVISION_ID_95XXO 0xE0
+
+static inline bool is_rvu_otx2(struct rvu *rvu)
+{
+ struct pci_dev *pdev = rvu->pdev;
+
+ u8 midr = pdev->revision & 0xF0;
+
+ return (midr == PCI_REVISION_ID_96XX || midr == PCI_REVISION_ID_95XX ||
+ midr == PCI_REVISION_ID_95XXN || midr == PCI_REVISION_ID_98XX ||
+ midr == PCI_REVISION_ID_95XXMM || midr == PCI_REVISION_ID_95XXO);
+}
+
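+/* Illustrative sketch (hypothetical helper, not used elsewhere in this
+ * file): decoding a PCI revision value per the REVID layout documented
+ * above. For example, revision 0x31 decodes to midr 0x30 (98XX),
+ * major pass 0, minor pass 1.
+ */
+static inline void rvu_decode_revid(u8 rev, u8 *midr, u8 *major, u8 *minor)
+{
+	*midr = rev & 0xF0;		/* bits 7..4: midr id */
+	*major = (rev >> 2) & 0x3;	/* bits 3..2: major pass */
+	*minor = rev & 0x3;		/* bits 1..0: minor pass */
+}
+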
+static inline bool is_cgx_mapped_to_nix(unsigned short id, u8 cgx_id)
+{
+ /* On CNF10KA and CNF10KB silicons only two CGX blocks are connected
+ * to NIX.
+ */
+ if (id == PCI_SUBSYS_DEVID_CNF10K_A || id == PCI_SUBSYS_DEVID_CNF10K_B)
+ return cgx_id <= 1;
+
+ return !(cgx_id && !(id == PCI_SUBSYS_DEVID_96XX ||
+ id == PCI_SUBSYS_DEVID_98XX ||
+ id == PCI_SUBSYS_DEVID_CN10K_A));
+}
+
+static inline u16 rvu_nix_chan_cgx(struct rvu *rvu, u8 cgxid,
+ u8 lmacid, u8 chan)
+{
+ u64 nix_const = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_CONST);
+ u16 cgx_chans = nix_const & 0xFFULL;
+ struct rvu_hwinfo *hw = rvu->hw;
+
+ if (!hw->cap.programmable_chans)
+ return NIX_CHAN_CGX_LMAC_CHX(cgxid, lmacid, chan);
+
+ return rvu->hw->cgx_chan_base +
+ (cgxid * hw->lmac_per_cgx + lmacid) * cgx_chans + chan;
+}
+
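+/* Worked example for rvu_nix_chan_cgx() above (illustrative, assumed
+ * values): with programmable channels, cgx_chan_base = 0x500,
+ * lmac_per_cgx = 4 and 16 channels per LMAC (NIX_AF_CONST[7:0]),
+ * channel 2 of CGX1/LMAC3 maps to 0x500 + (1 * 4 + 3) * 16 + 2 = 0x572.
+ */
+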
+static inline u16 rvu_nix_chan_lbk(struct rvu *rvu, u8 lbkid,
+ u8 chan)
+{
+ u64 nix_const = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_CONST);
+ u16 lbk_chans = (nix_const >> 16) & 0xFFULL;
+ struct rvu_hwinfo *hw = rvu->hw;
+
+ if (!hw->cap.programmable_chans)
+ return NIX_CHAN_LBK_CHX(lbkid, chan);
+
+ return rvu->hw->lbk_chan_base + lbkid * lbk_chans + chan;
+}
+
+static inline u16 rvu_nix_chan_sdp(struct rvu *rvu, u8 chan)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+
+ if (!hw->cap.programmable_chans)
+ return NIX_CHAN_SDP_CHX(chan);
+
+ return hw->sdp_chan_base + chan;
+}
+
+static inline u16 rvu_nix_chan_cpt(struct rvu *rvu, u8 chan)
+{
+ return rvu->hw->cpt_chan_base + chan;
}
/* Function Prototypes
* RVU
*/
-static inline int is_afvf(u16 pcifunc)
+static inline bool is_afvf(u16 pcifunc)
{
return !(pcifunc & ~RVU_PFVF_FUNC_MASK);
}
+static inline bool is_vf(u16 pcifunc)
+{
+ return !!(pcifunc & RVU_PFVF_FUNC_MASK);
+}
+
+/* check if PF_FUNC is AF */
+static inline bool is_pffunc_af(u16 pcifunc)
+{
+ return !pcifunc;
+}
+
static inline bool is_rvu_fwdata_valid(struct rvu *rvu)
{
return (rvu->fwdata->header_magic == RVU_FWDATA_HEADER_MAGIC) &&
@@ -415,11 +710,14 @@ static inline bool is_rvu_fwdata_valid(struct rvu *rvu)
}
int rvu_alloc_bitmap(struct rsrc_bmap *rsrc);
+void rvu_free_bitmap(struct rsrc_bmap *rsrc);
int rvu_alloc_rsrc(struct rsrc_bmap *rsrc);
void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id);
+bool is_rsrc_free(struct rsrc_bmap *rsrc, int id);
int rvu_rsrc_free_count(struct rsrc_bmap *rsrc);
int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc);
bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc);
+u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blkaddr);
int rvu_get_pf(u16 pcifunc);
struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc);
void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf);
@@ -429,6 +727,10 @@ int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot);
int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf);
int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc);
int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero);
+int rvu_get_num_lbk_chans(void);
+int rvu_ndc_sync(struct rvu *rvu, int lfblkid, int lfidx, u64 lfoffset);
+int rvu_get_blkaddr_from_slot(struct rvu *rvu, int blktype, u16 pcifunc,
+ u16 global_slot, u16 *slot_in_block);
/* RVU HW reg validation */
enum regmap_block {
@@ -443,10 +745,17 @@ int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
int qsize, int inst_size, int res_size);
void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq);
+/* SDP APIs */
+int rvu_sdp_init(struct rvu *rvu);
+bool is_sdp_pfvf(u16 pcifunc);
+bool is_sdp_pf(u16 pcifunc);
+bool is_sdp_vf(u16 pcifunc);
+
/* CGX APIs */
static inline bool is_pf_cgxmapped(struct rvu *rvu, u8 pf)
{
- return (pf >= PF_CGXMAP_BASE && pf <= rvu->cgx_mapped_pfs);
+ return (pf >= PF_CGXMAP_BASE && pf <= rvu->cgx_mapped_pfs) &&
+ !is_sdp_pf(pf << RVU_PFVF_PF_SHIFT);
}
static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id)
@@ -455,6 +764,12 @@ static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id)
*lmac_id = (map & 0xF);
}
+static inline bool is_cgx_vf(struct rvu *rvu, u16 pcifunc)
+{
+ return ((pcifunc & RVU_PFVF_FUNC_MASK) &&
+ is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)));
+}
+
#define M(_name, _id, fn_name, req, rsp) \
int rvu_mbox_handler_ ## fn_name(struct rvu *, struct req *, struct rsp *);
MBOX_MESSAGES
@@ -465,9 +780,36 @@ int rvu_cgx_exit(struct rvu *rvu);
void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu);
int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start);
void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable);
+void rvu_cgx_enadis_higig2(struct rvu *rvu, int pf, bool enable);
+bool rvu_cgx_is_higig2_enabled(struct rvu *rvu, int pf);
+void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc);
int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start);
int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id, int index,
int rxtxflag, u64 *stat);
+bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc);
+bool rvu_cgx_is_pkind_config_permitted(struct rvu *rvu, u16 pcifunc);
+void *rvu_first_cgx_pdata(struct rvu *rvu);
+int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id);
+int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable);
+int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause,
+ u16 pfc_en);
+int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause);
+
+/* SSO APIs */
+int rvu_sso_init(struct rvu *rvu);
+void rvu_sso_freemem(struct rvu *rvu);
+int rvu_sso_register_interrupts(struct rvu *rvu);
+void rvu_sso_unregister_interrupts(struct rvu *rvu);
+int rvu_sso_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot_id);
+int rvu_ssow_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot_id);
+void rvu_sso_hwgrp_config_thresh(struct rvu *rvu, int blkaddr, int lf);
+void rvu_sso_block_cn10k_init(struct rvu *rvu, int blkaddr);
+void rvu_sso_lf_drain_queues(struct rvu *rvu, u16 pcifunc, int lf, int slot);
+int rvu_sso_cleanup_xaq_aura(struct rvu *rvu, u16 pcifunc, int hwgrp);
+int rvu_sso_poll_aura_cnt(struct rvu *rvu, int npa_blkaddr, int aura);
+void rvu_sso_deinit_xaq_aura(struct rvu *rvu, int blkaddr, int npa_blkaddr,
+ int aura, int lf);
+
/* NPA APIs */
int rvu_npa_init(struct rvu *rvu);
void rvu_npa_freemem(struct rvu *rvu);
@@ -484,7 +826,26 @@ void rvu_nix_freemem(struct rvu *rvu);
int rvu_get_nixlf_count(struct rvu *rvu);
void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int npalf);
int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr);
-int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add);
+int nix_update_mce_list(struct rvu *rvu, u16 pcifunc,
+ struct nix_mce_list *mce_list,
+ int mce_idx, int mcam_index, bool add);
+void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type,
+ struct nix_mce_list **mce_list, int *mce_idx);
+bool rvu_nix_is_ptp_tx_enabled(struct rvu *rvu, u16 pcifunc);
+struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr);
+int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr);
+void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc);
+int nix_get_struct_ptrs(struct rvu *rvu, u16 pcifunc,
+ struct nix_hw **nix_hw, int *blkaddr);
+int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc,
+ u16 rq_idx, u16 match_id);
+int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw,
+ struct nix_cn10k_aq_enq_req *aq_req,
+ struct nix_cn10k_aq_enq_rsp *aq_rsp,
+ u16 pcifunc, u8 ctype, u32 qidx);
+int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc);
+u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu);
+u32 convert_bytes_to_dwrr_mtu(u32 bytes);
/* NPC APIs */
int rvu_npc_init(struct rvu *rvu);
@@ -495,14 +856,21 @@ int npc_config_ts_kpuaction(struct rvu *rvu, int pf, u16 pcifunc, bool en);
void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, u64 chan, u8 *mac_addr);
void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
- int nixlf, u64 chan, bool allmulti);
-void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf);
-void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf);
+ int nixlf, u64 chan, u8 chan_cnt);
+void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
+ bool enable);
void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, u64 chan);
-void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, bool enable);
-int rvu_npc_update_rxvlan(struct rvu *rvu, u16 pcifunc, int nixlf);
+void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
+ bool enable);
+void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
+ u64 chan);
+void rvu_npc_enable_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
+ bool enable);
+void npc_enadis_default_mce_entry(struct rvu *rvu, u16 pcifunc,
+ int nixlf, int type, bool enable);
void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
+void rvu_npc_free_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
@@ -513,6 +881,52 @@ void rvu_npc_get_mcam_entry_alloc_info(struct rvu *rvu, u16 pcifunc,
void rvu_npc_get_mcam_counter_alloc_info(struct rvu *rvu, u16 pcifunc,
int blkaddr, int *alloc_cnt,
int *enable_cnt);
+bool is_npc_intf_tx(u8 intf);
+bool is_npc_intf_rx(u8 intf);
+bool is_npc_interface_valid(struct rvu *rvu, u8 intf);
+int npc_flow_steering_init(struct rvu *rvu, int blkaddr);
+const char *npc_get_field_name(u8 hdr);
+int npc_get_bank(struct npc_mcam *mcam, int index);
+void npc_mcam_enable_flows(struct rvu *rvu, u16 target);
+void npc_mcam_disable_flows(struct rvu *rvu, u16 target);
+void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index, bool enable);
+void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, u16 src, struct mcam_entry *entry,
+ u8 *intf, u8 *ena);
+bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature);
+u32 rvu_cgx_get_fifolen(struct rvu *rvu);
+void *rvu_first_cgx_pdata(struct rvu *rvu);
+int npc_get_nixlf_mcam_index(struct npc_mcam *mcam, u16 pcifunc, int nixlf,
+ int type);
+bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr,
+ int index);
+
+/* CPT APIs */
+int rvu_cpt_register_interrupts(struct rvu *rvu);
+void rvu_cpt_unregister_interrupts(struct rvu *rvu);
+int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf,
+ int slot);
+int rvu_cpt_ctx_flush(struct rvu *rvu, u16 pcifunc);
+int rvu_cpt_init(struct rvu *rvu);
+
+/* CN10K RVU */
+int rvu_set_channels_base(struct rvu *rvu);
+void rvu_program_channels(struct rvu *rvu);
+
+/* CN10K RVU - LMT*/
+void rvu_reset_lmt_map_tbl(struct rvu *rvu, u16 pcifunc);
+void rvu_apr_block_cn10k_init(struct rvu *rvu);
+
+/* TIM APIs */
+int rvu_tim_init(struct rvu *rvu);
+int rvu_tim_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot);
+
+/* REE APIs */
+int rvu_ree_init(struct rvu *rvu);
+void rvu_ree_freemem(struct rvu *rvu);
+int rvu_ree_register_interrupts(struct rvu *rvu);
+void rvu_ree_unregister_interrupts(struct rvu *rvu);
#ifdef CONFIG_DEBUG_FS
void rvu_dbg_init(struct rvu *rvu);
@@ -521,4 +935,37 @@ void rvu_dbg_exit(struct rvu *rvu);
static inline void rvu_dbg_init(struct rvu *rvu) {}
static inline void rvu_dbg_exit(struct rvu *rvu) {}
#endif
+
+/* HW workarounds/fixes */
+void rvu_nix_txsch_lock(struct nix_hw *nix_hw);
+void rvu_nix_txsch_unlock(struct nix_hw *nix_hw);
+void rvu_nix_update_link_credits(struct rvu *rvu, int blkaddr,
+ int link, u64 ncredits);
+
+void rvu_nix_update_sq_smq_mapping(struct rvu *rvu, int blkaddr, int nixlf,
+ u16 sq, u16 smq);
+void rvu_nix_txsch_config_changed(struct nix_hw *nix_hw);
+ssize_t rvu_nix_get_tx_stall_counters(struct nix_hw *nix_hw,
+ char __user *buffer, loff_t *ppos);
+int rvu_nix_fixes_init(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr);
+void rvu_nix_fixes_exit(struct rvu *rvu, struct nix_hw *nix_hw);
+int rvu_tim_lookup_rsrc(struct rvu *rvu, struct rvu_block *block,
+ u16 pcifunc, int slot);
+int rvu_npc_get_tx_nibble_cfg(struct rvu *rvu, u64 nibble_ena);
+bool is_parse_nibble_config_valid(struct rvu *rvu,
+ struct npc_mcam_kex *mcam_kex);
+int
+rvu_npc_set_parse_mode(struct rvu *rvu, u16 pcifunc, u64 mode, u8 dir,
+ u64 pkind, u8 var_len_off, u8 var_len_off_mask,
+ u8 shift_dir);
+void rvu_tim_hw_fixes(struct rvu *rvu, int blkaddr);
+
+/* CN10K NIX */
+void rvu_nix_block_cn10k_init(struct rvu *rvu, struct nix_hw *nix_hw);
+
+/* RVU Switch */
+void rvu_switch_enable(struct rvu *rvu);
+void rvu_switch_disable(struct rvu *rvu);
+void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc);
+
#endif /* RVU_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index f4ecc755eaff..9e4b370fb2b9 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -1,11 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
-/* Marvell OcteonTx2 RVU Admin Function driver
+/* Marvell RVU Admin Function driver
*
- * Copyright (C) 2018 Marvell International Ltd.
+ * Copyright (C) 2018 Marvell.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/types.h>
@@ -14,6 +11,7 @@
#include "rvu.h"
#include "cgx.h"
+#include "lmac_common.h"
#include "rvu_reg.h"
#include "rvu_trace.h"
@@ -42,13 +40,27 @@ static struct _req_type __maybe_unused \
MBOX_UP_CGX_MESSAGES
#undef M
+bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature)
+{
+ u8 cgx_id, lmac_id;
+ void *cgxd;
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return 0;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+
+ return (cgx_features_get(cgxd) & feature);
+}
+
/* Returns bitmap of mapped PFs */
-static u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
+static u64 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
{
return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id];
}
-static int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id)
+int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id)
{
unsigned long pfmap;
@@ -58,7 +70,8 @@ static int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id)
if (!pfmap)
return -ENODEV;
else
- return find_first_bit(&pfmap, 16);
+ return find_first_bit(&pfmap,
+ rvu->cgx_cnt_max * MAX_LMAC_PER_CGX);
}
static u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
@@ -74,13 +87,44 @@ void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu)
return rvu->cgx_idmap[cgx_id];
}
+/* Return the first enabled CGX instance; if none are enabled, return NULL */
+void *rvu_first_cgx_pdata(struct rvu *rvu)
+{
+ int first_enabled_cgx = 0;
+ void *cgxd = NULL;
+
+ for (; first_enabled_cgx < rvu->cgx_cnt_max; first_enabled_cgx++) {
+ cgxd = rvu_cgx_pdata(first_enabled_cgx, rvu);
+ if (cgxd)
+ break;
+ }
+
+ return cgxd;
+}
+
+/* Based on P2X connectivity find mapped NIX block for a PF */
+static void rvu_map_cgx_nix_block(struct rvu *rvu, int pf,
+ int cgx_id, int lmac_id)
+{
+ struct rvu_pfvf *pfvf = &rvu->pf[pf];
+ u8 p2x;
+
+ p2x = cgx_lmac_get_p2x(cgx_id, lmac_id);
+ /* Firmware sets P2X_SELECT as either NIX0 or NIX1 */
+ pfvf->nix_blkaddr = BLKADDR_NIX0;
+ if (p2x == CMR_P2X_SEL_NIX1)
+ pfvf->nix_blkaddr = BLKADDR_NIX1;
+}
+
static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
{
struct npc_pkind *pkind = &rvu->hw->pkind;
int cgx_cnt_max = rvu->cgx_cnt_max;
- int cgx, lmac_cnt, lmac;
int pf = PF_CGXMAP_BASE;
+ unsigned long lmac_bmap;
int size, free_pkind;
+ int cgx, lmac, iter;
+ int numvfs, hwvfs;
if (!cgx_cnt_max)
return 0;
@@ -102,7 +146,7 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
/* Reverse map table */
rvu->cgxlmac2pf_map = devm_kzalloc(rvu->dev,
- cgx_cnt_max * MAX_LMAC_PER_CGX * sizeof(u16),
+ cgx_cnt_max * MAX_LMAC_PER_CGX * sizeof(u64),
GFP_KERNEL);
if (!rvu->cgxlmac2pf_map)
return -ENOMEM;
@@ -111,13 +155,19 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
for (cgx = 0; cgx < cgx_cnt_max; cgx++) {
if (!rvu_cgx_pdata(cgx, rvu))
continue;
- lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
- for (lmac = 0; lmac < lmac_cnt; lmac++, pf++) {
+ lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
+ for_each_set_bit(iter, &lmac_bmap, MAX_LMAC_PER_CGX) {
+ lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu),
+ iter);
rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;
free_pkind = rvu_alloc_rsrc(&pkind->rsrc);
pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16;
+ rvu_map_cgx_nix_block(rvu, pf, cgx, lmac);
rvu->cgx_mapped_pfs++;
+ rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvfs);
+ rvu->cgx_mapped_vfs += numvfs;
+ pf++;
}
}
return 0;
@@ -139,8 +189,10 @@ static int rvu_cgx_send_link_info(int cgx_id, int lmac_id, struct rvu *rvu)
&qentry->link_event.link_uinfo);
qentry->link_event.cgx_id = cgx_id;
qentry->link_event.lmac_id = lmac_id;
- if (err)
+ if (err) {
+ kfree(qentry);
goto skip_add;
+ }
list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
skip_add:
spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);
@@ -183,16 +235,13 @@ static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id);
do {
- pfid = find_first_bit(&pfmap, 16);
+ pfid = find_first_bit(&pfmap,
+ rvu->cgx_cnt_max * MAX_LMAC_PER_CGX);
clear_bit(pfid, &pfmap);
/* check if notification is enabled */
- if (!test_bit(pfid, &rvu->pf_notify_bmap)) {
- dev_info(rvu->dev, "cgx %d: lmac %d Link status %s\n",
- event->cgx_id, event->lmac_id,
- linfo->link_up ? "UP" : "DOWN");
+ if (!test_bit(pfid, &rvu->pf_notify_bmap))
continue;
- }
/* Send mbox message to PF */
msg = otx2_mbox_alloc_msg_cgx_link_event(rvu, pfid);
@@ -236,6 +285,7 @@ static void cgx_evhandler_task(struct work_struct *work)
static int cgx_lmac_event_handler_init(struct rvu *rvu)
{
+ unsigned long lmac_bmap;
struct cgx_event_cb cb;
int cgx, lmac, err;
void *cgxd;
@@ -256,7 +306,8 @@ static int cgx_lmac_event_handler_init(struct rvu *rvu)
cgxd = rvu_cgx_pdata(cgx, rvu);
if (!cgxd)
continue;
- for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++) {
+ lmac_bmap = cgx_get_lmac_bmap(cgxd);
+ for_each_set_bit(lmac, &lmac_bmap, MAX_LMAC_PER_CGX) {
err = cgx_lmac_evh_register(&cb, cgxd, lmac);
if (err)
dev_err(rvu->dev,
@@ -288,7 +339,7 @@ int rvu_cgx_init(struct rvu *rvu)
rvu->cgx_cnt_max = cgx_get_cgxcnt_max();
if (!rvu->cgx_cnt_max) {
dev_info(rvu->dev, "No CGX devices found!\n");
- return -ENODEV;
+ return 0;
}
rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt_max *
@@ -334,6 +385,7 @@ int rvu_cgx_init(struct rvu *rvu)
int rvu_cgx_exit(struct rvu *rvu)
{
+ unsigned long lmac_bmap;
int cgx, lmac;
void *cgxd;
@@ -341,7 +393,8 @@ int rvu_cgx_exit(struct rvu *rvu)
cgxd = rvu_cgx_pdata(cgx, rvu);
if (!cgxd)
continue;
- for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++)
+ lmac_bmap = cgx_get_lmac_bmap(cgxd);
+ for_each_set_bit(lmac, &lmac_bmap, MAX_LMAC_PER_CGX)
cgx_lmac_evh_unregister(cgxd, lmac);
}
@@ -356,7 +409,7 @@ int rvu_cgx_exit(struct rvu *rvu)
* VF's of mapped PF and other PFs are not allowed. This fn() checks
* whether a PFFUNC is permitted to do the config or not.
*/
-static bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc)
+bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc)
{
if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
@@ -366,6 +419,7 @@ static bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc)
void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable)
{
+ struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
void *cgxd;
@@ -375,26 +429,92 @@ void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable)
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ mac_ops = get_mac_ops(cgxd);
/* Set / clear CTL_BCK to control pause frame forwarding to NIX */
if (enable)
- cgx_lmac_enadis_rx_pause_fwding(cgxd, lmac_id, true);
+ mac_ops->mac_enadis_rx_pause_fwding(cgxd, lmac_id, true);
else
- cgx_lmac_enadis_rx_pause_fwding(cgxd, lmac_id, false);
+ mac_ops->mac_enadis_rx_pause_fwding(cgxd, lmac_id, false);
+}
+
+void rvu_cgx_enadis_higig2(struct rvu *rvu, int pf, bool enable)
+{
+ u8 cgx_id, lmac_id;
+ void *cgxd;
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ cgx_lmac_enadis_higig2(cgxd, lmac_id, enable);
+}
+
+bool rvu_cgx_is_higig2_enabled(struct rvu *rvu, int pf)
+{
+ u8 cgx_id, lmac_id;
+ void *cgxd;
+
+ if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_HIGIG2))
+ return 0;
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return false;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+
+ return is_higig2_enabled(cgxd, lmac_id);
}
int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
{
int pf = rvu_get_pf(pcifunc);
+ struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
+ void *cgxd;
if (!is_cgx_config_permitted(rvu, pcifunc))
- return -EPERM;
+ return LMAC_AF_ERR_PERM_DENIED;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ mac_ops = get_mac_ops(cgxd);
- cgx_lmac_rx_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, start);
+ return mac_ops->mac_rx_tx_enable(cgxd, lmac_id, start);
+}
- return 0;
+int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable)
+{
+ struct mac_ops *mac_ops;
+
+ mac_ops = get_mac_ops(cgxd);
+ return mac_ops->mac_tx_enable(cgxd, lmac_id, enable);
+}
+
+void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc)
+{
+ int pf = rvu_get_pf(pcifunc);
+ int i = 0, lmac_count = 0;
+ u8 max_dmac_filters;
+ u8 cgx_id, lmac_id;
+ void *cgx_dev;
+
+ if (!is_cgx_config_permitted(rvu, pcifunc))
+ return;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgx_dev = cgx_get_pdata(cgx_id);
+ lmac_count = cgx_get_lmac_cnt(cgx_dev);
+ max_dmac_filters = MAX_DMAC_ENTRIES_PER_CGX / lmac_count;
+
+ for (i = 0; i < max_dmac_filters; i++)
+ cgx_lmac_addr_del(cgx_id, lmac_id, i);
+
+	/* cgx_lmac_addr_del() does not clear the entry at index 0,
+	 * so it has to be cleared explicitly.
+ */
+ cgx_lmac_addr_reset(cgx_id, lmac_id);
}
int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req,
@@ -411,78 +531,194 @@ int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req,
return 0;
}
-int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req,
- struct cgx_stats_rsp *rsp)
+static int rvu_lmac_get_stats(struct rvu *rvu, struct msg_req *req,
+ void *rsp)
{
int pf = rvu_get_pf(req->hdr.pcifunc);
+ struct mac_ops *mac_ops;
int stat = 0, err = 0;
u64 tx_stat, rx_stat;
u8 cgx_idx, lmac;
void *cgxd;
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
- return -ENODEV;
+ return LMAC_AF_ERR_PERM_DENIED;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
cgxd = rvu_cgx_pdata(cgx_idx, rvu);
+ mac_ops = get_mac_ops(cgxd);
- /* Rx stats */
- while (stat < CGX_RX_STATS_COUNT) {
- err = cgx_get_rx_stats(cgxd, lmac, stat, &rx_stat);
+ while (stat < mac_ops->rx_stats_cnt) {
+ err = mac_ops->mac_get_rx_stats(cgxd, lmac, stat, &rx_stat);
if (err)
return err;
- rsp->rx_stats[stat] = rx_stat;
+ if (mac_ops->rx_stats_cnt == RPM_RX_STATS_COUNT)
+ ((struct rpm_stats_rsp *)rsp)->rx_stats[stat] = rx_stat;
+ else
+ ((struct cgx_stats_rsp *)rsp)->rx_stats[stat] = rx_stat;
stat++;
}
/* Tx stats */
stat = 0;
- while (stat < CGX_TX_STATS_COUNT) {
- err = cgx_get_tx_stats(cgxd, lmac, stat, &tx_stat);
+ while (stat < mac_ops->tx_stats_cnt) {
+ err = mac_ops->mac_get_tx_stats(cgxd, lmac, stat, &tx_stat);
if (err)
return err;
- rsp->tx_stats[stat] = tx_stat;
+ if (mac_ops->tx_stats_cnt == RPM_TX_STATS_COUNT)
+ ((struct rpm_stats_rsp *)rsp)->tx_stats[stat] = tx_stat;
+ else
+ ((struct cgx_stats_rsp *)rsp)->tx_stats[stat] = tx_stat;
stat++;
}
return 0;
}
+int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req,
+ struct cgx_stats_rsp *rsp)
+{
+ return rvu_lmac_get_stats(rvu, req, (void *)rsp);
+}
+
+int rvu_mbox_handler_rpm_stats(struct rvu *rvu, struct msg_req *req,
+ struct rpm_stats_rsp *rsp)
+{
+ return rvu_lmac_get_stats(rvu, req, (void *)rsp);
+}
+
+int rvu_mbox_handler_cgx_fec_stats(struct rvu *rvu,
+ struct msg_req *req,
+ struct cgx_fec_stats_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_idx, lmac;
+ void *cgxd;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return LMAC_AF_ERR_PERM_DENIED;
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
+
+ cgxd = rvu_cgx_pdata(cgx_idx, rvu);
+ return cgx_get_fec_stats(cgxd, lmac, rsp);
+}
+
+int rvu_mbox_handler_cgx_stats_rst(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ struct rvu_pfvf *parent_pf;
+ u8 cgx_idx, lmac;
+ void *cgxd;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return LMAC_AF_ERR_PERM_DENIED;
+
+ parent_pf = &rvu->pf[pf];
+	/* To ensure that resetting CGX stats won't affect VF stats,
+	 * check whether the CGX is used only by the PF interface.
+	 * If not, return.
+ */
+ if (parent_pf->cgx_users > 1) {
+ dev_info(rvu->dev, "CGX busy, could not reset statistics\n");
+ return 0;
+ }
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
+ cgxd = rvu_cgx_pdata(cgx_idx, rvu);
+
+ return cgx_stats_rst(cgxd, lmac);
+}
+
int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
struct cgx_mac_addr_set_or_get *req,
struct cgx_mac_addr_set_or_get *rsp)
{
int pf = rvu_get_pf(req->hdr.pcifunc);
+ struct rvu_pfvf *pfvf;
u8 cgx_id, lmac_id;
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
- return -EPERM;
+ return LMAC_AF_ERR_PF_NOT_MAPPED;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ pfvf = &rvu->pf[pf];
+ memcpy(pfvf->mac_addr, req->mac_addr, ETH_ALEN);
cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr);
return 0;
}
-int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
- struct cgx_mac_addr_set_or_get *req,
- struct cgx_mac_addr_set_or_get *rsp)
+int rvu_mbox_handler_cgx_mac_addr_add(struct rvu *rvu,
+ struct cgx_mac_addr_add_req *req,
+ struct cgx_mac_addr_add_rsp *rsp)
{
int pf = rvu_get_pf(req->hdr.pcifunc);
u8 cgx_id, lmac_id;
- int rc = 0, i;
- u64 cfg;
+ int rc = 0;
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
- return -EPERM;
+ return LMAC_AF_ERR_PERM_DENIED;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ rc = cgx_lmac_addr_add(cgx_id, lmac_id, req->mac_addr);
+ if (rc >= 0) {
+ rsp->index = rc;
+ return 0;
+ }
+
+ return rc;
+}
+
+int rvu_mbox_handler_cgx_mac_addr_del(struct rvu *rvu,
+ struct cgx_mac_addr_del_req *req,
+ struct msg_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return LMAC_AF_ERR_PERM_DENIED;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ return cgx_lmac_addr_del(cgx_id, lmac_id, req->index);
+}
+
+int rvu_mbox_handler_cgx_mac_max_entries_get(struct rvu *rvu,
+ struct msg_req *req,
+ struct cgx_max_dmac_entries_get_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+	/* If the msg is received from PFs (which are not mapped to CGX LMACs)
+	 * or from a VF, then no entries are allocated for DMAC filters at the
+	 * CGX level. So return zero.
+ */
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) {
+ rsp->max_dmac_filters = 0;
+ return 0;
+ }
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ rsp->max_dmac_filters = cgx_lmac_addr_max_entries_get(cgx_id, lmac_id);
+ return 0;
+}
+
+int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
+ struct cgx_mac_addr_set_or_get *req,
+ struct cgx_mac_addr_set_or_get *rsp)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+ int i;
- rsp->hdr.rc = rc;
- cfg = cgx_lmac_addr_get(cgx_id, lmac_id);
- /* copy 48 bit mac address to req->mac_addr */
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return LMAC_AF_ERR_PF_NOT_MAPPED;
+
+ /* copy 48 bit mac address to rsp->mac_addr */
for (i = 0; i < ETH_ALEN; i++)
- rsp->mac_addr[i] = cfg >> (ETH_ALEN - 1 - i) * 8;
+ rsp->mac_addr[i] = pfvf->mac_addr[i];
+
return 0;
}
@@ -494,7 +730,7 @@ int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
u8 cgx_id, lmac_id;
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
- return -EPERM;
+ return LMAC_AF_ERR_PERM_DENIED;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
@@ -509,7 +745,7 @@ int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
u8 cgx_id, lmac_id;
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
- return -EPERM;
+ return LMAC_AF_ERR_PERM_DENIED;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
@@ -517,30 +753,59 @@ int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
return 0;
}
+static void cgx_notify_up_ptp_info(struct rvu *rvu, int pf, bool enable)
+{
+ struct cgx_ptp_rx_info_msg *msg;
+ int err;
+
+ /* Send mbox message to PF */
+ msg = otx2_mbox_alloc_msg_cgx_ptp_rx_info(rvu, pf);
+ if (!msg) {
+ dev_err(rvu->dev, "ptp notification to pf %d failed\n", pf);
+ return;
+ }
+
+ msg->ptp_en = enable;
+ otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pf);
+ err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pf);
+ if (err)
+ dev_err(rvu->dev, "ptp notification to pf %d failed\n", pf);
+}
+
static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
int pf = rvu_get_pf(pcifunc);
+ struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
void *cgxd;
- /* This msg is expected only from PFs that are mapped to CGX LMACs,
- * if received from other PF/VF simply ACK, nothing to do.
- */
- if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
- !is_pf_cgxmapped(rvu, pf))
- return -ENODEV;
+ if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
+ return 0;
+
+ if (!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
+ return -EPERM;
+
+ /* Silicon does not support enabling time stamp in higig mode */
+ if (rvu_cgx_is_higig2_enabled(rvu, pf))
+ return NIX_AF_ERR_PTP_CONFIG_FAIL;
+
+ cgx_notify_up_ptp_info(rvu, pf, enable);
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
cgxd = rvu_cgx_pdata(cgx_id, rvu);
- cgx_lmac_ptp_config(cgxd, lmac_id, enable);
+ mac_ops = get_mac_ops(cgxd);
+ mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, enable);
+
/* If PTP is enabled then inform NPC that packets to be
* parsed by this PF will have their data shifted by 8 bytes
* and if PTP is disabled then no shift is required
*/
if (npc_config_ts_kpuaction(rvu, pf, pcifunc, enable))
- return -EINVAL;
-
+ return NIX_AF_ERR_PTP_CONFIG_FAIL;
+ /* This flag is required to clean up CGX conf if app gets killed */
+ pfvf->hw_rx_tstamp_en = enable;
return 0;
}
@@ -562,7 +827,7 @@ static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en)
u8 cgx_id, lmac_id;
if (!is_cgx_config_permitted(rvu, pcifunc))
- return -EPERM;
+ return LMAC_AF_ERR_PERM_DENIED;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
@@ -600,7 +865,7 @@ int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req,
pf = rvu_get_pf(req->hdr.pcifunc);
if (!is_pf_cgxmapped(rvu, pf))
- return -ENODEV;
+ return LMAC_AF_ERR_PF_NOT_MAPPED;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
@@ -612,14 +877,16 @@ int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req,
static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en)
{
int pf = rvu_get_pf(pcifunc);
+ struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
if (!is_cgx_config_permitted(rvu, pcifunc))
- return -EPERM;
+ return LMAC_AF_ERR_PERM_DENIED;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu));
- return cgx_lmac_internal_loopback(rvu_cgx_pdata(cgx_id, rvu),
+ return mac_ops->mac_lmac_intl_lbk(rvu_cgx_pdata(cgx_id, rvu),
lmac_id, en);
}
@@ -637,28 +904,84 @@ int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
return 0;
}
+int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause)
+{
+ int pf = rvu_get_pf(pcifunc);
+ u8 rx_pfc = 0, tx_pfc = 0;
+ struct mac_ops *mac_ops;
+ u8 cgx_id, lmac_id;
+ void *cgxd;
+
+ if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_FC))
+ return 0;
+
+	/* This msg is expected only from PF/VFs that are mapped to CGX LMACs;
+	 * if received from any other PF/VF, simply ACK, nothing to do.
+ */
+ if (!is_pf_cgxmapped(rvu, pf))
+ return LMAC_AF_ERR_PF_NOT_MAPPED;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ mac_ops = get_mac_ops(cgxd);
+
+ mac_ops->mac_get_pfc_frm_cfg(cgxd, lmac_id, &tx_pfc, &rx_pfc);
+ if (tx_pfc || rx_pfc) {
+ dev_warn(rvu->dev,
+ "Can not configure 802.3X flow control as PFC frames are enabled");
+ return LMAC_AF_ERR_8023PAUSE_ENADIS_PERM_DENIED;
+ }
+
+ mutex_lock(&rvu->rsrc_lock);
+ if (verify_lmac_fc_cfg(cgxd, lmac_id, tx_pause, rx_pause,
+ pcifunc & RVU_PFVF_FUNC_MASK)) {
+ mutex_unlock(&rvu->rsrc_lock);
+ return LMAC_AF_ERR_PERM_DENIED;
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+
+ return mac_ops->mac_enadis_pause_frm(cgxd, lmac_id, tx_pause, rx_pause);
+}
+
int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
struct cgx_pause_frm_cfg *req,
struct cgx_pause_frm_cfg *rsp)
{
int pf = rvu_get_pf(req->hdr.pcifunc);
+ struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
+ int err = 0;
+ void *cgxd;
/* This msg is expected only from PF/VFs that are mapped to CGX LMACs,
* if received from other PF/VF simply ACK, nothing to do.
*/
if (!is_pf_cgxmapped(rvu, pf))
- return -ENODEV;
+ return LMAC_AF_ERR_PF_NOT_MAPPED;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ mac_ops = get_mac_ops(cgxd);
if (req->set)
- cgx_lmac_set_pause_frm(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
- req->tx_pause, req->rx_pause);
+ err = rvu_cgx_cfg_pause_frm(rvu, req->hdr.pcifunc, req->tx_pause, req->rx_pause);
else
- cgx_lmac_get_pause_frm(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
- &rsp->tx_pause, &rsp->rx_pause);
- return 0;
+ mac_ops->mac_get_pause_frm_status(cgxd, lmac_id, &rsp->tx_pause, &rsp->rx_pause);
+
+ return err;
+}
+
+int rvu_mbox_handler_cgx_get_phy_fec_stats(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return LMAC_AF_ERR_PF_NOT_MAPPED;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ return cgx_get_phy_fec_stats(rvu_cgx_pdata(cgx_id, rvu), lmac_id);
}
/* Finds cumulative status of NIX rx/tx counters from LF of a PF and those
@@ -706,6 +1029,42 @@ int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id,
return 0;
}
+/* Don't allow CGX mapped VFs to overwrite the PKIND config
+ * in case special PKINDs such as HIGIG/EDSA are configured.
+ */
+bool rvu_cgx_is_pkind_config_permitted(struct rvu *rvu, u16 pcifunc)
+{
+ int rc, pf, rxpkind;
+ u8 cgx_id, lmac_id;
+
+ pf = rvu_get_pf(pcifunc);
+
+	/* Return true here for PFs or non-CGX interfaces */
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK))
+ return true;
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return true;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ rc = cgx_get_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, &rxpkind);
+ if (rc)
+ return false;
+
+ switch (rxpkind) {
+ /* Add here specific pkinds reserved for pkt parsing */
+ case NPC_RX_HIGIG_PKIND:
+ case NPC_RX_EDSA_PKIND:
+ rc = false;
+ break;
+ default:
+ rc = true;
+ }
+
+ return rc;
+}
+
int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start)
{
struct rvu_pfvf *parent_pf, *pfvf;
@@ -752,3 +1111,231 @@ exit:
mutex_unlock(&rvu->cgx_cfg_lock);
return err;
}
+
+int rvu_mbox_handler_cgx_set_fec_param(struct rvu *rvu,
+ struct fec_mode *req,
+ struct fec_mode *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return LMAC_AF_ERR_PF_NOT_MAPPED;
+
+ if (req->fec == OTX2_FEC_OFF)
+ req->fec = OTX2_FEC_NONE;
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ rsp->fec = cgx_set_fec(req->fec, cgx_id, lmac_id);
+ return 0;
+}
+
+int rvu_mbox_handler_cgx_get_aux_link_info(struct rvu *rvu, struct msg_req *req,
+ struct cgx_fw_data *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ if (!rvu->fwdata)
+ return LMAC_AF_ERR_FIRMWARE_DATA_NOT_MAPPED;
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return LMAC_AF_ERR_PF_NOT_MAPPED;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ memcpy(&rsp->fwdata, &rvu->fwdata->cgx_fw_data[cgx_id][lmac_id],
+ sizeof(struct cgx_lmac_fwdata_s));
+ return 0;
+}
+
+int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu,
+ struct cgx_set_link_mode_req *req,
+ struct cgx_set_link_mode_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_idx, lmac;
+ void *cgxd;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return LMAC_AF_ERR_PERM_DENIED;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
+ cgxd = rvu_cgx_pdata(cgx_idx, rvu);
+ rsp->status = cgx_set_link_mode(cgxd, req->args, cgx_idx, lmac);
+ return 0;
+}
+
+int rvu_mbox_handler_cgx_set_link_state(struct rvu *rvu,
+ struct cgx_set_link_state_msg *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ u8 cgx_id, lmac_id;
+ int pf, err;
+
+ pf = rvu_get_pf(pcifunc);
+
+ if (!is_cgx_config_permitted(rvu, pcifunc))
+ return LMAC_AF_ERR_PERM_DENIED;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ err = cgx_set_link_state(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
+ !!req->enable);
+ if (err)
+ dev_warn(rvu->dev, "Cannot set link state to %s, err %d\n",
+ (req->enable) ? "enable" : "disable", err);
+
+ return err;
+}
+
+int rvu_mbox_handler_cgx_set_phy_mod_type(struct rvu *rvu,
+ struct cgx_phy_mod_type *req,
+ struct msg_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return LMAC_AF_ERR_PF_NOT_MAPPED;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ return cgx_set_phy_mod_type(req->mod, rvu_cgx_pdata(cgx_id, rvu),
+ lmac_id);
+}
+
+int rvu_mbox_handler_cgx_get_phy_mod_type(struct rvu *rvu, struct msg_req *req,
+ struct cgx_phy_mod_type *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return LMAC_AF_ERR_PF_NOT_MAPPED;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ rsp->mod = cgx_get_phy_mod_type(rvu_cgx_pdata(cgx_id, rvu), lmac_id);
+ if (rsp->mod < 0)
+ return rsp->mod;
+ return 0;
+}
+
+int rvu_mbox_handler_cgx_features_get(struct rvu *rvu,
+ struct msg_req *req,
+ struct cgx_features_info_msg *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_idx, lmac;
+ void *cgxd;
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return 0;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
+ cgxd = rvu_cgx_pdata(cgx_idx, rvu);
+ rsp->lmac_features = cgx_features_get(cgxd);
+
+ return 0;
+}
+
+u32 rvu_cgx_get_fifolen(struct rvu *rvu)
+{
+ struct mac_ops *mac_ops;
+ u32 fifo_len;
+
+ mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
+ fifo_len = mac_ops ? mac_ops->fifo_len : 0;
+
+ return fifo_len;
+}
+
+int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return LMAC_AF_ERR_PERM_DENIED;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ return cgx_lmac_addr_reset(cgx_id, lmac_id);
+}
+
+int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu,
+ struct cgx_mac_addr_update_req *req,
+ struct msg_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return LMAC_AF_ERR_PERM_DENIED;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ return cgx_lmac_addr_update(cgx_id, lmac_id, req->mac_addr, req->index);
+}
+
+int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause,
+ u8 rx_pause, u16 pfc_en)
+{
+ int pf = rvu_get_pf(pcifunc);
+ u8 rx_8023 = 0, tx_8023 = 0;
+ struct mac_ops *mac_ops;
+ u8 cgx_id, lmac_id;
+ void *cgxd;
+
+	/* This msg is expected only from PF/VFs that are mapped to CGX LMACs;
+	 * if received from any other PF/VF, simply ACK, nothing to do.
+ */
+ if (!is_pf_cgxmapped(rvu, pf))
+ return -ENODEV;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ mac_ops = get_mac_ops(cgxd);
+
+ mac_ops->mac_get_pause_frm_status(cgxd, lmac_id, &tx_8023, &rx_8023);
+ if (tx_8023 || rx_8023) {
+ dev_warn(rvu->dev,
+ "Can not configure PFC as 802.3X pause frames are enabled");
+ return LMAC_AF_ERR_PFC_ENADIS_PERM_DENIED;
+ }
+
+ mutex_lock(&rvu->rsrc_lock);
+ if (verify_lmac_fc_cfg(cgxd, lmac_id, tx_pause, rx_pause,
+ pcifunc & RVU_PFVF_FUNC_MASK)) {
+ mutex_unlock(&rvu->rsrc_lock);
+ return LMAC_AF_ERR_PERM_DENIED;
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+
+ return mac_ops->pfc_config(cgxd, lmac_id, tx_pause, rx_pause, pfc_en);
+}
+
+int rvu_mbox_handler_cgx_prio_flow_ctrl_cfg(struct rvu *rvu,
+ struct cgx_pfc_cfg *req,
+ struct cgx_pfc_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ struct mac_ops *mac_ops;
+ u8 cgx_id, lmac_id;
+ void *cgxd;
+ int err;
+
+	/* This msg is expected only from PF/VFs that are mapped to CGX LMACs;
+	 * if received from any other PF/VF, simply ACK, nothing to do.
+ */
+ if (!is_pf_cgxmapped(rvu, pf))
+ return -ENODEV;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ mac_ops = get_mac_ops(cgxd);
+
+ err = rvu_cgx_prio_flow_ctrl_cfg(rvu, req->hdr.pcifunc, req->tx_pause,
+ req->rx_pause, req->pfc_en);
+
+ mac_ops->mac_get_pfc_frm_cfg(cgxd, lmac_id, &rsp->tx_pause, &rsp->rx_pause);
+ return err;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
new file mode 100644
index 000000000000..4bf948417adb
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
@@ -0,0 +1,568 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RPM CN10K driver
+ *
+ * Copyright (C) 2020 Marvell.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/pci.h>
+#include "rvu.h"
+#include "cgx.h"
+#include "rvu_reg.h"
+
+/* RVU LMTST */
+#define LMT_TBL_OP_READ 0
+#define LMT_TBL_OP_WRITE 1
+#define LMT_MAP_TABLE_SIZE (128 * 1024)
+#define LMT_MAPTBL_ENTRY_SIZE 16
+
+static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val,
+ int lmt_tbl_op)
+{
+ void __iomem *lmt_map_base;
+ u64 tbl_base;
+
+ tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
+
+ lmt_map_base = ioremap_wc(tbl_base, LMT_MAP_TABLE_SIZE);
+ if (!lmt_map_base) {
+ dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
+ return -ENOMEM;
+ }
+
+ if (lmt_tbl_op == LMT_TBL_OP_READ) {
+ *val = readq(lmt_map_base + index);
+ } else {
+ writeq((*val), (lmt_map_base + index));
+		/* Flush the AP interceptor cache to make APR_LMT_MAP_ENTRY_S
+		 * changes effective. Writing 1 triggers the flush; the read
+		 * acts as a barrier and sets up a data dependency. Writing 0
+		 * after the write of 1 completes the flush.
+ */
+ rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CTL, BIT_ULL(0));
+ rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CTL);
+ rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CTL, 0x00);
+ }
+
+ iounmap(lmt_map_base);
+ return 0;
+}
+
+#define LMT_MAP_TBL_W1_OFF 8
+static u32 rvu_get_lmtst_tbl_index(struct rvu *rvu, u16 pcifunc)
+{
+ return ((rvu_get_pf(pcifunc) * rvu->hw->total_vfs) +
+ (pcifunc & RVU_PFVF_FUNC_MASK)) * LMT_MAPTBL_ENTRY_SIZE;
+}
+
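+/* Worked example for rvu_get_lmtst_tbl_index() above (illustrative,
+ * assumed values): with total_vfs = 256, the entry for PF2/VF3
+ * (func = 4, since func 0 is the PF itself) starts at
+ * ((2 * 256) + 4) * 16 = 8256 bytes into the LMT map table, and its
+ * word1 sits LMT_MAP_TBL_W1_OFF (8) bytes further in.
+ */
+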
+static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc,
+ u64 iova, u64 *lmt_addr)
+{
+ u64 pa, val, pf;
+ int err;
+
+ if (!iova) {
+ dev_err(rvu->dev, "%s Requested Null address for transulation\n", __func__);
+ return -EINVAL;
+ }
+
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_REQ, iova);
+ pf = rvu_get_pf(pcifunc) & 0x1F;
+ val = BIT_ULL(63) | BIT_ULL(14) | BIT_ULL(13) | pf << 8 |
+ ((pcifunc & RVU_PFVF_FUNC_MASK) & 0xFF);
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TXN_REQ, val);
+
+ err = rvu_poll_reg(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS, BIT_ULL(0), false);
+ if (err) {
+ dev_err(rvu->dev, "%s LMTLINE iova transulation failed\n", __func__);
+ return err;
+ }
+ val = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS);
+ if (val & ~0x1ULL) {
+ dev_err(rvu->dev, "%s LMTLINE iova transulation failed err:%llx\n", __func__, val);
+ return -EIO;
+ }
+ /* PA[51:12] = RVU_AF_SMMU_TLN_FLIT0[57:18]
+ * PA[11:0] = IOVA[11:0]
+ */
+ pa = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TLN_FLIT0) >> 18;
+ pa &= GENMASK_ULL(39, 0);
+ *lmt_addr = (pa << 12) | (iova & 0xFFF);
+
+ return 0;
+}
+
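+/* Worked example for rvu_get_lmtaddr() above (illustrative, assumed
+ * values): if RVU_AF_SMMU_TLN_FLIT0 reads back as 0x1234 << 18 and the
+ * requested IOVA ends in 0xabc, then pa = 0x1234 and the returned
+ * lmt_addr is (0x1234 << 12) | 0xabc = 0x1234abc.
+ */
+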
+static int rvu_update_lmtaddr(struct rvu *rvu, u16 pcifunc, u64 lmt_addr)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ u32 tbl_idx;
+ int err = 0;
+ u64 val;
+
+ /* Read the current lmt addr of pcifunc */
+ tbl_idx = rvu_get_lmtst_tbl_index(rvu, pcifunc);
+ err = lmtst_map_table_ops(rvu, tbl_idx, &val, LMT_TBL_OP_READ);
+ if (err) {
+ dev_err(rvu->dev,
+ "Failed to read LMT map table: index 0x%x err %d\n",
+ tbl_idx, err);
+ return err;
+ }
+
+	/* Storing the secondary's lmt base address as this needs to be
+ * reverted in FLR. Also making sure this default value doesn't
+ * get overwritten on multiple calls to this mailbox.
+ */
+ if (!pfvf->lmt_base_addr)
+ pfvf->lmt_base_addr = val;
+
+ /* Update the LMT table with new addr */
+ err = lmtst_map_table_ops(rvu, tbl_idx, &lmt_addr, LMT_TBL_OP_WRITE);
+ if (err) {
+ dev_err(rvu->dev,
+ "Failed to update LMT map table: index 0x%x err %d\n",
+ tbl_idx, err);
+ return err;
+ }
+ return 0;
+}
+
+int rvu_mbox_handler_lmtst_tbl_setup(struct rvu *rvu,
+ struct lmtst_tbl_setup_req *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+ u32 pri_tbl_idx, tbl_idx;
+ u64 lmt_addr;
+ int err = 0;
+ u64 val;
+
+	/* Check if the PF_FUNC wants to use its own local memory as the
+	 * LMTLINE region; if so, convert that IOVA to a physical address
+	 * and populate the LMT table with that address.
+ */
+ if (req->use_local_lmt_region) {
+ err = rvu_get_lmtaddr(rvu, req->hdr.pcifunc,
+ req->lmt_iova, &lmt_addr);
+ if (err < 0)
+ return err;
+
+ /* Update the lmt addr for this PFFUNC in the LMT table */
+ err = rvu_update_lmtaddr(rvu, req->hdr.pcifunc, lmt_addr);
+ if (err)
+ return err;
+ }
+
+	/* Reconfiguring the lmtst map table in LMT region shared mode, i.e.
+	 * making multiple PF_FUNCs share an LMTLINE region. The primary/base
+	 * pcifunc (passed as an argument to the mailbox) is the one whose
+	 * lmt base address is shared with the secondary pcifuncs (the
+	 * callers of this mailbox).
+ */
+ if (req->base_pcifunc) {
+ /* Calculating the LMT table index equivalent to primary
+ * pcifunc.
+ */
+ pri_tbl_idx = rvu_get_lmtst_tbl_index(rvu, req->base_pcifunc);
+
+ /* Read the base lmt addr of the primary pcifunc */
+ err = lmtst_map_table_ops(rvu, pri_tbl_idx, &val,
+ LMT_TBL_OP_READ);
+ if (err) {
+ dev_err(rvu->dev,
+ "Failed to read LMT map table: index 0x%x err %d\n",
+ pri_tbl_idx, err);
+ goto error;
+ }
+
+ /* Update the base lmt addr of secondary with primary's base
+ * lmt addr.
+ */
+ err = rvu_update_lmtaddr(rvu, req->hdr.pcifunc, val);
+ if (err)
+ return err;
+ }
+
+ /* This mailbox can also be used to update word1 of APR_LMT_MAP_ENTRY_S
+ * like enabling scheduled LMTST, disable LMTLINE prefetch, disable
+ * early completion for ordered LMTST.
+ */
+ if (req->sch_ena || req->dis_sched_early_comp || req->dis_line_pref) {
+ tbl_idx = rvu_get_lmtst_tbl_index(rvu, req->hdr.pcifunc);
+ err = lmtst_map_table_ops(rvu, tbl_idx + LMT_MAP_TBL_W1_OFF,
+ &val, LMT_TBL_OP_READ);
+ if (err) {
+ dev_err(rvu->dev,
+ "Failed to read LMT map table: index 0x%x err %d\n",
+ tbl_idx + LMT_MAP_TBL_W1_OFF, err);
+ goto error;
+ }
+
+ /* Storing lmt map table entry word1 default value as this needs
+ * to be reverted in FLR. Also making sure this default value
+ * doesn't get overwritten on multiple calls to this mailbox.
+ */
+ if (!pfvf->lmt_map_ent_w1)
+ pfvf->lmt_map_ent_w1 = val;
+
+ /* Disable early completion for Ordered LMTSTs. */
+ if (req->dis_sched_early_comp)
+ val |= (req->dis_sched_early_comp <<
+ APR_LMT_MAP_ENT_DIS_SCH_CMP_SHIFT);
+ /* Enable scheduled LMTST */
+ if (req->sch_ena)
+ val |= (req->sch_ena << APR_LMT_MAP_ENT_SCH_ENA_SHIFT) |
+ req->ssow_pf_func;
+ /* Disables LMTLINE prefetch before receiving store data. */
+ if (req->dis_line_pref)
+ val |= (req->dis_line_pref <<
+ APR_LMT_MAP_ENT_DIS_LINE_PREF_SHIFT);
+
+ err = lmtst_map_table_ops(rvu, tbl_idx + LMT_MAP_TBL_W1_OFF,
+ &val, LMT_TBL_OP_WRITE);
+ if (err) {
+ dev_err(rvu->dev,
+ "Failed to update LMT map table: index 0x%x err %d\n",
+ tbl_idx + LMT_MAP_TBL_W1_OFF, err);
+ goto error;
+ }
+ }
+
+error:
+ return err;
+}
+
+/* Resetting the lmtst map table to original default values */
+void rvu_reset_lmt_map_tbl(struct rvu *rvu, u16 pcifunc)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ u32 tbl_idx;
+ int err;
+
+ if (is_rvu_otx2(rvu))
+ return;
+
+ if (pfvf->lmt_base_addr || pfvf->lmt_map_ent_w1) {
+ /* This corresponds to lmt map table index */
+ tbl_idx = rvu_get_lmtst_tbl_index(rvu, pcifunc);
+		/* Revert to the original lmt base addr for the respective
+		 * pcifunc.
+ */
+ if (pfvf->lmt_base_addr) {
+ err = lmtst_map_table_ops(rvu, tbl_idx,
+ &pfvf->lmt_base_addr,
+ LMT_TBL_OP_WRITE);
+ if (err)
+ dev_err(rvu->dev,
+ "Failed to update LMT map table: index 0x%x err %d\n",
+ tbl_idx, err);
+ pfvf->lmt_base_addr = 0;
+ }
+		/* Revert to the original word1 value of the lmtst map table
+		 * entry that was changed.
+ */
+ if (pfvf->lmt_map_ent_w1) {
+ err = lmtst_map_table_ops(rvu,
+ tbl_idx + LMT_MAP_TBL_W1_OFF,
+ &pfvf->lmt_map_ent_w1,
+ LMT_TBL_OP_WRITE);
+ if (err)
+ dev_err(rvu->dev,
+ "Failed to update LMT map table: index 0x%x err %d\n",
+ tbl_idx + LMT_MAP_TBL_W1_OFF, err);
+ pfvf->lmt_map_ent_w1 = 0;
+ }
+ }
+}
+
+int rvu_set_channels_base(struct rvu *rvu)
+{
+ u16 nr_lbk_chans, nr_sdp_chans, nr_cgx_chans, nr_cpt_chans;
+ u16 sdp_chan_base, cgx_chan_base, cpt_chan_base;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u64 nix_const, nix_const1;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ nix_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
+ nix_const1 = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
+
+ hw->cgx = (nix_const >> 12) & 0xFULL;
+ hw->lmac_per_cgx = (nix_const >> 8) & 0xFULL;
+ hw->cgx_links = hw->cgx * hw->lmac_per_cgx;
+ hw->lbk_links = (nix_const >> 24) & 0xFULL;
+ hw->cpt_links = (nix_const >> 44) & 0xFULL;
+ hw->sdp_links = 1;
+
+ hw->cgx_chan_base = NIX_CHAN_CGX_LMAC_CHX(0, 0, 0);
+ hw->lbk_chan_base = NIX_CHAN_LBK_CHX(0, 0);
+ hw->sdp_chan_base = NIX_CHAN_SDP_CH_START;
+
+ /* No Programmable channels */
+ if (!(nix_const & BIT_ULL(60)))
+ return 0;
+
+ hw->cap.programmable_chans = true;
+
+	/* If programmable channels are present then configure the
+	 * channels such that all channel numbers are contiguous,
+	 * leaving no holes. This way the new CPT channels can be
+	 * accommodated. Channel numbers are assigned in the order
+	 * LBK, SDP, CGX and CPT. Also, the base channel number of a
+	 * block must be a multiple of the block's number of channels.
+ */
+ nr_lbk_chans = (nix_const >> 16) & 0xFFULL;
+ nr_sdp_chans = nix_const1 & 0xFFFULL;
+ nr_cgx_chans = nix_const & 0xFFULL;
+ nr_cpt_chans = (nix_const >> 32) & 0xFFFULL;
+
+ sdp_chan_base = hw->lbk_chan_base + hw->lbk_links * nr_lbk_chans;
+ /* Round up base channel to multiple of number of channels */
+ hw->sdp_chan_base = ALIGN(sdp_chan_base, nr_sdp_chans);
+
+ cgx_chan_base = hw->sdp_chan_base + hw->sdp_links * nr_sdp_chans;
+ hw->cgx_chan_base = ALIGN(cgx_chan_base, nr_cgx_chans);
+
+ cpt_chan_base = hw->cgx_chan_base + hw->cgx_links * nr_cgx_chans;
+ hw->cpt_chan_base = ALIGN(cpt_chan_base, nr_cpt_chans);
+
+	/* Out of the 4096 channels, start CPT from 2048 so
+	 * that the MSB is always set for CPT channels.
+ */
+ if (cpt_chan_base <= NIX_CHAN_CPT_CH_START) {
+ hw->cpt_chan_base = NIX_CHAN_CPT_CH_START;
+ } else {
+ dev_err(rvu->dev,
+ "CPT channels could not fit in the range 2048-4095\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
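+/* Worked example for rvu_set_channels_base() above (illustrative,
+ * assumed values): with lbk_chan_base = 0, 4 LBK links x 64 channels,
+ * 1 SDP link x 256 channels and 8 CGX links x 16 channels:
+ *   sdp_chan_base = ALIGN(0 + 4 * 64, 256)   = 256
+ *   cgx_chan_base = ALIGN(256 + 1 * 256, 16) = 512
+ *   cpt_chan_base = 512 + 8 * 16 = 640, which is <= 2048, so the CPT
+ *   base is pinned to NIX_CHAN_CPT_CH_START (2048).
+ */
+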
+#define LBK_CONNECT_NIXX(a) (0x0 + (a))
+
+static void __rvu_lbk_set_chans(struct rvu *rvu, void __iomem *base,
+ u64 offset, int lbkid, u16 chans)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u64 cfg;
+
+ cfg = readq(base + offset);
+ cfg &= ~(LBK_LINK_CFG_RANGE_MASK |
+ LBK_LINK_CFG_ID_MASK | LBK_LINK_CFG_BASE_MASK);
+ cfg |= FIELD_PREP(LBK_LINK_CFG_RANGE_MASK, ilog2(chans));
+ cfg |= FIELD_PREP(LBK_LINK_CFG_ID_MASK, lbkid);
+ cfg |= FIELD_PREP(LBK_LINK_CFG_BASE_MASK, hw->lbk_chan_base);
+
+ writeq(cfg, base + offset);
+}
+
+static void rvu_lbk_set_channels(struct rvu *rvu)
+{
+ struct pci_dev *pdev = NULL;
+ void __iomem *base;
+ u64 lbk_const;
+ u8 src, dst;
+ u16 chans;
+
+ /* To loopback packets between multiple NIX blocks,
+ * multiple LBK blocks are needed. With two NIX blocks,
+ * four LBK blocks are needed and each LBK block's
+ * source and destination are as follows:
+ * LBK0 - source NIX0 and destination NIX0
+ * LBK1 - source NIX0 and destination NIX1
+ * LBK2 - source NIX1 and destination NIX0
+ * LBK3 - source NIX1 and destination NIX1
+ * As per the HRM, channel numbers should be programmed as:
+ * P2X and X2P of LBK0 the same
+ * P2X and X2P of LBK3 the same
+ * P2X of LBK1 and X2P of LBK2 the same
+ * P2X of LBK2 and X2P of LBK1 the same
+ */
+ while (true) {
+ pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_LBK, pdev);
+ if (!pdev)
+ return;
+
+ base = pci_ioremap_bar(pdev, 0);
+ if (!base)
+ goto err_put;
+
+ lbk_const = readq(base + LBK_CONST);
+ chans = FIELD_GET(LBK_CONST_CHANS, lbk_const);
+ dst = FIELD_GET(LBK_CONST_DST, lbk_const);
+ src = FIELD_GET(LBK_CONST_SRC, lbk_const);
+
+ if (src == dst) {
+ if (src == LBK_CONNECT_NIXX(0)) { /* LBK0 */
+ __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
+ 0, chans);
+ __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
+ 0, chans);
+ } else if (src == LBK_CONNECT_NIXX(1)) { /* LBK3 */
+ __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
+ 1, chans);
+ __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
+ 1, chans);
+ }
+ } else {
+ if (src == LBK_CONNECT_NIXX(0)) { /* LBK1 */
+ __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
+ 0, chans);
+ __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
+ 1, chans);
+ } else if (src == LBK_CONNECT_NIXX(1)) { /* LBK2 */
+ __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
+ 1, chans);
+ __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
+ 0, chans);
+ }
+ }
+ iounmap(base);
+ }
+err_put:
+ pci_dev_put(pdev);
+}
+
+static void __rvu_nix_set_channels(struct rvu *rvu, int blkaddr)
+{
+ u64 nix_const1 = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
+ u64 nix_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
+ u16 cgx_chans, lbk_chans, sdp_chans, cpt_chans;
+ struct rvu_hwinfo *hw = rvu->hw;
+ int link, nix_link = 0;
+ u16 start;
+ u64 cfg;
+
+ cgx_chans = nix_const & 0xFFULL;
+ lbk_chans = (nix_const >> 16) & 0xFFULL;
+ sdp_chans = nix_const1 & 0xFFFULL;
+ cpt_chans = (nix_const >> 32) & 0xFFFULL;
+
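+ /* For every link, program its base channel and log2(channel count)
+ * into NIX_AF_LINKX_CFG; links are walked in CGX, LBK, SDP and CPT
+ * order with a running nix_link index.
+ */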
+ start = hw->cgx_chan_base;
+ for (link = 0; link < hw->cgx_links; link++, nix_link++) {
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
+ cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
+ cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(cgx_chans));
+ cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
+ rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
+ start += cgx_chans;
+ }
+
+ start = hw->lbk_chan_base;
+ for (link = 0; link < hw->lbk_links; link++, nix_link++) {
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
+ cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
+ cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(lbk_chans));
+ cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
+ rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
+ start += lbk_chans;
+ }
+
+ start = hw->sdp_chan_base;
+ for (link = 0; link < hw->sdp_links; link++, nix_link++) {
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
+ cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
+ cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(sdp_chans));
+ cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
+ rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
+ start += sdp_chans;
+ }
+
+ start = hw->cpt_chan_base;
+ for (link = 0; link < hw->cpt_links; link++, nix_link++) {
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
+ cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
+ cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(cpt_chans));
+ cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
+ rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
+ start += cpt_chans;
+ }
+}
+
+static void rvu_nix_set_channels(struct rvu *rvu)
+{
+ int blkaddr = 0;
+
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ while (blkaddr) {
+ __rvu_nix_set_channels(rvu, blkaddr);
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ }
+}
+
+static void __rvu_rpm_set_channels(int cgxid, int lmacid, u16 base)
+{
+ u64 cfg;
+
+ cfg = cgx_lmac_read(cgxid, lmacid, RPMX_CMRX_LINK_CFG);
+ cfg &= ~(RPMX_CMRX_LINK_BASE_MASK | RPMX_CMRX_LINK_RANGE_MASK);
+
+ /* There is no read-only constant register reporting the
+ * number of channels per LMAC; it is always 16.
+ */
+ cfg |= FIELD_PREP(RPMX_CMRX_LINK_RANGE_MASK, ilog2(16));
+ cfg |= FIELD_PREP(RPMX_CMRX_LINK_BASE_MASK, base);
+ cgx_lmac_write(cgxid, lmacid, RPMX_CMRX_LINK_CFG, cfg);
+}
+
+static void rvu_rpm_set_channels(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 base = hw->cgx_chan_base;
+ int cgx, lmac;
+
+ for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++) {
+ for (lmac = 0; lmac < hw->lmac_per_cgx; lmac++) {
+ __rvu_rpm_set_channels(cgx, lmac, base);
+ base += 16;
+ }
+ }
+}
+
+void rvu_program_channels(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+
+ if (!hw->cap.programmable_chans)
+ return;
+
+ rvu_nix_set_channels(rvu);
+ rvu_lbk_set_channels(rvu);
+ rvu_rpm_set_channels(rvu);
+}
+
+void rvu_sso_block_cn10k_init(struct rvu *rvu, int blkaddr)
+{
+ u64 reg;
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_WS_CFG);
+ /* Enable GET_WORK prefetching to the GWCs. */
+ reg &= ~BIT_ULL(4);
+ rvu_write64(rvu, blkaddr, SSO_AF_WS_CFG, reg);
+}
+
+void rvu_nix_block_cn10k_init(struct rvu *rvu, struct nix_hw *nix_hw)
+{
+ int blkaddr = nix_hw->blkaddr;
+
+ /* Set AF vWQE timer interval to a LF configurable range of
+ * 6.4us to 1.632ms.
+ */
+ rvu_write64(rvu, blkaddr, NIX_AF_VWQE_TIMER, 0x3FULL);
+}
+
+void rvu_apr_block_cn10k_init(struct rvu *rvu)
+{
+ u64 reg;
+
+ reg = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CFG);
+ reg |= 0xFULL << 35;
+ rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CFG, reg);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
new file mode 100644
index 000000000000..ae8c10089b79
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
@@ -0,0 +1,1110 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/bitfield.h>
+#include <linux/pci.h>
+#include "rvu_struct.h"
+#include "rvu_reg.h"
+#include "mbox.h"
+#include "rvu.h"
+
+/* CPT PF device id */
+#define PCI_DEVID_OTX2_CPT_PF 0xA0FD
+#define PCI_DEVID_OTX2_CPT10K_PF 0xA0F2
+
+/* Length of initial context fetch in 128 byte words */
+#define CPT_CTX_ILEN 2ULL
+
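+/* Scan CPT_AF_EXEX_STS for engines [e_min, e_max) and build per engine
+ * type busy/free bitmaps in the response: status bit 0 sets the busy
+ * map, bit 1 the free map.
+ */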
+#define cpt_get_eng_sts(e_min, e_max, rsp, etype) \
+({ \
+ u64 free_sts = 0, busy_sts = 0; \
+ typeof(rsp) _rsp = rsp; \
+ u32 e, i; \
+ \
+ for (e = (e_min), i = 0; e < (e_max); e++, i++) { \
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e)); \
+ if (reg & 0x1) \
+ busy_sts |= 1ULL << i; \
+ \
+ if (reg & 0x2) \
+ free_sts |= 1ULL << i; \
+ } \
+ (_rsp)->busy_sts_##etype = busy_sts; \
+ (_rsp)->free_sts_##etype = free_sts; \
+})
+
+static irqreturn_t rvu_cpt_af_flt_intr_handler(int irq, void *ptr)
+{
+ struct rvu_block *block = ptr;
+ struct rvu *rvu = block->rvu;
+ int blkaddr = block->addr;
+ u64 reg0, reg1, reg2 = 0;
+
+ reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0));
+ reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1));
+ if (!is_rvu_otx2(rvu)) {
+ reg2 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(2));
+ dev_err_ratelimited(rvu->dev,
+ "Received CPTAF FLT irq : 0x%llx, 0x%llx, 0x%llx",
+ reg0, reg1, reg2);
+ } else {
+ dev_err_ratelimited(rvu->dev,
+ "Received CPTAF FLT irq : 0x%llx, 0x%llx",
+ reg0, reg1);
+ }
+
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(0), reg0);
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(1), reg1);
+ if (!is_rvu_otx2(rvu))
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(2), reg2);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rvu_cpt_af_rvu_intr_handler(int irq, void *ptr)
+{
+ struct rvu_block *block = ptr;
+ struct rvu *rvu = block->rvu;
+ int blkaddr = block->addr;
+ u64 reg;
+
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_RVU_INT);
+ dev_err_ratelimited(rvu->dev, "Received CPTAF RVU irq : 0x%llx", reg);
+
+ rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT, reg);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rvu_cpt_af_ras_intr_handler(int irq, void *ptr)
+{
+ struct rvu_block *block = ptr;
+ struct rvu *rvu = block->rvu;
+ int blkaddr = block->addr;
+ u64 reg;
+
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_RAS_INT);
+ dev_err_ratelimited(rvu->dev, "Received CPTAF RAS irq : 0x%llx", reg);
+
+ rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT, reg);
+ return IRQ_HANDLED;
+}
+
+static int rvu_cpt_do_register_interrupt(struct rvu_block *block, int irq_offs,
+ irq_handler_t handler,
+ const char *name)
+{
+ struct rvu *rvu = block->rvu;
+ int ret;
+
+ ret = request_irq(pci_irq_vector(rvu->pdev, irq_offs), handler, 0,
+ name, block);
+ if (ret) {
+ dev_err(rvu->dev, "RVUAF: %s irq registration failed", name);
+ return ret;
+ }
+
+ WARN_ON(rvu->irq_allocated[irq_offs]);
+ rvu->irq_allocated[irq_offs] = true;
+ return 0;
+}
+
+static void cpt_10k_unregister_interrupts(struct rvu_block *block, int off)
+{
+ struct rvu *rvu = block->rvu;
+ int blkaddr = block->addr;
+ int i;
+
+ /* Disable all CPT AF interrupts */
+ for (i = 0; i < CPT_10K_AF_INT_VEC_RVU; i++)
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(i), 0x1);
+ rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1C, 0x1);
+ rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1C, 0x1);
+
+ for (i = 0; i < CPT_10K_AF_INT_VEC_CNT; i++)
+ if (rvu->irq_allocated[off + i]) {
+ free_irq(pci_irq_vector(rvu->pdev, off + i), block);
+ rvu->irq_allocated[off + i] = false;
+ }
+}
+
+static void cpt_unregister_interrupts(struct rvu *rvu, int blkaddr)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int i, offs;
+
+ if (!is_block_implemented(rvu->hw, blkaddr))
+ return;
+ offs = rvu_read64(rvu, blkaddr, CPT_PRIV_AF_INT_CFG) & 0x7FF;
+ if (!offs) {
+ dev_warn(rvu->dev,
+ "Failed to get CPT_AF_INT vector offsets\n");
+ return;
+ }
+ block = &hw->block[blkaddr];
+ if (!is_rvu_otx2(rvu))
+ return cpt_10k_unregister_interrupts(block, offs);
+
+ /* Disable all CPT AF interrupts */
+ for (i = 0; i < CPT_AF_INT_VEC_RVU; i++)
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(i), 0x1);
+ rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1C, 0x1);
+ rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1C, 0x1);
+
+ for (i = 0; i < CPT_AF_INT_VEC_CNT; i++)
+ if (rvu->irq_allocated[offs + i]) {
+ free_irq(pci_irq_vector(rvu->pdev, offs + i), block);
+ rvu->irq_allocated[offs + i] = false;
+ }
+}
+
+void rvu_cpt_unregister_interrupts(struct rvu *rvu)
+{
+ cpt_unregister_interrupts(rvu, BLKADDR_CPT0);
+ cpt_unregister_interrupts(rvu, BLKADDR_CPT1);
+}
+
+static int cpt_10k_register_interrupts(struct rvu_block *block, int off)
+{
+ struct rvu *rvu = block->rvu;
+ int blkaddr = block->addr;
+ int i, ret;
+
+ for (i = CPT_10K_AF_INT_VEC_FLT0; i < CPT_10K_AF_INT_VEC_RVU; i++) {
+ sprintf(&rvu->irq_name[(off + i) * NAME_SIZE], "CPTAF FLT%d", i);
+ ret = rvu_cpt_do_register_interrupt(block, off + i,
+ rvu_cpt_af_flt_intr_handler,
+ &rvu->irq_name[(off + i) * NAME_SIZE]);
+ if (ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), 0x1);
+ }
+
+ ret = rvu_cpt_do_register_interrupt(block, off + CPT_10K_AF_INT_VEC_RVU,
+ rvu_cpt_af_rvu_intr_handler,
+ "CPTAF RVU");
+ if (ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1S, 0x1);
+
+ ret = rvu_cpt_do_register_interrupt(block, off + CPT_10K_AF_INT_VEC_RAS,
+ rvu_cpt_af_ras_intr_handler,
+ "CPTAF RAS");
+ if (ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1S, 0x1);
+
+ return 0;
+err:
+ rvu_cpt_unregister_interrupts(rvu);
+ return ret;
+}
+
+static int cpt_register_interrupts(struct rvu *rvu, int blkaddr)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int i, offs, ret = 0;
+ char irq_name[16];
+
+ if (!is_block_implemented(rvu->hw, blkaddr))
+ return 0;
+
+ block = &hw->block[blkaddr];
+ offs = rvu_read64(rvu, blkaddr, CPT_PRIV_AF_INT_CFG) & 0x7FF;
+ if (!offs) {
+ dev_warn(rvu->dev,
+ "Failed to get CPT_AF_INT vector offsets\n");
+ return 0;
+ }
+
+ if (!is_rvu_otx2(rvu))
+ return cpt_10k_register_interrupts(block, offs);
+
+ for (i = CPT_AF_INT_VEC_FLT0; i < CPT_AF_INT_VEC_RVU; i++) {
+ snprintf(irq_name, sizeof(irq_name), "CPTAF FLT%d", i);
+ ret = rvu_cpt_do_register_interrupt(block, offs + i,
+ rvu_cpt_af_flt_intr_handler,
+ irq_name);
+ if (ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), 0x1);
+ }
+
+ ret = rvu_cpt_do_register_interrupt(block, offs + CPT_AF_INT_VEC_RVU,
+ rvu_cpt_af_rvu_intr_handler,
+ "CPTAF RVU");
+ if (ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1S, 0x1);
+
+ ret = rvu_cpt_do_register_interrupt(block, offs + CPT_AF_INT_VEC_RAS,
+ rvu_cpt_af_ras_intr_handler,
+ "CPTAF RAS");
+ if (ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1S, 0x1);
+
+ return 0;
+err:
+ rvu_cpt_unregister_interrupts(rvu);
+ return ret;
+}
+
+int rvu_cpt_register_interrupts(struct rvu *rvu)
+{
+ int ret;
+
+ ret = cpt_register_interrupts(rvu, BLKADDR_CPT0);
+ if (ret)
+ return ret;
+
+ return cpt_register_interrupts(rvu, BLKADDR_CPT1);
+}
+
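+/* Walk the PFs on this PCI domain and return the index of the one whose
+ * device ID matches a CPT PF, or -1 if none is found.
+ */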
+static int get_cpt_pf_num(struct rvu *rvu)
+{
+ int i, domain_nr, cpt_pf_num = -1;
+ struct pci_dev *pdev;
+
+ domain_nr = pci_domain_nr(rvu->pdev->bus);
+ for (i = 0; i < rvu->hw->total_pfs; i++) {
+ pdev = pci_get_domain_bus_and_slot(domain_nr, i + 1, 0);
+ if (!pdev)
+ continue;
+
+ if (pdev->device == PCI_DEVID_OTX2_CPT_PF ||
+ pdev->device == PCI_DEVID_OTX2_CPT10K_PF) {
+ cpt_pf_num = i;
+ put_device(&pdev->dev);
+ break;
+ }
+ put_device(&pdev->dev);
+ }
+ return cpt_pf_num;
+}
+
+static bool is_cpt_pf(struct rvu *rvu, u16 pcifunc)
+{
+ int cpt_pf_num = rvu->cpt_pf_num;
+
+ if (rvu_get_pf(pcifunc) != cpt_pf_num)
+ return false;
+ if (pcifunc & RVU_PFVF_FUNC_MASK)
+ return false;
+
+ return true;
+}
+
+static bool is_cpt_vf(struct rvu *rvu, u16 pcifunc)
+{
+ int cpt_pf_num = rvu->cpt_pf_num;
+
+ if (rvu_get_pf(pcifunc) != cpt_pf_num)
+ return false;
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK))
+ return false;
+
+ return true;
+}
+
+static int validate_and_get_cpt_blkaddr(int req_blkaddr)
+{
+ int blkaddr;
+
+ blkaddr = req_blkaddr ? req_blkaddr : BLKADDR_CPT0;
+ if (blkaddr != BLKADDR_CPT0 && blkaddr != BLKADDR_CPT1)
+ return -EINVAL;
+
+ return blkaddr;
+}
+
+int rvu_mbox_handler_cpt_lf_alloc(struct rvu *rvu,
+ struct cpt_lf_alloc_req_msg *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_block *block;
+ int cptlf, blkaddr;
+ int num_lfs, slot;
+ u64 val;
+
+ blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ if (req->eng_grpmsk == 0x0)
+ return CPT_AF_ERR_GRP_INVALID;
+
+ block = &rvu->hw->block[blkaddr];
+ num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
+ block->addr);
+ if (!num_lfs)
+ return CPT_AF_ERR_LF_INVALID;
+
+ /* Check if requested 'CPTLF <=> NIXLF' mapping is valid */
+ if (req->nix_pf_func) {
+ /* If default, use 'this' CPTLF's PFFUNC */
+ if (req->nix_pf_func == RVU_DEFAULT_PF_FUNC)
+ req->nix_pf_func = pcifunc;
+ if (!is_pffunc_map_valid(rvu, req->nix_pf_func, BLKTYPE_NIX))
+ return CPT_AF_ERR_NIX_PF_FUNC_INVALID;
+ }
+
+ /* Check if requested 'CPTLF <=> SSOLF' mapping is valid */
+ if (req->sso_pf_func) {
+ /* If default, use 'this' CPTLF's PFFUNC */
+ if (req->sso_pf_func == RVU_DEFAULT_PF_FUNC)
+ req->sso_pf_func = pcifunc;
+ if (!is_pffunc_map_valid(rvu, req->sso_pf_func, BLKTYPE_SSO))
+ return CPT_AF_ERR_SSO_PF_FUNC_INVALID;
+ }
+
+ for (slot = 0; slot < num_lfs; slot++) {
+ cptlf = rvu_get_lf(rvu, block, pcifunc, slot);
+ if (cptlf < 0)
+ return CPT_AF_ERR_LF_INVALID;
+
+ /* Set CPT LF group and priority */
+ val = (u64)req->eng_grpmsk << 48 | 1;
+ if (!is_rvu_otx2(rvu))
+ val |= (CPT_CTX_ILEN << 17);
+
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);
+
+ /* Set CPT LF NIX_PF_FUNC and SSO_PF_FUNC. EXE_LDWB is set
+ * on reset.
+ */
+ val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf));
+ val &= ~(GENMASK_ULL(63, 48) | GENMASK_ULL(47, 32));
+ val |= ((u64)req->nix_pf_func << 48 |
+ (u64)req->sso_pf_func << 32);
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), val);
+ }
+
+ return 0;
+}
+
+static int cpt_lf_free(struct rvu *rvu, struct msg_req *req, int blkaddr)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ int num_lfs, cptlf, slot, err;
+ struct rvu_block *block;
+
+ block = &rvu->hw->block[blkaddr];
+ num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
+ block->addr);
+ if (!num_lfs)
+ return 0;
+
+ for (slot = 0; slot < num_lfs; slot++) {
+ cptlf = rvu_get_lf(rvu, block, pcifunc, slot);
+ if (cptlf < 0)
+ return CPT_AF_ERR_LF_INVALID;
+
+ /* Perform teardown */
+ rvu_cpt_lf_teardown(rvu, pcifunc, blkaddr, cptlf, slot);
+
+ /* Reset LF */
+ err = rvu_lf_reset(rvu, block, cptlf);
+ if (err) {
+ dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n",
+ block->addr, cptlf);
+ }
+ }
+
+ return 0;
+}
+
+int rvu_mbox_handler_cpt_lf_free(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ int ret;
+
+ ret = cpt_lf_free(rvu, req, BLKADDR_CPT0);
+ if (ret)
+ return ret;
+
+ if (is_block_implemented(rvu->hw, BLKADDR_CPT1))
+ ret = cpt_lf_free(rvu, req, BLKADDR_CPT1);
+
+ return ret;
+}
+
+static int cpt_inline_ipsec_cfg_inbound(struct rvu *rvu, int blkaddr, u8 cptlf,
+ struct cpt_inline_ipsec_cfg_msg *req)
+{
+ u16 sso_pf_func = req->sso_pf_func;
+ u8 nix_sel;
+ u64 val;
+
+ val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf));
+ if (req->enable && (val & BIT_ULL(16))) {
+ /* IPsec inline outbound path is already enabled for this
+ * CPT LF. The HRM states that the inline inbound and outbound
+ * paths must not be enabled at the same time for a given CPT LF.
+ */
+ return CPT_AF_ERR_INLINE_IPSEC_INB_ENA;
+ }
+ /* Check if requested 'CPTLF <=> SSOLF' mapping is valid */
+ if (sso_pf_func && !is_pffunc_map_valid(rvu, sso_pf_func, BLKTYPE_SSO))
+ return CPT_AF_ERR_SSO_PF_FUNC_INVALID;
+
+ nix_sel = (blkaddr == BLKADDR_CPT1) ? 1 : 0;
+ /* Enable CPT LF for IPsec inline inbound operations */
+ if (req->enable)
+ val |= BIT_ULL(9);
+ else
+ val &= ~BIT_ULL(9);
+
+ val |= (u64)nix_sel << 8;
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);
+
+ if (sso_pf_func) {
+ /* Set SSO_PF_FUNC */
+ val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf));
+ val |= (u64)sso_pf_func << 32;
+ val |= (u64)req->nix_pf_func << 48;
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), val);
+ }
+ if (req->sso_pf_func_ovrd)
+ /* Set SSO_PF_FUNC_OVRD for inline IPSec */
+ rvu_write64(rvu, blkaddr, CPT_AF_ECO, 0x1);
+
+ /* Configure the X2P Link register with the cpt base channel number and
+ * range of channels it should propagate to X2P
+ */
+ if (!is_rvu_otx2(rvu)) {
+ val = (ilog2(NIX_CHAN_CPT_X2P_MASK + 1) << 16);
+ val |= (u64)rvu->hw->cpt_chan_base;
+
+ rvu_write64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(0), val);
+ rvu_write64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(1), val);
+ }
+
+ return 0;
+}
+
+static int cpt_inline_ipsec_cfg_outbound(struct rvu *rvu, int blkaddr, u8 cptlf,
+ struct cpt_inline_ipsec_cfg_msg *req)
+{
+ u16 nix_pf_func = req->nix_pf_func;
+ int nix_blkaddr;
+ u8 nix_sel;
+ u64 val;
+
+ val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf));
+ if (req->enable && (val & BIT_ULL(9))) {
+ /* IPsec inline inbound path is already enabled for this
+ * CPT LF. The HRM states that the inline inbound and outbound
+ * paths must not be enabled at the same time for a given CPT LF.
+ */
+ return CPT_AF_ERR_INLINE_IPSEC_OUT_ENA;
+ }
+
+ /* Check if requested 'CPTLF <=> NIXLF' mapping is valid */
+ if (nix_pf_func && !is_pffunc_map_valid(rvu, nix_pf_func, BLKTYPE_NIX))
+ return CPT_AF_ERR_NIX_PF_FUNC_INVALID;
+
+ /* Enable CPT LF for IPsec inline outbound operations */
+ if (req->enable)
+ val |= BIT_ULL(16);
+ else
+ val &= ~BIT_ULL(16);
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);
+
+ if (nix_pf_func) {
+ /* Set NIX_PF_FUNC */
+ val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf));
+ val |= (u64)nix_pf_func << 48;
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), val);
+
+ nix_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, nix_pf_func);
+ nix_sel = (nix_blkaddr == BLKADDR_NIX0) ? 0 : 1;
+
+ val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf));
+ val |= (u64)nix_sel << 8;
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);
+ }
+
+ return 0;
+}
+
+int rvu_mbox_handler_cpt_inline_ipsec_cfg(struct rvu *rvu,
+ struct cpt_inline_ipsec_cfg_msg *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_block *block;
+ int cptlf, blkaddr, ret;
+ u16 actual_slot;
+
+ blkaddr = rvu_get_blkaddr_from_slot(rvu, BLKTYPE_CPT, pcifunc,
+ req->slot, &actual_slot);
+ if (blkaddr < 0)
+ return CPT_AF_ERR_LF_INVALID;
+
+ block = &rvu->hw->block[blkaddr];
+
+ cptlf = rvu_get_lf(rvu, block, pcifunc, actual_slot);
+ if (cptlf < 0)
+ return CPT_AF_ERR_LF_INVALID;
+
+ switch (req->dir) {
+ case CPT_INLINE_INBOUND:
+ ret = cpt_inline_ipsec_cfg_inbound(rvu, blkaddr, cptlf, req);
+ break;
+
+ case CPT_INLINE_OUTBOUND:
+ ret = cpt_inline_ipsec_cfg_outbound(rvu, blkaddr, cptlf, req);
+ break;
+
+ default:
+ return CPT_AF_ERR_PARAM;
+ }
+
+ return ret;
+}
+
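+/* Validate a register access request from a CPT PF/VF: LF scoped
+ * CPT_AF_LFX_CTL/CTL2 offsets are translated from the caller's local
+ * slot to the global LF, while only a small set of AF registers is
+ * allowed for the PF itself.
+ */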
+static bool is_valid_offset(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req)
+{
+ u64 offset = req->reg_offset;
+ int blkaddr, num_lfs, lf;
+ struct rvu_block *block;
+ struct rvu_pfvf *pfvf;
+
+ blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
+ if (blkaddr < 0)
+ return false;
+
+ /* Registers that can be accessed from PF/VF */
+ if ((offset & 0xFF000) == CPT_AF_LFX_CTL(0) ||
+ (offset & 0xFF000) == CPT_AF_LFX_CTL2(0)) {
+ if (offset & 7)
+ return false;
+
+ lf = (offset & 0xFFF) >> 3;
+ block = &rvu->hw->block[blkaddr];
+ pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+ num_lfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
+ if (lf >= num_lfs)
+ /* Slot is not valid for that PF/VF */
+ return false;
+
+ /* Translate local LF used by VFs to global CPT LF */
+ lf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr],
+ req->hdr.pcifunc, lf);
+ if (lf < 0)
+ return false;
+
+ req->reg_offset &= 0xFF000;
+ req->reg_offset += lf << 3;
+ return true;
+ } else if (!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK)) {
+ /* Registers that can be accessed from PF */
+ switch (offset) {
+ case CPT_AF_DIAG:
+ case CPT_AF_CTL:
+ case CPT_AF_PF_FUNC:
+ case CPT_AF_BLK_RST:
+ case CPT_AF_CONSTANTS1:
+ case CPT_AF_CTX_FLUSH_TIMER:
+ return true;
+ }
+
+ switch (offset & 0xFF000) {
+ case CPT_AF_EXEX_STS(0):
+ case CPT_AF_EXEX_CTL(0):
+ case CPT_AF_EXEX_CTL2(0):
+ case CPT_AF_EXEX_UCODE_BASE(0):
+ if (offset & 7)
+ return false;
+ break;
+ default:
+ return false;
+ }
+ return true;
+ }
+ return false;
+}
+
+int rvu_mbox_handler_cpt_rd_wr_register(struct rvu *rvu,
+ struct cpt_rd_wr_reg_msg *req,
+ struct cpt_rd_wr_reg_msg *rsp)
+{
+ int blkaddr;
+
+ blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ /* This message is accepted only if sent from CPT PF/VF */
+ if (!is_cpt_pf(rvu, req->hdr.pcifunc) &&
+ !is_cpt_vf(rvu, req->hdr.pcifunc))
+ return CPT_AF_ERR_ACCESS_DENIED;
+
+ if (!is_valid_offset(rvu, req))
+ return CPT_AF_ERR_ACCESS_DENIED;
+
+ rsp->reg_offset = req->reg_offset;
+ rsp->ret_val = req->ret_val;
+ rsp->is_write = req->is_write;
+
+ if (req->is_write)
+ rvu_write64(rvu, blkaddr, req->reg_offset, req->val);
+ else
+ rsp->val = rvu_read64(rvu, blkaddr, req->reg_offset);
+
+ return 0;
+}
+
+static void get_ctx_pc(struct rvu *rvu, struct cpt_sts_rsp *rsp, int blkaddr)
+{
+ if (is_rvu_otx2(rvu))
+ return;
+
+ rsp->ctx_mis_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_MIS_PC);
+ rsp->ctx_hit_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_HIT_PC);
+ rsp->ctx_aop_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_AOP_PC);
+ rsp->ctx_aop_lat_pc = rvu_read64(rvu, blkaddr,
+ CPT_AF_CTX_AOP_LATENCY_PC);
+ rsp->ctx_ifetch_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_IFETCH_PC);
+ rsp->ctx_ifetch_lat_pc = rvu_read64(rvu, blkaddr,
+ CPT_AF_CTX_IFETCH_LATENCY_PC);
+ rsp->ctx_ffetch_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_FFETCH_PC);
+ rsp->ctx_ffetch_lat_pc = rvu_read64(rvu, blkaddr,
+ CPT_AF_CTX_FFETCH_LATENCY_PC);
+ rsp->ctx_wback_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_FFETCH_PC);
+ rsp->ctx_wback_lat_pc = rvu_read64(rvu, blkaddr,
+ CPT_AF_CTX_FFETCH_LATENCY_PC);
+ rsp->ctx_psh_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_FFETCH_PC);
+ rsp->ctx_psh_lat_pc = rvu_read64(rvu, blkaddr,
+ CPT_AF_CTX_FFETCH_LATENCY_PC);
+ rsp->ctx_err = rvu_read64(rvu, blkaddr, CPT_AF_CTX_ERR);
+ rsp->ctx_enc_id = rvu_read64(rvu, blkaddr, CPT_AF_CTX_ENC_ID);
+ rsp->ctx_flush_timer = rvu_read64(rvu, blkaddr, CPT_AF_CTX_FLUSH_TIMER);
+
+ rsp->rxc_time = rvu_read64(rvu, blkaddr, CPT_AF_RXC_TIME);
+ rsp->rxc_time_cfg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_TIME_CFG);
+ rsp->rxc_active_sts = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ACTIVE_STS);
+ rsp->rxc_zombie_sts = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ZOMBIE_STS);
+ rsp->rxc_dfrg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_DFRG);
+ rsp->x2p_link_cfg0 = rvu_read64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(0));
+ rsp->x2p_link_cfg1 = rvu_read64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(1));
+}
+
+static void get_eng_sts(struct rvu *rvu, struct cpt_sts_rsp *rsp, int blkaddr)
+{
+ u16 max_ses, max_ies, max_aes;
+ u32 e_min = 0, e_max = 0;
+ u64 reg;
+
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
+ max_ses = reg & 0xffff;
+ max_ies = (reg >> 16) & 0xffff;
+ max_aes = (reg >> 32) & 0xffff;
+
+ /* Get AE status */
+ e_min = max_ses + max_ies;
+ e_max = max_ses + max_ies + max_aes;
+ cpt_get_eng_sts(e_min, e_max, rsp, ae);
+ /* Get SE status */
+ e_min = 0;
+ e_max = max_ses;
+ cpt_get_eng_sts(e_min, e_max, rsp, se);
+ /* Get IE status */
+ e_min = max_ses;
+ e_max = max_ses + max_ies;
+ cpt_get_eng_sts(e_min, e_max, rsp, ie);
+}
+
+int rvu_mbox_handler_cpt_sts(struct rvu *rvu, struct cpt_sts_req *req,
+ struct cpt_sts_rsp *rsp)
+{
+ int blkaddr;
+
+ blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ /* This message is accepted only if sent from CPT PF/VF */
+ if (!is_cpt_pf(rvu, req->hdr.pcifunc) &&
+ !is_cpt_vf(rvu, req->hdr.pcifunc))
+ return CPT_AF_ERR_ACCESS_DENIED;
+
+ get_ctx_pc(rvu, rsp, blkaddr);
+
+ /* Get CPT engines status */
+ get_eng_sts(rvu, rsp, blkaddr);
+
+ /* Read CPT instruction PC registers */
+ rsp->inst_req_pc = rvu_read64(rvu, blkaddr, CPT_AF_INST_REQ_PC);
+ rsp->inst_lat_pc = rvu_read64(rvu, blkaddr, CPT_AF_INST_LATENCY_PC);
+ rsp->rd_req_pc = rvu_read64(rvu, blkaddr, CPT_AF_RD_REQ_PC);
+ rsp->rd_lat_pc = rvu_read64(rvu, blkaddr, CPT_AF_RD_LATENCY_PC);
+ rsp->rd_uc_pc = rvu_read64(rvu, blkaddr, CPT_AF_RD_UC_PC);
+ rsp->active_cycles_pc = rvu_read64(rvu, blkaddr,
+ CPT_AF_ACTIVE_CYCLES_PC);
+ rsp->exe_err_info = rvu_read64(rvu, blkaddr, CPT_AF_EXE_ERR_INFO);
+ rsp->cptclk_cnt = rvu_read64(rvu, blkaddr, CPT_AF_CPTCLK_CNT);
+ rsp->diag = rvu_read64(rvu, blkaddr, CPT_AF_DIAG);
+
+ return 0;
+}
+
+#define RXC_ZOMBIE_THRES GENMASK_ULL(59, 48)
+#define RXC_ZOMBIE_LIMIT GENMASK_ULL(43, 32)
+#define RXC_ACTIVE_THRES GENMASK_ULL(27, 16)
+#define RXC_ACTIVE_LIMIT GENMASK_ULL(11, 0)
+#define RXC_ACTIVE_COUNT GENMASK_ULL(60, 48)
+#define RXC_ZOMBIE_COUNT GENMASK_ULL(60, 48)
+
+static void cpt_rxc_time_cfg(struct rvu *rvu, struct cpt_rxc_time_cfg_req *req,
+ int blkaddr, struct cpt_rxc_time_cfg_req *save)
+{
+ u64 dfrg_reg;
+
+ if (save) {
+ /* Save older config */
+ dfrg_reg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_DFRG);
+ save->zombie_thres = FIELD_GET(RXC_ZOMBIE_THRES, dfrg_reg);
+ save->zombie_limit = FIELD_GET(RXC_ZOMBIE_LIMIT, dfrg_reg);
+ save->active_thres = FIELD_GET(RXC_ACTIVE_THRES, dfrg_reg);
+ save->active_limit = FIELD_GET(RXC_ACTIVE_LIMIT, dfrg_reg);
+
+ save->step = rvu_read64(rvu, blkaddr, CPT_AF_RXC_TIME_CFG);
+ }
+
+ dfrg_reg = FIELD_PREP(RXC_ZOMBIE_THRES, req->zombie_thres);
+ dfrg_reg |= FIELD_PREP(RXC_ZOMBIE_LIMIT, req->zombie_limit);
+ dfrg_reg |= FIELD_PREP(RXC_ACTIVE_THRES, req->active_thres);
+ dfrg_reg |= FIELD_PREP(RXC_ACTIVE_LIMIT, req->active_limit);
+
+ rvu_write64(rvu, blkaddr, CPT_AF_RXC_TIME_CFG, req->step);
+ rvu_write64(rvu, blkaddr, CPT_AF_RXC_DFRG, dfrg_reg);
+}
+
+int rvu_mbox_handler_cpt_rxc_time_cfg(struct rvu *rvu,
+ struct cpt_rxc_time_cfg_req *req,
+ struct msg_rsp *rsp)
+{
+ int blkaddr;
+
+ blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ /* This message is accepted only if sent from CPT PF/VF */
+ if (!is_cpt_pf(rvu, req->hdr.pcifunc) &&
+ !is_cpt_vf(rvu, req->hdr.pcifunc))
+ return CPT_AF_ERR_ACCESS_DENIED;
+
+ cpt_rxc_time_cfg(rvu, req, blkaddr, NULL);
+
+ return 0;
+}
+
+int rvu_mbox_handler_cpt_ctx_cache_sync(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ return rvu_cpt_ctx_flush(rvu, req->hdr.pcifunc);
+}
+
+static void cpt_rxc_teardown(struct rvu *rvu, int blkaddr)
+{
+ struct cpt_rxc_time_cfg_req req, prev;
+ int timeout = 2000;
+ u64 reg;
+
+ if (is_rvu_otx2(rvu))
+ return;
+
+ /* Set time limit to minimum values, so that rxc entries will be
+ * flushed out quickly.
+ */
+ req.step = 1;
+ req.zombie_thres = 1;
+ req.zombie_limit = 1;
+ req.active_thres = 1;
+ req.active_limit = 1;
+
+ cpt_rxc_time_cfg(rvu, &req, blkaddr, &prev);
+
+ do {
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ACTIVE_STS);
+ udelay(1);
+ if (FIELD_GET(RXC_ACTIVE_COUNT, reg))
+ timeout--;
+ else
+ break;
+ } while (timeout);
+
+ if (timeout == 0)
+ dev_warn(rvu->dev, "Poll for RXC active count hits hard loop counter\n");
+
+ timeout = 2000;
+ do {
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ZOMBIE_STS);
+ udelay(1);
+ if (FIELD_GET(RXC_ZOMBIE_COUNT, reg))
+ timeout--;
+ else
+ break;
+ } while (timeout);
+
+ if (timeout == 0)
+ dev_warn(rvu->dev, "Poll for RXC zombie count hits hard loop counter\n");
+
+ /* Restore config */
+ cpt_rxc_time_cfg(rvu, &prev, blkaddr, NULL);
+}
+
+#define INPROG_INFLIGHT(reg) ((reg) & 0x1FF)
+#define INPROG_GRB_PARTIAL(reg) ((reg) & BIT_ULL(31))
+#define INPROG_GRB(reg) (((reg) >> 32) & 0xFF)
+#define INPROG_GWB(reg) (((reg) >> 40) & 0xFF)
+
+static void cpt_lf_disable_iqueue(struct rvu *rvu, int blkaddr, int slot)
+{
+ int i = 0, hard_lp_ctr = 100000;
+ u64 inprog, grp_ptr;
+ u16 nq_ptr, dq_ptr;
+
+ /* Disable instructions enqueuing */
+ rvu_write64(rvu, blkaddr, CPT_AF_BAR2_ALIASX(slot, CPT_LF_CTL), 0x0);
+
+ /* Disable executions in the LF's queue */
+ inprog = rvu_read64(rvu, blkaddr,
+ CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG));
+ inprog &= ~BIT_ULL(16);
+ rvu_write64(rvu, blkaddr,
+ CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG), inprog);
+
+ /* Wait for CPT queue to become execution-quiescent */
+ do {
+ inprog = rvu_read64(rvu, blkaddr,
+ CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG));
+ if (INPROG_GRB_PARTIAL(inprog)) {
+ i = 0;
+ hard_lp_ctr--;
+ } else {
+ i++;
+ }
+
+ grp_ptr = rvu_read64(rvu, blkaddr,
+ CPT_AF_BAR2_ALIASX(slot,
+ CPT_LF_Q_GRP_PTR));
+ nq_ptr = (grp_ptr >> 32) & 0x7FFF;
+ dq_ptr = grp_ptr & 0x7FFF;
+
+ } while (hard_lp_ctr && (i < 10) && (nq_ptr != dq_ptr));
+
+ if (hard_lp_ctr == 0)
+ dev_warn(rvu->dev, "CPT FLR hits hard loop counter\n");
+
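+ /* Wait for the LF to go idle: the in-flight count must be zero and
+ * the GRB/GWB counts settled, and this condition must hold for ten
+ * consecutive reads before the queue is treated as quiescent.
+ */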
+ i = 0;
+ hard_lp_ctr = 100000;
+ do {
+ inprog = rvu_read64(rvu, blkaddr,
+ CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG));
+
+ if ((INPROG_INFLIGHT(inprog) == 0) &&
+ (INPROG_GWB(inprog) < 40) &&
+ ((INPROG_GRB(inprog) == 0) ||
+ (INPROG_GRB((inprog)) == 40))) {
+ i++;
+ } else {
+ i = 0;
+ hard_lp_ctr--;
+ }
+ } while (hard_lp_ctr && (i < 10));
+
+ if (hard_lp_ctr == 0)
+ dev_warn(rvu->dev, "CPT FLR hits hard loop counter\n");
+}
+
+int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf, int slot)
+{
+ u64 reg;
+
+ if (is_cpt_pf(rvu, pcifunc) || is_cpt_vf(rvu, pcifunc))
+ cpt_rxc_teardown(rvu, blkaddr);
+
+ /* Enable BAR2 ALIAS for this pcifunc. */
+ reg = BIT_ULL(16) | pcifunc;
+ rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, reg);
+
+ cpt_lf_disable_iqueue(rvu, blkaddr, slot);
+
+ /* Set group drop to help clear out hardware */
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG));
+ reg |= BIT_ULL(17);
+ rvu_write64(rvu, blkaddr, CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG), reg);
+
+ rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, 0);
+
+ return 0;
+}
+
+#define CPT_RES_LEN 16
+#define CPT_SE_IE_EGRP 1ULL
+
+static int cpt_inline_inb_lf_cmd_send(struct rvu *rvu, int blkaddr,
+ int nix_blkaddr)
+{
+ int cpt_pf_num = rvu->cpt_pf_num;
+ struct cpt_inst_lmtst_req *req;
+ dma_addr_t res_daddr;
+ int timeout = 3000;
+ u8 cpt_idx;
+ u64 *inst;
+ u16 *res;
+ int rc;
+
+ res = kzalloc(CPT_RES_LEN, GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+
+ res_daddr = dma_map_single(rvu->dev, res, CPT_RES_LEN,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(rvu->dev, res_daddr)) {
+ dev_err(rvu->dev, "DMA mapping failed for CPT result\n");
+ rc = -EFAULT;
+ goto res_free;
+ }
+ *res = 0xFFFF;
+
+ /* Send mbox message to CPT PF */
+ req = (struct cpt_inst_lmtst_req *)
+ otx2_mbox_alloc_msg_rsp(&rvu->afpf_wq_info.mbox_up,
+ cpt_pf_num, sizeof(*req),
+ sizeof(struct msg_rsp));
+ if (!req) {
+ rc = -ENOMEM;
+ goto res_daddr_unmap;
+ }
+ req->hdr.sig = OTX2_MBOX_REQ_SIG;
+ req->hdr.id = MBOX_MSG_CPT_INST_LMTST;
+
+ inst = req->inst;
+ /* Prepare CPT_INST_S */
+ inst[0] = 0;
+ inst[1] = res_daddr;
+ /* AF PF FUNC */
+ inst[2] = 0;
+ /* Set QORD */
+ inst[3] = 1;
+ inst[4] = 0;
+ inst[5] = 0;
+ inst[6] = 0;
+ /* Set EGRP */
+ inst[7] = CPT_SE_IE_EGRP << 61;
+
+ /* Subtract 1 from the NIX-CPT credit count to preserve
+ * credit counts.
+ */
+ cpt_idx = (blkaddr == BLKADDR_CPT0) ? 0 : 1;
+ rvu_write64(rvu, nix_blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
+ BIT_ULL(22) - 1);
+
+ otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, cpt_pf_num);
+ rc = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, cpt_pf_num);
+ if (rc)
+ dev_warn(rvu->dev, "notification to pf %d failed\n",
+ cpt_pf_num);
+ /* Wait for CPT instruction to be completed */
+ do {
+ mdelay(1);
+ if (*res == 0xFFFF)
+ timeout--;
+ else
+ break;
+ } while (timeout);
+
+ if (timeout == 0)
+ dev_warn(rvu->dev, "Poll for result hits hard loop counter\n");
+
+res_daddr_unmap:
+ dma_unmap_single(rvu->dev, res_daddr, CPT_RES_LEN, DMA_BIDIRECTIONAL);
+res_free:
+ kfree(res);
+
+ return 0;
+}
+
+#define CTX_CAM_PF_FUNC GENMASK_ULL(61, 46)
+#define CTX_CAM_CPTR GENMASK_ULL(45, 0)
+
+int rvu_cpt_ctx_flush(struct rvu *rvu, u16 pcifunc)
+{
+ int nix_blkaddr, blkaddr;
+ u16 max_ctx_entries, i;
+ int slot = 0, num_lfs;
+ u64 reg, cam_data;
+ int rc;
+
+ nix_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (nix_blkaddr < 0)
+ return -EINVAL;
+
+ if (is_rvu_otx2(rvu))
+ return 0;
+
+ blkaddr = (nix_blkaddr == BLKADDR_NIX1) ? BLKADDR_CPT1 : BLKADDR_CPT0;
+
+ /* Submit CPT_INST_S to track when all packets have been
+ * flushed through for the NIX PF FUNC in inline inbound case.
+ */
+ rc = cpt_inline_inb_lf_cmd_send(rvu, blkaddr, nix_blkaddr);
+ if (rc)
+ return rc;
+
+ /* Wait for rxc entries to be flushed out */
+ cpt_rxc_teardown(rvu, blkaddr);
+
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS0);
+ max_ctx_entries = (reg >> 48) & 0xFFF;
+
+ mutex_lock(&rvu->rsrc_lock);
+
+ num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
+ blkaddr);
+ if (num_lfs == 0) {
+ dev_warn(rvu->dev, "CPT LF is not configured\n");
+ goto unlock;
+ }
+
+ /* Enable BAR2 ALIAS for this pcifunc. */
+ reg = BIT_ULL(16) | pcifunc;
+ rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, reg);
+
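+ /* Scan the context CAM and issue a CPT_LF_CTX_FLUSH (bit 46 set along
+ * with the cached CPTR) for every entry owned by this pcifunc.
+ */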
+ for (i = 0; i < max_ctx_entries; i++) {
+ cam_data = rvu_read64(rvu, blkaddr, CPT_AF_CTX_CAM_DATA(i));
+
+ if ((FIELD_GET(CTX_CAM_PF_FUNC, cam_data) == pcifunc) &&
+ FIELD_GET(CTX_CAM_CPTR, cam_data)) {
+ reg = BIT_ULL(46) | FIELD_GET(CTX_CAM_CPTR, cam_data);
+ rvu_write64(rvu, blkaddr,
+ CPT_AF_BAR2_ALIASX(slot, CPT_LF_CTX_FLUSH),
+ reg);
+ }
+ }
+ rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, 0);
+
+unlock:
+ mutex_unlock(&rvu->rsrc_lock);
+
+ return 0;
+}
+
+int rvu_cpt_init(struct rvu *rvu)
+{
+ /* Retrieve CPT PF number */
+ rvu->cpt_pf_num = get_cpt_pf_num(rvu);
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index 5205796859f6..d71c3e51373a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -1,11 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
-/* Marvell OcteonTx2 RVU Admin Function driver
+/* Marvell RVU Admin Function driver
*
- * Copyright (C) 2019 Marvell International Ltd.
+ * Copyright (C) 2019 Marvell.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifdef CONFIG_DEBUG_FS
@@ -19,6 +16,7 @@
#include "rvu_reg.h"
#include "rvu.h"
#include "cgx.h"
+#include "lmac_common.h"
#include "npc.h"
#define DEBUGFS_DIR_NAME "octeontx2"
@@ -97,7 +95,7 @@ static char *cgx_tx_stats_fields[] = {
[CGX_STAT5] = "Total frames sent on the interface",
[CGX_STAT6] = "Packets sent with an octet count < 64",
[CGX_STAT7] = "Packets sent with an octet count == 64",
- [CGX_STAT8] = "Packets sent with an octet count of 65–127",
+ [CGX_STAT8] = "Packets sent with an octet count of 65-127",
[CGX_STAT9] = "Packets sent with an octet count of 128-255",
[CGX_STAT10] = "Packets sent with an octet count of 256-511",
[CGX_STAT11] = "Packets sent with an octet count of 512-1023",
@@ -109,6 +107,89 @@ static char *cgx_tx_stats_fields[] = {
[CGX_STAT17] = "Control/PAUSE packets sent",
};
+static char *rpm_rx_stats_fields[] = {
+ "Octets of received packets",
+ "Octets of received packets with out error",
+ "Received packets with alignment errors",
+ "Control/PAUSE packets received",
+ "Packets received with Frame too long Errors",
+ "Packets received with a1nrange length Errors",
+ "Received packets",
+ "Packets received with FrameCheckSequenceErrors",
+ "Packets received with VLAN header",
+ "Error packets",
+ "Packets received with unicast DMAC",
+ "Packets received with multicast DMAC",
+ "Packets received with broadcast DMAC",
+ "Dropped packets",
+ "Total frames received on interface",
+ "Packets received with an octet count < 64",
+ "Packets received with an octet count == 64",
+ "Packets received with an octet count of 65-127",
+ "Packets received with an octet count of 128-255",
+ "Packets received with an octet count of 256-511",
+ "Packets received with an octet count of 512-1023",
+ "Packets received with an octet count of 1024-1518",
+ "Packets received with an octet count of > 1518",
+ "Oversized Packets",
+ "Jabber Packets",
+ "Fragmented Packets",
+ "CBFC(class based flow control) pause frames received for class 0",
+ "CBFC pause frames received for class 1",
+ "CBFC pause frames received for class 2",
+ "CBFC pause frames received for class 3",
+ "CBFC pause frames received for class 4",
+ "CBFC pause frames received for class 5",
+ "CBFC pause frames received for class 6",
+ "CBFC pause frames received for class 7",
+ "CBFC pause frames received for class 8",
+ "CBFC pause frames received for class 9",
+ "CBFC pause frames received for class 10",
+ "CBFC pause frames received for class 11",
+ "CBFC pause frames received for class 12",
+ "CBFC pause frames received for class 13",
+ "CBFC pause frames received for class 14",
+ "CBFC pause frames received for class 15",
+ "MAC control packets received",
+};
+
+static char *rpm_tx_stats_fields[] = {
+ "Total octets sent on the interface",
+ "Total octets transmitted OK",
+ "Control/Pause frames sent",
+ "Total frames transmitted OK",
+ "Total frames sent with VLAN header",
+ "Error Packets",
+ "Packets sent to unicast DMAC",
+ "Packets sent to the multicast DMAC",
+ "Packets sent to a broadcast DMAC",
+ "Packets sent with an octet count == 64",
+ "Packets sent with an octet count of 65-127",
+ "Packets sent with an octet count of 128-255",
+ "Packets sent with an octet count of 256-511",
+ "Packets sent with an octet count of 512-1023",
+ "Packets sent with an octet count of 1024-1518",
+ "Packets sent with an octet count of > 1518",
+ "CBFC(class based flow control) pause frames transmitted for class 0",
+ "CBFC pause frames transmitted for class 1",
+ "CBFC pause frames transmitted for class 2",
+ "CBFC pause frames transmitted for class 3",
+ "CBFC pause frames transmitted for class 4",
+ "CBFC pause frames transmitted for class 5",
+ "CBFC pause frames transmitted for class 6",
+ "CBFC pause frames transmitted for class 7",
+ "CBFC pause frames transmitted for class 8",
+ "CBFC pause frames transmitted for class 9",
+ "CBFC pause frames transmitted for class 10",
+ "CBFC pause frames transmitted for class 11",
+ "CBFC pause frames transmitted for class 12",
+ "CBFC pause frames transmitted for class 13",
+ "CBFC pause frames transmitted for class 14",
+ "CBFC pause frames transmitted for class 15",
+ "MAC control packets sent",
+ "Total frames sent on the interface"
+};
+
#define NDC_MAX_BANK(rvu, blk_addr) (rvu_read64(rvu, \
blk_addr, NDC_AF_CONST) & 0xFF)
@@ -139,6 +220,96 @@ static const struct file_operations rvu_dbg_##name##_fops = { \
static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);
+#define LMT_MAPTBL_ENTRY_SIZE 16
+/* Dump LMTST map table */
+static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
+ char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct rvu *rvu = filp->private_data;
+ u64 lmt_addr, val, tbl_base;
+ int pf, vf, num_vfs, hw_vfs;
+ void __iomem *lmt_map_base;
+ int index = 0, off = 0;
+ int bytes_not_copied;
+ int buf_size = 10240;
+ char *buf;
+
+ /* don't allow partial reads */
+ if (*ppos != 0)
+ return 0;
+
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
+
+ lmt_map_base = ioremap_wc(tbl_base, 128 * 1024);
+ if (!lmt_map_base) {
+ dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
+ kfree(buf);
+ return false;
+ }
+
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ "\n\t\t\t\t\tLmtst Map Table Entries");
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ "\n\t\t\t\t\t=======================");
+ off += scnprintf(&buf[off], buf_size - 1 - off, "\nPcifunc\t\t\t");
+ off += scnprintf(&buf[off], buf_size - 1 - off, "Table Index\t\t");
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ "Lmtline Base (word 0)\t\t");
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ "Lmt Map Entry (word 1)");
+ off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
+ for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+ off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d \t\t\t",
+ pf);
+
+ index = pf * rvu->hw->total_vfs * LMT_MAPTBL_ENTRY_SIZE;
+ off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%llx\t\t",
+ (tbl_base + index));
+ lmt_addr = readq(lmt_map_base + index);
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ " 0x%016llx\t\t", lmt_addr);
+ index += 8;
+ val = readq(lmt_map_base + index);
+ off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%016llx\n",
+ val);
+ /* Reading num of VFs per PF */
+ rvu_get_pf_numvfs(rvu, pf, &num_vfs, &hw_vfs);
+ for (vf = 0; vf < num_vfs; vf++) {
+ index = (pf * rvu->hw->total_vfs * LMT_MAPTBL_ENTRY_SIZE) +
+ ((vf + 1) * LMT_MAPTBL_ENTRY_SIZE);
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ "PF%d:VF%d \t\t", pf, vf);
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ " 0x%llx\t\t", (tbl_base + index));
+ lmt_addr = readq(lmt_map_base + index);
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ " 0x%016llx\t\t", lmt_addr);
+ index += 8;
+ val = readq(lmt_map_base + index);
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ " 0x%016llx\n", val);
+ }
+ }
+ off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
+
+ bytes_not_copied = copy_to_user(buffer, buf, off);
+ kfree(buf);
+
+ iounmap(lmt_map_base);
+ if (bytes_not_copied)
+ return -EFAULT;
+
+ *ppos = off;
+ return off;
+}
+
+RVU_DEBUG_FOPS(lmtst_map_table, lmtst_map_table_display, NULL);
+
static void get_lf_str_list(struct rvu_block block, int pcifunc,
char *lfs)
{
@@ -237,6 +408,7 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
kfree(buf);
return -ENOMEM;
}
+
off += scnprintf(&buf[off], buf_size - 1 - off, "%-*s", lf_str_size,
"pcifunc");
for (index = 0; index < BLK_COUNT; index++)
@@ -312,18 +484,59 @@ out:
RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
-static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blktype, int lf,
+static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
+{
+ struct rvu *rvu = filp->private;
+ struct pci_dev *pdev = NULL;
+ struct mac_ops *mac_ops;
+ char cgx[10], lmac[10];
+ struct rvu_pfvf *pfvf;
+ int pf, domain, blkid;
+ u8 cgx_id, lmac_id;
+ u16 pcifunc;
+
+ domain = 2;
+ mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
+ /* There can be no CGX devices at all */
+ if (!mac_ops)
+ return 0;
+ seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\n",
+ mac_ops->name);
+ for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+ if (!is_pf_cgxmapped(rvu, pf))
+ continue;
+
+ pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0);
+ if (!pdev)
+ continue;
+
+ cgx[0] = 0;
+ lmac[0] = 0;
+ pcifunc = pf << 10;
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+
+ if (pfvf->nix_blkaddr == BLKADDR_NIX0)
+ blkid = 0;
+ else
+ blkid = 1;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id,
+ &lmac_id);
+ sprintf(cgx, "%s%d", mac_ops->name, cgx_id);
+ sprintf(lmac, "LMAC%d", lmac_id);
+ seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\n",
+ dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac);
+ }
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(rvu_pf_cgx_map, rvu_pf_cgx_map_display, NULL);
+
+static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blkaddr, int lf,
u16 *pcifunc)
{
struct rvu_block *block;
struct rvu_hwinfo *hw;
- int blkaddr;
-
- blkaddr = rvu_get_blkaddr(rvu, blktype, 0);
- if (blkaddr < 0) {
- dev_warn(rvu->dev, "Invalid blktype\n");
- return false;
- }
hw = rvu->hw;
block = &hw->block[blkaddr];
@@ -379,10 +592,12 @@ static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused,
{
void (*print_qsize)(struct seq_file *filp,
struct rvu_pfvf *pfvf) = NULL;
+ struct dentry *current_dir;
struct rvu_pfvf *pfvf;
struct rvu *rvu;
int qsize_id;
u16 pcifunc;
+ int blkaddr;
rvu = filp->private;
switch (blktype) {
@@ -400,7 +615,15 @@ static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused,
return -EINVAL;
}
- if (!rvu_dbg_is_valid_lf(rvu, blktype, qsize_id, &pcifunc))
+ if (blktype == BLKTYPE_NPA) {
+ blkaddr = BLKADDR_NPA;
+ } else {
+ current_dir = filp->file->f_path.dentry->d_parent;
+ blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
+ BLKADDR_NIX1 : BLKADDR_NIX0);
+ }
+
+ if (!rvu_dbg_is_valid_lf(rvu, blkaddr, qsize_id, &pcifunc))
return -EINVAL;
pfvf = rvu_get_pfvf(rvu, pcifunc);
@@ -417,6 +640,8 @@ static ssize_t rvu_dbg_qsize_write(struct file *filp,
struct seq_file *seqfile = filp->private_data;
char *cmd_buf, *cmd_buf_tmp, *subtoken;
struct rvu *rvu = seqfile->private;
+ struct dentry *current_dir;
+ int blkaddr;
u16 pcifunc;
int ret, lf;
@@ -438,12 +663,20 @@ static ssize_t rvu_dbg_qsize_write(struct file *filp,
if (cmd_buf)
ret = -EINVAL;
- if (!strncmp(subtoken, "help", 4) || ret < 0) {
+ if (ret < 0 || !strncmp(subtoken, "help", 4)) {
dev_info(rvu->dev, "Use echo <%s-lf > qsize\n", blk_string);
goto qsize_write_done;
}
- if (!rvu_dbg_is_valid_lf(rvu, blktype, lf, &pcifunc)) {
+ if (blktype == BLKTYPE_NPA) {
+ blkaddr = BLKADDR_NPA;
+ } else {
+ current_dir = filp->f_path.dentry->d_parent;
+ blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
+ BLKADDR_NIX1 : BLKADDR_NIX0);
+ }
+
+ if (!rvu_dbg_is_valid_lf(rvu, blkaddr, lf, &pcifunc)) {
ret = -EINVAL;
goto qsize_write_done;
}
@@ -476,6 +709,7 @@ RVU_DEBUG_SEQ_FOPS(npa_qsize, npa_qsize_display, npa_qsize_write);
static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
struct npa_aura_s *aura = &rsp->aura;
+ struct rvu *rvu = m->private;
seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr);
@@ -495,6 +729,9 @@ static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n",
(u64)aura->limit, aura->bp, aura->fc_ena);
+
+ if (!is_rvu_otx2(rvu))
+ seq_printf(m, "W3: fc_be\t\t%d\n", aura->fc_be);
seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n",
aura->fc_up_crossing, aura->fc_stype);
seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits);
@@ -512,12 +749,15 @@ static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx);
seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh);
+ if (!is_rvu_otx2(rvu))
+ seq_printf(m, "W6: fc_msh_dst\t\t%d\n", aura->fc_msh_dst);
}
/* Dumps given NPA Pool's context */
static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
struct npa_pool_s *pool = &rsp->pool;
+ struct rvu *rvu = m->private;
seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base);
@@ -539,6 +779,8 @@ static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
pool->avg_con, pool->fc_ena, pool->fc_stype);
seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n",
pool->fc_hyst_bits, pool->fc_up_crossing);
+ if (!is_rvu_otx2(rvu))
+ seq_printf(m, "W4: fc_be\t\t%d\n", pool->fc_be);
seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time);
seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr);
@@ -552,8 +794,10 @@ static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int);
seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n",
pool->thresh_int_ena, pool->thresh_up);
- seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t\t%d\n",
+ seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t%d\n",
pool->thresh_qint_idx, pool->err_qint_idx);
+ if (!is_rvu_otx2(rvu))
+ seq_printf(m, "W8: fc_msh_dst\t\t%d\n", pool->fc_msh_dst);
}
/* Reads aura/pool's ctx from admin queue */
@@ -586,7 +830,7 @@ static int rvu_dbg_npa_ctx_display(struct seq_file *m, void *unused, int ctype)
return -EINVAL;
}
- if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NPA, npalf, &pcifunc))
+ if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
return -EINVAL;
pfvf = rvu_get_pfvf(rvu, pcifunc);
@@ -644,7 +888,7 @@ static int write_npa_ctx(struct rvu *rvu, bool all,
int max_id = 0;
u16 pcifunc;
- if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NPA, npalf, &pcifunc))
+ if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
return -EINVAL;
pfvf = rvu_get_pfvf(rvu, pcifunc);
@@ -792,9 +1036,17 @@ static void ndc_cache_stats(struct seq_file *s, int blk_addr,
int ctype, int transaction)
{
u64 req, out_req, lat, cant_alloc;
- struct rvu *rvu = s->private;
+ struct nix_hw *nix_hw;
+ struct rvu *rvu;
int port;
+ if (blk_addr == BLKADDR_NDC_NPA0) {
+ rvu = s->private;
+ } else {
+ nix_hw = s->private;
+ rvu = nix_hw->rvu;
+ }
+
for (port = 0; port < NDC_MAX_PORT; port++) {
req = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_REQ_PC
(port, ctype, transaction));
@@ -837,9 +1089,17 @@ RVU_DEBUG_SEQ_FOPS(npa_ndc_cache, npa_ndc_cache_display, NULL);
static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
{
- struct rvu *rvu = s->private;
+ struct nix_hw *nix_hw;
+ struct rvu *rvu;
int bank, max_bank;
+ if (blk_addr == BLKADDR_NDC_NPA0) {
+ rvu = s->private;
+ } else {
+ nix_hw = s->private;
+ rvu = nix_hw->rvu;
+ }
+
max_bank = NDC_MAX_BANK(rvu, blk_addr);
for (bank = 0; bank < max_bank; bank++) {
seq_printf(s, "BANK:%d\n", bank);
@@ -855,16 +1115,30 @@ static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
static int rvu_dbg_nix_ndc_rx_cache_display(struct seq_file *filp, void *unused)
{
- return ndc_blk_cache_stats(filp, NIX0_RX,
- BLKADDR_NDC_NIX0_RX);
+ struct nix_hw *nix_hw = filp->private;
+ int blkaddr = 0;
+ int ndc_idx = 0;
+
+ blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
+ BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
+ ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_RX : NIX0_RX);
+
+ return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
}
RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_cache, nix_ndc_rx_cache_display, NULL);
static int rvu_dbg_nix_ndc_tx_cache_display(struct seq_file *filp, void *unused)
{
- return ndc_blk_cache_stats(filp, NIX0_TX,
- BLKADDR_NDC_NIX0_TX);
+ struct nix_hw *nix_hw = filp->private;
+ int blkaddr = 0;
+ int ndc_idx = 0;
+
+ blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
+ BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
+ ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_TX : NIX0_TX);
+
+ return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
}
RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_cache, nix_ndc_tx_cache_display, NULL);
@@ -880,8 +1154,14 @@ RVU_DEBUG_SEQ_FOPS(npa_ndc_hits_miss, npa_ndc_hits_miss_display, NULL);
static int rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file *filp,
void *unused)
{
- return ndc_blk_hits_miss_stats(filp,
- NPA0_U, BLKADDR_NDC_NIX0_RX);
+ struct nix_hw *nix_hw = filp->private;
+ int ndc_idx = NPA0_U;
+ int blkaddr = 0;
+
+ blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
+ BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
+
+ return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
}
RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_hits_miss, nix_ndc_rx_hits_miss_display, NULL);
@@ -889,16 +1169,92 @@ RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_hits_miss, nix_ndc_rx_hits_miss_display, NULL);
static int rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file *filp,
void *unused)
{
- return ndc_blk_hits_miss_stats(filp,
- NPA0_U, BLKADDR_NDC_NIX0_TX);
+ struct nix_hw *nix_hw = filp->private;
+ int ndc_idx = NPA0_U;
+ int blkaddr = 0;
+
+ blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
+ BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
+
+ return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
}
RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_hits_miss, nix_ndc_tx_hits_miss_display, NULL);
+static void print_nix_cn10k_sq_ctx(struct seq_file *m,
+ struct nix_cn10k_sq_ctx_s *sq_ctx)
+{
+ seq_printf(m, "W0: ena \t\t\t%d\nW0: qint_idx \t\t\t%d\n",
+ sq_ctx->ena, sq_ctx->qint_idx);
+ seq_printf(m, "W0: substream \t\t\t0x%03x\nW0: sdp_mcast \t\t\t%d\n",
+ sq_ctx->substream, sq_ctx->sdp_mcast);
+ seq_printf(m, "W0: cq \t\t\t\t%d\nW0: sqe_way_mask \t\t%d\n\n",
+ sq_ctx->cq, sq_ctx->sqe_way_mask);
+
+ seq_printf(m, "W1: smq \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: xoff\t\t\t%d\n",
+ sq_ctx->smq, sq_ctx->cq_ena, sq_ctx->xoff);
+ seq_printf(m, "W1: sso_ena \t\t\t%d\nW1: smq_rr_weight\t\t%d\n",
+ sq_ctx->sso_ena, sq_ctx->smq_rr_weight);
+ seq_printf(m, "W1: default_chan\t\t%d\nW1: sqb_count\t\t\t%d\n\n",
+ sq_ctx->default_chan, sq_ctx->sqb_count);
+
+ seq_printf(m, "W2: smq_rr_count_lb \t\t%d\n", sq_ctx->smq_rr_count_lb);
+ seq_printf(m, "W2: smq_rr_count_ub \t\t%d\n", sq_ctx->smq_rr_count_ub);
+ seq_printf(m, "W2: sqb_aura \t\t\t%d\nW2: sq_int \t\t\t%d\n",
+ sq_ctx->sqb_aura, sq_ctx->sq_int);
+ seq_printf(m, "W2: sq_int_ena \t\t\t%d\nW2: sqe_stype \t\t\t%d\n",
+ sq_ctx->sq_int_ena, sq_ctx->sqe_stype);
+
+ seq_printf(m, "W3: max_sqe_size\t\t%d\nW3: cq_limit\t\t\t%d\n",
+ sq_ctx->max_sqe_size, sq_ctx->cq_limit);
+ seq_printf(m, "W3: lmt_dis \t\t\t%d\nW3: mnq_dis \t\t\t%d\n",
+ sq_ctx->mnq_dis, sq_ctx->lmt_dis);
+ seq_printf(m, "W3: smq_next_sq\t\t\t%d\nW3: smq_lso_segnum\t\t%d\n",
+ sq_ctx->smq_next_sq, sq_ctx->smq_lso_segnum);
+ seq_printf(m, "W3: tail_offset \t\t%d\nW3: smenq_offset\t\t%d\n",
+ sq_ctx->tail_offset, sq_ctx->smenq_offset);
+ seq_printf(m, "W3: head_offset\t\t\t%d\nW3: smenq_next_sqb_vld\t\t%d\n\n",
+ sq_ctx->head_offset, sq_ctx->smenq_next_sqb_vld);
+
+ seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
+ sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
+ seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
+ seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
+ seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
+ seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
+ sq_ctx->smenq_next_sqb);
+
+ seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);
+
+ seq_printf(m, "W9: vfi_lso_total\t\t%d\n", sq_ctx->vfi_lso_total);
+ seq_printf(m, "W9: vfi_lso_sizem1\t\t%d\nW9: vfi_lso_sb\t\t\t%d\n",
+ sq_ctx->vfi_lso_sizem1, sq_ctx->vfi_lso_sb);
+ seq_printf(m, "W9: vfi_lso_mps\t\t\t%d\nW9: vfi_lso_vlan0_ins_ena\t%d\n",
+ sq_ctx->vfi_lso_mps, sq_ctx->vfi_lso_vlan0_ins_ena);
+ seq_printf(m, "W9: vfi_lso_vlan1_ins_ena\t%d\nW9: vfi_lso_vld \t\t%d\n\n",
+		   sq_ctx->vfi_lso_vlan1_ins_ena, sq_ctx->vfi_lso_vld);
+
+ seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
+ (u64)sq_ctx->scm_lso_rem);
+ seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
+ seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
+ seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
+ (u64)sq_ctx->dropped_octs);
+ seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
+ (u64)sq_ctx->dropped_pkts);
+}
+
/* Dumps given nix_sq's context */
static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
struct nix_sq_ctx_s *sq_ctx = &rsp->sq;
+ struct nix_hw *nix_hw = m->private;
+ struct rvu *rvu = nix_hw->rvu;
+
+ if (!is_rvu_otx2(rvu)) {
+ print_nix_cn10k_sq_ctx(m, (struct nix_cn10k_sq_ctx_s *)sq_ctx);
+ return;
+ }
seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n",
sq_ctx->sqe_way_mask, sq_ctx->cq);
@@ -959,10 +1315,94 @@ static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
(u64)sq_ctx->dropped_pkts);
}
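+/* Dumps given CN10K nix_rq's context */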
+static void print_nix_cn10k_rq_ctx(struct seq_file *m,
+ struct nix_cn10k_rq_ctx_s *rq_ctx)
+{
+ seq_printf(m, "W0: ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
+ rq_ctx->ena, rq_ctx->sso_ena);
+ seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
+ rq_ctx->ipsech_ena, rq_ctx->ena_wqwd);
+ seq_printf(m, "W0: cq \t\t\t\t%d\nW0: lenerr_dis \t\t\t%d\n",
+ rq_ctx->cq, rq_ctx->lenerr_dis);
+ seq_printf(m, "W0: csum_il4_dis \t\t%d\nW0: csum_ol4_dis \t\t%d\n",
+ rq_ctx->csum_il4_dis, rq_ctx->csum_ol4_dis);
+ seq_printf(m, "W0: len_il4_dis \t\t%d\nW0: len_il3_dis \t\t%d\n",
+ rq_ctx->len_il4_dis, rq_ctx->len_il3_dis);
+ seq_printf(m, "W0: len_ol4_dis \t\t%d\nW0: len_ol3_dis \t\t%d\n",
+ rq_ctx->len_ol4_dis, rq_ctx->len_ol3_dis);
+ seq_printf(m, "W0: wqe_aura \t\t\t%d\n\n", rq_ctx->wqe_aura);
+
+ seq_printf(m, "W1: spb_aura \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
+ rq_ctx->spb_aura, rq_ctx->lpb_aura);
+ seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
+ rq_ctx->sso_grp, rq_ctx->sso_tt);
+ seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: wqe_caching \t\t%d\n",
+ rq_ctx->pb_caching, rq_ctx->wqe_caching);
+ seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
+ rq_ctx->xqe_drop_ena, rq_ctx->spb_drop_ena);
+ seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: pb_stashing \t\t%d\n",
+ rq_ctx->lpb_drop_ena, rq_ctx->pb_stashing);
+ seq_printf(m, "W1: ipsecd_drop_ena \t\t%d\nW1: chi_ena \t\t\t%d\n\n",
+ rq_ctx->ipsecd_drop_ena, rq_ctx->chi_ena);
+
+ seq_printf(m, "W2: band_prof_id \t\t%d\n", rq_ctx->band_prof_id);
+ seq_printf(m, "W2: policer_ena \t\t%d\n", rq_ctx->policer_ena);
+ seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n", rq_ctx->spb_sizem1);
+	seq_printf(m, "W2: wqe_skip \t\t\t%d\nW2: spb_ena \t\t\t%d\n",
+ rq_ctx->wqe_skip, rq_ctx->spb_ena);
+	seq_printf(m, "W2: lpb_sizem1 \t\t\t%d\nW2: first_skip \t\t\t%d\n",
+ rq_ctx->lpb_sizem1, rq_ctx->first_skip);
+ seq_printf(m, "W2: later_skip\t\t\t%d\nW2: xqe_imm_size\t\t%d\n",
+ rq_ctx->later_skip, rq_ctx->xqe_imm_size);
+ seq_printf(m, "W2: xqe_imm_copy \t\t%d\nW2: xqe_hdr_split \t\t%d\n\n",
+ rq_ctx->xqe_imm_copy, rq_ctx->xqe_hdr_split);
+
+ seq_printf(m, "W3: xqe_drop \t\t\t%d\nW3: xqe_pass \t\t\t%d\n",
+ rq_ctx->xqe_drop, rq_ctx->xqe_pass);
+ seq_printf(m, "W3: wqe_pool_drop \t\t%d\nW3: wqe_pool_pass \t\t%d\n",
+ rq_ctx->wqe_pool_drop, rq_ctx->wqe_pool_pass);
+ seq_printf(m, "W3: spb_pool_drop \t\t%d\nW3: spb_pool_pass \t\t%d\n",
+ rq_ctx->spb_pool_drop, rq_ctx->spb_pool_pass);
+ seq_printf(m, "W3: spb_aura_drop \t\t%d\nW3: spb_aura_pass \t\t%d\n\n",
+		   rq_ctx->spb_aura_drop, rq_ctx->spb_aura_pass);
+
+	seq_printf(m, "W4: lpb_aura_drop \t\t%d\nW4: lpb_aura_pass \t\t%d\n",
+		   rq_ctx->lpb_aura_drop, rq_ctx->lpb_aura_pass);
+	seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW4: lpb_pool_pass \t\t%d\n",
+ rq_ctx->lpb_pool_drop, rq_ctx->lpb_pool_pass);
+ seq_printf(m, "W4: rq_int \t\t\t%d\nW4: rq_int_ena\t\t\t%d\n",
+ rq_ctx->rq_int, rq_ctx->rq_int_ena);
+ seq_printf(m, "W4: qint_idx \t\t\t%d\n\n", rq_ctx->qint_idx);
+
+ seq_printf(m, "W5: ltag \t\t\t%d\nW5: good_utag \t\t\t%d\n",
+ rq_ctx->ltag, rq_ctx->good_utag);
+ seq_printf(m, "W5: bad_utag \t\t\t%d\nW5: flow_tagw \t\t\t%d\n",
+ rq_ctx->bad_utag, rq_ctx->flow_tagw);
+ seq_printf(m, "W5: ipsec_vwqe \t\t\t%d\nW5: vwqe_ena \t\t\t%d\n",
+ rq_ctx->ipsec_vwqe, rq_ctx->vwqe_ena);
+ seq_printf(m, "W5: vwqe_wait \t\t\t%d\nW5: max_vsize_exp\t\t%d\n",
+ rq_ctx->vwqe_wait, rq_ctx->max_vsize_exp);
+ seq_printf(m, "W5: vwqe_skip \t\t\t%d\n\n", rq_ctx->vwqe_skip);
+
+ seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
+ seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
+ seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
+ seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
+ seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
+}
+
/* Dumps given nix_rq's context */
static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
struct nix_rq_ctx_s *rq_ctx = &rsp->rq;
+ struct nix_hw *nix_hw = m->private;
+ struct rvu *rvu = nix_hw->rvu;
+
+ if (!is_rvu_otx2(rvu)) {
+ print_nix_cn10k_rq_ctx(m, (struct nix_cn10k_rq_ctx_s *)rq_ctx);
+ return;
+ }
seq_printf(m, "W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
rq_ctx->wqe_aura, rq_ctx->substream);
@@ -1057,7 +1497,8 @@ static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp,
{
void (*print_nix_ctx)(struct seq_file *filp,
struct nix_aq_enq_rsp *rsp) = NULL;
- struct rvu *rvu = filp->private;
+ struct nix_hw *nix_hw = filp->private;
+ struct rvu *rvu = nix_hw->rvu;
struct nix_aq_enq_req aq_req;
struct nix_aq_enq_rsp rsp;
char *ctype_string = NULL;
@@ -1089,7 +1530,7 @@ static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp,
return -EINVAL;
}
- if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NIX, nixlf, &pcifunc))
+ if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
return -EINVAL;
pfvf = rvu_get_pfvf(rvu, pcifunc);
@@ -1141,13 +1582,15 @@ static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp,
}
static int write_nix_queue_ctx(struct rvu *rvu, bool all, int nixlf,
- int id, int ctype, char *ctype_string)
+ int id, int ctype, char *ctype_string,
+ struct seq_file *m)
{
+ struct nix_hw *nix_hw = m->private;
struct rvu_pfvf *pfvf;
int max_id = 0;
u16 pcifunc;
- if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NIX, nixlf, &pcifunc))
+ if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
return -EINVAL;
pfvf = rvu_get_pfvf(rvu, pcifunc);
@@ -1207,7 +1650,8 @@ static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp,
int ctype)
{
struct seq_file *m = filp->private_data;
- struct rvu *rvu = m->private;
+ struct nix_hw *nix_hw = m->private;
+ struct rvu *rvu = nix_hw->rvu;
char *cmd_buf, *ctype_string;
int nixlf, id = 0, ret;
bool all = false;
@@ -1243,7 +1687,7 @@ static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp,
goto done;
} else {
ret = write_nix_queue_ctx(rvu, all, nixlf, id, ctype,
- ctype_string);
+ ctype_string, m);
}
done:
kfree(cmd_buf);
@@ -1347,102 +1791,242 @@ static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused)
RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write);
-static void rvu_dbg_nix_init(struct rvu *rvu)
+static ssize_t rvu_dbg_nix_tx_stall_hwissue_display(struct file *filp,
+ char __user *buffer,
+ size_t count, loff_t *ppos)
{
- const struct device *dev = &rvu->pdev->dev;
- struct dentry *pfile;
+ return rvu_nix_get_tx_stall_counters(filp->private_data, buffer, ppos);
+}
- rvu->rvu_dbg.nix = debugfs_create_dir("nix", rvu->rvu_dbg.root);
- if (!rvu->rvu_dbg.nix) {
- dev_err(rvu->dev, "create debugfs dir failed for nix\n");
- return;
- }
+RVU_DEBUG_FOPS(nix_tx_stall_hwissue, nix_tx_stall_hwissue_display, NULL);
- pfile = debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, rvu,
- &rvu_dbg_nix_sq_ctx_fops);
- if (!pfile)
- goto create_failed;
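+/* Dumps given NIX bandwidth profile's context */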
+static void print_band_prof_ctx(struct seq_file *m,
+ struct nix_bandprof_s *prof)
+{
+ char *str;
- pfile = debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, rvu,
- &rvu_dbg_nix_rq_ctx_fops);
- if (!pfile)
- goto create_failed;
+ switch (prof->pc_mode) {
+ case NIX_RX_PC_MODE_VLAN:
+ str = "VLAN";
+ break;
+ case NIX_RX_PC_MODE_DSCP:
+ str = "DSCP";
+ break;
+ case NIX_RX_PC_MODE_GEN:
+ str = "Generic";
+ break;
+ case NIX_RX_PC_MODE_RSVD:
+ str = "Reserved";
+ break;
+ }
+ seq_printf(m, "W0: pc_mode\t\t%s\n", str);
+ str = (prof->icolor == 3) ? "Color blind" :
+ (prof->icolor == 0) ? "Green" :
+ (prof->icolor == 1) ? "Yellow" : "Red";
+ seq_printf(m, "W0: icolor\t\t%s\n", str);
+ seq_printf(m, "W0: tnl_ena\t\t%d\n", prof->tnl_ena);
+ seq_printf(m, "W0: peir_exponent\t%d\n", prof->peir_exponent);
+ seq_printf(m, "W0: pebs_exponent\t%d\n", prof->pebs_exponent);
+ seq_printf(m, "W0: cir_exponent\t%d\n", prof->cir_exponent);
+ seq_printf(m, "W0: cbs_exponent\t%d\n", prof->cbs_exponent);
+ seq_printf(m, "W0: peir_mantissa\t%d\n", prof->peir_mantissa);
+ seq_printf(m, "W0: pebs_mantissa\t%d\n", prof->pebs_mantissa);
+ seq_printf(m, "W0: cir_mantissa\t%d\n", prof->cir_mantissa);
+
+ seq_printf(m, "W1: cbs_mantissa\t%d\n", prof->cbs_mantissa);
+ str = (prof->lmode == 0) ? "byte" : "packet";
+ seq_printf(m, "W1: lmode\t\t%s\n", str);
+ seq_printf(m, "W1: l_select\t\t%d\n", prof->l_sellect);
+ seq_printf(m, "W1: rdiv\t\t%d\n", prof->rdiv);
+ seq_printf(m, "W1: adjust_exponent\t%d\n", prof->adjust_exponent);
+ seq_printf(m, "W1: adjust_mantissa\t%d\n", prof->adjust_mantissa);
+ str = (prof->gc_action == 0) ? "PASS" :
+ (prof->gc_action == 1) ? "DROP" : "RED";
+ seq_printf(m, "W1: gc_action\t\t%s\n", str);
+ str = (prof->yc_action == 0) ? "PASS" :
+ (prof->yc_action == 1) ? "DROP" : "RED";
+ seq_printf(m, "W1: yc_action\t\t%s\n", str);
+ str = (prof->rc_action == 0) ? "PASS" :
+ (prof->rc_action == 1) ? "DROP" : "RED";
+ seq_printf(m, "W1: rc_action\t\t%s\n", str);
+ seq_printf(m, "W1: meter_algo\t\t%d\n", prof->meter_algo);
+ seq_printf(m, "W1: band_prof_id\t%d\n", prof->band_prof_id);
+ seq_printf(m, "W1: hl_en\t\t%d\n", prof->hl_en);
+
+ seq_printf(m, "W2: ts\t\t\t%lld\n", (u64)prof->ts);
+ seq_printf(m, "W3: pe_accum\t\t%d\n", prof->pe_accum);
+ seq_printf(m, "W3: c_accum\t\t%d\n", prof->c_accum);
+ seq_printf(m, "W4: green_pkt_pass\t%lld\n",
+ (u64)prof->green_pkt_pass);
+ seq_printf(m, "W5: yellow_pkt_pass\t%lld\n",
+ (u64)prof->yellow_pkt_pass);
+ seq_printf(m, "W6: red_pkt_pass\t%lld\n", (u64)prof->red_pkt_pass);
+ seq_printf(m, "W7: green_octs_pass\t%lld\n",
+ (u64)prof->green_octs_pass);
+ seq_printf(m, "W8: yellow_octs_pass\t%lld\n",
+ (u64)prof->yellow_octs_pass);
+ seq_printf(m, "W9: red_octs_pass\t%lld\n", (u64)prof->red_octs_pass);
+ seq_printf(m, "W10: green_pkt_drop\t%lld\n",
+ (u64)prof->green_pkt_drop);
+ seq_printf(m, "W11: yellow_pkt_drop\t%lld\n",
+ (u64)prof->yellow_pkt_drop);
+ seq_printf(m, "W12: red_pkt_drop\t%lld\n", (u64)prof->red_pkt_drop);
+ seq_printf(m, "W13: green_octs_drop\t%lld\n",
+ (u64)prof->green_octs_drop);
+ seq_printf(m, "W14: yellow_octs_drop\t%lld\n",
+ (u64)prof->yellow_octs_drop);
+ seq_printf(m, "W15: red_octs_drop\t%lld\n", (u64)prof->red_octs_drop);
+ seq_puts(m, "==============================\n");
+}
- pfile = debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, rvu,
- &rvu_dbg_nix_cq_ctx_fops);
- if (!pfile)
- goto create_failed;
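+/* Dumps contexts of all allocated NIX ingress bandwidth profiles */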
+static int rvu_dbg_nix_band_prof_ctx_display(struct seq_file *m, void *unused)
+{
+ struct nix_hw *nix_hw = m->private;
+ struct nix_cn10k_aq_enq_req aq_req;
+ struct nix_cn10k_aq_enq_rsp aq_rsp;
+ struct rvu *rvu = nix_hw->rvu;
+ struct nix_ipolicer *ipolicer;
+ int layer, prof_idx, idx, rc;
+ u16 pcifunc;
+ char *str;
- pfile = debugfs_create_file("ndc_tx_cache", 0600, rvu->rvu_dbg.nix, rvu,
- &rvu_dbg_nix_ndc_tx_cache_fops);
- if (!pfile)
- goto create_failed;
+ /* Ingress policers do not exist on all platforms */
+ if (!nix_hw->ipolicer)
+ return 0;
- pfile = debugfs_create_file("ndc_rx_cache", 0600, rvu->rvu_dbg.nix, rvu,
- &rvu_dbg_nix_ndc_rx_cache_fops);
- if (!pfile)
- goto create_failed;
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ if (layer == BAND_PROF_INVAL_LAYER)
+ continue;
+ str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
+ (layer == BAND_PROF_MID_LAYER) ? "Mid" : "Top";
- pfile = debugfs_create_file("ndc_tx_hits_miss", 0600, rvu->rvu_dbg.nix,
- rvu, &rvu_dbg_nix_ndc_tx_hits_miss_fops);
- if (!pfile)
- goto create_failed;
+ seq_printf(m, "\n%s bandwidth profiles\n", str);
+ seq_puts(m, "=======================\n");
- pfile = debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix,
- rvu, &rvu_dbg_nix_ndc_rx_hits_miss_fops);
- if (!pfile)
- goto create_failed;
+ ipolicer = &nix_hw->ipolicer[layer];
- pfile = debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu,
- &rvu_dbg_nix_qsize_fops);
- if (!pfile)
- goto create_failed;
+ for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
+ if (is_rsrc_free(&ipolicer->band_prof, idx))
+ continue;
- return;
-create_failed:
- dev_err(dev, "Failed to create debugfs dir/file for NIX\n");
- debugfs_remove_recursive(rvu->rvu_dbg.nix);
+ prof_idx = (idx & 0x3FFF) | (layer << 14);
+ rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
+ 0x00, NIX_AQ_CTYPE_BANDPROF,
+ prof_idx);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to fetch context of %s profile %d, err %d\n",
+ __func__, str, idx, rc);
+ return 0;
+ }
+ seq_printf(m, "\n%s bandwidth profile:: %d\n", str, idx);
+ pcifunc = ipolicer->pfvf_map[idx];
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK))
+ seq_printf(m, "Allocated to :: PF %d\n",
+ rvu_get_pf(pcifunc));
+ else
+ seq_printf(m, "Allocated to :: PF %d VF %d\n",
+ rvu_get_pf(pcifunc),
+ (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
+ print_band_prof_ctx(m, &aq_rsp.prof);
+ }
+ }
+ return 0;
}
-static void rvu_dbg_npa_init(struct rvu *rvu)
+RVU_DEBUG_SEQ_FOPS(nix_band_prof_ctx, nix_band_prof_ctx_display, NULL);
+
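+/* Dumps max and free counts of bandwidth profiles per policer layer */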
+static int rvu_dbg_nix_band_prof_rsrc_display(struct seq_file *m, void *unused)
{
- const struct device *dev = &rvu->pdev->dev;
- struct dentry *pfile;
+ struct nix_hw *nix_hw = m->private;
+ struct nix_ipolicer *ipolicer;
+ int layer;
+ char *str;
- rvu->rvu_dbg.npa = debugfs_create_dir("npa", rvu->rvu_dbg.root);
- if (!rvu->rvu_dbg.npa)
- return;
+ /* Ingress policers do not exist on all platforms */
+ if (!nix_hw->ipolicer)
+ return 0;
+
+ seq_puts(m, "\nBandwidth profile resource free count\n");
+ seq_puts(m, "=====================================\n");
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ if (layer == BAND_PROF_INVAL_LAYER)
+ continue;
+ str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
+ (layer == BAND_PROF_MID_LAYER) ? "Mid " : "Top ";
+
+ ipolicer = &nix_hw->ipolicer[layer];
+ seq_printf(m, "%s :: Max: %4d Free: %4d\n", str,
+ ipolicer->band_prof.max,
+ rvu_rsrc_free_count(&ipolicer->band_prof));
+ }
+ seq_puts(m, "=====================================\n");
- pfile = debugfs_create_file("qsize", 0600, rvu->rvu_dbg.npa, rvu,
- &rvu_dbg_npa_qsize_fops);
- if (!pfile)
- goto create_failed;
+ return 0;
+}
- pfile = debugfs_create_file("aura_ctx", 0600, rvu->rvu_dbg.npa, rvu,
- &rvu_dbg_npa_aura_ctx_fops);
- if (!pfile)
- goto create_failed;
+RVU_DEBUG_SEQ_FOPS(nix_band_prof_rsrc, nix_band_prof_rsrc_display, NULL);
- pfile = debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu,
- &rvu_dbg_npa_pool_ctx_fops);
- if (!pfile)
- goto create_failed;
+static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr)
+{
+ struct nix_hw *nix_hw;
- pfile = debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu,
- &rvu_dbg_npa_ndc_cache_fops);
- if (!pfile)
- goto create_failed;
+ if (!is_block_implemented(rvu->hw, blkaddr))
+ return;
- pfile = debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa,
- rvu, &rvu_dbg_npa_ndc_hits_miss_fops);
- if (!pfile)
- goto create_failed;
+ if (blkaddr == BLKADDR_NIX0) {
+ nix_hw = &rvu->hw->nix[0];
+ } else {
+ rvu->rvu_dbg.nix = debugfs_create_dir("nix1",
+ rvu->rvu_dbg.root);
+ if (!rvu->rvu_dbg.nix) {
+ dev_err(rvu->dev,
+ "create debugfs dir failed for nix1\n");
+ return;
+ }
+ nix_hw = &rvu->hw->nix[1];
+ }
- return;
+ debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_sq_ctx_fops);
+ debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_rq_ctx_fops);
+ debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_cq_ctx_fops);
+ debugfs_create_file("ndc_tx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_ndc_tx_cache_fops);
+ debugfs_create_file("ndc_rx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_ndc_rx_cache_fops);
+ debugfs_create_file("ndc_tx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_ndc_tx_hits_miss_fops);
+ debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_ndc_rx_hits_miss_fops);
+ debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu,
+ &rvu_dbg_nix_qsize_fops);
+ debugfs_create_file("ingress_policer_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_band_prof_ctx_fops);
+ debugfs_create_file("ingress_policer_rsrc", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_band_prof_rsrc_fops);
+ if (is_rvu_96xx_A0(rvu)) {
+ debugfs_create_file("tx_stall_hwissue", 0600,
+ rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_tx_stall_hwissue_fops);
+ }
+}
-create_failed:
- dev_err(dev, "Failed to create debugfs dir/file for NPA\n");
- debugfs_remove_recursive(rvu->rvu_dbg.npa);
+static void rvu_dbg_npa_init(struct rvu *rvu)
+{
+ rvu->rvu_dbg.npa = debugfs_create_dir("npa", rvu->rvu_dbg.root);
+ debugfs_create_file("qsize", 0600, rvu->rvu_dbg.npa, rvu,
+ &rvu_dbg_npa_qsize_fops);
+ debugfs_create_file("aura_ctx", 0600, rvu->rvu_dbg.npa, rvu,
+ &rvu_dbg_npa_aura_ctx_fops);
+ debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu,
+ &rvu_dbg_npa_pool_ctx_fops);
+ debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu,
+ &rvu_dbg_npa_ndc_cache_fops);
+ debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa, rvu,
+ &rvu_dbg_npa_ndc_hits_miss_fops);
}
#define PRINT_CGX_CUML_NIXRX_STATUS(idx, name) \
@@ -1468,6 +2052,7 @@ create_failed:
static int cgx_print_stats(struct seq_file *s, int lmac_id)
{
struct cgx_link_user_info linfo;
+ struct mac_ops *mac_ops;
void *cgxd = s->private;
u64 ucast, mcast, bcast;
int stat = 0, err = 0;
@@ -1479,6 +2064,11 @@ static int cgx_print_stats(struct seq_file *s, int lmac_id)
if (!rvu)
return -ENODEV;
+ mac_ops = get_mac_ops(cgxd);
+	/* There may be no CGX devices at all */
+ if (!mac_ops)
+ return 0;
+
/* Link status */
seq_puts(s, "\n=======Link Status======\n\n");
err = cgx_get_link_info(cgxd, lmac_id, &linfo);
@@ -1488,7 +2078,8 @@ static int cgx_print_stats(struct seq_file *s, int lmac_id)
linfo.link_up ? "UP" : "DOWN", linfo.speed);
/* Rx stats */
- seq_puts(s, "\n=======NIX RX_STATS(CGX port level)======\n\n");
+ seq_printf(s, "\n=======NIX RX_STATS(%s port level)======\n\n",
+ mac_ops->name);
ucast = PRINT_CGX_CUML_NIXRX_STATUS(RX_UCAST, "rx_ucast_frames");
if (err)
return err;
@@ -1510,7 +2101,8 @@ static int cgx_print_stats(struct seq_file *s, int lmac_id)
return err;
/* Tx stats */
- seq_puts(s, "\n=======NIX TX_STATS(CGX port level)======\n\n");
+ seq_printf(s, "\n=======NIX TX_STATS(%s port level)======\n\n",
+ mac_ops->name);
ucast = PRINT_CGX_CUML_NIXTX_STATUS(TX_UCAST, "tx_ucast_frames");
if (err)
return err;
@@ -1529,33 +2121,43 @@ static int cgx_print_stats(struct seq_file *s, int lmac_id)
return err;
/* Rx stats */
- seq_puts(s, "\n=======CGX RX_STATS======\n\n");
- while (stat < CGX_RX_STATS_COUNT) {
- err = cgx_get_rx_stats(cgxd, lmac_id, stat, &rx_stat);
+ seq_printf(s, "\n=======%s RX_STATS======\n\n", mac_ops->name);
+ while (stat < mac_ops->rx_stats_cnt) {
+ err = mac_ops->mac_get_rx_stats(cgxd, lmac_id, stat, &rx_stat);
if (err)
return err;
- seq_printf(s, "%s: %llu\n", cgx_rx_stats_fields[stat], rx_stat);
+ if (is_rvu_otx2(rvu))
+ seq_printf(s, "%s: %llu\n", cgx_rx_stats_fields[stat],
+ rx_stat);
+ else
+ seq_printf(s, "%s: %llu\n", rpm_rx_stats_fields[stat],
+ rx_stat);
stat++;
}
/* Tx stats */
stat = 0;
- seq_puts(s, "\n=======CGX TX_STATS======\n\n");
- while (stat < CGX_TX_STATS_COUNT) {
- err = cgx_get_tx_stats(cgxd, lmac_id, stat, &tx_stat);
+ seq_printf(s, "\n=======%s TX_STATS======\n\n", mac_ops->name);
+ while (stat < mac_ops->tx_stats_cnt) {
+ err = mac_ops->mac_get_tx_stats(cgxd, lmac_id, stat, &tx_stat);
if (err)
return err;
- seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat], tx_stat);
+
+ if (is_rvu_otx2(rvu))
+ seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat],
+ tx_stat);
+ else
+ seq_printf(s, "%s: %llu\n", rpm_tx_stats_fields[stat],
+ tx_stat);
stat++;
}
return err;
}
-static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
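+/* Derives LMAC id from the 'lmacX' debugfs parent directory name */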
+static int rvu_dbg_derive_lmacid(struct seq_file *filp, int *lmac_id)
{
struct dentry *current_dir;
- int err, lmac_id;
char *buf;
current_dir = filp->file->f_path.dentry->d_parent;
@@ -1563,53 +2165,127 @@ static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
if (!buf)
return -EINVAL;
- err = kstrtoint(buf + 1, 10, &lmac_id);
- if (!err) {
- err = cgx_print_stats(filp, lmac_id);
- if (err)
- return err;
- }
+ return kstrtoint(buf + 1, 10, lmac_id);
+}
+
+static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
+{
+ int lmac_id, err;
+
+ err = rvu_dbg_derive_lmacid(filp, &lmac_id);
+ if (!err)
+ return cgx_print_stats(filp, lmac_id);
+
return err;
}
RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
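+/* Dumps DMAC filter config and enabled DMAC CAM entries of an LMAC */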
+static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id)
+{
+ struct pci_dev *pdev = NULL;
+ void *cgxd = s->private;
+ char *bcast, *mcast;
+ u16 index, domain;
+ u8 dmac[ETH_ALEN];
+ struct rvu *rvu;
+ u64 cfg, mac;
+ int pf;
+
+ rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
+ if (!rvu)
+ return -ENODEV;
+
+ pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
+ domain = 2;
+
+ pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0);
+ if (!pdev)
+ return 0;
+
+ cfg = cgx_read_dmac_ctrl(cgxd, lmac_id);
+ bcast = cfg & CGX_DMAC_BCAST_MODE ? "ACCEPT" : "REJECT";
+ mcast = cfg & CGX_DMAC_MCAST_MODE ? "ACCEPT" : "REJECT";
+
+ seq_puts(s,
+ "PCI dev RVUPF BROADCAST MULTICAST FILTER-MODE\n");
+ seq_printf(s, "%s PF%d %9s %9s",
+ dev_name(&pdev->dev), pf, bcast, mcast);
+ if (cfg & CGX_DMAC_CAM_ACCEPT)
+ seq_printf(s, "%12s\n\n", "UNICAST");
+ else
+ seq_printf(s, "%16s\n\n", "PROMISCUOUS");
+
+ seq_puts(s, "\nDMAC-INDEX ADDRESS\n");
+
+	for (index = 0; index < 32; index++) {
+ cfg = cgx_read_dmac_entry(cgxd, index);
+ /* Display enabled dmac entries associated with current lmac */
+ if (lmac_id == FIELD_GET(CGX_DMAC_CAM_ENTRY_LMACID, cfg) &&
+ FIELD_GET(CGX_DMAC_CAM_ADDR_ENABLE, cfg)) {
+ mac = FIELD_GET(CGX_RX_DMAC_ADR_MASK, cfg);
+ u64_to_ether_addr(mac, dmac);
+ seq_printf(s, "%7d %pM\n", index, dmac);
+ }
+ }
+
+ return 0;
+}
+
+static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *filp, void *unused)
+{
+ int err, lmac_id;
+
+ err = rvu_dbg_derive_lmacid(filp, &lmac_id);
+ if (!err)
+ return cgx_print_dmac_flt(filp, lmac_id);
+
+ return err;
+}
+
+RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL);
+
static void rvu_dbg_cgx_init(struct rvu *rvu)
{
- const struct device *dev = &rvu->pdev->dev;
- struct dentry *pfile;
+ struct mac_ops *mac_ops;
+ unsigned long lmac_bmap;
int i, lmac_id;
char dname[20];
void *cgx;
- rvu->rvu_dbg.cgx_root = debugfs_create_dir("cgx", rvu->rvu_dbg.root);
+ if (!cgx_get_cgxcnt_max())
+ return;
+
+ mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
+ if (!mac_ops)
+ return;
+
+ rvu->rvu_dbg.cgx_root = debugfs_create_dir(mac_ops->name,
+ rvu->rvu_dbg.root);
for (i = 0; i < cgx_get_cgxcnt_max(); i++) {
cgx = rvu_cgx_pdata(i, rvu);
if (!cgx)
continue;
+ lmac_bmap = cgx_get_lmac_bmap(cgx);
/* cgx debugfs dir */
- sprintf(dname, "cgx%d", i);
+ sprintf(dname, "%s%d", mac_ops->name, i);
rvu->rvu_dbg.cgx = debugfs_create_dir(dname,
rvu->rvu_dbg.cgx_root);
- for (lmac_id = 0; lmac_id < cgx_get_lmac_cnt(cgx); lmac_id++) {
+ for_each_set_bit(lmac_id, &lmac_bmap, MAX_LMAC_PER_CGX) {
/* lmac debugfs dir */
sprintf(dname, "lmac%d", lmac_id);
rvu->rvu_dbg.lmac =
debugfs_create_dir(dname, rvu->rvu_dbg.cgx);
- pfile = debugfs_create_file("stats", 0600,
- rvu->rvu_dbg.lmac, cgx,
- &rvu_dbg_cgx_stat_fops);
- if (!pfile)
- goto create_failed;
+ debugfs_create_file("stats", 0600, rvu->rvu_dbg.lmac,
+ cgx, &rvu_dbg_cgx_stat_fops);
+ debugfs_create_file("mac_filter", 0600,
+ rvu->rvu_dbg.lmac, cgx,
+ &rvu_dbg_cgx_dmac_flt_fops);
}
}
- return;
-
-create_failed:
- dev_err(dev, "Failed to create debugfs dir/file for CGX\n");
- debugfs_remove_recursive(rvu->rvu_dbg.cgx_root);
}
/* NPC debugfs APIs */
@@ -1620,9 +2296,6 @@ static void rvu_print_npc_mcam_info(struct seq_file *s,
int entry_acnt, entry_ecnt;
int cntr_acnt, cntr_ecnt;
- /* Skip PF0 */
- if (!pcifunc)
- return;
rvu_npc_get_mcam_entry_alloc_info(rvu, pcifunc, blkaddr,
&entry_acnt, &entry_ecnt);
rvu_npc_get_mcam_counter_alloc_info(rvu, pcifunc, blkaddr,
@@ -1653,7 +2326,7 @@ static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued)
struct rvu *rvu = filp->private;
int pf, vf, numvfs, blkaddr;
struct npc_mcam *mcam;
- u16 pcifunc;
+ u16 pcifunc, counters;
u64 cfg;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
@@ -1661,6 +2334,7 @@ static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued)
return -ENODEV;
mcam = &rvu->hw->mcam;
+ counters = rvu->hw->npc_counters;
seq_puts(filp, "\nNPC MCAM info:\n");
/* MCAM keywidth on receive and transmit sides */
@@ -1683,10 +2357,9 @@ static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued)
seq_printf(filp, "\t\t Available \t: %d\n", mcam->bmap_fcnt);
/* MCAM counters */
- cfg = rvu_read64(rvu, blkaddr, NPC_AF_CONST);
- cfg = (cfg >> 48) & 0xFFFF;
- seq_printf(filp, "\n\t\t MCAM counters \t: %lld\n", cfg);
- seq_printf(filp, "\t\t Reserved \t: %lld\n", cfg - mcam->counters.max);
+ seq_printf(filp, "\n\t\t MCAM counters \t: %d\n", counters);
+ seq_printf(filp, "\t\t Reserved \t: %d\n",
+ counters - mcam->counters.max);
seq_printf(filp, "\t\t Available \t: %d\n",
rvu_rsrc_free_count(&mcam->counters));
@@ -1713,6 +2386,989 @@ static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued)
return 0;
}
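+/* Parses '<lf>' or 'all' from the command written to SSO debugfs files */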
+static int parse_sso_cmd_buffer(char *cmd_buf, size_t *count,
+ const char __user *buffer, int *ssolf,
+ bool *all)
+{
+ int ret, bytes_not_copied;
+ char *cmd_buf_tmp;
+ char *subtoken;
+
+ bytes_not_copied = copy_from_user(cmd_buf, buffer, *count);
+ if (bytes_not_copied)
+ return -EFAULT;
+
+ cmd_buf[*count] = '\0';
+ cmd_buf_tmp = strchr(cmd_buf, '\n');
+
+ if (cmd_buf_tmp) {
+ *cmd_buf_tmp = '\0';
+ *count = cmd_buf_tmp - cmd_buf + 1;
+ }
+
+ subtoken = strsep(&cmd_buf, " ");
+ if (subtoken && strcmp(subtoken, "all") == 0) {
+ *all = true;
+	} else {
+ ret = subtoken ? kstrtoint(subtoken, 10, ssolf) : -EINVAL;
+ if (ret < 0)
+ return ret;
+ }
+ if (cmd_buf)
+ return -EINVAL;
+
+ return 0;
+}
+
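+/* Walks an SSO in-unit entry chain (DQ/CQ/AQ) and dumps each entry */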
+static void sso_hwgrp_display_iq_list(struct rvu *rvu, int ssolf, u16 idx,
+ u16 tail_idx, u8 queue_type)
+{
+ const char *queue[3] = {"DQ", "CQ", "AQ"};
+ int blkaddr;
+ u64 reg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return;
+
+ pr_info("SSO HWGGRP[%d] [%s] Chain queue head[%d]", ssolf,
+ queue[queue_type], idx);
+ pr_info("SSO HWGGRP[%d] [%s] Chain queue tail[%d]", ssolf,
+ queue[queue_type], tail_idx);
+ pr_info("--------------------------------------------------\n");
+ do {
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_IENTX_TAG(idx));
+ pr_info("SSO HWGGRP[%d] [%s] IE[%d] TAG 0x%llx\n", ssolf,
+ queue[queue_type], idx, reg);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_IENTX_GRP(idx));
+ pr_info("SSO HWGGRP[%d] [%s] IE[%d] GRP 0x%llx\n", ssolf,
+ queue[queue_type], idx, reg);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_IENTX_PENDTAG(idx));
+ pr_info("SSO HWGGRP[%d] [%s] IE[%d] PENDTAG 0x%llx\n", ssolf,
+ queue[queue_type], idx, reg);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_IENTX_LINKS(idx));
+ pr_info("SSO HWGGRP[%d] [%s] IE[%d] LINKS 0x%llx\n", ssolf,
+ queue[queue_type], idx, reg);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_IENTX_QLINKS(idx));
+ pr_info("SSO HWGGRP[%d] [%s] IE[%d] QLINKS 0x%llx\n", ssolf,
+ queue[queue_type], idx, reg);
+ pr_info("--------------------------------------------------\n");
+ if (idx == tail_idx)
+ break;
+ idx = reg & 0x1FFF;
+ } while (idx != 0x1FFF);
+}
+
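+/* Walks an SSO TAQ cache-line list and dumps WAE tag/WQP entries */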
+static void sso_hwgrp_display_taq_list(struct rvu *rvu, int ssolf, u8 wae_head,
+ u16 ent_head, u8 wae_used, u8 taq_lines)
+{
+ int i, blkaddr;
+ u64 reg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return;
+
+ pr_info("--------------------------------------------------\n");
+ do {
+ for (i = wae_head; i < taq_lines && wae_used; i++) {
+ reg = rvu_read64(rvu, blkaddr,
+ SSO_AF_TAQX_WAEY_TAG(ent_head, i));
+ pr_info("SSO HWGGRP[%d] TAQ[%d] WAE[%d] TAG 0x%llx\n",
+ ssolf, ent_head, i, reg);
+
+ reg = rvu_read64(rvu, blkaddr,
+ SSO_AF_TAQX_WAEY_WQP(ent_head, i));
+ pr_info("SSO HWGGRP[%d] TAQ[%d] WAE[%d] WQP 0x%llx\n",
+ ssolf, ent_head, i, reg);
+ wae_used--;
+ }
+
+ reg = rvu_read64(rvu, blkaddr,
+ SSO_AF_TAQX_LINK(ent_head));
+ pr_info("SSO HWGGRP[%d] TAQ[%d] LINK 0x%llx\n",
+ ssolf, ent_head, reg);
+ ent_head = reg & 0x7FF;
+ pr_info("--------------------------------------------------\n");
+ } while (ent_head && wae_used);
+}
+
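+/* Dumps SSO AF performance counters and IAQ/TAQ occupancy */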
+static int read_sso_pc(struct rvu *rvu)
+{
+ int blkaddr;
+ u64 reg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return -ENODEV;
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_ACTIVE_CYCLES0);
+ pr_info("SSO Add-Work active cycles %lld\n", reg);
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_ACTIVE_CYCLES1);
+ pr_info("SSO Get-Work active cycles %lld\n", reg);
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_ACTIVE_CYCLES2);
+ pr_info("SSO Work-Slot active cycles %lld\n", reg);
+ pr_info("\n");
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_NOS_CNT) & 0x1FFF;
+ pr_info("SSO work-queue entries on the no-schedule list %lld\n", reg);
+ pr_info("\n");
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_AW_READ_ARB);
+ pr_info("SSO XAQ reads outstanding %lld\n",
+ (reg >> 24) & 0x3F);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_XAQ_REQ_PC);
+ pr_info("SSO XAQ reads requests %lld\n", reg);
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_XAQ_LATENCY_PC);
+ pr_info("SSO XAQ read latency cycles %lld\n", reg);
+ pr_info("\n");
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_AW_WE);
+ pr_info("SSO IAQ reserved %lld\n",
+ (reg >> 16) & 0x3FFF);
+ pr_info("SSO IAQ total %lld\n", reg & 0x3FFF);
+ pr_info("\n");
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_TAQ_CNT);
+ pr_info("SSO TAQ reserved %lld\n",
+ (reg >> 16) & 0x7FF);
+ pr_info("SSO TAQ total %lld\n", reg & 0x7FF);
+ pr_info("\n");
+
+ return 0;
+}
+
+/* Reads SSO hwgrp performance counters */
+static void read_sso_hwgrp_pc(struct rvu *rvu, int ssolf, bool all)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int blkaddr, max_id;
+ u64 reg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return;
+
+ block = &hw->block[blkaddr];
+ if (ssolf < 0 || ssolf >= block->lf.max) {
+ pr_info("Invalid SSOLF(HWGRP), valid range is 0-%d\n",
+ block->lf.max - 1);
+ return;
+ }
+ max_id = block->lf.max;
+
+ if (all)
+ ssolf = 0;
+ else
+ max_id = ssolf + 1;
+
+ pr_info("==================================================\n");
+ for (; ssolf < max_id; ssolf++) {
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_WS_PC(ssolf));
+ pr_info("SSO HWGGRP[%d] Work-Schedule PC 0x%llx\n", ssolf,
+ reg);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_EXT_PC(ssolf));
+ pr_info("SSO HWGGRP[%d] External Schedule PC 0x%llx\n", ssolf,
+ reg);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_WA_PC(ssolf));
+ pr_info("SSO HWGGRP[%d] Work-Add PC 0x%llx\n", ssolf,
+ reg);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_TS_PC(ssolf));
+ pr_info("SSO HWGGRP[%d] Tag Switch PC 0x%llx\n", ssolf,
+ reg);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_DS_PC(ssolf));
+ pr_info("SSO HWGGRP[%d] Deschedule PC 0x%llx\n", ssolf,
+ reg);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_DQ_PC(ssolf));
+ pr_info("SSO HWGGRP[%d] Work-Descheduled PC 0x%llx\n", ssolf,
+ reg);
+
+ reg = rvu_read64(rvu, blkaddr,
+ SSO_AF_HWGRPX_PAGE_CNT(ssolf));
+ pr_info("SSO HWGGRP[%d] In-use Page Count 0x%llx\n", ssolf,
+ reg);
+ pr_info("==================================================\n");
+ }
+}
+
+/* Reads SSO hwgrp Threshold */
+static void read_sso_hwgrp_thresh(struct rvu *rvu, int ssolf, bool all)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int blkaddr, max_id;
+ u64 reg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return;
+
+ block = &hw->block[blkaddr];
+ if (ssolf < 0 || ssolf >= block->lf.max) {
+ pr_info("Invalid SSOLF(HWGRP), valid range is 0-%d\n",
+ block->lf.max - 1);
+ return;
+ }
+ max_id = block->lf.max;
+
+ if (all)
+ ssolf = 0;
+ else
+ max_id = ssolf + 1;
+
+ pr_info("==================================================\n");
+ for (; ssolf < max_id; ssolf++) {
+ reg = rvu_read64(rvu, blkaddr,
+ SSO_AF_HWGRPX_IAQ_THR(ssolf));
+ pr_info("SSO HWGGRP[%d] IAQ Threshold 0x%llx\n", ssolf,
+ reg);
+
+ reg = rvu_read64(rvu, blkaddr,
+ SSO_AF_HWGRPX_TAQ_THR(ssolf));
+ pr_info("SSO HWGGRP[%d] TAQ Threshold 0x%llx\n", ssolf,
+ reg);
+
+ reg = rvu_read64(rvu, blkaddr,
+ SSO_AF_HWGRPX_XAQ_AURA(ssolf));
+ pr_info("SSO HWGGRP[%d] XAQ Aura 0x%llx\n", ssolf,
+ reg);
+
+ reg = rvu_read64(rvu, blkaddr,
+ SSO_AF_HWGRPX_XAQ_LIMIT(ssolf));
+ pr_info("SSO HWGGRP[%d] XAQ Limit 0x%llx\n", ssolf,
+ reg);
+
+ reg = rvu_read64(rvu, blkaddr,
+ SSO_AF_HWGRPX_IU_ACCNT(ssolf));
+ pr_info("SSO HWGGRP[%d] IU Account Index 0x%llx\n", ssolf,
+ reg);
+
+ reg = rvu_read64(rvu, blkaddr,
+ SSO_AF_IU_ACCNTX_CFG(reg & 0xFF));
+ pr_info("SSO HWGGRP[%d] IU Accounting Cfg 0x%llx\n", ssolf,
+ reg);
+ pr_info("==================================================\n");
+ }
+}
+
+/* Reads SSO hwgrp TAQ list */
+static void read_sso_hwgrp_taq_list(struct rvu *rvu, int ssolf, bool all)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u8 taq_entries, wae_head;
+ struct rvu_block *block;
+ u16 ent_head, cl_used;
+ int blkaddr, max_id;
+ u64 reg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return;
+
+ block = &hw->block[blkaddr];
+ if (ssolf < 0 || ssolf >= block->lf.max) {
+ pr_info("Invalid SSOLF(HWGRP), valid range is 0-%d\n",
+ block->lf.max - 1);
+ return;
+ }
+ max_id = block->lf.max;
+
+ if (all)
+ ssolf = 0;
+ else
+ max_id = ssolf + 1;
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_CONST);
+ taq_entries = (reg >> 48) & 0xFF;
+ pr_info("==================================================\n");
+ for (; ssolf < max_id; ssolf++) {
+ pr_info("++++++++++++++++++++++++++++++++++++++++++++++++++\n");
+ pr_info("SSO HWGGRP[%d] Transitory Output Admission Queue",
+ ssolf);
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_TOAQX_STATUS(ssolf));
+ pr_info("SSO HWGGRP[%d] TOAQ Status 0x%llx\n", ssolf,
+ reg);
+ ent_head = (reg >> 12) & 0x7FF;
+ cl_used = (reg >> 32) & 0x7FF;
+ if (reg & BIT_ULL(61) && cl_used) {
+ pr_info("SSO HWGGRP[%d] TOAQ CL_USED 0x%x\n",
+ ssolf, cl_used);
+ sso_hwgrp_display_taq_list(rvu, ssolf, ent_head, 0,
+ cl_used * taq_entries,
+ taq_entries);
+ }
+ pr_info("++++++++++++++++++++++++++++++++++++++++++++++++++\n");
+ pr_info("SSO HWGGRP[%d] Transitory Input Admission Queue",
+ ssolf);
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_TIAQX_STATUS(ssolf));
+ pr_info("SSO HWGGRP[%d] TIAQ Status 0x%llx\n", ssolf,
+ reg);
+ wae_head = (reg >> 60) & 0xF;
+ cl_used = (reg >> 32) & 0x7FFF;
+ ent_head = (reg >> 12) & 0x7FF;
+ if (reg & BIT_ULL(61) && cl_used) {
+ pr_info("SSO HWGGRP[%d] TIAQ WAE_USED 0x%x\n",
+ ssolf, cl_used);
+ sso_hwgrp_display_taq_list(rvu, ssolf, ent_head,
+ wae_head, cl_used,
+ taq_entries);
+ }
+ pr_info("++++++++++++++++++++++++++++++++++++++++++++++++++\n");
+ pr_info("==================================================\n");
+ }
+}
+
+/* Reads SSO hwgrp IAQ list */
+static void read_sso_hwgrp_iaq_list(struct rvu *rvu, int ssolf, bool all)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ u16 head_idx, tail_idx;
+ int blkaddr, max_id;
+ u64 reg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return;
+
+ block = &hw->block[blkaddr];
+ if (ssolf < 0 || ssolf >= block->lf.max) {
+ pr_info("Invalid SSOLF(HWGRP), valid range is 0-%d\n",
+ block->lf.max - 1);
+ return;
+ }
+ max_id = block->lf.max;
+
+ if (all)
+ ssolf = 0;
+ else
+ max_id = ssolf + 1;
+ pr_info("==================================================\n");
+ for (; ssolf < max_id; ssolf++) {
+ pr_info("++++++++++++++++++++++++++++++++++++++++++++++++++\n");
+ pr_info("SSO HWGGRP[%d] Deschedule Queue(DQ)\n", ssolf);
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_IPL_DESCHEDX(ssolf));
+ pr_info("SSO HWGGRP[%d] DQ List 0x%llx\n", ssolf,
+ reg);
+ head_idx = (reg >> 13) & 0x1FFF;
+ tail_idx = reg & 0x1FFF;
+ if (reg & (BIT_ULL(26) | BIT_ULL(27)))
+ sso_hwgrp_display_iq_list(rvu, ssolf, head_idx,
+ tail_idx, 0);
+ pr_info("++++++++++++++++++++++++++++++++++++++++++++++++++\n");
+ pr_info("SSO HWGGRP[%d] Conflict Queue(CQ)\n", ssolf);
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_IPL_CONFX(ssolf));
+ pr_info("SSO HWGGRP[%d] CQ List 0x%llx\n", ssolf,
+ reg);
+ head_idx = (reg >> 13) & 0x1FFF;
+ tail_idx = reg & 0x1FFF;
+ if (reg & (BIT_ULL(26) | BIT_ULL(27)))
+ sso_hwgrp_display_iq_list(rvu, ssolf, head_idx,
+ tail_idx, 1);
+ pr_info("++++++++++++++++++++++++++++++++++++++++++++++++++\n");
+ pr_info("SSO HWGGRP[%d] Admission Queue(AQ)\n", ssolf);
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_IPL_IAQX(ssolf));
+ pr_info("SSO HWGGRP[%d] AQ List 0x%llx\n", ssolf,
+ reg);
+ head_idx = (reg >> 13) & 0x1FFF;
+ tail_idx = reg & 0x1FFF;
+ if (reg & (BIT_ULL(26) | BIT_ULL(27)))
+ sso_hwgrp_display_iq_list(rvu, ssolf, head_idx,
+ tail_idx, 2);
+ pr_info("++++++++++++++++++++++++++++++++++++++++++++++++++\n");
+ pr_info("==================================================\n");
+ }
+}
+
+/* Reads SSO hwgrp IENT list */
+static int read_sso_hwgrp_ient_list(struct rvu *rvu)
+{
+ const char *tt_c[4] = {"SSO_TT_ORDERED_", "SSO_TT_ATOMIC__",
+ "SSO_TT_UNTAGGED", "SSO_TT_EMPTY___"};
+ struct rvu_hwinfo *hw = rvu->hw;
+ int max_idx = hw->sso.sso_iue;
+ u64 pendtag, qlinks, links;
+ int len, idx, blkaddr;
+ u64 tag, grp, wqp;
+ char str[300];
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return -ENODEV;
+
+ for (idx = 0; idx < max_idx; idx++) {
+ len = 0;
+ tag = rvu_read64(rvu, blkaddr, SSO_AF_IENTX_TAG(idx));
+ grp = rvu_read64(rvu, blkaddr, SSO_AF_IENTX_GRP(idx));
+ pendtag = rvu_read64(rvu, blkaddr,
+ SSO_AF_IENTX_PENDTAG(idx));
+ links = rvu_read64(rvu, blkaddr, SSO_AF_IENTX_LINKS(idx));
+ qlinks = rvu_read64(rvu, blkaddr,
+ SSO_AF_IENTX_QLINKS(idx));
+ wqp = rvu_read64(rvu, blkaddr, SSO_AF_IENTX_WQP(idx));
+ len = snprintf(str + len, 300,
+ "SSO IENT[%4d] TT [%s] HWGRP [%3lld] ", idx,
+ tt_c[(tag >> 32) & 0x3], (grp >> 48) & 0x1f);
+ len += snprintf(str + len, 300 - len,
+ "TAG [0x%010llx] GRP [0x%016llx] ", tag, grp);
+ len += snprintf(str + len, 300 - len, "PENDTAG [0x%010llx] ",
+ pendtag);
+ len += snprintf(str + len, 300 - len,
+ "LINKS [0x%016llx] QLINKS [0x%010llx] ", links,
+ qlinks);
+ snprintf(str + len, 300 - len, "WQP [0x%016llx]\n", wqp);
+ pr_info("%s", str);
+ }
+
+ return 0;
+}
+
+/* Reads SSO hwgrp free list */
+static int read_sso_hwgrp_free_list(struct rvu *rvu)
+{
+ int blkaddr;
+ u64 reg;
+ u8 idx;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return -ENODEV;
+
+ pr_info("==================================================\n");
+ for (idx = 0; idx < 4; idx++) {
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_IPL_FREEX(idx));
+ pr_info("SSO FREE LIST[%d]\n", idx);
+ pr_info("qnum_head : %lld qnum_tail : %lld\n",
+ (reg >> 58) & 0x3, (reg >> 56) & 0x3);
+ pr_info("queue_cnt : %llx\n", (reg >> 26) & 0x7fff);
+ pr_info("queue_val : %lld queue_head : %4lld queue_tail %4lld\n"
+ , (reg >> 40) & 0x1, (reg >> 13) & 0x1fff,
+ reg & 0x1fff);
+ pr_info("==================================================\n");
+ }
+
+ return 0;
+}
+
+/* Reads SSO HWS (work-slot) arbitration state and group masks */
+static void read_sso_hws_info(struct rvu *rvu, int ssowlf, bool all)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int blkaddr;
+ int max_id;
+ u64 reg;
+ u8 mask;
+ u8 set;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSOW, 0);
+ if (blkaddr < 0)
+ return;
+
+ block = &hw->block[blkaddr];
+ if (ssowlf < 0 || ssowlf >= block->lf.max) {
+ pr_info("Invalid SSOWLF(HWS), valid range is 0-%d\n",
+ block->lf.max - 1);
+ return;
+ }
+ max_id = block->lf.max;
+
+ if (all)
+ ssowlf = 0;
+ else
+ max_id = ssowlf + 1;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return;
+
+ pr_info("==================================================\n");
+ for (; ssowlf < max_id; ssowlf++) {
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWSX_ARB(ssowlf));
+ pr_info("SSOW HWS[%d] Arbitration State 0x%llx\n", ssowlf,
+ reg);
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWSX_GMCTL(ssowlf));
+ pr_info("SSOW HWS[%d] Guest Machine Control 0x%llx\n", ssowlf,
+ reg);
+ for (set = 0; set < 2; set++)
+ for (mask = 0; mask < 4; mask++) {
+ reg = rvu_read64(rvu, blkaddr,
+ SSO_AF_HWSX_SX_GRPMSKX(ssowlf,
+ set,
+ mask));
+ pr_info(
+ "SSOW HWS[%d] SET[%d] Group Mask[%d] 0x%llx\n",
+ ssowlf, set, mask, reg);
+ }
+ pr_info("==================================================\n");
+ }
+}
+
+typedef void (*sso_dump_cb)(struct rvu *rvu, int ssolf, bool all);
+
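+/* Common write handler: parses LF id/'all' and invokes the dump callback */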
+static ssize_t rvu_dbg_sso_cmd_parser(struct file *filp,
+ const char __user *buffer, size_t count,
+ loff_t *ppos, char *lf_type,
+ char *file_nm, sso_dump_cb fn)
+{
+ struct rvu *rvu = filp->private_data;
+ bool all = false;
+ char *cmd_buf;
+ int lf = 0;
+
+ if ((*ppos != 0) || !count)
+ return -EINVAL;
+
+ cmd_buf = kzalloc(count + 1, GFP_KERNEL);
+ if (!cmd_buf)
+ return -ENOSPC;
+
+ if (parse_sso_cmd_buffer(cmd_buf, &count, buffer,
+ &lf, &all) < 0) {
+ pr_info("Usage: echo [<%s>/all] > %s\n", lf_type, file_nm);
+ } else {
+ fn(rvu, lf, all);
+ }
+ kfree(cmd_buf);
+
+ return count;
+}
+
+/* SSO debugfs APIs */
+static ssize_t rvu_dbg_sso_pc_display(struct file *filp,
+ char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return read_sso_pc(filp->private_data);
+}
+
+static ssize_t rvu_dbg_sso_hwgrp_pc_display(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_sso_cmd_parser(filp, buffer, count, ppos, "hwgrp",
+ "sso_hwgrp_pc", read_sso_hwgrp_pc);
+}
+
+static ssize_t rvu_dbg_sso_hwgrp_thresh_display(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_sso_cmd_parser(filp, buffer, count, ppos, "hwgrp",
+ "sso_hwgrp_thresh", read_sso_hwgrp_thresh);
+}
+
+static ssize_t rvu_dbg_sso_hwgrp_taq_wlk_display(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_sso_cmd_parser(filp, buffer, count, ppos, "hwgrp",
+ "sso_hwgrp_taq_wlk", read_sso_hwgrp_taq_list);
+}
+
+static ssize_t rvu_dbg_sso_hwgrp_iaq_wlk_display(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_sso_cmd_parser(filp, buffer, count, ppos, "hwgrp",
+ "sso_hwgrp_iaq_wlk", read_sso_hwgrp_iaq_list);
+}
+
+static ssize_t rvu_dbg_sso_hwgrp_ient_wlk_display(struct file *filp,
+ char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return read_sso_hwgrp_ient_list(filp->private_data);
+}
+
+static ssize_t rvu_dbg_sso_hwgrp_fl_wlk_display(struct file *filp,
+ char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return read_sso_hwgrp_free_list(filp->private_data);
+}
+
+static ssize_t rvu_dbg_sso_hws_info_display(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_sso_cmd_parser(filp, buffer, count, ppos, "hws",
+ "sso_hws_info", read_sso_hws_info);
+}
+
+RVU_DEBUG_FOPS(sso_pc, sso_pc_display, NULL);
+RVU_DEBUG_FOPS(sso_hwgrp_pc, NULL, sso_hwgrp_pc_display);
+RVU_DEBUG_FOPS(sso_hwgrp_thresh, NULL, sso_hwgrp_thresh_display);
+RVU_DEBUG_FOPS(sso_hwgrp_taq_wlk, NULL, sso_hwgrp_taq_wlk_display);
+RVU_DEBUG_FOPS(sso_hwgrp_iaq_wlk, NULL, sso_hwgrp_iaq_wlk_display);
+RVU_DEBUG_FOPS(sso_hwgrp_ient_wlk, sso_hwgrp_ient_wlk_display, NULL);
+RVU_DEBUG_FOPS(sso_hwgrp_fl_wlk, sso_hwgrp_fl_wlk_display, NULL);
+RVU_DEBUG_FOPS(sso_hws_info, NULL, sso_hws_info_display);
+
+static void rvu_dbg_sso_init(struct rvu *rvu)
+{
+ rvu->rvu_dbg.sso = debugfs_create_dir("sso", rvu->rvu_dbg.root);
+ rvu->rvu_dbg.sso_hwgrp = debugfs_create_dir("hwgrp", rvu->rvu_dbg.sso);
+ rvu->rvu_dbg.sso_hws = debugfs_create_dir("hws", rvu->rvu_dbg.sso);
+
+ debugfs_create_file("sso_pc", 0600, rvu->rvu_dbg.sso, rvu,
+ &rvu_dbg_sso_pc_fops);
+
+ debugfs_create_file("sso_hwgrp_pc", 0600, rvu->rvu_dbg.sso_hwgrp,
+ rvu, &rvu_dbg_sso_hwgrp_pc_fops);
+
+ debugfs_create_file("sso_hwgrp_thresh", 0600, rvu->rvu_dbg.sso_hwgrp,
+ rvu, &rvu_dbg_sso_hwgrp_thresh_fops);
+
+ debugfs_create_file("sso_hwgrp_taq_walk", 0600, rvu->rvu_dbg.sso_hwgrp,
+ rvu, &rvu_dbg_sso_hwgrp_taq_wlk_fops);
+
+ debugfs_create_file("sso_hwgrp_iaq_walk", 0600, rvu->rvu_dbg.sso_hwgrp,
+ rvu, &rvu_dbg_sso_hwgrp_iaq_wlk_fops);
+
+ debugfs_create_file("sso_hwgrp_ient_walk", 0600, rvu->rvu_dbg.sso_hwgrp,
+ rvu, &rvu_dbg_sso_hwgrp_ient_wlk_fops);
+
+ debugfs_create_file("sso_hwgrp_free_list_walk", 0600,
+ rvu->rvu_dbg.sso_hwgrp, rvu,
+ &rvu_dbg_sso_hwgrp_fl_wlk_fops);
+
+ debugfs_create_file("sso_hws_info", 0600, rvu->rvu_dbg.sso_hws,
+ rvu, &rvu_dbg_sso_hws_info_fops);
+}
+
+/* CPT debugfs APIs */
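+/* Parses CPT engine type ('SE', 'IE', 'AE' or 'all') from user input */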
+static int parse_cpt_cmd_buffer(char *cmd_buf, size_t *count,
+ const char __user *buffer, char *e_type)
+{
+ int bytes_not_copied;
+ char *cmd_buf_tmp;
+ char *subtoken;
+
+ bytes_not_copied = copy_from_user(cmd_buf, buffer, *count);
+ if (bytes_not_copied)
+ return -EFAULT;
+
+ cmd_buf[*count] = '\0';
+ cmd_buf_tmp = strchr(cmd_buf, '\n');
+
+ if (cmd_buf_tmp) {
+ *cmd_buf_tmp = '\0';
+ *count = cmd_buf_tmp - cmd_buf + 1;
+ }
+
+ subtoken = strsep(&cmd_buf, " ");
+ if (subtoken)
+ strcpy(e_type, subtoken);
+ else
+ return -EINVAL;
+
+ if (cmd_buf)
+ return -EINVAL;
+
+ if (strcmp(e_type, "SE") && strcmp(e_type, "IE") &&
+ strcmp(e_type, "AE") && strcmp(e_type, "all"))
+ return -EINVAL;
+
+ return 0;
+}
+
+static ssize_t rvu_dbg_cpt_cmd_parser(struct file *filp,
+ const char __user *buffer, size_t count,
+ loff_t *ppos)
+{
+ struct seq_file *s = filp->private_data;
+ struct rvu *rvu = s->private;
+ char *cmd_buf;
+ int ret = 0;
+
+ if ((*ppos != 0) || !count)
+ return -EINVAL;
+
+ cmd_buf = kzalloc(count + 1, GFP_KERNEL);
+ if (!cmd_buf)
+ return -ENOSPC;
+
+ if (parse_cpt_cmd_buffer(cmd_buf, &count, buffer,
+ rvu->rvu_dbg.cpt_ctx.e_type) < 0)
+ ret = -EINVAL;
+
+ kfree(cmd_buf);
+
+ if (ret)
+ return -EINVAL;
+
+ return count;
+}
+
+static ssize_t rvu_dbg_cpt_engines_sts_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_cpt_cmd_parser(filp, buffer, count, ppos);
+}
+
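+/* Dumps busy/free status bitmaps of the selected CPT engine type */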
+static int rvu_dbg_cpt_engines_sts_display(struct seq_file *filp, void *unused)
+{
+ u64 busy_sts[2] = {0}, free_sts[2] = {0};
+ struct rvu *rvu = filp->private;
+ u16 max_ses, max_ies, max_aes;
+ u32 e_min = 0, e_max = 0, e;
+ struct dentry *current_dir;
+ int blkaddr;
+ char *e_type;
+ u64 reg;
+
+ current_dir = filp->file->f_path.dentry->d_parent;
+ blkaddr = (!strcmp(current_dir->d_name.name, "cpt1") ?
+ BLKADDR_CPT1 : BLKADDR_CPT0);
+
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
+ max_ses = reg & 0xffff;
+ max_ies = (reg >> 16) & 0xffff;
+ max_aes = (reg >> 32) & 0xffff;
+
+ e_type = rvu->rvu_dbg.cpt_ctx.e_type;
+
+ if (strcmp(e_type, "SE") == 0) {
+ e_min = 0;
+ e_max = max_ses - 1;
+ } else if (strcmp(e_type, "IE") == 0) {
+ e_min = max_ses;
+ e_max = max_ses + max_ies - 1;
+ } else if (strcmp(e_type, "AE") == 0) {
+ e_min = max_ses + max_ies;
+ e_max = max_ses + max_ies + max_aes - 1;
+ } else if (strcmp(e_type, "all") == 0) {
+ e_min = 0;
+ e_max = max_ses + max_ies + max_aes - 1;
+ } else {
+ return -EINVAL;
+ }
+
+ for (e = e_min; e <= e_max; e++) {
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e));
+ if (reg & 0x1) {
+ if (e < max_ses)
+ busy_sts[0] |= 1ULL << e;
+ else if (e >= max_ses)
+ busy_sts[1] |= 1ULL << (e - max_ses);
+ }
+ if (reg & 0x2) {
+ if (e < max_ses)
+ free_sts[0] |= 1ULL << e;
+ else if (e >= max_ses)
+ free_sts[1] |= 1ULL << (e - max_ses);
+ }
+ }
+ seq_printf(filp, "FREE STS : 0x%016llx 0x%016llx\n", free_sts[1],
+ free_sts[0]);
+ seq_printf(filp, "BUSY STS : 0x%016llx 0x%016llx\n", busy_sts[1],
+ busy_sts[0]);
+
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(cpt_engines_sts, cpt_engines_sts_display,
+ cpt_engines_sts_write);
+
+static ssize_t rvu_dbg_cpt_engines_info_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_cpt_cmd_parser(filp, buffer, count, ppos);
+}
+
+static int rvu_dbg_cpt_engines_info_display(struct seq_file *filp, void *unused)
+{
+ struct rvu *rvu = filp->private;
+ u16 max_ses, max_ies, max_aes;
+ struct dentry *current_dir;
+ u32 e_min, e_max, e;
+ int blkaddr;
+ char *e_type;
+ u64 reg;
+
+ current_dir = filp->file->f_path.dentry->d_parent;
+ blkaddr = (!strcmp(current_dir->d_name.name, "cpt1") ?
+ BLKADDR_CPT1 : BLKADDR_CPT0);
+
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
+ max_ses = reg & 0xffff;
+ max_ies = (reg >> 16) & 0xffff;
+ max_aes = (reg >> 32) & 0xffff;
+
+ e_type = rvu->rvu_dbg.cpt_ctx.e_type;
+
+ if (strcmp(e_type, "SE") == 0) {
+ e_min = 0;
+ e_max = max_ses - 1;
+ } else if (strcmp(e_type, "IE") == 0) {
+ e_min = max_ses;
+ e_max = max_ses + max_ies - 1;
+ } else if (strcmp(e_type, "AE") == 0) {
+ e_min = max_ses + max_ies;
+ e_max = max_ses + max_ies + max_aes - 1;
+ } else if (strcmp(e_type, "all") == 0) {
+ e_min = 0;
+ e_max = max_ses + max_ies + max_aes - 1;
+ } else {
+ return -EINVAL;
+ }
+
+ seq_puts(filp, "===========================================\n");
+ for (e = e_min; e <= e_max; e++) {
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(e));
+ seq_printf(filp, "CPT Engine[%u] Group Enable 0x%02llx\n", e,
+ reg & 0xff);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_ACTIVE(e));
+ seq_printf(filp, "CPT Engine[%u] Active Info 0x%llx\n", e,
+ reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(e));
+ seq_printf(filp, "CPT Engine[%u] Control 0x%llx\n", e,
+ reg);
+ seq_puts(filp, "===========================================\n");
+ }
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(cpt_engines_info, cpt_engines_info_display,
+ cpt_engines_info_write);
+
+static int rvu_dbg_cpt_lfs_info_display(struct seq_file *filp, void *unused)
+{
+ struct rvu *rvu = filp->private;
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct dentry *current_dir;
+ struct rvu_block *block;
+ int blkaddr;
+ u64 reg;
+ u32 lf;
+
+ current_dir = filp->file->f_path.dentry->d_parent;
+ blkaddr = (!strcmp(current_dir->d_name.name, "cpt1") ?
+ BLKADDR_CPT1 : BLKADDR_CPT0);
+
+ block = &hw->block[blkaddr];
+ if (!block->lf.bmap)
+ return -ENODEV;
+
+ seq_puts(filp, "===========================================\n");
+ for (lf = 0; lf < block->lf.max; lf++) {
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(lf));
+
+ seq_printf(filp, "CPT Lf[%u] CTL 0x%llx\n", lf, reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(lf));
+ seq_printf(filp, "CPT Lf[%u] CTL2 0x%llx\n", lf, reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_PTR_CTL(lf));
+ seq_printf(filp, "CPT Lf[%u] PTR_CTL 0x%llx\n", lf, reg);
+ reg = rvu_read64(rvu, blkaddr, block->lfcfg_reg |
+ (lf << block->lfshift));
+ seq_printf(filp, "CPT Lf[%u] CFG 0x%llx\n", lf, reg);
+ seq_puts(filp, "===========================================\n");
+ }
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(cpt_lfs_info, cpt_lfs_info_display, NULL);
+
+static int rvu_dbg_cpt_err_info_display(struct seq_file *filp, void *unused)
+{
+ struct rvu *rvu = filp->private;
+ struct dentry *current_dir;
+ u64 reg0, reg1;
+ int blkaddr;
+
+ current_dir = filp->file->f_path.dentry->d_parent;
+ blkaddr = (!strcmp(current_dir->d_name.name, "cpt1") ?
+ BLKADDR_CPT1 : BLKADDR_CPT0);
+
+ reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0));
+ reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1));
+ seq_printf(filp, "CPT_AF_FLTX_INT: 0x%llx 0x%llx\n", reg0, reg1);
+ reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(0));
+ reg1 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(1));
+ seq_printf(filp, "CPT_AF_PSNX_EXE: 0x%llx 0x%llx\n", reg0, reg1);
+ reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_LF(0));
+ seq_printf(filp, "CPT_AF_PSNX_LF: 0x%llx\n", reg0);
+ reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RVU_INT);
+ seq_printf(filp, "CPT_AF_RVU_INT: 0x%llx\n", reg0);
+ reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RAS_INT);
+ seq_printf(filp, "CPT_AF_RAS_INT: 0x%llx\n", reg0);
+ reg0 = rvu_read64(rvu, blkaddr, CPT_AF_EXE_ERR_INFO);
+ seq_printf(filp, "CPT_AF_EXE_ERR_INFO: 0x%llx\n", reg0);
+
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(cpt_err_info, cpt_err_info_display, NULL);
+
+static int rvu_dbg_cpt_pc_display(struct seq_file *filp, void *unused)
+{
+ struct dentry *current_dir;
+ struct rvu *rvu;
+ int blkaddr;
+ u64 reg;
+
+ rvu = filp->private;
+
+ current_dir = filp->file->f_path.dentry->d_parent;
+ blkaddr = (!strcmp(current_dir->d_name.name, "cpt1") ?
+ BLKADDR_CPT1 : BLKADDR_CPT0);
+
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_REQ_PC);
+ seq_printf(filp, "CPT instruction requests %llu\n", reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_LATENCY_PC);
+ seq_printf(filp, "CPT instruction latency %llu\n", reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_REQ_PC);
+ seq_printf(filp, "CPT NCB read requests %llu\n", reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_LATENCY_PC);
+ seq_printf(filp, "CPT NCB read latency %llu\n", reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_UC_PC);
+ seq_printf(filp, "CPT read requests caused by UC fills %llu\n", reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_ACTIVE_CYCLES_PC);
+ seq_printf(filp, "CPT active cycles pc %llu\n", reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_CPTCLK_CNT);
+ seq_printf(filp, "CPT clock count pc %llu\n", reg);
+
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(cpt_pc, cpt_pc_display, NULL);
+
+static void rvu_dbg_cpt_init(struct rvu *rvu, int blkaddr)
+{
+ if (!is_block_implemented(rvu->hw, blkaddr))
+ return;
+
+ if (blkaddr == BLKADDR_CPT0) {
+ rvu->rvu_dbg.cpt = debugfs_create_dir("cpt", rvu->rvu_dbg.root);
+ } else {
+ rvu->rvu_dbg.cpt = debugfs_create_dir("cpt1",
+ rvu->rvu_dbg.root);
+ }
+ debugfs_create_file("cpt_pc", 0600, rvu->rvu_dbg.cpt, rvu,
+ &rvu_dbg_cpt_pc_fops);
+ debugfs_create_file("cpt_engines_sts", 0600, rvu->rvu_dbg.cpt, rvu,
+ &rvu_dbg_cpt_engines_sts_fops);
+ debugfs_create_file("cpt_engines_info", 0600, rvu->rvu_dbg.cpt, rvu,
+ &rvu_dbg_cpt_engines_info_fops);
+ debugfs_create_file("cpt_lfs_info", 0600, rvu->rvu_dbg.cpt, rvu,
+ &rvu_dbg_cpt_lfs_info_fops);
+ debugfs_create_file("cpt_err_info", 0600, rvu->rvu_dbg.cpt, rvu,
+ &rvu_dbg_cpt_err_info_fops);
+}
+
RVU_DEBUG_SEQ_FOPS(npc_mcam_info, npc_mcam_info_display, NULL);
static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp,
@@ -1738,57 +3394,255 @@ static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp,
RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL);
-static void rvu_dbg_npc_init(struct rvu *rvu)
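+/* Prints match fields and masks of an installed MCAM rule */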
+static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
+ struct rvu_npc_mcam_rule *rule)
{
- const struct device *dev = &rvu->pdev->dev;
- struct dentry *pfile;
+ u8 bit;
+
+ for_each_set_bit(bit, (unsigned long *)&rule->features, 64) {
+ seq_printf(s, "\t%s ", npc_get_field_name(bit));
+ switch (bit) {
+ case NPC_DMAC:
+ seq_printf(s, "%pM ", rule->packet.dmac);
+ seq_printf(s, "mask %pM\n", rule->mask.dmac);
+ break;
+ case NPC_SMAC:
+ seq_printf(s, "%pM ", rule->packet.smac);
+ seq_printf(s, "mask %pM\n", rule->mask.smac);
+ break;
+ case NPC_ETYPE:
+ seq_printf(s, "0x%x ", ntohs(rule->packet.etype));
+ seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.etype));
+ break;
+ case NPC_OUTER_VID:
+ case NPC_FDSA_VAL:
+ seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_tci));
+ seq_printf(s, "mask 0x%x\n",
+ ntohs(rule->mask.vlan_tci));
+ break;
+ case NPC_TOS:
+ seq_printf(s, "%d ", rule->packet.tos);
+ seq_printf(s, "mask 0x%x\n", rule->mask.tos);
+ break;
+ case NPC_SIP_IPV4:
+ seq_printf(s, "%pI4 ", &rule->packet.ip4src);
+ seq_printf(s, "mask %pI4\n", &rule->mask.ip4src);
+ break;
+ case NPC_DIP_IPV4:
+ seq_printf(s, "%pI4 ", &rule->packet.ip4dst);
+ seq_printf(s, "mask %pI4\n", &rule->mask.ip4dst);
+ break;
+ case NPC_SIP_IPV6:
+ seq_printf(s, "%pI6 ", rule->packet.ip6src);
+ seq_printf(s, "mask %pI6\n", rule->mask.ip6src);
+ break;
+ case NPC_DIP_IPV6:
+ seq_printf(s, "%pI6 ", rule->packet.ip6dst);
+ seq_printf(s, "mask %pI6\n", rule->mask.ip6dst);
+ break;
+ case NPC_SPORT_TCP:
+ case NPC_SPORT_UDP:
+ case NPC_SPORT_SCTP:
+ seq_printf(s, "%d ", ntohs(rule->packet.sport));
+ seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.sport));
+ break;
+ case NPC_DPORT_TCP:
+ case NPC_DPORT_UDP:
+ case NPC_DPORT_SCTP:
+ seq_printf(s, "%d ", ntohs(rule->packet.dport));
+ seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport));
+ break;
+ default:
+ seq_puts(s, "\n");
+ break;
+ }
+ }
+}
- rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root);
- if (!rvu->rvu_dbg.npc)
- return;
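+/* Print the TX or RX action programmed for an MCAM rule. */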
+static void rvu_dbg_npc_mcam_show_action(struct seq_file *s,
+ struct rvu_npc_mcam_rule *rule)
+{
+ if (is_npc_intf_tx(rule->intf)) {
+ switch (rule->tx_action.op) {
+ case NIX_TX_ACTIONOP_DROP:
+ seq_puts(s, "\taction: Drop\n");
+ break;
+ case NIX_TX_ACTIONOP_UCAST_DEFAULT:
+ seq_puts(s, "\taction: Unicast to default channel\n");
+ break;
+ case NIX_TX_ACTIONOP_UCAST_CHAN:
+ seq_printf(s, "\taction: Unicast to channel %d\n",
+ rule->tx_action.index);
+ break;
+ case NIX_TX_ACTIONOP_MCAST:
+ seq_puts(s, "\taction: Multicast\n");
+ break;
+ case NIX_TX_ACTIONOP_DROP_VIOL:
+ seq_puts(s, "\taction: Lockdown Violation Drop\n");
+ break;
+ default:
+ break;
+ }
+ } else {
+ switch (rule->rx_action.op) {
+ case NIX_RX_ACTIONOP_DROP:
+ seq_puts(s, "\taction: Drop\n");
+ break;
+ case NIX_RX_ACTIONOP_UCAST:
+ seq_printf(s, "\taction: Direct to queue %d\n",
+ rule->rx_action.index);
+ break;
+ case NIX_RX_ACTIONOP_RSS:
+ seq_puts(s, "\taction: RSS\n");
+ break;
+ case NIX_RX_ACTIONOP_UCAST_IPSEC:
+ seq_puts(s, "\taction: Unicast ipsec\n");
+ break;
+ case NIX_RX_ACTIONOP_MCAST:
+ seq_puts(s, "\taction: Multicast\n");
+ break;
+ default:
+ break;
+ }
+ }
+}
- pfile = debugfs_create_file("mcam_info", 0444, rvu->rvu_dbg.npc,
- rvu, &rvu_dbg_npc_mcam_info_fops);
- if (!pfile)
- goto create_failed;
+static const char *rvu_dbg_get_intf_name(int intf)
+{
+ switch (intf) {
+ case NIX_INTFX_RX(0):
+ return "NIX0_RX";
+ case NIX_INTFX_RX(1):
+ return "NIX1_RX";
+ case NIX_INTFX_TX(0):
+ return "NIX0_TX";
+ case NIX_INTFX_TX(1):
+ return "NIX1_TX";
+ default:
+ break;
+ }
- pfile = debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc,
- rvu, &rvu_dbg_npc_rx_miss_act_fops);
- if (!pfile)
- goto create_failed;
+ return "unknown";
+}
- return;
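+/* Walk the software list of installed MCAM rules and print each rule's owner,
+ * interface, match fields, action, enable state and (if present) hit counter.
+ */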
+static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
+{
+ struct rvu_npc_mcam_rule *iter;
+ struct rvu *rvu = s->private;
+ struct npc_mcam *mcam;
+ int pf, vf = -1;
+ bool enabled;
+ int blkaddr;
+ u16 target;
+ u64 hits;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return 0;
+
+ mcam = &rvu->hw->mcam;
+
+ mutex_lock(&mcam->lock);
+ list_for_each_entry(iter, &mcam->mcam_rules, list) {
+ pf = (iter->owner >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
+ seq_printf(s, "\n\tInstalled by: PF%d ", pf);
+
+ if (iter->owner & RVU_PFVF_FUNC_MASK) {
+ vf = (iter->owner & RVU_PFVF_FUNC_MASK) - 1;
+ seq_printf(s, "VF%d", vf);
+ }
+ seq_puts(s, "\n");
+
+ seq_printf(s, "\tdirection: %s\n", is_npc_intf_rx(iter->intf) ?
+ "RX" : "TX");
+ seq_printf(s, "\tinterface: %s\n",
+ rvu_dbg_get_intf_name(iter->intf));
+ seq_printf(s, "\tmcam entry: %d\n", iter->entry);
+
+ rvu_dbg_npc_mcam_show_flows(s, iter);
+ if (is_npc_intf_rx(iter->intf)) {
+ target = iter->rx_action.pf_func;
+ pf = (target >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
+ seq_printf(s, "\tForward to: PF%d ", pf);
+
+ if (target & RVU_PFVF_FUNC_MASK) {
+ vf = (target & RVU_PFVF_FUNC_MASK) - 1;
+ seq_printf(s, "VF%d", vf);
+ }
+ seq_puts(s, "\n");
+ seq_printf(s, "\tchannel: 0x%x\n", iter->chan);
+ seq_printf(s, "\tchannel_mask: 0x%x\n", iter->chan_mask);
+ }
+
+ rvu_dbg_npc_mcam_show_action(s, iter);
+
+ enabled = is_mcam_entry_enabled(rvu, mcam, blkaddr, iter->entry);
+ seq_printf(s, "\tenabled: %s\n", enabled ? "yes" : "no");
+
+ if (!iter->has_cntr)
+ continue;
+ seq_printf(s, "\tcounter: %d\n", iter->cntr);
+
+ hits = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(iter->cntr));
+ seq_printf(s, "\thits: %lld\n", hits);
+ }
+ mutex_unlock(&mcam->lock);
+
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(npc_mcam_rules, npc_mcam_show_rules, NULL);
+
+static void rvu_dbg_npc_init(struct rvu *rvu)
+{
+ rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root);
+ debugfs_create_file("mcam_info", 0444, rvu->rvu_dbg.npc, rvu,
+ &rvu_dbg_npc_mcam_info_fops);
+ debugfs_create_file("mcam_rules", 0444, rvu->rvu_dbg.npc, rvu,
+ &rvu_dbg_npc_mcam_rules_fops);
+ debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc, rvu,
+ &rvu_dbg_npc_rx_miss_act_fops);
+}
-create_failed:
- dev_err(dev, "Failed to create debugfs dir/file for NPC\n");
- debugfs_remove_recursive(rvu->rvu_dbg.npc);
+static const char *rvu_get_dbg_dir_name(struct rvu *rvu)
+{
+ if (is_rvu_otx2(rvu))
+ return "octeontx2";
+
+ return "cn10k";
}
void rvu_dbg_init(struct rvu *rvu)
{
- struct device *dev = &rvu->pdev->dev;
- struct dentry *pfile;
+ rvu->rvu_dbg.root = debugfs_create_dir(rvu_get_dbg_dir_name(rvu), NULL);
- rvu->rvu_dbg.root = debugfs_create_dir(DEBUGFS_DIR_NAME, NULL);
- if (!rvu->rvu_dbg.root) {
- dev_err(rvu->dev, "%s failed\n", __func__);
- return;
- }
- pfile = debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu,
- &rvu_dbg_rsrc_status_fops);
- if (!pfile)
- goto create_failed;
+ debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu,
+ &rvu_dbg_rsrc_status_fops);
+
+ if (!is_rvu_otx2(rvu))
+ debugfs_create_file("lmtst_map_table", 0444, rvu->rvu_dbg.root,
+ rvu, &rvu_dbg_lmtst_map_table_fops);
+ if (!cgx_get_cgxcnt_max())
+ goto create;
+
+ if (is_rvu_otx2(rvu))
+ debugfs_create_file("rvu_pf_cgx_map", 0444, rvu->rvu_dbg.root,
+ rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
+ else
+ debugfs_create_file("rvu_pf_rpm_map", 0444, rvu->rvu_dbg.root,
+ rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
+
+create:
rvu_dbg_npa_init(rvu);
- rvu_dbg_nix_init(rvu);
+ rvu_dbg_nix_init(rvu, BLKADDR_NIX0);
+ rvu_dbg_nix_init(rvu, BLKADDR_NIX1);
rvu_dbg_cgx_init(rvu);
rvu_dbg_npc_init(rvu);
+ rvu_dbg_sso_init(rvu);
- return;
-
-create_failed:
- dev_err(dev, "Failed to create debugfs dir\n");
- debugfs_remove_recursive(rvu->rvu_dbg.root);
+ rvu_dbg_cpt_init(rvu, BLKADDR_CPT0);
+ rvu_dbg_cpt_init(rvu, BLKADDR_CPT1);
}
void rvu_dbg_exit(struct rvu *rvu)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
new file mode 100644
index 000000000000..eb2da2b77af4
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
@@ -0,0 +1,1817 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function Devlink
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/bitfield.h>
+
+#include "rvu.h"
+#include "rvu_reg.h"
+#include "rvu_struct.h"
+
+#define DRV_NAME "octeontx2-af"
+
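+/* Helpers to open/close a named pair plus object nest in a devlink health
+ * dump so each block's decoded error bits are grouped under one key.
+ */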
+static int rvu_report_pair_start(struct devlink_fmsg *fmsg, const char *name)
+{
+ int err;
+
+ err = devlink_fmsg_pair_nest_start(fmsg, name);
+ if (err)
+ return err;
+
+ return devlink_fmsg_obj_nest_start(fmsg);
+}
+
+static int rvu_report_pair_end(struct devlink_fmsg *fmsg)
+{
+ int err;
+
+ err = devlink_fmsg_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ return devlink_fmsg_pair_nest_end(fmsg);
+}
+
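+/* Request the MSI-X vector at 'offset' on behalf of the devlink context and
+ * record the allocation; returns true on success so callers can stop setting
+ * up further vectors on failure.
+ */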
+static bool rvu_common_request_irq(struct rvu *rvu, int offset,
+ const char *name, irq_handler_t fn)
+{
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ int rc;
+
+ sprintf(&rvu->irq_name[offset * NAME_SIZE], "%s", name);
+ rc = request_irq(pci_irq_vector(rvu->pdev, offset), fn, 0,
+ &rvu->irq_name[offset * NAME_SIZE], rvu_dl);
+ if (rc)
+ dev_warn(rvu->dev, "Failed to register %s irq\n", name);
+ else
+ rvu->irq_allocated[offset] = true;
+
+ return rvu->irq_allocated[offset];
+}
+
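+/* Deferred work: file a devlink health report for the NIX_AF_RVU interrupt
+ * value latched by the hard IRQ handler below.
+ */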
+static void rvu_nix_intr_work(struct work_struct *work)
+{
+ struct rvu_nix_health_reporters *rvu_nix_health_reporter;
+
+ rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, intr_work);
+ devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_intr_reporter,
+ "NIX_AF_RVU Error",
+ rvu_nix_health_reporter->nix_event_ctx);
+}
+
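+/* Hard IRQ handler: latch NIX_AF_RVU_INT into the event context, clear and
+ * mask the interrupt, then defer reporting to the devlink workqueue. The
+ * source stays masked until the reporter's recover callback re-enables it.
+ */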
+static irqreturn_t rvu_nix_af_rvu_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu_nix_event_ctx *nix_event_context;
+ struct rvu_devlink *rvu_dl = rvu_irq;
+ struct rvu *rvu;
+ int blkaddr;
+ u64 intr;
+
+ rvu = rvu_dl->rvu;
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
+ intr = rvu_read64(rvu, blkaddr, NIX_AF_RVU_INT);
+ nix_event_context->nix_af_rvu_int = intr;
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT, intr);
+ rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1C, ~0ULL);
+ queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->intr_work);
+
+ return IRQ_HANDLED;
+}
+
+static void rvu_nix_gen_work(struct work_struct *work)
+{
+ struct rvu_nix_health_reporters *rvu_nix_health_reporter;
+
+ rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, gen_work);
+ devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_gen_reporter,
+ "NIX_AF_GEN Error",
+ rvu_nix_health_reporter->nix_event_ctx);
+}
+
+static irqreturn_t rvu_nix_af_rvu_gen_handler(int irq, void *rvu_irq)
+{
+ struct rvu_nix_event_ctx *nix_event_context;
+ struct rvu_devlink *rvu_dl = rvu_irq;
+ struct rvu *rvu;
+ int blkaddr;
+ u64 intr;
+
+ rvu = rvu_dl->rvu;
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
+ intr = rvu_read64(rvu, blkaddr, NIX_AF_GEN_INT);
+ nix_event_context->nix_af_rvu_gen = intr;
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT, intr);
+ rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1C, ~0ULL);
+ queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->gen_work);
+
+ return IRQ_HANDLED;
+}
+
+static void rvu_nix_err_work(struct work_struct *work)
+{
+ struct rvu_nix_health_reporters *rvu_nix_health_reporter;
+
+ rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, err_work);
+ devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_err_reporter,
+ "NIX_AF_ERR Error",
+ rvu_nix_health_reporter->nix_event_ctx);
+}
+
+static irqreturn_t rvu_nix_af_rvu_err_handler(int irq, void *rvu_irq)
+{
+ struct rvu_nix_event_ctx *nix_event_context;
+ struct rvu_devlink *rvu_dl = rvu_irq;
+ struct rvu *rvu;
+ int blkaddr;
+ u64 intr;
+
+ rvu = rvu_dl->rvu;
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
+ intr = rvu_read64(rvu, blkaddr, NIX_AF_ERR_INT);
+ nix_event_context->nix_af_rvu_err = intr;
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT, intr);
+ rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1C, ~0ULL);
+ queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->err_work);
+
+ return IRQ_HANDLED;
+}
+
+static void rvu_nix_ras_work(struct work_struct *work)
+{
+ struct rvu_nix_health_reporters *rvu_nix_health_reporter;
+
+ rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, ras_work);
+ devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_ras_reporter,
+ "NIX_AF_RAS Error",
+ rvu_nix_health_reporter->nix_event_ctx);
+}
+
+static irqreturn_t rvu_nix_af_rvu_ras_handler(int irq, void *rvu_irq)
+{
+ struct rvu_nix_event_ctx *nix_event_context;
+ struct rvu_devlink *rvu_dl = rvu_irq;
+ struct rvu *rvu;
+ int blkaddr;
+ u64 intr;
+
+ rvu = rvu_dl->rvu;
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
+ intr = rvu_read64(rvu, blkaddr, NIX_AF_RAS);
+ nix_event_context->nix_af_rvu_ras = intr;
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, NIX_AF_RAS, intr);
+ rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1C, ~0ULL);
+ queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->ras_work);
+
+ return IRQ_HANDLED;
+}
+
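+/* Mask all NIX AF health interrupt sources and free any vectors that were
+ * successfully requested.
+ */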
+static void rvu_nix_unregister_interrupts(struct rvu *rvu)
+{
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ int offs, i, blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return;
+
+ offs = rvu_read64(rvu, blkaddr, NIX_PRIV_AF_INT_CFG) & 0x3ff;
+ if (!offs)
+ return;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1C, ~0ULL);
+ rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1C, ~0ULL);
+ rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1C, ~0ULL);
+ rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1C, ~0ULL);
+
+ if (rvu->irq_allocated[offs + NIX_AF_INT_VEC_RVU]) {
+ free_irq(pci_irq_vector(rvu->pdev, offs + NIX_AF_INT_VEC_RVU),
+ rvu_dl);
+ rvu->irq_allocated[offs + NIX_AF_INT_VEC_RVU] = false;
+ }
+
+ for (i = NIX_AF_INT_VEC_GEN; i < NIX_AF_INT_VEC_CNT; i++)
+ if (rvu->irq_allocated[offs + i]) {
+ free_irq(pci_irq_vector(rvu->pdev, offs + i), rvu_dl);
+ rvu->irq_allocated[offs + i] = false;
+ }
+}
+
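+/* Hook the NIX AF RVU/GEN/ERR/RAS vectors and unmask them; on any failure the
+ * vectors registered so far are torn down again.
+ */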
+static int rvu_nix_register_interrupts(struct rvu *rvu)
+{
+ int blkaddr, base;
+ bool rc;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ /* Get NIX AF MSIX vectors offset. */
+ base = rvu_read64(rvu, blkaddr, NIX_PRIV_AF_INT_CFG) & 0x3ff;
+ if (!base) {
+ dev_warn(rvu->dev,
+ "Failed to get NIX%d NIX_AF_INT vector offsets\n",
+ blkaddr - BLKADDR_NIX0);
+ return 0;
+ }
+ /* Register and enable NIX_AF_RVU_INT interrupt */
+ rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_RVU,
+ "NIX_AF_RVU_INT",
+ rvu_nix_af_rvu_intr_handler);
+ if (!rc)
+ goto err;
+ rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1S, ~0ULL);
+
+ /* Register and enable NIX_AF_GEN_INT interrupt */
+ rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_GEN,
+ "NIX_AF_GEN_INT",
+ rvu_nix_af_rvu_gen_handler);
+ if (!rc)
+ goto err;
+ rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1S, ~0ULL);
+
+ /* Register and enable NIX_AF_ERR_INT interrupt */
+ rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_AF_ERR,
+ "NIX_AF_ERR_INT",
+ rvu_nix_af_rvu_err_handler);
+ if (!rc)
+ goto err;
+ rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1S, ~0ULL);
+
+ /* Register and enable NIX_AF_RAS interrupt */
+ rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_POISON,
+ "NIX_AF_RAS",
+ rvu_nix_af_rvu_ras_handler);
+ if (!rc)
+ goto err;
+ rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1S, ~0ULL);
+
+ return 0;
+err:
+ rvu_nix_unregister_interrupts(rvu);
+ return rc;
+}
+
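+/* Decode the latched interrupt value for the requested NIX reporter into the
+ * devlink fmsg: the raw register is emitted first, followed by one line per
+ * set bit with a known meaning.
+ */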
+static int rvu_nix_report_show(struct devlink_fmsg *fmsg, void *ctx,
+ enum nix_af_rvu_health health_reporter)
+{
+ struct rvu_nix_event_ctx *nix_event_context;
+ u64 intr_val;
+ int err;
+
+ nix_event_context = ctx;
+ switch (health_reporter) {
+ case NIX_AF_RVU_INTR:
+ intr_val = nix_event_context->nix_af_rvu_int;
+ err = rvu_report_pair_start(fmsg, "NIX_AF_RVU");
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX RVU Interrupt Reg ",
+ nix_event_context->nix_af_rvu_int);
+ if (err)
+ return err;
+ if (intr_val & BIT_ULL(0)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tUnmap Slot Error");
+ if (err)
+ return err;
+ }
+ err = rvu_report_pair_end(fmsg);
+ if (err)
+ return err;
+ break;
+ case NIX_AF_RVU_GEN:
+ intr_val = nix_event_context->nix_af_rvu_gen;
+ err = rvu_report_pair_start(fmsg, "NIX_AF_GENERAL");
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX General Interrupt Reg ",
+ nix_event_context->nix_af_rvu_gen);
+ if (err)
+ return err;
+ if (intr_val & BIT_ULL(0)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tRx multicast pkt drop");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(1)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tRx mirror pkt drop");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(4)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tSMQ flush done");
+ if (err)
+ return err;
+ }
+ err = rvu_report_pair_end(fmsg);
+ if (err)
+ return err;
+ break;
+ case NIX_AF_RVU_ERR:
+ intr_val = nix_event_context->nix_af_rvu_err;
+ err = rvu_report_pair_start(fmsg, "NIX_AF_ERR");
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX Error Interrupt Reg ",
+ nix_event_context->nix_af_rvu_err);
+ if (err)
+ return err;
+ if (intr_val & BIT_ULL(14)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_AQ_INST_S read");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(13)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_AQ_RES_S write");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(12)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tAQ Doorbell Error");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(6)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tRx on unmapped PF_FUNC");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(5)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tRx multicast replication error");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(4)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_RX_MCE_S read");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(3)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFault on multicast WQE read");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(2)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFault on mirror WQE read");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(1)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFault on mirror pkt write");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(0)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFault on multicast pkt write");
+ if (err)
+ return err;
+ }
+ err = rvu_report_pair_end(fmsg);
+ if (err)
+ return err;
+ break;
+ case NIX_AF_RVU_RAS:
+ intr_val = nix_event_context->nix_af_rvu_ras;
+ err = rvu_report_pair_start(fmsg, "NIX_AF_RAS");
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX RAS Interrupt Reg ",
+ nix_event_context->nix_af_rvu_ras);
+ if (err)
+ return err;
+ err = devlink_fmsg_string_put(fmsg, "\n\tPoison Data on:");
+ if (err)
+ return err;
+ if (intr_val & BIT_ULL(34)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_INST_S");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(33)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_RES_S");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(32)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tHW ctx");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(4)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tPacket from mirror buffer");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(3)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tPacket from multicast buffer");
+
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(2)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tWQE read from mirror buffer");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(1)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tWQE read from multicast buffer");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(0)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tNIX_RX_MCE_S read");
+ if (err)
+ return err;
+ }
+ err = rvu_report_pair_end(fmsg);
+ if (err)
+ return err;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rvu_hw_nix_intr_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *ctx, struct netlink_ext_ack *extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ struct rvu_nix_event_ctx *nix_ctx;
+
+ nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
+
+ return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_INTR) :
+ rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_INTR);
+}
+
+static int rvu_hw_nix_intr_recover(struct devlink_health_reporter *reporter,
+ void *ctx, struct netlink_ext_ack *extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_nix_event_ctx *nix_event_ctx = ctx;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ if (nix_event_ctx->nix_af_rvu_int)
+ rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1S, ~0ULL);
+
+ return 0;
+}
+
+static int rvu_hw_nix_gen_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *ctx, struct netlink_ext_ack *extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ struct rvu_nix_event_ctx *nix_ctx;
+
+ nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
+
+ return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_GEN) :
+ rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_GEN);
+}
+
+static int rvu_hw_nix_gen_recover(struct devlink_health_reporter *reporter,
+ void *ctx, struct netlink_ext_ack *extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_nix_event_ctx *nix_event_ctx = ctx;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ if (nix_event_ctx->nix_af_rvu_gen)
+ rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1S, ~0ULL);
+
+ return 0;
+}
+
+static int rvu_hw_nix_err_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *ctx, struct netlink_ext_ack *extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ struct rvu_nix_event_ctx *nix_ctx;
+
+ nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
+
+ return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_ERR) :
+ rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_ERR);
+}
+
+static int rvu_hw_nix_err_recover(struct devlink_health_reporter *reporter,
+ void *ctx, struct netlink_ext_ack *extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_nix_event_ctx *nix_event_ctx = ctx;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ if (nix_event_ctx->nix_af_rvu_err)
+ rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1S, ~0ULL);
+
+ return 0;
+}
+
+static int rvu_hw_nix_ras_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *ctx, struct netlink_ext_ack *extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ struct rvu_nix_event_ctx *nix_ctx;
+
+ nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
+
+ return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_RAS) :
+ rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_RAS);
+}
+
+static int rvu_hw_nix_ras_recover(struct devlink_health_reporter *reporter,
+ void *ctx, struct netlink_ext_ack *extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_nix_event_ctx *nix_event_ctx = ctx;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ if (nix_event_ctx->nix_af_rvu_ras)
+ rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1S, ~0ULL);
+
+ return 0;
+}
+
+RVU_REPORTERS(hw_nix_intr);
+RVU_REPORTERS(hw_nix_gen);
+RVU_REPORTERS(hw_nix_err);
+RVU_REPORTERS(hw_nix_ras);
+
+static void rvu_nix_health_reporters_destroy(struct rvu_devlink *rvu_dl);
+
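+/* Allocate the NIX event context, create the four NIX devlink health
+ * reporters and the shared workqueue, and initialise the deferred-report
+ * work items.
+ */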
+static int rvu_nix_register_reporters(struct rvu_devlink *rvu_dl)
+{
+ struct rvu_nix_health_reporters *rvu_reporters;
+ struct rvu_nix_event_ctx *nix_event_context;
+ struct rvu *rvu = rvu_dl->rvu;
+
+ rvu_reporters = kzalloc(sizeof(*rvu_reporters), GFP_KERNEL);
+ if (!rvu_reporters)
+ return -ENOMEM;
+
+ rvu_dl->rvu_nix_health_reporter = rvu_reporters;
+ nix_event_context = kzalloc(sizeof(*nix_event_context), GFP_KERNEL);
+ if (!nix_event_context)
+ return -ENOMEM;
+
+ rvu_reporters->nix_event_ctx = nix_event_context;
+ rvu_reporters->rvu_hw_nix_intr_reporter =
+ devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_intr_reporter_ops, 0, rvu);
+ if (IS_ERR(rvu_reporters->rvu_hw_nix_intr_reporter)) {
+ dev_warn(rvu->dev, "Failed to create hw_nix_intr reporter, err=%ld\n",
+ PTR_ERR(rvu_reporters->rvu_hw_nix_intr_reporter));
+ return PTR_ERR(rvu_reporters->rvu_hw_nix_intr_reporter);
+ }
+
+ rvu_reporters->rvu_hw_nix_gen_reporter =
+ devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_gen_reporter_ops, 0, rvu);
+ if (IS_ERR(rvu_reporters->rvu_hw_nix_gen_reporter)) {
+ dev_warn(rvu->dev, "Failed to create hw_nix_gen reporter, err=%ld\n",
+ PTR_ERR(rvu_reporters->rvu_hw_nix_gen_reporter));
+ return PTR_ERR(rvu_reporters->rvu_hw_nix_gen_reporter);
+ }
+
+ rvu_reporters->rvu_hw_nix_err_reporter =
+ devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_err_reporter_ops, 0, rvu);
+ if (IS_ERR(rvu_reporters->rvu_hw_nix_err_reporter)) {
+ dev_warn(rvu->dev, "Failed to create hw_nix_err reporter, err=%ld\n",
+ PTR_ERR(rvu_reporters->rvu_hw_nix_err_reporter));
+ return PTR_ERR(rvu_reporters->rvu_hw_nix_err_reporter);
+ }
+
+ rvu_reporters->rvu_hw_nix_ras_reporter =
+ devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_ras_reporter_ops, 0, rvu);
+ if (IS_ERR(rvu_reporters->rvu_hw_nix_ras_reporter)) {
+ dev_warn(rvu->dev, "Failed to create hw_nix_ras reporter, err=%ld\n",
+ PTR_ERR(rvu_reporters->rvu_hw_nix_ras_reporter));
+ return PTR_ERR(rvu_reporters->rvu_hw_nix_ras_reporter);
+ }
+
+ rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq");
+ if (!rvu_dl->devlink_wq)
+ goto err;
+
+ INIT_WORK(&rvu_reporters->intr_work, rvu_nix_intr_work);
+ INIT_WORK(&rvu_reporters->gen_work, rvu_nix_gen_work);
+ INIT_WORK(&rvu_reporters->err_work, rvu_nix_err_work);
+ INIT_WORK(&rvu_reporters->ras_work, rvu_nix_ras_work);
+
+ return 0;
+err:
+ rvu_nix_health_reporters_destroy(rvu_dl);
+ return -ENOMEM;
+}
+
+static int rvu_nix_health_reporters_create(struct rvu_devlink *rvu_dl)
+{
+ struct rvu *rvu = rvu_dl->rvu;
+ int err;
+
+ err = rvu_nix_register_reporters(rvu_dl);
+ if (err) {
+ dev_warn(rvu->dev, "Failed to create nix reporter, err =%d\n",
+ err);
+ return err;
+ }
+ rvu_nix_register_interrupts(rvu);
+
+ return 0;
+}
+
+static void rvu_nix_health_reporters_destroy(struct rvu_devlink *rvu_dl)
+{
+ struct rvu_nix_health_reporters *nix_reporters;
+ struct rvu *rvu = rvu_dl->rvu;
+
+ nix_reporters = rvu_dl->rvu_nix_health_reporter;
+
+ if (!nix_reporters->rvu_hw_nix_ras_reporter)
+ return;
+ if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_intr_reporter))
+ devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_intr_reporter);
+
+ if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_gen_reporter))
+ devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_gen_reporter);
+
+ if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_err_reporter))
+ devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_err_reporter);
+
+ if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_ras_reporter))
+ devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_ras_reporter);
+
+ rvu_nix_unregister_interrupts(rvu);
+ kfree(rvu_dl->rvu_nix_health_reporter->nix_event_ctx);
+ kfree(rvu_dl->rvu_nix_health_reporter);
+}
+
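+/* The NPA health path mirrors the NIX one above: each hard IRQ handler
+ * latches its interrupt register, clears and masks it, and queues work that
+ * files a devlink health report.
+ */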
+static void rvu_npa_intr_work(struct work_struct *work)
+{
+ struct rvu_npa_health_reporters *rvu_npa_health_reporter;
+
+ rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, intr_work);
+ devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_intr_reporter,
+ "NPA_AF_RVU Error",
+ rvu_npa_health_reporter->npa_event_ctx);
+}
+
+static irqreturn_t rvu_npa_af_rvu_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu_npa_event_ctx *npa_event_context;
+ struct rvu_devlink *rvu_dl = rvu_irq;
+ struct rvu *rvu;
+ int blkaddr;
+ u64 intr;
+
+ rvu = rvu_dl->rvu;
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
+ intr = rvu_read64(rvu, blkaddr, NPA_AF_RVU_INT);
+ npa_event_context->npa_af_rvu_int = intr;
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT, intr);
+ rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1C, ~0ULL);
+ queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->intr_work);
+
+ return IRQ_HANDLED;
+}
+
+static void rvu_npa_gen_work(struct work_struct *work)
+{
+ struct rvu_npa_health_reporters *rvu_npa_health_reporter;
+
+ rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, gen_work);
+ devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_gen_reporter,
+ "NPA_AF_GEN Error",
+ rvu_npa_health_reporter->npa_event_ctx);
+}
+
+static irqreturn_t rvu_npa_af_gen_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu_npa_event_ctx *npa_event_context;
+ struct rvu_devlink *rvu_dl = rvu_irq;
+ struct rvu *rvu;
+ int blkaddr;
+ u64 intr;
+
+ rvu = rvu_dl->rvu;
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
+ intr = rvu_read64(rvu, blkaddr, NPA_AF_GEN_INT);
+ npa_event_context->npa_af_rvu_gen = intr;
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT, intr);
+ rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1C, ~0ULL);
+ queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->gen_work);
+
+ return IRQ_HANDLED;
+}
+
+static void rvu_npa_err_work(struct work_struct *work)
+{
+ struct rvu_npa_health_reporters *rvu_npa_health_reporter;
+
+ rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, err_work);
+ devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_err_reporter,
+ "NPA_AF_ERR Error",
+ rvu_npa_health_reporter->npa_event_ctx);
+}
+
+static irqreturn_t rvu_npa_af_err_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu_npa_event_ctx *npa_event_context;
+ struct rvu_devlink *rvu_dl = rvu_irq;
+ struct rvu *rvu;
+ int blkaddr;
+ u64 intr;
+
+ rvu = rvu_dl->rvu;
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+ npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
+ intr = rvu_read64(rvu, blkaddr, NPA_AF_ERR_INT);
+ npa_event_context->npa_af_rvu_err = intr;
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT, intr);
+ rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1C, ~0ULL);
+ queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->err_work);
+
+ return IRQ_HANDLED;
+}
+
+static void rvu_npa_ras_work(struct work_struct *work)
+{
+ struct rvu_npa_health_reporters *rvu_npa_health_reporter;
+
+ rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, ras_work);
+ devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_ras_reporter,
+ "HW NPA_AF_RAS Error reported",
+ rvu_npa_health_reporter->npa_event_ctx);
+}
+
+static irqreturn_t rvu_npa_af_ras_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu_npa_event_ctx *npa_event_context;
+ struct rvu_devlink *rvu_dl = rvu_irq;
+ struct rvu *rvu;
+ int blkaddr;
+ u64 intr;
+
+ rvu = rvu_dl->rvu;
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
+ intr = rvu_read64(rvu, blkaddr, NPA_AF_RAS);
+ npa_event_context->npa_af_rvu_ras = intr;
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, NPA_AF_RAS, intr);
+ rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1C, ~0ULL);
+ queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->ras_work);
+
+ return IRQ_HANDLED;
+}
+
+static void rvu_npa_unregister_interrupts(struct rvu *rvu)
+{
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ int i, offs, blkaddr;
+ u64 reg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return;
+
+ reg = rvu_read64(rvu, blkaddr, NPA_PRIV_AF_INT_CFG);
+ offs = reg & 0x3FF;
+
+ rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1C, ~0ULL);
+ rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1C, ~0ULL);
+ rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1C, ~0ULL);
+ rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1C, ~0ULL);
+
+ for (i = 0; i < NPA_AF_INT_VEC_CNT; i++)
+ if (rvu->irq_allocated[offs + i]) {
+ free_irq(pci_irq_vector(rvu->pdev, offs + i), rvu_dl);
+ rvu->irq_allocated[offs + i] = false;
+ }
+}
+
+static int rvu_npa_register_interrupts(struct rvu *rvu)
+{
+ int blkaddr, base;
+ bool rc;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ /* Get NPA AF MSIX vectors offset. */
+ base = rvu_read64(rvu, blkaddr, NPA_PRIV_AF_INT_CFG) & 0x3ff;
+ if (!base) {
+ dev_warn(rvu->dev,
+ "Failed to get NPA_AF_INT vector offsets\n");
+ return 0;
+ }
+
+ /* Register and enable NPA_AF_RVU_INT interrupt */
+ rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_RVU,
+ "NPA_AF_RVU_INT",
+ rvu_npa_af_rvu_intr_handler);
+ if (!rc)
+ goto err;
+ rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1S, ~0ULL);
+
+ /* Register and enable NPA_AF_GEN_INT interrupt */
+ rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_GEN,
+ "NPA_AF_RVU_GEN",
+ rvu_npa_af_gen_intr_handler);
+ if (!rc)
+ goto err;
+ rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1S, ~0ULL);
+
+ /* Register and enable NPA_AF_ERR_INT interrupt */
+ rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_AF_ERR,
+ "NPA_AF_ERR_INT",
+ rvu_npa_af_err_intr_handler);
+ if (!rc)
+ goto err;
+ rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1S, ~0ULL);
+
+ /* Register and enable NPA_AF_RAS interrupt */
+ rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_POISON,
+ "NPA_AF_RAS",
+ rvu_npa_af_ras_intr_handler);
+ if (!rc)
+ goto err;
+ rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1S, ~0ULL);
+
+ return 0;
+err:
+ rvu_npa_unregister_interrupts(rvu);
+ return rc;
+}
+
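+/* Decode the latched NPA interrupt value for the requested reporter. The
+ * GENERAL case additionally splits the register into per-requestor free
+ * (bits 15:0) and alloc (bits 31:16) disable bitmaps.
+ */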
+static int rvu_npa_report_show(struct devlink_fmsg *fmsg, void *ctx,
+ enum npa_af_rvu_health health_reporter)
+{
+ struct rvu_npa_event_ctx *npa_event_context;
+ unsigned int alloc_dis, free_dis;
+ u64 intr_val;
+ int err;
+
+ npa_event_context = ctx;
+ switch (health_reporter) {
+ case NPA_AF_RVU_GEN:
+ intr_val = npa_event_context->npa_af_rvu_gen;
+ err = rvu_report_pair_start(fmsg, "NPA_AF_GENERAL");
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA General Interrupt Reg ",
+ npa_event_context->npa_af_rvu_gen);
+ if (err)
+ return err;
+ if (intr_val & BIT_ULL(32)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tUnmap PF Error");
+ if (err)
+ return err;
+ }
+
+ free_dis = FIELD_GET(GENMASK(15, 0), intr_val);
+ if (free_dis & BIT(NPA_INPQ_NIX0_RX)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tNIX0: free disabled RX");
+ if (err)
+ return err;
+ }
+ if (free_dis & BIT(NPA_INPQ_NIX0_TX)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tNIX0:free disabled TX");
+ if (err)
+ return err;
+ }
+ if (free_dis & BIT(NPA_INPQ_NIX1_RX)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tNIX1: free disabled RX");
+ if (err)
+ return err;
+ }
+ if (free_dis & BIT(NPA_INPQ_NIX1_TX)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tNIX1:free disabled TX");
+ if (err)
+ return err;
+ }
+ if (free_dis & BIT(NPA_INPQ_SSO)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for SSO");
+ if (err)
+ return err;
+ }
+ if (free_dis & BIT(NPA_INPQ_TIM)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for TIM");
+ if (err)
+ return err;
+ }
+ if (free_dis & BIT(NPA_INPQ_DPI)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for DPI");
+ if (err)
+ return err;
+ }
+ if (free_dis & BIT(NPA_INPQ_AURA_OP)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for AURA");
+ if (err)
+ return err;
+ }
+
+ alloc_dis = FIELD_GET(GENMASK(31, 16), intr_val);
+ if (alloc_dis & BIT(NPA_INPQ_NIX0_RX)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tNIX0: alloc disabled RX");
+ if (err)
+ return err;
+ }
+ if (alloc_dis & BIT(NPA_INPQ_NIX0_TX)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tNIX0:alloc disabled TX");
+ if (err)
+ return err;
+ }
+ if (alloc_dis & BIT(NPA_INPQ_NIX1_RX)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tNIX1: alloc disabled RX");
+ if (err)
+ return err;
+ }
+ if (alloc_dis & BIT(NPA_INPQ_NIX1_TX)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tNIX1:alloc disabled TX");
+ if (err)
+ return err;
+ }
+ if (alloc_dis & BIT(NPA_INPQ_SSO)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for SSO");
+ if (err)
+ return err;
+ }
+ if (alloc_dis & BIT(NPA_INPQ_TIM)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for TIM");
+ if (err)
+ return err;
+ }
+ if (alloc_dis & BIT(NPA_INPQ_DPI)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for DPI");
+ if (err)
+ return err;
+ }
+ if (alloc_dis & BIT(NPA_INPQ_AURA_OP)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for AURA");
+ if (err)
+ return err;
+ }
+ err = rvu_report_pair_end(fmsg);
+ if (err)
+ return err;
+ break;
+ case NPA_AF_RVU_ERR:
+ err = rvu_report_pair_start(fmsg, "NPA_AF_ERR");
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA Error Interrupt Reg ",
+ npa_event_context->npa_af_rvu_err);
+ if (err)
+ return err;
+
+ if (npa_event_context->npa_af_rvu_err & BIT_ULL(14)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFault on NPA_AQ_INST_S read");
+ if (err)
+ return err;
+ }
+ if (npa_event_context->npa_af_rvu_err & BIT_ULL(13)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFault on NPA_AQ_RES_S write");
+ if (err)
+ return err;
+ }
+ if (npa_event_context->npa_af_rvu_err & BIT_ULL(12)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tAQ Doorbell Error");
+ if (err)
+ return err;
+ }
+ err = rvu_report_pair_end(fmsg);
+ if (err)
+ return err;
+ break;
+ case NPA_AF_RVU_RAS:
+ err = rvu_report_pair_start(fmsg, "NPA_AF_RVU_RAS");
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA RAS Interrupt Reg ",
+ npa_event_context->npa_af_rvu_ras);
+ if (err)
+ return err;
+ if (npa_event_context->npa_af_rvu_ras & BIT_ULL(34)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tPoison data on NPA_AQ_INST_S");
+ if (err)
+ return err;
+ }
+ if (npa_event_context->npa_af_rvu_ras & BIT_ULL(33)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tPoison data on NPA_AQ_RES_S");
+ if (err)
+ return err;
+ }
+ if (npa_event_context->npa_af_rvu_ras & BIT_ULL(32)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tPoison data on HW context");
+ if (err)
+ return err;
+ }
+ err = rvu_report_pair_end(fmsg);
+ if (err)
+ return err;
+ break;
+ case NPA_AF_RVU_INTR:
+ err = rvu_report_pair_start(fmsg, "NPA_AF_RVU");
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA RVU Interrupt Reg ",
+ npa_event_context->npa_af_rvu_int);
+ if (err)
+ return err;
+ if (npa_event_context->npa_af_rvu_int & BIT_ULL(0)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tUnmap Slot Error");
+ if (err)
+ return err;
+ }
+ return rvu_report_pair_end(fmsg);
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rvu_hw_npa_intr_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *ctx, struct netlink_ext_ack *extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ struct rvu_npa_event_ctx *npa_ctx;
+
+ npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
+
+ return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_INTR) :
+ rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_INTR);
+}
+
+static int rvu_hw_npa_intr_recover(struct devlink_health_reporter *reporter,
+ void *ctx, struct netlink_ext_ack *extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_npa_event_ctx *npa_event_ctx = ctx;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ if (npa_event_ctx->npa_af_rvu_int)
+ rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1S, ~0ULL);
+
+ return 0;
+}
+
+static int rvu_hw_npa_gen_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *ctx, struct netlink_ext_ack *extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ struct rvu_npa_event_ctx *npa_ctx;
+
+ npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
+
+ return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_GEN) :
+ rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_GEN);
+}
+
+static int rvu_hw_npa_gen_recover(struct devlink_health_reporter *reporter,
+ void *ctx, struct netlink_ext_ack *extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_npa_event_ctx *npa_event_ctx = ctx;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ if (npa_event_ctx->npa_af_rvu_gen)
+ rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1S, ~0ULL);
+
+ return 0;
+}
+
+static int rvu_hw_npa_err_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *ctx, struct netlink_ext_ack *extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ struct rvu_npa_event_ctx *npa_ctx;
+
+ npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
+
+ return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_ERR) :
+ rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_ERR);
+}
+
+static int rvu_hw_npa_err_recover(struct devlink_health_reporter *reporter,
+ void *ctx, struct netlink_ext_ack *extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_npa_event_ctx *npa_event_ctx = ctx;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ if (npa_event_ctx->npa_af_rvu_err)
+ rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1S, ~0ULL);
+
+ return 0;
+}
+
+static int rvu_hw_npa_ras_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *ctx, struct netlink_ext_ack *extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ struct rvu_npa_event_ctx *npa_ctx;
+
+ npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
+
+ return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_RAS) :
+ rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_RAS);
+}
+
+static int rvu_hw_npa_ras_recover(struct devlink_health_reporter *reporter,
+ void *ctx, struct netlink_ext_ack *extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_npa_event_ctx *npa_event_ctx = ctx;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ if (npa_event_ctx->npa_af_rvu_ras)
+ rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1S, ~0ULL);
+
+ return 0;
+}
+
+RVU_REPORTERS(hw_npa_intr);
+RVU_REPORTERS(hw_npa_gen);
+RVU_REPORTERS(hw_npa_err);
+RVU_REPORTERS(hw_npa_ras);
+
+static void rvu_npa_health_reporters_destroy(struct rvu_devlink *rvu_dl);
+
+static int rvu_npa_register_reporters(struct rvu_devlink *rvu_dl)
+{
+ struct rvu_npa_health_reporters *rvu_reporters;
+ struct rvu_npa_event_ctx *npa_event_context;
+ struct rvu *rvu = rvu_dl->rvu;
+
+ rvu_reporters = kzalloc(sizeof(*rvu_reporters), GFP_KERNEL);
+ if (!rvu_reporters)
+ return -ENOMEM;
+
+ rvu_dl->rvu_npa_health_reporter = rvu_reporters;
+ npa_event_context = kzalloc(sizeof(*npa_event_context), GFP_KERNEL);
+ if (!npa_event_context)
+ return -ENOMEM;
+
+ rvu_reporters->npa_event_ctx = npa_event_context;
+ rvu_reporters->rvu_hw_npa_intr_reporter =
+ devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_intr_reporter_ops, 0, rvu);
+ if (IS_ERR(rvu_reporters->rvu_hw_npa_intr_reporter)) {
+ dev_warn(rvu->dev, "Failed to create hw_npa_intr reporter, err=%ld\n",
+ PTR_ERR(rvu_reporters->rvu_hw_npa_intr_reporter));
+ return PTR_ERR(rvu_reporters->rvu_hw_npa_intr_reporter);
+ }
+
+ rvu_reporters->rvu_hw_npa_gen_reporter =
+ devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_gen_reporter_ops, 0, rvu);
+ if (IS_ERR(rvu_reporters->rvu_hw_npa_gen_reporter)) {
+ dev_warn(rvu->dev, "Failed to create hw_npa_gen reporter, err=%ld\n",
+ PTR_ERR(rvu_reporters->rvu_hw_npa_gen_reporter));
+ return PTR_ERR(rvu_reporters->rvu_hw_npa_gen_reporter);
+ }
+
+ rvu_reporters->rvu_hw_npa_err_reporter =
+ devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_err_reporter_ops, 0, rvu);
+ if (IS_ERR(rvu_reporters->rvu_hw_npa_err_reporter)) {
+ dev_warn(rvu->dev, "Failed to create hw_npa_err reporter, err=%ld\n",
+ PTR_ERR(rvu_reporters->rvu_hw_npa_err_reporter));
+ return PTR_ERR(rvu_reporters->rvu_hw_npa_err_reporter);
+ }
+
+ rvu_reporters->rvu_hw_npa_ras_reporter =
+ devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_ras_reporter_ops, 0, rvu);
+ if (IS_ERR(rvu_reporters->rvu_hw_npa_ras_reporter)) {
+ dev_warn(rvu->dev, "Failed to create hw_npa_ras reporter, err=%ld\n",
+ PTR_ERR(rvu_reporters->rvu_hw_npa_ras_reporter));
+ return PTR_ERR(rvu_reporters->rvu_hw_npa_ras_reporter);
+ }
+
+ rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq");
+ if (!rvu_dl->devlink_wq)
+ goto err;
+
+ INIT_WORK(&rvu_reporters->intr_work, rvu_npa_intr_work);
+ INIT_WORK(&rvu_reporters->err_work, rvu_npa_err_work);
+ INIT_WORK(&rvu_reporters->gen_work, rvu_npa_gen_work);
+ INIT_WORK(&rvu_reporters->ras_work, rvu_npa_ras_work);
+
+ return 0;
+err:
+ rvu_npa_health_reporters_destroy(rvu_dl);
+ return -ENOMEM;
+}
+
+static int rvu_npa_health_reporters_create(struct rvu_devlink *rvu_dl)
+{
+ struct rvu *rvu = rvu_dl->rvu;
+ int err;
+
+ err = rvu_npa_register_reporters(rvu_dl);
+ if (err) {
+ dev_warn(rvu->dev, "Failed to create npa reporter, err =%d\n",
+ err);
+ return err;
+ }
+ rvu_npa_register_interrupts(rvu);
+
+ return 0;
+}
+
+static void rvu_npa_health_reporters_destroy(struct rvu_devlink *rvu_dl)
+{
+ struct rvu_npa_health_reporters *npa_reporters;
+ struct rvu *rvu = rvu_dl->rvu;
+
+ npa_reporters = rvu_dl->rvu_npa_health_reporter;
+
+ if (!npa_reporters->rvu_hw_npa_ras_reporter)
+ return;
+ if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_intr_reporter))
+ devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_intr_reporter);
+
+ if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_gen_reporter))
+ devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_gen_reporter);
+
+ if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_err_reporter))
+ devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_err_reporter);
+
+ if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_ras_reporter))
+ devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_ras_reporter);
+
+ rvu_npa_unregister_interrupts(rvu);
+ kfree(rvu_dl->rvu_npa_health_reporter->npa_event_ctx);
+ kfree(rvu_dl->rvu_npa_health_reporter);
+}
+
+static int rvu_health_reporters_create(struct rvu *rvu)
+{
+ struct rvu_devlink *rvu_dl;
+ int err;
+
+ rvu_dl = rvu->rvu_dl;
+ err = rvu_npa_health_reporters_create(rvu_dl);
+ if (err)
+ return err;
+
+ return rvu_nix_health_reporters_create(rvu_dl);
+}
+
+static void rvu_health_reporters_destroy(struct rvu *rvu)
+{
+ struct rvu_devlink *rvu_dl;
+
+ if (!rvu->rvu_dl)
+ return;
+
+ rvu_dl = rvu->rvu_dl;
+ rvu_npa_health_reporters_destroy(rvu_dl);
+ rvu_nix_health_reporters_destroy(rvu_dl);
+}
+
+enum rvu_af_dl_param_id {
+ RVU_AF_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU,
+ RVU_AF_DEVLINK_PARAM_ID_TIM_CAPTURE_TIMERS,
+ RVU_AF_DEVLINK_PARAM_ID_TIM_CAPTURE_TENNS,
+ RVU_AF_DEVLINK_PARAM_ID_TIM_CAPTURE_GPIOS,
+ RVU_AF_DEVLINK_PARAM_ID_TIM_CAPTURE_GTI,
+ RVU_AF_DEVLINK_PARAM_ID_TIM_CAPTURE_PTP,
+ RVU_AF_DEVLINK_PARAM_ID_TIM_CAPTURE_SYNC,
+ RVU_AF_DEVLINK_PARAM_ID_TIM_CAPTURE_BTS,
+ RVU_AF_DEVLINK_PARAM_ID_TIM_CAPTURE_EXT_GTI,
+ RVU_AF_DEVLINK_PARAM_ID_TIM_ADJUST_TIMERS,
+ RVU_AF_DEVLINK_PARAM_ID_TIM_ADJUST_TENNS,
+ RVU_AF_DEVLINK_PARAM_ID_TIM_ADJUST_GPIOS,
+ RVU_AF_DEVLINK_PARAM_ID_TIM_ADJUST_GTI,
+ RVU_AF_DEVLINK_PARAM_ID_TIM_ADJUST_PTP,
+ RVU_AF_DEVLINK_PARAM_ID_TIM_ADJUST_BTS,
+};
+
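+/* Map a TIM capture/adjust devlink param ID to its TIM_AF register offset;
+ * IDs without a backing register map to offset 0.
+ */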
+static u64 rvu_af_dl_tim_param_id_to_offset(u32 id)
+{
+ u64 offset = 0;
+
+ switch (id) {
+ case RVU_AF_DEVLINK_PARAM_ID_TIM_CAPTURE_TENNS:
+ offset = TIM_AF_CAPTURE_TENNS;
+ break;
+ case RVU_AF_DEVLINK_PARAM_ID_TIM_CAPTURE_GPIOS:
+ offset = TIM_AF_CAPTURE_GPIOS;
+ break;
+ case RVU_AF_DEVLINK_PARAM_ID_TIM_CAPTURE_GTI:
+ offset = TIM_AF_CAPTURE_GTI;
+ break;
+ case RVU_AF_DEVLINK_PARAM_ID_TIM_CAPTURE_PTP:
+ offset = TIM_AF_CAPTURE_PTP;
+ break;
+ case RVU_AF_DEVLINK_PARAM_ID_TIM_CAPTURE_BTS:
+ offset = TIM_AF_CAPTURE_BTS;
+ break;
+ case RVU_AF_DEVLINK_PARAM_ID_TIM_CAPTURE_EXT_GTI:
+ offset = TIM_AF_CAPTURE_EXT_GTI;
+ break;
+ case RVU_AF_DEVLINK_PARAM_ID_TIM_ADJUST_TENNS:
+ offset = TIM_AF_ADJUST_TENNS;
+ break;
+ case RVU_AF_DEVLINK_PARAM_ID_TIM_ADJUST_GPIOS:
+ offset = TIM_AF_ADJUST_GPIOS;
+ break;
+ case RVU_AF_DEVLINK_PARAM_ID_TIM_ADJUST_GTI:
+ offset = TIM_AF_ADJUST_GTI;
+ break;
+ case RVU_AF_DEVLINK_PARAM_ID_TIM_ADJUST_PTP:
+ offset = TIM_AF_ADJUST_PTP;
+ break;
+ case RVU_AF_DEVLINK_PARAM_ID_TIM_ADJUST_BTS:
+ offset = TIM_AF_ADJUST_BTS;
+ break;
+ }
+
+ return offset;
+}
+
+/* Devlink Params APIs */
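+/* dwrr_mtu is only settable on silicon with a common DWRR MTU register; the
+ * value must be a power of two no larger than 64K (or the special sizes 9728
+ * and 10240) and may only change while no SMQs are in use, i.e. no active
+ * NIX LFs.
+ */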
+static int rvu_af_dl_dwrr_mtu_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ int dwrr_mtu = val.vu32;
+ struct nix_txsch *txsch;
+ struct nix_hw *nix_hw;
+
+ if (!rvu->hw->cap.nix_common_dwrr_mtu) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Setting DWRR_MTU is not supported on this silicon");
+ return -EOPNOTSUPP;
+ }
+
+ if ((dwrr_mtu > 65536 || !is_power_of_2(dwrr_mtu)) &&
+ (dwrr_mtu != 9728 && dwrr_mtu != 10240)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Invalid, supported MTUs are 0,2,4,8.16,32,64....4K,8K,32K,64K and 9728, 10240");
+ return -EINVAL;
+ }
+
+ nix_hw = get_nix_hw(rvu->hw, BLKADDR_NIX0);
+ if (!nix_hw)
+ return -ENODEV;
+
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
+ if (rvu_rsrc_free_count(&txsch->schq) != txsch->schq.max) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Changing DWRR MTU is not supported when there are active NIXLFs");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Makesure none of the PF/VF interfaces are initialized and retry");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int rvu_af_dl_dwrr_mtu_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ u64 dwrr_mtu;
+
+ dwrr_mtu = convert_bytes_to_dwrr_mtu(ctx->val.vu32);
+ rvu_write64(rvu, BLKADDR_NIX0, NIX_AF_DWRR_RPM_MTU, dwrr_mtu);
+
+ return 0;
+}
+
+static int rvu_af_dl_dwrr_mtu_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ u64 dwrr_mtu;
+
+ if (!rvu->hw->cap.nix_common_dwrr_mtu)
+ return -EOPNOTSUPP;
+
+ dwrr_mtu = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_DWRR_RPM_MTU);
+ ctx->val.vu32 = convert_dwrr_mtu_to_bytes(dwrr_mtu);
+
+ return 0;
+}
+
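+/* The TIM capture/adjust params are thin wrappers around the corresponding
+ * TIM_AF registers. 64-bit capture values and adjust deltas are exchanged as
+ * decimal strings since devlink params have no u64 type; the capture "set"
+ * callback is a deliberate no-op as captured values are only read back.
+ */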
+static int rvu_af_dl_tim_capture_timers_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ u64 capt_timers = rvu_read64(rvu, BLKADDR_TIM, TIM_AF_CAPTURE_TIMERS);
+
+ ctx->val.vu8 = (u8)(capt_timers & TIM_AF_CAPTURE_TIMERS_MASK);
+
+ return 0;
+}
+
+static int rvu_af_dl_tim_capture_timers_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+
+ rvu_write64(rvu, BLKADDR_TIM, TIM_AF_CAPTURE_TIMERS, (u64)ctx->val.vu8);
+
+ return 0;
+}
+
+static int rvu_af_dl_tim_capture_timers_validate(struct devlink *devlink,
+ u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ if (val.vu8 > TIM_AF_CAPTURE_TIMERS_MASK) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Invalid value to set tim capture timers");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rvu_af_dl_tim_capture_time_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ u64 time, offset;
+
+ offset = rvu_af_dl_tim_param_id_to_offset(id);
+ time = rvu_read64(rvu, BLKADDR_TIM, offset);
+ snprintf(ctx->val.vstr, sizeof(ctx->val.vstr), "%llu", time);
+
+ return 0;
+}
+
+static int rvu_af_dl_tim_capture_time_set(struct devlink __always_unused *devlink,
+ u32 __always_unused id,
+ struct devlink_param_gset_ctx __always_unused *ctx)
+{
+ return 0;
+}
+
+static int rvu_af_dl_tim_adjust_timers_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ u64 adjust_timer = rvu_read64(rvu, BLKADDR_TIM, TIM_AF_ADJUST_TIMERS);
+
+ ctx->val.vbool = !!(adjust_timer & TIM_AF_ADJUST_TIMERS_MASK);
+
+ return 0;
+}
+
+static int rvu_af_dl_tim_adjust_timers_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ u64 adjust_timer = ctx->val.vbool ? BIT_ULL(0) : 0;
+
+ rvu_write64(rvu, BLKADDR_TIM, TIM_AF_ADJUST_TIMERS, adjust_timer);
+
+ return 0;
+}
+
+static int rvu_af_dl_tim_adjust_timer_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ u64 offset, delta;
+
+ offset = rvu_af_dl_tim_param_id_to_offset(id);
+ delta = rvu_read64(rvu, BLKADDR_TIM, offset);
+ snprintf(ctx->val.vstr, sizeof(ctx->val.vstr), "%llu", delta);
+
+ return 0;
+}
+
+static int rvu_af_dl_tim_adjust_timer_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ u64 offset, delta;
+
+ if (kstrtoull(ctx->val.vstr, 10, &delta))
+ return -EINVAL;
+
+ offset = rvu_af_dl_tim_param_id_to_offset(id);
+ rvu_write64(rvu, BLKADDR_TIM, offset, delta);
+
+ return 0;
+}
+
+static int rvu_af_dl_tim_adjust_timer_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ u64 delta;
+
+ if (kstrtoull(val.vstr, 10, &delta)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Invalid value to set tim adjust timer");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
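+/* Runtime devlink parameters exposed by the AF driver. Example usage from
+ * userspace (the PCI device address is illustrative):
+ *   devlink dev param set pci/0002:01:00.0 name dwrr_mtu value 9728 cmode runtime
+ *   devlink dev param show pci/0002:01:00.0 name tim_capture_ptp
+ */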
+static const struct devlink_param rvu_af_dl_params[] = {
+ DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU,
+ "dwrr_mtu", DEVLINK_PARAM_TYPE_U32,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ rvu_af_dl_dwrr_mtu_get, rvu_af_dl_dwrr_mtu_set,
+ rvu_af_dl_dwrr_mtu_validate),
+ DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_TIM_CAPTURE_TIMERS,
+ "tim_capture_timers", DEVLINK_PARAM_TYPE_U8,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ rvu_af_dl_tim_capture_timers_get,
+ rvu_af_dl_tim_capture_timers_set,
+ rvu_af_dl_tim_capture_timers_validate),
+ DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_TIM_CAPTURE_TENNS,
+ "tim_capture_tenns", DEVLINK_PARAM_TYPE_STRING,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ rvu_af_dl_tim_capture_time_get,
+ rvu_af_dl_tim_capture_time_set, NULL),
+ DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_TIM_CAPTURE_GPIOS,
+ "tim_capture_gpios", DEVLINK_PARAM_TYPE_STRING,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ rvu_af_dl_tim_capture_time_get,
+ rvu_af_dl_tim_capture_time_set, NULL),
+ DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_TIM_CAPTURE_GTI,
+ "tim_capture_gti", DEVLINK_PARAM_TYPE_STRING,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ rvu_af_dl_tim_capture_time_get,
+ rvu_af_dl_tim_capture_time_set, NULL),
+ DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_TIM_CAPTURE_PTP,
+ "tim_capture_ptp", DEVLINK_PARAM_TYPE_STRING,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ rvu_af_dl_tim_capture_time_get,
+ rvu_af_dl_tim_capture_time_set, NULL),
+ DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_TIM_CAPTURE_SYNC,
+ "tim_capture_sync", DEVLINK_PARAM_TYPE_STRING,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ rvu_af_dl_tim_capture_time_get,
+ rvu_af_dl_tim_capture_time_set, NULL),
+ DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_TIM_CAPTURE_BTS,
+ "tim_capture_bts", DEVLINK_PARAM_TYPE_STRING,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ rvu_af_dl_tim_capture_time_get,
+ rvu_af_dl_tim_capture_time_set, NULL),
+ DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_TIM_CAPTURE_EXT_GTI,
+ "tim_capture_ext_gti", DEVLINK_PARAM_TYPE_STRING,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ rvu_af_dl_tim_capture_time_get,
+ rvu_af_dl_tim_capture_time_set, NULL),
+ DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_TIM_ADJUST_TIMERS,
+ "tim_adjust_timers", DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ rvu_af_dl_tim_adjust_timers_get,
+ rvu_af_dl_tim_adjust_timers_set, NULL),
+ DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_TIM_ADJUST_TENNS,
+ "tim_adjust_tenns", DEVLINK_PARAM_TYPE_STRING,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ rvu_af_dl_tim_adjust_timer_get,
+ rvu_af_dl_tim_adjust_timer_set,
+ rvu_af_dl_tim_adjust_timer_validate),
+ DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_TIM_ADJUST_GPIOS,
+ "tim_adjust_gpios", DEVLINK_PARAM_TYPE_STRING,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ rvu_af_dl_tim_adjust_timer_get,
+ rvu_af_dl_tim_adjust_timer_set,
+ rvu_af_dl_tim_adjust_timer_validate),
+ DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_TIM_ADJUST_GTI,
+ "tim_adjust_gti", DEVLINK_PARAM_TYPE_STRING,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ rvu_af_dl_tim_adjust_timer_get,
+ rvu_af_dl_tim_adjust_timer_set,
+ rvu_af_dl_tim_adjust_timer_validate),
+ DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_TIM_ADJUST_PTP,
+ "tim_adjust_ptp", DEVLINK_PARAM_TYPE_STRING,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ rvu_af_dl_tim_adjust_timer_get,
+ rvu_af_dl_tim_adjust_timer_set,
+ rvu_af_dl_tim_adjust_timer_validate),
+ DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_TIM_ADJUST_BTS,
+ "tim_adjust_bts", DEVLINK_PARAM_TYPE_STRING,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ rvu_af_dl_tim_adjust_timer_get,
+ rvu_af_dl_tim_adjust_timer_set,
+ rvu_af_dl_tim_adjust_timer_validate),
+};
+
+/* Devlink switch mode */
+static int rvu_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ struct rvu_switch *rswitch;
+
+ rswitch = &rvu->rswitch;
+ *mode = rswitch->mode;
+
+ return 0;
+}
+
+static int rvu_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
+ struct netlink_ext_ack *extack)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ struct rvu_switch *rswitch;
+
+ rswitch = &rvu->rswitch;
+ switch (mode) {
+ case DEVLINK_ESWITCH_MODE_LEGACY:
+ case DEVLINK_ESWITCH_MODE_SWITCHDEV:
+ if (rswitch->mode == mode)
+ return 0;
+ rswitch->mode = mode;
+ if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
+ rvu_switch_enable(rvu);
+ else
+ rvu_switch_disable(rvu);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rvu_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ return devlink_info_driver_name_put(req, DRV_NAME);
+}
+
+static const struct devlink_ops rvu_devlink_ops = {
+ .info_get = rvu_devlink_info_get,
+ .eswitch_mode_get = rvu_devlink_eswitch_mode_get,
+ .eswitch_mode_set = rvu_devlink_eswitch_mode_set,
+};
+
+int rvu_register_dl(struct rvu *rvu)
+{
+ struct rvu_devlink *rvu_dl;
+ struct devlink *dl;
+ int err;
+
+ dl = devlink_alloc(&rvu_devlink_ops, sizeof(struct rvu_devlink));
+ if (!dl) {
+ dev_warn(rvu->dev, "devlink_alloc failed\n");
+ return -ENOMEM;
+ }
+
+ err = devlink_register(dl, rvu->dev);
+ if (err) {
+ dev_err(rvu->dev, "devlink register failed with error %d\n", err);
+ devlink_free(dl);
+ return err;
+ }
+
+ rvu_dl = devlink_priv(dl);
+ rvu_dl->dl = dl;
+ rvu_dl->rvu = rvu;
+ rvu->rvu_dl = rvu_dl;
+
+ err = rvu_health_reporters_create(rvu);
+ if (err) {
+ dev_err(rvu->dev,
+ "devlink health reporter creation failed with error %d\n", err);
+ goto err_dl_health;
+ }
+
+ err = devlink_params_register(dl, rvu_af_dl_params,
+ ARRAY_SIZE(rvu_af_dl_params));
+ if (err) {
+		dev_err(rvu->dev,
+			"devlink params register failed with error %d\n", err);
+ goto err_dl_health;
+ }
+
+ devlink_params_publish(dl);
+
+ return 0;
+
+err_dl_health:
+ rvu_health_reporters_destroy(rvu);
+ devlink_unregister(dl);
+ devlink_free(dl);
+ return err;
+}
+
+void rvu_unregister_dl(struct rvu *rvu)
+{
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ struct devlink *dl = rvu_dl->dl;
+
+ if (!dl)
+ return;
+
+ devlink_params_unregister(dl, rvu_af_dl_params,
+ ARRAY_SIZE(rvu_af_dl_params));
+ rvu_health_reporters_destroy(rvu);
+ devlink_unregister(dl);
+ devlink_free(dl);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h
new file mode 100644
index 000000000000..51efe88dce11
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Admin Function Devlink
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef RVU_DEVLINK_H
+#define RVU_DEVLINK_H
+
+#define RVU_REPORTERS(_name) \
+static const struct devlink_health_reporter_ops rvu_ ## _name ## _reporter_ops = { \
+ .name = #_name, \
+ .recover = rvu_ ## _name ## _recover, \
+ .dump = rvu_ ## _name ## _dump, \
+}
+
+enum npa_af_rvu_health {
+ NPA_AF_RVU_INTR,
+ NPA_AF_RVU_GEN,
+ NPA_AF_RVU_ERR,
+ NPA_AF_RVU_RAS,
+};
+
+struct rvu_npa_event_ctx {
+ u64 npa_af_rvu_int;
+ u64 npa_af_rvu_gen;
+ u64 npa_af_rvu_err;
+ u64 npa_af_rvu_ras;
+};
+
+struct rvu_npa_health_reporters {
+ struct rvu_npa_event_ctx *npa_event_ctx;
+ struct devlink_health_reporter *rvu_hw_npa_intr_reporter;
+ struct work_struct intr_work;
+ struct devlink_health_reporter *rvu_hw_npa_gen_reporter;
+ struct work_struct gen_work;
+ struct devlink_health_reporter *rvu_hw_npa_err_reporter;
+ struct work_struct err_work;
+ struct devlink_health_reporter *rvu_hw_npa_ras_reporter;
+ struct work_struct ras_work;
+};
+
+enum nix_af_rvu_health {
+ NIX_AF_RVU_INTR,
+ NIX_AF_RVU_GEN,
+ NIX_AF_RVU_ERR,
+ NIX_AF_RVU_RAS,
+};
+
+struct rvu_nix_event_ctx {
+ u64 nix_af_rvu_int;
+ u64 nix_af_rvu_gen;
+ u64 nix_af_rvu_err;
+ u64 nix_af_rvu_ras;
+};
+
+struct rvu_nix_health_reporters {
+ struct rvu_nix_event_ctx *nix_event_ctx;
+ struct devlink_health_reporter *rvu_hw_nix_intr_reporter;
+ struct work_struct intr_work;
+ struct devlink_health_reporter *rvu_hw_nix_gen_reporter;
+ struct work_struct gen_work;
+ struct devlink_health_reporter *rvu_hw_nix_err_reporter;
+ struct work_struct err_work;
+ struct devlink_health_reporter *rvu_hw_nix_ras_reporter;
+ struct work_struct ras_work;
+};
+
+struct rvu_devlink {
+ struct devlink *dl;
+ struct rvu *rvu;
+ struct workqueue_struct *devlink_wq;
+ struct rvu_npa_health_reporters *rvu_npa_health_reporter;
+ struct rvu_nix_health_reporters *rvu_nix_health_reporter;
+};
+
+/* Devlink APIs */
+int rvu_register_dl(struct rvu *rvu);
+void rvu_unregister_dl(struct rvu *rvu);
+
+#endif /* RVU_DEVLINK_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_fixes.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_fixes.c
new file mode 100644
index 000000000000..b350dbaf737c
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_fixes.c
@@ -0,0 +1,1009 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2019 Marvell.
+ *
+ */
+
+#include <linux/kthread.h>
+#include <linux/pci.h>
+#include <linux/cpu.h>
+#include <linux/sched/signal.h>
+
+#include "rvu_struct.h"
+#include "rvu_reg.h"
+#include "common.h"
+#include "mbox.h"
+#include "rvu.h"
+#include "cgx.h"
+#include "rvu_fixes.h"
+
+#define OTX2_MAX_CQ_CNT 64
+
+struct nix_tx_stall {
+ struct rvu *rvu;
+ int blkaddr;
+ int smq_count;
+ int tl4_count;
+ int tl3_count;
+ int tl2_count;
+ int sq_count;
+ u16 *smq_tl2_map;
+ u16 *tl4_tl2_map;
+ u16 *tl3_tl2_map;
+ u16 *tl2_tl1_map;
+ u16 *sq_smq_map;
+#define LINK_TYPE_SHIFT 7
+#define EXPR_LINK(map) (map & (1 << LINK_TYPE_SHIFT))
+#define LINK_CHAN_SHIFT 8
+#define LINK_CHAN(map) (map >> LINK_CHAN_SHIFT)
+ u16 *tl2_link_map;
+ u8 *nixlf_tl2_count;
+ u64 *nixlf_poll_count;
+ u64 *nixlf_stall_count;
+ u64 *nlink_credits; /* Normal link credits */
+ u64 poll_cntr;
+ u64 stalled_cntr;
+ int pse_link_bp_level;
+ bool txsch_config_changed;
+ struct mutex txsch_lock; /* To sync Tx SCHQ config update and poll */
+ struct task_struct *poll_thread; /* Tx stall condition polling thread */
+};
+
+/* The transmit stall HW issue's workaround reads a lot of registers
+ * at frequent intervals; having a barrier for every register access
+ * would increase the cycles spent in stall detection. Hence use the
+ * relaxed counterparts.
+ */
+static inline void rvu_wr64(struct rvu *rvu, u64 block, u64 offset, u64 val)
+{
+ writeq_relaxed(val, rvu->afreg_base + ((block << 28) | offset));
+}
+
+static inline u64 rvu_rd64(struct rvu *rvu, u64 block, u64 offset)
+{
+ return readq_relaxed(rvu->afreg_base + ((block << 28) | offset));
+}
+
+/**
+ * rvu_usleep_interruptible - sleep waiting for signals
+ * @usecs: Time in microseconds to sleep for
+ *
+ * A replica of msleep_interruptible, used to reduce the tx stall
+ * poll interval.
+ */
+static unsigned long rvu_usleep_interruptible(unsigned int usecs)
+{
+ unsigned long timeout = usecs_to_jiffies(usecs) + 1;
+
+ while (timeout && !signal_pending(current))
+ timeout = schedule_timeout_interruptible(timeout);
+ return jiffies_to_usecs(timeout);
+}
+
+void rvu_nix_txsch_lock(struct nix_hw *nix_hw)
+{
+ struct nix_tx_stall *tx_stall = nix_hw->tx_stall;
+
+ if (tx_stall)
+ mutex_lock(&tx_stall->txsch_lock);
+}
+
+void rvu_nix_txsch_unlock(struct nix_hw *nix_hw)
+{
+ struct nix_tx_stall *tx_stall = nix_hw->tx_stall;
+
+ if (tx_stall)
+ mutex_unlock(&tx_stall->txsch_lock);
+}
+
+void rvu_nix_txsch_config_changed(struct nix_hw *nix_hw)
+{
+ struct nix_tx_stall *tx_stall = nix_hw->tx_stall;
+
+ if (tx_stall)
+ tx_stall->txsch_config_changed = true;
+}
+
+void rvu_nix_update_link_credits(struct rvu *rvu, int blkaddr,
+ int link, u64 ncredits)
+{
+ struct nix_tx_stall *tx_stall;
+ struct nix_hw *nix_hw;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return;
+
+ tx_stall = nix_hw->tx_stall;
+ if (!tx_stall)
+ return;
+
+ tx_stall->nlink_credits[link] = ncredits;
+}
+
+void rvu_nix_update_sq_smq_mapping(struct rvu *rvu, int blkaddr, int nixlf,
+ u16 sq, u16 smq)
+{
+ struct nix_tx_stall *tx_stall;
+ struct nix_hw *nix_hw;
+ int sq_count;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return;
+
+ tx_stall = nix_hw->tx_stall;
+ if (!tx_stall)
+ return;
+
+ sq_count = tx_stall->sq_count;
+
+ rvu_nix_txsch_lock(nix_hw);
+ tx_stall->sq_smq_map[nixlf * sq_count + sq] = smq;
+ rvu_nix_txsch_unlock(nix_hw);
+}
+
+static void rvu_nix_scan_link_credits(struct rvu *rvu, int blkaddr,
+ struct nix_tx_stall *tx_stall)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u64 credits;
+ int link;
+
+ for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++) {
+ credits = rvu_rd64(rvu, blkaddr,
+ NIX_AF_TX_LINKX_NORM_CREDIT(link));
+ tx_stall->nlink_credits[link] = credits;
+ }
+}
+
+static void rvu_nix_scan_tl2_link_mapping(struct rvu *rvu,
+ struct nix_tx_stall *tx_stall,
+ int blkaddr, int tl2, int smq)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int link, chan;
+ u64 link_cfg;
+
+ for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++) {
+ link_cfg = rvu_rd64(rvu, blkaddr,
+ NIX_AF_TL3_TL2X_LINKX_CFG(tl2, link));
+ if (!(link_cfg & BIT_ULL(12)))
+ continue;
+
+ /* Get channel of the LINK to which this TL2 is transmitting */
+ chan = link_cfg & 0x3F;
+ tx_stall->tl2_link_map[tl2] = chan << LINK_CHAN_SHIFT;
+
+ /* Save link info */
+ tx_stall->tl2_link_map[tl2] |= (link & 0x7F);
+
+ /* Workaround assumes TL2 transmits to only one link.
+ * So assume the first link enabled is the only one.
+ */
+ break;
+ }
+}
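
Each tl2_link_map entry built here packs the destination link into bits [6:0], an express-link flag into bit 7 (tested later via EXPR_LINK()), and the link channel above LINK_CHAN_SHIFT. A minimal decode sketch, assuming the macros declared alongside struct nix_tx_stall; it is illustrative and not part of the patch:

	/* Illustrative decode of one tl2_link_map entry. */
	static void tl2_link_map_decode(u16 map, int *link, int *chan, bool *expr)
	{
		*link = map & 0x7F;		/* bits [6:0]: destination link */
		*expr = !!EXPR_LINK(map);	/* bit 7: express-link flag */
		*chan = LINK_CHAN(map);		/* bits [15:8]: link channel */
	}
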
+
+static bool is_sq_allocated(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ int blkaddr, int sq)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ struct admin_queue *aq;
+
+ block = &hw->block[blkaddr];
+ aq = block->aq;
+ spin_lock(&aq->lock);
+ if (test_bit(sq, pfvf->sq_bmap)) {
+ spin_unlock(&aq->lock);
+ return true;
+ }
+ spin_unlock(&aq->lock);
+ return false;
+}
+
+static bool is_schq_allocated(struct rvu *rvu, struct nix_hw *nix_hw,
+ int lvl, int schq)
+{
+ struct nix_txsch *txsch = &nix_hw->txsch[lvl];
+
+ mutex_lock(&rvu->rsrc_lock);
+ if (test_bit(schq, txsch->schq.bmap)) {
+ mutex_unlock(&rvu->rsrc_lock);
+ return true;
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+ return false;
+}
+
+static bool is_sw_xoff_set(struct rvu *rvu, int blkaddr, int lvl, int schq)
+{
+ u64 cfg, swxoff_reg = 0x00;
+
+ switch (lvl) {
+ case NIX_TXSCH_LVL_MDQ:
+ swxoff_reg = NIX_AF_MDQX_SW_XOFF(schq);
+ break;
+ case NIX_TXSCH_LVL_TL4:
+ swxoff_reg = NIX_AF_TL4X_SW_XOFF(schq);
+ break;
+ case NIX_TXSCH_LVL_TL3:
+ swxoff_reg = NIX_AF_TL3X_SW_XOFF(schq);
+ break;
+ case NIX_TXSCH_LVL_TL2:
+ swxoff_reg = NIX_AF_TL2X_SW_XOFF(schq);
+ break;
+ case NIX_TXSCH_LVL_TL1:
+ swxoff_reg = NIX_AF_TL1X_SW_XOFF(schq);
+ break;
+ }
+ if (!swxoff_reg)
+ return false;
+
+ cfg = rvu_rd64(rvu, blkaddr, swxoff_reg);
+ if (cfg & BIT_ULL(0))
+ return true;
+
+ return false;
+}
+
+static void rvu_nix_scan_txsch_hierarchy(struct rvu *rvu,
+ struct nix_hw *nix_hw, int blkaddr)
+{
+ struct nix_tx_stall *tx_stall = nix_hw->tx_stall;
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct nix_txsch *tl2_txsch;
+ struct rvu_block *block;
+ int tl4, tl3, tl2, tl1;
+ int lf, smq, size;
+ u16 pcifunc;
+ u64 cfg;
+
+ /* Clear previous mappings */
+ size = sizeof(u16);
+ memset(tx_stall->smq_tl2_map, U16_MAX, tx_stall->smq_count * size);
+ memset(tx_stall->tl4_tl2_map, U16_MAX, tx_stall->tl4_count * size);
+ memset(tx_stall->tl3_tl2_map, U16_MAX, tx_stall->tl3_count * size);
+ memset(tx_stall->tl2_tl1_map, U16_MAX, tx_stall->tl2_count * size);
+ memset(tx_stall->tl2_link_map, U16_MAX, tx_stall->tl2_count * size);
+
+ for (smq = 0; smq < tx_stall->smq_count; smq++) {
+		/* Skip this SMQ if it's not allocated to any PF_FUNC */
+ if (!is_schq_allocated(rvu, nix_hw, NIX_TXSCH_LVL_SMQ, smq))
+ continue;
+
+ /* If SW_XOFF is set, ignore the scheduler queue */
+ cfg = rvu_rd64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
+ if (cfg & BIT_ULL(50))
+ continue;
+ if (is_sw_xoff_set(rvu, blkaddr, NIX_TXSCH_LVL_MDQ, smq))
+ continue;
+
+ cfg = rvu_rd64(rvu, blkaddr, NIX_AF_MDQX_PARENT(smq));
+ tl4 = (cfg >> 16) & 0x1FF;
+ if (is_sw_xoff_set(rvu, blkaddr, NIX_TXSCH_LVL_TL4, tl4))
+ continue;
+
+ cfg = rvu_rd64(rvu, blkaddr, NIX_AF_TL4X_PARENT(tl4));
+ tl3 = (cfg >> 16) & 0x1FF;
+ if (is_sw_xoff_set(rvu, blkaddr, NIX_TXSCH_LVL_TL3, tl3))
+ continue;
+
+ cfg = rvu_rd64(rvu, blkaddr, NIX_AF_TL3X_PARENT(tl3));
+ tl2 = (cfg >> 16) & 0x1FF;
+ if (is_sw_xoff_set(rvu, blkaddr, NIX_TXSCH_LVL_TL2, tl2))
+ continue;
+
+ cfg = rvu_rd64(rvu, blkaddr, NIX_AF_TL2X_PARENT(tl2));
+ tl1 = (cfg >> 16) & 0x1FF;
+ if (is_sw_xoff_set(rvu, blkaddr, NIX_TXSCH_LVL_TL1, tl1))
+ continue;
+
+ tx_stall->smq_tl2_map[smq] = tl2;
+ tx_stall->tl4_tl2_map[tl4] = tl2;
+ tx_stall->tl3_tl2_map[tl3] = tl2;
+ tx_stall->tl2_tl1_map[tl2] = tl1;
+ rvu_nix_scan_tl2_link_mapping(rvu, tx_stall, blkaddr, tl2, smq);
+ }
+
+ /* Get count of TL2s attached to each NIXLF */
+ tl2_txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
+ block = &hw->block[blkaddr];
+ memset(tx_stall->nixlf_tl2_count, 0, block->lf.max * sizeof(u8));
+ for (lf = 0; lf < block->lf.max; lf++) {
+ mutex_lock(&rvu->rsrc_lock);
+ if (!test_bit(lf, block->lf.bmap)) {
+ mutex_unlock(&rvu->rsrc_lock);
+ continue;
+ }
+ pcifunc = block->fn_map[lf];
+ mutex_unlock(&rvu->rsrc_lock);
+
+ for (tl2 = 0; tl2 < tx_stall->tl2_count; tl2++) {
+ if (!is_schq_allocated(rvu, nix_hw,
+ NIX_TXSCH_LVL_TL2, tl2))
+ continue;
+ if (pcifunc == TXSCH_MAP_FUNC(tl2_txsch->pfvf_map[tl2]))
+ tx_stall->nixlf_tl2_count[lf]++;
+ }
+ }
+}
+
+#define TX_OCTS 4
+#define RVU_AF_BAR2_SEL (0x9000000ull)
+#define NIX_LF_SQ_OP_OCTS (0xa10)
+
+static bool is_sq_stalled(struct rvu *rvu, struct nix_hw *nix_hw, int smq)
+{
+ struct nix_tx_stall *tx_stall = nix_hw->tx_stall;
+ u64 btx_octs, atx_octs, cfg, incr;
+ int sq_count = tx_stall->sq_count;
+ struct rvu_hwinfo *hw = rvu->hw;
+ int blkaddr = tx_stall->blkaddr;
+ struct nix_txsch *smq_txsch;
+ struct rvu_pfvf *pfvf;
+ atomic64_t *ptr;
+ int nixlf, sq;
+ u16 pcifunc;
+
+ smq_txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
+ pcifunc = TXSCH_MAP_FUNC(smq_txsch->pfvf_map[smq]);
+ nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+ if (nixlf < 0)
+ return false;
+
+ /* If a NIXLF is transmitting pkts via only one TL2, then checking
+ * global NIXLF TX stats is sufficient.
+ */
+ if (tx_stall->nixlf_tl2_count[nixlf] != 1)
+ goto poll_sq_stats;
+
+ tx_stall->nixlf_poll_count[nixlf]++;
+ btx_octs = rvu_rd64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, TX_OCTS));
+ usleep_range(50, 60);
+ atx_octs = rvu_rd64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, TX_OCTS));
+ if (btx_octs == atx_octs) {
+ tx_stall->nixlf_stall_count[nixlf]++;
+ return true;
+ }
+ return false;
+
+poll_sq_stats:
+ if (!tx_stall->nixlf_tl2_count[nixlf])
+ return false;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+
+	/* Enable BAR2 register access via the AF BAR2 alias registers */
+ cfg = BIT_ULL(16) | pcifunc;
+ rvu_wr64(rvu, blkaddr, RVU_AF_BAR2_SEL, cfg);
+
+ for (sq = 0; sq < pfvf->sq_ctx->qsize; sq++) {
+ if (!is_sq_allocated(rvu, pfvf, blkaddr, sq))
+ continue;
+
+ rvu_nix_txsch_lock(nix_hw);
+ if (tx_stall->sq_smq_map[nixlf * sq_count + sq] != smq) {
+ rvu_nix_txsch_unlock(nix_hw);
+ continue;
+ }
+ rvu_nix_txsch_unlock(nix_hw);
+
+ incr = (u64)sq << 32;
+ ptr = (__force atomic64_t *)(rvu->afreg_base + ((blkaddr << 28)
+ | RVU_AF_BAR2_ALIASX(nixlf, NIX_LF_SQ_OP_OCTS)));
+
+ btx_octs = atomic64_fetch_add_relaxed(incr, ptr);
+ usleep_range(50, 60);
+ atx_octs = atomic64_fetch_add_relaxed(incr, ptr);
+		/* If at least one SQ is transmitting pkts then the SMQ is
+		 * not stalled.
+		 */
+ if (btx_octs != atx_octs)
+ return false;
+ }
+ tx_stall->nixlf_stall_count[nixlf]++;
+
+ return true;
+}
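
The per-SQ statistic above is fetched through the NIX LF SQ 'OP' register via the AF BAR2 alias: the SQ index rides in the upper word of the atomic-add operand, and the value returned by the add is the current octet counter. A minimal sketch of that access pattern, assuming the driver's struct rvu and register macros, and that RVU_AF_BAR2_SEL has already been pointed at the owning PF_FUNC as in is_sq_stalled() (illustrative only):

	/* Illustrative only: read one SQ's octet count via the AF BAR2 alias. */
	static u64 read_sq_octs(struct rvu *rvu, int blkaddr, int nixlf, int sq)
	{
		atomic64_t *ptr = (__force atomic64_t *)(rvu->afreg_base +
				((blkaddr << 28) |
				 RVU_AF_BAR2_ALIASX(nixlf, NIX_LF_SQ_OP_OCTS)));

		/* The SQ index in the upper word selects the queue; the
		 * returned value is that SQ's current octet counter.
		 */
		return atomic64_fetch_add_relaxed((u64)sq << 32, ptr);
	}
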
+
+static bool rvu_nix_check_smq_stall(struct rvu *rvu, struct nix_hw *nix_hw,
+ int tl2)
+{
+ struct nix_tx_stall *tx_stall = nix_hw->tx_stall;
+ int blkaddr = tx_stall->blkaddr;
+ u64 mdesc_cnt;
+ int smq;
+
+ for (smq = 0; smq < tx_stall->smq_count; smq++) {
+ if (tx_stall->smq_tl2_map[smq] != tl2)
+ continue;
+
+ mdesc_cnt = rvu_rd64(rvu, blkaddr, NIX_AF_SMQX_STATUS(smq));
+ if (!(mdesc_cnt & 0x7F))
+ continue;
+ if (is_sq_stalled(rvu, nix_hw, smq))
+ return true;
+ }
+ return false;
+}
+
+static bool is_cgx_idle(u64 status, u8 link_map)
+{
+ if (EXPR_LINK(link_map))
+ return status & CGXX_CMRX_TX_LMAC_E_IDLE;
+ return status & CGXX_CMRX_TX_LMAC_IDLE;
+}
+
+static bool rvu_cgx_tx_idle(struct rvu *rvu, struct nix_hw *nix_hw,
+ struct nix_txsch *tl2_txsch, int tl2)
+{
+ unsigned long timeout = jiffies + usecs_to_jiffies(20);
+ struct nix_tx_stall *tx_stall = nix_hw->tx_stall;
+ u16 pcifunc, link_map;
+ u8 cgx_id, lmac_id;
+ u64 status;
+ void *cgxd;
+ int pf;
+
+ pcifunc = TXSCH_MAP_FUNC(tl2_txsch->pfvf_map[tl2]);
+ pf = rvu_get_pf(pcifunc);
+ if (!is_pf_cgxmapped(rvu, pf))
+ return false;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ if (!cgxd)
+ return false;
+
+ link_map = tx_stall->tl2_link_map[tl2];
+
+ /* Wait for LMAC TX_IDLE */
+ while (time_before(jiffies, timeout)) {
+ status = cgx_get_lmac_tx_fifo_status(cgxd, lmac_id);
+ if (is_cgx_idle(status, link_map))
+ return true;
+ usleep_range(1, 2);
+ }
+ return false;
+}
+
+static void rvu_nix_restore_tx(struct rvu *rvu, struct nix_hw *nix_hw,
+ int blkaddr, int tl2)
+{
+ struct nix_tx_stall *tx_stall = nix_hw->tx_stall;
+ struct nix_txsch *tl2_txsch;
+ int tl, link;
+
+ link = tx_stall->tl2_link_map[tl2] & 0x7F;
+
+ tx_stall->stalled_cntr++;
+
+ tl2_txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
+ rvu_nix_txsch_lock(nix_hw);
+
+ /* Set SW_XOFF for every TL2 queue which transmits to
+ * the associated link.
+ */
+ for (tl = 0; tl < tx_stall->tl2_count; tl++) {
+ if ((tx_stall->tl2_link_map[tl] & 0x7F) != link)
+ continue;
+		/* The full workaround is implemented assuming a fixed 1:1
+		 * TL3:TL2 mapping, i.e. TL3 and TL2 indices can be used
+		 * interchangeably. Hence, except in this function, no other
+		 * place checks the PSE backpressure level configured in the
+		 * NIX_AF_PSE_CHANNEL_LEVEL reg.
+		 */
+ if (tx_stall->pse_link_bp_level == NIX_TXSCH_LVL_TL2)
+ rvu_wr64(rvu, blkaddr,
+ NIX_AF_TL2X_SW_XOFF(tl), BIT_ULL(0));
+ else
+ rvu_wr64(rvu, blkaddr,
+ NIX_AF_TL3X_SW_XOFF(tl), BIT_ULL(0));
+ }
+ usleep_range(20, 25);
+
+ /* Wait for LMAC TX_IDLE */
+ if (link < rvu->hw->cgx_links) {
+ if (!rvu_cgx_tx_idle(rvu, nix_hw, tl2_txsch, tl2))
+ goto clear_sw_xoff;
+ }
+
+ /* Restore link credits */
+ rvu_wr64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link),
+ tx_stall->nlink_credits[link]);
+
+ /* Toggle SW_XOFF of every scheduler queue at every level
+ * which points to this TL2.
+ */
+ for (tl = 0; tl < tx_stall->smq_count; tl++) {
+ if (tx_stall->smq_tl2_map[tl] != tl2)
+ continue;
+ rvu_wr64(rvu, blkaddr, NIX_AF_MDQX_SW_XOFF(tl), BIT_ULL(0));
+ rvu_wr64(rvu, blkaddr, NIX_AF_MDQX_SW_XOFF(tl), 0x00);
+ }
+
+ for (tl = 0; tl < tx_stall->tl4_count; tl++) {
+ if (tx_stall->tl4_tl2_map[tl] != tl2)
+ continue;
+ rvu_wr64(rvu, blkaddr, NIX_AF_TL4X_SW_XOFF(tl), BIT_ULL(0));
+ rvu_wr64(rvu, blkaddr, NIX_AF_TL4X_SW_XOFF(tl), 0x00);
+ }
+
+ for (tl = 0; tl < tx_stall->tl3_count; tl++) {
+ if (tx_stall->tl3_tl2_map[tl] != tl2)
+ continue;
+ if (tx_stall->pse_link_bp_level == NIX_TXSCH_LVL_TL2) {
+ rvu_wr64(rvu, blkaddr,
+ NIX_AF_TL3X_SW_XOFF(tl), BIT_ULL(0));
+ rvu_wr64(rvu, blkaddr, NIX_AF_TL3X_SW_XOFF(tl), 0x00);
+ } else {
+ /* TL3 and TL2 indices used by this NIXLF are same */
+ rvu_wr64(rvu, blkaddr,
+ NIX_AF_TL2X_SW_XOFF(tl), BIT_ULL(0));
+ rvu_wr64(rvu, blkaddr, NIX_AF_TL2X_SW_XOFF(tl), 0x00);
+ }
+ }
+
+clear_sw_xoff:
+ /* Clear SW_XOFF of all TL2 queues, which are set above */
+ for (tl = 0; tl < tx_stall->tl2_count; tl++) {
+ if ((tx_stall->tl2_link_map[tl] & 0x7F) != link)
+ continue;
+ if (tx_stall->pse_link_bp_level == NIX_TXSCH_LVL_TL2)
+ rvu_wr64(rvu, blkaddr, NIX_AF_TL2X_SW_XOFF(tl), 0x00);
+ else
+ rvu_wr64(rvu, blkaddr, NIX_AF_TL3X_SW_XOFF(tl), 0x00);
+ }
+ rvu_nix_txsch_unlock(nix_hw);
+}
+
+static bool is_link_backpressured(struct nix_tx_stall *tx_stall,
+ struct nix_hw *nix_hw,
+ int blkaddr, int tl2)
+{
+ struct rvu *rvu = tx_stall->rvu;
+ struct nix_txsch *tl2_txsch;
+ int pkt_cnt, unit_cnt;
+ int link, chan;
+ u64 cfg;
+
+ /* Skip uninitialized ones */
+ if (tx_stall->tl2_link_map[tl2] == U16_MAX)
+ return true;
+
+ link = tx_stall->tl2_link_map[tl2] & 0x7F;
+ chan = LINK_CHAN(tx_stall->tl2_link_map[tl2]);
+
+ cfg = rvu_rd64(rvu, blkaddr, NIX_AF_TX_LINKX_HW_XOFF(link));
+ if (cfg & BIT_ULL(chan))
+ return true;
+
+ /* Skip below checks for LBK links */
+ if (link >= rvu->hw->cgx_links)
+ return false;
+
+ cfg = rvu_rd64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link));
+
+	/* Check if the current credit or pkt count is negative or simply
+	 * more than what is configured.
+	 */
+ pkt_cnt = (cfg >> 2) & 0x3FF;
+ unit_cnt = (cfg >> 12) & 0xFFFFF;
+ if (pkt_cnt > ((tx_stall->nlink_credits[link] >> 2) & 0x3FF) ||
+ unit_cnt > ((tx_stall->nlink_credits[link] >> 12) & 0xFFFFF)) {
+ return false;
+ }
+
+ tl2_txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
+ if (rvu_cgx_tx_idle(rvu, nix_hw, tl2_txsch, tl2))
+ return false;
+
+ return true;
+}
+
+static int rvu_nix_poll_for_tx_stall(void *arg)
+{
+ struct nix_tx_stall *tx_stall = arg;
+ struct rvu *rvu = tx_stall->rvu;
+ int blkaddr = tx_stall->blkaddr;
+ struct nix_hw *nix_hw;
+ int tl2;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return -EINVAL;
+
+ while (!kthread_should_stop()) {
+ for (tl2 = 0; tl2 < tx_stall->tl2_count; tl2++) {
+			/* Skip this TL2 if it's not allocated to any PF_FUNC */
+ if (!is_schq_allocated(rvu, nix_hw,
+ NIX_TXSCH_LVL_TL2, tl2))
+ continue;
+
+ tx_stall->poll_cntr++;
+
+ if (tx_stall->txsch_config_changed) {
+ rvu_nix_txsch_lock(nix_hw);
+ rvu_nix_scan_txsch_hierarchy(rvu, nix_hw,
+ blkaddr);
+ tx_stall->txsch_config_changed = false;
+ rvu_nix_txsch_unlock(nix_hw);
+ }
+
+ rvu_nix_txsch_lock(nix_hw);
+ if (is_link_backpressured(tx_stall, nix_hw,
+ blkaddr, tl2)) {
+ rvu_nix_txsch_unlock(nix_hw);
+ continue;
+ }
+ rvu_nix_txsch_unlock(nix_hw);
+
+ if (!rvu_nix_check_smq_stall(rvu, nix_hw, tl2))
+ continue;
+
+ rvu_nix_restore_tx(rvu, nix_hw, blkaddr, tl2);
+ }
+ rvu_usleep_interruptible(250);
+ }
+
+ return 0;
+}
+
+static int rvu_nix_init_tl_map(struct rvu *rvu, struct nix_hw *nix_hw, int lvl)
+{
+ struct nix_tx_stall *tx_stall = nix_hw->tx_stall;
+ struct nix_txsch *txsch;
+ u16 *tl_map;
+
+ txsch = &nix_hw->txsch[lvl];
+ tl_map = devm_kcalloc(rvu->dev, txsch->schq.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!tl_map)
+ return -ENOMEM;
+
+ switch (lvl) {
+ case NIX_TXSCH_LVL_SMQ:
+ tx_stall->smq_count = txsch->schq.max;
+ tx_stall->smq_tl2_map = tl_map;
+ break;
+ case NIX_TXSCH_LVL_TL4:
+ tx_stall->tl4_count = txsch->schq.max;
+ tx_stall->tl4_tl2_map = tl_map;
+ break;
+ case NIX_TXSCH_LVL_TL3:
+ tx_stall->tl3_count = txsch->schq.max;
+ tx_stall->tl3_tl2_map = tl_map;
+ break;
+ case NIX_TXSCH_LVL_TL2:
+ tx_stall->tl2_count = txsch->schq.max;
+ tx_stall->tl2_tl1_map = tl_map;
+ break;
+ }
+ memset(tl_map, U16_MAX, txsch->schq.max * sizeof(u16));
+ return 0;
+}
+
+static int rvu_nix_tx_stall_workaround_init(struct rvu *rvu,
+ struct nix_hw *nix_hw, int blkaddr)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct nix_tx_stall *tx_stall;
+ struct rvu_block *block;
+ int links, err;
+
+ if (!hw->cap.nix_fixed_txschq_mapping)
+ return 0;
+
+ tx_stall = devm_kzalloc(rvu->dev,
+ sizeof(struct nix_tx_stall), GFP_KERNEL);
+ if (!tx_stall)
+ return -ENOMEM;
+
+ tx_stall->blkaddr = blkaddr;
+ tx_stall->rvu = rvu;
+ nix_hw->tx_stall = tx_stall;
+
+ /* Get the level at which link/chan will assert backpressure */
+ if (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL))
+ tx_stall->pse_link_bp_level = NIX_TXSCH_LVL_TL3;
+ else
+ tx_stall->pse_link_bp_level = NIX_TXSCH_LVL_TL2;
+
+ mutex_init(&tx_stall->txsch_lock);
+
+	/* Alloc memory for saving SMQ/TL4/TL3 to TL2 and TL2 to TL1 mappings */
+ err = rvu_nix_init_tl_map(rvu, nix_hw, NIX_TXSCH_LVL_SMQ);
+ if (err)
+ return err;
+ err = rvu_nix_init_tl_map(rvu, nix_hw, NIX_TXSCH_LVL_TL4);
+ if (err)
+ return err;
+ err = rvu_nix_init_tl_map(rvu, nix_hw, NIX_TXSCH_LVL_TL3);
+ if (err)
+ return err;
+ err = rvu_nix_init_tl_map(rvu, nix_hw, NIX_TXSCH_LVL_TL2);
+ if (err)
+ return err;
+
+ block = &hw->block[blkaddr];
+ tx_stall->sq_count = min_t(int, num_online_cpus(), OTX2_MAX_CQ_CNT);
+
+	/* Per-NIXLF SQ to SMQ mapping info */
+ tx_stall->sq_smq_map = devm_kcalloc(rvu->dev,
+ block->lf.max * tx_stall->sq_count,
+ sizeof(u16), GFP_KERNEL);
+ if (!tx_stall->sq_smq_map)
+ return -ENOMEM;
+ memset(tx_stall->sq_smq_map, U16_MAX,
+ block->lf.max * tx_stall->sq_count * sizeof(u16));
+
+ /* TL2 to transmit link mapping info */
+ tx_stall->tl2_link_map = devm_kcalloc(rvu->dev, tx_stall->tl2_count,
+ sizeof(u16), GFP_KERNEL);
+ if (!tx_stall->tl2_link_map)
+ return -ENOMEM;
+ memset(tx_stall->tl2_link_map, U16_MAX,
+ tx_stall->tl2_count * sizeof(u16));
+
+	/* Number of TL2s attached to each NIXLF */
+ tx_stall->nixlf_tl2_count = devm_kcalloc(rvu->dev, block->lf.max,
+ sizeof(u8), GFP_KERNEL);
+ if (!tx_stall->nixlf_tl2_count)
+ return -ENOMEM;
+ memset(tx_stall->nixlf_tl2_count, 0, block->lf.max * sizeof(u8));
+
+ /* Per NIXLF poll and stall counters */
+ tx_stall->nixlf_poll_count = devm_kcalloc(rvu->dev, block->lf.max,
+ sizeof(u64), GFP_KERNEL);
+ if (!tx_stall->nixlf_poll_count)
+ return -ENOMEM;
+ memset(tx_stall->nixlf_poll_count, 0, block->lf.max * sizeof(u64));
+
+ tx_stall->nixlf_stall_count = devm_kcalloc(rvu->dev, block->lf.max,
+ sizeof(u64), GFP_KERNEL);
+ if (!tx_stall->nixlf_stall_count)
+ return -ENOMEM;
+ memset(tx_stall->nixlf_stall_count, 0, block->lf.max * sizeof(u64));
+
+ /* For saving HW link's transmit credits config */
+ links = rvu->hw->cgx_links + rvu->hw->lbk_links;
+ tx_stall->nlink_credits = devm_kcalloc(rvu->dev, links,
+ sizeof(u64), GFP_KERNEL);
+ if (!tx_stall->nlink_credits)
+ return -ENOMEM;
+ rvu_nix_scan_link_credits(rvu, blkaddr, tx_stall);
+
+ tx_stall->poll_thread = kthread_create(rvu_nix_poll_for_tx_stall,
+ (void *)tx_stall,
+ "nix_tx_stall_polling_kthread");
+ if (IS_ERR(tx_stall->poll_thread))
+ return PTR_ERR(tx_stall->poll_thread);
+
+ kthread_bind(tx_stall->poll_thread, cpumask_first(cpu_online_mask));
+ wake_up_process(tx_stall->poll_thread);
+ return 0;
+}
+
+static void rvu_nix_tx_stall_workaround_exit(struct rvu *rvu,
+ struct nix_hw *nix_hw)
+{
+ struct nix_tx_stall *tx_stall = nix_hw->tx_stall;
+
+ if (!tx_stall)
+ return;
+
+ if (tx_stall->poll_thread)
+ kthread_stop(tx_stall->poll_thread);
+ mutex_destroy(&tx_stall->txsch_lock);
+}
+
+ssize_t rvu_nix_get_tx_stall_counters(struct nix_hw *nix_hw,
+ char __user *buffer, loff_t *ppos)
+{
+ struct rvu *rvu = nix_hw->rvu;
+ struct rvu_hwinfo *hw;
+ struct nix_tx_stall *tx_stall;
+ struct rvu_block *block;
+ int blkaddr, len, lf;
+ char kbuf[2000];
+
+ hw = rvu->hw;
+ if (*ppos)
+ return 0;
+
+ blkaddr = nix_hw->blkaddr;
+
+ tx_stall = nix_hw->tx_stall;
+ if (!tx_stall)
+ return -EFAULT;
+
+ len = snprintf(kbuf, sizeof(kbuf), "\n NIX transmit stall stats\n");
+	len += snprintf(kbuf + len, sizeof(kbuf) - len,
+			"\t\tPolled: \t\t%lld\n", tx_stall->poll_cntr);
+	len += snprintf(kbuf + len, sizeof(kbuf) - len,
+			"\t\tTx stall detected: \t%lld\n\n",
+			tx_stall->stalled_cntr);
+
+ block = &hw->block[blkaddr];
+ mutex_lock(&rvu->rsrc_lock);
+ for (lf = 0; lf < block->lf.max; lf++) {
+ if (!test_bit(lf, block->lf.bmap))
+ continue;
+		len += snprintf(kbuf + len, sizeof(kbuf) - len,
+ "\t\tNIXLF%d Polled: %lld \tStalled: %lld\n",
+ lf, tx_stall->nixlf_poll_count[lf],
+ tx_stall->nixlf_stall_count[lf]);
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+
+ if (len > 0) {
+ if (copy_to_user(buffer, kbuf, len))
+ return -EFAULT;
+ }
+
+ *ppos += len;
+ return len;
+}
+
+static void rvu_nix_enable_internal_bp(struct rvu *rvu, int blkaddr)
+{
+	/* An issue exists in A0 silicon whereby a NIX CQ may reach the
+	 * CQ-full state, followed by a CQ hang on the CQM query response
+	 * from a stale CQ context. To avoid such a condition, enable
+	 * internal backpressure with the BP_TEST registers.
+	 */
+ if (is_rvu_96xx_A0(rvu)) {
+ /* Enable internal backpressure on pipe_stg0 */
+ rvu_write64(rvu, blkaddr, NIX_AF_RQM_BP_TEST,
+ BIT_ULL(51) | BIT_ULL(23) | BIT_ULL(22) | 0x100ULL);
+ /* Enable internal backpressure on cqm query request */
+ rvu_write64(rvu, blkaddr, NIX_AF_CQM_BP_TEST,
+ BIT_ULL(43) | BIT_ULL(23) | BIT_ULL(22) | 0x100ULL);
+ }
+}
+
+int rvu_nix_fixes_init(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
+{
+ int err;
+ u64 cfg;
+
+ /* As per a HW errata in 96xx A0 silicon, NIX may corrupt
+ * internal state when conditional clocks are turned off.
+ * Hence enable them.
+ */
+ if (is_rvu_96xx_A0(rvu))
+ rvu_write64(rvu, blkaddr, NIX_AF_CFG,
+ rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x5EULL);
+ if (is_rvu_pre_96xx_C0(rvu))
+ rvu_write64(rvu, blkaddr, NIX_AF_CFG,
+ rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);
+
+ /* Set chan/link to backpressure TL3 instead of TL2 */
+ rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
+
+	/* Disable the SQ manager's sticky mode operation (set TM6 = 0,
+	 * TM11 = 0). This sticky mode is known to cause SQ stalls when
+	 * multiple SQs are mapped to the same SMQ and transmit pkts
+	 * simultaneously. NIX PSE may deadlock when there is any sticky
+	 * to non-sticky transmission. Hence disable it (TM5 = 0).
+	 */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
+ cfg &= ~(BIT_ULL(15) | BIT_ULL(14) | BIT_ULL(23));
+	/* NIX may drop credits when conditional clocks are turned off.
+	 * Hence enable the control flow clk (set TM9 = 1).
+	 */
+ cfg |= BIT_ULL(21);
+ rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
+
+ rvu_nix_enable_internal_bp(rvu, blkaddr);
+
+ if (!is_rvu_96xx_A0(rvu))
+ return 0;
+
+ err = rvu_nix_tx_stall_workaround_init(rvu, nix_hw, blkaddr);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+void rvu_nix_fixes_exit(struct rvu *rvu, struct nix_hw *nix_hw)
+{
+ if (!is_rvu_96xx_A0(rvu))
+ return;
+
+ rvu_nix_tx_stall_workaround_exit(rvu, nix_hw);
+}
+
+int rvu_tim_lookup_rsrc(struct rvu *rvu, struct rvu_block *block,
+ u16 pcifunc, int slot)
+{
+ int lf, blkaddr;
+ u64 val;
+
+	/* Due to a HW issue, the LF_CFG_DEBUG register cannot be used to
+	 * find the PF_FUNC <=> LF mapping; hence scan through the LFX_CFG
+	 * registers to find the mapped LF for a given PF_FUNC.
+	 */
+ if (is_rvu_96xx_B0(rvu)) {
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_TIM, pcifunc);
+ if (blkaddr < 0)
+ return TIM_AF_LF_INVALID;
+
+ for (lf = 0; lf < block->lf.max; lf++) {
+ val = rvu_read64(rvu, block->addr, block->lfcfg_reg |
+ (lf << block->lfshift));
+ if ((((val >> 8) & 0xffff) == pcifunc) &&
+ (val & 0xff) == slot)
+ return lf;
+ }
+ return -1;
+ }
+
+ val = ((u64)pcifunc << 24) | (slot << 16) | (1ULL << 13);
+ rvu_write64(rvu, block->addr, block->lookup_reg, val);
+
+ /* Wait for the lookup to finish */
+ while (rvu_read64(rvu, block->addr, block->lookup_reg) & (1ULL << 13))
+ ;
+
+ val = rvu_read64(rvu, block->addr, block->lookup_reg);
+
+ /* Check LF valid bit */
+ if (!(val & (1ULL << 12)))
+ return -1;
+
+ return (val & 0xFFF);
+}
+
+int rvu_npc_get_tx_nibble_cfg(struct rvu *rvu, u64 nibble_ena)
+{
+ /* Due to a HW issue in these silicon versions, parse nibble enable
+ * configuration has to be identical for both Rx and Tx interfaces.
+ */
+ if (is_rvu_96xx_B0(rvu))
+ return nibble_ena;
+ return 0;
+}
+
+bool is_parse_nibble_config_valid(struct rvu *rvu,
+ struct npc_mcam_kex *mcam_kex)
+{
+ if (!is_rvu_96xx_B0(rvu))
+ return true;
+
+ /* Due to a HW issue in above silicon versions, parse nibble enable
+ * configuration has to be identical for both Rx and Tx interfaces.
+ */
+ if (mcam_kex->keyx_cfg[NIX_INTF_RX] != mcam_kex->keyx_cfg[NIX_INTF_TX])
+ return false;
+ return true;
+}
+
+void __weak otx2smqvf_xmit(void)
+{
+ /* Nothing to do */
+}
+
+void rvu_smqvf_xmit(struct rvu *rvu)
+{
+ if (is_rvu_95xx_A0(rvu) || is_rvu_96xx_A0(rvu)) {
+ usleep_range(50, 60);
+ otx2smqvf_xmit();
+ }
+}
+
+void rvu_tim_hw_fixes(struct rvu *rvu, int blkaddr)
+{
+	u64 cfg;
+
+	/* Due to wrong clock gating, the TIM expire counter is updated
+	 * incorrectly. The workaround is to enable the force clock
+	 * (FORCE_CSCLK_ENA = 1).
+ */
+ cfg = rvu_read64(rvu, blkaddr, TIM_AF_FLAGS_REG);
+ cfg |= BIT_ULL(1);
+ rvu_write64(rvu, blkaddr, TIM_AF_FLAGS_REG, cfg);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_fixes.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_fixes.h
new file mode 100644
index 000000000000..16ddf487c4d3
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_fixes.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2019 Marvell.
+ *
+ */
+
+#ifndef RVU_FIXES_H
+#define RVU_FIXES_H
+
+#define RVU_SMQVF_PCIFUNC 17
+
+struct rvu;
+
+void otx2smqvf_xmit(void);
+void rvu_smqvf_xmit(struct rvu *rvu);
+
+#endif /* RVU_FIXES_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index f6a3cf3e6f23..915013fa28d4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -1,11 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
-/* Marvell OcteonTx2 RVU Admin Function driver
+/* Marvell RVU Admin Function driver
*
- * Copyright (C) 2018 Marvell International Ltd.
+ * Copyright (C) 2018 Marvell.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/module.h>
@@ -16,9 +13,23 @@
#include "rvu.h"
#include "npc.h"
#include "cgx.h"
+#include "lmac_common.h"
+#include "rvu_fixes.h"
+static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc);
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
int type, int chan_id);
+static int nix_setup_ipolicers(struct rvu *rvu,
+ struct nix_hw *nix_hw, int blkaddr);
+static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw);
+static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
+ struct nix_hw *nix_hw, u16 pcifunc);
+static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc);
+static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
+ u32 leaf_prof);
+static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
+ int type, bool add);
+static const char *nix_get_ctx_name(int ctype);
enum mc_tbl_sz {
MC_TBL_SZ_256,
@@ -68,6 +79,23 @@ struct mce {
u16 pcifunc;
};
+int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr)
+{
+ int i = 0;
+
+	/* If blkaddr is 0, return the first NIX block address */
+ if (blkaddr == 0)
+ return rvu->nix_blkaddr[blkaddr];
+
+ while (i + 1 < MAX_NIX_BLKS) {
+ if (rvu->nix_blkaddr[i] == blkaddr)
+ return rvu->nix_blkaddr[i + 1];
+ i++;
+ }
+
+ return 0;
+}
+
bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
@@ -81,14 +109,16 @@ bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
int rvu_get_nixlf_count(struct rvu *rvu)
{
+ int blkaddr = 0, max = 0;
struct rvu_block *block;
- int blkaddr;
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
- if (blkaddr < 0)
- return 0;
- block = &rvu->hw->block[blkaddr];
- return block->lf.max;
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ while (blkaddr) {
+ block = &rvu->hw->block[blkaddr];
+ max += block->lf.max;
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ }
+ return max;
}
int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr)
@@ -111,6 +141,22 @@ int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr)
return 0;
}
+int nix_get_struct_ptrs(struct rvu *rvu, u16 pcifunc,
+ struct nix_hw **nix_hw, int *blkaddr)
+{
+ struct rvu_pfvf *pfvf;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ *blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (!pfvf->nixlf || *blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ *nix_hw = get_nix_hw(rvu->hw, *blkaddr);
+ if (!*nix_hw)
+ return NIX_AF_ERR_INVALID_NIXBLK;
+ return 0;
+}
+
static void nix_mce_list_init(struct nix_mce_list *list, int max)
{
INIT_HLIST_HEAD(&list->head);
@@ -130,23 +176,88 @@ static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
return idx;
}
-static inline struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
+struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
{
- if (blkaddr == BLKADDR_NIX0 && hw->nix0)
- return hw->nix0;
-
+ int nix_blkaddr = 0, i = 0;
+ struct rvu *rvu = hw->rvu;
+
+ nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
+ while (nix_blkaddr) {
+ if (blkaddr == nix_blkaddr && hw->nix)
+ return &hw->nix[i];
+ nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
+ i++;
+ }
return NULL;
}
+u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu)
+{
+ dwrr_mtu &= 0x1FULL;
+
+	/* MTU used for DWRR calculation is a power of 2, up to 64K bytes.
+ * Value of 4 is reserved for MTU value of 9728 bytes.
+ * Value of 5 is reserved for MTU value of 10240 bytes.
+ */
+ switch (dwrr_mtu) {
+ case 4:
+ return 9728;
+ case 5:
+ return 10240;
+ default:
+ return BIT_ULL(dwrr_mtu);
+ }
+
+ return 0;
+}
+
+u32 convert_bytes_to_dwrr_mtu(u32 bytes)
+{
+	/* MTU used for DWRR calculation is a power of 2, up to 64K bytes.
+ * Value of 4 is reserved for MTU value of 9728 bytes.
+ * Value of 5 is reserved for MTU value of 10240 bytes.
+ */
+ if (bytes > BIT_ULL(16))
+ return 0;
+
+ switch (bytes) {
+ case 9728:
+ return 4;
+ case 10240:
+ return 5;
+ default:
+ return ilog2(bytes);
+ }
+
+ return 0;
+}
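
The two helpers above are inverses over the values the hardware accepts: the 5-bit field encodes log2(MTU) for power-of-two sizes up to 64 KB, with codes 4 and 5 reserved for the non-power-of-two jumbo sizes. A few illustrative round trips, derived from the code above rather than from any additional documentation:

	/*
	 * convert_bytes_to_dwrr_mtu(9728)  -> 4;  convert_dwrr_mtu_to_bytes(4)  -> 9728
	 * convert_bytes_to_dwrr_mtu(65536) -> 16; convert_dwrr_mtu_to_bytes(16) -> 65536
	 * convert_bytes_to_dwrr_mtu(1500)  -> 10; convert_dwrr_mtu_to_bytes(10) -> 1024
	 *   (sizes that are not a power of two and not 9728/10240 round down,
	 *    so they do not survive the round trip exactly)
	 */
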
+
static void nix_rx_sync(struct rvu *rvu, int blkaddr)
{
int err;
- /*Sync all in flight RX packets to LLC/DRAM */
+ /* Sync all in flight RX packets to LLC/DRAM */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
+ err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
+ if (err)
+ dev_err(rvu->dev, "SYNC1: NIX RX software sync failed\n");
+
+	/* SW_SYNC ensures all existing transactions are finished and pkts
+	 * are written to LLC/DRAM; queues should be torn down only after a
+	 * successful SW_SYNC. Due to a HW errata, in some rare scenarios
+	 * an existing transaction might end after the SW_SYNC operation. To
+	 * ensure the operation is fully done, do the SW_SYNC twice.
+	 */
rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
if (err)
- dev_err(rvu->dev, "NIX RX software sync failed\n");
+ dev_err(rvu->dev, "SYNC2: NIX RX software sync failed\n");
+
+ /* As per a HW errata in 96xx A0 silicon, HW may clear SW_SYNC[ENA]
+ * bit too early. Hence wait for 50us more.
+ */
+ if (is_rvu_96xx_A0(rvu))
+ usleep_range(50, 60);
}
static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
@@ -184,15 +295,21 @@ static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
return true;
}
-static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
+static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf,
+ struct nix_lf_alloc_rsp *rsp, bool loop)
{
- struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ struct rvu_pfvf *parent_pf, *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ u16 req_chan_base, req_chan_end, req_chan_cnt;
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct sdp_node_info *sdp_info;
+ int pkind, pf, vf, lbkid, vfid;
u8 cgx_id, lmac_id;
- int pkind, pf, vf;
+ bool from_vf;
int err;
pf = rvu_get_pf(pcifunc);
- if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
+ if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
+ type != NIX_INTF_TYPE_SDP)
return 0;
switch (type) {
@@ -206,32 +323,106 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
"PF_Func 0x%x: Invalid pkind\n", pcifunc);
return -EINVAL;
}
- pfvf->rx_chan_base = NIX_CHAN_CGX_LMAC_CHX(cgx_id, lmac_id, 0);
+ pfvf->rx_chan_base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0);
pfvf->tx_chan_base = pfvf->rx_chan_base;
pfvf->rx_chan_cnt = 1;
pfvf->tx_chan_cnt = 1;
- cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
- rvu_npc_set_pkind(rvu, pkind, pfvf);
+ rsp->tx_link = cgx_id * hw->lmac_per_cgx + lmac_id;
+
+ if (rvu_cgx_is_pkind_config_permitted(rvu, pcifunc)) {
+ cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
+ pkind);
+ rvu_npc_set_pkind(rvu, pkind, pfvf);
+ }
- /* By default we enable pause frames */
- if ((pcifunc & RVU_PFVF_FUNC_MASK) == 0)
- cgx_lmac_set_pause_frm(rvu_cgx_pdata(cgx_id, rvu),
- lmac_id, true, true);
break;
case NIX_INTF_TYPE_LBK:
vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
+		/* If the NIX1 block is present on the silicon then NIXes are
+		 * assigned alternately for lbk interfaces. NIX0 should
+		 * send packets on lbk link 1 channels and NIX1 should send
+		 * on lbk link 0 channels for the communication between
+		 * NIX0 and NIX1.
+		 */
+ lbkid = 0;
+ if (rvu->hw->lbk_links > 1)
+ lbkid = vf & 0x1 ? 0 : 1;
+
+		/* By default NIX0 is configured to send packets on lbk link 1
+		 * (which corresponds to LBK1); the same packets are received
+		 * on NIX1 over lbk link 0. If NIX1 sends packets on lbk link 0
+		 * (which corresponds to LBK2) they are received on NIX0 lbk
+		 * link 1.
+		 * But if the lbk links for NIX0 and NIX1 are negated, i.e. NIX0
+		 * transmits and receives on lbk link 0, which corresponds
+		 * to the LBK1 block, back-to-back connectivity between NIX and
+		 * LBK can be achieved (which is similar to 96xx).
+		 *
+		 *                      RX              TX
+		 * NIX0 lbk link       1 (LBK2)        1 (LBK1)
+		 * NIX0 lbk link       0 (LBK0)        0 (LBK0)
+		 * NIX1 lbk link       0 (LBK1)        0 (LBK2)
+		 * NIX1 lbk link       1 (LBK3)        1 (LBK3)
+		 */
+ if (loop)
+ lbkid = !lbkid;
+
/* Note that AF's VFs work in pairs and talk over consecutive
 	 * loopback channels. Therefore, if an odd number of AF VFs are
* enabled then the last VF remains with no pair.
*/
- pfvf->rx_chan_base = NIX_CHAN_LBK_CHX(0, vf);
- pfvf->tx_chan_base = vf & 0x1 ? NIX_CHAN_LBK_CHX(0, vf - 1) :
- NIX_CHAN_LBK_CHX(0, vf + 1);
+ pfvf->rx_chan_base = rvu_nix_chan_lbk(rvu, lbkid, vf);
+ pfvf->tx_chan_base = vf & 0x1 ?
+ rvu_nix_chan_lbk(rvu, lbkid, vf - 1) :
+ rvu_nix_chan_lbk(rvu, lbkid, vf + 1);
pfvf->rx_chan_cnt = 1;
pfvf->tx_chan_cnt = 1;
+ rsp->tx_link = hw->cgx_links + lbkid;
+ pfvf->lbkid = lbkid;
+ rvu_npc_set_pkind(rvu, NPC_RX_LBK_PKIND, pfvf);
rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
- pfvf->rx_chan_base, false);
+ pfvf->rx_chan_base,
+ pfvf->rx_chan_cnt);
+
+ break;
+ case NIX_INTF_TYPE_SDP:
+ from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
+ parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
+ sdp_info = parent_pf->sdp_info;
+ if (!sdp_info) {
+ dev_err(rvu->dev, "Invalid sdp_info pointer\n");
+ return -EINVAL;
+ }
+ if (from_vf) {
+ req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn +
+ sdp_info->num_pf_rings;
+ vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
+ for (vfid = 0; vfid < vf; vfid++)
+ req_chan_base += sdp_info->vf_rings[vfid];
+ req_chan_cnt = sdp_info->vf_rings[vf];
+ req_chan_end = req_chan_base + req_chan_cnt - 1;
+ if (req_chan_base < rvu_nix_chan_sdp(rvu, 0) ||
+ req_chan_end > rvu_nix_chan_sdp(rvu, 255)) {
+ dev_err(rvu->dev,
+ "PF_Func 0x%x: Invalid channel base and count\n",
+ pcifunc);
+ return -EINVAL;
+ }
+ } else {
+ req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn;
+ req_chan_cnt = sdp_info->num_pf_rings;
+ }
+
+ pfvf->rx_chan_base = req_chan_base;
+ pfvf->rx_chan_cnt = req_chan_cnt;
+ pfvf->tx_chan_base = pfvf->rx_chan_base;
+ pfvf->tx_chan_cnt = pfvf->rx_chan_cnt;
+
+ rsp->tx_link = hw->cgx_links + hw->lbk_links;
+ rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
+ pfvf->rx_chan_base,
+ pfvf->rx_chan_cnt);
break;
}
@@ -242,16 +433,17 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
pfvf->rx_chan_base, pfvf->mac_addr);
/* Add this PF_FUNC to bcast pkt replication list */
- err = nix_update_bcast_mce_list(rvu, pcifunc, true);
+ err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, true);
if (err) {
dev_err(rvu->dev,
"Bcast list, failed to enable PF_FUNC 0x%x\n",
pcifunc);
return err;
}
-
+ /* Install MCAM rule matching Ethernet broadcast mac address */
rvu_npc_install_bcast_match_entry(rvu, pcifunc,
nixlf, pfvf->rx_chan_base);
+
pfvf->maxlen = NIC_HW_MIN_FRS;
pfvf->minlen = NIC_HW_MIN_FRS;
@@ -265,28 +457,28 @@ static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
pfvf->maxlen = 0;
pfvf->minlen = 0;
- pfvf->rxvlan = false;
/* Remove this PF_FUNC from bcast pkt replication list */
- err = nix_update_bcast_mce_list(rvu, pcifunc, false);
+ err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, false);
if (err) {
dev_err(rvu->dev,
"Bcast list, failed to disable PF_FUNC 0x%x\n",
pcifunc);
}
- /* Free and disable any MCAM entries used by this NIX LF */
- rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
+ /* Disable DMAC filters used */
+ rvu_cgx_disable_dmac_entries(rvu, pcifunc);
}
-int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
- struct nix_bp_cfg_req *req,
- struct msg_rsp *rsp)
+static int nix_bp_disable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct msg_rsp *rsp, bool cpt_link)
{
u16 pcifunc = req->hdr.pcifunc;
struct rvu_pfvf *pfvf;
int blkaddr, pf, type;
u16 chan_base, chan;
+ u16 chan_v;
u64 cfg;
pf = rvu_get_pf(pcifunc);
@@ -294,24 +486,49 @@ int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
return 0;
+ if (cpt_link && !rvu->hw->cpt_links)
+ return 0;
+
pfvf = rvu_get_pfvf(rvu, pcifunc);
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
chan_base = pfvf->rx_chan_base + req->chan_base;
for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
- cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
- rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
+		/* The CPT channel for a given link channel is always
+		 * assumed to be the link channel with BIT(11) set.
+		 */
+ if (cpt_link)
+ chan_v = chan | BIT(11);
+ else
+ chan_v = chan;
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v));
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v),
cfg & ~BIT_ULL(16));
}
return 0;
}
+int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct msg_rsp *rsp)
+{
+ return nix_bp_disable(rvu, req, rsp, false);
+}
+
+int rvu_mbox_handler_nix_cpt_bp_disable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct msg_rsp *rsp)
+{
+ return nix_bp_disable(rvu, req, rsp, true);
+}
+
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
int type, int chan_id)
{
- int bpid, blkaddr, lmac_chan_cnt;
+ int bpid, blkaddr, lmac_chan_cnt, sdp_chan_cnt, vf;
+ u16 cgx_bpid_cnt, lbk_bpid_cnt, sdp_bpid_cnt;
struct rvu_hwinfo *hw = rvu->hw;
- u16 cgx_bpid_cnt, lbk_bpid_cnt;
struct rvu_pfvf *pfvf;
u8 cgx_id, lmac_id;
u64 cfg;
@@ -323,6 +540,10 @@ static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
+ sdp_chan_cnt = cfg & 0xFFF;
+ sdp_bpid_cnt = hw->sdp_links * sdp_chan_cnt;
+
pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
/* Backpressure IDs range division
@@ -337,7 +558,7 @@ static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
*/
switch (type) {
case NIX_INTF_TYPE_CGX:
- if ((req->chan_base + req->chan_cnt) > 15)
+ if ((req->chan_base + req->chan_cnt) > 16)
return -EINVAL;
rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
/* Assign bpid based on cgx, lmac and chan id */
@@ -351,36 +572,60 @@ static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
break;
case NIX_INTF_TYPE_LBK:
- if ((req->chan_base + req->chan_cnt) > 63)
+ if ((req->chan_base + req->chan_cnt) > 1)
return -EINVAL;
- bpid = cgx_bpid_cnt + req->chan_base;
+		/* Channel number allocation is based on the VF id,
+		 * hence the BPID follows a similar scheme.
+		 */
+ vf = (req->hdr.pcifunc & RVU_PFVF_FUNC_MASK) - 1;
+
+ bpid = cgx_bpid_cnt + req->chan_base + vf;
if (req->bpid_per_chan)
bpid += chan_id;
if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt))
return -EINVAL;
break;
+ case NIX_INTF_TYPE_SDP:
+ if ((req->chan_base + req->chan_cnt) > 255)
+ return -EINVAL;
+
+ bpid = sdp_bpid_cnt + req->chan_base;
+ if (req->bpid_per_chan)
+ bpid += chan_id;
+
+ if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt + sdp_bpid_cnt))
+ return -EINVAL;
+ break;
default:
return -EINVAL;
}
return bpid;
}
-int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
- struct nix_bp_cfg_req *req,
- struct nix_bp_cfg_rsp *rsp)
+static int nix_bp_enable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct nix_bp_cfg_rsp *rsp,
+ bool cpt_link)
{
int blkaddr, pf, type, chan_id = 0;
u16 pcifunc = req->hdr.pcifunc;
struct rvu_pfvf *pfvf;
u16 chan_base, chan;
s16 bpid, bpid_base;
+ u16 chan_v;
u64 cfg;
pf = rvu_get_pf(pcifunc);
type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
+ if (is_sdp_pfvf(pcifunc))
+ type = NIX_INTF_TYPE_SDP;
- /* Enable backpressure only for CGX mapped PFs and LBK interface */
- if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
+ /* Enable backpressure only for CGX mapped PFs and LBK/SDP interface */
+ if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
+ type != NIX_INTF_TYPE_SDP)
+ return 0;
+
+ if (cpt_link && !rvu->hw->cpt_links)
return 0;
pfvf = rvu_get_pfvf(rvu, pcifunc);
@@ -396,9 +641,19 @@ int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
return -EINVAL;
}
- cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
- rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
- cfg | (bpid & 0xFF) | BIT_ULL(16));
+		/* The CPT channel for a given link channel is always
+		 * assumed to be the link channel with BIT(11) set.
+		 */
+
+ if (cpt_link)
+ chan_v = chan | BIT(11);
+ else
+ chan_v = chan;
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v));
+ cfg &= ~GENMASK_ULL(8, 0);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v),
+ cfg | (bpid & GENMASK_ULL(8, 0)) | BIT_ULL(16));
chan_id++;
bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
}
@@ -415,6 +670,20 @@ int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
return 0;
}
+int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct nix_bp_cfg_rsp *rsp)
+{
+ return nix_bp_enable(rvu, req, rsp, false);
+}
+
+int rvu_mbox_handler_nix_cpt_bp_enable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct nix_bp_cfg_rsp *rsp)
+{
+ return nix_bp_enable(rvu, req, rsp, true);
+}
+
static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
u64 format, bool v4, u64 *fidx)
{
@@ -546,9 +815,10 @@ static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
struct rvu_pfvf *pfvf, int nixlf,
int rss_sz, int rss_grps, int hwctx_size,
- u64 way_mask)
+ u64 way_mask, bool tag_lsb_as_adder)
{
int err, grp, num_indices;
+ u64 val;
/* RSS is not requested for this NIXLF */
if (!rss_sz)
@@ -564,10 +834,13 @@ static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
(u64)pfvf->rss_ctx->iova);
/* Config full RSS table size, enable RSS and caching */
- rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf),
- BIT_ULL(36) | BIT_ULL(4) |
- ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE) |
- way_mask << 20);
+ val = BIT_ULL(36) | BIT_ULL(4) | way_mask << 20 |
+ ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE);
+
+ if (tag_lsb_as_adder)
+ val |= BIT_ULL(5);
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf), val);
/* Config RSS group offset and sizes */
for (grp = 0; grp < rss_grps; grp++)
rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
@@ -612,8 +885,9 @@ static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
return 0;
}
-static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
- struct nix_aq_enq_rsp *rsp)
+static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
+ struct nix_aq_enq_req *req,
+ struct nix_aq_enq_rsp *rsp)
{
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
@@ -626,10 +900,7 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
bool ena;
u64 cfg;
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
- if (blkaddr < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
-
+ blkaddr = nix_hw->blkaddr;
block = &hw->block[blkaddr];
aq = block->aq;
if (!aq) {
@@ -640,8 +911,11 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
pfvf = rvu_get_pfvf(rvu, pcifunc);
nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
- /* Skip NIXLF check for broadcast MCE entry init */
- if (!(!rsp && req->ctype == NIX_AQ_CTYPE_MCE)) {
+ /* Skip NIXLF check for broadcast MCE entry and bandwidth profile
+ * operations done by AF itself.
+ */
+ if (!((!rsp && req->ctype == NIX_AQ_CTYPE_MCE) ||
+ (req->ctype == NIX_AQ_CTYPE_BANDPROF && !pcifunc))) {
if (!pfvf->nixlf || nixlf < 0)
return NIX_AF_ERR_AF_LF_INVALID;
}
@@ -669,8 +943,9 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
break;
case NIX_AQ_CTYPE_MCE:
cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);
+
/* Check if index exceeds MCE list length */
- if (!hw->nix0->mcast.mce_ctx ||
+ if (!nix_hw->mcast.mce_ctx ||
(req->qidx >= (256UL << (cfg & 0xF))))
rc = NIX_AF_ERR_AQ_ENQUEUE;
@@ -680,6 +955,11 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
if (rsp)
rc = NIX_AF_ERR_AQ_ENQUEUE;
break;
+ case NIX_AQ_CTYPE_BANDPROF:
+ if (nix_verify_bandprof((struct nix_cn10k_aq_enq_req *)req,
+ nix_hw, pcifunc))
+ rc = NIX_AF_ERR_INVALID_BANDPROF;
+ break;
default:
rc = NIX_AF_ERR_AQ_ENQUEUE;
}
@@ -695,6 +975,8 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
pcifunc, req->sq.smq))
return NIX_AF_ERR_AQ_ENQUEUE;
+ rvu_nix_update_sq_smq_mapping(rvu, blkaddr, nixlf, req->qidx,
+ req->sq.smq);
}
memset(&inst, 0, sizeof(struct nix_aq_inst_s));
@@ -736,6 +1018,9 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
else if (req->ctype == NIX_AQ_CTYPE_MCE)
memcpy(mask, &req->mce_mask,
sizeof(struct nix_rx_mce_s));
+ else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
+ memcpy(mask, &req->prof_mask,
+ sizeof(struct nix_bandprof_s));
fallthrough;
case NIX_AQ_INSTOP_INIT:
if (req->ctype == NIX_AQ_CTYPE_RQ)
@@ -748,6 +1033,8 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
else if (req->ctype == NIX_AQ_CTYPE_MCE)
memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
+ else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
+ memcpy(ctx, &req->prof, sizeof(struct nix_bandprof_s));
break;
case NIX_AQ_INSTOP_NOP:
case NIX_AQ_INSTOP_READ:
@@ -825,6 +1112,9 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
else if (req->ctype == NIX_AQ_CTYPE_MCE)
memcpy(&rsp->mce, ctx,
sizeof(struct nix_rx_mce_s));
+ else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
+ memcpy(&rsp->prof, ctx,
+ sizeof(struct nix_bandprof_s));
}
}
@@ -832,6 +1122,98 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
return 0;
}
+static int rvu_nix_verify_aq_ctx(struct rvu *rvu, struct nix_hw *nix_hw,
+ struct nix_aq_enq_req *req, u8 ctype)
+{
+ struct nix_cn10k_aq_enq_req aq_req;
+ struct nix_cn10k_aq_enq_rsp aq_rsp;
+ int rc, word;
+
+ if (req->ctype != NIX_AQ_CTYPE_CQ)
+ return 0;
+
+ rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
+ req->hdr.pcifunc, ctype, req->qidx);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to fetch %s%d context of PFFUNC 0x%x\n",
+ __func__, nix_get_ctx_name(ctype), req->qidx,
+ req->hdr.pcifunc);
+ return rc;
+ }
+
+ /* Make copy of original context & mask which are required
+ * for resubmission
+ */
+ memcpy(&aq_req.cq_mask, &req->cq_mask, sizeof(struct nix_cq_ctx_s));
+ memcpy(&aq_req.cq, &req->cq, sizeof(struct nix_cq_ctx_s));
+
+ /* exclude fields which HW can update */
+ aq_req.cq_mask.cq_err = 0;
+ aq_req.cq_mask.wrptr = 0;
+ aq_req.cq_mask.tail = 0;
+ aq_req.cq_mask.head = 0;
+ aq_req.cq_mask.avg_level = 0;
+ aq_req.cq_mask.update_time = 0;
+ aq_req.cq_mask.substream = 0;
+
+ /* The context mask (cq_mask) holds the mask value of the fields
+ * changed by the AQ WRITE operation, for example:
+ *   cq.drop = 0xa;
+ *   cq_mask.drop = 0xff;
+ * The logic below ANDs both cq and cq_mask so that non-updated
+ * fields are masked out for the request vs. response comparison.
+ */
+ for (word = 0; word < sizeof(struct nix_cq_ctx_s) / sizeof(u64);
+ word++) {
+ *(u64 *)((u8 *)&aq_rsp.cq + word * 8) &=
+ (*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
+ *(u64 *)((u8 *)&aq_req.cq + word * 8) &=
+ (*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
+ }
+
+ if (memcmp(&aq_req.cq, &aq_rsp.cq, sizeof(struct nix_cq_ctx_s)))
+ return NIX_AF_ERR_AQ_CTX_RETRY_WRITE;
+
+ return 0;
+}
+
+static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
+ struct nix_aq_enq_rsp *rsp)
+{
+ struct nix_hw *nix_hw;
+ int err, retries = 5;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
+ if (blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return NIX_AF_ERR_INVALID_NIXBLK;
+
+retry:
+ err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp);
+
+ /* HW errata: 'AQ modification to CQ could be discarded on heavy traffic'.
+ * As a workaround, perform a CQ context read after each AQ write. If the
+ * read shows the AQ write was not applied, perform the AQ write again.
+ */
+ if (!err && req->op == NIX_AQ_INSTOP_WRITE) {
+ err = rvu_nix_verify_aq_ctx(rvu, nix_hw, req, NIX_AQ_CTYPE_CQ);
+ if (err == NIX_AF_ERR_AQ_CTX_RETRY_WRITE) {
+ if (retries--)
+ goto retry;
+ else
+ return NIX_AF_ERR_CQ_CTX_WRITE_ERR;
+ }
+ }
+
+ return err;
+}
+
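As a side note on the two functions above: AND-ing both the written context and the read-back context with the same cq_mask restricts the memcmp (and hence the retry) to the fields the AQ WRITE actually changed. A minimal standalone sketch of that idea, using hypothetical structures rather than the driver's nix_cq_ctx_s:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Hypothetical two-word "context", standing in for struct nix_cq_ctx_s */
struct demo_ctx {
	uint64_t w[2];
};

/* Return 0 when every masked field of 'want' is present in 'got' */
static int masked_compare(struct demo_ctx want, struct demo_ctx got,
			  struct demo_ctx mask)
{
	int i;

	for (i = 0; i < 2; i++) {
		want.w[i] &= mask.w[i];	/* keep only the fields we wrote */
		got.w[i] &= mask.w[i];	/* ignore fields HW may update */
	}
	return memcmp(&want, &got, sizeof(want));
}

int main(void)
{
	struct demo_ctx req  = { { 0x0a, 0x0000 } };	/* e.g. cq.drop = 0xa */
	struct demo_ctx mask = { { 0xff, 0x0000 } };	/* e.g. cq_mask.drop = 0xff */
	struct demo_ctx hw   = { { 0x0a, 0x1234 } };	/* HW-owned word differs */

	/* Differences outside the mask do not force a retry */
	printf("retry needed: %s\n", masked_compare(req, hw, mask) ? "yes" : "no");
	return 0;
}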
static const char *nix_get_ctx_name(int ctype)
{
switch (ctype) {
@@ -947,6 +1329,17 @@ int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
return rvu_nix_aq_enq_inst(rvu, req, rsp);
}
#endif
+EXPORT_SYMBOL(rvu_mbox_handler_nix_aq_enq);
+
+/* CN10K mbox handler */
+int rvu_mbox_handler_nix_cn10k_aq_enq(struct rvu *rvu,
+ struct nix_cn10k_aq_enq_req *req,
+ struct nix_cn10k_aq_enq_rsp *rsp)
+{
+ return rvu_nix_aq_enq_inst(rvu, (struct nix_aq_enq_req *)req,
+ (struct nix_aq_enq_rsp *)rsp);
+}
+EXPORT_SYMBOL(rvu_mbox_handler_nix_cn10k_aq_enq);
int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
struct hwctx_disable_req *req,
@@ -960,10 +1353,10 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
struct nix_lf_alloc_rsp *rsp)
{
int nixlf, qints, hwctx_size, intf, err, rc = 0;
+ struct rvu_pfvf *pfvf, *parent_pf;
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
struct rvu_block *block;
- struct rvu_pfvf *pfvf;
u64 cfg, ctx_cfg;
int blkaddr;
@@ -973,6 +1366,7 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
if (req->way_mask)
req->way_mask &= 0xFFFF;
+ parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
pfvf = rvu_get_pfvf(rvu, pcifunc);
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (!pfvf->nixlf || blkaddr < 0)
@@ -1076,7 +1470,8 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
/* Initialize receive side scaling (RSS) */
hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz,
- req->rss_grps, hwctx_size, req->way_mask);
+ req->rss_grps, hwctx_size, req->way_mask,
+ !!(req->flags & NIX_LF_RSS_TAG_LSB_AS_ADDER));
if (err)
goto free_mem;
@@ -1130,17 +1525,32 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);
/* Configure pkind for TX parse config */
- cfg = NPC_TX_DEF_PKIND;
- rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);
+ if (rvu_cgx_is_pkind_config_permitted(rvu, pcifunc)) {
+ cfg = NPC_TX_DEF_PKIND;
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);
+ }
intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
- err = nix_interface_init(rvu, pcifunc, intf, nixlf);
+ if (is_sdp_pfvf(pcifunc))
+ intf = NIX_INTF_TYPE_SDP;
+
+ err = nix_interface_init(rvu, pcifunc, intf, nixlf, rsp,
+ !!(req->flags & NIX_LF_LBK_BLK_SEL));
if (err)
goto free_mem;
/* Disable NPC entries as NIXLF's contexts are not initialized yet */
rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
+ /* Configure RX VTAG Type 7 (strip) for vf vlan */
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, NIX_AF_LFX_RX_VTAG_TYPE7),
+ VTAGSIZE_T4 | VTAG_STRIP);
+ /* Configure RX VTAG Type 6 (strip) for fdsa */
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, NIX_AF_LFX_RX_VTAG_TYPE6),
+ VTAGSIZE_T4 | VTAG_STRIP | VTAG_CAPTURE);
+
goto exit;
free_mem:
@@ -1168,10 +1578,15 @@ exit:
cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
rsp->qints = ((cfg >> 12) & 0xFFF);
rsp->cints = ((cfg >> 24) & 0xFFF);
+ rsp->hw_rx_tstamp_en = parent_pf->hw_rx_tstamp_en;
+ rsp->cgx_links = hw->cgx_links;
+ rsp->lbk_links = hw->lbk_links;
+ rsp->sdp_links = hw->sdp_links;
+
return rc;
}
-int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req,
struct msg_rsp *rsp)
{
struct rvu_hwinfo *hw = rvu->hw;
@@ -1190,6 +1605,15 @@ int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct msg_req *req,
if (nixlf < 0)
return NIX_AF_ERR_AF_LF_INVALID;
+ if (req->flags & NIX_LF_DISABLE_FLOWS)
+ rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
+ else
+ rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
+
+ /* Free any tx vtag def entries used by this NIX LF */
+ if (!(req->flags & NIX_LF_DONT_FREE_TX_VTAG))
+ nix_free_tx_vtag_entries(rvu, pcifunc);
+
nix_interface_deinit(rvu, pcifunc, nixlf);
/* Reset this NIX LF */
@@ -1222,7 +1646,7 @@ int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
- return -EINVAL;
+ return NIX_AF_ERR_INVALID_NIXBLK;
cfg = (((u32)req->offset & 0x7) << 16) |
(((u32)req->y_mask & 0xF) << 12) |
@@ -1240,12 +1664,104 @@ int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
return 0;
}
+/* Handle shaper update specially for a few revisions */
+static bool
+handle_txschq_shaper_update(struct rvu *rvu, int blkaddr, int nixlf,
+ int lvl, u64 reg, u64 regval)
+{
+ u64 regbase, oldval, sw_xoff = 0;
+ u64 dbgval, md_debug0 = 0;
+ unsigned long poll_tmo;
+ bool rate_reg = 0;
+ u32 schq;
+
+ regbase = reg & 0xFFFF;
+ schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
+
+ /* Check for rate register */
+ switch (lvl) {
+ case NIX_TXSCH_LVL_TL1:
+ md_debug0 = NIX_AF_TL1X_MD_DEBUG0(schq);
+ sw_xoff = NIX_AF_TL1X_SW_XOFF(schq);
+
+ rate_reg = !!(regbase == NIX_AF_TL1X_CIR(0));
+ break;
+ case NIX_TXSCH_LVL_TL2:
+ md_debug0 = NIX_AF_TL2X_MD_DEBUG0(schq);
+ sw_xoff = NIX_AF_TL2X_SW_XOFF(schq);
+
+ rate_reg = (regbase == NIX_AF_TL2X_CIR(0) ||
+ regbase == NIX_AF_TL2X_PIR(0));
+ break;
+ case NIX_TXSCH_LVL_TL3:
+ md_debug0 = NIX_AF_TL3X_MD_DEBUG0(schq);
+ sw_xoff = NIX_AF_TL3X_SW_XOFF(schq);
+
+ rate_reg = (regbase == NIX_AF_TL3X_CIR(0) ||
+ regbase == NIX_AF_TL3X_PIR(0));
+ break;
+ case NIX_TXSCH_LVL_TL4:
+ md_debug0 = NIX_AF_TL4X_MD_DEBUG0(schq);
+ sw_xoff = NIX_AF_TL4X_SW_XOFF(schq);
+
+ rate_reg = (regbase == NIX_AF_TL4X_CIR(0) ||
+ regbase == NIX_AF_TL4X_PIR(0));
+ break;
+ case NIX_TXSCH_LVL_MDQ:
+ sw_xoff = NIX_AF_MDQX_SW_XOFF(schq);
+ rate_reg = (regbase == NIX_AF_MDQX_CIR(0) ||
+ regbase == NIX_AF_MDQX_PIR(0));
+ break;
+ }
+
+ if (!rate_reg)
+ return false;
+
+ /* Nothing special to do when state is not toggled */
+ oldval = rvu_read64(rvu, blkaddr, reg);
+ if ((oldval & 0x1) == (regval & 0x1)) {
+ rvu_write64(rvu, blkaddr, reg, regval);
+ return true;
+ }
+
+ /* PIR/CIR disable */
+ if (!(regval & 0x1)) {
+ rvu_write64(rvu, blkaddr, sw_xoff, 1);
+ rvu_write64(rvu, blkaddr, reg, 0);
+ udelay(4);
+ rvu_write64(rvu, blkaddr, sw_xoff, 0);
+ return true;
+ }
+
+ /* PIR/CIR enable */
+ rvu_write64(rvu, blkaddr, sw_xoff, 1);
+ if (md_debug0) {
+ poll_tmo = jiffies + usecs_to_jiffies(10000);
+ /* Wait until VLD(bit32) == 1 or C_CON(bit48) == 0 */
+ do {
+ if (time_after(jiffies, poll_tmo)) {
+ dev_err(rvu->dev,
+ "NIXLF%d: TLX%u(lvl %u) CIR/PIR enable failed\n",
+ nixlf, schq, lvl);
+ goto exit;
+ }
+ usleep_range(1, 5);
+ dbgval = rvu_read64(rvu, blkaddr, md_debug0);
+ } while (!(dbgval & BIT_ULL(32)) && (dbgval & BIT_ULL(48)));
+ }
+ rvu_write64(rvu, blkaddr, reg, regval);
+exit:
+ rvu_write64(rvu, blkaddr, sw_xoff, 0);
+ return true;
+}
+
/* Disable shaping of pkts by a scheduler queue
* at a given scheduler level.
*/
static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
- int lvl, int schq)
+ int nixlf, int lvl, int schq)
{
+ struct rvu_hwinfo *hw = rvu->hw;
u64 cir_reg = 0, pir_reg = 0;
u64 cfg;
@@ -1266,6 +1782,21 @@ static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
cir_reg = NIX_AF_TL4X_CIR(schq);
pir_reg = NIX_AF_TL4X_PIR(schq);
break;
+ case NIX_TXSCH_LVL_MDQ:
+ cir_reg = NIX_AF_MDQX_CIR(schq);
+ pir_reg = NIX_AF_MDQX_PIR(schq);
+ break;
+ }
+
+ /* Shaper state toggle needs wait/poll */
+ if (hw->cap.nix_shaper_toggle_wait) {
+ if (cir_reg)
+ handle_txschq_shaper_update(rvu, blkaddr, nixlf,
+ lvl, cir_reg, 0);
+ if (pir_reg)
+ handle_txschq_shaper_update(rvu, blkaddr, nixlf,
+ lvl, pir_reg, 0);
+ return;
}
if (!cir_reg)
@@ -1283,6 +1814,7 @@ static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
int lvl, int schq)
{
struct rvu_hwinfo *hw = rvu->hw;
+ int link_level;
int link;
if (lvl >= hw->cap.nix_tx_aggr_lvl)
@@ -1292,7 +1824,9 @@ static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
if (lvl == NIX_TXSCH_LVL_TL4)
rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);
- if (lvl != NIX_TXSCH_LVL_TL2)
+ link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
+ NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
+ if (lvl != link_level)
return;
/* Reset TL2's CGX or LBK link config */
@@ -1301,6 +1835,40 @@ static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
}
+static void nix_clear_tx_xoff(struct rvu *rvu, int blkaddr,
+ int lvl, int schq)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u64 reg;
+
+ /* Skip this if shaping is not supported */
+ if (!hw->cap.nix_shaping)
+ return;
+
+ /* Clear level specific SW_XOFF */
+ switch (lvl) {
+ case NIX_TXSCH_LVL_TL1:
+ reg = NIX_AF_TL1X_SW_XOFF(schq);
+ break;
+ case NIX_TXSCH_LVL_TL2:
+ reg = NIX_AF_TL2X_SW_XOFF(schq);
+ break;
+ case NIX_TXSCH_LVL_TL3:
+ reg = NIX_AF_TL3X_SW_XOFF(schq);
+ break;
+ case NIX_TXSCH_LVL_TL4:
+ reg = NIX_AF_TL4X_SW_XOFF(schq);
+ break;
+ case NIX_TXSCH_LVL_MDQ:
+ reg = NIX_AF_MDQX_SW_XOFF(schq);
+ break;
+ default:
+ return;
+ }
+
+ rvu_write64(rvu, blkaddr, reg, 0x0);
+}
+
static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc)
{
struct rvu_hwinfo *hw = rvu->hw;
@@ -1374,7 +1942,8 @@ static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc,
free_cnt = rvu_rsrc_free_count(&txsch->schq);
}
- if (free_cnt < req_schq || req_schq > MAX_TXSCHQ_PER_FUNC)
+ if (free_cnt < req_schq || req->schq[lvl] > MAX_TXSCHQ_PER_FUNC ||
+ req->schq_contig[lvl] > MAX_TXSCHQ_PER_FUNC)
return NIX_AF_ERR_TLX_ALLOC_FAIL;
/* If contiguous queues are needed, check for availability */
@@ -1478,22 +2047,29 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
int link, blkaddr, rc = 0;
int lvl, idx, start, end;
struct nix_txsch *txsch;
- struct rvu_pfvf *pfvf;
struct nix_hw *nix_hw;
u32 *pfvf_map;
+ int nixlf;
u16 schq;
- pfvf = rvu_get_pfvf(rvu, pcifunc);
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
- if (!pfvf->nixlf || blkaddr < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
+ rc = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
+ if (rc)
+ return rc;
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
- return -EINVAL;
+ return NIX_AF_ERR_INVALID_NIXBLK;
mutex_lock(&rvu->rsrc_lock);
+ /* Check if request can be accommodated as per limits set by admin */
+ if (!hw->cap.nix_fixed_txschq_mapping &&
+ rvu_check_txsch_policy(rvu, req, pcifunc)) {
+ dev_err(rvu->dev, "Func 0x%x: TXSCH policy check failed\n",
+ pcifunc);
+ goto err;
+ }
+
/* Check if request is valid as per HW capabilities
* and can be accommodated.
*/
@@ -1535,7 +2111,7 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
NIX_TXSCHQ_CFG_DONE))
pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
- nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
+ nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
}
for (idx = 0; idx < req->schq[lvl]; idx++) {
@@ -1544,7 +2120,7 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
NIX_TXSCHQ_CFG_DONE))
pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
- nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
+ nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
}
}
@@ -1561,8 +2137,8 @@ exit:
return rc;
}
-static void nix_smq_flush(struct rvu *rvu, int blkaddr,
- int smq, u16 pcifunc, int nixlf)
+static int nix_smq_flush(struct rvu *rvu, int blkaddr,
+ int smq, u16 pcifunc, int nixlf)
{
int pf = rvu_get_pf(pcifunc);
u8 cgx_id = 0, lmac_id = 0;
@@ -1572,8 +2148,8 @@ static void nix_smq_flush(struct rvu *rvu, int blkaddr,
/* enable cgx tx if disabled */
if (is_pf_cgxmapped(rvu, pf)) {
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
- restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu),
- lmac_id, true);
+ restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu),
+ lmac_id, true);
}
cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
@@ -1586,6 +2162,8 @@ static void nix_smq_flush(struct rvu *rvu, int blkaddr,
*/
rvu_cgx_enadis_rx_bp(rvu, pf, false);
+ rvu_smqvf_xmit(rvu);
+
/* Wait for flush to complete */
err = rvu_poll_reg(rvu, blkaddr,
NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
@@ -1596,7 +2174,8 @@ static void nix_smq_flush(struct rvu *rvu, int blkaddr,
rvu_cgx_enadis_rx_bp(rvu, pf, true);
/* restore cgx tx state */
if (restore_tx_en)
- cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
+ rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
+ return err;
}
static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
@@ -1605,6 +2184,7 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
struct rvu_hwinfo *hw = rvu->hw;
struct nix_txsch *txsch;
struct nix_hw *nix_hw;
+ u16 map_func;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (blkaddr < 0)
@@ -1612,25 +2192,42 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
- return -EINVAL;
+ return NIX_AF_ERR_INVALID_NIXBLK;
nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
if (nixlf < 0)
return NIX_AF_ERR_AF_LF_INVALID;
- /* Disable TL2/3 queue links before SMQ flush*/
+ /* Disable TL2/3 queue links and all XOFFs before SMQ flush */
mutex_lock(&rvu->rsrc_lock);
- for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
- if (lvl != NIX_TXSCH_LVL_TL2 && lvl != NIX_TXSCH_LVL_TL4)
+ for (lvl = NIX_TXSCH_LVL_MDQ; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ txsch = &nix_hw->txsch[lvl];
+
+ if (lvl >= hw->cap.nix_tx_aggr_lvl)
continue;
- txsch = &nix_hw->txsch[lvl];
for (schq = 0; schq < txsch->schq.max; schq++) {
if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
continue;
nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
+ nix_clear_tx_xoff(rvu, blkaddr, lvl, schq);
}
}
+ nix_clear_tx_xoff(rvu, blkaddr, NIX_TXSCH_LVL_TL1,
+ nix_get_tx_link(rvu, pcifunc));
+
+ /* On PF cleanup, clear cfg done flag as
+ * PF would have changed default config.
+ */
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK)) {
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL1];
+ schq = nix_get_tx_link(rvu, pcifunc);
+ /* Do not clear pcifunc in txsch->pfvf_map[schq] because
+ * VF might be using this TL1 queue
+ */
+ map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
+ txsch->pfvf_map[schq] = TXSCH_SET_FLAG(map_func, 0x0);
+ }
/* Flush SMQs */
txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
@@ -1658,9 +2255,7 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
}
mutex_unlock(&rvu->rsrc_lock);
- /* Sync cached info for this LF in NDC-TX to LLC/DRAM */
- rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
- err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true);
+ err = rvu_ndc_sync(rvu, blkaddr, nixlf, NIX_AF_NDC_TX_SYNC);
if (err)
dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);
@@ -1676,6 +2271,7 @@ static int nix_txschq_free_one(struct rvu *rvu,
struct nix_txsch *txsch;
struct nix_hw *nix_hw;
u32 *pfvf_map;
+ int rc;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (blkaddr < 0)
@@ -1683,7 +2279,7 @@ static int nix_txschq_free_one(struct rvu *rvu,
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
- return -EINVAL;
+ return NIX_AF_ERR_INVALID_NIXBLK;
nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
if (nixlf < 0)
@@ -1700,15 +2296,24 @@ static int nix_txschq_free_one(struct rvu *rvu,
mutex_lock(&rvu->rsrc_lock);
if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) {
- mutex_unlock(&rvu->rsrc_lock);
+ rc = NIX_AF_ERR_TLX_INVALID;
goto err;
}
+ /* Clear SW_XOFF of this resource only.
+ * At the SMQ level, all XOFFs along the path
+ * need to be cleared by the user.
+ */
+ nix_clear_tx_xoff(rvu, blkaddr, lvl, schq);
+
/* Flush if it is a SMQ. Onus of disabling
* TL2/3 queue links before SMQ flush is on user
*/
- if (lvl == NIX_TXSCH_LVL_SMQ)
- nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
+ if (lvl == NIX_TXSCH_LVL_SMQ &&
+ nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf)) {
+ rc = NIX_AF_SMQ_FLUSH_FAILED;
+ goto err;
+ }
/* Free the resource */
rvu_free_rsrc(&txsch->schq, schq);
@@ -1716,7 +2321,8 @@ static int nix_txschq_free_one(struct rvu *rvu,
mutex_unlock(&rvu->rsrc_lock);
return 0;
err:
- return NIX_AF_ERR_TLX_INVALID;
+ mutex_unlock(&rvu->rsrc_lock);
+ return rc;
}
int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
@@ -1799,6 +2405,11 @@ static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg)
regbase == NIX_AF_TL4X_PIR(0))
return false;
break;
+ case NIX_TXSCH_LVL_MDQ:
+ if (regbase == NIX_AF_MDQX_CIR(0) ||
+ regbase == NIX_AF_MDQX_PIR(0))
+ return false;
+ break;
}
return true;
}
@@ -1816,19 +2427,88 @@ static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
return;
rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq),
(TXSCH_TL1_DFLT_RR_PRIO << 1));
- rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
- TXSCH_TL1_DFLT_RR_QTM);
+
+ /* On OcteonTx2 the config was in bytes; on newer silicons
+ * it's changed to weight.
+ */
+ if (!rvu->hw->cap.nix_common_dwrr_mtu)
+ rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
+ TXSCH_TL1_DFLT_RR_QTM);
+ else
+ rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
+ CN10K_MAX_DWRR_WEIGHT);
+
rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00);
pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE);
}
+/* Register offset - [15:0]
+ * Scheduler Queue number - [25:16]
+ */
+#define NIX_TX_SCHQ_MASK GENMASK_ULL(25, 0)
+
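A quick standalone illustration of the encoding documented above (the mask mirrors NIX_TX_SCHQ_MASK; the sample register value is hypothetical):

#include <stdint.h>
#include <stdio.h>

#define DEMO_TX_SCHQ_MASK 0x3FFFFFFULL	/* bits [25:0], i.e. GENMASK_ULL(25, 0) */

int main(void)
{
	/* Hypothetical mailbox value: register offset 0x1220 for scheduler queue 5 */
	uint64_t reg = ((uint64_t)5 << 16) | 0x1220;

	reg &= DEMO_TX_SCHQ_MASK;		/* drop any stray upper bits */
	printf("regbase 0x%llx, schq %llu\n",
	       (unsigned long long)(reg & 0xFFFF),	/* register offset [15:0] */
	       (unsigned long long)(reg >> 16));	/* scheduler queue [25:16] */
	return 0;
}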
+static int nix_txschq_cfg_read(struct rvu *rvu, struct nix_hw *nix_hw,
+ int blkaddr, struct nix_txschq_config *req,
+ struct nix_txschq_config *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ int idx, schq;
+ u64 reg;
+
+ rvu_nix_txsch_lock(nix_hw);
+ for (idx = 0; idx < req->num_regs; idx++) {
+ reg = req->reg[idx];
+ reg &= NIX_TX_SCHQ_MASK;
+ schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
+ if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, req->lvl, reg) ||
+ !is_valid_txschq(rvu, blkaddr, req->lvl, pcifunc, schq)) {
+ rvu_nix_txsch_unlock(nix_hw);
+ return NIX_AF_INVAL_TXSCHQ_CFG;
+ }
+ rsp->regval[idx] = rvu_read64(rvu, blkaddr, reg);
+ }
+ rsp->lvl = req->lvl;
+ rsp->num_regs = req->num_regs;
+ rvu_nix_txsch_unlock(nix_hw);
+ return 0;
+}
+
+static void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr,
+ u16 pcifunc, struct nix_txsch *txsch)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int lbk_link_start, lbk_links;
+ u8 pf = rvu_get_pf(pcifunc);
+ int schq;
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return;
+
+ lbk_link_start = hw->cgx_links;
+
+ for (schq = 0; schq < txsch->schq.max; schq++) {
+ if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
+ continue;
+ /* Enable all LBK links with channel 63 by default so that
+ * packets can be sent to LBK with an NPC TX MCAM rule
+ */
+ lbk_links = hw->lbk_links;
+ while (lbk_links--)
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TL3_TL2X_LINKX_CFG(schq,
+ lbk_link_start +
+ lbk_links),
+ BIT_ULL(12) | RVU_SWITCH_LBK_CHAN);
+ }
+}
+
int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
struct nix_txschq_config *req,
- struct msg_rsp *rsp)
+ struct nix_txschq_config *rsp)
{
+ u64 reg, val, regval, schq_regbase, val_mask;
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
- u64 reg, regval, schq_regbase;
struct nix_txsch *txsch;
struct nix_hw *nix_hw;
int blkaddr, idx, err;
@@ -1845,7 +2525,10 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
- return -EINVAL;
+ return NIX_AF_ERR_INVALID_NIXBLK;
+
+ if (req->read)
+ return nix_txschq_cfg_read(rvu, nix_hw, blkaddr, req, rsp);
txsch = &nix_hw->txsch[req->lvl];
pfvf_map = txsch->pfvf_map;
@@ -1859,19 +2542,33 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
return 0;
}
+ rvu_nix_txsch_lock(nix_hw);
for (idx = 0; idx < req->num_regs; idx++) {
reg = req->reg[idx];
+ reg &= NIX_TX_SCHQ_MASK;
regval = req->regval[idx];
schq_regbase = reg & 0xFFFF;
+ val_mask = req->regval_mask[idx];
if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr,
- txsch->lvl, reg, regval))
+ txsch->lvl, reg, regval)) {
+ rvu_nix_txsch_unlock(nix_hw);
return NIX_AF_INVAL_TXSCHQ_CFG;
+ }
/* Check if shaping and coloring is supported */
if (!is_txschq_shaping_valid(hw, req->lvl, reg))
continue;
+ val = rvu_read64(rvu, blkaddr, reg);
+ regval = (val & val_mask) | (regval & ~val_mask);
+
+ /* Handle shaping state toggle specially */
+ if (hw->cap.nix_shaper_toggle_wait &&
+ handle_txschq_shaper_update(rvu, blkaddr, nixlf,
+ req->lvl, reg, regval))
+ continue;
+
/* Replace PF/VF visible NIXLF slot with HW NIXLF id */
if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
@@ -1910,6 +2607,10 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
rvu_write64(rvu, blkaddr, reg, regval);
}
+ rvu_nix_tx_tl2_cfg(rvu, blkaddr, pcifunc,
+ &nix_hw->txsch[NIX_TXSCH_LVL_TL2]);
+ rvu_nix_txsch_config_changed(nix_hw);
+ rvu_nix_txsch_unlock(nix_hw);
return 0;
}
@@ -1918,9 +2619,14 @@ static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
{
u64 regval = req->vtag_size;
- if (req->rx.vtag_type > 7 || req->vtag_size > VTAGSIZE_T8)
+ if (req->rx.vtag_type > NIX_AF_LFX_RX_VTAG_TYPE7 ||
+ req->vtag_size > VTAGSIZE_T8)
return -EINVAL;
+ /* RX VTAG Types 7 and 6 are reserved for VF VLAN & FDSA tag strip */
+ if (req->rx.vtag_type >= NIX_AF_LFX_RX_VTAG_TYPE6)
+ return NIX_AF_ERR_RX_VTAG_INUSE;
+
if (req->rx.capture_vtag)
regval |= BIT_ULL(5);
if (req->rx.strip_vtag)
@@ -1931,9 +2637,169 @@ static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
return 0;
}
+static int nix_tx_vtag_free(struct rvu *rvu, int blkaddr,
+ u16 pcifunc, int index)
+{
+ struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ struct nix_txvlan *vlan;
+
+ if (!nix_hw)
+ return NIX_AF_ERR_INVALID_NIXBLK;
+
+ vlan = &nix_hw->txvlan;
+ if (vlan->entry2pfvf_map[index] != pcifunc)
+ return NIX_AF_ERR_PARAM;
+
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TX_VTAG_DEFX_DATA(index), 0x0ull);
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TX_VTAG_DEFX_CTL(index), 0x0ull);
+
+ vlan->entry2pfvf_map[index] = 0;
+ rvu_free_rsrc(&vlan->rsrc, index);
+
+ return 0;
+}
+
+static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc)
+{
+ struct nix_txvlan *vlan;
+ struct nix_hw *nix_hw;
+ int index, blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return;
+
+ vlan = &nix_hw->txvlan;
+
+ mutex_lock(&vlan->rsrc_lock);
+ /* Scan all the entries and free the ones mapped to 'pcifunc' */
+ for (index = 0; index < vlan->rsrc.max; index++) {
+ if (vlan->entry2pfvf_map[index] == pcifunc)
+ nix_tx_vtag_free(rvu, blkaddr, pcifunc, index);
+ }
+ mutex_unlock(&vlan->rsrc_lock);
+}
+
+static int nix_tx_vtag_alloc(struct rvu *rvu, int blkaddr,
+ u64 vtag, u8 size)
+{
+ struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ struct nix_txvlan *vlan;
+ u64 regval;
+ int index;
+
+ if (!nix_hw)
+ return NIX_AF_ERR_INVALID_NIXBLK;
+
+ vlan = &nix_hw->txvlan;
+
+ mutex_lock(&vlan->rsrc_lock);
+
+ index = rvu_alloc_rsrc(&vlan->rsrc);
+ if (index < 0) {
+ mutex_unlock(&vlan->rsrc_lock);
+ return index;
+ }
+
+ mutex_unlock(&vlan->rsrc_lock);
+
+ regval = size ? vtag : vtag << 32;
+
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TX_VTAG_DEFX_DATA(index), regval);
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TX_VTAG_DEFX_CTL(index), size);
+
+ return index;
+}
+
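The 'regval = size ? vtag : vtag << 32' packing above is easy to misread. A minimal sketch of that reading (assuming, as the VTAGSIZE_T4/T8 usage suggests, that size 0 means a 4-byte tag placed in the upper word and non-zero means a full 8-byte tag):

#include <stdint.h>
#include <stdio.h>

/* 0 => 4-byte vtag, non-zero => 8-byte vtag (mirrors the VTAGSIZE_T4/T8 usage) */
static uint64_t pack_vtag(uint64_t vtag, uint8_t size)
{
	return size ? vtag : vtag << 32;
}

int main(void)
{
	/* A 4-byte 802.1Q tag (TPID 0x8100, VID 100) lands in the upper word */
	printf("T4 data: 0x%016llx\n", (unsigned long long)pack_vtag(0x81000064ULL, 0));
	/* An 8-byte double tag is written as-is */
	printf("T8 data: 0x%016llx\n", (unsigned long long)pack_vtag(0x88A8006481000064ULL, 1));
	return 0;
}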
+static int nix_tx_vtag_decfg(struct rvu *rvu, int blkaddr,
+ struct nix_vtag_config *req)
+{
+ struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ u16 pcifunc = req->hdr.pcifunc;
+ int idx0 = req->tx.vtag0_idx;
+ int idx1 = req->tx.vtag1_idx;
+ struct nix_txvlan *vlan;
+ int err = 0;
+
+ if (!nix_hw)
+ return NIX_AF_ERR_INVALID_NIXBLK;
+
+ vlan = &nix_hw->txvlan;
+ if (req->tx.free_vtag0 && req->tx.free_vtag1)
+ if (vlan->entry2pfvf_map[idx0] != pcifunc ||
+ vlan->entry2pfvf_map[idx1] != pcifunc)
+ return NIX_AF_ERR_PARAM;
+
+ mutex_lock(&vlan->rsrc_lock);
+
+ if (req->tx.free_vtag0) {
+ err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx0);
+ if (err)
+ goto exit;
+ }
+
+ if (req->tx.free_vtag1)
+ err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx1);
+
+exit:
+ mutex_unlock(&vlan->rsrc_lock);
+ return err;
+}
+
+static int nix_tx_vtag_cfg(struct rvu *rvu, int blkaddr,
+ struct nix_vtag_config *req,
+ struct nix_vtag_config_rsp *rsp)
+{
+ struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ struct nix_txvlan *vlan;
+ u16 pcifunc = req->hdr.pcifunc;
+
+ if (!nix_hw)
+ return NIX_AF_ERR_INVALID_NIXBLK;
+
+ vlan = &nix_hw->txvlan;
+ if (req->tx.cfg_vtag0) {
+ rsp->vtag0_idx =
+ nix_tx_vtag_alloc(rvu, blkaddr,
+ req->tx.vtag0, req->vtag_size);
+
+ if (rsp->vtag0_idx < 0)
+ return NIX_AF_ERR_TX_VTAG_NOSPC;
+
+ vlan->entry2pfvf_map[rsp->vtag0_idx] = pcifunc;
+ }
+
+ if (req->tx.cfg_vtag1) {
+ rsp->vtag1_idx =
+ nix_tx_vtag_alloc(rvu, blkaddr,
+ req->tx.vtag1, req->vtag_size);
+
+ if (rsp->vtag1_idx < 0)
+ goto err_free;
+
+ vlan->entry2pfvf_map[rsp->vtag1_idx] = pcifunc;
+ }
+
+ return 0;
+
+err_free:
+ if (req->tx.cfg_vtag0)
+ nix_tx_vtag_free(rvu, blkaddr, pcifunc, rsp->vtag0_idx);
+
+ return NIX_AF_ERR_TX_VTAG_NOSPC;
+}
+
int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
struct nix_vtag_config *req,
- struct msg_rsp *rsp)
+ struct nix_vtag_config_rsp *rsp)
{
u16 pcifunc = req->hdr.pcifunc;
int blkaddr, nixlf, err;
@@ -1943,19 +2809,28 @@ int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
return err;
if (req->cfg_type) {
+ /* rx vtag configuration */
err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
if (err)
return NIX_AF_ERR_PARAM;
} else {
- /* TODO: handle tx vtag configuration */
- return 0;
+ /* tx vtag configuration */
+ if ((req->tx.cfg_vtag0 || req->tx.cfg_vtag1) &&
+ (req->tx.free_vtag0 || req->tx.free_vtag1))
+ return NIX_AF_ERR_PARAM;
+
+ if (req->tx.cfg_vtag0 || req->tx.cfg_vtag1)
+ return nix_tx_vtag_cfg(rvu, blkaddr, req, rsp);
+
+ if (req->tx.free_vtag0 || req->tx.free_vtag1)
+ return nix_tx_vtag_decfg(rvu, blkaddr, req);
}
return 0;
}
-static int nix_setup_mce(struct rvu *rvu, int mce, u8 op,
- u16 pcifunc, int next, bool eol)
+static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
+ int mce, u8 op, u16 pcifunc, int next, bool eol)
{
struct nix_aq_enq_req aq_req;
int err;
@@ -1965,8 +2840,8 @@ static int nix_setup_mce(struct rvu *rvu, int mce, u8 op,
aq_req.op = op;
aq_req.qidx = mce;
- /* Forward bcast pkts to RQ0, RSS not needed */
- aq_req.mce.op = 0;
+ /* Use RSS with RSS index 0 */
+ aq_req.mce.op = 1;
aq_req.mce.index = 0;
aq_req.mce.eol = eol;
aq_req.mce.pf_func = pcifunc;
@@ -1975,7 +2850,7 @@ static int nix_setup_mce(struct rvu *rvu, int mce, u8 op,
/* All fields valid */
*(u64 *)(&aq_req.mce_mask) = ~0ULL;
- err = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
+ err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL);
if (err) {
dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
@@ -1984,8 +2859,8 @@ static int nix_setup_mce(struct rvu *rvu, int mce, u8 op,
return 0;
}
-static int nix_update_mce_list(struct nix_mce_list *mce_list,
- u16 pcifunc, bool add)
+static int nix_update_mce_list_entry(struct nix_mce_list *mce_list,
+ u16 pcifunc, bool add)
{
struct mce *mce, *tail = NULL;
bool delete = false;
@@ -1996,6 +2871,9 @@ static int nix_update_mce_list(struct nix_mce_list *mce_list,
if (mce->pcifunc == pcifunc && !add) {
delete = true;
break;
+ } else if (mce->pcifunc == pcifunc && add) {
+ /* entry already exists */
+ return 0;
}
tail = mce;
}
@@ -2023,36 +2901,23 @@ static int nix_update_mce_list(struct nix_mce_list *mce_list,
return 0;
}
-int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
+int nix_update_mce_list(struct rvu *rvu, u16 pcifunc,
+ struct nix_mce_list *mce_list,
+ int mce_idx, int mcam_index, bool add)
{
- int err = 0, idx, next_idx, last_idx;
- struct nix_mce_list *mce_list;
+ int err = 0, idx, next_idx, last_idx, blkaddr, npc_blkaddr;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
struct nix_mcast *mcast;
struct nix_hw *nix_hw;
- struct rvu_pfvf *pfvf;
struct mce *mce;
- int blkaddr;
-
- /* Broadcast pkt replication is not needed for AF's VFs, hence skip */
- if (is_afvf(pcifunc))
- return 0;
-
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
- if (blkaddr < 0)
- return 0;
-
- nix_hw = get_nix_hw(rvu->hw, blkaddr);
- if (!nix_hw)
- return 0;
- mcast = &nix_hw->mcast;
+ if (!mce_list)
+ return -EINVAL;
/* Get this PF/VF func's MCE index */
- pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
- idx = pfvf->bcast_mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);
+ idx = mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);
- mce_list = &pfvf->bcast_mce_list;
- if (idx > (pfvf->bcast_mce_idx + mce_list->max)) {
+ if (idx > (mce_idx + mce_list->max)) {
dev_err(rvu->dev,
"%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
__func__, idx, mce_list->max,
@@ -2060,20 +2925,26 @@ int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
return -EINVAL;
}
+ err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return err;
+
+ mcast = &nix_hw->mcast;
mutex_lock(&mcast->mce_lock);
- err = nix_update_mce_list(mce_list, pcifunc, add);
+ err = nix_update_mce_list_entry(mce_list, pcifunc, add);
if (err)
goto end;
/* Disable MCAM entry in NPC */
if (!mce_list->count) {
- rvu_npc_enable_bcast_entry(rvu, pcifunc, false);
+ npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, mcam_index, false);
goto end;
}
/* Dump the updated list to HW */
- idx = pfvf->bcast_mce_idx;
+ idx = mce_idx;
last_idx = idx + mce_list->count - 1;
hlist_for_each_entry(mce, &mce_list->head, node) {
if (idx > last_idx)
@@ -2081,9 +2952,9 @@ int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
next_idx = idx + 1;
/* EOL should be set in last MCE */
- err = nix_setup_mce(rvu, idx, NIX_AQ_INSTOP_WRITE,
- mce->pcifunc, next_idx,
- (next_idx > last_idx) ? true : false);
+ err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE,
+ mce->pcifunc, next_idx,
+ (next_idx > last_idx) ? true : false);
if (err)
goto end;
idx++;
@@ -2094,7 +2965,76 @@ end:
return err;
}
-static int nix_setup_bcast_tables(struct rvu *rvu, struct nix_hw *nix_hw)
+void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type,
+ struct nix_mce_list **mce_list, int *mce_idx)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_pfvf *pfvf;
+
+ if (!hw->cap.nix_rx_multicast ||
+ !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc & ~RVU_PFVF_FUNC_MASK))) {
+ *mce_list = NULL;
+ *mce_idx = 0;
+ return;
+ }
+
+ /* Get this PF/VF func's MCE index */
+ pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
+
+ if (type == NIXLF_BCAST_ENTRY) {
+ *mce_list = &pfvf->bcast_mce_list;
+ *mce_idx = pfvf->bcast_mce_idx;
+ } else if (type == NIXLF_ALLMULTI_ENTRY) {
+ *mce_list = &pfvf->mcast_mce_list;
+ *mce_idx = pfvf->mcast_mce_idx;
+ } else if (type == NIXLF_PROMISC_ENTRY) {
+ *mce_list = &pfvf->promisc_mce_list;
+ *mce_idx = pfvf->promisc_mce_idx;
+ } else {
+ *mce_list = NULL;
+ *mce_idx = 0;
+ }
+}
+
+static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
+ int type, bool add)
+{
+ int err = 0, nixlf, blkaddr, mcam_index, mce_idx;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct nix_mce_list *mce_list;
+ int pf;
+
+ /* skip multicast pkt replication for AF's VFs & SDP links */
+ if (is_afvf(pcifunc) || is_sdp_pfvf(pcifunc))
+ return 0;
+
+ if (!hw->cap.nix_rx_multicast)
+ return 0;
+
+ pf = rvu_get_pf(pcifunc);
+ if (!is_pf_cgxmapped(rvu, pf))
+ return 0;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return -EINVAL;
+
+ nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+ if (nixlf < 0)
+ return -EINVAL;
+
+ nix_get_mce_list(rvu, pcifunc, type, &mce_list, &mce_idx);
+
+ mcam_index = npc_get_nixlf_mcam_index(mcam,
+ pcifunc & ~RVU_PFVF_FUNC_MASK,
+ nixlf, type);
+ err = nix_update_mce_list(rvu, pcifunc, mce_list,
+ mce_idx, mcam_index, add);
+ return err;
+}
+
+static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw)
{
struct nix_mcast *mcast = &nix_hw->mcast;
int err, pf, numvfs, idx;
@@ -2112,11 +3052,23 @@ static int nix_setup_bcast_tables(struct rvu *rvu, struct nix_hw *nix_hw)
numvfs = (cfg >> 12) & 0xFF;
pfvf = &rvu->pf[pf];
- /* Save the start MCE */
- pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
+ /* This NIX0/1 block mapped to PF ? */
+ if (pfvf->nix_blkaddr != nix_hw->blkaddr)
+ continue;
+
+ /* save start idx of broadcast mce list */
+ pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);
+ /* save start idx of multicast mce list */
+ pfvf->mcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
+ nix_mce_list_init(&pfvf->mcast_mce_list, numvfs + 1);
+
+ /* save the start idx of promisc mce list */
+ pfvf->promisc_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
+ nix_mce_list_init(&pfvf->promisc_mce_list, numvfs + 1);
+
for (idx = 0; idx < (numvfs + 1); idx++) {
/* idx-0 is for PF, followed by VFs */
pcifunc = (pf << RVU_PFVF_PF_SHIFT);
@@ -2126,9 +3078,26 @@ static int nix_setup_bcast_tables(struct rvu *rvu, struct nix_hw *nix_hw)
* Will be updated when a NIXLF is attached/detached to
* these PF/VFs.
*/
- err = nix_setup_mce(rvu, pfvf->bcast_mce_idx + idx,
- NIX_AQ_INSTOP_INIT,
- pcifunc, 0, true);
+ err = nix_blk_setup_mce(rvu, nix_hw,
+ pfvf->bcast_mce_idx + idx,
+ NIX_AQ_INSTOP_INIT,
+ pcifunc, 0, true);
+ if (err)
+ return err;
+
+ /* add dummy entries to multicast mce list */
+ err = nix_blk_setup_mce(rvu, nix_hw,
+ pfvf->mcast_mce_idx + idx,
+ NIX_AQ_INSTOP_INIT,
+ pcifunc, 0, true);
+ if (err)
+ return err;
+
+ /* add dummy entries to promisc mce list */
+ err = nix_blk_setup_mce(rvu, nix_hw,
+ pfvf->promisc_mce_idx + idx,
+ NIX_AQ_INSTOP_INIT,
+ pcifunc, 0, true);
if (err)
return err;
}
@@ -2177,7 +3146,32 @@ static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
mutex_init(&mcast->mce_lock);
- return nix_setup_bcast_tables(rvu, nix_hw);
+ return nix_setup_mce_tables(rvu, nix_hw);
+}
+
+static int nix_setup_txvlan(struct rvu *rvu, struct nix_hw *nix_hw)
+{
+ struct nix_txvlan *vlan = &nix_hw->txvlan;
+ int err;
+
+ /* Allocate resource bitmap for tx vtag def registers */
+ vlan->rsrc.max = NIX_TX_VTAG_DEF_MAX;
+ err = rvu_alloc_bitmap(&vlan->rsrc);
+ if (err)
+ return -ENOMEM;
+
+ /* Alloc memory for saving entry to RVU PFFUNC allocation mapping */
+ vlan->entry2pfvf_map = devm_kcalloc(rvu->dev, vlan->rsrc.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!vlan->entry2pfvf_map)
+ goto free_mem;
+
+ mutex_init(&vlan->rsrc_lock);
+ return 0;
+
+free_mem:
+ kfree(vlan->rsrc.bmap);
+ return -ENOMEM;
}
static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
@@ -2225,6 +3219,15 @@ static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
for (schq = 0; schq < txsch->schq.max; schq++)
txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
}
+
+ /* Setup a default value of 8192 as DWRR MTU */
+ if (rvu->hw->cap.nix_common_dwrr_mtu) {
+ rvu_write64(rvu, blkaddr, NIX_AF_DWRR_RPM_MTU,
+ convert_bytes_to_dwrr_mtu(8192));
+ rvu_write64(rvu, blkaddr, NIX_AF_DWRR_SDP_MTU,
+ convert_bytes_to_dwrr_mtu(8192));
+ }
+
return 0;
}
@@ -2279,6 +3282,63 @@ static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw,
return 0;
}
+static void rvu_get_lbk_link_max_frs(struct rvu *rvu, u16 *max_mtu)
+{
+ /* CN10K supports LBK FIFO size 72 KB */
+ if (rvu->hw->lbk_bufsize == 0x12000)
+ *max_mtu = CN10K_LBK_LINK_MAX_FRS;
+ else
+ *max_mtu = NIC_HW_MAX_FRS;
+}
+
+static void rvu_get_lmac_link_max_frs(struct rvu *rvu, u16 *max_mtu)
+{
+ /* RPM supports FIFO len 128 KB */
+ if (rvu_cgx_get_fifolen(rvu) == 0x20000)
+ *max_mtu = CN10K_LMAC_LINK_MAX_FRS;
+ else
+ *max_mtu = NIC_HW_MAX_FRS;
+}
+
+int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req,
+ struct nix_hw_info *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ u64 dwrr_mtu;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ rsp->vwqe_delay = 0;
+ if (!is_rvu_otx2(rvu))
+ rsp->vwqe_delay = rvu_read64(rvu, blkaddr, NIX_AF_VWQE_TIMER) &
+ GENMASK_ULL(9, 0);
+
+ if (is_afvf(pcifunc))
+ rvu_get_lbk_link_max_frs(rvu, &rsp->max_mtu);
+ else
+ rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu);
+
+ rsp->min_mtu = NIC_HW_MIN_FRS;
+
+ if (!rvu->hw->cap.nix_common_dwrr_mtu) {
+ /* Return '1' on OTx2 */
+ rsp->rpm_dwrr_mtu = 1;
+ rsp->sdp_dwrr_mtu = 1;
+ return 0;
+ }
+
+ dwrr_mtu = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_DWRR_RPM_MTU);
+ rsp->rpm_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);
+
+ dwrr_mtu = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_DWRR_SDP_MTU);
+ rsp->sdp_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);
+
+ return 0;
+}
+
int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
@@ -2324,6 +3384,8 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
struct nix_rx_flowkey_alg *field;
struct nix_rx_flowkey_alg tmp;
u32 key_type, valid_key;
+ int l4_key_offset = 0;
+ u32 l3_l4_src_dst;
if (!alg)
return -EINVAL;
@@ -2350,6 +3412,15 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
* group_member - Enabled when protocol is part of a group.
*/
+ /* The last 4 bits (31:28) are reserved to specify SRC/DST
+ * selection for L3 and L4, i.e. IPV[4,6]_SRC, IPV[4,6]_DST,
+ * [TCP,UDP,SCTP]_SRC, [TCP,UDP,SCTP]_DST
+ * 31 => L3_SRC, 30 => L3_DST, 29 => L4_SRC, 28 => L4_DST
+ */
+ l3_l4_src_dst = flow_cfg;
+ /* Reset these 4 bits so that they won't be part of the key */
+ flow_cfg &= NIX_FLOW_KEY_TYPE_L3_L4_MASK;
+
keyoff_marker = 0; max_key_off = 0; group_member = 0;
nr_field = 0; key_off = 0; field_marker = 1;
field = &tmp; max_bit_pos = fls(flow_cfg);
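To make the selection bits concrete, here is a small sketch using the bit positions listed in the comment above; the macro names and the protocol bits are demo stand-ins, not the driver's NIX_FLOW_KEY_TYPE_* definitions:

#include <stdint.h>
#include <stdio.h>

/* 31 => L3_SRC, 30 => L3_DST, 29 => L4_SRC, 28 => L4_DST (per the comment) */
#define DEMO_L3_SRC_ONLY (1u << 31)
#define DEMO_L3_DST_ONLY (1u << 30)
#define DEMO_L3_L4_MASK  (~0u >> 4)	/* keep the protocol bits [27:0] only */

int main(void)
{
	uint32_t flow_cfg = 0x00000005u | DEMO_L3_SRC_ONLY;	/* demo proto bits */
	uint32_t sel = flow_cfg;		/* remember the selection bits */

	flow_cfg &= DEMO_L3_L4_MASK;		/* selection bits never become key fields */

	printf("protocol bits: 0x%x\n", (unsigned)flow_cfg);
	printf("IPv4 key bytes: %s\n",
	       (sel & DEMO_L3_SRC_ONLY) && !(sel & DEMO_L3_DST_ONLY) ?
	       "SIP only (4)" : "SIP + DIP (8)");
	return 0;
}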
@@ -2370,6 +3441,13 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
/* This should be set to 1, when SEL_CHAN is set */
field->bytesm1 = 1;
break;
+ case NIX_FLOW_KEY_TYPE_IPV4_PROTO:
+ field->lid = NPC_LID_LC;
+ field->hdr_offset = 9; /* offset */
+ field->bytesm1 = 0; /* 1 byte */
+ field->ltype_match = NPC_LT_LC_IP;
+ field->ltype_mask = 0xF;
+ break;
case NIX_FLOW_KEY_TYPE_IPV4:
case NIX_FLOW_KEY_TYPE_INNR_IPV4:
field->lid = NPC_LID_LC;
@@ -2380,6 +3458,22 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
}
field->hdr_offset = 12; /* SIP offset */
field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
+
+ /* Only SIP */
+ if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_SRC_ONLY)
+ field->bytesm1 = 3; /* SIP, 4 bytes */
+
+ if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_DST_ONLY) {
+ /* Both SIP + DIP */
+ if (field->bytesm1 == 3) {
+ field->bytesm1 = 7; /* SIP + DIP, 8B */
+ } else {
+ /* Only DIP */
+ field->hdr_offset = 16; /* DIP off */
+ field->bytesm1 = 3; /* DIP, 4 bytes */
+ }
+ }
+
field->ltype_mask = 0xF; /* Match only IPv4 */
keyoff_marker = false;
break;
@@ -2393,6 +3487,22 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
}
field->hdr_offset = 8; /* SIP offset */
field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
+
+ /* Only SIP */
+ if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_SRC_ONLY)
+ field->bytesm1 = 15; /* SIP, 16 bytes */
+
+ if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_DST_ONLY) {
+ /* Both SIP + DIP */
+ if (field->bytesm1 == 15) {
+ /* SIP + DIP, 32 bytes */
+ field->bytesm1 = 31;
+ } else {
+ /* Only DIP */
+ field->hdr_offset = 24; /* DIP off */
+ field->bytesm1 = 15; /* DIP,16 bytes */
+ }
+ }
field->ltype_mask = 0xF; /* Match only IPv6 */
break;
case NIX_FLOW_KEY_TYPE_TCP:
@@ -2408,6 +3518,21 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
field->lid = NPC_LID_LH;
field->bytesm1 = 3; /* Sport + Dport, 4 bytes */
+ if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L4_SRC_ONLY)
+ field->bytesm1 = 1; /* SRC, 2 bytes */
+
+ if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L4_DST_ONLY) {
+ /* Both SRC + DST */
+ if (field->bytesm1 == 1) {
+ /* SRC + DST, 4 bytes */
+ field->bytesm1 = 3;
+ } else {
+ /* Only DST port */
+ field->hdr_offset = 2; /* DST off */
+ field->bytesm1 = 1; /* DST, 2 bytes */
+ }
+ }
+
/* Enum values for NPC_LID_LD and NPC_LID_LG are same,
* so no need to change the ltype_match, just change
* the lid for inner protocols
@@ -2449,6 +3574,12 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
field_marker = false;
keyoff_marker = false;
}
+
+ /* TCP/UDP/SCTP and ESP/AH fall at the same offset, so
+ * remember the TCP key offset within the 40-byte hash key.
+ */
+ if (key_type == NIX_FLOW_KEY_TYPE_TCP)
+ l4_key_offset = key_off;
break;
case NIX_FLOW_KEY_TYPE_NVGRE:
field->lid = NPC_LID_LD;
@@ -2512,6 +3643,13 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
field->ltype_match = NPC_LT_LE_GTPU;
field->ltype_mask = 0xF;
break;
+ case NIX_FLOW_KEY_TYPE_CH_LEN_90B:
+ field->lid = NPC_LID_LA;
+ field->hdr_offset = 24;
+ field->bytesm1 = 1; /* 2 Bytes */
+ field->ltype_match = NPC_LT_LA_CUSTOM_L2_90B_ETHER;
+ field->ltype_mask = 0xF;
+ break;
case NIX_FLOW_KEY_TYPE_VLAN:
field->lid = NPC_LID_LB;
field->hdr_offset = 2; /* Skip TPID (2-bytes) */
@@ -2520,11 +3658,38 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
field->ltype_mask = 0xF;
field->fn_mask = 1; /* Mask out the first nibble */
break;
+ case NIX_FLOW_KEY_TYPE_CUSTOM0:
+ field->lid = NPC_LID_LC;
+ field->hdr_offset = 6;
+ field->bytesm1 = 1; /* 2 Bytes */
+ field->ltype_match = NPC_LT_LC_CUSTOM0;
+ field->ltype_mask = 0xF;
+ break;
+ case NIX_FLOW_KEY_TYPE_AH:
+ case NIX_FLOW_KEY_TYPE_ESP:
+ field->hdr_offset = 0;
+ field->bytesm1 = 7; /* SPI + sequence number */
+ field->ltype_mask = 0xF;
+ field->lid = NPC_LID_LE;
+ field->ltype_match = NPC_LT_LE_ESP;
+ if (key_type == NIX_FLOW_KEY_TYPE_AH) {
+ field->lid = NPC_LID_LD;
+ field->ltype_match = NPC_LT_LD_AH;
+ field->hdr_offset = 4;
+ keyoff_marker = false;
+ }
+ break;
}
field->ena = 1;
/* Found a valid flow key type */
if (valid_key) {
+ /* Use the key offset of TCP/UDP/SCTP fields
+ * for ESP/AH fields.
+ */
+ if (key_type == NIX_FLOW_KEY_TYPE_ESP ||
+ key_type == NIX_FLOW_KEY_TYPE_AH)
+ key_off = l4_key_offset;
field->key_offset = key_off;
memcpy(&alg[nr_field], field, sizeof(*field));
max_key_off = max(max_key_off, field->bytesm1 + 1);
@@ -2555,7 +3720,7 @@ static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg)
hw = get_nix_hw(rvu->hw, blkaddr);
if (!hw)
- return -EINVAL;
+ return NIX_AF_ERR_INVALID_NIXBLK;
/* No room to add new flow hash algorithm */
if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX)
@@ -2595,7 +3760,7 @@ int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
- return -EINVAL;
+ return NIX_AF_ERR_INVALID_NIXBLK;
alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg);
/* Failed to get algo index from the existing list, reserve new */
@@ -2684,6 +3849,7 @@ int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
struct nix_set_mac_addr *req,
struct msg_rsp *rsp)
{
+ bool from_vf = !!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK);
u16 pcifunc = req->hdr.pcifunc;
int blkaddr, nixlf, err;
struct rvu_pfvf *pfvf;
@@ -2694,12 +3860,23 @@ int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
pfvf = rvu_get_pfvf(rvu, pcifunc);
+ /* untrusted VF can't overwrite admin(PF) changes */
+ if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
+ (from_vf && test_bit(PF_SET_VF_MAC, &pfvf->flags))) {
+ dev_warn(rvu->dev,
+ "MAC address set by admin(PF) cannot be overwritten by untrusted VF");
+ return -EPERM;
+ }
+
ether_addr_copy(pfvf->mac_addr, req->mac_addr);
rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
pfvf->rx_chan_base, req->mac_addr);
- rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
+ if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf)
+ ether_addr_copy(pfvf->default_mac, req->mac_addr);
+
+ rvu_switch_update_rules(rvu, pcifunc);
return 0;
}
@@ -2724,31 +3901,74 @@ int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu,
int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
struct msg_rsp *rsp)
{
- bool allmulti = false, disable_promisc = false;
+ bool allmulti, promisc, nix_rx_multicast;
u16 pcifunc = req->hdr.pcifunc;
- int blkaddr, nixlf, err;
struct rvu_pfvf *pfvf;
+ int nixlf, err;
- err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ promisc = req->mode & NIX_RX_MODE_PROMISC ? true : false;
+ allmulti = req->mode & NIX_RX_MODE_ALLMULTI ? true : false;
+ pfvf->use_mce_list = req->mode & NIX_RX_MODE_USE_MCE ? true : false;
+
+ nix_rx_multicast = rvu->hw->cap.nix_rx_multicast & pfvf->use_mce_list;
+
+ if (is_vf(pcifunc) && !nix_rx_multicast &&
+ (promisc || allmulti)) {
+ dev_warn_ratelimited(rvu->dev,
+ "VF promisc/multicast not supported\n");
+ return 0;
+ }
+
+ /* untrusted VF can't configure promisc/allmulti */
+ if (is_vf(pcifunc) && !test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
+ (promisc || allmulti))
+ return 0;
+
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
if (err)
return err;
- pfvf = rvu_get_pfvf(rvu, pcifunc);
+ if (nix_rx_multicast) {
+ /* add/del this PF_FUNC to/from mcast pkt replication list */
+ err = nix_update_mce_rule(rvu, pcifunc, NIXLF_ALLMULTI_ENTRY,
+ allmulti);
+ if (err) {
+ dev_err(rvu->dev,
+ "Failed to update pcifunc 0x%x to multicast list\n",
+ pcifunc);
+ return err;
+ }
- if (req->mode & NIX_RX_MODE_PROMISC)
- allmulti = false;
- else if (req->mode & NIX_RX_MODE_ALLMULTI)
- allmulti = true;
- else
- disable_promisc = true;
+ /* add/del this PF_FUNC to/from promisc pkt replication list */
+ err = nix_update_mce_rule(rvu, pcifunc, NIXLF_PROMISC_ENTRY,
+ promisc);
+ if (err) {
+ dev_err(rvu->dev,
+ "Failed to update pcifunc 0x%x to promisc list\n",
+ pcifunc);
+ return err;
+ }
+ }
- if (disable_promisc)
- rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf);
- else
- rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
- pfvf->rx_chan_base, allmulti);
+ /* install/uninstall allmulti entry */
+ if (allmulti) {
+ rvu_npc_install_allmulti_entry(rvu, pcifunc, nixlf,
+ pfvf->rx_chan_base);
+ } else {
+ if (!nix_rx_multicast)
+ rvu_npc_enable_allmulti_entry(rvu, pcifunc, nixlf, false);
+ }
- rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
+ /* install/uninstall promisc entry */
+ if (promisc) {
+ rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
+ pfvf->rx_chan_base,
+ pfvf->rx_chan_cnt);
+ } else {
+ if (!nix_rx_multicast)
+ rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf, false);
+ }
return 0;
}
@@ -2798,6 +4018,80 @@ static void nix_find_link_frs(struct rvu *rvu,
req->minlen = minlen;
}
+static int
+nix_config_link_credits(struct rvu *rvu, int blkaddr, int link,
+ u16 pcifunc, u64 tx_credits)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int pf = rvu_get_pf(pcifunc);
+ u8 cgx_id = 0, lmac_id = 0;
+ unsigned long poll_tmo;
+ bool restore_tx_en = 0;
+ struct nix_hw *nix_hw;
+ u64 cfg, sw_xoff = 0;
+ u32 schq = 0;
+ u32 credits;
+ int rc;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return NIX_AF_ERR_INVALID_NIXBLK;
+
+ if (tx_credits == nix_hw->tx_credits[link])
+ return 0;
+
+ /* Enable cgx tx if it was disabled, so that credits can return */
+ if (is_pf_cgxmapped(rvu, pf)) {
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu),
+ lmac_id, true);
+ }
+
+ rvu_nix_txsch_lock(nix_hw);
+ mutex_lock(&rvu->rsrc_lock);
+ /* Disable new traffic to link */
+ if (hw->cap.nix_shaping) {
+ schq = nix_get_tx_link(rvu, pcifunc);
+ sw_xoff = rvu_read64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq));
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TL1X_SW_XOFF(schq), BIT_ULL(0));
+ }
+
+ rc = NIX_AF_ERR_LINK_CREDITS;
+ poll_tmo = jiffies + usecs_to_jiffies(200000);
+ /* Wait for credits to return */
+ do {
+ if (time_after(jiffies, poll_tmo))
+ goto exit;
+ usleep_range(100, 200);
+
+ cfg = rvu_read64(rvu, blkaddr,
+ NIX_AF_TX_LINKX_NORM_CREDIT(link));
+ credits = (cfg >> 12) & 0xFFFFFULL;
+ } while (credits != nix_hw->tx_credits[link]);
+
+ cfg &= ~(0xFFFFFULL << 12);
+ cfg |= (tx_credits << 12);
+ rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
+ rc = 0;
+
+ nix_hw->tx_credits[link] = tx_credits;
+ rvu_nix_update_link_credits(rvu, blkaddr, link, cfg);
+
+exit:
+ /* Enable traffic back */
+ if (hw->cap.nix_shaping && !sw_xoff)
+ rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq), 0);
+
+ /* Restore state of cgx tx */
+ if (restore_tx_en)
+ rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
+
+ mutex_unlock(&rvu->rsrc_lock);
+ rvu_nix_txsch_unlock(nix_hw);
+ return rc;
+}
+
int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
struct msg_rsp *rsp)
{
@@ -2808,7 +4102,9 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
struct nix_txsch *txsch;
u64 cfg, lmac_fifo_len;
struct nix_hw *nix_hw;
+ struct rvu_pfvf *pfvf;
u8 cgx = 0, lmac = 0;
+ u16 max_mtu;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (blkaddr < 0)
@@ -2816,9 +4112,14 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
- return -EINVAL;
+ return NIX_AF_ERR_INVALID_NIXBLK;
+
+ if (is_afvf(pcifunc))
+ rvu_get_lbk_link_max_frs(rvu, &max_mtu);
+ else
+ rvu_get_lmac_link_max_frs(rvu, &max_mtu);
- if (!req->sdp_link && req->maxlen > NIC_HW_MAX_FRS)
+ if (!req->sdp_link && req->maxlen > max_mtu)
return NIX_AF_ERR_FRS_INVALID;
if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
@@ -2858,7 +4159,8 @@ rx_frscfg:
link = (cgx * hw->lmac_per_cgx) + lmac;
} else if (pf == 0) {
/* For VFs of PF0 ingress is LBK port, so config LBK link */
- link = hw->cgx_links;
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ link = hw->cgx_links + pfvf->lbkid;
}
if (link < 0)
@@ -2878,71 +4180,10 @@ linkcfg:
/* Update transmit credits for CGX links */
lmac_fifo_len =
- CGX_FIFO_LEN / cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
- cfg = rvu_read64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link));
- cfg &= ~(0xFFFFFULL << 12);
- cfg |= ((lmac_fifo_len - req->maxlen) / 16) << 12;
- rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
- return 0;
-}
-
-int rvu_mbox_handler_nix_rxvlan_alloc(struct rvu *rvu, struct msg_req *req,
- struct msg_rsp *rsp)
-{
- struct npc_mcam_alloc_entry_req alloc_req = { };
- struct npc_mcam_alloc_entry_rsp alloc_rsp = { };
- struct npc_mcam_free_entry_req free_req = { };
- u16 pcifunc = req->hdr.pcifunc;
- int blkaddr, nixlf, err;
- struct rvu_pfvf *pfvf;
-
- /* LBK VFs do not have separate MCAM UCAST entry hence
- * skip allocating rxvlan for them
- */
- if (is_afvf(pcifunc))
- return 0;
-
- pfvf = rvu_get_pfvf(rvu, pcifunc);
- if (pfvf->rxvlan)
- return 0;
-
- /* alloc new mcam entry */
- alloc_req.hdr.pcifunc = pcifunc;
- alloc_req.count = 1;
-
- err = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &alloc_req,
- &alloc_rsp);
- if (err)
- return err;
-
- /* update entry to enable rxvlan offload */
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
- if (blkaddr < 0) {
- err = NIX_AF_ERR_AF_LF_INVALID;
- goto free_entry;
- }
-
- nixlf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr], pcifunc, 0);
- if (nixlf < 0) {
- err = NIX_AF_ERR_AF_LF_INVALID;
- goto free_entry;
- }
-
- pfvf->rxvlan_index = alloc_rsp.entry_list[0];
- /* all it means is that rxvlan_index is valid */
- pfvf->rxvlan = true;
-
- err = rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
- if (err)
- goto free_entry;
-
- return 0;
-free_entry:
- free_req.hdr.pcifunc = pcifunc;
- free_req.entry = alloc_rsp.entry_list[0];
- rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, rsp);
- pfvf->rxvlan = false;
- return err;
+ rvu_cgx_get_fifolen(rvu) /
+ cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
+ return nix_config_link_credits(rvu, blkaddr, link, pcifunc,
+ (lmac_fifo_len - req->maxlen) / 16);
}
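A worked example of the credit arithmetic used just above, with hypothetical numbers (in the driver the FIFO length comes from rvu_cgx_get_fifolen() and maxlen from the request):

#include <stdio.h>

int main(void)
{
	unsigned int fifo_len = 0x20000;	/* 128 KB FIFO, assumed */
	unsigned int lmac_cnt = 4;		/* LMACs sharing that FIFO */
	unsigned int maxlen = 9212;		/* requested max frame size, assumed */
	unsigned int per_lmac = fifo_len / lmac_cnt;

	/* One credit covers 16 bytes; leave room for one max-size frame */
	printf("tx credits = %u\n", (per_lmac - maxlen) / 16);	/* prints 1472 */
	return 0;
}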
int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
@@ -2967,6 +4208,11 @@ int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
else
cfg &= ~BIT_ULL(40);
+ if (req->len_verify & NIX_RX_DROP_RE)
+ cfg |= BIT_ULL(32);
+ else
+ cfg &= ~BIT_ULL(32);
+
if (req->csum_verify & BIT(0))
cfg |= BIT_ULL(37);
else
@@ -2977,11 +4223,25 @@ int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
return 0;
}
-static void nix_link_config(struct rvu *rvu, int blkaddr)
+static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs)
+{
+ /* CN10k supports 72KB FIFO size and max packet size of 64k */
+ if (rvu->hw->lbk_bufsize == 0x12000)
+ return (rvu->hw->lbk_bufsize - lbk_max_frs) / 16;
+ else
+ return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */
+}
+
+static void nix_link_config(struct rvu *rvu, int blkaddr,
+ struct nix_hw *nix_hw)
{
struct rvu_hwinfo *hw = rvu->hw;
int cgx, lmac_cnt, slink, link;
- u64 tx_credits;
+ u16 lbk_max_frs, lmac_max_frs;
+ u64 tx_credits, cfg;
+
+ rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs);
+ rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs);
/* Set default min/max packet lengths allowed on NIX Rx links.
*
@@ -2989,37 +4249,56 @@ static void nix_link_config(struct rvu *rvu, int blkaddr)
* as undersize and report them to SW as error pkts, hence
* setting it to 40 bytes.
*/
- for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++) {
+ for (link = 0; link < hw->cgx_links; link++) {
rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
- NIC_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
+ ((u64)lmac_max_frs << 16) | NIC_HW_MIN_FRS);
}
+ for (link = hw->cgx_links; link < hw->cgx_links + hw->lbk_links; link++) {
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
+ ((u64)lbk_max_frs << 16) | NIC_HW_MIN_FRS);
+ }
if (hw->sdp_links) {
link = hw->cgx_links + hw->lbk_links;
rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
}
+ /* Set CPT link i.e second pass config */
+ if (hw->cpt_links) {
+ link = hw->cgx_links + hw->lbk_links + hw->sdp_links;
+ /* Set default min/max packet lengths same as the LBK link,
+ * as that link's range is the widest.
+ */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
+ ((u64)lbk_max_frs << 16) | NIC_HW_MIN_FRS);
+ }
+
/* Set credits for Tx links assuming max packet length allowed.
* This will be reconfigured based on MTU set for PF/VF.
*/
for (cgx = 0; cgx < hw->cgx; cgx++) {
lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
- tx_credits = ((CGX_FIFO_LEN / lmac_cnt) - NIC_HW_MAX_FRS) / 16;
+ /* Skip when cgx is not available or lmac cnt is zero */
+ if (lmac_cnt <= 0)
+ continue;
+ tx_credits = ((rvu_cgx_get_fifolen(rvu) / lmac_cnt) -
+ lmac_max_frs) / 16;
/* Enable credits and set credit pkt count to max allowed */
- tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
+ cfg = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
slink = cgx * hw->lmac_per_cgx;
for (link = slink; link < (slink + lmac_cnt); link++) {
+ nix_hw->tx_credits[link] = tx_credits;
rvu_write64(rvu, blkaddr,
- NIX_AF_TX_LINKX_NORM_CREDIT(link),
- tx_credits);
+ NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
}
}
/* Set Tx credits for LBK link */
slink = hw->cgx_links;
for (link = slink; link < (slink + hw->lbk_links); link++) {
- tx_credits = 1000; /* 10 * max LBK datarate = 10 * 100Gbps */
+ tx_credits = rvu_get_lbk_link_credits(rvu, lbk_max_frs);
+ nix_hw->tx_credits[link] = tx_credits;
/* Enable credits and set credit pkt count to max allowed */
tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
rvu_write64(rvu, blkaddr,
@@ -3113,17 +4392,37 @@ static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
return 0;
}
-int rvu_nix_init(struct rvu *rvu)
+static void rvu_nix_setup_capabilities(struct rvu *rvu, int blkaddr)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u64 hw_const;
+
+ hw_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
+
+	/* On OcteonTx2 the DWRR quantum is directly configured into each of
+	 * the transmit scheduler queues, and PF/VF drivers were free to
+	 * configure any value up to 2^24.
+	 * On CN10K the quantum configured at the scheduler queues is a
+	 * weight; SW must set up a base DWRR MTU at NIX_AF_DWRR_RPM_MTU /
+	 * NIX_AF_DWRR_SDP_MTU, and HW computes 'DWRR MTU * weight' to get
+	 * the quantum.
+	 *
+	 * Check if HW uses a common MTU for all DWRR quantum configs.
+	 * On OcteonTx2 this register field is '0'.
+	 */
+ if (((hw_const >> 56) & 0x10) == 0x10)
+ hw->cap.nix_common_dwrr_mtu = true;
+}
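As a rough illustration of the two quantum models described in the comment above, the sketch below contrasts the OcteonTx2 direct quantum with the CN10K 'weight * base DWRR MTU' scheme. It reuses the bit-60 test from this function; the base MTU, weight and register value are made-up examples.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same test as rvu_nix_setup_capabilities(): bit 60 of NIX_AF_CONST1 */
static bool nix_common_dwrr_mtu(uint64_t nix_af_const1)
{
	return ((nix_af_const1 >> 56) & 0x10) == 0x10;
}

/* On OTx2 'cfg' is the raw quantum (up to 2^24); on CN10K it is a weight
 * that HW multiplies by the base DWRR MTU.
 */
static uint64_t effective_quantum(bool common_mtu, uint64_t cfg,
				  uint64_t dwrr_mtu)
{
	return common_mtu ? cfg * dwrr_mtu : cfg;
}

int main(void)
{
	uint64_t hw_const = 1ULL << 60;		/* pretend CN10K silicon */
	uint64_t dwrr_mtu = 1514;		/* hypothetical base MTU */
	bool cn10k = nix_common_dwrr_mtu(hw_const);

	printf("quantum = %llu\n",
	       (unsigned long long)effective_quantum(cn10k, 16, dwrr_mtu));
	return 0;
}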
+
+static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
{
const struct npc_lt_def_cfg *ltdefs;
struct rvu_hwinfo *hw = rvu->hw;
+ int blkaddr = nix_hw->blkaddr;
struct rvu_block *block;
- int blkaddr, err;
+ int err;
u64 cfg;
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
- if (blkaddr < 0)
- return 0;
block = &hw->block[blkaddr];
if (is_rvu_96xx_B0(rvu)) {
@@ -3152,13 +4451,8 @@ int rvu_nix_init(struct rvu *rvu)
if (err)
return err;
- /* Set num of links of each type */
- cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
- hw->cgx = (cfg >> 12) & 0xF;
- hw->lmac_per_cgx = (cfg >> 8) & 0xF;
- hw->cgx_links = hw->cgx * hw->lmac_per_cgx;
- hw->lbk_links = 1;
- hw->sdp_links = 1;
+ /* Setup capabilities of the NIX block */
+ rvu_nix_setup_capabilities(rvu, blkaddr);
/* Initialize admin queue */
err = nix_aq_init(rvu, block);
@@ -3168,26 +4462,41 @@ int rvu_nix_init(struct rvu *rvu)
/* Restore CINT timer delay to HW reset values */
rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
- if (blkaddr == BLKADDR_NIX0) {
- hw->nix0 = devm_kzalloc(rvu->dev,
- sizeof(struct nix_hw), GFP_KERNEL);
- if (!hw->nix0)
- return -ENOMEM;
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SEB_CFG);
- err = nix_setup_txschq(rvu, hw->nix0, blkaddr);
+	/* For better performance use NDC TX instead of NDC RX for SQ's SQEs */
+ cfg |= 1ULL;
+ if (!is_rvu_otx2(rvu))
+ cfg |= NIX_PTP_1STEP_EN;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_SEB_CFG, cfg);
+
+ if (!is_rvu_otx2(rvu))
+ rvu_nix_block_cn10k_init(rvu, nix_hw);
+
+ if (is_block_implemented(hw, blkaddr)) {
+ err = nix_setup_txschq(rvu, nix_hw, blkaddr);
if (err)
return err;
- err = nix_af_mark_format_setup(rvu, hw->nix0, blkaddr);
+ err = nix_setup_ipolicers(rvu, nix_hw, blkaddr);
if (err)
return err;
- err = nix_setup_mcast(rvu, hw->nix0, blkaddr);
+ err = nix_af_mark_format_setup(rvu, nix_hw, blkaddr);
+ if (err)
+ return err;
+
+ err = nix_setup_mcast(rvu, nix_hw, blkaddr);
+ if (err)
+ return err;
+
+ err = nix_setup_txvlan(rvu, nix_hw);
if (err)
return err;
/* Configure segmentation offload formats */
- nix_setup_lso(rvu, hw->nix0, blkaddr);
+ nix_setup_lso(rvu, nix_hw, blkaddr);
/* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info.
* This helps HW protocol checker to identify headers
@@ -3227,49 +4536,155 @@ int rvu_nix_init(struct rvu *rvu)
(ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) |
ltdefs->rx_isctp.ltype_mask);
+ if (!is_rvu_otx2(rvu)) {
+ /* Enable APAD calculation for other protocols
+ * matching APAD0 and APAD1 lt def registers.
+ */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD0,
+ (ltdefs->rx_apad0.valid << 11) |
+ (ltdefs->rx_apad0.lid << 8) |
+ (ltdefs->rx_apad0.ltype_match << 4) |
+ ltdefs->rx_apad0.ltype_mask);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD1,
+ (ltdefs->rx_apad1.valid << 11) |
+ (ltdefs->rx_apad1.lid << 8) |
+ (ltdefs->rx_apad1.ltype_match << 4) |
+ ltdefs->rx_apad1.ltype_mask);
+
+		/* The receive ethertype definition registers define layer
+		 * information in NPC_RESULT_S used to identify the Ethertype
+		 * location in the L2 header. This is used for Ethertype
+		 * overwriting in the inline IPsec flow.
+		 */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(0),
+ (ltdefs->rx_et[0].offset << 12) |
+ (ltdefs->rx_et[0].valid << 11) |
+ (ltdefs->rx_et[0].lid << 8) |
+ (ltdefs->rx_et[0].ltype_match << 4) |
+ ltdefs->rx_et[0].ltype_mask);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(1),
+ (ltdefs->rx_et[1].offset << 12) |
+ (ltdefs->rx_et[1].valid << 11) |
+ (ltdefs->rx_et[1].lid << 8) |
+ (ltdefs->rx_et[1].ltype_match << 4) |
+ ltdefs->rx_et[1].ltype_mask);
+ }
+
err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
if (err)
return err;
+ nix_hw->tx_credits = kcalloc(hw->cgx_links + hw->lbk_links,
+ sizeof(u64), GFP_KERNEL);
+ if (!nix_hw->tx_credits)
+ return -ENOMEM;
+
/* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
- nix_link_config(rvu, blkaddr);
+ nix_link_config(rvu, blkaddr, nix_hw);
/* Enable Channel backpressure */
rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0));
+
+ err = rvu_nix_fixes_init(rvu, nix_hw, blkaddr);
+ if (err)
+ return err;
+
+ if (is_block_implemented(rvu->hw, BLKADDR_CPT0)) {
+ /* Config IPSec headers identification */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IPSECX(0),
+ (ltdefs->rx_ipsec[0].lid << 8) |
+ (ltdefs->rx_ipsec[0].ltype_match << 4) |
+ ltdefs->rx_ipsec[0].ltype_mask);
+
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IPSECX(1),
+ (ltdefs->rx_ipsec[1].spi_offset << 12) |
+ (ltdefs->rx_ipsec[1].lid << 8) |
+ (ltdefs->rx_ipsec[1].ltype_match << 4) |
+ ltdefs->rx_ipsec[1].ltype_mask);
+ }
}
+
return 0;
}
-void rvu_nix_freemem(struct rvu *rvu)
+int rvu_nix_init(struct rvu *rvu)
{
struct rvu_hwinfo *hw = rvu->hw;
- struct rvu_block *block;
+ struct nix_hw *nix_hw;
+ int blkaddr = 0, err;
+ int i = 0;
+
+ hw->nix = devm_kcalloc(rvu->dev, MAX_NIX_BLKS, sizeof(struct nix_hw),
+ GFP_KERNEL);
+ if (!hw->nix)
+ return -ENOMEM;
+
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ while (blkaddr) {
+ nix_hw = &hw->nix[i];
+ nix_hw->rvu = rvu;
+ nix_hw->blkaddr = blkaddr;
+ err = rvu_nix_block_init(rvu, nix_hw);
+ if (err)
+ return err;
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ i++;
+ }
+
+ return 0;
+}
+
+static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr,
+ struct rvu_block *block)
+{
struct nix_txsch *txsch;
struct nix_mcast *mcast;
+ struct nix_txvlan *vlan;
struct nix_hw *nix_hw;
- int blkaddr, lvl;
-
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
- if (blkaddr < 0)
- return;
+ int lvl;
- block = &hw->block[blkaddr];
rvu_aq_free(rvu, block->aq);
- if (blkaddr == BLKADDR_NIX0) {
+ if (is_block_implemented(rvu->hw, blkaddr)) {
nix_hw = get_nix_hw(rvu->hw, blkaddr);
- if (!nix_hw)
+ if (!nix_hw) {
+ dev_err(rvu->dev, "Unable to free %s memory\n",
+ block->name);
return;
+ }
for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
txsch = &nix_hw->txsch[lvl];
kfree(txsch->schq.bmap);
}
+ kfree(nix_hw->tx_credits);
+
+ nix_ipolicer_freemem(rvu, nix_hw);
+
+ vlan = &nix_hw->txvlan;
+ kfree(vlan->rsrc.bmap);
+ mutex_destroy(&vlan->rsrc_lock);
+
mcast = &nix_hw->mcast;
qmem_free(rvu->dev, mcast->mce_ctx);
qmem_free(rvu->dev, mcast->mcast_buf);
mutex_destroy(&mcast->mce_lock);
+ rvu_nix_fixes_exit(rvu, nix_hw);
+ }
+}
+
+void rvu_nix_freemem(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int blkaddr = 0;
+
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ while (blkaddr) {
+ block = &hw->block[blkaddr];
+ rvu_nix_block_freemem(rvu, blkaddr, block);
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
}
}
@@ -3277,6 +4692,7 @@ int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
int nixlf, err;
err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
@@ -3285,6 +4701,13 @@ int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);
+ npc_mcam_enable_flows(rvu, pcifunc);
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ set_bit(NIXLF_INITIALIZED, &pfvf->flags);
+
+ rvu_switch_update_rules(rvu, pcifunc);
+
return rvu_cgx_start_stop_io(rvu, pcifunc, true);
}
@@ -3292,30 +4715,44 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
int nixlf, err;
err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
if (err)
return err;
- rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
+ rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
return rvu_cgx_start_stop_io(rvu, pcifunc, false);
}
+#define RX_SA_BASE GENMASK_ULL(52, 7)
+
void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
struct hwctx_disable_req ctx_req;
+ int pf = rvu_get_pf(pcifunc);
+ u8 cgx_id, lmac_id;
+ u64 sa_base;
+ void *cgxd;
int err;
ctx_req.hdr.pcifunc = pcifunc;
/* Cleanup NPC MCAM entries, free Tx scheduler queues being used */
+ rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
+ rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
nix_interface_deinit(rvu, pcifunc, nixlf);
nix_rx_sync(rvu, blkaddr);
nix_txschq_free(rvu, pcifunc);
+ clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
+
rvu_cgx_start_stop_io(rvu, pcifunc, false);
if (pfvf->sq_ctx) {
@@ -3339,7 +4776,48 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
dev_err(rvu->dev, "CQ ctx disable failed\n");
}
+ /* Disabling CGX and NPC config done for PTP */
+ if (pfvf->hw_rx_tstamp_en) {
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ cgx_lmac_ptp_config(cgxd, lmac_id, false);
+ /* Undo NPC config done for PTP */
+ if (npc_config_ts_kpuaction(rvu, pf, pcifunc, false))
+ dev_err(rvu->dev, "NPC config for PTP failed\n");
+ pfvf->hw_rx_tstamp_en = false;
+ }
+
+ /* reset HW config done for Switch headers */
+ rvu_npc_set_parse_mode(rvu, pcifunc, OTX2_PRIV_FLAGS_DEFAULT,
+ (PKIND_TX | PKIND_RX), 0, 0, 0, 0);
+
+ /* reset priority flow control config */
+ rvu_cgx_prio_flow_ctrl_cfg(rvu, pcifunc, 0, 0, 0);
+
+ /* reset 802.3x flow control config */
+ rvu_cgx_cfg_pause_frm(rvu, pcifunc, 0, 0);
+
nix_ctx_free(rvu, pfvf);
+
+ nix_free_all_bandprof(rvu, pcifunc);
+
+ sa_base = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(nixlf));
+ if (FIELD_GET(RX_SA_BASE, sa_base)) {
+ err = rvu_cpt_ctx_flush(rvu, pcifunc);
+ if (err)
+ dev_err(rvu->dev,
+ "CPT ctx flush failed with error: %d\n", err);
+ }
+
+ if (is_block_implemented(rvu->hw, BLKADDR_CPT0)) {
+ /* reset the configuration related to inline ipsec */
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(nixlf),
+ 0x0);
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(nixlf),
+ 0x0);
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(nixlf),
+ 0x0);
+ }
}
#define NIX_AF_LFX_TX_CFG_PTP_EN BIT_ULL(32)
@@ -3348,10 +4826,18 @@ static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
{
struct rvu_hwinfo *hw = rvu->hw;
struct rvu_block *block;
- int blkaddr;
+ int blkaddr, pf;
int nixlf;
u64 cfg;
+ pf = rvu_get_pf(pcifunc);
+ if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
+ return 0;
+
+	/* Silicon does not support enabling timestamping in HiGig mode */
+ if (rvu_cgx_is_higig2_enabled(rvu, rvu_get_pf(pcifunc)))
+ return NIX_AF_ERR_PTP_CONFIG_FAIL;
+
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (blkaddr < 0)
return NIX_AF_ERR_AF_LF_INVALID;
@@ -3402,7 +4888,7 @@ int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
- return -EINVAL;
+ return NIX_AF_ERR_INVALID_NIXBLK;
/* Find existing matching LSO format, if any */
for (idx = 0; idx < nix_hw->lso.in_use; idx++) {
@@ -3435,3 +4921,797 @@ int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
return 0;
}
+
+int rvu_mbox_handler_nix_set_vlan_tpid(struct rvu *rvu,
+ struct nix_set_vlan_tpid *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ int nixlf, err, blkaddr;
+ u64 cfg;
+
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
+ if (err)
+ return err;
+
+ if (req->vlan_type != NIX_VLAN_TYPE_OUTER &&
+ req->vlan_type != NIX_VLAN_TYPE_INNER)
+ return NIX_AF_ERR_PARAM;
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf));
+
+ if (req->vlan_type == NIX_VLAN_TYPE_OUTER)
+ cfg = (cfg & ~GENMASK_ULL(15, 0)) | req->tpid;
+ else
+ cfg = (cfg & ~GENMASK_ULL(31, 16)) | ((u64)req->tpid << 16);
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
+ return 0;
+}
+
+#define IPSEC_GEN_CFG_EGRP GENMASK_ULL(50, 48)
+#define IPSEC_GEN_CFG_OPCODE GENMASK_ULL(47, 32)
+#define IPSEC_GEN_CFG_PARAM1 GENMASK_ULL(31, 16)
+#define IPSEC_GEN_CFG_PARAM2 GENMASK_ULL(15, 0)
+
+#define CPT_INST_QSEL_BLOCK GENMASK_ULL(28, 24)
+#define CPT_INST_QSEL_PF_FUNC GENMASK_ULL(23, 8)
+#define CPT_INST_QSEL_SLOT GENMASK_ULL(7, 0)
+
+static void nix_inline_ipsec_cfg(struct rvu *rvu, struct nix_inline_ipsec_cfg *req,
+ int blkaddr)
+{
+ u8 cpt_idx, cpt_blkaddr;
+	u64 val = 0;
+
+ cpt_idx = (blkaddr == BLKADDR_NIX0) ? 0 : 1;
+ if (req->enable) {
+ /* Enable context prefetching */
+ if (!is_rvu_otx2(rvu))
+ val = BIT_ULL(51);
+
+ /* Set OPCODE and EGRP */
+ val |= FIELD_PREP(IPSEC_GEN_CFG_EGRP, req->gen_cfg.egrp);
+ val |= FIELD_PREP(IPSEC_GEN_CFG_OPCODE, req->gen_cfg.opcode);
+ val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM1, req->gen_cfg.param1);
+ val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM2, req->gen_cfg.param2);
+
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, val);
+
+ /* Set CPT queue for inline IPSec */
+ val = FIELD_PREP(CPT_INST_QSEL_SLOT, req->inst_qsel.cpt_slot);
+ val |= FIELD_PREP(CPT_INST_QSEL_PF_FUNC,
+ req->inst_qsel.cpt_pf_func);
+
+ if (!is_rvu_otx2(rvu)) {
+ cpt_blkaddr = (cpt_idx == 0) ? BLKADDR_CPT0 :
+ BLKADDR_CPT1;
+ val |= FIELD_PREP(CPT_INST_QSEL_BLOCK, cpt_blkaddr);
+ }
+
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx),
+ val);
+
+ /* Set CPT credit */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
+ req->cpt_credit);
+ } else {
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, 0x0);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx),
+ 0x0);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
+ 0x3FFFFF);
+ }
+}
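The FIELD_PREP/GENMASK pattern used above to build NIX_AF_RX_IPSEC_GEN_CFG can be reproduced outside the kernel. The sketch below re-implements the two macros for user space; the field boundaries are copied from this patch, while the example field values are made up.

#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))
#define FIELD_PREP_ULL(mask, val) \
	(((uint64_t)(val) << __builtin_ctzll(mask)) & (mask))

#define IPSEC_GEN_CFG_EGRP	GENMASK_ULL(50, 48)
#define IPSEC_GEN_CFG_OPCODE	GENMASK_ULL(47, 32)
#define IPSEC_GEN_CFG_PARAM1	GENMASK_ULL(31, 16)
#define IPSEC_GEN_CFG_PARAM2	GENMASK_ULL(15, 0)

int main(void)
{
	/* Example field values; purely illustrative */
	uint64_t val = 1ULL << 51;	/* context prefetch bit (CN10K) */

	val |= FIELD_PREP_ULL(IPSEC_GEN_CFG_EGRP, 1);
	val |= FIELD_PREP_ULL(IPSEC_GEN_CFG_OPCODE, 0x2);
	val |= FIELD_PREP_ULL(IPSEC_GEN_CFG_PARAM1, 0x10);
	val |= FIELD_PREP_ULL(IPSEC_GEN_CFG_PARAM2, 0x20);

	printf("NIX_AF_RX_IPSEC_GEN_CFG = 0x%016llx\n",
	       (unsigned long long)val);
	return 0;
}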
+
+int rvu_mbox_handler_nix_inline_ipsec_cfg(struct rvu *rvu,
+ struct nix_inline_ipsec_cfg *req,
+ struct msg_rsp *rsp)
+{
+ if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
+ return 0;
+
+ nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX0);
+ if (is_block_implemented(rvu->hw, BLKADDR_CPT1))
+ nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX1);
+
+ return 0;
+}
+
+int rvu_mbox_handler_nix_inline_ipsec_lf_cfg(struct rvu *rvu,
+					     struct nix_inline_ipsec_lf_cfg *req,
+					     struct msg_rsp *rsp)
+{
+ int lf, blkaddr, err;
+ u64 val;
+
+ if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
+ return 0;
+
+ err = nix_get_nixlf(rvu, req->hdr.pcifunc, &lf, &blkaddr);
+ if (err)
+ return err;
+
+ if (req->enable) {
+ /* Set TT, TAG_CONST, SA_POW2_SIZE and LENM1_MAX */
+ val = (u64)req->ipsec_cfg0.tt << 44 |
+ (u64)req->ipsec_cfg0.tag_const << 20 |
+ (u64)req->ipsec_cfg0.sa_pow2_size << 16 |
+ req->ipsec_cfg0.lenm1_max;
+
+ if (blkaddr == BLKADDR_NIX1)
+ val |= BIT_ULL(46);
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), val);
+
+ /* Set SA_IDX_W and SA_IDX_MAX */
+ val = (u64)req->ipsec_cfg1.sa_idx_w << 32 |
+ req->ipsec_cfg1.sa_idx_max;
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), val);
+
+ /* Set SA base address */
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf),
+ req->sa_base_addr);
+ } else {
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), 0x0);
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), 0x0);
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf),
+ 0x0);
+ }
+
+ return 0;
+}
+
+void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc)
+{
+ bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
+
+ /* overwrite vf mac address with default_mac */
+ if (from_vf)
+ ether_addr_copy(pfvf->mac_addr, pfvf->default_mac);
+}
+
+bool rvu_nix_is_ptp_tx_enabled(struct rvu *rvu, u16 pcifunc)
+{
+ int blkaddr, nixlf, err;
+ u64 cfg;
+
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
+ if (err)
+ return false;
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf));
+ return (cfg & BIT_ULL(32));
+}
+
+/* NIX ingress policers or bandwidth profiles APIs */
+static void nix_config_rx_pkt_policer_precolor(struct rvu *rvu, int blkaddr)
+{
+ struct npc_lt_def_cfg defs, *ltdefs;
+
+ ltdefs = &defs;
+ memcpy(ltdefs, rvu->kpu.lt_def, sizeof(struct npc_lt_def_cfg));
+
+	/* Extract the PCP and DEI fields from the outer VLAN at byte offset
+	 * 2 from the start of LB_PTR (i.e. the TAG).
+	 * VLAN0 is the outer VLAN and VLAN1 the inner VLAN; inner VLAN
+	 * fields are considered only when 'Tunnel enable' is set in the
+	 * profile.
+	 */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN0_PCP_DEI,
+ (2UL << 12) | (ltdefs->ovlan.lid << 8) |
+ (ltdefs->ovlan.ltype_match << 4) |
+ ltdefs->ovlan.ltype_mask);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN1_PCP_DEI,
+ (2UL << 12) | (ltdefs->ivlan.lid << 8) |
+ (ltdefs->ivlan.ltype_match << 4) |
+ ltdefs->ivlan.ltype_mask);
+
+ /* DSCP field in outer and tunneled IPv4 packets */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4_DSCP,
+ (1UL << 12) | (ltdefs->rx_oip4.lid << 8) |
+ (ltdefs->rx_oip4.ltype_match << 4) |
+ ltdefs->rx_oip4.ltype_mask);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4_DSCP,
+ (1UL << 12) | (ltdefs->rx_iip4.lid << 8) |
+ (ltdefs->rx_iip4.ltype_match << 4) |
+ ltdefs->rx_iip4.ltype_mask);
+
+ /* DSCP field (traffic class) in outer and tunneled IPv6 packets */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6_DSCP,
+ (1UL << 11) | (ltdefs->rx_oip6.lid << 8) |
+ (ltdefs->rx_oip6.ltype_match << 4) |
+ ltdefs->rx_oip6.ltype_mask);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6_DSCP,
+ (1UL << 11) | (ltdefs->rx_iip6.lid << 8) |
+ (ltdefs->rx_iip6.ltype_match << 4) |
+ ltdefs->rx_iip6.ltype_mask);
+}
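Most of the writes above pack the same layer-definition word: a byte offset shifted by 12 (a few registers, such as the IPv6 DSCP ones, shift their first field by 11 instead), the LID by 8, the LTYPE match by 4, and the LTYPE mask in the low bits. The helper below is a user-space sketch of that packing; the example LID/LTYPE values are placeholders.

#include <stdint.h>
#include <stdio.h>

/* Mirrors the (offset << 12) | (lid << 8) | (ltype_match << 4) | ltype_mask
 * packing used for the NIX_AF_RX_DEF_* writes above.
 */
static uint64_t npc_lt_def_encode(uint8_t offset, uint8_t lid,
				  uint8_t ltype_match, uint8_t ltype_mask)
{
	return ((uint64_t)offset << 12) | ((uint64_t)lid << 8) |
	       ((uint64_t)ltype_match << 4) | ltype_mask;
}

int main(void)
{
	/* Placeholder LID/LTYPE values; real ones come from the KPU profile */
	printf("VLAN0_PCP_DEI = 0x%llx\n",
	       (unsigned long long)npc_lt_def_encode(2, 2, 0, 0));
	return 0;
}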
+
+static int nix_init_policer_context(struct rvu *rvu, struct nix_hw *nix_hw,
+ int layer, int prof_idx)
+{
+ struct nix_cn10k_aq_enq_req aq_req;
+ int rc;
+
+ memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
+
+ aq_req.qidx = (prof_idx & 0x3FFF) | (layer << 14);
+ aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
+ aq_req.op = NIX_AQ_INSTOP_INIT;
+
+ /* Context is all zeros, submit to AQ */
+ rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
+ (struct nix_aq_enq_req *)&aq_req, NULL);
+ if (rc)
+ dev_err(rvu->dev, "Failed to INIT bandwidth profile layer %d profile %d\n",
+ layer, prof_idx);
+ return rc;
+}
+
+static int nix_setup_ipolicers(struct rvu *rvu,
+ struct nix_hw *nix_hw, int blkaddr)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct nix_ipolicer *ipolicer;
+ int err, layer, prof_idx;
+ u64 cfg;
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
+ if (!(cfg & BIT_ULL(61))) {
+ hw->cap.ipolicer = false;
+ return 0;
+ }
+
+ hw->cap.ipolicer = true;
+ nix_hw->ipolicer = devm_kcalloc(rvu->dev, BAND_PROF_NUM_LAYERS,
+ sizeof(*ipolicer), GFP_KERNEL);
+ if (!nix_hw->ipolicer)
+ return -ENOMEM;
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_PL_CONST);
+
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ ipolicer = &nix_hw->ipolicer[layer];
+ switch (layer) {
+ case BAND_PROF_LEAF_LAYER:
+			ipolicer->band_prof.max = cfg & 0xFFFF;
+			break;
+		case BAND_PROF_MID_LAYER:
+			ipolicer->band_prof.max = (cfg >> 16) & 0xFFFF;
+			break;
+		case BAND_PROF_TOP_LAYER:
+			ipolicer->band_prof.max = (cfg >> 32) & 0xFFFF;
+ break;
+ }
+
+ if (!ipolicer->band_prof.max)
+ continue;
+
+ err = rvu_alloc_bitmap(&ipolicer->band_prof);
+ if (err)
+ return err;
+
+ ipolicer->pfvf_map = devm_kcalloc(rvu->dev,
+ ipolicer->band_prof.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!ipolicer->pfvf_map)
+ return -ENOMEM;
+
+ ipolicer->match_id = devm_kcalloc(rvu->dev,
+ ipolicer->band_prof.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!ipolicer->match_id)
+ return -ENOMEM;
+
+ for (prof_idx = 0;
+ prof_idx < ipolicer->band_prof.max; prof_idx++) {
+ /* Set AF as current owner for INIT ops to succeed */
+ ipolicer->pfvf_map[prof_idx] = 0x00;
+
+			/* The profile context has no enable bit, so there is
+			 * no context disable operation. INIT all profiles here
+			 * so that a PF/VF later only needs to do a WRITE to
+			 * set up policer rates and config.
+			 */
+ err = nix_init_policer_context(rvu, nix_hw,
+ layer, prof_idx);
+ if (err)
+ return err;
+ }
+
+ /* Allocate memory for maintaining ref_counts for MID level
+ * profiles, this will be needed for leaf layer profiles'
+ * aggregation.
+ */
+ if (layer != BAND_PROF_MID_LAYER)
+ continue;
+
+ ipolicer->ref_count = devm_kcalloc(rvu->dev,
+ ipolicer->band_prof.max,
+ sizeof(u16), GFP_KERNEL);
+ }
+
+	/* Set policer timeunit to 2us, i.e. (19 + 1) * 100 nsec = 2us */
+ rvu_write64(rvu, blkaddr, NIX_AF_PL_TS, 19);
+
+ nix_config_rx_pkt_policer_precolor(rvu, blkaddr);
+
+ return 0;
+}
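The per-layer profile counts and the policer time unit configured above decode as shown in the sketch below, which simply re-applies the masks and the '(PL_TS + 1) * 100 ns' rule from this function to a made-up NIX_AF_PL_CONST value.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t pl_const = (64ULL << 32) | (256ULL << 16) | 1024ULL; /* fake */
	uint64_t pl_ts = 19;	/* value programmed above */

	printf("leaf profiles: %llu\n", (unsigned long long)(pl_const & 0xFFFF));
	printf("mid  profiles: %llu\n", (unsigned long long)((pl_const >> 16) & 0xFFFF));
	printf("top  profiles: %llu\n", (unsigned long long)((pl_const >> 32) & 0xFFFF));

	/* Policer time unit: (PL_TS + 1) * 100 ns, so 19 -> 2000 ns (2 us) */
	printf("policer timeunit: %llu ns\n", (unsigned long long)((pl_ts + 1) * 100));
	return 0;
}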
+
+static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw)
+{
+ struct nix_ipolicer *ipolicer;
+ int layer;
+
+ if (!rvu->hw->cap.ipolicer)
+ return;
+
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ ipolicer = &nix_hw->ipolicer[layer];
+
+ if (!ipolicer->band_prof.max)
+ continue;
+
+ kfree(ipolicer->band_prof.bmap);
+ }
+}
+
+static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
+ struct nix_hw *nix_hw, u16 pcifunc)
+{
+ struct nix_ipolicer *ipolicer;
+ int layer, hi_layer, prof_idx;
+
+ /* Bits [15:14] in profile index represent layer */
+ layer = (req->qidx >> 14) & 0x03;
+ prof_idx = req->qidx & 0x3FFF;
+
+ ipolicer = &nix_hw->ipolicer[layer];
+ if (prof_idx >= ipolicer->band_prof.max)
+ return -EINVAL;
+
+ /* Check if the profile is allocated to the requesting PCIFUNC or not
+ * with the exception of AF. AF is allowed to read and update contexts.
+ */
+ if (pcifunc && ipolicer->pfvf_map[prof_idx] != pcifunc)
+ return -EINVAL;
+
+ /* If this profile is linked to higher layer profile then check
+ * if that profile is also allocated to the requesting PCIFUNC
+ * or not.
+ */
+ if (!req->prof.hl_en)
+ return 0;
+
+ /* Leaf layer profile can link only to mid layer and
+ * mid layer to top layer.
+ */
+ if (layer == BAND_PROF_LEAF_LAYER)
+ hi_layer = BAND_PROF_MID_LAYER;
+ else if (layer == BAND_PROF_MID_LAYER)
+ hi_layer = BAND_PROF_TOP_LAYER;
+ else
+ return -EINVAL;
+
+ ipolicer = &nix_hw->ipolicer[hi_layer];
+ prof_idx = req->prof.band_prof_id;
+ if (prof_idx >= ipolicer->band_prof.max ||
+ ipolicer->pfvf_map[prof_idx] != pcifunc)
+ return -EINVAL;
+
+ return 0;
+}
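For reference, the AQ qidx used for bandwidth profiles packs the layer into bits [15:14] and the profile index into bits [13:0], matching the masks above. A minimal encode/decode sketch (layer numbering assumed from the code):

#include <stdint.h>
#include <stdio.h>

enum { LEAF = 0, MID = 1, TOP = 2 };	/* layer numbering assumed */

static uint16_t bandprof_qidx(uint8_t layer, uint16_t prof_idx)
{
	return (prof_idx & 0x3FFF) | ((uint16_t)layer << 14);
}

int main(void)
{
	uint16_t qidx = bandprof_qidx(MID, 42);

	printf("qidx=0x%04x layer=%u prof=%u\n", (unsigned)qidx,
	       (unsigned)((qidx >> 14) & 0x3), (unsigned)(qidx & 0x3FFF));
	return 0;
}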
+
+int rvu_mbox_handler_nix_bandprof_alloc(struct rvu *rvu,
+ struct nix_bandprof_alloc_req *req,
+ struct nix_bandprof_alloc_rsp *rsp)
+{
+ int blkaddr, layer, prof, idx, err;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct nix_ipolicer *ipolicer;
+ struct nix_hw *nix_hw;
+
+ if (!rvu->hw->cap.ipolicer)
+ return NIX_AF_ERR_IPOLICER_NOTSUPP;
+
+ err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return err;
+
+ mutex_lock(&rvu->rsrc_lock);
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ if (layer == BAND_PROF_INVAL_LAYER)
+ continue;
+ if (!req->prof_count[layer])
+ continue;
+
+ ipolicer = &nix_hw->ipolicer[layer];
+ for (idx = 0; idx < req->prof_count[layer]; idx++) {
+ /* Allocate a max of 'MAX_BANDPROF_PER_PFFUNC' profiles */
+ if (idx == MAX_BANDPROF_PER_PFFUNC)
+ break;
+
+ prof = rvu_alloc_rsrc(&ipolicer->band_prof);
+ if (prof < 0)
+ break;
+ rsp->prof_count[layer]++;
+ rsp->prof_idx[layer][idx] = prof;
+ ipolicer->pfvf_map[prof] = pcifunc;
+ }
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+ return 0;
+}
+
+static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc)
+{
+ int blkaddr, layer, prof_idx, err;
+ struct nix_ipolicer *ipolicer;
+ struct nix_hw *nix_hw;
+
+ if (!rvu->hw->cap.ipolicer)
+ return NIX_AF_ERR_IPOLICER_NOTSUPP;
+
+ err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return err;
+
+ mutex_lock(&rvu->rsrc_lock);
+ /* Free all the profiles allocated to the PCIFUNC */
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ if (layer == BAND_PROF_INVAL_LAYER)
+ continue;
+ ipolicer = &nix_hw->ipolicer[layer];
+
+ for (prof_idx = 0; prof_idx < ipolicer->band_prof.max; prof_idx++) {
+ if (ipolicer->pfvf_map[prof_idx] != pcifunc)
+ continue;
+
+ /* Clear ratelimit aggregation, if any */
+ if (layer == BAND_PROF_LEAF_LAYER &&
+ ipolicer->match_id[prof_idx])
+ nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);
+
+ ipolicer->pfvf_map[prof_idx] = 0x00;
+ ipolicer->match_id[prof_idx] = 0;
+ rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
+ }
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+ return 0;
+}
+
+int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu,
+ struct nix_bandprof_free_req *req,
+ struct msg_rsp *rsp)
+{
+ int blkaddr, layer, prof_idx, idx, err;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct nix_ipolicer *ipolicer;
+ struct nix_hw *nix_hw;
+
+ if (req->free_all)
+ return nix_free_all_bandprof(rvu, pcifunc);
+
+ if (!rvu->hw->cap.ipolicer)
+ return NIX_AF_ERR_IPOLICER_NOTSUPP;
+
+ err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return err;
+
+ mutex_lock(&rvu->rsrc_lock);
+ /* Free the requested profile indices */
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ if (layer == BAND_PROF_INVAL_LAYER)
+ continue;
+ if (!req->prof_count[layer])
+ continue;
+
+ ipolicer = &nix_hw->ipolicer[layer];
+ for (idx = 0; idx < req->prof_count[layer]; idx++) {
+ prof_idx = req->prof_idx[layer][idx];
+ if (prof_idx >= ipolicer->band_prof.max ||
+ ipolicer->pfvf_map[prof_idx] != pcifunc)
+ continue;
+
+ /* Clear ratelimit aggregation, if any */
+ if (layer == BAND_PROF_LEAF_LAYER &&
+ ipolicer->match_id[prof_idx])
+ nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);
+
+ ipolicer->pfvf_map[prof_idx] = 0x00;
+ ipolicer->match_id[prof_idx] = 0;
+ rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
+ if (idx == MAX_BANDPROF_PER_PFFUNC)
+ break;
+ }
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+ return 0;
+}
+
+int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw,
+ struct nix_cn10k_aq_enq_req *aq_req,
+ struct nix_cn10k_aq_enq_rsp *aq_rsp,
+ u16 pcifunc, u8 ctype, u32 qidx)
+{
+ memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
+ aq_req->hdr.pcifunc = pcifunc;
+ aq_req->ctype = ctype;
+ aq_req->op = NIX_AQ_INSTOP_READ;
+ aq_req->qidx = qidx;
+
+ return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
+ (struct nix_aq_enq_req *)aq_req,
+ (struct nix_aq_enq_rsp *)aq_rsp);
+}
+
+static int nix_ipolicer_map_leaf_midprofs(struct rvu *rvu,
+ struct nix_hw *nix_hw,
+ struct nix_cn10k_aq_enq_req *aq_req,
+ struct nix_cn10k_aq_enq_rsp *aq_rsp,
+ u32 leaf_prof, u16 mid_prof)
+{
+ memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
+ aq_req->hdr.pcifunc = 0x00;
+ aq_req->ctype = NIX_AQ_CTYPE_BANDPROF;
+ aq_req->op = NIX_AQ_INSTOP_WRITE;
+ aq_req->qidx = leaf_prof;
+
+ aq_req->prof.band_prof_id = mid_prof;
+ aq_req->prof_mask.band_prof_id = GENMASK(6, 0);
+ aq_req->prof.hl_en = 1;
+ aq_req->prof_mask.hl_en = 1;
+
+ return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
+ (struct nix_aq_enq_req *)aq_req,
+ (struct nix_aq_enq_rsp *)aq_rsp);
+}
+
+int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc,
+ u16 rq_idx, u16 match_id)
+{
+ int leaf_prof, mid_prof, leaf_match;
+ struct nix_cn10k_aq_enq_req aq_req;
+ struct nix_cn10k_aq_enq_rsp aq_rsp;
+ struct nix_ipolicer *ipolicer;
+ struct nix_hw *nix_hw;
+ int blkaddr, idx, rc;
+
+ if (!rvu->hw->cap.ipolicer)
+ return 0;
+
+ rc = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
+ if (rc)
+ return rc;
+
+ /* Fetch the RQ's context to see if policing is enabled */
+ rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, pcifunc,
+ NIX_AQ_CTYPE_RQ, rq_idx);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to fetch RQ%d context of PFFUNC 0x%x\n",
+ __func__, rq_idx, pcifunc);
+ return rc;
+ }
+
+ if (!aq_rsp.rq.policer_ena)
+ return 0;
+
+ /* Get the bandwidth profile ID mapped to this RQ */
+ leaf_prof = aq_rsp.rq.band_prof_id;
+
+ ipolicer = &nix_hw->ipolicer[BAND_PROF_LEAF_LAYER];
+ ipolicer->match_id[leaf_prof] = match_id;
+
+ /* Check if any other leaf profile is marked with same match_id */
+ for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
+ if (idx == leaf_prof)
+ continue;
+ if (ipolicer->match_id[idx] != match_id)
+ continue;
+
+ leaf_match = idx;
+ break;
+ }
+
+ if (idx == ipolicer->band_prof.max)
+ return 0;
+
+ /* Fetch the matching profile's context to check if it's already
+ * mapped to a mid level profile.
+ */
+ rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
+ NIX_AQ_CTYPE_BANDPROF, leaf_match);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to fetch context of leaf profile %d\n",
+ __func__, leaf_match);
+ return rc;
+ }
+
+ ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
+ if (aq_rsp.prof.hl_en) {
+		/* Get the mid layer profile index and map this leaf_prof
+		 * to it as well, so that flows steered to different RQs
+		 * but marked with the same match_id are rate limited in
+		 * an aggregate fashion.
+		 */
+ mid_prof = aq_rsp.prof.band_prof_id;
+ rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
+ &aq_req, &aq_rsp,
+ leaf_prof, mid_prof);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to map leaf(%d) and mid(%d) profiles\n",
+ __func__, leaf_prof, mid_prof);
+ goto exit;
+ }
+
+ mutex_lock(&rvu->rsrc_lock);
+ ipolicer->ref_count[mid_prof]++;
+ mutex_unlock(&rvu->rsrc_lock);
+ goto exit;
+ }
+
+ /* Allocate a mid layer profile and
+ * map both 'leaf_prof' and 'leaf_match' profiles to it.
+ */
+ mutex_lock(&rvu->rsrc_lock);
+ mid_prof = rvu_alloc_rsrc(&ipolicer->band_prof);
+ if (mid_prof < 0) {
+ dev_err(rvu->dev,
+ "%s: Unable to allocate mid layer profile\n", __func__);
+ mutex_unlock(&rvu->rsrc_lock);
+ goto exit;
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+ ipolicer->pfvf_map[mid_prof] = 0x00;
+ ipolicer->ref_count[mid_prof] = 0;
+
+ /* Initialize mid layer profile same as 'leaf_prof' */
+ rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
+ NIX_AQ_CTYPE_BANDPROF, leaf_prof);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to fetch context of leaf profile %d\n",
+ __func__, leaf_prof);
+ goto exit;
+ }
+
+ memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
+ aq_req.hdr.pcifunc = 0x00;
+ aq_req.qidx = (mid_prof & 0x3FFF) | (BAND_PROF_MID_LAYER << 14);
+ aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
+ aq_req.op = NIX_AQ_INSTOP_WRITE;
+ memcpy(&aq_req.prof, &aq_rsp.prof, sizeof(struct nix_bandprof_s));
+ memset((char *)&aq_req.prof_mask, 0xff, sizeof(struct nix_bandprof_s));
+ /* Clear higher layer enable bit in the mid profile, just in case */
+ aq_req.prof.hl_en = 0;
+ aq_req.prof_mask.hl_en = 1;
+
+ rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
+ (struct nix_aq_enq_req *)&aq_req, NULL);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to INIT context of mid layer profile %d\n",
+ __func__, mid_prof);
+ goto exit;
+ }
+
+ /* Map both leaf profiles to this mid layer profile */
+ rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
+ &aq_req, &aq_rsp,
+ leaf_prof, mid_prof);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to map leaf(%d) and mid(%d) profiles\n",
+ __func__, leaf_prof, mid_prof);
+ goto exit;
+ }
+
+ mutex_lock(&rvu->rsrc_lock);
+ ipolicer->ref_count[mid_prof]++;
+ mutex_unlock(&rvu->rsrc_lock);
+
+ rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
+ &aq_req, &aq_rsp,
+ leaf_match, mid_prof);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to map leaf(%d) and mid(%d) profiles\n",
+ __func__, leaf_match, mid_prof);
+ ipolicer->ref_count[mid_prof]--;
+ goto exit;
+ }
+
+ mutex_lock(&rvu->rsrc_lock);
+ ipolicer->ref_count[mid_prof]++;
+ mutex_unlock(&rvu->rsrc_lock);
+
+exit:
+ return rc;
+}
+
+/* Called with the rsrc_lock mutex held */
+static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
+ u32 leaf_prof)
+{
+ struct nix_cn10k_aq_enq_req aq_req;
+ struct nix_cn10k_aq_enq_rsp aq_rsp;
+ struct nix_ipolicer *ipolicer;
+ u16 mid_prof;
+ int rc;
+
+ mutex_unlock(&rvu->rsrc_lock);
+
+ rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
+ NIX_AQ_CTYPE_BANDPROF, leaf_prof);
+
+ mutex_lock(&rvu->rsrc_lock);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to fetch context of leaf profile %d\n",
+ __func__, leaf_prof);
+ return;
+ }
+
+ if (!aq_rsp.prof.hl_en)
+ return;
+
+ mid_prof = aq_rsp.prof.band_prof_id;
+ ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
+ ipolicer->ref_count[mid_prof]--;
+ /* If ref_count is zero, free mid layer profile */
+ if (!ipolicer->ref_count[mid_prof]) {
+ ipolicer->pfvf_map[mid_prof] = 0x00;
+ rvu_free_rsrc(&ipolicer->band_prof, mid_prof);
+ }
+}
+
+int rvu_mbox_handler_nix_bandprof_get_hwinfo(struct rvu *rvu, struct msg_req *req,
+ struct nix_bandprof_get_hwinfo_rsp *rsp)
+{
+ struct nix_ipolicer *ipolicer;
+ int blkaddr, layer, err;
+ struct nix_hw *nix_hw;
+ u64 tu;
+
+ if (!rvu->hw->cap.ipolicer)
+ return NIX_AF_ERR_IPOLICER_NOTSUPP;
+
+ err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return err;
+
+ /* Return number of bandwidth profiles free at each layer */
+ mutex_lock(&rvu->rsrc_lock);
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ if (layer == BAND_PROF_INVAL_LAYER)
+ continue;
+
+ ipolicer = &nix_hw->ipolicer[layer];
+ rsp->prof_count[layer] = rvu_rsrc_free_count(&ipolicer->band_prof);
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+
+ /* Set the policer timeunit in nanosec */
+ tu = rvu_read64(rvu, blkaddr, NIX_AF_PL_TS) & GENMASK_ULL(9, 0);
+ rsp->policer_timeunit = (tu + 1) * 100;
+
+ return 0;
+}
+
+int rvu_mbox_handler_nix_rx_sw_sync(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
+ if (blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nix_rx_sync(rvu, blkaddr);
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
index 67471cb2b129..9d764f5abad3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
@@ -1,11 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
-/* Marvell OcteonTx2 RVU Admin Function driver
+/* Marvell RVU Admin Function driver
*
- * Copyright (C) 2018 Marvell International Ltd.
+ * Copyright (C) 2018 Marvell.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/module.h>
@@ -292,6 +289,7 @@ int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
return rvu_npa_aq_enq_inst(rvu, req, rsp);
}
#endif
+EXPORT_SYMBOL(rvu_mbox_handler_npa_aq_enq);
int rvu_mbox_handler_npa_hwctx_disable(struct rvu *rvu,
struct hwctx_disable_req *req,
@@ -419,6 +417,10 @@ exit:
rsp->stack_pg_ptrs = (cfg >> 8) & 0xFF;
rsp->stack_pg_bytes = cfg & 0xFF;
rsp->qints = (cfg >> 28) & 0xFFF;
+ if (!is_rvu_otx2(rvu)) {
+ cfg = rvu_read64(rvu, block->addr, NPA_AF_BATCH_CTL);
+ rsp->cache_lines = (cfg >> 1) & 0x3F;
+ }
return rc;
}
@@ -478,6 +480,13 @@ static int npa_aq_init(struct rvu *rvu, struct rvu_block *block)
#endif
rvu_write64(rvu, block->addr, NPA_AF_NDC_CFG, cfg);
+ /* For CN10K NPA BATCH DMA set 35 cache lines */
+ if (!is_rvu_otx2(rvu)) {
+ cfg = rvu_read64(rvu, block->addr, NPA_AF_BATCH_CTL);
+ cfg &= ~0x7EULL;
+ cfg |= BIT_ULL(6) | BIT_ULL(2) | BIT_ULL(1);
+ rvu_write64(rvu, block->addr, NPA_AF_BATCH_CTL, cfg);
+ }
/* Result structure can be followed by Aura/Pool context at
* RES + 128bytes and a write mask at RES + 256 bytes, depending on
* operation type. Alloc sufficient result memory for all operations.
@@ -497,18 +506,14 @@ static int npa_aq_init(struct rvu *rvu, struct rvu_block *block)
int rvu_npa_init(struct rvu *rvu)
{
struct rvu_hwinfo *hw = rvu->hw;
- int blkaddr, err;
+ int blkaddr;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
if (blkaddr < 0)
return 0;
/* Initialize admin queue */
- err = npa_aq_init(rvu, &hw->block[blkaddr]);
- if (err)
- return err;
-
- return 0;
+ return npa_aq_init(rvu, &hw->block[blkaddr]);
}
void rvu_npa_freemem(struct rvu *rvu)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index 6fa9358e6db4..97fb12db0192 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -1,16 +1,15 @@
// SPDX-License-Identifier: GPL-2.0
-/* Marvell OcteonTx2 RVU Admin Function driver
+/* Marvell RVU Admin Function driver
*
- * Copyright (C) 2018 Marvell International Ltd.
+ * Copyright (C) 2018 Marvell.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/firmware.h>
+#include <linux/stddef.h>
#include "rvu_struct.h"
#include "rvu_reg.h"
@@ -19,15 +18,14 @@
#include "cgx.h"
#include "npc_profile.h"
-#define RSVD_MCAM_ENTRIES_PER_PF 2 /* Bcast & Promisc */
+#define RSVD_MCAM_ENTRIES_PER_PF 3 /* Broadcast, Promisc and AllMulticast */
#define RSVD_MCAM_ENTRIES_PER_NIXLF 1 /* Ucast for LFs */
-#define NIXLF_UCAST_ENTRY 0
-#define NIXLF_BCAST_ENTRY 1
-#define NIXLF_PROMISC_ENTRY 2
+#define NPC_HW_TSTAMP_OFFSET 8ULL
+#define NPC_KEX_PF_FUNC_MASK 0xFFFFULL
+#define ALIGN_8B_CEIL(__a) (((__a) + 7) & (-8))
#define NPC_PARSE_RESULT_DMAC_OFFSET 8
-#define NPC_HW_TSTAMP_OFFSET 8ULL
static const char def_pfl_name[] = "default";
@@ -36,6 +34,45 @@ static void npc_mcam_free_all_entries(struct rvu *rvu, struct npc_mcam *mcam,
static void npc_mcam_free_all_counters(struct rvu *rvu, struct npc_mcam *mcam,
u16 pcifunc);
+bool is_npc_intf_tx(u8 intf)
+{
+ return !!(intf & 0x1);
+}
+
+bool is_npc_intf_rx(u8 intf)
+{
+ return !(intf & 0x1);
+}
+
+bool is_npc_interface_valid(struct rvu *rvu, u8 intf)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+
+ return intf < hw->npc_intfs;
+}
+
+static int npc_mcam_verify_pf_func(struct rvu *rvu,
+ struct mcam_entry *entry_data, u8 intf,
+ u16 pcifunc)
+{
+ u16 pf_func, pf_func_mask;
+
+ if (is_npc_intf_rx(intf))
+ return 0;
+
+ pf_func_mask = (entry_data->kw_mask[0] >> 32) &
+ NPC_KEX_PF_FUNC_MASK;
+ pf_func = (entry_data->kw[0] >> 32) & NPC_KEX_PF_FUNC_MASK;
+
+ pf_func = be16_to_cpu((__force __be16)pf_func);
+ if (pf_func_mask != NPC_KEX_PF_FUNC_MASK ||
+ ((pf_func & ~RVU_PFVF_FUNC_MASK) !=
+ (pcifunc & ~RVU_PFVF_FUNC_MASK)))
+ return -EINVAL;
+
+ return 0;
+}
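For TX rules the target PF_FUNC sits byte-swapped in KW0 bits [47:32]; the check above requires the mask on that field to be exact and the target to belong to the requester's PF. The sketch below reproduces that logic in user space, with a plain byte swap standing in for be16_to_cpu and an assumed 10-bit VF-function mask.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NPC_KEX_PF_FUNC_MASK	0xFFFFULL
#define RVU_PFVF_FUNC_MASK	0x3FF	/* VF-function bits, assumed width */

static bool tx_rule_pf_func_ok(uint64_t kw0, uint64_t kw0_mask,
			       uint16_t requester)
{
	uint16_t pf_func_mask = (kw0_mask >> 32) & NPC_KEX_PF_FUNC_MASK;
	uint16_t pf_func = (kw0 >> 32) & NPC_KEX_PF_FUNC_MASK;

	pf_func = (uint16_t)((pf_func << 8) | (pf_func >> 8)); /* be16 -> cpu */

	return pf_func_mask == NPC_KEX_PF_FUNC_MASK &&
	       (pf_func & ~RVU_PFVF_FUNC_MASK) ==
	       (requester & ~RVU_PFVF_FUNC_MASK);
}

int main(void)
{
	/* Target PF_FUNC 0x0401 stored byte-swapped as 0x0104 in KW0[47:32] */
	uint64_t kw0 = 0x0104ULL << 32, kw0_mask = 0xFFFFULL << 32;

	printf("ok=%d\n", tx_rule_pf_func_ok(kw0, kw0_mask, 0x0400));
	return 0;
}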
+
void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf)
{
int blkaddr;
@@ -94,8 +131,33 @@ int npc_config_ts_kpuaction(struct rvu *rvu, int pf, u16 pcifunc, bool enable)
return 0;
}
-static int npc_get_nixlf_mcam_index(struct npc_mcam *mcam,
- u16 pcifunc, int nixlf, int type)
+static int npc_get_ucast_mcam_index(struct npc_mcam *mcam, u16 pcifunc,
+ int nixlf)
+{
+ struct rvu_hwinfo *hw = container_of(mcam, struct rvu_hwinfo, mcam);
+ struct rvu *rvu = hw->rvu;
+ int blkaddr = 0, max = 0;
+ struct rvu_block *block;
+ struct rvu_pfvf *pfvf;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+	/* Given a PF/VF and NIX LF number, calculate the unicast MCAM
+	 * entry index based on the NIX block assigned to the PF/VF.
+	 */
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ while (blkaddr) {
+ if (pfvf->nix_blkaddr == blkaddr)
+ break;
+ block = &rvu->hw->block[blkaddr];
+ max += block->lf.max;
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ }
+
+ return mcam->nixlf_offset + (max + nixlf) * RSVD_MCAM_ENTRIES_PER_NIXLF;
+}
+
+int npc_get_nixlf_mcam_index(struct npc_mcam *mcam,
+ u16 pcifunc, int nixlf, int type)
{
int pf = rvu_get_pf(pcifunc);
int index;
@@ -110,14 +172,16 @@ static int npc_get_nixlf_mcam_index(struct npc_mcam *mcam,
*/
if (type == NIXLF_BCAST_ENTRY)
return index;
- else if (type == NIXLF_PROMISC_ENTRY)
+ else if (type == NIXLF_ALLMULTI_ENTRY)
return index + 1;
+ else if (type == NIXLF_PROMISC_ENTRY)
+ return index + 2;
}
- return (mcam->nixlf_offset + (nixlf * RSVD_MCAM_ENTRIES_PER_NIXLF));
+ return npc_get_ucast_mcam_index(mcam, pcifunc, nixlf);
}
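The index math above implies this reserved-entry layout: each PF owns three consecutive reserved entries (broadcast, all-multicast, promiscuous), while each NIX LF gets one unicast entry starting at mcam->nixlf_offset. The sketch below re-derives those indices; the per-PF base computation and both offsets are made-up placeholders, since the real base comes from context not shown in this hunk.

#include <stdio.h>

#define RSVD_MCAM_ENTRIES_PER_PF	3
#define RSVD_MCAM_ENTRIES_PER_NIXLF	1

enum { UCAST, BCAST, ALLMULTI, PROMISC };

static int nixlf_mcam_index(int pf_offset, int nixlf_offset,
			    int pf, int nixlf, int type)
{
	int index = pf_offset + pf * RSVD_MCAM_ENTRIES_PER_PF;

	switch (type) {
	case BCAST:	return index;
	case ALLMULTI:	return index + 1;
	case PROMISC:	return index + 2;
	default:	/* UCAST */
		return nixlf_offset + nixlf * RSVD_MCAM_ENTRIES_PER_NIXLF;
	}
}

int main(void)
{
	printf("PF2 promisc entry : %d\n", nixlf_mcam_index(100, 200, 2, 0, PROMISC));
	printf("NIXLF5 ucast entry: %d\n", nixlf_mcam_index(100, 200, 0, 5, UCAST));
	return 0;
}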
-static int npc_get_bank(struct npc_mcam *mcam, int index)
+int npc_get_bank(struct npc_mcam *mcam, int index)
{
int bank = index / mcam->banksize;
@@ -128,8 +192,8 @@ static int npc_get_bank(struct npc_mcam *mcam, int index)
return bank;
}
-static bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam,
- int blkaddr, int index)
+bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index)
{
int bank = npc_get_bank(mcam, index);
u64 cfg;
@@ -139,8 +203,8 @@ static bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam,
return (cfg & 1);
}
-static void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
- int blkaddr, int index, bool enable)
+void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index, bool enable)
{
int bank = npc_get_bank(mcam, index);
int actbank = bank;
@@ -257,12 +321,121 @@ static void npc_get_keyword(struct mcam_entry *entry, int idx,
*cam0 = ~*cam1 & kw_mask;
}
+static void npc_fill_entryword(struct mcam_entry *entry, int idx,
+ u64 cam0, u64 cam1)
+{
+ /* Similar to npc_get_keyword, but fills mcam_entry structure from
+ * CAM registers.
+ */
+ switch (idx) {
+ case 0:
+ entry->kw[0] = cam1;
+ entry->kw_mask[0] = cam1 ^ cam0;
+ break;
+ case 1:
+ entry->kw[1] = cam1;
+ entry->kw_mask[1] = cam1 ^ cam0;
+ break;
+ case 2:
+ entry->kw[1] |= (cam1 & CAM_MASK(16)) << 48;
+ entry->kw[2] = (cam1 >> 16) & CAM_MASK(48);
+ entry->kw_mask[1] |= ((cam1 ^ cam0) & CAM_MASK(16)) << 48;
+ entry->kw_mask[2] = ((cam1 ^ cam0) >> 16) & CAM_MASK(48);
+ break;
+ case 3:
+ entry->kw[2] |= (cam1 & CAM_MASK(16)) << 48;
+ entry->kw[3] = (cam1 >> 16) & CAM_MASK(32);
+ entry->kw_mask[2] |= ((cam1 ^ cam0) & CAM_MASK(16)) << 48;
+ entry->kw_mask[3] = ((cam1 ^ cam0) >> 16) & CAM_MASK(32);
+ break;
+ case 4:
+ entry->kw[3] |= (cam1 & CAM_MASK(32)) << 32;
+ entry->kw[4] = (cam1 >> 32) & CAM_MASK(32);
+ entry->kw_mask[3] |= ((cam1 ^ cam0) & CAM_MASK(32)) << 32;
+ entry->kw_mask[4] = ((cam1 ^ cam0) >> 32) & CAM_MASK(32);
+ break;
+ case 5:
+ entry->kw[4] |= (cam1 & CAM_MASK(32)) << 32;
+ entry->kw[5] = (cam1 >> 32) & CAM_MASK(16);
+ entry->kw_mask[4] |= ((cam1 ^ cam0) & CAM_MASK(32)) << 32;
+ entry->kw_mask[5] = ((cam1 ^ cam0) >> 32) & CAM_MASK(16);
+ break;
+ case 6:
+ entry->kw[5] |= (cam1 & CAM_MASK(48)) << 16;
+ entry->kw[6] = (cam1 >> 48) & CAM_MASK(16);
+ entry->kw_mask[5] |= ((cam1 ^ cam0) & CAM_MASK(48)) << 16;
+ entry->kw_mask[6] = ((cam1 ^ cam0) >> 48) & CAM_MASK(16);
+ break;
+ case 7:
+ entry->kw[6] |= (cam1 & CAM_MASK(48)) << 16;
+ entry->kw_mask[6] |= ((cam1 ^ cam0) & CAM_MASK(48)) << 16;
+ break;
+ }
+}
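A small round-trip sketch of the CAM0/CAM1 don't-care encoding the two helpers above translate between: for every matched key bit, CAM1 holds the match value and CAM0 its complement, while bits where both are 0 are wildcards, so kw = cam1 and kw_mask = cam1 ^ cam0.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t kw = 0xABULL;		/* key value             */
	uint64_t kw_mask = 0xFFULL;	/* only low byte matched  */

	/* Encode (as npc_get_keyword does) */
	uint64_t cam1 = kw & kw_mask;
	uint64_t cam0 = ~cam1 & kw_mask;

	/* Decode (as npc_fill_entryword does) */
	uint64_t dec_kw = cam1;
	uint64_t dec_mask = cam1 ^ cam0;

	printf("cam1=0x%llx cam0=0x%llx -> kw=0x%llx mask=0x%llx\n",
	       (unsigned long long)cam1, (unsigned long long)cam0,
	       (unsigned long long)dec_kw, (unsigned long long)dec_mask);
	return 0;
}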
+
+static u64 npc_get_default_entry_action(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, u16 pf_func)
+{
+ int bank, nixlf, index;
+
+ /* get ucast entry rule entry index */
+ nix_get_nixlf(rvu, pf_func, &nixlf, NULL);
+ index = npc_get_nixlf_mcam_index(mcam, pf_func, nixlf,
+ NIXLF_UCAST_ENTRY);
+ bank = npc_get_bank(mcam, index);
+ index &= (mcam->banksize - 1);
+
+ return rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(index, bank));
+}
+
+static void npc_fixup_vf_rule(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index, struct mcam_entry *entry,
+ bool *enable)
+{
+ struct rvu_npc_mcam_rule *rule;
+ u16 owner, target_func;
+ struct rvu_pfvf *pfvf;
+ u64 rx_action;
+
+ owner = mcam->entry2pfvf_map[index];
+ target_func = (entry->action >> 4) & 0xffff;
+ /* do nothing when target is LBK/PF or owner is not PF */
+ if (is_pffunc_af(owner) || is_afvf(target_func) ||
+ (owner & RVU_PFVF_FUNC_MASK) ||
+ !(target_func & RVU_PFVF_FUNC_MASK))
+ return;
+
+ /* save entry2target_pffunc */
+ pfvf = rvu_get_pfvf(rvu, target_func);
+ mcam->entry2target_pffunc[index] = target_func;
+
+	/* don't enable the rule when the nixlf is not attached or initialized */
+ if (!(is_nixlf_attached(rvu, target_func) &&
+ test_bit(NIXLF_INITIALIZED, &pfvf->flags)))
+ *enable = false;
+
+	/* fixup is not needed for the rules added by the user (ntuple filters) */
+ list_for_each_entry(rule, &mcam->mcam_rules, list) {
+ if (rule->entry == index)
+ return;
+ }
+
+ /* copy VF default entry action to the VF mcam entry */
+ rx_action = npc_get_default_entry_action(rvu, mcam, blkaddr,
+ target_func);
+ if (rx_action)
+ entry->action = rx_action;
+}
+
static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, int index, u8 intf,
struct mcam_entry *entry, bool enable)
{
int bank = npc_get_bank(mcam, index);
int kw = 0, actbank, actindex;
+ u8 tx_intf_mask = ~intf & 0x3;
+ u8 tx_intf = intf;
u64 cam0, cam1;
actbank = bank; /* Save bank id, to set action later on */
@@ -283,12 +456,21 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
*/
for (; bank < (actbank + mcam->banks_per_entry); bank++, kw = kw + 2) {
/* Interface should be set in all banks */
+ if (is_npc_intf_tx(intf)) {
+ /* Last bit must be set and rest don't care
+ * for TX interfaces
+ */
+ tx_intf_mask = 0x1;
+ tx_intf = intf & tx_intf_mask;
+ tx_intf_mask = ~tx_intf & tx_intf_mask;
+ }
+
rvu_write64(rvu, blkaddr,
NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 1),
- intf);
+ tx_intf);
rvu_write64(rvu, blkaddr,
NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 0),
- ~intf & 0x3);
+ tx_intf_mask);
/* Set the match key */
npc_get_keyword(entry, kw, &cam0, &cam1);
@@ -304,6 +486,10 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 0), cam0);
}
+ /* PF installing VF rule */
+ if (is_npc_intf_rx(intf) && actindex < mcam->bmap_entries)
+ npc_fixup_vf_rule(rvu, mcam, blkaddr, actindex, entry, &enable);
+
/* Set 'action' */
rvu_write64(rvu, blkaddr,
NPC_AF_MCAMEX_BANKX_ACTION(index, actbank), entry->action);
@@ -317,6 +503,42 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, true);
}
+void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, u16 src,
+ struct mcam_entry *entry, u8 *intf, u8 *ena)
+{
+ int sbank = npc_get_bank(mcam, src);
+ int bank, kw = 0;
+ u64 cam0, cam1;
+
+ src &= (mcam->banksize - 1);
+ bank = sbank;
+
+ for (; bank < (sbank + mcam->banks_per_entry); bank++, kw = kw + 2) {
+ cam1 = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W0(src, bank, 1));
+ cam0 = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W0(src, bank, 0));
+ npc_fill_entryword(entry, kw, cam0, cam1);
+
+ cam1 = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W1(src, bank, 1));
+ cam0 = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W1(src, bank, 0));
+ npc_fill_entryword(entry, kw + 1, cam0, cam1);
+ }
+
+ entry->action = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(src, sbank));
+ entry->vtag_action =
+ rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_TAG_ACT(src, sbank));
+ *intf = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_INTF(src, sbank, 1)) & 3;
+ *ena = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CFG(src, sbank)) & 1;
+}
+
static void npc_copy_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, u16 src, u16 dest)
{
@@ -371,34 +593,23 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, u64 chan, u8 *mac_addr)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ struct npc_install_flow_req req = { 0 };
+ struct npc_install_flow_rsp rsp = { 0 };
struct npc_mcam *mcam = &rvu->hw->mcam;
- struct mcam_entry entry = { {0} };
struct nix_rx_action action;
- int blkaddr, index, kwi;
- u64 mac = 0;
+ int blkaddr, index;
- /* AF's VFs work in promiscuous mode */
- if (is_afvf(pcifunc))
+	/* AF's VFs and SDP VFs work in promiscuous mode */
+ if (is_afvf(pcifunc) || is_sdp_vf(pcifunc))
return;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr < 0)
return;
- for (index = ETH_ALEN - 1; index >= 0; index--)
- mac |= ((u64)*mac_addr++) << (8 * index);
-
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_UCAST_ENTRY);
- /* Match ingress channel and DMAC */
- entry.kw[0] = chan;
- entry.kw_mask[0] = 0xFFFULL;
-
- kwi = NPC_PARSE_RESULT_DMAC_OFFSET / sizeof(u64);
- entry.kw[kwi] = mac;
- entry.kw_mask[kwi] = BIT_ULL(48) - 1;
-
/* Don't change the action if entry is already enabled
* Otherwise RSS action may get overwritten.
*/
@@ -411,32 +622,36 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
action.pf_func = pcifunc;
}
- entry.action = *(u64 *)&action;
- npc_config_mcam_entry(rvu, mcam, blkaddr, index,
- NIX_INTF_RX, &entry, true);
+ req.default_rule = 1;
+ ether_addr_copy(req.packet.dmac, mac_addr);
+ eth_broadcast_addr((u8 *)&req.mask.dmac);
+ req.features = BIT_ULL(NPC_DMAC);
+ req.channel = chan;
+ req.chan_mask = 0xFFFU;
+ req.intf = pfvf->nix_rx_intf;
+ req.op = action.op;
+ req.hdr.pcifunc = 0; /* AF is requester */
+ req.vf = action.pf_func;
+ req.index = action.index;
+ req.match_id = action.match_id;
+ req.flow_key_alg = action.flow_key_alg;
- /* add VLAN matching, setup action and save entry back for later */
- entry.kw[0] |= (NPC_LT_LB_STAG_QINQ | NPC_LT_LB_CTAG) << 20;
- entry.kw_mask[0] |= (NPC_LT_LB_STAG_QINQ & NPC_LT_LB_CTAG) << 20;
-
- entry.vtag_action = VTAG0_VALID_BIT |
- FIELD_PREP(VTAG0_TYPE_MASK, 0) |
- FIELD_PREP(VTAG0_LID_MASK, NPC_LID_LA) |
- FIELD_PREP(VTAG0_RELPTR_MASK, 12);
-
- memcpy(&pfvf->entry, &entry, sizeof(entry));
+ rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
}
void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
- int nixlf, u64 chan, bool allmulti)
+ int nixlf, u64 chan, u8 chan_cnt)
{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
struct npc_mcam *mcam = &rvu->hw->mcam;
- int blkaddr, ucast_idx, index, kwi;
- struct mcam_entry entry = { {0} };
- struct nix_rx_action action = { };
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct npc_install_flow_req req = { 0 };
+ struct npc_install_flow_rsp rsp = { 0 };
+ int blkaddr, ucast_idx, index;
+ struct nix_rx_action action;
+ u64 relaxed_mask;
- /* Only PF or AF VF can add a promiscuous entry */
- if ((pcifunc & RVU_PFVF_FUNC_MASK) && !is_afvf(pcifunc))
+ if (!hw->cap.nix_rx_multicast && is_cgx_vf(rvu, pcifunc))
return;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
@@ -445,39 +660,71 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_PROMISC_ENTRY);
-
- entry.kw[0] = chan;
- entry.kw_mask[0] = 0xFFFULL;
-
- if (allmulti) {
- kwi = NPC_KEXOF_DMAC / sizeof(u64);
- entry.kw[kwi] = BIT_ULL(40); /* LSB bit of 1st byte in DMAC */
- entry.kw_mask[kwi] = BIT_ULL(40);
- }
-
- ucast_idx = npc_get_nixlf_mcam_index(mcam, pcifunc,
- nixlf, NIXLF_UCAST_ENTRY);
+ if (is_cgx_vf(rvu, pcifunc))
+ index = npc_get_nixlf_mcam_index(mcam,
+ pcifunc & ~RVU_PFVF_FUNC_MASK,
+ nixlf, NIXLF_PROMISC_ENTRY);
/* If the corresponding PF's ucast action is RSS,
* use the same action for promisc also
*/
+ ucast_idx = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_UCAST_ENTRY);
if (is_mcam_entry_enabled(rvu, mcam, blkaddr, ucast_idx))
*(u64 *)&action = npc_get_mcam_action(rvu, mcam,
- blkaddr, ucast_idx);
+ blkaddr, ucast_idx);
if (action.op != NIX_RX_ACTIONOP_RSS) {
*(u64 *)&action = 0x00;
action.op = NIX_RX_ACTIONOP_UCAST;
- action.pf_func = pcifunc;
}
- entry.action = *(u64 *)&action;
- npc_config_mcam_entry(rvu, mcam, blkaddr, index,
- NIX_INTF_RX, &entry, true);
+	/* RX_ACTION set to MCAST for CGX PFs */
+ if (hw->cap.nix_rx_multicast && pfvf->use_mce_list &&
+ is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) {
+ *(u64 *)&action = 0x00;
+ action.op = NIX_RX_ACTIONOP_MCAST;
+ pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
+ action.index = pfvf->promisc_mce_idx;
+ }
+
+	/* On CN10K the upper two bits of the channel number carry the
+	 * CPT channel number. By masking out these bits in the MCAM
+	 * entry, the same entry used for NIX also allows packets
+	 * received from CPT to be parsed.
+	 */
+	if (!is_rvu_otx2(rvu))
+		req.chan_mask = NIX_CHAN_CPT_X2P_MASK;
+	else
+		req.chan_mask = 0xFFFU;
+
+ if (chan_cnt > 1) {
+ if (!is_power_of_2(chan_cnt)) {
+ dev_err(rvu->dev,
+ "%s: channel count more than 1, must be power of 2\n", __func__);
+ return;
+ }
+ relaxed_mask = GENMASK_ULL(BITS_PER_LONG_LONG - 1,
+ ilog2(chan_cnt));
+ req.chan_mask &= relaxed_mask;
+ }
+
+ req.channel = chan;
+ req.intf = pfvf->nix_rx_intf;
+ req.entry = index;
+ req.op = action.op;
+ req.hdr.pcifunc = 0; /* AF is requester */
+ req.vf = pcifunc;
+ req.index = action.index;
+ req.match_id = action.match_id;
+ req.flow_key_alg = action.flow_key_alg;
+
+ rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
}
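The channel-mask relaxation above lets one MCAM entry cover a power-of-2 range of channels by clearing the low ilog2(chan_cnt) bits of the mask (on CN10K the CPT bits are additionally dropped via NIX_CHAN_CPT_X2P_MASK). A small sketch of that mask math, with a made-up channel number:

#include <stdint.h>
#include <stdio.h>

static uint16_t relax_chan_mask(uint16_t base_mask, unsigned int chan_cnt)
{
	/* For a power-of-2 count, clear the low log2(chan_cnt) bits */
	if (chan_cnt > 1 && !(chan_cnt & (chan_cnt - 1)))
		base_mask &= ~(uint16_t)(chan_cnt - 1);
	return base_mask;
}

int main(void)
{
	uint16_t mask = relax_chan_mask(0xFFF, 8);

	/* channels 0x800..0x807 all satisfy (chan & mask) == (0x800 & mask) */
	printf("mask=0x%03x base match=0x%03x\n", mask, 0x800 & mask);
	return 0;
}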
-static void npc_enadis_promisc_entry(struct rvu *rvu, u16 pcifunc,
- int nixlf, bool enable)
+void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc,
+ int nixlf, bool enable)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
int blkaddr, index;
@@ -486,33 +733,22 @@ static void npc_enadis_promisc_entry(struct rvu *rvu, u16 pcifunc,
if (blkaddr < 0)
return;
- /* Only PF's have a promiscuous entry */
- if (pcifunc & RVU_PFVF_FUNC_MASK)
- return;
+ /* Get 'pcifunc' of PF device */
+ pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_PROMISC_ENTRY);
npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
}
-void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf)
-{
- npc_enadis_promisc_entry(rvu, pcifunc, nixlf, false);
-}
-
-void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf)
-{
- npc_enadis_promisc_entry(rvu, pcifunc, nixlf, true);
-}
-
void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, u64 chan)
{
+ struct rvu_pfvf *pfvf;
+ struct npc_install_flow_req req = { 0 };
+ struct npc_install_flow_rsp rsp = { 0 };
struct npc_mcam *mcam = &rvu->hw->mcam;
- struct mcam_entry entry = { {0} };
struct rvu_hwinfo *hw = rvu->hw;
- struct nix_rx_action action;
- struct rvu_pfvf *pfvf;
int blkaddr, index;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
@@ -526,44 +762,137 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
/* If pkt replication is not supported,
* then only PF is allowed to add a bcast match entry.
*/
- if (!hw->cap.nix_rx_multicast && pcifunc & RVU_PFVF_FUNC_MASK)
+ if (!hw->cap.nix_rx_multicast && is_vf(pcifunc))
return;
/* Get 'pcifunc' of PF device */
pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_BCAST_ENTRY);
- /* Match ingress channel */
- entry.kw[0] = chan;
- entry.kw_mask[0] = 0xfffull;
-
- /* Match broadcast MAC address.
- * DMAC is extracted at 0th bit of PARSE_KEX::KW1
- */
- entry.kw[1] = 0xffffffffffffull;
- entry.kw_mask[1] = 0xffffffffffffull;
-
- *(u64 *)&action = 0x00;
if (!hw->cap.nix_rx_multicast) {
/* Early silicon doesn't support pkt replication,
* so install entry with UCAST action, so that PF
* receives all broadcast packets.
*/
+ req.op = NIX_RX_ACTIONOP_UCAST;
+ } else {
+ req.op = NIX_RX_ACTIONOP_MCAST;
+ req.index = pfvf->bcast_mce_idx;
+ }
+
+ eth_broadcast_addr((u8 *)&req.packet.dmac);
+ eth_broadcast_addr((u8 *)&req.mask.dmac);
+ req.features = BIT_ULL(NPC_DMAC);
+ req.channel = chan;
+ req.chan_mask = 0xFFFU;
+ req.intf = pfvf->nix_rx_intf;
+ req.entry = index;
+ req.hdr.pcifunc = 0; /* AF is requester */
+ req.vf = pcifunc;
+
+ rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
+}
+
+void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
+ bool enable)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int blkaddr, index;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ /* Get 'pcifunc' of PF device */
+ pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
+
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf,
+ NIXLF_BCAST_ENTRY);
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
+}
+
+void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
+ u64 chan)
+{
+ struct npc_install_flow_req req = { 0 };
+ struct npc_install_flow_rsp rsp = { 0 };
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_hwinfo *hw = rvu->hw;
+ int blkaddr, ucast_idx, index;
+ u8 mac_addr[ETH_ALEN] = { 0 };
+ struct nix_rx_action action;
+ struct rvu_pfvf *pfvf;
+ u16 vf_func;
+
+ /* Only CGX PF/VF can add allmulticast entry */
+ if (is_afvf(pcifunc) && is_sdp_vf(pcifunc))
+ return;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ /* Get 'pcifunc' of PF device */
+ vf_func = pcifunc & RVU_PFVF_FUNC_MASK;
+ pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_ALLMULTI_ENTRY);
+
+ /* If the corresponding PF's ucast action is RSS,
+ * use the same action for multicast entry also
+ */
+ ucast_idx = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_UCAST_ENTRY);
+ if (is_mcam_entry_enabled(rvu, mcam, blkaddr, ucast_idx))
+ *(u64 *)&action = npc_get_mcam_action(rvu, mcam,
+ blkaddr, ucast_idx);
+
+ if (action.op != NIX_RX_ACTIONOP_RSS) {
+ *(u64 *)&action = 0x00;
action.op = NIX_RX_ACTIONOP_UCAST;
action.pf_func = pcifunc;
- } else {
- pfvf = rvu_get_pfvf(rvu, pcifunc);
- action.index = pfvf->bcast_mce_idx;
+ }
+
+ /* RX_ACTION set to MCAST for CGX PFs */
+ if (hw->cap.nix_rx_multicast && pfvf->use_mce_list) {
+ *(u64 *)&action = 0x00;
action.op = NIX_RX_ACTIONOP_MCAST;
+ action.index = pfvf->mcast_mce_idx;
}
- entry.action = *(u64 *)&action;
- npc_config_mcam_entry(rvu, mcam, blkaddr, index,
- NIX_INTF_RX, &entry, true);
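+ /* All multicast MACs have the group bit (LSB of the first octet) set,
+ * so matching only that bit with a single-bit mask catches every
+ * multicast frame.
+ */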
+ mac_addr[0] = 0x01; /* LSB bit of 1st byte in DMAC */
+ ether_addr_copy(req.packet.dmac, mac_addr);
+ ether_addr_copy(req.mask.dmac, mac_addr);
+ req.features = BIT_ULL(NPC_DMAC);
+
+ /* On cn10k the upper two bits of the channel number carry the CPT
+ * channel number. Masking out these bits in the mcam entry lets the
+ * same entry used for NIX also match packets received from CPT for
+ * parsing.
+ */
+ if (!is_rvu_otx2(rvu))
+ req.chan_mask = NIX_CHAN_CPT_X2P_MASK;
+ else
+ req.chan_mask = 0xFFFU;
+
+ req.channel = chan;
+ req.intf = pfvf->nix_rx_intf;
+ req.entry = index;
+ req.op = action.op;
+ req.hdr.pcifunc = 0; /* AF is requester */
+ req.vf = pcifunc | vf_func;
+ req.index = action.index;
+ req.match_id = action.match_id;
+ req.flow_key_alg = action.flow_key_alg;
+
+ rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
}
-void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, bool enable)
+void rvu_npc_enable_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
+ bool enable)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
int blkaddr, index;
@@ -575,16 +904,62 @@ void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, bool enable)
/* Get 'pcifunc' of PF device */
pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
- index = npc_get_nixlf_mcam_index(mcam, pcifunc, 0, NIXLF_BCAST_ENTRY);
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf,
+ NIXLF_ALLMULTI_ENTRY);
npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
}
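+/* Rewrite the RX action of every MCAM entry that steers traffic to this
+ * VF, skipping rules added via ntuple filters. Each entry is disabled
+ * around the action update so the hardware never uses a half-written
+ * action.
+ */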
+static void npc_update_vf_flow_entry(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, u16 pcifunc, u64 rx_action)
+{
+ int actindex, index, bank, entry;
+ struct rvu_npc_mcam_rule *rule;
+ bool enable, update;
+
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK))
+ return;
+
+ mutex_lock(&mcam->lock);
+ for (index = 0; index < mcam->bmap_entries; index++) {
+ if (mcam->entry2target_pffunc[index] == pcifunc) {
+ update = true;
+ /* update not needed for the rules added via ntuple filters */
+ list_for_each_entry(rule, &mcam->mcam_rules, list) {
+ if (rule->entry == index)
+ update = false;
+ }
+ if (!update)
+ continue;
+ bank = npc_get_bank(mcam, index);
+ actindex = index;
+ entry = index & (mcam->banksize - 1);
+
+ /* read vf flow entry enable status */
+ enable = is_mcam_entry_enabled(rvu, mcam, blkaddr,
+ actindex);
+ /* disable before mcam entry update */
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex,
+ false);
+ /* update 'action' */
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(entry, bank),
+ rx_action);
+ if (enable)
+ npc_enable_mcam_entry(rvu, mcam, blkaddr,
+ actindex, true);
+ }
+ }
+ mutex_unlock(&mcam->lock);
+}
+
void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
int group, int alg_idx, int mcam_index)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_hwinfo *hw = rvu->hw;
struct nix_rx_action action;
int blkaddr, index, bank;
+ struct rvu_pfvf *pfvf;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr < 0)
@@ -621,13 +996,27 @@ void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
rvu_write64(rvu, blkaddr,
NPC_AF_MCAMEX_BANKX_ACTION(index, bank), *(u64 *)&action);
+ /* Update the VF flow rule action with the VF default entry action,
+ * since the PF installing a flow rule on behalf of a VF cannot
+ * specify the RX action explicitly (a dataplane restriction).
+ */
+ if (mcam_index < 0)
+ npc_update_vf_flow_entry(rvu, mcam, blkaddr, pcifunc,
+ *(u64 *)&action);
+
+ /* Propagate the action change to the default rule as well */
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ if (pfvf->def_ucast_rule)
+ pfvf->def_ucast_rule->rx_action = action;
+
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_PROMISC_ENTRY);
/* If PF's promiscuous entry is enabled,
* Set RSS action for that entry as well
*/
- if (is_mcam_entry_enabled(rvu, mcam, blkaddr, index)) {
+ if ((!hw->cap.nix_rx_multicast || !pfvf->use_mce_list) &&
+ is_mcam_entry_enabled(rvu, mcam, blkaddr, index)) {
bank = npc_get_bank(mcam, index);
index &= (mcam->banksize - 1);
@@ -635,16 +1024,49 @@ void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
NPC_AF_MCAMEX_BANKX_ACTION(index, bank),
*(u64 *)&action);
}
+}
+
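+/* Enable/disable the default BCAST/ALLMULTI/PROMISC entry of a PF/VF.
+ * On silicon with packet replication the PF/VF's NIXLF is also added to
+ * or removed from the corresponding MCE list.
+ */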
+void npc_enadis_default_mce_entry(struct rvu *rvu, u16 pcifunc,
+ int nixlf, int type, bool enable)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct nix_mce_list *mce_list;
+ int index, blkaddr, mce_idx;
+ struct rvu_pfvf *pfvf;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc & ~RVU_PFVF_FUNC_MASK,
+ nixlf, type);
- rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
+ /* Toggle the MCAM entry directly when packet replication is not supported by hw */
+ if (!hw->cap.nix_rx_multicast && !is_vf(pcifunc)) {
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
+ return;
+ }
+
+ /* Return in case the MCE list is not enabled */
+ pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
+ if (hw->cap.nix_rx_multicast && is_vf(pcifunc) &&
+ type != NIXLF_BCAST_ENTRY && !pfvf->use_mce_list)
+ return;
+
+ nix_get_mce_list(rvu, pcifunc, type, &mce_list, &mce_idx);
+
+ nix_update_mce_list(rvu, pcifunc, mce_list,
+ mce_idx, index, enable);
+ if (enable)
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
}
static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc,
int nixlf, bool enable)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
- struct nix_rx_action action;
- int index, bank, blkaddr;
+ int index, blkaddr;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr < 0)
@@ -655,56 +1077,47 @@ static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc,
nixlf, NIXLF_UCAST_ENTRY);
npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
- /* For PF, ena/dis promisc and bcast MCAM match entries.
- * For VFs add/delete from bcast list when RX multicast
- * feature is present.
+ /* Nothing to do for VFs on platforms where pkt replication
+ * is not supported
*/
- if (pcifunc & RVU_PFVF_FUNC_MASK && !rvu->hw->cap.nix_rx_multicast)
+ if ((pcifunc & RVU_PFVF_FUNC_MASK) && !rvu->hw->cap.nix_rx_multicast)
return;
- /* For bcast, enable/disable only if it's action is not
- * packet replication, incase if action is replication
- * then this PF/VF's nixlf is removed from bcast replication
- * list.
- */
- index = npc_get_nixlf_mcam_index(mcam, pcifunc & ~RVU_PFVF_FUNC_MASK,
- nixlf, NIXLF_BCAST_ENTRY);
- bank = npc_get_bank(mcam, index);
- *(u64 *)&action = rvu_read64(rvu, blkaddr,
- NPC_AF_MCAMEX_BANKX_ACTION(index & (mcam->banksize - 1), bank));
-
- /* VFs will not have BCAST entry */
- if (action.op != NIX_RX_ACTIONOP_MCAST &&
- !(pcifunc & RVU_PFVF_FUNC_MASK)) {
- npc_enable_mcam_entry(rvu, mcam,
- blkaddr, index, enable);
- } else {
- nix_update_bcast_mce_list(rvu, pcifunc, enable);
- /* Enable PF's BCAST entry for packet replication */
- rvu_npc_enable_bcast_entry(rvu, pcifunc, enable);
- }
-
- if (enable)
- rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf);
- else
- rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf);
-
- rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
+ /* add/delete pf_func to broadcast MCE list */
+ npc_enadis_default_mce_entry(rvu, pcifunc, nixlf,
+ NIXLF_BCAST_ENTRY, enable);
}
void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
{
+ if (nixlf < 0)
+ return;
+
npc_enadis_default_entries(rvu, pcifunc, nixlf, false);
+
+ /* Disable allmulti and promisc MCAM entries */
+ npc_enadis_default_mce_entry(rvu, pcifunc, nixlf,
+ NIXLF_ALLMULTI_ENTRY, false);
+ npc_enadis_default_mce_entry(rvu, pcifunc, nixlf,
+ NIXLF_PROMISC_ENTRY, false);
}
void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
{
+ if (nixlf < 0)
+ return;
+
+ /* Enables only broadcast match entry. Promisc/Allmulti are enabled
+ * in set_rx_mode mbox handler.
+ */
npc_enadis_default_entries(rvu, pcifunc, nixlf, true);
}
void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_npc_mcam_rule *rule, *tmp;
int blkaddr;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
@@ -713,12 +1126,56 @@ void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
mutex_lock(&mcam->lock);
- /* Disable and free all MCAM entries mapped to this 'pcifunc' */
+ /* Disable MCAM entries directing traffic to this 'pcifunc' */
+ list_for_each_entry_safe(rule, tmp, &mcam->mcam_rules, list) {
+ if (is_npc_intf_rx(rule->intf) &&
+ rule->rx_action.pf_func == pcifunc &&
+ rule->rx_action.op != NIX_RX_ACTIONOP_MCAST) {
+ npc_enable_mcam_entry(rvu, mcam, blkaddr,
+ rule->entry, false);
+ rule->enable = false;
+ /* Indicate that default rule is disabled */
+ if (rule->default_rule) {
+ pfvf->def_ucast_rule = NULL;
+ list_del(&rule->list);
+ kfree(rule);
+ }
+ }
+ }
+
+ mutex_unlock(&mcam->lock);
+
+ npc_mcam_disable_flows(rvu, pcifunc);
+
+ rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
+}
+
+void rvu_npc_free_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_npc_mcam_rule *rule, *tmp;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ mutex_lock(&mcam->lock);
+
+ /* Free all MCAM entries owned by this 'pcifunc' */
npc_mcam_free_all_entries(rvu, mcam, blkaddr, pcifunc);
- /* Free all MCAM counters mapped to this 'pcifunc' */
+ /* Free all MCAM counters owned by this 'pcifunc' */
npc_mcam_free_all_counters(rvu, mcam, pcifunc);
+ /* Delete MCAM entries owned by this 'pcifunc' */
+ list_for_each_entry_safe(rule, tmp, &mcam->mcam_rules, list) {
+ if (rule->owner == pcifunc && !rule->default_rule) {
+ list_del(&rule->list);
+ kfree(rule);
+ }
+ }
+
mutex_unlock(&mcam->lock);
rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
@@ -732,47 +1189,104 @@ void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
rvu_write64(rvu, blkaddr, \
NPC_AF_INTFX_LDATAX_FLAGSX_CFG(intf, ld, flags), cfg)
-static void npc_program_mkex_profile(struct rvu *rvu, int blkaddr,
- const struct npc_mcam_kex *mkex)
+static void npc_program_mkex_rx(struct rvu *rvu, int blkaddr,
+ const struct npc_mcam_kex *mkex, u8 intf)
{
int lid, lt, ld, fl;
- rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX),
- mkex->keyx_cfg[NIX_INTF_RX]);
- rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX),
- mkex->keyx_cfg[NIX_INTF_TX]);
+ if (is_npc_intf_tx(intf))
+ return;
- for (ld = 0; ld < NPC_MAX_LD; ld++)
- rvu_write64(rvu, blkaddr, NPC_AF_KEX_LDATAX_FLAGS_CFG(ld),
- mkex->kex_ld_flags[ld]);
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf),
+ mkex->keyx_cfg[NIX_INTF_RX]);
+ /* Program LDATA */
for (lid = 0; lid < NPC_MAX_LID; lid++) {
for (lt = 0; lt < NPC_MAX_LT; lt++) {
- for (ld = 0; ld < NPC_MAX_LD; ld++) {
- SET_KEX_LD(NIX_INTF_RX, lid, lt, ld,
+ for (ld = 0; ld < NPC_MAX_LD; ld++)
+ SET_KEX_LD(intf, lid, lt, ld,
mkex->intf_lid_lt_ld[NIX_INTF_RX]
[lid][lt][ld]);
-
- SET_KEX_LD(NIX_INTF_TX, lid, lt, ld,
- mkex->intf_lid_lt_ld[NIX_INTF_TX]
- [lid][lt][ld]);
- }
}
}
-
+ /* Program LFLAGS */
for (ld = 0; ld < NPC_MAX_LD; ld++) {
- for (fl = 0; fl < NPC_MAX_LFL; fl++) {
- SET_KEX_LDFLAGS(NIX_INTF_RX, ld, fl,
+ for (fl = 0; fl < NPC_MAX_LFL; fl++)
+ SET_KEX_LDFLAGS(intf, ld, fl,
mkex->intf_ld_flags[NIX_INTF_RX]
[ld][fl]);
+ }
+}
+
+static void npc_program_mkex_tx(struct rvu *rvu, int blkaddr,
+ const struct npc_mcam_kex *mkex, u8 intf)
+{
+ int lid, lt, ld, fl;
+
+ if (is_npc_intf_rx(intf))
+ return;
+
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf),
+ mkex->keyx_cfg[NIX_INTF_TX]);
- SET_KEX_LDFLAGS(NIX_INTF_TX, ld, fl,
+ /* Program LDATA */
+ for (lid = 0; lid < NPC_MAX_LID; lid++) {
+ for (lt = 0; lt < NPC_MAX_LT; lt++) {
+ for (ld = 0; ld < NPC_MAX_LD; ld++)
+ SET_KEX_LD(intf, lid, lt, ld,
+ mkex->intf_lid_lt_ld[NIX_INTF_TX]
+ [lid][lt][ld]);
+ }
+ }
+ /* Program LFLAGS */
+ for (ld = 0; ld < NPC_MAX_LD; ld++) {
+ for (fl = 0; fl < NPC_MAX_LFL; fl++)
+ SET_KEX_LDFLAGS(intf, ld, fl,
mkex->intf_ld_flags[NIX_INTF_TX]
[ld][fl]);
- }
}
}
+static void npc_program_mkex_profile(struct rvu *rvu, int blkaddr,
+ const struct npc_mcam_kex *mkex)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u8 intf;
+ int ld;
+
+ for (ld = 0; ld < NPC_MAX_LD; ld++)
+ rvu_write64(rvu, blkaddr, NPC_AF_KEX_LDATAX_FLAGS_CFG(ld),
+ mkex->kex_ld_flags[ld]);
+
+ for (intf = 0; intf < hw->npc_intfs; intf++) {
+ npc_program_mkex_rx(rvu, blkaddr, mkex, intf);
+ npc_program_mkex_tx(rvu, blkaddr, mkex, intf);
+ }
+}
+
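+/* Map the profile image advertised by ATF via rvu->fwdata (physical
+ * address and size) into the kernel's address space.
+ */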
+static int npc_fwdb_prfl_img_map(struct rvu *rvu, void __iomem **prfl_img_addr,
+ u64 *size)
+{
+ u64 prfl_addr, prfl_sz;
+
+ if (!rvu->fwdata)
+ return -EINVAL;
+
+ prfl_addr = rvu->fwdata->mcam_addr;
+ prfl_sz = rvu->fwdata->mcam_sz;
+
+ if (!prfl_addr || !prfl_sz)
+ return -EINVAL;
+
+ *prfl_img_addr = ioremap_wc(prfl_addr, prfl_sz);
+ if (!(*prfl_img_addr))
+ return -ENOMEM;
+
+ *size = prfl_sz;
+
+ return 0;
+}
+
#define MKEX_END_SIGN 0xdeadbeef
static void npc_load_mkex_profile(struct rvu *rvu, int blkaddr,
@@ -781,36 +1295,31 @@ static void npc_load_mkex_profile(struct rvu *rvu, int blkaddr,
struct device *dev = &rvu->pdev->dev;
struct npc_mcam_kex *mcam_kex;
void *mkex_prfl_addr = NULL;
- u64 prfl_addr, prfl_sz;
+ u64 prfl_sz;
+ int ret;
+ /* Order of precedence (high to low):
+ * 1. Via mkex_profile, loaded from ATF.
+ * 2. Built-in KEX profile from npc_mkex_default.
+ */
/* If the user has not selected an mkex profile */
- if (!strncmp(mkex_profile, def_pfl_name, MKEX_NAME_LEN))
- goto program_mkex;
-
- if (!rvu->fwdata)
- goto program_mkex;
- prfl_addr = rvu->fwdata->mcam_addr;
- prfl_sz = rvu->fwdata->mcam_sz;
-
- if (!prfl_addr || !prfl_sz)
+ if (rvu->kpu_fwdata_sz ||
+ !strncmp(mkex_profile, def_pfl_name, MKEX_NAME_LEN))
goto program_mkex;
- mkex_prfl_addr = memremap(prfl_addr, prfl_sz, MEMREMAP_WC);
- if (!mkex_prfl_addr)
+ /* Setting up the mapping for mkex profile image */
+ ret = npc_fwdb_prfl_img_map(rvu, &mkex_prfl_addr, &prfl_sz);
+ if (ret < 0)
goto program_mkex;
- mcam_kex = (struct npc_mcam_kex *)mkex_prfl_addr;
+ mcam_kex = (struct npc_mcam_kex __force *)mkex_prfl_addr;
while (((s64)prfl_sz > 0) && (mcam_kex->mkex_sign != MKEX_END_SIGN)) {
/* Compare with mkex mod_param name string */
if (mcam_kex->mkex_sign == MKEX_SIGN &&
!strncmp(mcam_kex->name, mkex_profile, MKEX_NAME_LEN)) {
- /* Due to an errata (35786) in A0/B0 pass silicon,
- * parse nibble enable configuration has to be
- * identical for both Rx and Tx interfaces.
- */
- if (!is_rvu_96xx_B0(rvu) ||
- mcam_kex->keyx_cfg[NIX_INTF_RX] == mcam_kex->keyx_cfg[NIX_INTF_TX])
+ /* If profile is valid, switch to it. */
+ if (is_parse_nibble_config_valid(rvu, mcam_kex))
rvu->kpu.mkex = mcam_kex;
goto program_mkex;
}
@@ -902,6 +1411,7 @@ static void npc_program_kpu_profile(struct rvu *rvu, int blkaddr, int kpu,
const struct npc_kpu_profile *profile)
{
int entry, num_entries, max_entries;
+ u64 entry_mask;
if (profile->cam_entries != profile->action_entries) {
dev_err(rvu->dev,
@@ -909,7 +1419,7 @@ static void npc_program_kpu_profile(struct rvu *rvu, int blkaddr, int kpu,
kpu, profile->cam_entries, profile->action_entries);
}
- max_entries = rvu_read64(rvu, blkaddr, NPC_AF_CONST1) & 0xFFF;
+ max_entries = rvu->hw->npc_kpu_entries;
/* Program CAM match entries for previous KPU extracted data */
num_entries = min_t(int, profile->cam_entries, max_entries);
@@ -925,8 +1435,12 @@ static void npc_program_kpu_profile(struct rvu *rvu, int blkaddr, int kpu,
/* Enable all programmed entries */
num_entries = min_t(int, profile->action_entries, profile->cam_entries);
+ entry_mask = enable_mask(num_entries);
+ /* Disable first KPU_MAX_CST_ENT entries for built-in profile */
+ if (!rvu->kpu.custom)
+ entry_mask |= GENMASK_ULL(KPU_MAX_CST_ENT - 1, 0);
rvu_write64(rvu, blkaddr,
- NPC_AF_KPUX_ENTRY_DISX(kpu, 0), enable_mask(num_entries));
+ NPC_AF_KPUX_ENTRY_DISX(kpu, 0), entry_mask);
if (num_entries > 64) {
rvu_write64(rvu, blkaddr,
NPC_AF_KPUX_ENTRY_DISX(kpu, 1),
@@ -939,6 +1453,7 @@ static void npc_program_kpu_profile(struct rvu *rvu, int blkaddr, int kpu,
static int npc_prepare_default_kpu(struct npc_kpu_profile_adapter *profile)
{
+ profile->custom = 0;
profile->name = def_pfl_name;
profile->version = NPC_KPU_PROFILE_VER;
profile->ikpu = ikpu_action_entries;
@@ -951,10 +1466,253 @@ static int npc_prepare_default_kpu(struct npc_kpu_profile_adapter *profile)
return 0;
}
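+/* Validate a firmware-supplied KPU profile (size, signature, version and
+ * KPU count) and, if acceptable, overlay its CAM/action entries on top of
+ * the default profile, fixing up endianness along the way.
+ */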
+static int npc_apply_custom_kpu(struct rvu *rvu,
+ struct npc_kpu_profile_adapter *profile)
+{
+ size_t hdr_sz = sizeof(struct npc_kpu_profile_fwdata), offset = 0;
+ struct npc_kpu_profile_fwdata *fw = rvu->kpu_fwdata;
+ struct npc_kpu_profile_action *action;
+ struct npc_kpu_profile_cam *cam;
+ struct npc_kpu_fwdata *fw_kpu;
+ int entries;
+ u16 kpu, entry;
+
+ if (rvu->kpu_fwdata_sz < hdr_sz) {
+ dev_warn(rvu->dev, "Invalid KPU profile size\n");
+ return -EINVAL;
+ }
+ if (le64_to_cpu(fw->signature) != KPU_SIGN) {
+ dev_warn(rvu->dev, "Invalid KPU profile signature %llx\n",
+ fw->signature);
+ return -EINVAL;
+ }
+ profile->custom = 1;
+ profile->name = fw->name;
+ profile->version = le64_to_cpu(fw->version);
+ profile->mkex = &fw->mkex;
+
+ /* Verify that the profile uses a known (major) structure version */
+ if (NPC_KPU_VER_MAJ(profile->version) >
+ NPC_KPU_VER_MAJ(NPC_KPU_PROFILE_VER)) {
+ dev_warn(rvu->dev, "Not supported Major version: %d > %d\n",
+ NPC_KPU_VER_MAJ(profile->version),
+ NPC_KPU_VER_MAJ(NPC_KPU_PROFILE_VER));
+ return -EINVAL;
+ }
+ /* Verify if profile is aligned with the required kernel changes */
+ if (NPC_KPU_VER_MIN(profile->version) <
+ NPC_KPU_VER_MIN(NPC_KPU_PROFILE_VER)) {
+ dev_warn(rvu->dev,
+ "Invalid KPU profile version: %d.%d.%d expected version <= %d.%d.%d\n",
+ NPC_KPU_VER_MAJ(profile->version),
+ NPC_KPU_VER_MIN(profile->version),
+ NPC_KPU_VER_PATCH(profile->version),
+ NPC_KPU_VER_MAJ(NPC_KPU_PROFILE_VER),
+ NPC_KPU_VER_MIN(NPC_KPU_PROFILE_VER),
+ NPC_KPU_VER_PATCH(NPC_KPU_PROFILE_VER));
+ return -EINVAL;
+ }
+ /* Verify if profile fits the HW */
+ if (fw->kpus > profile->kpus) {
+ dev_warn(rvu->dev, "Not enough KPUs: %d > %ld\n", fw->kpus,
+ profile->kpus);
+ return -EINVAL;
+ }
+ /* Update adapter structure and ensure endianness where needed. */
+ profile->lt_def = &fw->lt_def;
+
+ for (kpu = 0; kpu < fw->kpus; kpu++) {
+ fw_kpu = (struct npc_kpu_fwdata *)(fw->data + offset);
+ if (fw_kpu->entries > KPU_MAX_CST_ENT)
+ dev_warn(rvu->dev,
+ "Too many custom entries on KPU%d: %d > %d\n",
+ kpu, fw_kpu->entries, KPU_MAX_CST_ENT);
+ entries = min(fw_kpu->entries, KPU_MAX_CST_ENT);
+ cam = (struct npc_kpu_profile_cam *)fw_kpu->data;
+ offset += sizeof(*fw_kpu) + fw_kpu->entries * sizeof(*cam);
+ action = (struct npc_kpu_profile_action *)(fw->data + offset);
+ offset += fw_kpu->entries * sizeof(*action);
+ if (rvu->kpu_fwdata_sz < hdr_sz + offset) {
+ dev_warn(rvu->dev,
+ "Profile size mismatch on KPU%i parsing.\n",
+ kpu + 1);
+ return -EINVAL;
+ }
+ /* Fix endianness and update */
+ for (entry = 0; entry < entries; entry++) {
+ cam[entry].dp0 = le16_to_cpu(cam[entry].dp0);
+ cam[entry].dp0_mask = le16_to_cpu(cam[entry].dp0_mask);
+ cam[entry].dp1 = le16_to_cpu(cam[entry].dp1);
+ cam[entry].dp1_mask = le16_to_cpu(cam[entry].dp1_mask);
+ cam[entry].dp2 = le16_to_cpu(cam[entry].dp2);
+ cam[entry].dp2_mask = le16_to_cpu(cam[entry].dp2_mask);
+ profile->kpu[kpu].cam[entry] = cam[entry];
+ profile->kpu[kpu].action[entry] = action[entry];
+ }
+ }
+
+ return 0;
+}
+
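+/* Check whether the mapped image is the single KPU profile being asked
+ * for; if so, record its address and size for later use.
+ */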
+static int npc_load_kpu_prfl_img(struct rvu *rvu, void __iomem *prfl_addr,
+ u64 prfl_sz, const char *kpu_profile)
+{
+ struct npc_kpu_profile_fwdata *kpu_data = NULL;
+ int rc = -EINVAL;
+
+ kpu_data = (struct npc_kpu_profile_fwdata __force *)prfl_addr;
+ if (le64_to_cpu(kpu_data->signature) == KPU_SIGN &&
+ !strncmp(kpu_data->name, kpu_profile, KPU_NAME_LEN)) {
+ dev_info(rvu->dev, "Loading KPU profile from firmware db: %s\n",
+ kpu_profile);
+ rvu->kpu_fwdata = kpu_data;
+ rvu->kpu_fwdata_sz = prfl_sz;
+ rvu->kpu_prfl_addr = prfl_addr;
+ rc = 0;
+ }
+
+ return rc;
+}
+
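+/* The firmware database may hold either a single KPU profile or a
+ * coalesced image containing several; detect which and load the profile
+ * whose name matches.
+ */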
+static int npc_fwdb_detect_load_prfl_img(struct rvu *rvu, uint64_t prfl_sz,
+ const char *kpu_profile)
+{
+ struct npc_coalesced_kpu_prfl *img_data = NULL;
+ int i = 0, rc = -EINVAL;
+ void __iomem *kpu_prfl_addr;
+ u16 offset;
+
+ img_data = (struct npc_coalesced_kpu_prfl __force *)rvu->kpu_prfl_addr;
+ if (le64_to_cpu(img_data->signature) == KPU_SIGN &&
+ !strncmp(img_data->name, kpu_profile, KPU_NAME_LEN)) {
+ /* Loaded profile is a single KPU profile. */
+ rc = npc_load_kpu_prfl_img(rvu, rvu->kpu_prfl_addr,
+ prfl_sz, kpu_profile);
+ goto done;
+ }
+
+ /* Otherwise the mapped image is a coalesced set of profiles; compute
+ * the offset of the first KPU profile and walk the list looking for
+ * one whose name matches.
+ */
+ offset = offsetof(struct npc_coalesced_kpu_prfl, prfl_sz) +
+ (img_data->num_prfl * sizeof(uint16_t));
+ while (i < img_data->num_prfl) {
+ /* Profile image offsets are rounded up to the next multiple of 8. */
+ offset = ALIGN_8B_CEIL(offset);
+ kpu_prfl_addr = (void __iomem *)((uintptr_t)rvu->kpu_prfl_addr +
+ offset);
+ rc = npc_load_kpu_prfl_img(rvu, kpu_prfl_addr,
+ img_data->prfl_sz[i], kpu_profile);
+ if (!rc)
+ break;
+ /* Advance to the next profile image based on this profile's size. */
+ offset += img_data->prfl_sz[i];
+ i++;
+ }
+done:
+ return rc;
+}
+
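+/* Try to load the requested KPU profile from the firmware database,
+ * unmapping the image again if nothing usable is found.
+ */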
+static int npc_load_kpu_profile_fwdb(struct rvu *rvu, const char *kpu_profile)
+{
+ int ret = -EINVAL;
+ u64 prfl_sz;
+
+ /* Setting up the mapping for NPC profile image */
+ ret = npc_fwdb_prfl_img_map(rvu, &rvu->kpu_prfl_addr, &prfl_sz);
+ if (ret < 0)
+ goto done;
+
+ /* Detect if profile is coalesced or single KPU profile and load */
+ ret = npc_fwdb_detect_load_prfl_img(rvu, prfl_sz, kpu_profile);
+ if (ret == 0)
+ goto done;
+
+ /* Cleaning up if KPU profile image from fwdata is not valid. */
+ if (rvu->kpu_prfl_addr) {
+ iounmap(rvu->kpu_prfl_addr);
+ rvu->kpu_prfl_addr = NULL;
+ rvu->kpu_fwdata_sz = 0;
+ rvu->kpu_fwdata = NULL;
+ }
+
+done:
+ return ret;
+}
+
static void npc_load_kpu_profile(struct rvu *rvu)
{
struct npc_kpu_profile_adapter *profile = &rvu->kpu;
+ const char *kpu_profile = rvu->kpu_pfl_name;
+ const struct firmware *fw = NULL;
+ bool retry_fwdb = false;
+
+ /* If the user has not requested a custom profile */
+ if (!strncmp(kpu_profile, def_pfl_name, KPU_NAME_LEN))
+ goto revert_to_default;
+ /* First prepare default KPU, then we'll customize top entries. */
+ npc_prepare_default_kpu(profile);
+
+ /* Order of precedence for loading the NPC profile (high to low):
+ * 1. Firmware binary in the filesystem.
+ * 2. Firmware database method.
+ * 3. Default KPU profile.
+ */
+ if (!request_firmware(&fw, kpu_profile, rvu->dev)) {
+ dev_info(rvu->dev, "Loading KPU profile from firmware: %s\n",
+ kpu_profile);
+ rvu->kpu_fwdata = kzalloc(fw->size, GFP_KERNEL);
+ if (rvu->kpu_fwdata) {
+ memcpy(rvu->kpu_fwdata, fw->data, fw->size);
+ rvu->kpu_fwdata_sz = fw->size;
+ }
+ release_firmware(fw);
+ retry_fwdb = true;
+ goto program_kpu;
+ }
+
+load_image_fwdb:
+ /* Loading the KPU profile using firmware database */
+ if (npc_load_kpu_profile_fwdb(rvu, kpu_profile))
+ goto revert_to_default;
+
+program_kpu:
+ /* Apply profile customization if firmware was loaded. */
+ if (!rvu->kpu_fwdata_sz || npc_apply_custom_kpu(rvu, profile)) {
+ /* If the image from the firmware filesystem failed to load or was
+ * invalid, retry with the firmware database method.
+ */
+ if (rvu->kpu_fwdata || rvu->kpu_fwdata_sz) {
+ /* Release whichever image source was in use: an iomapped fwdb
+ * image or a filesystem copy.
+ */
+ if (rvu->kpu_prfl_addr) {
+ iounmap(rvu->kpu_prfl_addr);
+ rvu->kpu_prfl_addr = NULL;
+ } else {
+ kfree(rvu->kpu_fwdata);
+ }
+ rvu->kpu_fwdata = NULL;
+ rvu->kpu_fwdata_sz = 0;
+ if (retry_fwdb) {
+ retry_fwdb = false;
+ goto load_image_fwdb;
+ }
+ }
+ dev_warn(rvu->dev,
+ "Can't load KPU profile %s. Using default.\n",
+ kpu_profile);
+ kfree(rvu->kpu_fwdata);
+ rvu->kpu_fwdata = NULL;
+ goto revert_to_default;
+ }
+
+ dev_info(rvu->dev, "Using custom profile '%s', version %d.%d.%d\n",
+ profile->name, NPC_KPU_VER_MAJ(profile->version),
+ NPC_KPU_VER_MIN(profile->version),
+ NPC_KPU_VER_PATCH(profile->version));
+
+ return;
+
+revert_to_default:
npc_prepare_default_kpu(profile);
}
@@ -962,10 +1720,6 @@ static void npc_parser_profile_init(struct rvu *rvu, int blkaddr)
{
struct rvu_hwinfo *hw = rvu->hw;
int num_pkinds, num_kpus, idx;
- struct npc_pkind *pkind;
-
- /* Get HW limits */
- hw->npc_kpus = (rvu_read64(rvu, blkaddr, NPC_AF_CONST) >> 8) & 0x1F;
/* Disable all KPUs and their entries */
for (idx = 0; idx < hw->npc_kpus; idx++) {
@@ -983,9 +1737,8 @@ static void npc_parser_profile_init(struct rvu *rvu, int blkaddr)
* Check HW max count to avoid configuring junk or
* writing to unsupported CSR addresses.
*/
- pkind = &hw->pkind;
num_pkinds = rvu->kpu.pkinds;
- num_pkinds = min_t(int, pkind->rsrc.max, num_pkinds);
+ num_pkinds = min_t(int, hw->npc_pkinds, num_pkinds);
for (idx = 0; idx < num_pkinds; idx++)
npc_config_kpuaction(rvu, blkaddr, &rvu->kpu.ikpu[idx], 0, idx, true);
@@ -1003,14 +1756,10 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
int nixlf_count = rvu_get_nixlf_count(rvu);
struct npc_mcam *mcam = &rvu->hw->mcam;
int rsvd, err;
+ u16 index;
+ int cntr;
u64 cfg;
- /* Get HW limits */
- cfg = rvu_read64(rvu, blkaddr, NPC_AF_CONST);
- mcam->banks = (cfg >> 44) & 0xF;
- mcam->banksize = (cfg >> 28) & 0xFFFF;
- mcam->counters.max = (cfg >> 48) & 0xFFFF;
-
/* Actual number of MCAM entries vary by entry size */
cfg = (rvu_read64(rvu, blkaddr,
NPC_AF_INTFX_KEX_CFG(0)) >> 32) & 0x07;
@@ -1077,12 +1826,6 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
mcam->hprio_count = mcam->lprio_count;
mcam->hprio_end = mcam->hprio_count;
- /* Reserve last counter for MCAM RX miss action which is set to
- * drop pkt. This way we will know how many pkts didn't match
- * any MCAM entry.
- */
- mcam->counters.max--;
- mcam->rx_miss_act_cntr = mcam->counters.max;
/* Allocate bitmap for managing MCAM counters and memory
* for saving counter to RVU PFFUNC allocation mapping.
@@ -1109,6 +1852,20 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
if (!mcam->cntr_refcnt)
goto free_mem;
+ /* Alloc memory for saving target device of mcam rule */
+ mcam->entry2target_pffunc = devm_kcalloc(rvu->dev, mcam->total_entries,
+ sizeof(u16), GFP_KERNEL);
+ if (!mcam->entry2target_pffunc)
+ goto free_mem;
+
+ for (index = 0; index < mcam->bmap_entries; index++) {
+ mcam->entry2pfvf_map[index] = NPC_MCAM_INVALID_MAP;
+ mcam->entry2cntr_map[index] = NPC_MCAM_INVALID_MAP;
+ }
+
+ for (cntr = 0; cntr < mcam->counters.max; cntr++)
+ mcam->cntr2pfvf_map[cntr] = NPC_MCAM_INVALID_MAP;
+
mutex_init(&mcam->lock);
return 0;
@@ -1118,12 +1875,125 @@ free_mem:
return -ENOMEM;
}
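+/* Cache NPC hardware limits (pkinds, KPUs, interfaces, MCAM geometry and
+ * counters) from NPC_AF_CONST/CONST1, and from NPC_AF_CONST2 on silicon
+ * that implements the extended constant set.
+ */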
+static void rvu_npc_hw_init(struct rvu *rvu, int blkaddr)
+{
+ struct npc_pkind *pkind = &rvu->hw->pkind;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u64 npc_const, npc_const1;
+ u64 npc_const2 = 0;
+
+ npc_const = rvu_read64(rvu, blkaddr, NPC_AF_CONST);
+ npc_const1 = rvu_read64(rvu, blkaddr, NPC_AF_CONST1);
+ if (npc_const1 & BIT_ULL(63))
+ npc_const2 = rvu_read64(rvu, blkaddr, NPC_AF_CONST2);
+
+ pkind->rsrc.max = NPC_UNRESERVED_PKIND_COUNT;
+ hw->npc_pkinds = (npc_const1 >> 12) & 0xFFULL;
+ hw->npc_kpu_entries = npc_const1 & 0xFFFULL;
+ hw->npc_kpus = (npc_const >> 8) & 0x1FULL;
+ hw->npc_intfs = npc_const & 0xFULL;
+ hw->npc_counters = (npc_const >> 48) & 0xFFFFULL;
+
+ mcam->banks = (npc_const >> 44) & 0xFULL;
+ mcam->banksize = (npc_const >> 28) & 0xFFFFULL;
+ hw->npc_stat_ena = BIT_ULL(9);
+ /* Extended set */
+ if (npc_const2) {
+ hw->npc_ext_set = true;
+ /* 96xx supports only match_stats, with the counter count reported
+ * in NPC_AF_CONST; STAT_SEL is at bits [0:8] and ENA at bit 9.
+ * 98xx has both match_stat and the extended set, with the counter
+ * count reported in NPC_AF_CONST2; STAT_SEL_EXT adds bits [12:14].
+ * cn10k supports only the extended set, so the counter count in
+ * NPC_AF_CONST is 0 and comes from NPC_AF_CONST2 instead; STAT_SEL
+ * grows from [0:8] to [0:11] and the ENA bit moves to bit 63.
+ */
+ if (!hw->npc_counters)
+ hw->npc_stat_ena = BIT_ULL(63);
+ hw->npc_counters = (npc_const2 >> 16) & 0xFFFFULL;
+ mcam->banksize = npc_const2 & 0xFFFFULL;
+ }
+
+ mcam->counters.max = hw->npc_counters;
+}
+
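+/* Program per-interface defaults: MCAM key size, the RX miss action
+ * (drop, counted on the reserved miss counter) and the TX miss action
+ * (send on the SQ's default channel). The TX parse-nibble config is
+ * derived from the RX config.
+ */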
+static void rvu_npc_setup_interfaces(struct rvu *rvu, int blkaddr)
+{
+ struct npc_mcam_kex *mkex = rvu->kpu.mkex;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u64 nibble_ena, rx_kex, tx_kex;
+ u8 intf;
+
+ /* Reserve last counter for MCAM RX miss action which is set to
+ * drop packet. This way we will know how many pkts didn't match
+ * any MCAM entry.
+ */
+ mcam->counters.max--;
+ mcam->rx_miss_act_cntr = mcam->counters.max;
+
+ rx_kex = mkex->keyx_cfg[NIX_INTF_RX];
+ tx_kex = mkex->keyx_cfg[NIX_INTF_TX];
+ nibble_ena = FIELD_GET(NPC_PARSE_NIBBLE, rx_kex);
+
+ nibble_ena = rvu_npc_get_tx_nibble_cfg(rvu, nibble_ena);
+ if (nibble_ena) {
+ tx_kex &= ~NPC_PARSE_NIBBLE;
+ tx_kex |= FIELD_PREP(NPC_PARSE_NIBBLE, nibble_ena);
+ mkex->keyx_cfg[NIX_INTF_TX] = tx_kex;
+ }
+
+ /* Configure RX interfaces */
+ for (intf = 0; intf < hw->npc_intfs; intf++) {
+ if (is_npc_intf_tx(intf))
+ continue;
+
+ /* Set RX MCAM search key size. LA..LE (ltype only) + Channel */
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf),
+ rx_kex);
+
+ /* If an MCAM lookup doesn't result in a match, drop the received
+ * packet, and map this action to a counter so that dropped
+ * packets are counted.
+ */
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_INTFX_MISS_ACT(intf), NIX_RX_ACTIONOP_DROP);
+
+ /* NPC_AF_INTFX_MISS_STAT_ACT[14:12] - counter[11:9]
+ * NPC_AF_INTFX_MISS_STAT_ACT[8:0] - counter[8:0]
+ */
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_INTFX_MISS_STAT_ACT(intf),
+ ((mcam->rx_miss_act_cntr >> 9) << 12) |
+ hw->npc_stat_ena | mcam->rx_miss_act_cntr);
+ }
+
+ /* Configure TX interfaces */
+ for (intf = 0; intf < hw->npc_intfs; intf++) {
+ if (is_npc_intf_rx(intf))
+ continue;
+
+ /* Extract Ltypes LID_LA to LID_LE */
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf),
+ tx_kex);
+
+ /* Set TX miss action to UCAST_DEFAULT i.e
+ * transmit the packet on NIX LF SQ's default channel.
+ */
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_INTFX_MISS_ACT(intf),
+ NIX_TX_ACTIONOP_UCAST_DEFAULT);
+ }
+}
+
int rvu_npc_init(struct rvu *rvu)
{
struct npc_kpu_profile_adapter *kpu = &rvu->kpu;
struct npc_pkind *pkind = &rvu->hw->pkind;
struct npc_mcam *mcam = &rvu->hw->mcam;
- u64 cfg, nibble_ena, rx_kex, tx_kex;
int blkaddr, entry, bank, err;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
@@ -1132,20 +2002,22 @@ int rvu_npc_init(struct rvu *rvu)
return -ENODEV;
}
+ rvu_npc_hw_init(rvu, blkaddr);
+
/* First disable all MCAM entries, to stop traffic towards NIXLFs */
- cfg = rvu_read64(rvu, blkaddr, NPC_AF_CONST);
- for (bank = 0; bank < ((cfg >> 44) & 0xF); bank++) {
- for (entry = 0; entry < ((cfg >> 28) & 0xFFFF); entry++)
+ for (bank = 0; bank < mcam->banks; bank++) {
+ for (entry = 0; entry < mcam->banksize; entry++)
rvu_write64(rvu, blkaddr,
NPC_AF_MCAMEX_BANKX_CFG(entry, bank), 0);
}
- /* Allocate resource bimap for pkind*/
- pkind->rsrc.max = (rvu_read64(rvu, blkaddr,
- NPC_AF_CONST1) >> 12) & 0xFF;
err = rvu_alloc_bitmap(&pkind->rsrc);
if (err)
return err;
+ /* Reserve PKIND#0 for LBKs. The power-on reset value of LBK_CH_PKIND
+ * is '0', so there is no need to configure the PKIND for each LBK
+ * separately.
+ */
+ rvu_alloc_rsrc(&pkind->rsrc);
/* Allocate mem for pkind to PF and channel mapping info */
pkind->pfchan_map = devm_kcalloc(rvu->dev, pkind->rsrc.max,
@@ -1181,42 +2053,21 @@ int rvu_npc_init(struct rvu *rvu)
((u64)NPC_EC_OIP4_CSUM << 32) | (NPC_EC_IIP4_CSUM << 24) |
BIT_ULL(7) | BIT_ULL(6) | BIT_ULL(2) | BIT_ULL(1));
- /* Set RX and TX side MCAM search key size.
- * LA..LD (ltype only) + Channel
- */
- rx_kex = npc_mkex_default.keyx_cfg[NIX_INTF_RX];
- tx_kex = npc_mkex_default.keyx_cfg[NIX_INTF_TX];
- nibble_ena = FIELD_GET(NPC_PARSE_NIBBLE, rx_kex);
- rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX), rx_kex);
- /* Due to an errata (35786) in A0 pass silicon, parse nibble enable
- * configuration has to be identical for both Rx and Tx interfaces.
- */
- if (is_rvu_96xx_B0(rvu)) {
- tx_kex &= ~NPC_PARSE_NIBBLE;
- tx_kex |= FIELD_PREP(NPC_PARSE_NIBBLE, nibble_ena);
- }
- rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX), tx_kex);
-
- err = npc_mcam_rsrcs_init(rvu, blkaddr);
- if (err)
- return err;
+ rvu_npc_setup_interfaces(rvu, blkaddr);
/* Configure MKEX profile */
npc_load_mkex_profile(rvu, blkaddr, rvu->mkex_pfl_name);
- /* Set TX miss action to UCAST_DEFAULT i.e
- * transmit the packet on NIX LF SQ's default channel.
- */
- rvu_write64(rvu, blkaddr, NPC_AF_INTFX_MISS_ACT(NIX_INTF_TX),
- NIX_TX_ACTIONOP_UCAST_DEFAULT);
+ err = npc_mcam_rsrcs_init(rvu, blkaddr);
+ if (err)
+ return err;
- /* If MCAM lookup doesn't result in a match, drop the received packet.
- * And map this action to a counter to count dropped pkts.
- */
- rvu_write64(rvu, blkaddr, NPC_AF_INTFX_MISS_ACT(NIX_INTF_RX),
- NIX_RX_ACTIONOP_DROP);
- rvu_write64(rvu, blkaddr, NPC_AF_INTFX_MISS_STAT_ACT(NIX_INTF_RX),
- BIT_ULL(9) | mcam->rx_miss_act_cntr);
+ err = npc_flow_steering_init(rvu, blkaddr);
+ if (err) {
+ dev_err(rvu->dev,
+ "Incorrect mkex profile loaded using default mkex\n");
+ npc_load_mkex_profile(rvu, blkaddr, def_pfl_name);
+ }
return 0;
}
@@ -1228,6 +2079,10 @@ void rvu_npc_freemem(struct rvu *rvu)
kfree(pkind->rsrc.bmap);
kfree(mcam->counters.bmap);
+ if (rvu->kpu_prfl_addr)
+ iounmap(rvu->kpu_prfl_addr);
+ else
+ kfree(rvu->kpu_fwdata);
mutex_destroy(&mcam->lock);
}
@@ -1272,6 +2127,9 @@ void rvu_npc_get_mcam_counter_alloc_info(struct rvu *rvu, u16 pcifunc,
static int npc_mcam_verify_entry(struct npc_mcam *mcam,
u16 pcifunc, int entry)
{
+ /* Entries installed by the AF itself need no ownership check */
+ if (is_pffunc_af(pcifunc))
+ return 0;
/* Verify if entry is valid and if it is indeed
* allocated to the requesting PFFUNC.
*/
@@ -1303,7 +2161,8 @@ static void npc_map_mcam_entry_and_cntr(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, u16 entry, u16 cntr)
{
u16 index = entry & (mcam->banksize - 1);
- u16 bank = npc_get_bank(mcam, entry);
+ u32 bank = npc_get_bank(mcam, entry);
+ struct rvu_hwinfo *hw = rvu->hw;
/* Set mapping and increment counter's refcnt */
mcam->entry2cntr_map[entry] = cntr;
@@ -1311,7 +2170,7 @@ static void npc_map_mcam_entry_and_cntr(struct rvu *rvu, struct npc_mcam *mcam,
/* Enable stats */
rvu_write64(rvu, blkaddr,
NPC_AF_MCAMEX_BANKX_STAT_ACT(index, bank),
- BIT_ULL(9) | cntr);
+ ((cntr >> 9) << 12) | hw->npc_stat_ena | cntr);
}
static void npc_unmap_mcam_entry_and_cntr(struct rvu *rvu,
@@ -1381,6 +2240,7 @@ static void npc_mcam_free_all_entries(struct rvu *rvu, struct npc_mcam *mcam,
npc_unmap_mcam_entry_and_cntr(rvu, mcam,
blkaddr, index,
cntr);
+ mcam->entry2target_pffunc[index] = 0x0;
}
}
}
@@ -1566,6 +2426,17 @@ static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc,
goto alloc;
}
+ /* For a VF the base MCAM match rule is set by its PF, and any
+ * further MCAM rules the VF installs on its own are concatenated
+ * with that base rule. PF entries must therefore sit at lower
+ * priority than VF entries; otherwise the base rule is always hit
+ * and the VF's own rules never match. So if the request comes from
+ * a PF and is not a priority allocation request, allocate low
+ * priority entries.
+ */
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK))
+ goto lprio_alloc;
+
/* Find out the search range for non-priority allocation request
*
* Get MCAM free entry count in middle zone.
@@ -1591,6 +2462,7 @@ static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc,
/* Not enough free entries, search all entries in reverse,
* so that low priority ones will get used up.
*/
+lprio_alloc:
reverse = true;
start = 0;
end = mcam->bmap_entries;
@@ -1716,8 +2588,11 @@ int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu,
rsp->free_count = 0;
/* Check if ref_entry is within range */
- if (req->priority && req->ref_entry >= mcam->bmap_entries)
+ if (req->priority && req->ref_entry >= mcam->bmap_entries) {
+ dev_err(rvu->dev, "%s: reference entry %d is out of range\n",
+ __func__, req->ref_entry);
return NPC_MCAM_INVALID_REQ;
+ }
/* ref_entry can't be '0' if requested priority is high.
* Can't be last entry if requested priority is low.
@@ -1730,11 +2605,15 @@ int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu,
/* Since list of allocated indices needs to be sent to requester,
* max number of non-contiguous entries per mbox msg is limited.
*/
- if (!req->contig && req->count > NPC_MAX_NONCONTIG_ENTRIES)
+ if (!req->contig && req->count > NPC_MAX_NONCONTIG_ENTRIES) {
+ dev_err(rvu->dev,
+ "%s: %d Non-contiguous MCAM entries requested is more than max (%d) allowed\n",
+ __func__, req->count, NPC_MAX_NONCONTIG_ENTRIES);
return NPC_MCAM_INVALID_REQ;
+ }
/* Alloc request from PFFUNC with no NIXLF attached should be denied */
- if (!is_nixlf_attached(rvu, pcifunc))
+ if (!is_pffunc_af(pcifunc) && !is_nixlf_attached(rvu, pcifunc))
return NPC_MCAM_ALLOC_DENIED;
return npc_mcam_alloc_entries(mcam, pcifunc, req, rsp);
@@ -1754,7 +2633,7 @@ int rvu_mbox_handler_npc_mcam_free_entry(struct rvu *rvu,
return NPC_MCAM_INVALID_REQ;
/* Free request from PFFUNC with no NIXLF attached, ignore */
- if (!is_nixlf_attached(rvu, pcifunc))
+ if (!is_pffunc_af(pcifunc) && !is_nixlf_attached(rvu, pcifunc))
return NPC_MCAM_INVALID_REQ;
mutex_lock(&mcam->lock);
@@ -1766,7 +2645,8 @@ int rvu_mbox_handler_npc_mcam_free_entry(struct rvu *rvu,
if (rc)
goto exit;
- mcam->entry2pfvf_map[req->entry] = 0;
+ mcam->entry2pfvf_map[req->entry] = NPC_MCAM_INVALID_MAP;
+ mcam->entry2target_pffunc[req->entry] = 0x0;
npc_mcam_clear_bit(mcam, req->entry);
npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, false);
@@ -1786,13 +2666,39 @@ exit:
return rc;
}
+int rvu_mbox_handler_npc_mcam_read_entry(struct rvu *rvu,
+ struct npc_mcam_read_entry_req *req,
+ struct npc_mcam_read_entry_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr, rc;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+ rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
+ if (!rc) {
+ npc_read_mcam_entry(rvu, mcam, blkaddr, req->entry,
+ &rsp->entry_data,
+ &rsp->intf, &rsp->enable);
+ }
+
+ mutex_unlock(&mcam->lock);
+ return rc;
+}
+
int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu,
struct npc_mcam_write_entry_req *req,
struct msg_rsp *rsp)
{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
struct npc_mcam *mcam = &rvu->hw->mcam;
u16 pcifunc = req->hdr.pcifunc;
int blkaddr, rc;
+ u8 nix_intf;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr < 0)
@@ -1809,12 +2715,27 @@ int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu,
goto exit;
}
- if (req->intf != NIX_INTF_RX && req->intf != NIX_INTF_TX) {
+ if (!is_npc_interface_valid(rvu, req->intf)) {
rc = NPC_MCAM_INVALID_REQ;
goto exit;
}
- npc_config_mcam_entry(rvu, mcam, blkaddr, req->entry, req->intf,
+ if (is_npc_intf_tx(req->intf))
+ nix_intf = pfvf->nix_tx_intf;
+ else
+ nix_intf = pfvf->nix_rx_intf;
+
+ if (!is_pffunc_af(pcifunc) &&
+ npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf, pcifunc)) {
+ rc = NPC_MCAM_INVALID_REQ;
+ goto exit;
+ }
+
+ /* For AF installed rules, the nix_intf should be set to target NIX */
+ if (is_pffunc_af(req->hdr.pcifunc))
+ nix_intf = req->intf;
+
+ npc_config_mcam_entry(rvu, mcam, blkaddr, req->entry, nix_intf,
&req->entry_data, req->enable_entry);
if (req->set_cntr)
@@ -1956,7 +2877,7 @@ int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu,
return NPC_MCAM_INVALID_REQ;
/* If the request is from a PFFUNC with no NIXLF attached, ignore */
- if (!is_nixlf_attached(rvu, pcifunc))
+ if (!is_pffunc_af(pcifunc) && !is_nixlf_attached(rvu, pcifunc))
return NPC_MCAM_INVALID_REQ;
/* Since list of allocated counter IDs needs to be sent to requester,
@@ -2143,6 +3064,7 @@ int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu,
struct npc_mcam_alloc_and_write_entry_req *req,
struct npc_mcam_alloc_and_write_entry_rsp *rsp)
{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
struct npc_mcam_alloc_counter_req cntr_req;
struct npc_mcam_alloc_counter_rsp cntr_rsp;
struct npc_mcam_alloc_entry_req entry_req;
@@ -2151,12 +3073,17 @@ int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu,
u16 entry = NPC_MCAM_ENTRY_INVALID;
u16 cntr = NPC_MCAM_ENTRY_INVALID;
int blkaddr, rc;
+ u8 nix_intf;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr < 0)
return NPC_MCAM_INVALID_REQ;
- if (req->intf != NIX_INTF_RX && req->intf != NIX_INTF_TX)
+ if (!is_npc_interface_valid(rvu, req->intf))
+ return NPC_MCAM_INVALID_REQ;
+
+ if (npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf,
+ req->hdr.pcifunc))
return NPC_MCAM_INVALID_REQ;
/* Try to allocate a MCAM entry */
@@ -2188,7 +3115,7 @@ int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu,
if (rc) {
/* Free allocated MCAM entry */
mutex_lock(&mcam->lock);
- mcam->entry2pfvf_map[entry] = 0;
+ mcam->entry2pfvf_map[entry] = NPC_MCAM_INVALID_MAP;
npc_mcam_clear_bit(mcam, entry);
mutex_unlock(&mcam->lock);
return rc;
@@ -2198,7 +3125,13 @@ int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu,
write_entry:
mutex_lock(&mcam->lock);
- npc_config_mcam_entry(rvu, mcam, blkaddr, entry, req->intf,
+
+ if (is_npc_intf_tx(req->intf))
+ nix_intf = pfvf->nix_tx_intf;
+ else
+ nix_intf = pfvf->nix_rx_intf;
+
+ npc_config_mcam_entry(rvu, mcam, blkaddr, entry, nix_intf,
&req->entry_data, req->enable_entry);
if (req->alloc_cntr)
@@ -2257,26 +3190,208 @@ int rvu_mbox_handler_npc_get_kex_cfg(struct rvu *rvu, struct msg_req *req,
return 0;
}
-int rvu_npc_update_rxvlan(struct rvu *rvu, u16 pcifunc, int nixlf)
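+/* Program KPU ACTION0 of the given pkind with the variable-length header
+ * offset and mask, plus a shift amount derived from that mask and the
+ * given shift direction.
+ */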
+static int
+npc_set_var_len_offset_pkind(struct rvu *rvu, u16 pcifunc, u64 pkind,
+ u8 var_len_off, u8 var_len_off_mask, u8 shift_dir)
+{
+ struct npc_kpu_action0 *act0;
+ u8 shift_count = 0;
+ int blkaddr;
+ u64 val;
+
+ if (!var_len_off_mask)
+ return -EINVAL;
+
+ if (var_len_off_mask != 0xff) {
+ if (shift_dir)
+ shift_count = __ffs(var_len_off_mask);
+ else
+ shift_count = (8 - __fls(var_len_off_mask));
+ }
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, pcifunc);
+ if (blkaddr < 0) {
+ dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
+ return -EINVAL;
+ }
+ val = rvu_read64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind));
+ act0 = (struct npc_kpu_action0 *)&val;
+ act0->var_len_shift = shift_count;
+ act0->var_len_right = shift_dir;
+ act0->var_len_mask = var_len_off_mask;
+ act0->var_len_offset = var_len_off;
+ rvu_write64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind), val);
+ return 0;
+}
+
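+/* Switch a PF between the default, EDSA, FDSA, HiGig2 and custom parse
+ * modes by programming the appropriate RX and TX pkinds and, for HiGig2,
+ * toggling the feature on the CGX MAC.
+ */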
+int
+rvu_npc_set_parse_mode(struct rvu *rvu, u16 pcifunc, u64 mode, u8 dir,
+ u64 pkind, u8 var_len_off, u8 var_len_off_mask,
+ u8 shift_dir)
+
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ int blkaddr, nixlf, rc, intf_mode;
+ int pf = rvu_get_pf(pcifunc);
+ bool enable_higig2 = false;
+ u64 rxpkind, txpkind;
+ u8 cgx_id, lmac_id;
+
+ /* use default pkind to disable edsa/higig */
+ rxpkind = rvu_npc_get_pkind(rvu, pf);
+ txpkind = NPC_TX_DEF_PKIND;
+ intf_mode = NPC_INTF_MODE_DEF;
+
+ if (mode & OTX2_PRIV_FLAGS_EDSA) {
+ rxpkind = NPC_RX_EDSA_PKIND;
+ intf_mode = NPC_INTF_MODE_EDSA;
+ } else if (mode & OTX2_PRIV_FLAGS_FDSA) {
+ rxpkind = NPC_RX_EDSA_PKIND;
+ intf_mode = NPC_INTF_MODE_FDSA;
+ } else if (mode & OTX2_PRIV_FLAGS_HIGIG) {
+ /* Silicon does not support enabling higig in time stamp mode */
+ if (pfvf->hw_rx_tstamp_en ||
+ rvu_nix_is_ptp_tx_enabled(rvu, pcifunc))
+ return NPC_AF_ERR_HIGIG_CONFIG_FAIL;
+
+ if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_HIGIG2))
+ return NPC_AF_ERR_HIGIG_NOT_SUPPORTED;
+
+ rxpkind = NPC_RX_HIGIG_PKIND;
+ txpkind = NPC_TX_HIGIG_PKIND;
+ intf_mode = NPC_INTF_MODE_HIGIG;
+ enable_higig2 = true;
+ } else if (mode & OTX2_PRIV_FLAGS_CUSTOM) {
+ if (pkind == NPC_RX_CUSTOM_PRE_L2_PKIND) {
+ rc = npc_set_var_len_offset_pkind(rvu, pcifunc, pkind,
+ var_len_off,
+ var_len_off_mask,
+ shift_dir);
+ if (rc)
+ return rc;
+ }
+ rxpkind = pkind;
+ txpkind = pkind;
+ }
+
+ if (dir & PKIND_RX) {
+ /* rx pkind set req valid only for cgx mapped PFs */
+ if (!is_cgx_config_permitted(rvu, pcifunc))
+ return 0;
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ rc = cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu),
+ lmac_id, rxpkind);
+ if (rc)
+ return rc;
+ }
+
+ if (dir & PKIND_TX) {
+ /* Tx pkind set request valid if PCIFUNC has NIXLF attached */
+ rc = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
+ if (rc)
+ return rc;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf),
+ txpkind);
+ }
+
+ if (enable_higig2 ^ rvu_cgx_is_higig2_enabled(rvu, pf))
+ rvu_cgx_enadis_higig2(rvu, pf, enable_higig2);
+
+ pfvf->intf_mode = intf_mode;
+ return 0;
+}
+
+int rvu_mbox_handler_npc_set_pkind(struct rvu *rvu,
+ struct npc_set_pkind *req,
+ struct msg_rsp *rsp)
+{
+ return rvu_npc_set_parse_mode(rvu, req->hdr.pcifunc, req->mode,
+ req->dir, req->pkind, req->var_len_off,
+ req->var_len_off_mask, req->shift_dir);
+}
+
+int rvu_mbox_handler_npc_read_base_steer_rule(struct rvu *rvu,
+ struct msg_req *req,
+ struct npc_mcam_read_base_rule_rsp *rsp)
+{
struct npc_mcam *mcam = &rvu->hw->mcam;
- int blkaddr, index;
- bool enable;
+ int index, blkaddr, nixlf, rc = 0;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
+ u8 intf, enable;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
+ return NPC_MCAM_INVALID_REQ;
- if (!pfvf->rxvlan)
- return 0;
+ /* Return the channel number in case of PF */
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK)) {
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ rsp->entry.kw[0] = pfvf->rx_chan_base;
+ rsp->entry.kw_mask[0] = 0xFFFULL;
+ goto out;
+ }
+
+ /* Find the pkt steering rule installed by PF to this VF */
+ mutex_lock(&mcam->lock);
+ for (index = 0; index < mcam->bmap_entries; index++) {
+ if (mcam->entry2target_pffunc[index] == pcifunc)
+ goto read_entry;
+ }
+ rc = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
+ if (rc < 0) {
+ mutex_unlock(&mcam->lock);
+ goto out;
+ }
+ /* Read the default ucast entry if there is no pkt steering rule */
index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf,
NIXLF_UCAST_ENTRY);
- pfvf->entry.action = npc_get_mcam_action(rvu, mcam, blkaddr, index);
- enable = is_mcam_entry_enabled(rvu, mcam, blkaddr, index);
- npc_config_mcam_entry(rvu, mcam, blkaddr, pfvf->rxvlan_index,
- NIX_INTF_RX, &pfvf->entry, enable);
+read_entry:
+ /* Read the mcam entry */
+ npc_read_mcam_entry(rvu, mcam, blkaddr, index, &rsp->entry, &intf,
+ &enable);
+ mutex_unlock(&mcam->lock);
+out:
+ return rc;
+}
+
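+/* Return the match-statistics counter value of an MCAM entry, if a
+ * counter is enabled in its STAT_ACT register.
+ */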
+int rvu_mbox_handler_npc_mcam_entry_stats(struct rvu *rvu,
+ struct npc_mcam_get_stats_req *req,
+ struct npc_mcam_get_stats_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 index, cntr;
+ int blkaddr;
+ u64 regval;
+ u32 bank;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+
+ index = req->entry & (mcam->banksize - 1);
+ bank = npc_get_bank(mcam, req->entry);
+
+ /* read MCAM entry STAT_ACT register */
+ regval = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_STAT_ACT(index, bank));
+
+ if (!(regval & rvu->hw->npc_stat_ena)) {
+ rsp->stat_ena = 0;
+ mutex_unlock(&mcam->lock);
+ return 0;
+ }
+
+ cntr = regval & 0x1FF;
+
+ rsp->stat_ena = 1;
+ rsp->stat = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(cntr));
+ rsp->stat &= BIT_ULL(48) - 1;
+
+ mutex_unlock(&mcam->lock);
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
new file mode 100644
index 000000000000..0ad83405aacd
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -0,0 +1,1434 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2020 Marvell.
+ */
+
+#include <linux/bitfield.h>
+
+#include "rvu_struct.h"
+#include "rvu_reg.h"
+#include "rvu.h"
+#include "npc.h"
+
+#define NPC_BYTESM GENMASK_ULL(19, 16)
+#define NPC_HDR_OFFSET GENMASK_ULL(15, 8)
+#define NPC_KEY_OFFSET GENMASK_ULL(5, 0)
+#define NPC_LDATA_EN BIT_ULL(7)
+
+static const char * const npc_flow_names[] = {
+ [NPC_DMAC] = "dmac",
+ [NPC_SMAC] = "smac",
+ [NPC_ETYPE] = "ether type",
+ [NPC_VLAN_ETYPE_CTAG] = "vlan ether type ctag",
+ [NPC_VLAN_ETYPE_STAG] = "vlan ether type stag",
+ [NPC_OUTER_VID] = "outer vlan id",
+ [NPC_TOS] = "tos",
+ [NPC_SIP_IPV4] = "ipv4 source ip",
+ [NPC_DIP_IPV4] = "ipv4 destination ip",
+ [NPC_SIP_IPV6] = "ipv6 source ip",
+ [NPC_DIP_IPV6] = "ipv6 destination ip",
+ [NPC_IPPROTO_TCP] = "ip proto tcp",
+ [NPC_IPPROTO_UDP] = "ip proto udp",
+ [NPC_IPPROTO_SCTP] = "ip proto sctp",
+ [NPC_IPPROTO_ICMP] = "ip proto icmp",
+ [NPC_IPPROTO_ICMP6] = "ip proto icmp6",
+ [NPC_IPPROTO_AH] = "ip proto AH",
+ [NPC_IPPROTO_ESP] = "ip proto ESP",
+ [NPC_SPORT_TCP] = "tcp source port",
+ [NPC_DPORT_TCP] = "tcp destination port",
+ [NPC_SPORT_UDP] = "udp source port",
+ [NPC_DPORT_UDP] = "udp destination port",
+ [NPC_SPORT_SCTP] = "sctp source port",
+ [NPC_DPORT_SCTP] = "sctp destination port",
+ [NPC_FDSA_VAL] = "FDSA tag value ",
+ [NPC_UNKNOWN] = "unknown",
+};
+
+const char *npc_get_field_name(u8 hdr)
+{
+ if (hdr >= ARRAY_SIZE(npc_flow_names))
+ return npc_flow_names[NPC_UNKNOWN];
+
+ return npc_flow_names[hdr];
+}
+
+/* Compute keyword masks and figure out the number of keywords a field
+ * spans in the key.
+ */
+static void npc_set_kw_masks(struct npc_mcam *mcam, u8 type,
+ u8 nr_bits, int start_kwi, int offset, u8 intf)
+{
+ struct npc_key_field *field = &mcam->rx_key_fields[type];
+ u8 bits_in_kw;
+ int max_kwi;
+
+ if (mcam->banks_per_entry == 1)
+ max_kwi = 1; /* NPC_MCAM_KEY_X1 */
+ else if (mcam->banks_per_entry == 2)
+ max_kwi = 3; /* NPC_MCAM_KEY_X2 */
+ else
+ max_kwi = 6; /* NPC_MCAM_KEY_X4 */
+
+ if (is_npc_intf_tx(intf))
+ field = &mcam->tx_key_fields[type];
+
+ if (offset + nr_bits <= 64) {
+ /* one KW only */
+ if (start_kwi > max_kwi)
+ return;
+ field->kw_mask[start_kwi] |= GENMASK_ULL(nr_bits - 1, 0)
+ << offset;
+ field->nr_kws = 1;
+ } else if (offset + nr_bits > 64 &&
+ offset + nr_bits <= 128) {
+ /* two KWs */
+ if (start_kwi + 1 > max_kwi)
+ return;
+ /* first KW mask */
+ bits_in_kw = 64 - offset;
+ field->kw_mask[start_kwi] |= GENMASK_ULL(bits_in_kw - 1, 0)
+ << offset;
+ /* second KW mask i.e. mask for rest of bits */
+ bits_in_kw = nr_bits + offset - 64;
+ field->kw_mask[start_kwi + 1] |= GENMASK_ULL(bits_in_kw - 1, 0);
+ field->nr_kws = 2;
+ } else {
+ /* three KWs */
+ if (start_kwi + 2 > max_kwi)
+ return;
+ /* first KW mask */
+ bits_in_kw = 64 - offset;
+ field->kw_mask[start_kwi] |= GENMASK_ULL(bits_in_kw - 1, 0)
+ << offset;
+ /* second KW mask */
+ field->kw_mask[start_kwi + 1] = ~0ULL;
+ /* third KW mask i.e. mask for rest of bits */
+ bits_in_kw = nr_bits + offset - 128;
+ field->kw_mask[start_kwi + 2] |= GENMASK_ULL(bits_in_kw - 1, 0);
+ field->nr_kws = 3;
+ }
+}
+
+/* Helper function to figure out whether field exists in the key */
+static bool npc_is_field_present(struct rvu *rvu, enum key_fields type, u8 intf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct npc_key_field *input;
+
+ input = &mcam->rx_key_fields[type];
+ if (is_npc_intf_tx(intf))
+ input = &mcam->tx_key_fields[type];
+
+ return input->nr_kws > 0;
+}
+
+static bool npc_is_same(struct npc_key_field *input,
+ struct npc_key_field *field)
+{
+ return memcmp(&input->layer_mdata, &field->layer_mdata,
+ sizeof(struct npc_layer_mdata)) == 0;
+}
+
+static void npc_set_layer_mdata(struct npc_mcam *mcam, enum key_fields type,
+ u64 cfg, u8 lid, u8 lt, u8 intf)
+{
+ struct npc_key_field *input = &mcam->rx_key_fields[type];
+
+ if (is_npc_intf_tx(intf))
+ input = &mcam->tx_key_fields[type];
+
+ input->layer_mdata.hdr = FIELD_GET(NPC_HDR_OFFSET, cfg);
+ input->layer_mdata.key = FIELD_GET(NPC_KEY_OFFSET, cfg);
+ input->layer_mdata.len = FIELD_GET(NPC_BYTESM, cfg) + 1;
+ input->layer_mdata.ltype = lt;
+ input->layer_mdata.lid = lid;
+}
+
+static bool npc_check_overlap_fields(struct npc_key_field *input1,
+ struct npc_key_field *input2)
+{
+ int kwi;
+
+ /* Fields with the same layer id but different ltypes are mutually
+ * exclusive, so it is safe for them to overlap.
+ */
+ if (input1->layer_mdata.lid == input2->layer_mdata.lid &&
+ input1->layer_mdata.ltype != input2->layer_mdata.ltype)
+ return false;
+
+ for (kwi = 0; kwi < NPC_MAX_KWS_IN_KEY; kwi++) {
+ if (input1->kw_mask[kwi] & input2->kw_mask[kwi])
+ return true;
+ }
+
+ return false;
+}
+
+/* Helper function to check whether a given field overlaps with any other
+ * field in the key. Due to limits on the key size and the key extraction
+ * profile in use, higher layers can overwrite lower layers' header fields,
+ * so overlaps need to be checked.
+ */
+static bool npc_check_overlap(struct rvu *rvu, int blkaddr,
+ enum key_fields type, u8 start_lid, u8 intf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct npc_key_field *dummy, *input;
+ int start_kwi, offset;
+ u8 nr_bits, lid, lt, ld;
+ u64 cfg;
+
+ dummy = &mcam->rx_key_fields[NPC_UNKNOWN];
+ input = &mcam->rx_key_fields[type];
+
+ if (is_npc_intf_tx(intf)) {
+ dummy = &mcam->tx_key_fields[NPC_UNKNOWN];
+ input = &mcam->tx_key_fields[type];
+ }
+
+ for (lid = start_lid; lid < NPC_MAX_LID; lid++) {
+ for (lt = 0; lt < NPC_MAX_LT; lt++) {
+ for (ld = 0; ld < NPC_MAX_LD; ld++) {
+ cfg = rvu_read64(rvu, blkaddr,
+ NPC_AF_INTFX_LIDX_LTX_LDX_CFG
+ (intf, lid, lt, ld));
+ if (!FIELD_GET(NPC_LDATA_EN, cfg))
+ continue;
+ memset(dummy, 0, sizeof(struct npc_key_field));
+ npc_set_layer_mdata(mcam, NPC_UNKNOWN, cfg,
+ lid, lt, intf);
+ /* exclude input */
+ if (npc_is_same(input, dummy))
+ continue;
+ start_kwi = dummy->layer_mdata.key / 8;
+ offset = (dummy->layer_mdata.key * 8) % 64;
+ nr_bits = dummy->layer_mdata.len * 8;
+ /* form KW masks */
+ npc_set_kw_masks(mcam, NPC_UNKNOWN, nr_bits,
+ start_kwi, offset, intf);
+ /* check whether any of the input field's bits fall within
+ * another field's bits
+ */
+ if (npc_check_overlap_fields(dummy, input))
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+static bool npc_check_field(struct rvu *rvu, int blkaddr, enum key_fields type,
+ u8 intf)
+{
+ if (!npc_is_field_present(rvu, type, intf) ||
+ npc_check_overlap(rvu, blkaddr, type, 0, intf))
+ return false;
+ return true;
+}
+
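+/* Map an enabled parse-result nibble to its key field type and record
+ * which key word and bit range it occupies.
+ */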
+static void npc_scan_parse_result(struct npc_mcam *mcam, u8 bit_number,
+ u8 key_nibble, u8 intf)
+{
+ u8 offset = (key_nibble * 4) % 64; /* offset within key word */
+ u8 kwi = (key_nibble * 4) / 64; /* which word in key */
+ u8 nr_bits = 4; /* bits in a nibble */
+ u8 type;
+
+ switch (bit_number) {
+ case 0 ... 2:
+ type = NPC_CHAN;
+ break;
+ case 3:
+ type = NPC_ERRLEV;
+ break;
+ case 4 ... 5:
+ type = NPC_ERRCODE;
+ break;
+ case 6:
+ type = NPC_LXMB;
+ break;
+ /* check for LTYPE only as of now */
+ case 9:
+ type = NPC_LA;
+ break;
+ case 12:
+ type = NPC_LB;
+ break;
+ case 15:
+ type = NPC_LC;
+ break;
+ case 18:
+ type = NPC_LD;
+ break;
+ case 21:
+ type = NPC_LE;
+ break;
+ case 24:
+ type = NPC_LF;
+ break;
+ case 27:
+ type = NPC_LG;
+ break;
+ case 30:
+ type = NPC_LH;
+ break;
+ default:
+ return;
+ }
+ npc_set_kw_masks(mcam, type, nr_bits, kwi, offset, intf);
+}
+
+static void npc_handle_multi_layer_fields(struct rvu *rvu, int blkaddr, u8 intf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct npc_key_field *key_fields;
+ /* Ether type can come from three layers
+ * (ethernet, single tagged, double tagged)
+ */
+ struct npc_key_field *etype_ether;
+ struct npc_key_field *etype_tag1;
+ struct npc_key_field *etype_tag2;
+ /* Outer VLAN TCI can come from two layers
+ * (single tagged, double tagged)
+ */
+ struct npc_key_field *vlan_tag1;
+ struct npc_key_field *vlan_tag2;
+ u64 *features;
+ u8 start_lid;
+ int i;
+
+ key_fields = mcam->rx_key_fields;
+ features = &mcam->rx_features;
+
+ if (is_npc_intf_tx(intf)) {
+ key_fields = mcam->tx_key_fields;
+ features = &mcam->tx_features;
+ }
+
+ /* Handle header fields which can come from multiple layers like
+ * etype, outer vlan tci. These fields should have the same position in
+ * the key; otherwise installing a single MCAM rule would need more than
+ * one entry, which complicates MCAM space management.
+ */
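+ /* For example, if both the untagged Ethertype (LA) and the single-tag
+  * Ethertype (LB/CTAG) are extracted to the same key bits, their kw_mask
+  * arrays match word for word and NPC_ETYPE below can be taken from either;
+  * if they land at different offsets, NPC_ETYPE is not advertised as a
+  * supported flow key (illustrative scenario).
+  */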
+ etype_ether = &key_fields[NPC_ETYPE_ETHER];
+ etype_tag1 = &key_fields[NPC_ETYPE_TAG1];
+ etype_tag2 = &key_fields[NPC_ETYPE_TAG2];
+ vlan_tag1 = &key_fields[NPC_VLAN_TAG1];
+ vlan_tag2 = &key_fields[NPC_VLAN_TAG2];
+
+ /* if the programmed key profile does not extract Ethertype at all */
+ if (!etype_ether->nr_kws && !etype_tag1->nr_kws && !etype_tag2->nr_kws)
+ goto vlan_tci;
+
+ /* if the programmed key profile extracts Ethertype from one layer */
+ if (etype_ether->nr_kws && !etype_tag1->nr_kws && !etype_tag2->nr_kws)
+ key_fields[NPC_ETYPE] = *etype_ether;
+ if (!etype_ether->nr_kws && etype_tag1->nr_kws && !etype_tag2->nr_kws)
+ key_fields[NPC_ETYPE] = *etype_tag1;
+ if (!etype_ether->nr_kws && !etype_tag1->nr_kws && etype_tag2->nr_kws)
+ key_fields[NPC_ETYPE] = *etype_tag2;
+
+ /* if the programmed key profile extracts Ethertype from multiple layers */
+ if (etype_ether->nr_kws && etype_tag1->nr_kws) {
+ for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
+ if (etype_ether->kw_mask[i] != etype_tag1->kw_mask[i])
+ goto vlan_tci;
+ }
+ key_fields[NPC_ETYPE] = *etype_tag1;
+ }
+ if (etype_ether->nr_kws && etype_tag2->nr_kws) {
+ for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
+ if (etype_ether->kw_mask[i] != etype_tag2->kw_mask[i])
+ goto vlan_tci;
+ }
+ key_fields[NPC_ETYPE] = *etype_tag2;
+ }
+ if (etype_tag1->nr_kws && etype_tag2->nr_kws) {
+ for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
+ if (etype_tag1->kw_mask[i] != etype_tag2->kw_mask[i])
+ goto vlan_tci;
+ }
+ key_fields[NPC_ETYPE] = *etype_tag2;
+ }
+
+ /* check that none of the higher layers overwrite Ethertype */
+ start_lid = key_fields[NPC_ETYPE].layer_mdata.lid + 1;
+ if (npc_check_overlap(rvu, blkaddr, NPC_ETYPE, start_lid, intf))
+ goto vlan_tci;
+ *features |= BIT_ULL(NPC_ETYPE);
+vlan_tci:
+ /* if key profile does not extract outer vlan tci at all */
+ if (!vlan_tag1->nr_kws && !vlan_tag2->nr_kws)
+ goto done;
+
+ /* if key profile extracts outer vlan tci from one layer */
+ if (vlan_tag1->nr_kws && !vlan_tag2->nr_kws)
+ key_fields[NPC_OUTER_VID] = *vlan_tag1;
+ if (!vlan_tag1->nr_kws && vlan_tag2->nr_kws)
+ key_fields[NPC_OUTER_VID] = *vlan_tag2;
+
+ /* if key profile extracts outer vlan tci from multiple layers */
+ if (vlan_tag1->nr_kws && vlan_tag2->nr_kws) {
+ for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
+ if (vlan_tag1->kw_mask[i] != vlan_tag2->kw_mask[i])
+ goto done;
+ }
+ key_fields[NPC_OUTER_VID] = *vlan_tag2;
+ }
+ /* check that none of the higher layers overwrite outer vlan tci */
+ start_lid = key_fields[NPC_OUTER_VID].layer_mdata.lid + 1;
+ if (npc_check_overlap(rvu, blkaddr, NPC_OUTER_VID, start_lid, intf))
+ goto done;
+ *features |= BIT_ULL(NPC_OUTER_VID);
+done:
+ return;
+}
+
+static void npc_scan_ldata(struct rvu *rvu, int blkaddr, u8 lid,
+ u8 lt, u64 cfg, u8 intf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u8 hdr, key, nr_bytes, bit_offset;
+ u8 la_ltype, la_start;
+ /* starting KW index and starting bit position */
+ int start_kwi, offset;
+
+ nr_bytes = FIELD_GET(NPC_BYTESM, cfg) + 1;
+ hdr = FIELD_GET(NPC_HDR_OFFSET, cfg);
+ key = FIELD_GET(NPC_KEY_OFFSET, cfg);
+ start_kwi = key / 8;
+ offset = (key * 8) % 64;
+
+ /* For Tx, Layer A has NIX_INST_HDR_S(64 bytes) preceding
+ * ethernet header.
+ */
+ if (is_npc_intf_tx(intf)) {
+ la_ltype = NPC_LT_LA_IH_NIX_ETHER;
+ la_start = 8;
+ } else {
+ la_ltype = NPC_LT_LA_ETHER;
+ la_start = 0;
+ }
+
+#define NPC_SCAN_HDR(name, hlid, hlt, hstart, hlen) \
+do { \
+ if (lid == (hlid) && lt == (hlt)) { \
+ if ((hstart) >= hdr && \
+ ((hstart) + (hlen)) <= (hdr + nr_bytes)) { \
+ bit_offset = (hdr + nr_bytes - (hstart) - (hlen)) * 8; \
+ npc_set_layer_mdata(mcam, (name), cfg, lid, lt, intf); \
+ npc_set_kw_masks(mcam, (name), (hlen) * 8, \
+ start_kwi, offset + bit_offset, intf);\
+ } \
+ } \
+} while (0)
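+
+ /* For example, NPC_SCAN_HDR(NPC_SIP_IPV4, ..., 12, 4): assuming the
+  * profile extracts 20 bytes of the IP header starting at header offset 0
+  * (hdr = 0, nr_bytes = 20), bit_offset = (0 + 20 - 12 - 4) * 8 = 32 and
+  * the 32-bit source IP mask is set starting at key bit (offset + 32)
+  * (illustrative numbers).
+  */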
+
+ /* List the LID, LTYPE, start offset from the layer and length (in bytes)
+ * of packet header fields below.
+ * Example: Source IP is 4 bytes and starts at the 12th byte of the IP header.
+ */
+ NPC_SCAN_HDR(NPC_TOS, NPC_LID_LC, NPC_LT_LC_IP, 1, 1);
+ NPC_SCAN_HDR(NPC_SIP_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 12, 4);
+ NPC_SCAN_HDR(NPC_DIP_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 16, 4);
+ NPC_SCAN_HDR(NPC_SIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 8, 16);
+ NPC_SCAN_HDR(NPC_DIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 24, 16);
+ NPC_SCAN_HDR(NPC_SPORT_UDP, NPC_LID_LD, NPC_LT_LD_UDP, 0, 2);
+ NPC_SCAN_HDR(NPC_DPORT_UDP, NPC_LID_LD, NPC_LT_LD_UDP, 2, 2);
+ NPC_SCAN_HDR(NPC_SPORT_TCP, NPC_LID_LD, NPC_LT_LD_TCP, 0, 2);
+ NPC_SCAN_HDR(NPC_DPORT_TCP, NPC_LID_LD, NPC_LT_LD_TCP, 2, 2);
+ NPC_SCAN_HDR(NPC_SPORT_SCTP, NPC_LID_LD, NPC_LT_LD_SCTP, 0, 2);
+ NPC_SCAN_HDR(NPC_DPORT_SCTP, NPC_LID_LD, NPC_LT_LD_SCTP, 2, 2);
+ NPC_SCAN_HDR(NPC_ETYPE_ETHER, NPC_LID_LA, NPC_LT_LA_ETHER, 12, 2);
+ NPC_SCAN_HDR(NPC_ETYPE_TAG1, NPC_LID_LB, NPC_LT_LB_CTAG, 4, 2);
+ NPC_SCAN_HDR(NPC_ETYPE_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 8, 2);
+ NPC_SCAN_HDR(NPC_VLAN_TAG1, NPC_LID_LB, NPC_LT_LB_CTAG, 2, 2);
+ NPC_SCAN_HDR(NPC_VLAN_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 2, 2);
+ NPC_SCAN_HDR(NPC_FDSA_VAL, NPC_LID_LB, NPC_LT_LB_FDSA, 1, 1);
+ NPC_SCAN_HDR(NPC_DMAC, NPC_LID_LA, la_ltype, la_start, 6);
+ NPC_SCAN_HDR(NPC_SMAC, NPC_LID_LA, la_ltype, la_start, 6);
+ /* PF_FUNC is 2 bytes at 0th byte of NPC_LT_LA_IH_NIX_ETHER */
+ NPC_SCAN_HDR(NPC_PF_FUNC, NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, 0, 2);
+}
+
+static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u64 *features = &mcam->rx_features;
+ u64 tcp_udp_sctp;
+ int hdr;
+
+ if (is_npc_intf_tx(intf))
+ features = &mcam->tx_features;
+
+ for (hdr = NPC_DMAC; hdr < NPC_HEADER_FIELDS_MAX; hdr++) {
+ if (npc_check_field(rvu, blkaddr, hdr, intf))
+ *features |= BIT_ULL(hdr);
+ }
+
+ tcp_udp_sctp = BIT_ULL(NPC_SPORT_TCP) | BIT_ULL(NPC_SPORT_UDP) |
+ BIT_ULL(NPC_DPORT_TCP) | BIT_ULL(NPC_DPORT_UDP) |
+ BIT_ULL(NPC_SPORT_SCTP) | BIT_ULL(NPC_DPORT_SCTP);
+
+ /* for tcp/udp/sctp the corresponding layer type should be in the key */
+ if (*features & tcp_udp_sctp) {
+ if (!npc_check_field(rvu, blkaddr, NPC_LD, intf))
+ *features &= ~tcp_udp_sctp;
+ else
+ *features |= BIT_ULL(NPC_IPPROTO_TCP) |
+ BIT_ULL(NPC_IPPROTO_UDP) |
+ BIT_ULL(NPC_IPPROTO_SCTP);
+ }
+
+ /* for AH/ICMP/ICMPv6, check if the corresponding layer type is present in the key */
+ if (npc_check_field(rvu, blkaddr, NPC_LD, intf)) {
+ *features |= BIT_ULL(NPC_IPPROTO_AH);
+ *features |= BIT_ULL(NPC_IPPROTO_ICMP);
+ *features |= BIT_ULL(NPC_IPPROTO_ICMP6);
+ }
+
+ /* for ESP, check if corresponding layer type is present in the key */
+ if (npc_check_field(rvu, blkaddr, NPC_LE, intf))
+ *features |= BIT_ULL(NPC_IPPROTO_ESP);
+
+ /* for vlan the corresponding layer type should be in the key */
+ if (*features & BIT_ULL(NPC_OUTER_VID) ||
+ *features & BIT_ULL(NPC_FDSA_VAL))
+ if (!npc_check_field(rvu, blkaddr, NPC_LB, intf)) {
+ *features &= ~BIT_ULL(NPC_OUTER_VID);
+ *features &= ~BIT_ULL(NPC_FDSA_VAL);
+ }
+
+ /* for vlan ethertypes the corresponding layer type should be in the key */
+ if (npc_check_field(rvu, blkaddr, NPC_LB, intf))
+ *features |= BIT_ULL(NPC_VLAN_ETYPE_CTAG) |
+ BIT_ULL(NPC_VLAN_ETYPE_STAG);
+}
+
+/* Scan the key extraction profile and record how the fields of interest
+ * fill the key structure. Also verify that Channel and DMAC exist in the
+ * key and are not overwritten by other header fields.
+ */
+static int npc_scan_kex(struct rvu *rvu, int blkaddr, u8 intf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u8 lid, lt, ld, bitnr;
+ u8 key_nibble = 0;
+ u64 cfg;
+
+ /* Scan and note how the parse result is going to be placed in the key.
+ * A bit set in PARSE_NIBBLE_ENA corresponds to a nibble from the parse
+ * result in the key. The enabled nibbles from the parse result
+ * will be concatenated in the key.
+ */
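+ /* For example, if only the channel nibbles (bits 0-2) and the LC LTYPE
+  * nibble (bit 15) are enabled, the key starts with the three channel
+  * nibbles immediately followed by the LC LTYPE nibble; key_nibble below
+  * counts positions in that packed order (illustrative configuration).
+  */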
+ cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf));
+ cfg &= NPC_PARSE_NIBBLE;
+ for_each_set_bit(bitnr, (unsigned long *)&cfg, 31) {
+ npc_scan_parse_result(mcam, bitnr, key_nibble, intf);
+ key_nibble++;
+ }
+
+ /* Scan and note how layer data is going to be placed in the key */
+ for (lid = 0; lid < NPC_MAX_LID; lid++) {
+ for (lt = 0; lt < NPC_MAX_LT; lt++) {
+ for (ld = 0; ld < NPC_MAX_LD; ld++) {
+ cfg = rvu_read64(rvu, blkaddr,
+ NPC_AF_INTFX_LIDX_LTX_LDX_CFG
+ (intf, lid, lt, ld));
+ if (!FIELD_GET(NPC_LDATA_EN, cfg))
+ continue;
+ npc_scan_ldata(rvu, blkaddr, lid, lt, cfg,
+ intf);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int npc_scan_verify_kex(struct rvu *rvu, int blkaddr)
+{
+ int err;
+
+ err = npc_scan_kex(rvu, blkaddr, NIX_INTF_RX);
+ if (err)
+ return err;
+
+ err = npc_scan_kex(rvu, blkaddr, NIX_INTF_TX);
+ if (err)
+ return err;
+
+ /* Channel is mandatory */
+ if (!npc_is_field_present(rvu, NPC_CHAN, NIX_INTF_RX)) {
+ dev_err(rvu->dev, "Channel not present in Key\n");
+ return -EINVAL;
+ }
+ /* check that none of the fields overwrite channel */
+ if (npc_check_overlap(rvu, blkaddr, NPC_CHAN, 0, NIX_INTF_RX)) {
+ dev_err(rvu->dev, "Channel cannot be overwritten\n");
+ return -EINVAL;
+ }
+ /* DMAC should be present in key for unicast filter to work */
+ if (!npc_is_field_present(rvu, NPC_DMAC, NIX_INTF_RX)) {
+ dev_err(rvu->dev, "DMAC not present in Key\n");
+ return -EINVAL;
+ }
+ /* check that none of the fields overwrite DMAC */
+ if (npc_check_overlap(rvu, blkaddr, NPC_DMAC, 0, NIX_INTF_RX)) {
+ dev_err(rvu->dev, "DMAC cannot be overwritten\n");
+ return -EINVAL;
+ }
+
+ npc_set_features(rvu, blkaddr, NIX_INTF_TX);
+ npc_set_features(rvu, blkaddr, NIX_INTF_RX);
+ npc_handle_multi_layer_fields(rvu, blkaddr, NIX_INTF_TX);
+ npc_handle_multi_layer_fields(rvu, blkaddr, NIX_INTF_RX);
+
+ return 0;
+}
+
+int npc_flow_steering_init(struct rvu *rvu, int blkaddr)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+
+ INIT_LIST_HEAD(&mcam->mcam_rules);
+
+ return npc_scan_verify_kex(rvu, blkaddr);
+}
+
+static int npc_check_unsupported_flows(struct rvu *rvu, u64 features, u8 intf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u64 *mcam_features = &mcam->rx_features;
+ u64 unsupported;
+ u8 bit;
+
+ if (is_npc_intf_tx(intf))
+ mcam_features = &mcam->tx_features;
+
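+ /* For example, if the key supports only bits 0-1 (mcam_features = 0x3)
+  * but the request sets bits 0-2 (features = 0x7), then
+  * (0x3 ^ 0x7) & ~0x3 = 0x4 flags bit 2 as unsupported
+  * (illustrative bit values).
+  */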
+ unsupported = (*mcam_features ^ features) & ~(*mcam_features);
+ if (unsupported) {
+ dev_info(rvu->dev, "Unsupported flow(s):\n");
+ for_each_set_bit(bit, (unsigned long *)&unsupported, 64)
+ dev_info(rvu->dev, "%s ", npc_get_field_name(bit));
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+/* npc_update_entry - Based on the masks generated during
+ * key scanning, updates the given entry with the value and
+ * masks for the field of interest. A maximum of 16 bytes of a packet
+ * header can be extracted by HW, hence lo and hi are sufficient.
+ * When the field is 8 bytes or less, hi should be
+ * 0 for both value and mask.
+ *
+ * If an exact match of the value is required then the mask should be all 1s.
+ * If any bits in the mask are 0 then the corresponding bits in the value
+ * are don't care.
+ */
+static void npc_update_entry(struct rvu *rvu, enum key_fields type,
+ struct mcam_entry *entry, u64 val_lo,
+ u64 val_hi, u64 mask_lo, u64 mask_hi, u8 intf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct mcam_entry dummy = { {0} };
+ struct npc_key_field *field;
+ u64 kw1, kw2, kw3;
+ u8 shift;
+ int i;
+
+ field = &mcam->rx_key_fields[type];
+ if (is_npc_intf_tx(intf))
+ field = &mcam->tx_key_fields[type];
+
+ if (!field->nr_kws)
+ return;
+
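+ /* For example, a field whose kw_mask[1] covers bits 48-63 gives
+  * shift = 48: the low 16 bits of val_lo land in kw[1] bits 48-63 and,
+  * when nr_kws == 2, kw[2] receives (val_lo >> 16) | (val_hi << 48)
+  * masked by kw_mask[2] (illustrative layout).
+  */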
+ for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
+ if (!field->kw_mask[i])
+ continue;
+ /* place key value in kw[x] */
+ shift = __ffs64(field->kw_mask[i]);
+ /* update entry value */
+ kw1 = (val_lo << shift) & field->kw_mask[i];
+ dummy.kw[i] = kw1;
+ /* update entry mask */
+ kw1 = (mask_lo << shift) & field->kw_mask[i];
+ dummy.kw_mask[i] = kw1;
+
+ if (field->nr_kws == 1)
+ break;
+ /* place remaining bits of key value in kw[x + 1] */
+ if (field->nr_kws == 2) {
+ /* update entry value */
+ kw2 = shift ? val_lo >> (64 - shift) : 0;
+ kw2 |= (val_hi << shift);
+ kw2 &= field->kw_mask[i + 1];
+ dummy.kw[i + 1] = kw2;
+ /* update entry mask */
+ kw2 = shift ? mask_lo >> (64 - shift) : 0;
+ kw2 |= (mask_hi << shift);
+ kw2 &= field->kw_mask[i + 1];
+ dummy.kw_mask[i + 1] = kw2;
+ break;
+ }
+ /* place remaining bits of key value in kw[x + 1], kw[x + 2] */
+ if (field->nr_kws == 3) {
+ /* update entry value */
+ kw2 = shift ? val_lo >> (64 - shift) : 0;
+ kw2 |= (val_hi << shift);
+ kw2 &= field->kw_mask[i + 1];
+ kw3 = shift ? val_hi >> (64 - shift) : 0;
+ kw3 &= field->kw_mask[i + 2];
+ dummy.kw[i + 1] = kw2;
+ dummy.kw[i + 2] = kw3;
+ /* update entry mask */
+ kw2 = shift ? mask_lo >> (64 - shift) : 0;
+ kw2 |= (mask_hi << shift);
+ kw2 &= field->kw_mask[i + 1];
+ kw3 = shift ? mask_hi >> (64 - shift) : 0;
+ kw3 &= field->kw_mask[i + 2];
+ dummy.kw_mask[i + 1] = kw2;
+ dummy.kw_mask[i + 2] = kw3;
+ break;
+ }
+ }
+ /* dummy now holds the values and masks for the given key
+ * field; clear and update the input entry with them.
+ */
+ for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
+ if (!field->kw_mask[i])
+ continue;
+ entry->kw[i] &= ~field->kw_mask[i];
+ entry->kw_mask[i] &= ~field->kw_mask[i];
+
+ entry->kw[i] |= dummy.kw[i];
+ entry->kw_mask[i] |= dummy.kw_mask[i];
+ }
+}
+
+#define IPV6_WORDS 4
+
+static void npc_update_ipv6_flow(struct rvu *rvu, struct mcam_entry *entry,
+ u64 features, struct flow_msg *pkt,
+ struct flow_msg *mask,
+ struct rvu_npc_mcam_rule *output, u8 intf)
+{
+ u32 src_ip[IPV6_WORDS], src_ip_mask[IPV6_WORDS];
+ u32 dst_ip[IPV6_WORDS], dst_ip_mask[IPV6_WORDS];
+ struct flow_msg *opkt = &output->packet;
+ struct flow_msg *omask = &output->mask;
+ u64 mask_lo, mask_hi;
+ u64 val_lo, val_hi;
+
+ /* For an ipv6 address fe80::2c68:63ff:fe5e:2d0a the packet
+ * values to be programmed in MCAM should be as below:
+ * val_high: 0xfe80000000000000
+ * val_low: 0x2c6863fffe5e2d0a
+ */
+ if (features & BIT_ULL(NPC_SIP_IPV6)) {
+ be32_to_cpu_array(src_ip_mask, mask->ip6src, IPV6_WORDS);
+ be32_to_cpu_array(src_ip, pkt->ip6src, IPV6_WORDS);
+
+ mask_hi = (u64)src_ip_mask[0] << 32 | src_ip_mask[1];
+ mask_lo = (u64)src_ip_mask[2] << 32 | src_ip_mask[3];
+ val_hi = (u64)src_ip[0] << 32 | src_ip[1];
+ val_lo = (u64)src_ip[2] << 32 | src_ip[3];
+
+ npc_update_entry(rvu, NPC_SIP_IPV6, entry, val_lo, val_hi,
+ mask_lo, mask_hi, intf);
+ memcpy(opkt->ip6src, pkt->ip6src, sizeof(opkt->ip6src));
+ memcpy(omask->ip6src, mask->ip6src, sizeof(omask->ip6src));
+ }
+ if (features & BIT_ULL(NPC_DIP_IPV6)) {
+ be32_to_cpu_array(dst_ip_mask, mask->ip6dst, IPV6_WORDS);
+ be32_to_cpu_array(dst_ip, pkt->ip6dst, IPV6_WORDS);
+
+ mask_hi = (u64)dst_ip_mask[0] << 32 | dst_ip_mask[1];
+ mask_lo = (u64)dst_ip_mask[2] << 32 | dst_ip_mask[3];
+ val_hi = (u64)dst_ip[0] << 32 | dst_ip[1];
+ val_lo = (u64)dst_ip[2] << 32 | dst_ip[3];
+
+ npc_update_entry(rvu, NPC_DIP_IPV6, entry, val_lo, val_hi,
+ mask_lo, mask_hi, intf);
+ memcpy(opkt->ip6dst, pkt->ip6dst, sizeof(opkt->ip6dst));
+ memcpy(omask->ip6dst, mask->ip6dst, sizeof(omask->ip6dst));
+ }
+}
+
+static void npc_update_vlan_features(struct rvu *rvu, struct mcam_entry *entry,
+ u64 features, u8 intf)
+{
+ bool ctag = !!(features & BIT_ULL(NPC_VLAN_ETYPE_CTAG));
+ bool stag = !!(features & BIT_ULL(NPC_VLAN_ETYPE_STAG));
+ bool vid = !!(features & BIT_ULL(NPC_OUTER_VID));
+
+ /* If only VLAN id is given then always match outer VLAN id */
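+ /* Using value (STAG_QINQ | CTAG) with mask (STAG_QINQ & CTAG) keys only
+  * on the LTYPE bits common to both tag types, so packets carrying either
+  * a single C-TAG or an S-TAG/QinQ header satisfy the LB LTYPE match.
+  */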
+ if (vid && !ctag && !stag) {
+ npc_update_entry(rvu, NPC_LB, entry,
+ NPC_LT_LB_STAG_QINQ | NPC_LT_LB_CTAG, 0,
+ NPC_LT_LB_STAG_QINQ & NPC_LT_LB_CTAG, 0, intf);
+ return;
+ }
+ if (ctag)
+ npc_update_entry(rvu, NPC_LB, entry, NPC_LT_LB_CTAG, 0,
+ ~0ULL, 0, intf);
+ if (stag)
+ npc_update_entry(rvu, NPC_LB, entry, NPC_LT_LB_STAG_QINQ, 0,
+ ~0ULL, 0, intf);
+}
+
+static void npc_update_flow(struct rvu *rvu, struct mcam_entry *entry,
+ u64 features, struct flow_msg *pkt,
+ struct flow_msg *mask,
+ struct rvu_npc_mcam_rule *output, u8 intf)
+{
+ u64 dmac_mask = ether_addr_to_u64(mask->dmac);
+ u64 smac_mask = ether_addr_to_u64(mask->smac);
+ u64 dmac_val = ether_addr_to_u64(pkt->dmac);
+ u64 smac_val = ether_addr_to_u64(pkt->smac);
+ struct flow_msg *opkt = &output->packet;
+ struct flow_msg *omask = &output->mask;
+
+ if (!features)
+ return;
+
+ /* For tcp/udp/sctp LTYPE should be present in entry */
+ if (features & BIT_ULL(NPC_IPPROTO_TCP))
+ npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_TCP,
+ 0, ~0ULL, 0, intf);
+ if (features & BIT_ULL(NPC_IPPROTO_UDP))
+ npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_UDP,
+ 0, ~0ULL, 0, intf);
+ if (features & BIT_ULL(NPC_IPPROTO_SCTP))
+ npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_SCTP,
+ 0, ~0ULL, 0, intf);
+ if (features & BIT_ULL(NPC_IPPROTO_ICMP))
+ npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_ICMP,
+ 0, ~0ULL, 0, intf);
+ if (features & BIT_ULL(NPC_IPPROTO_ICMP6))
+ npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_ICMP6,
+ 0, ~0ULL, 0, intf);
+ if (features & BIT_ULL(NPC_FDSA_VAL))
+ npc_update_entry(rvu, NPC_LB, entry, NPC_LT_LB_FDSA,
+ 0, ~0ULL, 0, intf);
+
+ /* For AH, LTYPE should be present in entry */
+ if (features & BIT_ULL(NPC_IPPROTO_AH))
+ npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_AH,
+ 0, ~0ULL, 0, intf);
+ /* For ESP, LTYPE should be present in entry */
+ if (features & BIT_ULL(NPC_IPPROTO_ESP))
+ npc_update_entry(rvu, NPC_LE, entry, NPC_LT_LE_ESP,
+ 0, ~0ULL, 0, intf);
+
+#define NPC_WRITE_FLOW(field, member, val_lo, val_hi, mask_lo, mask_hi) \
+do { \
+ if (features & BIT_ULL((field))) { \
+ npc_update_entry(rvu, (field), entry, (val_lo), (val_hi), \
+ (mask_lo), (mask_hi), intf); \
+ memcpy(&opkt->member, &pkt->member, sizeof(pkt->member)); \
+ memcpy(&omask->member, &mask->member, sizeof(mask->member)); \
+ } \
+} while (0)
+
+ NPC_WRITE_FLOW(NPC_DMAC, dmac, dmac_val, 0, dmac_mask, 0);
+ NPC_WRITE_FLOW(NPC_SMAC, smac, smac_val, 0, smac_mask, 0);
+ NPC_WRITE_FLOW(NPC_ETYPE, etype, ntohs(pkt->etype), 0,
+ ntohs(mask->etype), 0);
+ NPC_WRITE_FLOW(NPC_TOS, tos, pkt->tos, 0, mask->tos, 0);
+ NPC_WRITE_FLOW(NPC_SIP_IPV4, ip4src, ntohl(pkt->ip4src), 0,
+ ntohl(mask->ip4src), 0);
+ NPC_WRITE_FLOW(NPC_DIP_IPV4, ip4dst, ntohl(pkt->ip4dst), 0,
+ ntohl(mask->ip4dst), 0);
+ NPC_WRITE_FLOW(NPC_SPORT_TCP, sport, ntohs(pkt->sport), 0,
+ ntohs(mask->sport), 0);
+ NPC_WRITE_FLOW(NPC_SPORT_UDP, sport, ntohs(pkt->sport), 0,
+ ntohs(mask->sport), 0);
+ NPC_WRITE_FLOW(NPC_DPORT_TCP, dport, ntohs(pkt->dport), 0,
+ ntohs(mask->dport), 0);
+ NPC_WRITE_FLOW(NPC_DPORT_UDP, dport, ntohs(pkt->dport), 0,
+ ntohs(mask->dport), 0);
+ NPC_WRITE_FLOW(NPC_SPORT_SCTP, sport, ntohs(pkt->sport), 0,
+ ntohs(mask->sport), 0);
+ NPC_WRITE_FLOW(NPC_DPORT_SCTP, dport, ntohs(pkt->dport), 0,
+ ntohs(mask->dport), 0);
+
+ NPC_WRITE_FLOW(NPC_OUTER_VID, vlan_tci, ntohs(pkt->vlan_tci), 0,
+ ntohs(mask->vlan_tci), 0);
+ NPC_WRITE_FLOW(NPC_FDSA_VAL, vlan_tci, ntohs(pkt->vlan_tci), 0,
+ ntohs(mask->vlan_tci), 0);
+
+ npc_update_ipv6_flow(rvu, entry, features, pkt, mask, output, intf);
+ npc_update_vlan_features(rvu, entry, features, intf);
+}
+
+static struct rvu_npc_mcam_rule *rvu_mcam_find_rule(struct npc_mcam *mcam,
+ u16 entry)
+{
+ struct rvu_npc_mcam_rule *iter;
+
+ mutex_lock(&mcam->lock);
+ list_for_each_entry(iter, &mcam->mcam_rules, list) {
+ if (iter->entry == entry) {
+ mutex_unlock(&mcam->lock);
+ return iter;
+ }
+ }
+ mutex_unlock(&mcam->lock);
+
+ return NULL;
+}
+
+static void rvu_mcam_add_rule(struct npc_mcam *mcam,
+ struct rvu_npc_mcam_rule *rule)
+{
+ struct list_head *head = &mcam->mcam_rules;
+ struct rvu_npc_mcam_rule *iter;
+
+ mutex_lock(&mcam->lock);
+ list_for_each_entry(iter, &mcam->mcam_rules, list) {
+ if (iter->entry > rule->entry)
+ break;
+ head = &iter->list;
+ }
+
+ list_add(&rule->list, head);
+ mutex_unlock(&mcam->lock);
+}
+
+static void rvu_mcam_remove_counter_from_rule(struct rvu *rvu, u16 pcifunc,
+ struct rvu_npc_mcam_rule *rule)
+{
+ struct npc_mcam_oper_counter_req free_req = { 0 };
+ struct msg_rsp free_rsp;
+
+ if (!rule->has_cntr)
+ return;
+
+ free_req.hdr.pcifunc = pcifunc;
+ free_req.cntr = rule->cntr;
+
+ rvu_mbox_handler_npc_mcam_free_counter(rvu, &free_req, &free_rsp);
+ rule->has_cntr = false;
+}
+
+static void rvu_mcam_add_counter_to_rule(struct rvu *rvu, u16 pcifunc,
+ struct rvu_npc_mcam_rule *rule,
+ struct npc_install_flow_rsp *rsp)
+{
+ struct npc_mcam_alloc_counter_req cntr_req = { 0 };
+ struct npc_mcam_alloc_counter_rsp cntr_rsp = { 0 };
+ int err;
+
+ cntr_req.hdr.pcifunc = pcifunc;
+ cntr_req.contig = true;
+ cntr_req.count = 1;
+
+ /* we try to allocate a counter to track the stats of this
+ * rule. If a counter cannot be allocated then proceed
+ * without one, because counters are more limited than entries.
+ */
+ err = rvu_mbox_handler_npc_mcam_alloc_counter(rvu, &cntr_req,
+ &cntr_rsp);
+ if (!err && cntr_rsp.count) {
+ rule->cntr = cntr_rsp.cntr;
+ rule->has_cntr = true;
+ rsp->counter = rule->cntr;
+ } else {
+ rsp->counter = err;
+ }
+}
+
+static void npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ struct mcam_entry *entry,
+ struct npc_install_flow_req *req,
+ u16 target, bool pf_set_vfs_mac)
+{
+ struct rvu_switch *rswitch = &rvu->rswitch;
+ struct nix_rx_action action;
+
+ if (rswitch->mode == DEVLINK_ESWITCH_MODE_SWITCHDEV && pf_set_vfs_mac)
+ req->chan_mask = 0x0; /* Don't care about the channel */
+
+ npc_update_entry(rvu, NPC_CHAN, entry, req->channel, 0, req->chan_mask,
+ 0, NIX_INTF_RX);
+
+ *(u64 *)&action = 0x00;
+ action.pf_func = target;
+ action.op = req->op;
+ action.index = req->index;
+ action.match_id = req->match_id;
+ action.flow_key_alg = req->flow_key_alg;
+
+ if (req->op == NIX_RX_ACTION_DEFAULT && pfvf->def_ucast_rule)
+ action = pfvf->def_ucast_rule->rx_action;
+
+ entry->action = *(u64 *)&action;
+
+ /* VTAG0 starts at 0th byte of LID_B.
+ * VTAG1 starts at 4th byte of LID_B.
+ */
+ entry->vtag_action = FIELD_PREP(RX_VTAG0_VALID_BIT, req->vtag0_valid) |
+ FIELD_PREP(RX_VTAG0_TYPE_MASK, req->vtag0_type) |
+ FIELD_PREP(RX_VTAG0_LID_MASK, NPC_LID_LB) |
+ FIELD_PREP(RX_VTAG0_RELPTR_MASK, 0) |
+ FIELD_PREP(RX_VTAG1_VALID_BIT, req->vtag1_valid) |
+ FIELD_PREP(RX_VTAG1_TYPE_MASK, req->vtag1_type) |
+ FIELD_PREP(RX_VTAG1_LID_MASK, NPC_LID_LB) |
+ FIELD_PREP(RX_VTAG1_RELPTR_MASK, 4);
+}
+
+static void npc_update_tx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ struct mcam_entry *entry,
+ struct npc_install_flow_req *req, u16 target)
+{
+ struct nix_tx_action action;
+ u64 mask = ~0ULL;
+
+ /* If AF is installing then do not care about
+ * PF_FUNC in Send Descriptor
+ */
+ if (is_pffunc_af(req->hdr.pcifunc))
+ mask = 0;
+
+ npc_update_entry(rvu, NPC_PF_FUNC, entry, (__force u16)htons(target),
+ 0, mask, 0, NIX_INTF_TX);
+
+ *(u64 *)&action = 0x00;
+ action.op = req->op;
+ action.index = req->index;
+ action.match_id = req->match_id;
+
+ entry->action = *(u64 *)&action;
+
+ /* VTAG0 starts at 0th byte of LID_B.
+ * VTAG1 starts at 4th byte of LID_B.
+ */
+ entry->vtag_action = FIELD_PREP(TX_VTAG0_DEF_MASK, req->vtag0_def) |
+ FIELD_PREP(TX_VTAG0_OP_MASK, req->vtag0_op) |
+ FIELD_PREP(TX_VTAG0_LID_MASK, NPC_LID_LA) |
+ FIELD_PREP(TX_VTAG0_RELPTR_MASK, 20) |
+ FIELD_PREP(TX_VTAG1_DEF_MASK, req->vtag1_def) |
+ FIELD_PREP(TX_VTAG1_OP_MASK, req->vtag1_op) |
+ FIELD_PREP(TX_VTAG1_LID_MASK, NPC_LID_LA) |
+ FIELD_PREP(TX_VTAG1_RELPTR_MASK, 24);
+}
+
+static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target,
+ int nixlf, struct rvu_pfvf *pfvf,
+ struct npc_install_flow_req *req,
+ struct npc_install_flow_rsp *rsp, bool enable,
+ bool pf_set_vfs_mac)
+{
+ struct rvu_npc_mcam_rule *def_ucast_rule = pfvf->def_ucast_rule;
+ u64 features, installed_features, missing_features = 0;
+ struct npc_mcam_write_entry_req write_req = { 0 };
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_npc_mcam_rule dummy = { 0 };
+ struct rvu_npc_mcam_rule *rule;
+ u16 owner = req->hdr.pcifunc;
+ struct msg_rsp write_rsp;
+ struct mcam_entry *entry;
+ int entry_index, err;
+ bool new = false;
+
+ installed_features = req->features;
+ features = req->features;
+ entry = &write_req.entry_data;
+ entry_index = req->entry;
+
+ npc_update_flow(rvu, entry, features, &req->packet, &req->mask, &dummy,
+ req->intf);
+
+ if (is_npc_intf_rx(req->intf))
+ npc_update_rx_entry(rvu, pfvf, entry, req, target, pf_set_vfs_mac);
+ else
+ npc_update_tx_entry(rvu, pfvf, entry, req, target);
+
+ /* Default unicast rules do not exist for TX */
+ if (is_npc_intf_tx(req->intf))
+ goto find_rule;
+
+ if (req->default_rule) {
+ entry_index = npc_get_nixlf_mcam_index(mcam, target, nixlf,
+ NIXLF_UCAST_ENTRY);
+ enable = is_mcam_entry_enabled(rvu, mcam, blkaddr, entry_index);
+ }
+
+ /* update mcam entry with default unicast rule attributes */
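+ /* missing_features below are the feature bits present in the default
+  * unicast rule but absent from this request, e.g.
+  * (0x7 ^ 0x5) & 0x7 = 0x2 when the default rule has 0x7 and the request
+  * has 0x5 (illustrative bit values); those fields are copied from the
+  * default rule so the appended entry still matches them.
+  */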
+ if (def_ucast_rule && (req->default_rule && req->append)) {
+ missing_features = (def_ucast_rule->features ^ features) &
+ def_ucast_rule->features;
+ if (missing_features)
+ npc_update_flow(rvu, entry, missing_features,
+ &def_ucast_rule->packet,
+ &def_ucast_rule->mask,
+ &dummy, req->intf);
+ installed_features = req->features | missing_features;
+ }
+find_rule:
+ rule = rvu_mcam_find_rule(mcam, entry_index);
+ if (!rule) {
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+ new = true;
+ }
+
+ /* allocate new counter if rule has no counter */
+ if (!req->default_rule && req->set_cntr && !rule->has_cntr)
+ rvu_mcam_add_counter_to_rule(rvu, owner, rule, rsp);
+
+ /* if user wants to delete an existing counter for a rule then
+ * free the counter
+ */
+ if (!req->set_cntr && rule->has_cntr)
+ rvu_mcam_remove_counter_from_rule(rvu, owner, rule);
+
+ write_req.hdr.pcifunc = owner;
+
+ /* AF owns the default rules so change the owner just to relax
+ * the checks in rvu_mbox_handler_npc_mcam_write_entry
+ */
+ if (req->default_rule)
+ write_req.hdr.pcifunc = 0;
+
+ write_req.entry = entry_index;
+ write_req.intf = req->intf;
+ write_req.enable_entry = (u8)enable;
+ /* if counter is available then clear and use it */
+ if (req->set_cntr && rule->has_cntr) {
+ rvu_write64(rvu, blkaddr, NPC_AF_MATCH_STATX(rule->cntr), 0x00);
+ write_req.set_cntr = 1;
+ write_req.cntr = rule->cntr;
+ }
+
+ /* update rule */
+ memcpy(&rule->packet, &dummy.packet, sizeof(rule->packet));
+ memcpy(&rule->mask, &dummy.mask, sizeof(rule->mask));
+ rule->entry = entry_index;
+ memcpy(&rule->rx_action, &entry->action, sizeof(struct nix_rx_action));
+ if (is_npc_intf_tx(req->intf))
+ memcpy(&rule->tx_action, &entry->action,
+ sizeof(struct nix_tx_action));
+ rule->vtag_action = entry->vtag_action;
+ rule->features = installed_features;
+ rule->default_rule = req->default_rule;
+ rule->owner = owner;
+ rule->enable = enable;
+ rule->chan_mask = write_req.entry_data.kw_mask[0] & NPC_KEX_CHAN_MASK;
+ rule->chan = write_req.entry_data.kw[0] & NPC_KEX_CHAN_MASK;
+ rule->chan &= rule->chan_mask;
+ if (is_npc_intf_tx(req->intf))
+ rule->intf = pfvf->nix_tx_intf;
+ else
+ rule->intf = pfvf->nix_rx_intf;
+
+ if (new)
+ rvu_mcam_add_rule(mcam, rule);
+ if (req->default_rule)
+ pfvf->def_ucast_rule = rule;
+
+ /* write to mcam entry registers */
+ err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &write_req,
+ &write_rsp);
+ if (err) {
+ rvu_mcam_remove_counter_from_rule(rvu, owner, rule);
+ if (new) {
+ list_del(&rule->list);
+ kfree(rule);
+ }
+ return err;
+ }
+
+ /* VF's MAC address is being changed via PF */
+ if (pf_set_vfs_mac) {
+ ether_addr_copy(pfvf->default_mac, req->packet.dmac);
+ ether_addr_copy(pfvf->mac_addr, req->packet.dmac);
+ set_bit(PF_SET_VF_MAC, &pfvf->flags);
+ }
+
+ if (test_bit(PF_SET_VF_CFG, &pfvf->flags) &&
+ req->vtag0_type == NIX_AF_LFX_RX_VTAG_TYPE7)
+ rule->vfvlan_cfg = true;
+
+ if (is_npc_intf_rx(req->intf) && req->match_id &&
+ (req->op == NIX_RX_ACTIONOP_UCAST || req->op == NIX_RX_ACTIONOP_RSS))
+ return rvu_nix_setup_ratelimit_aggr(rvu, req->hdr.pcifunc,
+ req->index, req->match_id);
+ return 0;
+}
+
+int rvu_mbox_handler_npc_install_flow(struct rvu *rvu,
+ struct npc_install_flow_req *req,
+ struct npc_install_flow_rsp *rsp)
+{
+ bool from_vf = !!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK);
+ struct rvu_switch *rswitch = &rvu->rswitch;
+ int blkaddr, nixlf, err;
+ struct rvu_pfvf *pfvf;
+ bool pf_set_vfs_mac = false;
+ bool enable = true;
+ u16 target;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0) {
+ dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
+ return NPC_MCAM_INVALID_REQ;
+ }
+
+ if (!is_npc_interface_valid(rvu, req->intf))
+ return NPC_FLOW_INTF_INVALID;
+
+ if (from_vf && req->default_rule)
+ return NPC_FLOW_VF_PERM_DENIED;
+
+ /* Each PF/VF's info is maintained in struct rvu_pfvf.
+ * The rvu_pfvf of the target PF/VF needs to be retrieved,
+ * hence modify pcifunc accordingly.
+ */
+
+ /* AF installing for a PF/VF */
+ if (!req->hdr.pcifunc)
+ target = req->vf;
+ /* PF installing for its VF */
+ else if (!from_vf && req->vf) {
+ target = (req->hdr.pcifunc & ~RVU_PFVF_FUNC_MASK) | req->vf;
+ pf_set_vfs_mac = req->default_rule &&
+ (req->features & BIT_ULL(NPC_DMAC));
+ }
+ /* msg received from PF/VF */
+ else
+ target = req->hdr.pcifunc;
+
+ /* ignore chan_mask in case pf func is not AF, revisit later */
+ if (!is_pffunc_af(req->hdr.pcifunc))
+ req->chan_mask = 0xFFF;
+
+ err = npc_check_unsupported_flows(rvu, req->features, req->intf);
+ if (err)
+ return NPC_FLOW_NOT_SUPPORTED;
+
+ pfvf = rvu_get_pfvf(rvu, target);
+
+ /* PF installing for its VF */
+ if (req->hdr.pcifunc && !from_vf && req->vf)
+ set_bit(PF_SET_VF_CFG, &pfvf->flags);
+
+ /* update req destination mac addr */
+ if ((req->features & BIT_ULL(NPC_DMAC)) && is_npc_intf_rx(req->intf) &&
+ is_zero_ether_addr(req->packet.dmac)) {
+ ether_addr_copy(req->packet.dmac, pfvf->mac_addr);
+ eth_broadcast_addr((u8 *)&req->mask.dmac);
+ }
+
+ /* For TX rules, proceed even if a NIXLF is not attached */
+ err = nix_get_nixlf(rvu, target, &nixlf, NULL);
+ if (err && is_npc_intf_rx(req->intf) && !pf_set_vfs_mac)
+ return NPC_FLOW_NO_NIXLF;
+
+ /* don't enable rule when nixlf not attached or initialized */
+ if (!(is_nixlf_attached(rvu, target) &&
+ test_bit(NIXLF_INITIALIZED, &pfvf->flags)))
+ enable = false;
+
+ /* Packets reaching NPC in the Tx path imply that a
+ * NIXLF is properly set up and transmitting.
+ * Hence rules can be enabled for Tx.
+ */
+ if (is_npc_intf_tx(req->intf))
+ enable = true;
+
+ /* Do not allow requests from uninitialized VFs */
+ if (from_vf && !enable)
+ return NPC_FLOW_VF_NOT_INIT;
+
+ /* PF sets VF mac & VF NIXLF is not attached, update the mac addr */
+ if (pf_set_vfs_mac && !enable) {
+ ether_addr_copy(pfvf->default_mac, req->packet.dmac);
+ ether_addr_copy(pfvf->mac_addr, req->packet.dmac);
+ set_bit(PF_SET_VF_MAC, &pfvf->flags);
+ return 0;
+ }
+
+ mutex_lock(&rswitch->switch_lock);
+ err = npc_install_flow(rvu, blkaddr, target, nixlf, pfvf,
+ req, rsp, enable, pf_set_vfs_mac);
+ mutex_unlock(&rswitch->switch_lock);
+
+ return err;
+}
+
+static int npc_delete_flow(struct rvu *rvu, struct rvu_npc_mcam_rule *rule,
+ u16 pcifunc)
+{
+ struct npc_mcam_ena_dis_entry_req dis_req = { 0 };
+ struct msg_rsp dis_rsp;
+
+ if (rule->default_rule)
+ return 0;
+
+ if (rule->has_cntr)
+ rvu_mcam_remove_counter_from_rule(rvu, pcifunc, rule);
+
+ dis_req.hdr.pcifunc = pcifunc;
+ dis_req.entry = rule->entry;
+
+ list_del(&rule->list);
+ kfree(rule);
+
+ return rvu_mbox_handler_npc_mcam_dis_entry(rvu, &dis_req, &dis_rsp);
+}
+
+int rvu_mbox_handler_npc_delete_flow(struct rvu *rvu,
+ struct npc_delete_flow_req *req,
+ struct msg_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_npc_mcam_rule *iter, *tmp;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct list_head del_list;
+
+ INIT_LIST_HEAD(&del_list);
+
+ mutex_lock(&mcam->lock);
+ list_for_each_entry_safe(iter, tmp, &mcam->mcam_rules, list) {
+ if (iter->owner == pcifunc) {
+ /* All rules */
+ if (req->all) {
+ list_move_tail(&iter->list, &del_list);
+ /* Range of rules */
+ } else if (req->end && iter->entry >= req->start &&
+ iter->entry <= req->end) {
+ list_move_tail(&iter->list, &del_list);
+ /* single rule */
+ } else if (req->entry == iter->entry) {
+ list_move_tail(&iter->list, &del_list);
+ break;
+ }
+ }
+ }
+ mutex_unlock(&mcam->lock);
+
+ list_for_each_entry_safe(iter, tmp, &del_list, list) {
+ u16 entry = iter->entry;
+
+ /* clear the mcam entry target pcifunc */
+ mcam->entry2target_pffunc[entry] = 0x0;
+ if (npc_delete_flow(rvu, iter, pcifunc))
+ dev_err(rvu->dev, "rule deletion failed for entry:%u",
+ entry);
+ }
+
+ return 0;
+}
+
+static int npc_update_dmac_value(struct rvu *rvu, int npcblkaddr,
+ struct rvu_npc_mcam_rule *rule,
+ struct rvu_pfvf *pfvf)
+{
+ struct npc_mcam_write_entry_req write_req = { 0 };
+ struct mcam_entry *entry = &write_req.entry_data;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct msg_rsp rsp;
+ u8 intf, enable;
+ int err;
+
+ ether_addr_copy(rule->packet.dmac, pfvf->mac_addr);
+
+ npc_read_mcam_entry(rvu, mcam, npcblkaddr, rule->entry,
+ entry, &intf, &enable);
+
+ npc_update_entry(rvu, NPC_DMAC, entry,
+ ether_addr_to_u64(pfvf->mac_addr), 0,
+ 0xffffffffffffull, 0, intf);
+
+ write_req.hdr.pcifunc = rule->owner;
+ write_req.entry = rule->entry;
+ write_req.intf = pfvf->nix_rx_intf;
+
+ mutex_unlock(&mcam->lock);
+ err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &write_req, &rsp);
+ mutex_lock(&mcam->lock);
+
+ return err;
+}
+
+void npc_mcam_enable_flows(struct rvu *rvu, u16 target)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, target);
+ struct rvu_npc_mcam_rule *def_ucast_rule;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_npc_mcam_rule *rule;
+ int blkaddr, bank, index;
+ u64 def_action;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ def_ucast_rule = pfvf->def_ucast_rule;
+
+ mutex_lock(&mcam->lock);
+ list_for_each_entry(rule, &mcam->mcam_rules, list) {
+ if (is_npc_intf_rx(rule->intf) &&
+ rule->rx_action.pf_func == target && !rule->enable) {
+ if (rule->default_rule) {
+ npc_enable_mcam_entry(rvu, mcam, blkaddr,
+ rule->entry, true);
+ rule->enable = true;
+ continue;
+ }
+
+ if (rule->vfvlan_cfg)
+ npc_update_dmac_value(rvu, blkaddr, rule, pfvf);
+
+ if (rule->rx_action.op == NIX_RX_ACTION_DEFAULT) {
+ if (!def_ucast_rule)
+ continue;
+ /* Use default unicast entry action */
+ rule->rx_action = def_ucast_rule->rx_action;
+ def_action = *(u64 *)&def_ucast_rule->rx_action;
+ bank = npc_get_bank(mcam, rule->entry);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION
+ (rule->entry, bank), def_action);
+ }
+
+ npc_enable_mcam_entry(rvu, mcam, blkaddr,
+ rule->entry, true);
+ rule->enable = true;
+ }
+ }
+
+ /* Enable MCAM entries installed by PF with target as VF pcifunc */
+ for (index = 0; index < mcam->bmap_entries; index++) {
+ if (mcam->entry2target_pffunc[index] == target)
+ npc_enable_mcam_entry(rvu, mcam, blkaddr,
+ index, true);
+ }
+ mutex_unlock(&mcam->lock);
+}
+
+void npc_mcam_disable_flows(struct rvu *rvu, u16 target)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int blkaddr, index;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ mutex_lock(&mcam->lock);
+ /* Disable MCAM entries installed by PF with target as VF pcifunc */
+ for (index = 0; index < mcam->bmap_entries; index++) {
+ if (mcam->entry2target_pffunc[index] == target)
+ npc_enable_mcam_entry(rvu, mcam, blkaddr,
+ index, false);
+ }
+ mutex_unlock(&mcam->lock);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_ree.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_ree.c
new file mode 100644
index 000000000000..6b0d86582243
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_ree.c
@@ -0,0 +1,1242 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include "rvu.h"
+#include "rvu_reg.h"
+
+/* Maximum number of REE blocks */
+#define MAX_REE_BLKS 2
+
+/* Maximum number of graph entries, each of 8B */
+#define REE_GRAPH_CNT (16 * 1024 * 1024)
+
+/* Prefix block size is 1K entries of 16B each;
+ * the maximum number of blocks for a single ROF is 128.
+ */
+#define REE_PREFIX_PTR_LEN 1024
+#define REE_PREFIX_CNT (128 * 1024)
+
+/* Rule DB entries are held in memory */
+#define REE_RULE_DB_ALLOC_SIZE (4 * 1024 * 1024)
+#define REE_RULE_DB_ALLOC_SHIFT 22
+#define REE_RULE_DB_BLOCK_CNT 64
+
+/* Rule DB incremental */
+#define REE_RULE_DBI_SIZE (16 * 6)
+
+/* Administrative instruction queue size */
+#define REE_AQ_SIZE 128
+
+static const char *ree_irq_name[MAX_REE_BLKS][REE_AF_INT_VEC_CNT] = {
+ { "REE0_AF_RAS", "REE0_AF_RVU", "REE0_AF_DONE", "REE0_AF_AQ" },
+ { "REE1_AF_RAS", "REE1_AF_RVU", "REE1_AF_DONE", "REE1_AF_AQ" },
+};
+
+enum ree_cmp_ops {
+ REE_CMP_EQ, /* Equal to data */
+ REE_CMP_GEQ, /* Equal or greater than data */
+ REE_CMP_LEQ, /* Equal or less than data */
+ REE_CMP_KEY_FIELDS_MAX,
+};
+
+enum ree_rof_types {
+ REE_ROF_TYPE_0 = 0, /* Legacy */
+ REE_ROF_TYPE_1 = 1, /* Check CSR EQ */
+ REE_ROF_TYPE_2 = 2, /* Check CSR GEQ */
+ REE_ROF_TYPE_3 = 3, /* Check CSR LEQ */
+ REE_ROF_TYPE_4 = 4, /* Not relevant */
+ REE_ROF_TYPE_5 = 5, /* Check CSR checksum only for internal memory */
+ REE_ROF_TYPE_6 = 6, /* Internal memory */
+ REE_ROF_TYPE_7 = 7, /* External memory */
+};
+
+struct ree_rule_db_entry {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 addr : 32;
+ u64 pad : 24;
+ u64 type : 8;
+#else
+ u64 type : 8;
+ u64 pad : 24;
+ u64 addr : 32;
+#endif
+ u64 value;
+};
+
+static void ree_reex_enable(struct rvu *rvu, struct rvu_block *block)
+{
+ u64 reg;
+
+ /* Set GO bit */
+ reg = rvu_read64(rvu, block->addr, REE_AF_REEXM_CTRL);
+ reg |= REE_AF_REEXM_CTRL_GO;
+ rvu_write64(rvu, block->addr, REE_AF_REEXM_CTRL, reg);
+}
+
+static void ree_reex_force_clock(struct rvu *rvu, struct rvu_block *block,
+ bool force_on)
+{
+ u64 reg;
+
+ /* Force ON or OFF for SCLK / RXPCLK */
+ reg = rvu_read64(rvu, block->addr, REE_AF_CMD_CTL);
+ if (force_on)
+ reg = reg | REE_AF_FORCE_CCLK | REE_AF_FORCE_CSCLK;
+ else
+ reg = reg & ~(REE_AF_FORCE_CCLK | REE_AF_FORCE_CSCLK);
+ rvu_write64(rvu, block->addr, REE_AF_CMD_CTL, reg);
+}
+
+static int ree_graceful_disable_control(struct rvu *rvu,
+ struct rvu_block *block, bool apply)
+{
+ u64 val, mask;
+ int err;
+
+ /* Graceful Disable is available on all queues 0..35
+ * 0 = Queue is not gracefully-disabled (apply is false)
+ * 1 = Queue was gracefully-disabled (apply is true)
+ */
+ mask = GENMASK(35, 0);
+
+ /* Check the current graceful disable status */
+ val = rvu_read64(rvu, block->addr, REE_AF_GRACEFUL_DIS_STATUS) & mask;
+ if (apply && val)
+ return REE_AF_ERR_Q_IS_GRACEFUL_DIS;
+ else if (!apply && !val)
+ return REE_AF_ERR_Q_NOT_GRACEFUL_DIS;
+
+ /* Apply Graceful Enable or Disable on all queues 0..35 */
+ if (apply)
+ val = GENMASK(35, 0);
+ else
+ val = 0;
+
+ rvu_write64(rvu, block->addr, REE_AF_GRACEFUL_DIS_CTL, val);
+
+ /* Poll until the graceful disable is applied (or cleared) on all
+ * queues; this might take time.
+ */
+ err = rvu_poll_reg(rvu, block->addr, REE_AF_GRACEFUL_DIS_STATUS, mask,
+ !apply);
+ if (err) {
+ dev_err(rvu->dev, "REE graceful disable control failed");
+ return err;
+ }
+ return 0;
+}
+
+static int ree_reex_programming(struct rvu *rvu, struct rvu_block *block,
+ u8 incremental)
+{
+ int err;
+
+ if (!incremental) {
+ /* REEX Set & Clear MAIN_CSR init */
+ rvu_write64(rvu, block->addr, REE_AF_REEXM_CTRL,
+ REE_AF_REEXM_CTRL_INIT);
+ rvu_write64(rvu, block->addr, REE_AF_REEXM_CTRL, 0x0);
+
+ /* REEX Poll MAIN_CSR INIT_DONE */
+ err = rvu_poll_reg(rvu, block->addr, REE_AF_REEXM_STATUS,
+ REE_AF_REEXM_STATUS_INIT_DONE, false);
+ if (err) {
+ dev_err(rvu->dev, "REE poll reexm status failed");
+ return err;
+ }
+
+ /* REEX Set Mem Init Mode */
+ rvu_write64(rvu, block->addr, REE_AF_REEXR_CTRL,
+ (REE_AF_REEXR_CTRL_INIT |
+ REE_AF_REEXR_CTRL_MODE_IM_L1_L2));
+
+ /* REEX Set & Clear Mem Init */
+ rvu_write64(rvu, block->addr, REE_AF_REEXR_CTRL,
+ REE_AF_REEXR_CTRL_MODE_IM_L1_L2);
+
+ /* REEX Poll all RTRU DONE 3 bits */
+ err = rvu_poll_reg(rvu, block->addr, REE_AF_REEXR_STATUS,
+ (REE_AF_REEXR_STATUS_IM_INIT_DONE |
+ REE_AF_REEXR_STATUS_L1_CACHE_INIT_DONE |
+ REE_AF_REEXR_STATUS_L2_CACHE_INIT_DONE),
+ false);
+ if (err) {
+ dev_err(rvu->dev, "REE for cache done failed");
+ return err;
+ }
+ } else {
+ /* REEX Set Mem Init Mode */
+ rvu_write64(rvu, block->addr, REE_AF_REEXR_CTRL,
+ (REE_AF_REEXR_CTRL_INIT |
+ REE_AF_REEXR_CTRL_MODE_L1_L2));
+
+ /* REEX Set & Clear Mem Init */
+ rvu_write64(rvu, block->addr, REE_AF_REEXR_CTRL,
+ REE_AF_REEXR_CTRL_MODE_L1_L2);
+
+ /* REEX Poll all RTRU DONE 2 bits */
+ err = rvu_poll_reg(rvu, block->addr, REE_AF_REEXR_STATUS,
+ (REE_AF_REEXR_STATUS_L1_CACHE_INIT_DONE |
+ REE_AF_REEXR_STATUS_L2_CACHE_INIT_DONE),
+ false);
+ if (err) {
+ dev_err(rvu->dev, "REE cache & init done failed");
+ return err;
+ }
+ }
+
+ /* Before 1st time en-queue, set REEX RTRU.GO bit to 1 */
+ rvu_write64(rvu, block->addr, REE_AF_REEXR_CTRL, REE_AF_REEXR_CTRL_GO);
+ return 0;
+}
+
+static int ree_afaq_done_ack(struct rvu *rvu, struct rvu_block *block,
+ bool poll)
+{
+ u64 val;
+ int err;
+
+ /* Poll the Done count until it is 1 to see that the last instruction
+ * has completed. Then write this value to DONE_ACK to decrement
+ * the Done count.
+ * Note that no interrupts are used for these counters.
+ */
+ if (poll) {
+ err = rvu_poll_reg(rvu, block->addr, REE_AF_AQ_DONE,
+ 0x1, false);
+ if (err) {
+ dev_err(rvu->dev, "REE AFAQ done failed");
+ return err;
+ }
+ }
+ val = rvu_read64(rvu, block->addr, REE_AF_AQ_DONE);
+ if (val)
+ rvu_write64(rvu, block->addr, REE_AF_AQ_DONE_ACK, val);
+ return 0;
+}
+
+static void ree_aq_inst_enq(struct rvu *rvu, struct rvu_block *block,
+ struct ree_rsrc *ree, dma_addr_t head, u32 size,
+ int doneint)
+{
+ struct admin_queue *aq = block->aq;
+ struct ree_af_aq_inst_s inst;
+
+ /* Fill instruction */
+ memset(&inst, 0, sizeof(struct ree_af_aq_inst_s));
+ inst.length = size;
+ inst.rof_ptr_addr = (u64)head;
+ inst.doneint = doneint;
+ /* Copy instruction to AF AQ head */
+ memcpy(aq->inst->base + (ree->aq_head * aq->inst->entry_sz),
+ &inst, aq->inst->entry_sz);
+ /* Sync into memory */
+ wmb();
+ /* SW triggers HW AQ.DOORBELL */
+ rvu_write64(rvu, block->addr, REE_AF_AQ_DOORBELL, 1);
+ /* Move Head to next cell in AF AQ.
+ * HW CSR gives only AF AQ tail address
+ */
+ ree->aq_head++;
+ if (ree->aq_head >= aq->inst->qsize)
+ ree->aq_head = 0;
+}
+
+static int ree_reex_memory_alloc(struct rvu *rvu, struct rvu_block *block,
+ struct ree_rsrc *ree, int db_len,
+ int is_incremental)
+{
+ int alloc_len, err, i;
+
+ /* Allocate 128MB of graph memory. This is an IOVA base address
+ * for the memory image of the regular expression graphs.
+ * Software fills this memory with graph instructions (type 7)
+ * and HW uses it as external memory for graph search.
+ */
+ if (!ree->graph_ctx) {
+ err = qmem_alloc(rvu->dev, &ree->graph_ctx, REE_GRAPH_CNT,
+ sizeof(u64));
+ if (err)
+ return err;
+ /* Update Graph address in DRAM */
+ rvu_write64(rvu, block->addr, REE_AF_EM_BASE,
+ (u64)ree->graph_ctx->iova);
+ }
+
+ /* If not incremental programming, clear Graph Memory
+ * before programming
+ */
+ if (!is_incremental)
+ memset(ree->graph_ctx->base, 0, REE_GRAPH_CNT * sizeof(u64));
+
+ /* Allocate buffers to hold ROF data. Each buffer holds a maximum
+ * of 16384 bytes, i.e. a block of 1K instructions. These blocks are
+ * pointed to by REE_AF_AQ_INST_S:ROF_PTR_ADDR. Multiple blocks are
+ * allocated for concurrent work with HW.
+ */
+ */
+ if (!ree->prefix_ctx) {
+ err = qmem_alloc(rvu->dev, &ree->prefix_ctx, REE_PREFIX_CNT,
+ sizeof(struct ree_rof_s));
+ if (err) {
+ qmem_free(rvu->dev, ree->graph_ctx);
+ ree->graph_ctx = NULL;
+ return err;
+ }
+ }
+
+ /* Allocate memory to hold incremental programming checksum reference
+ * data which can later be retrieved via mbox by the application.
+ */
+ if (!ree->ruledbi) {
+ ree->ruledbi = kmalloc_array(REE_RULE_DBI_SIZE, sizeof(void *),
+ GFP_KERNEL);
+ if (!ree->ruledbi) {
+ qmem_free(rvu->dev, ree->graph_ctx);
+ ree->graph_ctx = NULL;
+ qmem_free(rvu->dev, ree->prefix_ctx);
+ ree->prefix_ctx = NULL;
+ return REE_AF_ERR_RULE_DBI_ALLOC_FAILED;
+ }
+ }
+ /* Allocate memory to hold ROF instructions. ROF instructions are
+ * passed from the application in multiple mbox messages. Once the last
+ * instruction is passed, they are programmed to REE.
+ * ROF instructions are kept in memory for later retrieval by the
+ * application in order to allow incremental programming.
+ */
+ if (!ree->ruledb) {
+ ree->ruledb = kmalloc_array(REE_RULE_DB_BLOCK_CNT,
+ sizeof(void *), GFP_KERNEL);
+ if (!ree->ruledb) {
+ qmem_free(rvu->dev, ree->graph_ctx);
+ ree->graph_ctx = NULL;
+ qmem_free(rvu->dev, ree->prefix_ctx);
+ ree->prefix_ctx = NULL;
+ kfree(ree->ruledbi);
+ ree->ruledbi = NULL;
+ return REE_AF_ERR_RULE_DB_ALLOC_FAILED;
+ }
+ ree->ruledb_blocks = 0;
+ }
+ alloc_len = ree->ruledb_blocks * REE_RULE_DB_ALLOC_SIZE;
+ while (alloc_len < db_len) {
+ if (ree->ruledb_blocks >= REE_RULE_DB_BLOCK_CNT) {
+ /* No need to free memory here since this is just an
+ * indication that the rule DB is too big.
+ * Unlike the previous allocations that happen only once,
+ * this allocation can happen over time if larger
+ * ROF files are sent.
+ */
+ return REE_AF_ERR_RULE_DB_TOO_BIG;
+ }
+ ree->ruledb[ree->ruledb_blocks] =
+ kmalloc(REE_RULE_DB_ALLOC_SIZE, GFP_KERNEL);
+ if (!ree->ruledb[ree->ruledb_blocks]) {
+ for (i = 0; i < ree->ruledb_blocks; i++)
+ kfree(ree->ruledb[i]);
+ qmem_free(rvu->dev, ree->graph_ctx);
+ ree->graph_ctx = NULL;
+ qmem_free(rvu->dev, ree->prefix_ctx);
+ ree->prefix_ctx = NULL;
+ kfree(ree->ruledbi);
+ ree->ruledbi = NULL;
+ kfree(ree->ruledb);
+ ree->ruledb = NULL;
+ return REE_AF_ERR_RULE_DB_BLOCK_ALLOC_FAILED;
+ }
+ ree->ruledb_blocks += 1;
+ alloc_len += REE_RULE_DB_ALLOC_SIZE;
+ }
+
+ return 0;
+}
+
+static
+int ree_reex_cksum_compare(struct rvu *rvu, int blkaddr,
+ struct ree_rule_db_entry **rule_db,
+ int *rule_db_len, enum ree_cmp_ops cmp)
+{
+ u64 offset;
+ u64 reg;
+
+ /* ROF instructions have 3 fields: type, address and data.
+ * Instructions of type 1,2,3 and 5 are compared against CSR values.
+ * The address of the CSR is calculated from the instruction address.
+ * The CSR value is compared against instruction data.
+ * REE AF REEX comparison registers are in 2 sections: main and rtru.
+ * Main CSR base address is 0x8000, rtru CSR base address is 0x8200.
+ * Instruction address bits 16 to 18 indicate the block from which one
+ * can take the base address: Main is 0x0000, RTRU is 0x0001.
+ * The low 5 bits indicate the offset; multiply it by 8.
+ * The address is calculated as follows:
+ * - Base address is 0x8000
+ * - bits 16 to 18 are multiplied by 0x200
+ * - Low 5 bits are multiplied by 8
+ */
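+ /* For example, an instruction address of 0x10003 selects block 1 (RTRU)
+  * via bits 16-18 and index 3 via the low 5 bits, giving
+  * 0x8000 + 1 * 0x200 + 3 * 8 = 0x8218 (illustrative address).
+  */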
+ offset = REE_AF_REEX_CSR_BLOCK_BASE_ADDR +
+ ((((*rule_db)->addr & REE_AF_REEX_CSR_BLOCK_ID_MASK) >>
+ REE_AF_REEX_CSR_BLOCK_ID_SHIFT) *
+ REE_AF_REEX_CSR_BLOCK_ID) +
+ (((*rule_db)->addr & REE_AF_REEX_CSR_INDEX_MASK) *
+ REE_AF_REEX_CSR_INDEX);
+ reg = rvu_read64(rvu, blkaddr, offset);
+ switch (cmp) {
+ case REE_CMP_EQ:
+ if (reg != (*rule_db)->value) {
+ dev_err(rvu->dev, "REE addr %llx data %llx neq %llx",
+ offset, reg, (*rule_db)->value);
+ return REE_AF_ERR_RULE_DB_EQ_BAD_VALUE;
+ }
+ break;
+ case REE_CMP_GEQ:
+ if (reg < (*rule_db)->value) {
+ dev_err(rvu->dev, "REE addr %llx data %llx ngeq %llx",
+ offset, reg, (*rule_db)->value);
+ return REE_AF_ERR_RULE_DB_GEQ_BAD_VALUE;
+ }
+ break;
+ case REE_CMP_LEQ:
+ if (reg > (*rule_db)->value) {
+ dev_err(rvu->dev, "REE addr %llx data %llx nleq %llx",
+ offset, reg, (*rule_db)->value);
+ return REE_AF_ERR_RULE_DB_LEQ_BAD_VALUE;
+ }
+ break;
+ default:
+ dev_err(rvu->dev, "REE addr %llx data %llx default %llx",
+ offset, reg, (*rule_db)->value);
+ return REE_AF_ERR_RULE_UNKNOWN_VALUE;
+ }
+
+ (*rule_db)++;
+ *rule_db_len -= sizeof(struct ree_rule_db_entry);
+ return 0;
+}
+
+static
+void ree_reex_prefix_write(void **prefix_ptr,
+ struct ree_rule_db_entry **rule_db,
+ int *rule_db_len, u32 *count,
+ u32 *db_block_len)
+{
+ struct ree_rof_s rof_entry;
+
+ while ((*rule_db)->type == REE_ROF_TYPE_6) {
+ rof_entry.typ = (*rule_db)->type;
+ rof_entry.addr = (*rule_db)->addr;
+ rof_entry.data = (*rule_db)->value;
+ memcpy((*prefix_ptr), (void *)(&rof_entry),
+ sizeof(struct ree_rof_s));
+ /* AF AQ prefix block to copy to */
+ (*prefix_ptr) += sizeof(struct ree_rof_s);
+ /* Location in the ROF DB that has been parsed so far */
+ (*rule_db)++;
+ /* Length of ROF DB left to handle */
+ (*rule_db_len) -= sizeof(struct ree_rule_db_entry);
+ /* Number of type 6 rows that were parsed */
+ (*count)++;
+ /* Go over current block only */
+ (*db_block_len)--;
+ if (*db_block_len == 0)
+ break;
+ }
+}
+
+static
+int ree_reex_graph_write(struct ree_rsrc *ree,
+ struct ree_rule_db_entry **rule_db, int *rule_db_len,
+ u32 *db_block_len)
+{
+ u32 offset;
+
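+ /* Each type-7 entry carries a 24-bit graph-entry index in its address
+  * field; shifting it left by 3 converts it to a byte offset into the
+  * external graph memory of REE_GRAPH_CNT 8-byte entries (128MB).
+  */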
+ while ((*rule_db)->type == REE_ROF_TYPE_7) {
+ offset = ((*rule_db)->addr & 0xFFFFFF) << 3;
+ if (offset > REE_GRAPH_CNT * 8)
+ return REE_AF_ERR_GRAPH_ADDRESS_TOO_BIG;
+ memcpy(ree->graph_ctx->base + offset,
+ &(*rule_db)->value, sizeof((*rule_db)->value));
+ (*rule_db)++;
+ *rule_db_len -= sizeof(struct ree_rule_db_entry);
+ /* Go over current block only */
+ (*db_block_len)--;
+ if (*db_block_len == 0)
+ break;
+ }
+ return 0;
+}
+
+static
+int ree_rof_data_validation(struct rvu *rvu, int blkaddr,
+ struct ree_rsrc *ree, int *db_block,
+ struct ree_rule_db_entry **rule_db_ptr,
+ int *rule_db_len, u32 *db_block_len)
+{
+ int err;
+
+ /* Parse ROF data */
+ while (*rule_db_len > 0) {
+ switch ((*rule_db_ptr)->type) {
+ case REE_ROF_TYPE_1:
+ err = ree_reex_cksum_compare(rvu, blkaddr, rule_db_ptr,
+ rule_db_len, REE_CMP_EQ);
+ if (err < 0)
+ return err;
+ break;
+ case REE_ROF_TYPE_2:
+ err = ree_reex_cksum_compare(rvu, blkaddr, rule_db_ptr,
+ rule_db_len, REE_CMP_GEQ);
+ if (err < 0)
+ return err;
+ break;
+ case REE_ROF_TYPE_3:
+ err = ree_reex_cksum_compare(rvu, blkaddr, rule_db_ptr,
+ rule_db_len, REE_CMP_LEQ);
+ if (err < 0)
+ return err;
+ break;
+ case REE_ROF_TYPE_4:
+ /* Type 4 handles internal memory */
+ (*rule_db_ptr)++;
+ (*rule_db_len) -= sizeof(struct ree_rof_s);
+ break;
+ case REE_ROF_TYPE_5:
+ err = ree_reex_cksum_compare(rvu, blkaddr, rule_db_ptr,
+ rule_db_len, REE_CMP_EQ);
+ if (err < 0)
+ return err;
+ break;
+ case REE_ROF_TYPE_6:
+ case REE_ROF_TYPE_7:
+ return 0;
+ default:
+ /* Other types not supported */
+ (*rule_db_ptr)++;
+ *rule_db_len -= sizeof(struct ree_rof_s);
+ return REE_AF_ERR_BAD_RULE_TYPE;
+ }
+ (*db_block_len)--;
+ /* If rule DB is larger than 4M there is a need
+ * to move between db blocks of 4M
+ */
+ if (*db_block_len == 0) {
+ (*db_block)++;
+ *rule_db_ptr = ree->ruledb[(*db_block)];
+ *db_block_len = (REE_RULE_DB_ALLOC_SIZE >> 4);
+ }
+ }
+ return 0;
+}
+
+static
+int ree_rof_data_enq(struct rvu *rvu, struct rvu_block *block,
+ struct ree_rsrc *ree,
+ struct ree_rule_db_entry **rule_db_ptr,
+ int *rule_db_len, int *db_block, u32 *db_block_len)
+{
+ void *prefix_ptr = ree->prefix_ctx->base;
+ u32 size, num_of_entries = 0;
+ dma_addr_t head;
+ int err;
+
+ /* Parse ROF data */
+ while (*rule_db_len > 0) {
+ switch ((*rule_db_ptr)->type) {
+ case REE_ROF_TYPE_1:
+ case REE_ROF_TYPE_2:
+ case REE_ROF_TYPE_3:
+ case REE_ROF_TYPE_4:
+ case REE_ROF_TYPE_5:
+ break;
+ case REE_ROF_TYPE_6:
+ ree_reex_prefix_write(&prefix_ptr, rule_db_ptr,
+ rule_db_len, &num_of_entries,
+ db_block_len);
+ break;
+ case REE_ROF_TYPE_7:
+ err = ree_reex_graph_write(ree, rule_db_ptr,
+ rule_db_len, db_block_len);
+ if (err)
+ return err;
+ break;
+ default:
+ /* Other types not supported */
+ return REE_AF_ERR_BAD_RULE_TYPE;
+ }
+ /* If rule DB is larger than 4M there is a need
+ * to move between db blocks of 4M
+ */
+ if (*db_block_len == 0) {
+ (*db_block)++;
+ *rule_db_ptr = ree->ruledb[(*db_block)];
+ *db_block_len = (REE_RULE_DB_ALLOC_SIZE >> 4);
+ }
+ /* If there is no more prefix or graph data,
+ * en-queue the prefix data and continue with data validation.
+ */
+ if (((*rule_db_ptr)->type != REE_ROF_TYPE_6) &&
+ ((*rule_db_ptr)->type != REE_ROF_TYPE_7))
+ break;
+ }
+
+ /* Blocks are filled with 1K instructions each.
+ * En-queue all available blocks to the AF AQ.
+ */
+ head = ree->prefix_ctx->iova;
+ while (num_of_entries > 0) {
+ if (num_of_entries > REE_PREFIX_PTR_LEN) {
+ size = REE_PREFIX_PTR_LEN * sizeof(struct ree_rof_s);
+ ree_aq_inst_enq(rvu, block, ree, head, size, false);
+ head += REE_PREFIX_PTR_LEN * sizeof(struct ree_rof_s);
+ num_of_entries -= REE_PREFIX_PTR_LEN;
+ } else {
+ /* Last chunk of instructions to handle */
+ size = num_of_entries * sizeof(struct ree_rof_s);
+ ree_aq_inst_enq(rvu, block, ree, head, size, true);
+ num_of_entries = 0;
+ }
+ }
+ /* Verify completion of type 6 */
+ return ree_afaq_done_ack(rvu, block, true);
+}
+
+static
+int ree_rule_db_prog(struct rvu *rvu, struct rvu_block *block,
+ struct ree_rsrc *ree, int inc)
+{
+ /* db_block_len holds number of ROF instruction in a memory block */
+ u32 db_block_len = (REE_RULE_DB_ALLOC_SIZE >> 4);
+ struct ree_rule_db_entry *rule_db_ptr;
+ int rule_db_len, ret = 0, db_block = 0;
+ u64 reg;
+
+ /* Stop fetching new instructions while programming */
+ ret = ree_graceful_disable_control(rvu, block, true);
+ if (ret)
+ return ret;
+
+ /* Force Clock ON.
+ * The force bits should be set throughout REEX programming, whether
+ * full or incremental.
+ */
+ ree_reex_force_clock(rvu, block, true);
+
+ /* Ack the AF AQ done count, in case a previous programming attempt
+ * timed out before receiving the done indication. Before the
+ * programming process starts, acknowledge all existing done
+ * counts from the previous run.
+ */
+ ret = ree_afaq_done_ack(rvu, block, false);
+ if (ret)
+ goto err;
+
+ /* Reinitialize REEX block for programming */
+ ret = ree_reex_programming(rvu, block, inc);
+ if (ret)
+ goto err;
+
+ /* Parse ROF data - validation part */
+ rule_db_len = ree->ruledb_len;
+ rule_db_ptr = (struct ree_rule_db_entry *)ree->ruledb[db_block];
+ ret = ree_rof_data_validation(rvu, block->addr, ree, &db_block,
+ &rule_db_ptr, &rule_db_len,
+ &db_block_len);
+ if (ret)
+ goto err;
+
+ /* Parse ROF data - data part */
+ ret = ree_rof_data_enq(rvu, block, ree, &rule_db_ptr, &rule_db_len,
+ &db_block, &db_block_len);
+ if (ret)
+ goto err;
+ /* Parse ROF data - validation part */
+ ret = ree_rof_data_validation(rvu, block->addr, ree, &db_block,
+ &rule_db_ptr, &rule_db_len,
+ &db_block_len);
+ if (ret)
+ goto err;
+
+ /* REEX Programming DONE: clear GO bit */
+ reg = rvu_read64(rvu, block->addr, REE_AF_REEXR_CTRL);
+ reg = reg & ~(REE_AF_REEXR_CTRL_GO);
+ rvu_write64(rvu, block->addr, REE_AF_REEXR_CTRL, reg);
+
+ ree_reex_enable(rvu, block);
+
+err:
+ /* Force Clock OFF */
+ ree_reex_force_clock(rvu, block, false);
+
+ /* Resume fetching instructions */
+ ree_graceful_disable_control(rvu, block, false);
+
+ return ret;
+}
+
+int rvu_mbox_handler_ree_rule_db_prog(struct rvu *rvu,
+ struct ree_rule_db_prog_req_msg *req,
+ struct msg_rsp *rsp)
+{
+ int blkaddr, db_block = 0, blkid = 0, err;
+ struct rvu_block *block;
+ struct ree_rsrc *ree;
+
+ blkaddr = req->blkaddr;
+ if (!is_block_implemented(rvu->hw, blkaddr))
+ return REE_AF_ERR_BLOCK_NOT_IMPLEMENTED;
+ if (blkaddr == BLKADDR_REE1)
+ blkid = 1;
+
+ block = &rvu->hw->block[blkaddr];
+ ree = &rvu->hw->ree[blkid];
+
+ /* If this is the first block of ROF */
+ if (!req->offset) {
+ if (req->total_len >
+ REE_RULE_DB_ALLOC_SIZE * REE_RULE_DB_BLOCK_CNT)
+ return REE_AF_ERR_RULE_DB_TOO_BIG;
+
+ /* Initialize Programming memory */
+ err = ree_reex_memory_alloc(rvu, block, ree, req->total_len,
+ req->is_incremental);
+ if (err)
+ return err;
+ /* Programming overwrites the existing rule db.
+ * Incremental programming overwrites both rule db and rule dbi.
+ */
+ ree->ruledb_len = 0;
+ if (!req->is_incremental)
+ ree->ruledbi_len = 0;
+ }
+
+ /* Copy rof data from mbox to ruledb.
+ * Rule db is later used for programming
+ */
+ if (ree->ruledb_len + req->len >
+ ree->ruledb_blocks * REE_RULE_DB_ALLOC_SIZE)
+ return REE_AF_ERR_RULE_DB_WRONG_LENGTH;
+ if (ree->ruledb_len != req->offset)
+ return REE_AF_ERR_RULE_DB_WRONG_OFFSET;
+ /* All messages should be in block size, apart from the last one */
+ if (req->len < REE_RULE_DB_REQ_BLOCK_SIZE && !req->is_last)
+ return REE_AF_ERR_RULE_DB_SHOULD_FILL_REQUEST;
+ /* Each mbox message is 32KB and each ruledb block is 4096KB.
+ * A single mbox message shouldn't spread over blocks.
+ */
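+ /* For example, with ruledb_len = 0x9000 already received, db_block =
+  * 0x9000 >> 22 = 0 and the new chunk is copied at byte offset 0x9000
+  * within ruledb[0]; once ruledb_len crosses 4MB, db_block becomes 1 and
+  * offsets restart relative to ruledb[1] (illustrative lengths).
+  */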
+ db_block = ree->ruledb_len >> REE_RULE_DB_ALLOC_SHIFT;
+ if (db_block >= ree->ruledb_blocks)
+ return REE_AF_ERR_RULE_DB_BLOCK_TOO_BIG;
+ memcpy((void *)((u64)ree->ruledb[db_block] + ree->ruledb_len -
+ db_block * REE_RULE_DB_ALLOC_SIZE), req->rule_db, req->len);
+ ree->ruledb_len += req->len;
+ /* The ROF file is sent in chunks;
+ * wait for the last chunk before starting programming.
+ */
+ if (!req->is_last)
+ return 0;
+
+ if (req->total_len != ree->ruledb_len)
+ return REE_AF_ERR_RULE_DB_PARTIAL;
+
+ if (!req->is_incremental || req->is_dbi) {
+ err = ree_rule_db_prog(rvu, block, ree, req->is_incremental);
+ if (err)
+ return err;
+ }
+
+ if (req->is_dbi) {
+ memcpy(ree->ruledbi,
+ ree->ruledb[db_block] +
+ req->total_len - REE_RULE_DBI_SIZE,
+ REE_RULE_DBI_SIZE);
+ ree->ruledbi_len = REE_RULE_DBI_SIZE;
+ }
+
+ return 0;
+}
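+
+/* Illustrative sketch only (assumption: not part of the original driver):
+ * how a requester would split a ROF image into mbox chunks so that the
+ * offset/length checks in rvu_mbox_handler_ree_rule_db_prog() above are
+ * satisfied. The actual mbox send path lives on the requester side, so it
+ * is abstracted here as a caller-provided callback.
+ */
+static int __maybe_unused
+ree_rule_db_send_sketch(void *rof, u32 total_len,
+ int (*send)(void *buf, u32 offset, u32 len, bool is_last))
+{
+ u32 offset = 0;
+ int err;
+
+ while (offset < total_len) {
+ u32 len = min_t(u32, total_len - offset,
+ REE_RULE_DB_REQ_BLOCK_SIZE);
+ bool is_last = (offset + len == total_len);
+
+ /* Every chunk except the last is a full request block, and
+ * offset always equals the number of bytes already sent.
+ */
+ err = send((u8 *)rof + offset, offset, len, is_last);
+ if (err)
+ return err;
+ offset += len;
+ }
+
+ return 0;
+}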
+
+int
+rvu_mbox_handler_ree_rule_db_get(struct rvu *rvu,
+ struct ree_rule_db_get_req_msg *req,
+ struct ree_rule_db_get_rsp_msg *rsp)
+{
+ int blkaddr, len, blkid = 0, db_block;
+ struct ree_rsrc *ree;
+
+ blkaddr = req->blkaddr;
+ if (!is_block_implemented(rvu->hw, blkaddr))
+ return REE_AF_ERR_BLOCK_NOT_IMPLEMENTED;
+ if (blkaddr == BLKADDR_REE1)
+ blkid = 1;
+ ree = &rvu->hw->ree[blkid];
+
+ /* In case no programming or incremental programming was done yet */
+ if ((req->is_dbi && ree->ruledbi_len == 0) ||
+ (!req->is_dbi && ree->ruledb_len == 0)) {
+ rsp->len = 0;
+ return 0;
+ }
+
+ /* The rule db is returned in chunks;
+ * verify that the offset is inside the db range.
+ */
+ if (req->is_dbi) {
+ if (ree->ruledbi_len < req->offset)
+ return REE_AF_ERR_RULE_DB_INC_OFFSET_TOO_BIG;
+ len = ree->ruledbi_len - req->offset;
+ } else {
+ if (ree->ruledb_len < req->offset)
+ return REE_AF_ERR_RULE_DB_OFFSET_TOO_BIG;
+ len = ree->ruledb_len - req->offset;
+ }
+
+ /* Check if this is the last chunk of db */
+ if (len < REE_RULE_DB_RSP_BLOCK_SIZE) {
+ rsp->is_last = true;
+ rsp->len = len;
+ } else {
+ rsp->is_last = false;
+ rsp->len = REE_RULE_DB_RSP_BLOCK_SIZE;
+ }
+
+ /* Copy DB chunk to response */
+ if (req->is_dbi) {
+ memcpy(rsp->rule_db, ree->ruledbi + req->offset, rsp->len);
+ } else {
+ db_block = req->offset >> REE_RULE_DB_ALLOC_SHIFT;
+ memcpy(rsp->rule_db, ree->ruledb[db_block] + req->offset,
+ rsp->len);
+ }
+
+ return 0;
+}
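+
+/* Illustrative sketch only (assumption: not part of the original driver):
+ * reading the rule db back through the handler above. Each response carries
+ * at most REE_RULE_DB_RSP_BLOCK_SIZE bytes and flags the last chunk, so the
+ * requester simply advances its offset until is_last is seen. The 'get'
+ * callback stands in for the requester-side mbox call.
+ */
+static int __maybe_unused
+ree_rule_db_read_sketch(void *out, u32 max_len,
+ int (*get)(u32 offset, void *buf, u32 *len, bool *is_last))
+{
+ bool is_last = false;
+ u32 offset = 0, len;
+ int err;
+
+ while (!is_last) {
+ if (max_len - offset < REE_RULE_DB_RSP_BLOCK_SIZE)
+ return -ENOSPC;
+ err = get(offset, (u8 *)out + offset, &len, &is_last);
+ if (err)
+ return err;
+ if (!len)
+ break;
+ offset += len;
+ }
+
+ return offset;
+}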
+
+int
+rvu_mbox_handler_ree_rule_db_len_get(struct rvu *rvu, struct ree_req_msg *req,
+ struct ree_rule_db_len_rsp_msg *rsp)
+{
+ int blkaddr, blkid = 0;
+
+ blkaddr = req->blkaddr;
+ if (!is_block_implemented(rvu->hw, blkaddr))
+ return REE_AF_ERR_BLOCK_NOT_IMPLEMENTED;
+ if (blkaddr == BLKADDR_REE1)
+ blkid = 1;
+ rsp->len = rvu->hw->ree[blkid].ruledb_len;
+ rsp->inc_len = rvu->hw->ree[blkid].ruledbi_len;
+ return 0;
+}
+
+int rvu_mbox_handler_ree_config_lf(struct rvu *rvu,
+ struct ree_lf_req_msg *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ int lf, blkaddr, num_lfs;
+ struct rvu_block *block;
+ u64 val;
+
+ blkaddr = req->blkaddr;
+ if (!is_block_implemented(rvu->hw, blkaddr))
+ return REE_AF_ERR_BLOCK_NOT_IMPLEMENTED;
+ block = &rvu->hw->block[blkaddr];
+
+ /* Need to translate REE LF slot to global number
+ * VFs use local numbering from 0 to number of LFs - 1
+ */
+ lf = rvu_get_lf(rvu, block, pcifunc, req->lf);
+ if (lf < 0)
+ return REE_AF_ERR_LF_INVALID;
+
+ num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, req->hdr.pcifunc),
+ blkaddr);
+ if (lf >= num_lfs)
+ return REE_AF_ERR_LF_NO_MORE_RESOURCES;
+
+ /* LF instruction buffer size and priority are configured by AF.
+ * Priority value can be 0 or 1
+ */
+ if (req->pri > 1)
+ return REE_AF_ERR_LF_WRONG_PRIORITY;
+ if (req->size > REE_AF_QUE_SBUF_CTL_MAX_SIZE)
+ return REE_AF_ERR_LF_SIZE_TOO_BIG;
+ val = req->size;
+ val = val << REE_AF_QUE_SBUF_CTL_SIZE_SHIFT;
+ val += req->pri;
+ rvu_write64(rvu, blkaddr, REE_AF_QUE_SBUF_CTL(lf), val);
+
+ return 0;
+}
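+
+/* Minimal sketch (assumption: helper name is illustrative, the handler above
+ * builds the value inline) of the REE_AF_QUE_SBUF_CTL layout: the LF
+ * instruction buffer size goes above REE_AF_QUE_SBUF_CTL_SIZE_SHIFT and the
+ * queue priority (0 or 1) occupies bit 0.
+ */
+static inline u64 ree_que_sbuf_ctl_val(u64 size, u64 pri)
+{
+ return (size << REE_AF_QUE_SBUF_CTL_SIZE_SHIFT) | pri;
+}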
+
+int rvu_mbox_handler_ree_rd_wr_register(struct rvu *rvu,
+ struct ree_rd_wr_reg_msg *req,
+ struct ree_rd_wr_reg_msg *rsp)
+{
+ int blkaddr;
+
+ blkaddr = req->blkaddr;
+ if (!is_block_implemented(rvu->hw, blkaddr))
+ return REE_AF_ERR_BLOCK_NOT_IMPLEMENTED;
+ rsp->reg_offset = req->reg_offset;
+ rsp->ret_val = req->ret_val;
+ rsp->is_write = req->is_write;
+
+ switch (req->reg_offset) {
+ case REE_AF_REEXM_MAX_MATCH:
+ break;
+
+ default:
+ /* Access to register denied */
+ return REE_AF_ERR_ACCESS_DENIED;
+ }
+
+ if (req->is_write)
+ rvu_write64(rvu, blkaddr, req->reg_offset, req->val);
+ else
+ rsp->val = rvu_read64(rvu, blkaddr, req->reg_offset);
+
+ return 0;
+}
+
+static int ree_aq_inst_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
+ int qsize, int inst_size, int res_size)
+{
+ struct admin_queue *aq;
+ int err;
+
+ *ad_queue = devm_kzalloc(rvu->dev, sizeof(*aq), GFP_KERNEL);
+ if (!*ad_queue)
+ return -ENOMEM;
+ aq = *ad_queue;
+
+ /* Allocate memory for instructions, i.e. the AQ */
+ err = qmem_alloc(rvu->dev, &aq->inst, qsize, inst_size);
+ if (err) {
+ devm_kfree(rvu->dev, aq);
+ return err;
+ }
+
+ /* The REE AF AQ has no result queue and its lock is not used */
+ aq->res = NULL;
+
+ return 0;
+}
+
+static irqreturn_t rvu_ree_af_ras_intr_handler(int irq, void *ptr)
+{
+ struct rvu_block *block = ptr;
+ struct rvu *rvu = block->rvu;
+ int blkaddr = block->addr;
+ u64 intr;
+
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ intr = rvu_read64(block->rvu, blkaddr, REE_AF_RAS);
+ if (intr & REE_AF_RAS_DAT_PSN)
+ dev_err_ratelimited(rvu->dev, "REE: Poison received on a NCB data response\n");
+ if (intr & REE_AF_RAS_LD_CMD_PSN)
+ dev_err_ratelimited(rvu->dev, "REE: Poison received on a NCB instruction response\n");
+ if (intr & REE_AF_RAS_LD_REEX_PSN)
+ dev_err_ratelimited(rvu->dev, "REE: Poison received on a REEX response\n");
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, REE_AF_RAS, intr);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rvu_ree_af_rvu_intr_handler(int irq, void *ptr)
+{
+ struct rvu_block *block = ptr;
+ struct rvu *rvu = block->rvu;
+ int blkaddr = block->addr;
+ u64 intr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_REE, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ intr = rvu_read64(rvu, blkaddr, REE_AF_RVU_INT);
+ if (intr & REE_AF_RVU_INT_UNMAPPED_SLOT)
+ dev_err_ratelimited(rvu->dev, "REE: Unmapped slot error\n");
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, REE_AF_RVU_INT, intr);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rvu_ree_af_aq_intr_handler(int irq, void *ptr)
+{
+ struct rvu_block *block = ptr;
+ struct rvu *rvu = block->rvu;
+ int blkaddr = block->addr;
+ u64 intr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_REE, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ intr = rvu_read64(rvu, blkaddr, REE_AF_AQ_INT);
+
+ if (intr & REE_AF_AQ_INT_DOVF)
+ dev_err_ratelimited(rvu->dev, "REE: DOORBELL overflow\n");
+ if (intr & REE_AF_AQ_INT_IRDE)
+ dev_err_ratelimited(rvu->dev, "REE: Instruction NCB read response error\n");
+ if (intr & REE_AF_AQ_INT_PRDE)
+ dev_err_ratelimited(rvu->dev, "REE: Payload NCB read response error\n");
+ if (intr & REE_AF_AQ_INT_PLLE)
+ dev_err_ratelimited(rvu->dev, "REE: Payload length error\n");
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, REE_AF_AQ_INT, intr);
+ return IRQ_HANDLED;
+}
+
+static void rvu_ree_unregister_interrupts_block(struct rvu *rvu, int blkaddr)
+{
+ int i, offs;
+ struct rvu_block *block;
+ struct rvu_hwinfo *hw = rvu->hw;
+
+ if (!is_block_implemented(hw, blkaddr))
+ return;
+ block = &hw->block[blkaddr];
+
+ offs = rvu_read64(rvu, blkaddr, REE_PRIV_AF_INT_CFG) & 0x7FF;
+ if (!offs) {
+ dev_warn(rvu->dev,
+ "Failed to get REE_AF_INT vector offsets");
+ return;
+ }
+
+ /* Disable all REE AF interrupts */
+ rvu_write64(rvu, blkaddr, REE_AF_RAS_ENA_W1C, 0x1);
+ rvu_write64(rvu, blkaddr, REE_AF_RVU_INT_ENA_W1C, 0x1);
+ rvu_write64(rvu, blkaddr, REE_AF_AQ_DONE_INT_ENA_W1C, 0x1);
+ rvu_write64(rvu, blkaddr, REE_AF_AQ_INT_ENA_W1C, 0x1);
+
+ for (i = 0; i < REE_AF_INT_VEC_CNT; i++)
+ if (rvu->irq_allocated[offs + i]) {
+ free_irq(pci_irq_vector(rvu->pdev, offs + i), block);
+ rvu->irq_allocated[offs + i] = false;
+ }
+}
+
+void rvu_ree_unregister_interrupts(struct rvu *rvu)
+{
+ rvu_ree_unregister_interrupts_block(rvu, BLKADDR_REE0);
+ rvu_ree_unregister_interrupts_block(rvu, BLKADDR_REE1);
+}
+
+static int rvu_ree_af_request_irq(struct rvu_block *block,
+ int offset, irq_handler_t handler,
+ const char *name)
+{
+ int ret = 0;
+ struct rvu *rvu = block->rvu;
+
+ WARN_ON(rvu->irq_allocated[offset]);
+ rvu->irq_allocated[offset] = false;
+ sprintf(&rvu->irq_name[offset * NAME_SIZE], "%s", name);
+ ret = request_irq(pci_irq_vector(rvu->pdev, offset), handler, 0,
+ &rvu->irq_name[offset * NAME_SIZE], block);
+ if (ret)
+ dev_warn(block->rvu->dev, "Failed to register %s irq\n", name);
+ else
+ rvu->irq_allocated[offset] = true;
+
+ return rvu->irq_allocated[offset];
+}
+
+static int rvu_ree_register_interrupts_block(struct rvu *rvu, int blkaddr,
+ int blkid)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int offs, ret = 0;
+
+ if (!is_block_implemented(rvu->hw, blkaddr))
+ return 0;
+
+ block = &hw->block[blkaddr];
+
+ /* Read interrupt vector */
+ offs = rvu_read64(rvu, blkaddr, REE_PRIV_AF_INT_CFG) & 0x7FF;
+ if (!offs) {
+ dev_warn(rvu->dev,
+ "Failed to get REE_AF_INT vector offsets");
+ return 0;
+ }
+
+ /* Register and enable RAS interrupt */
+ ret = rvu_ree_af_request_irq(block, offs + REE_AF_INT_VEC_RAS,
+ rvu_ree_af_ras_intr_handler,
+ ree_irq_name[blkid][REE_AF_INT_VEC_RAS]);
+ if (!ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, REE_AF_RAS_ENA_W1S, ~0ULL);
+
+ /* Register and enable RVU interrupt */
+ ret = rvu_ree_af_request_irq(block, offs + REE_AF_INT_VEC_RVU,
+ rvu_ree_af_rvu_intr_handler,
+ ree_irq_name[blkid][REE_AF_INT_VEC_RVU]);
+ if (!ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, REE_AF_RVU_INT_ENA_W1S, ~0ULL);
+
+ /* The QUE DONE interrupt is not required: software polls the DONE
+ * count to learn when all instructions have completed.
+ */
+
+ /* Register and enable AQ interrupt */
+ ret = rvu_ree_af_request_irq(block, offs + REE_AF_INT_VEC_AQ,
+ rvu_ree_af_aq_intr_handler,
+ ree_irq_name[blkid][REE_AF_INT_VEC_AQ]);
+ if (!ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, REE_AF_AQ_INT_ENA_W1S, ~0ULL);
+
+ return 0;
+err:
+ rvu_ree_unregister_interrupts(rvu);
+ return ret;
+}
+
+int rvu_ree_register_interrupts(struct rvu *rvu)
+{
+ int ret;
+
+ ret = rvu_ree_register_interrupts_block(rvu, BLKADDR_REE0, 0);
+ if (ret)
+ return ret;
+
+ return rvu_ree_register_interrupts_block(rvu, BLKADDR_REE1, 1);
+}
+
+static int rvu_ree_init_block(struct rvu *rvu, int blkaddr)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int ret = 0, blkid = 0;
+ struct ree_rsrc *ree;
+ u64 val;
+
+ if (!is_block_implemented(rvu->hw, blkaddr))
+ return 0;
+
+ block = &hw->block[blkaddr];
+ if (blkaddr == BLKADDR_REE1)
+ blkid = 1;
+ ree = &rvu->hw->ree[blkid];
+
+ /* Administrative instruction queue allocation */
+ ret = ree_aq_inst_alloc(rvu, &block->aq,
+ REE_AQ_SIZE,
+ sizeof(struct ree_af_aq_inst_s),
+ 0);
+ if (ret)
+ return ret;
+
+ /* Administrative instruction queue address */
+ rvu_write64(rvu, block->addr, REE_AF_AQ_SBUF_ADDR,
+ (u64)block->aq->inst->iova);
+
+ /* Move the head to the start only when a new AQ is allocated and
+ * configured. Otherwise the head wraps around.
+ */
+ ree->aq_head = 0;
+
+ /* Administrative queue instruction buffer size, in units of 128B
+ * (8 * REE_AF_AQ_INST_S)
+ */
+ val = REE_AQ_SIZE >> 3;
+ rvu_write64(rvu, block->addr, REE_AF_AQ_SBUF_CTL,
+ (val << REE_AF_AQ_SBUF_CTL_SIZE_SHIFT));
+
+ /* Enable instruction queue */
+ rvu_write64(rvu, block->addr, REE_AF_AQ_ENA, 0x1);
+
+ /* Force Clock ON
+ * Force bits should be set throughout the REEX Initialization
+ */
+ ree_reex_force_clock(rvu, block, true);
+
+ /* REEX MAIN_CSR configuration */
+ rvu_write64(rvu, block->addr, REE_AF_REEXM_MAX_MATCH,
+ REE_AF_REEXM_MAX_MATCH_MAX);
+ rvu_write64(rvu, block->addr, REE_AF_REEXM_MAX_PRE_CNT,
+ REE_AF_REEXM_MAX_PRE_CNT_COUNT);
+ rvu_write64(rvu, block->addr, REE_AF_REEXM_MAX_PTHREAD_CNT,
+ REE_AF_REEXM_MAX_PTHREAD_COUNT);
+ rvu_write64(rvu, block->addr, REE_AF_REEXM_MAX_LATENCY_CNT,
+ REE_AF_REEXM_MAX_LATENCY_COUNT);
+
+ /* REEX Set & Clear MAIN_CSR init */
+ rvu_write64(rvu, block->addr, REE_AF_REEXM_CTRL, 0x1);
+ rvu_write64(rvu, block->addr, REE_AF_REEXM_CTRL, 0x0);
+
+ /* REEX Poll MAIN_CSR INIT_DONE */
+ ret = rvu_poll_reg(rvu, block->addr, REE_AF_REEXM_STATUS,
+ BIT_ULL(0), false);
+ if (ret) {
+ dev_err(rvu->dev, "REE reexm poll for init done failed");
+ goto err;
+ }
+
+err:
+ /* Force Clock OFF */
+ ree_reex_force_clock(rvu, block, false);
+
+ return ret;
+}
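+
+/* Minimal sketch (assumption: helper name is illustrative) of the
+ * REE_AF_AQ_SBUF_CTL size computation done in rvu_ree_init_block(): the
+ * register takes the buffer size in 128B units, i.e. groups of 8 AQ
+ * instructions, which is why the queue length is shifted right by 3.
+ */
+static inline u64 ree_aq_sbuf_ctl_val(u64 num_inst)
+{
+ return (num_inst >> 3) << REE_AF_AQ_SBUF_CTL_SIZE_SHIFT;
+}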
+
+int rvu_ree_init(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int err;
+
+ hw->ree = devm_kcalloc(rvu->dev, MAX_REE_BLKS, sizeof(struct ree_rsrc),
+ GFP_KERNEL);
+ if (!hw->ree)
+ return -ENOMEM;
+
+ err = rvu_ree_init_block(rvu, BLKADDR_REE0);
+ if (err)
+ return err;
+ return rvu_ree_init_block(rvu, BLKADDR_REE1);
+}
+
+static void rvu_ree_freemem_block(struct rvu *rvu, int blkaddr, int blkid)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ struct ree_rsrc *ree;
+ int i = 0;
+
+ if (!is_block_implemented(rvu->hw, blkaddr))
+ return;
+
+ block = &hw->block[blkaddr];
+ ree = &hw->ree[blkid];
+
+ rvu_aq_free(rvu, block->aq);
+ if (ree->graph_ctx)
+ qmem_free(rvu->dev, ree->graph_ctx);
+ if (ree->prefix_ctx)
+ qmem_free(rvu->dev, ree->prefix_ctx);
+ if (ree->ruledb) {
+ for (i = 0; i < ree->ruledb_blocks; i++)
+ kfree(ree->ruledb[i]);
+ kfree(ree->ruledb);
+ }
+ kfree(ree->ruledbi);
+}
+
+void rvu_ree_freemem(struct rvu *rvu)
+{
+ rvu_ree_freemem_block(rvu, BLKADDR_REE0, 0);
+ rvu_ree_freemem_block(rvu, BLKADDR_REE1, 1);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c
index 9d7c135c7965..b3150f053291 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c
@@ -1,11 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
-/* Marvell OcteonTx2 RVU Admin Function driver
+/* Marvell RVU Admin Function driver
*
- * Copyright (C) 2018 Marvell International Ltd.
+ * Copyright (C) 2018 Marvell.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/module.h>
@@ -33,9 +30,9 @@ static struct hw_reg_map txsch_reg_map[NIX_TXSCH_LVL_CNT] = {
{NIX_TXSCH_LVL_SMQ, 2, 0xFFFF, {{0x0700, 0x0708}, {0x1400, 0x14C8} } },
{NIX_TXSCH_LVL_TL4, 3, 0xFFFF, {{0x0B00, 0x0B08}, {0x0B10, 0x0B18},
{0x1200, 0x12E0} } },
- {NIX_TXSCH_LVL_TL3, 3, 0xFFFF, {{0x1000, 0x10E0}, {0x1600, 0x1608},
- {0x1610, 0x1618} } },
- {NIX_TXSCH_LVL_TL2, 2, 0xFFFF, {{0x0E00, 0x0EE0}, {0x1700, 0x1768} } },
+ {NIX_TXSCH_LVL_TL3, 4, 0xFFFF, {{0x1000, 0x10E0}, {0x1600, 0x1608},
+ {0x1610, 0x1618}, {0x1700, 0x17B0} } },
+ {NIX_TXSCH_LVL_TL2, 2, 0xFFFF, {{0x0E00, 0x0EE0}, {0x1700, 0x17B0} } },
{NIX_TXSCH_LVL_TL1, 1, 0xFFFF, {{0x0C00, 0x0D98} } },
};
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
index 7ca599b973c0..4689041bbdcf 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -1,11 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Marvell OcteonTx2 RVU Admin Function driver
+/* Marvell RVU Admin Function driver
*
- * Copyright (C) 2018 Marvell International Ltd.
+ * Copyright (C) 2018 Marvell.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef RVU_REG_H
@@ -44,6 +41,18 @@
#define RVU_AF_PFME_INT_W1S (0x28c8)
#define RVU_AF_PFME_INT_ENA_W1S (0x28d0)
#define RVU_AF_PFME_INT_ENA_W1C (0x28d8)
+#define RVU_AF_PFX_BAR4_ADDR(a) (0x5000 | (a) << 4)
+#define RVU_AF_PFX_BAR4_CFG(a) (0x5200 | (a) << 4)
+#define RVU_AF_PFX_VF_BAR4_ADDR(a) (0x5400 | (a) << 4)
+#define RVU_AF_PFX_VF_BAR4_CFG(a) (0x5600 | (a) << 4)
+#define RVU_AF_PFX_LMTLINE_ADDR(a) (0x5800 | (a) << 4)
+#define RVU_AF_SMMU_ADDR_REQ (0x6000)
+#define RVU_AF_SMMU_TXN_REQ (0x6008)
+#define RVU_AF_SMMU_ADDR_RSP_STS (0x6010)
+#define RVU_AF_SMMU_ADDR_TLN (0x6018)
+#define RVU_AF_SMMU_TLN_FLIT0 (0x6020)
+
+#define RVU_AF_BAR2_ALIASX(a, b) (0x9100000ull | (a) << 12 | (b))
/* Admin function's privileged PF/VF registers */
#define RVU_PRIV_CONST (0x8000000)
@@ -54,20 +63,22 @@
#define RVU_PRIV_PFX_MSIX_CFG(a) (0x8000110 | (a) << 16)
#define RVU_PRIV_PFX_ID_CFG(a) (0x8000120 | (a) << 16)
#define RVU_PRIV_PFX_INT_CFG(a) (0x8000200 | (a) << 16)
-#define RVU_PRIV_PFX_NIX0_CFG (0x8000300)
+#define RVU_PRIV_PFX_NIXX_CFG(a) (0x8000300 | (a) << 3)
#define RVU_PRIV_PFX_NPA_CFG (0x8000310)
#define RVU_PRIV_PFX_SSO_CFG (0x8000320)
#define RVU_PRIV_PFX_SSOW_CFG (0x8000330)
#define RVU_PRIV_PFX_TIM_CFG (0x8000340)
-#define RVU_PRIV_PFX_CPT0_CFG (0x8000350)
+#define RVU_PRIV_PFX_CPTX_CFG(a) (0x8000350 | (a) << 3)
#define RVU_PRIV_BLOCK_TYPEX_REV(a) (0x8000400 | (a) << 3)
#define RVU_PRIV_HWVFX_INT_CFG(a) (0x8001280 | (a) << 16)
-#define RVU_PRIV_HWVFX_NIX0_CFG (0x8001300)
+#define RVU_PRIV_HWVFX_NIXX_CFG(a) (0x8001300 | (a) << 3)
#define RVU_PRIV_HWVFX_NPA_CFG (0x8001310)
#define RVU_PRIV_HWVFX_SSO_CFG (0x8001320)
#define RVU_PRIV_HWVFX_SSOW_CFG (0x8001330)
#define RVU_PRIV_HWVFX_TIM_CFG (0x8001340)
-#define RVU_PRIV_HWVFX_CPT0_CFG (0x8001350)
+#define RVU_PRIV_HWVFX_CPTX_CFG(a) (0x8001350 | (a) << 3)
+#define RVU_PRIV_PFX_REEX_CFG(a) (0x8000360 | (a) << 3)
+#define RVU_PRIV_HWVFX_REEX_CFG(a) (0x8001360 | (a) << 3)
/* RVU PF registers */
#define RVU_PF_VFX_PFVF_MBOX0 (0x00000)
@@ -100,6 +111,8 @@
#define RVU_PF_MSIX_VECX_ADDR(a) (0x000 | (a) << 4)
#define RVU_PF_MSIX_VECX_CTL(a) (0x008 | (a) << 4)
#define RVU_PF_MSIX_PBAX(a) (0xF0000 | (a) << 3)
+#define RVU_PF_VF_MBOX_ADDR (0xC40)
+#define RVU_PF_LMTLINE_ADDR (0xC48)
/* RVU VF registers */
#define RVU_VF_VFPF_MBOX0 (0x00000)
@@ -112,6 +125,7 @@
#define NPA_AF_LF_RST (0x0020)
#define NPA_AF_GEN_CFG (0x0030)
#define NPA_AF_NDC_CFG (0x0040)
+#define NPA_AF_NDC_SYNC (0x0050)
#define NPA_AF_INP_CTL (0x00D0)
#define NPA_AF_ACTIVE_CYCLES_PC (0x00F0)
#define NPA_AF_AVG_DELAY (0x0100)
@@ -144,6 +158,7 @@
#define NPA_AF_AQ_DONE_INT_W1S (0x0688)
#define NPA_AF_AQ_DONE_ENA_W1S (0x0690)
#define NPA_AF_AQ_DONE_ENA_W1C (0x0698)
+#define NPA_AF_BATCH_CTL (0x06a0)
#define NPA_AF_LFX_AURAS_CFG(a) (0x4000 | (a) << 18)
#define NPA_AF_LFX_LOC_AURAS_BASE(a) (0x4010 | (a) << 18)
#define NPA_AF_LFX_QINTS_CFG(a) (0x4100 | (a) << 18)
@@ -153,6 +168,9 @@
#define NPA_PRIV_LFX_INT_CFG (0x10020)
#define NPA_AF_RVU_LF_CFG_DEBUG (0x10030)
+#define NPA_AF_BAR2_SEL (0x9000000ull)
+#define NPA_AF_BAR2_ALIASX(a, b) RVU_AF_BAR2_ALIASX(a, b)
+
/* NIX block's admin function registers */
#define NIX_AF_CFG (0x0000)
#define NIX_AF_STATUS (0x0010)
@@ -164,6 +182,7 @@
#define NIX_AF_SQ_CONST (0x0040)
#define NIX_AF_CQ_CONST (0x0048)
#define NIX_AF_RQ_CONST (0x0050)
+#define NIX_AF_PL_CONST (0x0058)
#define NIX_AF_PSE_CONST (0x0060)
#define NIX_AF_TL1_CONST (0x0070)
#define NIX_AF_TL2_CONST (0x0078)
@@ -174,9 +193,11 @@
#define NIX_AF_LSO_CFG (0x00A8)
#define NIX_AF_BLK_RST (0x00B0)
#define NIX_AF_TX_TSTMP_CFG (0x00C0)
+#define NIX_AF_PL_TS (0x00C8)
#define NIX_AF_RX_CFG (0x00D0)
#define NIX_AF_AVG_DELAY (0x00E0)
#define NIX_AF_CINT_DELAY (0x00F0)
+#define NIX_AF_VWQE_TIMER (0x00F8)
#define NIX_AF_RX_MCAST_BASE (0x0100)
#define NIX_AF_RX_MCAST_CFG (0x0110)
#define NIX_AF_RX_MCAST_BUF_BASE (0x0120)
@@ -201,21 +222,31 @@
#define NIX_AF_RVU_INT_ENA_W1S (0x01D0)
#define NIX_AF_RVU_INT_ENA_W1C (0x01D8)
#define NIX_AF_TCP_TIMER (0x01E0)
-#define NIX_AF_RX_WQE_TAG_CTL (0x01F0)
+#define NIX_AF_RX_DEF_ET(a) (0x01F0ull | (uint64_t)(a) << 3)
#define NIX_AF_RX_DEF_OL2 (0x0200)
#define NIX_AF_RX_DEF_OIP4 (0x0210)
#define NIX_AF_RX_DEF_IIP4 (0x0220)
+#define NIX_AF_RX_DEF_VLAN0_PCP_DEI (0x0228)
#define NIX_AF_RX_DEF_OIP6 (0x0230)
+#define NIX_AF_RX_DEF_VLAN1_PCP_DEI (0x0238)
#define NIX_AF_RX_DEF_IIP6 (0x0240)
#define NIX_AF_RX_DEF_OTCP (0x0250)
#define NIX_AF_RX_DEF_ITCP (0x0260)
#define NIX_AF_RX_DEF_OUDP (0x0270)
#define NIX_AF_RX_DEF_IUDP (0x0280)
#define NIX_AF_RX_DEF_OSCTP (0x0290)
+#define NIX_AF_RX_DEF_CST_APAD0 (0x0298)
#define NIX_AF_RX_DEF_ISCTP (0x02A0)
-#define NIX_AF_RX_DEF_IPSECX (0x02B0)
+#define NIX_AF_RX_DEF_CST_APAD1 (0x02A8)
+#define NIX_AF_RX_DEF_IPSECX(a) (0x02B0ull | (uint64_t)(a) << 3)
+#define NIX_AF_RX_DEF_IIP4_DSCP (0x02E0)
+#define NIX_AF_RX_DEF_OIP4_DSCP (0x02E8)
+#define NIX_AF_RX_DEF_IIP6_DSCP (0x02F0)
+#define NIX_AF_RX_DEF_OIP6_DSCP (0x02F8)
#define NIX_AF_RX_IPSEC_GEN_CFG (0x0300)
-#define NIX_AF_RX_CPTX_INST_ADDR (0x0310)
+#define NIX_AF_RX_CPTX_INST_QSEL(a) (0x0320ull | (uint64_t)(a) << 3)
+#define NIX_AF_RX_CPTX_CREDIT(a) (0x0360ull | (uint64_t)(a) << 3)
+#define NIX_AF_NDC_RX_SYNC (0x03E0)
#define NIX_AF_NDC_TX_SYNC (0x03F0)
#define NIX_AF_AQ_CFG (0x0400)
#define NIX_AF_AQ_BASE (0x0410)
@@ -239,20 +270,22 @@
#define NIX_AF_SEB_ECO (0x0600)
#define NIX_AF_SEB_TEST_BP (0x0610)
#define NIX_AF_NORM_TX_FIFO_STATUS (0x0620)
-#define NIX_AF_EXPR_TX_FIFO_STATUS (0x0630)
#define NIX_AF_SDP_TX_FIFO_STATUS (0x0640)
#define NIX_AF_TX_NPC_CAPTURE_CONFIG (0x0660)
#define NIX_AF_TX_NPC_CAPTURE_INFO (0x0670)
+#define NIX_AF_SEB_CFG (0x05F0)
+#define NIX_PTP_1STEP_EN BIT_ULL(2)
#define NIX_AF_DEBUG_NPC_RESP_DATAX(a) (0x680 | (a) << 3)
#define NIX_AF_SMQX_CFG(a) (0x700 | (a) << 16)
+#define NIX_AF_SMQX_STATUS(a) (0x730 | (a) << 16)
#define NIX_AF_SQM_DBG_CTL_STATUS (0x750)
+#define NIX_AF_DWRR_SDP_MTU (0x790)
+#define NIX_AF_DWRR_RPM_MTU (0x7A0)
#define NIX_AF_PSE_CHANNEL_LEVEL (0x800)
#define NIX_AF_PSE_SHAPER_CFG (0x810)
-#define NIX_AF_TX_EXPR_CREDIT (0x830)
#define NIX_AF_MARK_FORMATX_CTL(a) (0x900 | (a) << 18)
#define NIX_AF_TX_LINKX_NORM_CREDIT(a) (0xA00 | (a) << 16)
-#define NIX_AF_TX_LINKX_EXPR_CREDIT(a) (0xA10 | (a) << 16)
#define NIX_AF_TX_LINKX_SW_XOFF(a) (0xA20 | (a) << 16)
#define NIX_AF_TX_LINKX_HW_XOFF(a) (0xA30 | (a) << 16)
#define NIX_AF_SDP_LINK_CREDIT (0xa40)
@@ -386,7 +419,7 @@
#define NIX_AF_LFX_RX_IPSEC_CFG0(a) (0x4140 | (a) << 17)
#define NIX_AF_LFX_RX_IPSEC_CFG1(a) (0x4148 | (a) << 17)
#define NIX_AF_LFX_RX_IPSEC_DYNO_CFG(a) (0x4150 | (a) << 17)
-#define NIX_AF_LFX_RX_IPSEC_DYNO_BASE(a) (0x4158 | (a) << 17)
+#define NIX_AF_LFX_RX_IPSEC_DYNO_BASE(a)(0x4158 | (a) << 17)
#define NIX_AF_LFX_RX_IPSEC_SA_BASE(a) (0x4170 | (a) << 17)
#define NIX_AF_LFX_TX_STATUS(a) (0x4180 | (a) << 17)
#define NIX_AF_LFX_RX_VTAG_TYPEX(a, b) (0x4200 | (a) << 17 | (b) << 3)
@@ -399,20 +432,188 @@
#define NIX_AF_RX_NPC_MIRROR_RCV (0x4720)
#define NIX_AF_RX_NPC_MIRROR_DROP (0x4730)
#define NIX_AF_RX_ACTIVE_CYCLES_PCX(a) (0x4800 | (a) << 16)
+#define NIX_AF_RQM_BP_TEST (0x4880)
+#define NIX_AF_CQM_BP_TEST (0x48c0)
+#define NIX_AF_LINKX_CFG(a) (0x4010 | (a) << 17)
#define NIX_PRIV_AF_INT_CFG (0x8000000)
#define NIX_PRIV_LFX_CFG (0x8000010)
#define NIX_PRIV_LFX_INT_CFG (0x8000020)
#define NIX_AF_RVU_LF_CFG_DEBUG (0x8000030)
+#define NIX_AF_LF_CFG_SHIFT 17
+#define NIX_AF_LF_SSO_PF_FUNC_SHIFT 16
+
+#define NIX_AF_LINKX_BASE_MASK GENMASK_ULL(11, 0)
+#define NIX_AF_LINKX_RANGE_MASK GENMASK_ULL(19, 16)
+
/* SSO */
#define SSO_AF_CONST (0x1000)
#define SSO_AF_CONST1 (0x1008)
-#define SSO_AF_BLK_RST (0x10f8)
+#define SSO_AF_WQ_INT_PC (0x1020)
+#define SSO_AF_NOS_CNT (0x1050)
+#define SSO_AF_AW_WE (0x1080)
+#define SSO_AF_WS_CFG (0x1088)
#define SSO_AF_LF_HWGRP_RST (0x10e0)
+#define SSO_AF_AW_CFG (0x10f0)
+#define SSO_AF_BLK_RST (0x10f8)
+#define SSO_AF_ACTIVE_CYCLES0 (0x1100)
+#define SSO_AF_ACTIVE_CYCLES1 (0x1108)
+#define SSO_AF_ACTIVE_CYCLES2 (0x1110)
+#define SSO_AF_ERR0 (0x1220)
+#define SSO_AF_ERR0_W1S (0x1228)
+#define SSO_AF_ERR0_ENA_W1C (0x1230)
+#define SSO_AF_ERR0_ENA_W1S (0x1238)
+#define SSO_AF_ERR2 (0x1260)
+#define SSO_AF_ERR2_W1S (0x1268)
+#define SSO_AF_ERR2_ENA_W1C (0x1270)
+#define SSO_AF_ERR2_ENA_W1S (0x1278)
+#define SSO_AF_UNMAP_INFO (0x12f0)
+#define SSO_AF_UNMAP_INFO2 (0x1300)
+#define SSO_AF_UNMAP_INFO3 (0x1310)
+#define SSO_AF_RAS (0x1420)
+#define SSO_AF_RAS_W1S (0x1430)
+#define SSO_AF_RAS_ENA_W1C (0x1460)
+#define SSO_AF_RAS_ENA_W1S (0x1470)
+#define SSO_PRIV_AF_INT_CFG (0x3000)
+#define SSO_AF_AW_ADD (0x2080)
+#define SSO_AF_AW_READ_ARB (0x2090)
+#define SSO_AF_XAQ_REQ_PC (0x20B0)
+#define SSO_AF_XAQ_LATENCY_PC (0x20B8)
+#define SSO_AF_TAQ_CNT (0x20c0)
+#define SSO_AF_TAQ_ADD (0x20e0)
+#define SSO_AF_POISONX(a) (0x2100 | (a) << 3)
+#define SSO_AF_POISONX_W1S(a) (0x2200 | (a) << 3)
#define SSO_AF_RVU_LF_CFG_DEBUG (0x3800)
#define SSO_PRIV_LFX_HWGRP_CFG (0x10000)
#define SSO_PRIV_LFX_HWGRP_INT_CFG (0x20000)
+#define SSO_AF_XAQX_GMCTL(a) (0xe0000 | (a) << 3)
+#define SSO_AF_XAQX_HEAD_PTR(a) (0x80000 | (a) << 3)
+#define SSO_AF_XAQX_TAIL_PTR(a) (0x90000 | (a) << 3)
+#define SSO_AF_XAQX_HEAD_NEXT(a) (0xa0000 | (a) << 3)
+#define SSO_AF_XAQX_TAIL_NEXT(a) (0xb0000 | (a) << 3)
+#define SSO_AF_TOAQX_STATUS(a) (0xd0000 | (a) << 3)
+#define SSO_AF_TIAQX_STATUS(a) (0xc0000 | (a) << 3)
+#define SSO_AF_HWGRPX_IAQ_THR(a) (0x200000 | (a) << 12)
+#define SSO_AF_HWGRPX_TAQ_THR(a) (0x200010 | (a) << 12)
+#define SSO_AF_HWGRPX_PRI(a) (0x200020 | (a) << 12)
+#define SSO_AF_HWGRPX_WS_PC(a) (0x200050 | (a) << 12)
+#define SSO_AF_HWGRPX_EXT_PC(a) (0x200060 | (a) << 12)
+#define SSO_AF_HWGRPX_WA_PC(a) (0x200070 | (a) << 12)
+#define SSO_AF_HWGRPX_TS_PC(a) (0x200080 | (a) << 12)
+#define SSO_AF_HWGRPX_DS_PC(a) (0x200090 | (a) << 12)
+#define SSO_AF_HWGRPX_DQ_PC(a) (0x2000A0 | (a) << 12)
+#define SSO_AF_HWGRPX_LS_PC(a) (0x2000C0 | (a) << 12)
+#define SSO_AF_HWGRPX_PAGE_CNT(a) (0x200100 | (a) << 12)
+#define SSO_AF_IU_ACCNTX_CFG(a) (0x50000 | (a) << 3)
+#define SSO_AF_IU_ACCNTX_RST(a) (0x60000 | (a) << 3)
+#define SSO_AF_HWGRPX_AW_STATUS(a) (0x200110 | (a) << 12)
+#define SSO_AF_HWGRPX_AW_CFG(a) (0x200120 | (a) << 12)
+#define SSO_AF_HWGRPX_AW_TAGSPACE(a) (0x200130 | (a) << 12)
+#define SSO_AF_HWGRPX_XAQ_AURA(a) (0x200140 | (a) << 12)
+#define SSO_AF_HWGRPX_XAQ_LIMIT(a) (0x200220 | (a) << 12)
+#define SSO_AF_HWGRPX_IU_ACCNT(a) (0x200230 | (a) << 12)
+#define SSO_AF_HWSX_ARB(a) (0x400100 | (a) << 12)
+#define SSO_AF_HWSX_INV(a) (0x400180 | (a) << 12)
+#define SSO_AF_HWSX_GMCTL(a) (0x400200 | (a) << 12)
+#define SSO_AF_HWSX_LSW_CFG(a) (0x400300 | (a) << 12)
+#define SSO_AF_HWSX_SX_GRPMSKX(a, b, c) \
+ (0x400400 | (a) << 12 | (b) << 5 | (c) << 3)
+#define SSO_AF_TAQX_LINK(a) (0xc00000 | (a) << 3)
+#define SSO_AF_TAQX_WAEY_TAG(a, b) (0xe00000 | (a) << 8 | (b) << 4)
+#define SSO_AF_TAQX_WAEY_WQP(a, b) (0xe00008 | (a) << 8 | (b) << 4)
+#define SSO_AF_IPL_FREEX(a) (0x800000 | (a) << 3)
+#define SSO_AF_IPL_IAQX(a) (0x840000 | (a) << 3)
+#define SSO_AF_IPL_DESCHEDX(a) (0x860000 | (a) << 3)
+#define SSO_AF_IPL_CONFX(a) (0x880000 | (a) << 3)
+#define SSO_AF_IENTX_TAG(a) (0Xa00000 | (a) << 3)
+#define SSO_AF_IENTX_GRP(a) (0xa20000 | (a) << 3)
+#define SSO_AF_IENTX_PENDTAG(a) (0xa40000 | (a) << 3)
+#define SSO_AF_IENTX_LINKS(a) (0xa60000 | (a) << 3)
+#define SSO_AF_IENTX_QLINKS(a) (0xa80000 | (a) << 3)
+#define SSO_AF_IENTX_WQP(a) (0xaa0000 | (a) << 3)
+#define SSO_AF_XAQDIS_DIGESTX(a) (0x901000 | (a) << 3)
+#define SSO_AF_FLR_AQ_DIGESTX(a) (0x901200 | (a) << 3)
+#define SSO_AF_QCTLDIS_DIGESTX(a) (0x900E00 | (a) << 3)
+#define SSO_AF_WQP0_DIGESTX(a) (0x900A00 | (a) << 3)
+#define SSO_AF_NPA_DIGESTX(a) (0x900000 | (a) << 3)
+#define SSO_AF_BFP_DIGESTX(a) (0x900200 | (a) << 3)
+#define SSO_AF_BFPN_DIGESTX(a) (0x900400 | (a) << 3)
+#define SSO_AF_GRPDIS_DIGESTX(a) (0x900600 | (a) << 3)
+
+#define SSO_AF_CONST1_NO_NSCHED BIT_ULL(34)
+#define SSO_AF_CONST1_LSW_PRESENT BIT_ULL(36)
+#define SSO_AF_CONST1_PRF_PRESENT BIT_ULL(37)
+#define SSO_AF_IAQ_FREE_CNT_MASK 0x3FFFull
+#define SSO_AF_IAQ_RSVD_FREE_MASK 0x3FFFull
+#define SSO_AF_IAQ_RSVD_FREE_SHIFT 16
+#define SSO_AF_IAQ_FREE_CNT_MAX SSO_AF_IAQ_FREE_CNT_MASK
+#define SSO_AF_AW_ADD_RSVD_FREE_MASK 0x3FFFull
+#define SSO_AF_AW_ADD_RSVD_FREE_SHIFT 16
+#define SSO_HWGRP_IAQ_MAX_THR_MASK 0x3FFFull
+#define SSO_HWGRP_IAQ_RSVD_THR_MASK 0x3FFFull
+#define SSO_HWGRP_IAQ_MAX_THR_SHIFT 32
+#define SSO_HWGRP_IAQ_RSVD_THR 0x2
+#define SSO_HWGRP_IAQ_GRP_CNT_SHIFT 48
+#define SSO_HWGRP_IAQ_GRP_CNT_MASK 0x3FFFull
+#define SSO_AF_HWGRPX_IUEX_NOSCHED(a, b)\
+ ((((b >> 48) & 0x3FF) == a) && (b & BIT_ULL(60)))
+#define SSO_AF_HWGRP_PAGE_CNT_MASK (BIT_ULL(32) - 1)
+#define SSO_HWGRP_IAQ_MAX_THR_STRM_PERF 0xD0
+#define SSO_AF_HWGRP_IU_ACCNT_MAX_THR 0x7FFFull
+
+#define SSO_AF_TAQ_FREE_CNT_MASK 0x7FFull
+#define SSO_AF_TAQ_RSVD_FREE_MASK 0x7FFull
+#define SSO_AF_TAQ_RSVD_FREE_SHIFT 16
+#define SSO_AF_TAQ_FREE_CNT_MAX SSO_AF_TAQ_FREE_CNT_MASK
+#define SSO_AF_TAQ_ADD_RSVD_FREE_MASK 0x1FFFull
+#define SSO_AF_TAQ_ADD_RSVD_FREE_SHIFT 16
+#define SSO_HWGRP_TAQ_MAX_THR_MASK 0x7FFull
+#define SSO_HWGRP_TAQ_RSVD_THR_MASK 0x7FFull
+#define SSO_HWGRP_TAQ_MAX_THR_SHIFT 32
+#define SSO_HWGRP_TAQ_RSVD_THR 0x3
+#define SSO_AF_ERR0_MASK 0xFFEull
+#define SSO_AF_ERR2_MASK 0xF001F000ull
+#define SSO_HWGRP_TAQ_MAX_THR_STRM_PERF 0x10
+
+#define SSO_HWGRP_PRI_MASK 0x7ull
+#define SSO_HWGRP_PRI_AFF_MASK 0xFull
+#define SSO_HWGRP_PRI_AFF_SHIFT 8
+#define SSO_HWGRP_PRI_WGT_MASK 0x3Full
+#define SSO_HWGRP_PRI_WGT_SHIFT 16
+#define SSO_HWGRP_PRI_WGT_LEFT_MASK 0x3Full
+#define SSO_HWGRP_PRI_WGT_LEFT_SHIFT 24
+
+#define SSO_HWGRP_AW_CFG_RWEN BIT_ULL(0)
+#define SSO_HWGRP_AW_CFG_LDWB BIT_ULL(1)
+#define SSO_HWGRP_AW_CFG_LDT BIT_ULL(2)
+#define SSO_HWGRP_AW_CFG_STT BIT_ULL(3)
+#define SSO_HWGRP_AW_CFG_XAQ_BYP_DIS BIT_ULL(4)
+#define SSO_HWGRP_AW_CFG_XAQ_ALLOC_DIS BIT_ULL(6)
+
+#define SSO_HWGRP_AW_STS_TPTR_VLD BIT_ULL(8)
+#define SSO_HWGRP_AW_STS_NPA_FETCH BIT_ULL(9)
+#define SSO_HWGRP_AW_STS_TPTR_NEXT_VLD BIT_ULL(10)
+#define SSO_HWGRP_AW_STS_XAQ_BUFSC_MASK 0x7ull
+#define SSO_HWGRP_AW_STS_INIT_STS 0x18ull
+
+#define SSO_LF_GGRP_OP_ADD_WORK1 (0x8ull)
+#define SSO_LF_GGRP_QCTL (0x20ull)
+#define SSO_LF_GGRP_INT (0x100ull)
+#define SSO_LF_GGRP_INT_ENA_W1S (0x110ull)
+#define SSO_LF_GGRP_INT_ENA_W1C (0x118ull)
+#define SSO_LF_GGRP_INT_THR (0x140ull)
+#define SSO_LF_GGRP_INT_CNT (0x180ull)
+#define SSO_LF_GGRP_XAQ_CNT (0x1b0ull)
+#define SSO_LF_GGRP_AQ_CNT (0x1c0ull)
+#define SSO_LF_GGRP_AQ_THR (0x1e0ull)
+#define SSO_LF_GGRP_MISC_CNT (0x200ull)
+
+#define SSO_LF_GGRP_INT_MASK (0X7)
+#define SSO_LF_GGRP_AQ_THR_MASK (BIT_ULL(33) - 1)
+#define SSO_LF_GGRP_XAQ_CNT_MASK (BIT_ULL(33) - 1)
+#define SSO_LF_GGRP_INT_CNT_MASK (0x3FFF3FFF0000ull)
/* SSOW */
#define SSOW_AF_RVU_LF_HWS_CFG_DEBUG (0x0010)
@@ -420,6 +621,34 @@
#define SSOW_PRIV_LFX_HWS_CFG (0x1000)
#define SSOW_PRIV_LFX_HWS_INT_CFG (0x2000)
+#define SSOW_LF_GWS_PENDSTATE (0x50ull)
+#define SSOW_LF_GWS_NW_TIM (0x70ull)
+#define SSOW_LF_GWS_INT (0x100ull)
+#define SSOW_LF_GWS_INT_ENA_W1C (0x118ull)
+#define SSOW_LF_GWS_TAG (0x200ull)
+#define SSOW_LF_GWS_WQP (0x210ull)
+#define SSOW_LF_GWS_PRF_TAG (0x400ull)
+#define SSOW_LF_GWS_OP_GET_WORK (0x600ull)
+#define SSOW_LF_GWS_OP_SWTAG_FLUSH (0x800ull)
+#define SSOW_LF_GWS_OP_DESCHED (0x880ull)
+#define SSOW_LF_GWS_OP_CLR_NSCHED0 (0xA00ull)
+#define SSOW_LF_GWS_OP_GWC_INVAL (0xe00ull)
+
+#define SSO_TT_EMPTY (0x3)
+#define SSOW_LF_GWS_INT_MASK (0x7FF)
+#define SSOW_LF_GWS_MAX_NW_TIM (BIT_ULL(10) - 1)
+#define SSOW_LF_GWS_OP_GET_WORK_WAIT BIT_ULL(16)
+#define SSOW_LF_GWS_OP_GET_WORK_GROUPED BIT_ULL(18)
+#define SSOW_LF_GWS_TAG_PEND_DESCHED BIT_ULL(58)
+#define SSOW_LF_GWS_TAG_PEND_SWITCH BIT_ULL(62)
+#define SSOW_LF_GWS_TAG_PEND_GET_WORK BIT_ULL(63)
+
+#define SSOW_AF_BAR2_SEL (0x9000000ull)
+#define SSOW_AF_BAR2_ALIASX(a, b) RVU_AF_BAR2_ALIASX(a, b)
+
+#define SSO_AF_BAR2_SEL (0x9000000ull)
+#define SSO_AF_BAR2_ALIASX(a, b) RVU_AF_BAR2_ALIASX(a, b)
+
/* TIM */
#define TIM_AF_CONST (0x90)
#define TIM_PRIV_LFX_CFG (0x20000)
@@ -427,17 +656,125 @@
#define TIM_AF_RVU_LF_CFG_DEBUG (0x30000)
#define TIM_AF_BLK_RST (0x10)
#define TIM_AF_LF_RST (0x20)
+#define TIM_AF_RINGX_GMCTL(a) (0x2000 | (a) << 3)
+#define TIM_AF_RINGX_CTL0(a) (0x4000 | (a) << 3)
+#define TIM_AF_RINGX_CTL1(a) (0x6000 | (a) << 3)
+#define TIM_AF_RINGX_CTL2(a) (0x8000 | (a) << 3)
+#define TIM_AF_FLAGS_REG (0x80)
+#define TIM_AF_FLAGS_REG_ENA_TIM BIT_ULL(0)
+#define TIM_AF_RINGX_CTL1_ENA BIT_ULL(47)
+#define TIM_AF_RINGX_CTL1_RCF_BUSY BIT_ULL(50)
+#define TIM_AF_ADJUST_TENNS (0x160)
+#define TIM_AF_ADJUST_GPIOS (0x170)
+#define TIM_AF_ADJUST_GTI (0x180)
+#define TIM_AF_ADJUST_PTP (0x190)
+#define TIM_AF_ADJUST_BTS (0x1B0)
+#define TIM_AF_ADJUST_TIMERS (0x1C0)
+#define TIM_AF_ADJUST_TIMERS_MASK BIT_ULL(0)
+#define TIM_AF_CAPTURE_TENNS (0x1D0)
+#define TIM_AF_CAPTURE_GPIOS (0x1E0)
+#define TIM_AF_CAPTURE_GTI (0x1F0)
+#define TIM_AF_CAPTURE_PTP (0x200)
+#define TIM_AF_CAPTURE_BTS (0x220)
+#define TIM_AF_CAPTURE_EXT_GTI (0x240)
+#define TIM_AF_CAPTURE_TIMERS (0x250)
+#define TIM_AF_CAPTURE_TIMERS_MASK GENMASK_ULL(1, 0)
+#define TIM_AF_RING_GMCTL_SHIFT 3
+#define TIM_AF_RING_SSO_PF_FUNC_SHIFT 0
+#define TIM_AF_FLAGS_REG_GPIO_EDGE_MASK GENMASK_ULL(6, 5)
/* CPT */
-#define CPT_AF_CONSTANTS0 (0x0000)
-#define CPT_PRIV_LFX_CFG (0x41000)
-#define CPT_PRIV_LFX_INT_CFG (0x43000)
-#define CPT_AF_RVU_LF_CFG_DEBUG (0x45000)
-#define CPT_AF_LF_RST (0x44000)
-#define CPT_AF_BLK_RST (0x46000)
+#define CPT_AF_CONSTANTS0 (0x0ull)
+#define CPT_AF_CONSTANTS1 (0x1000ull)
+#define CPT_AF_DIAG (0x3000ull)
+#define CPT_AF_ECO (0x4000ull)
+#define CPT_AF_FLTX_INT(a) (0xa000ull | (u64)(a) << 3)
+#define CPT_AF_FLTX_INT_W1S(a) (0xb000ull | (u64)(a) << 3)
+#define CPT_AF_FLTX_INT_ENA_W1C(a) (0xc000ull | (u64)(a) << 3)
+#define CPT_AF_FLTX_INT_ENA_W1S(a) (0xd000ull | (u64)(a) << 3)
+#define CPT_AF_PSNX_EXE(a) (0xe000ull | (u64)(a) << 3)
+#define CPT_AF_PSNX_EXE_W1S(a) (0xf000ull | (u64)(a) << 3)
+#define CPT_AF_PSNX_LF(a) (0x10000ull | (u64)(a) << 3)
+#define CPT_AF_PSNX_LF_W1S(a) (0x11000ull | (u64)(a) << 3)
+#define CPT_AF_EXEX_CTL2(a) (0x12000ull | (u64)(a) << 3)
+#define CPT_AF_EXEX_STS(a) (0x13000ull | (u64)(a) << 3)
+#define CPT_AF_EXE_ERR_INFO (0x14000ull)
+#define CPT_AF_EXEX_ACTIVE(a) (0x16000ull | (u64)(a) << 3)
+#define CPT_AF_INST_REQ_PC (0x17000ull)
+#define CPT_AF_INST_LATENCY_PC (0x18000ull)
+#define CPT_AF_RD_REQ_PC (0x19000ull)
+#define CPT_AF_RD_LATENCY_PC (0x1a000ull)
+#define CPT_AF_RD_UC_PC (0x1b000ull)
+#define CPT_AF_ACTIVE_CYCLES_PC (0x1c000ull)
+#define CPT_AF_EXE_DBG_CTL (0x1d000ull)
+#define CPT_AF_EXE_DBG_DATA (0x1e000ull)
+#define CPT_AF_EXE_REQ_TIMER (0x1f000ull)
+#define CPT_AF_EXEX_CTL(a) (0x20000ull | (u64)(a) << 3)
+#define CPT_AF_EXE_PERF_CTL (0x21000ull)
+#define CPT_AF_EXE_DBG_CNTX(a) (0x22000ull | (u64)(a) << 3)
+#define CPT_AF_EXE_PERF_EVENT_CNT (0x23000ull)
+#define CPT_AF_EXE_EPCI_INBX_CNT(a) (0x24000ull | (u64)(a) << 3)
+#define CPT_AF_EXE_EPCI_OUTBX_CNT(a) (0x25000ull | (u64)(a) << 3)
+#define CPT_AF_EXEX_UCODE_BASE(a) (0x26000ull | (u64)(a) << 3)
+#define CPT_AF_LFX_CTL(a) (0x27000ull | (u64)(a) << 3)
+#define CPT_AF_LFX_CTL2(a) (0x29000ull | (u64)(a) << 3)
+#define CPT_AF_CPTCLK_CNT (0x2a000ull)
+#define CPT_AF_PF_FUNC (0x2b000ull)
+#define CPT_AF_LFX_PTR_CTL(a) (0x2c000ull | (u64)(a) << 3)
+#define CPT_AF_GRPX_THR(a) (0x2d000ull | (u64)(a) << 3)
+#define CPT_AF_CTL (0x2e000ull)
+#define CPT_AF_XEX_THR(a) (0x2f000ull | (u64)(a) << 3)
+#define CPT_PRIV_LFX_CFG (0x41000ull)
+#define CPT_PRIV_AF_INT_CFG (0x42000ull)
+#define CPT_PRIV_LFX_INT_CFG (0x43000ull)
+#define CPT_AF_LF_RST (0x44000ull)
+#define CPT_AF_RVU_LF_CFG_DEBUG (0x45000ull)
+#define CPT_AF_BLK_RST (0x46000ull)
+#define CPT_AF_RVU_INT (0x47000ull)
+#define CPT_AF_RVU_INT_W1S (0x47008ull)
+#define CPT_AF_RVU_INT_ENA_W1S (0x47010ull)
+#define CPT_AF_RVU_INT_ENA_W1C (0x47018ull)
+#define CPT_AF_RAS_INT (0x47020ull)
+#define CPT_AF_RAS_INT_W1S (0x47028ull)
+#define CPT_AF_RAS_INT_ENA_W1S (0x47030ull)
+#define CPT_AF_RAS_INT_ENA_W1C (0x47038ull)
+#define CPT_AF_CTX_FLUSH_TIMER (0x48000ull)
+#define CPT_AF_CTX_ERR (0x48008ull)
+#define CPT_AF_CTX_ENC_ID (0x48010ull)
+#define CPT_AF_CTX_MIS_PC (0x49400ull)
+#define CPT_AF_CTX_HIT_PC (0x49408ull)
+#define CPT_AF_CTX_AOP_PC (0x49410ull)
+#define CPT_AF_CTX_AOP_LATENCY_PC (0x49418ull)
+#define CPT_AF_CTX_IFETCH_PC (0x49420ull)
+#define CPT_AF_CTX_IFETCH_LATENCY_PC (0x49428ull)
+#define CPT_AF_CTX_FFETCH_PC (0x49430ull)
+#define CPT_AF_CTX_FFETCH_LATENCY_PC (0x49438ull)
+#define CPT_AF_CTX_WBACK_PC (0x49440ull)
+#define CPT_AF_CTX_WBACK_LATENCY_PC (0x49448ull)
+#define CPT_AF_CTX_PSH_PC (0x49450ull)
+#define CPT_AF_CTX_PSH_LATENCY_PC (0x49458ull)
+#define CPT_AF_CTX_CAM_DATA(a) (0x49800ull | (u64)(a) << 3)
+#define CPT_AF_RXC_TIME (0x50010ull)
+#define CPT_AF_RXC_TIME_CFG (0x50018ull)
+#define CPT_AF_RXC_DFRG (0x50020ull)
+#define CPT_AF_RXC_ACTIVE_STS (0x50028ull)
+#define CPT_AF_RXC_ZOMBIE_STS (0x50030ull)
+#define CPT_AF_X2PX_LINK_CFG(a) (0x51000ull | (u64)(a) << 3)
+
+#define CPT_AF_BAR2_SEL 0x9000000
+#define CPT_AF_BAR2_ALIASX(a, b) RVU_AF_BAR2_ALIASX(a, b)
#define NPC_AF_BLK_RST (0x00040)
+#define CPT_AF_LF_CTL2_SHIFT 3
+#define CPT_AF_LF_SSO_PF_FUNC_SHIFT 32
+
+#define CPT_LF_CTL 0x10
+#define CPT_LF_INPROG 0x40
+#define CPT_LF_Q_GRP_PTR 0x120
+#define CPT_LF_CTX_FLUSH 0x510
+
/* NPC */
#define NPC_AF_CFG (0x00000)
#define NPC_AF_ACTIVE_PC (0x00010)
@@ -446,6 +783,8 @@
#define NPC_AF_BLK_RST (0x00040)
#define NPC_AF_MCAM_SCRUB_CTL (0x000a0)
#define NPC_AF_KCAM_SCRUB_CTL (0x000b0)
+#define NPC_AF_CONST2 (0x00100)
+#define NPC_AF_CONST3 (0x00110)
#define NPC_AF_KPUX_CFG(a) (0x00500 | (a) << 3)
#define NPC_AF_PCK_CFG (0x00600)
#define NPC_AF_PCK_DEF_OL2 (0x00610)
@@ -469,20 +808,7 @@
(0x900000 | (a) << 16 | (b) << 12 | (c) << 5 | (d) << 3)
#define NPC_AF_INTFX_LDATAX_FLAGSX_CFG(a, b, c) \
(0x980000 | (a) << 16 | (b) << 12 | (c) << 3)
-#define NPC_AF_MCAMEX_BANKX_CAMX_INTF(a, b, c) \
- (0x1000000ull | (a) << 10 | (b) << 6 | (c) << 3)
-#define NPC_AF_MCAMEX_BANKX_CAMX_W0(a, b, c) \
- (0x1000010ull | (a) << 10 | (b) << 6 | (c) << 3)
-#define NPC_AF_MCAMEX_BANKX_CAMX_W1(a, b, c) \
- (0x1000020ull | (a) << 10 | (b) << 6 | (c) << 3)
-#define NPC_AF_MCAMEX_BANKX_CFG(a, b) (0x1800000ull | (a) << 8 | (b) << 4)
-#define NPC_AF_MCAMEX_BANKX_STAT_ACT(a, b) \
- (0x1880000 | (a) << 8 | (b) << 4)
-#define NPC_AF_MATCH_STATX(a) (0x1880008 | (a) << 8)
#define NPC_AF_INTFX_MISS_STAT_ACT(a) (0x1880040 + (a) * 0x8)
-#define NPC_AF_MCAMEX_BANKX_ACTION(a, b) (0x1900000ull | (a) << 8 | (b) << 4)
-#define NPC_AF_MCAMEX_BANKX_TAG_ACT(a, b) \
- (0x1900008 | (a) << 8 | (b) << 4)
#define NPC_AF_INTFX_MISS_ACT(a) (0x1a00000 | (a) << 4)
#define NPC_AF_INTFX_MISS_TAG_ACT(a) (0x1b00008 | (a) << 4)
#define NPC_AF_MCAM_BANKX_HITX(a, b) (0x1c80000 | (a) << 8 | (b) << 4)
@@ -499,6 +825,147 @@
#define NPC_AF_DBG_DATAX(a) (0x3001400 | (a) << 4)
#define NPC_AF_DBG_RESULTX(a) (0x3001800 | (a) << 4)
+#define NPC_AF_MCAMEX_BANKX_CAMX_INTF(a, b, c) ({ \
+ u64 offset; \
+ \
+ offset = (0x1000000ull | (a) << 10 | (b) << 6 | (c) << 3); \
+ if (rvu->hw->npc_ext_set) \
+ offset = (0x8000000ull | (a) << 8 | (b) << 22 | (c) << 3); \
+ offset; })
+
+#define NPC_AF_MCAMEX_BANKX_CAMX_W0(a, b, c) ({ \
+ u64 offset; \
+ \
+ offset = (0x1000010ull | (a) << 10 | (b) << 6 | (c) << 3); \
+ if (rvu->hw->npc_ext_set) \
+ offset = (0x8000010ull | (a) << 8 | (b) << 22 | (c) << 3); \
+ offset; })
+
+#define NPC_AF_MCAMEX_BANKX_CAMX_W1(a, b, c) ({ \
+ u64 offset; \
+ \
+ offset = (0x1000020ull | (a) << 10 | (b) << 6 | (c) << 3); \
+ if (rvu->hw->npc_ext_set) \
+ offset = (0x8000020ull | (a) << 8 | (b) << 22 | (c) << 3); \
+ offset; })
+
+#define NPC_AF_MCAMEX_BANKX_CFG(a, b) ({ \
+ u64 offset; \
+ \
+ offset = (0x1800000ull | (a) << 8 | (b) << 4); \
+ if (rvu->hw->npc_ext_set) \
+ offset = (0x8000038ull | (a) << 8 | (b) << 22); \
+ offset; })
+
+#define NPC_AF_MCAMEX_BANKX_ACTION(a, b) ({ \
+ u64 offset; \
+ \
+ offset = (0x1900000ull | (a) << 8 | (b) << 4); \
+ if (rvu->hw->npc_ext_set) \
+ offset = (0x8000040ull | (a) << 8 | (b) << 22); \
+ offset; }) \
+
+#define NPC_AF_MCAMEX_BANKX_TAG_ACT(a, b) ({ \
+ u64 offset; \
+ \
+ offset = (0x1900008ull | (a) << 8 | (b) << 4); \
+ if (rvu->hw->npc_ext_set) \
+ offset = (0x8000048ull | (a) << 8 | (b) << 22); \
+ offset; }) \
+
+#define NPC_AF_MCAMEX_BANKX_STAT_ACT(a, b) ({ \
+ u64 offset; \
+ \
+ offset = (0x1880000ull | (a) << 8 | (b) << 4); \
+ if (rvu->hw->npc_ext_set) \
+ offset = (0x8000050ull | (a) << 8 | (b) << 22); \
+ offset; }) \
+
+#define NPC_AF_MATCH_STATX(a) ({ \
+ u64 offset; \
+ \
+ offset = (0x1880008ull | (a) << 8); \
+ if (rvu->hw->npc_ext_set) \
+ offset = (0x8000078ull | (a) << 8); \
+ offset; }) \
+
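+/* Note on the MCAM accessors above: unlike the fixed-offset macros in this
+ * header, they pick their offset at run time from rvu->hw->npc_ext_set, so
+ * they may only be used where a local 'struct rvu *rvu' is in scope, e.g.
+ * (illustrative sketch, not a real call site):
+ *
+ *   action = rvu_read64(rvu, blkaddr,
+ *                       NPC_AF_MCAMEX_BANKX_ACTION(index, bank));
+ */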
+
+/* REE */
+#define REE_AF_CMD_CTL (0x00ull)
+#define REE_AF_CONSTANTS (0x0A0ull)
+#define REE_AF_AQ_SBUF_CTL (0x100ull)
+#define REE_AF_AQ_SBUF_ADDR (0x110ull)
+#define REE_AF_AQ_DONE (0x128ull)
+#define REE_AF_AQ_DONE_ACK (0x130ull)
+#define REE_AF_AQ_DONE_INT (0x150ull)
+#define REE_AF_AQ_DONE_INT_ENA_W1S (0x168ull)
+#define REE_AF_AQ_DONE_INT_ENA_W1C (0x170ull)
+#define REE_AF_AQ_ENA (0x180ull)
+#define REE_AF_AQ_DOORBELL (0x200ull)
+#define REE_AF_PF_FUNC (0x210ull)
+#define REE_AF_EM_BASE (0x300ull)
+#define REE_AF_RAS (0x980ull)
+#define REE_AF_RAS_ENA_W1C (0x990ull)
+#define REE_AF_RAS_ENA_W1S (0x998ull)
+#define REE_AF_QUE_SBUF_CTL(a) (0x1200ull | (a) << 3)
+#define REE_PRIV_AF_INT_CFG (0x4000ull)
+#define REE_AF_REEXM_STATUS (0x8050ull)
+#define REE_AF_REEXM_CTRL (0x80C0ull)
+#define REE_AF_REEXM_MAX_MATCH (0x80C8ull)
+#define REE_AF_REEXM_MAX_PRE_CNT (0x80D0ull)
+#define REE_AF_REEXM_MAX_PTHREAD_CNT (0x80D8ull)
+#define REE_AF_REEXM_MAX_LATENCY_CNT (0x80E0ull)
+#define REE_AF_REEXR_STATUS (0x8250ull)
+#define REE_AF_REEXR_CTRL (0x82C0ull)
+#define REE_PRIV_LFX_CFG (0x41000ull)
+#define REE_PRIV_LFX_INT_CFG (0x42000ull)
+#define REE_AF_LF_RST (0x43000ull)
+#define REE_AF_RVU_LF_CFG_DEBUG (0x44000ull)
+#define REE_AF_BLK_RST (0x45000ull)
+#define REE_AF_RVU_INT (0x46000ull)
+#define REE_AF_RVU_INT_ENA_W1S (0x46010ull)
+#define REE_AF_RVU_INT_ENA_W1C (0x46018ull)
+#define REE_AF_AQ_INT (0x46020ull)
+#define REE_AF_AQ_INT_ENA_W1S (0x46030ull)
+#define REE_AF_AQ_INT_ENA_W1C (0x46038ull)
+#define REE_AF_GRACEFUL_DIS_CTL (0x46100ull)
+#define REE_AF_GRACEFUL_DIS_STATUS (0x46110ull)
+
+#define REE_AF_FORCE_CSCLK BIT_ULL(1)
+#define REE_AF_FORCE_CCLK BIT_ULL(2)
+#define REE_AF_RAS_DAT_PSN BIT_ULL(0)
+#define REE_AF_RAS_LD_CMD_PSN BIT_ULL(1)
+#define REE_AF_RAS_LD_REEX_PSN BIT_ULL(2)
+#define REE_AF_RVU_INT_UNMAPPED_SLOT BIT_ULL(0)
+#define REE_AF_AQ_INT_DOVF BIT_ULL(0)
+#define REE_AF_AQ_INT_IRDE BIT_ULL(1)
+#define REE_AF_AQ_INT_PRDE BIT_ULL(2)
+#define REE_AF_AQ_INT_PLLE BIT_ULL(3)
+#define REE_AF_REEXM_CTRL_INIT BIT_ULL(0)
+#define REE_AF_REEXM_CTRL_GO BIT_ULL(3)
+#define REE_AF_REEXM_STATUS_INIT_DONE BIT_ULL(0)
+#define REE_AF_REEXR_CTRL_INIT BIT_ULL(0)
+#define REE_AF_REEXR_CTRL_GO BIT_ULL(1)
+#define REE_AF_REEXR_CTRL_MODE_IM_L1_L2 BIT_ULL(4)
+#define REE_AF_REEXR_CTRL_MODE_L1_L2 BIT_ULL(5)
+
+#define REE_AF_AQ_SBUF_CTL_SIZE_SHIFT 32
+#define REE_AF_REEXM_MAX_MATCH_MAX 0xFEull
+#define REE_AF_REEXM_MAX_PRE_CNT_COUNT 0x3F0ull
+#define REE_AF_REEXM_MAX_PTHREAD_COUNT 0xFFFFull
+#define REE_AF_REEXM_MAX_LATENCY_COUNT 0xFFFFull
+#define REE_AF_QUE_SBUF_CTL_SIZE_SHIFT 32
+#define REE_AF_REEX_CSR_BLOCK_BASE_ADDR (0x8000ull)
+#define REE_AF_REEX_CSR_BLOCK_ID (0x200ull)
+#define REE_AF_REEX_CSR_BLOCK_ID_MASK GENMASK_ULL(18, 16)
+#define REE_AF_REEX_CSR_BLOCK_ID_SHIFT 16
+#define REE_AF_REEX_CSR_INDEX 8
+#define REE_AF_REEX_CSR_INDEX_MASK GENMASK_ULL(4, 0)
+#define REE_AF_QUE_SBUF_CTL_MAX_SIZE GENMASK_ULL((50 - 32), 0)
+#define REE_AF_REEXR_STATUS_IM_INIT_DONE BIT_ULL(4)
+#define REE_AF_REEXR_STATUS_L1_CACHE_INIT_DONE BIT_ULL(5)
+#define REE_AF_REEXR_STATUS_L2_CACHE_INIT_DONE BIT_ULL(6)
+
/* NDC */
#define NDC_AF_CONST (0x00000)
#define NDC_AF_CLK_EN (0x00020)
@@ -525,4 +992,26 @@
(0x00F00 | (a) << 5 | (b) << 4)
#define NDC_AF_BANKX_HIT_PC(a) (0x01000 | (a) << 3)
#define NDC_AF_BANKX_MISS_PC(a) (0x01100 | (a) << 3)
+
+/* LBK */
+#define LBK_CONST (0x10ull)
+#define LBK_LINK_CFG_P2X (0x400ull)
+#define LBK_LINK_CFG_X2P (0x408ull)
+#define LBK_CONST_CHANS GENMASK_ULL(47, 32)
+#define LBK_CONST_DST GENMASK_ULL(31, 28)
+#define LBK_CONST_SRC GENMASK_ULL(27, 24)
+#define LBK_CONST_BUF_SIZE GENMASK_ULL(23, 0)
+#define LBK_LINK_CFG_RANGE_MASK GENMASK_ULL(19, 16)
+#define LBK_LINK_CFG_ID_MASK GENMASK_ULL(11, 6)
+#define LBK_LINK_CFG_BASE_MASK GENMASK_ULL(5, 0)
+
+/* APR */
+#define APR_AF_LMT_CFG (0x000ull)
+#define APR_AF_LMT_MAP_BASE (0x008ull)
+#define APR_AF_LMT_CTL (0x010ull)
+
+#define APR_LMT_MAP_ENT_DIS_SCH_CMP_SHIFT 23
+#define APR_LMT_MAP_ENT_SCH_ENA_SHIFT 22
+#define APR_LMT_MAP_ENT_DIS_LINE_PREF_SHIFT 21
+
#endif /* RVU_REG_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c
new file mode 100644
index 000000000000..b04fb226f708
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2021 Marvell.
+ *
+ */
+
+#include <linux/pci.h>
+#include "rvu.h"
+
+/* SDP PF device id */
+#define PCI_DEVID_OTX2_SDP_PF 0xA0F6
+
+/* Maximum SDP blocks in a chip */
+#define MAX_SDP 2
+
+/* SDP PF number */
+static int sdp_pf_num[MAX_SDP] = {-1, -1};
+
+bool is_sdp_pfvf(u16 pcifunc)
+{
+ u16 pf = rvu_get_pf(pcifunc);
+ u32 i;
+
+ for (i = 0; i < MAX_SDP; i++) {
+ if (pf == sdp_pf_num[i])
+ return true;
+ }
+
+ return false;
+}
+
+bool is_sdp_pf(u16 pcifunc)
+{
+ return (is_sdp_pfvf(pcifunc) &&
+ !(pcifunc & RVU_PFVF_FUNC_MASK));
+}
+
+bool is_sdp_vf(u16 pcifunc)
+{
+ return (is_sdp_pfvf(pcifunc) &&
+ !!(pcifunc & RVU_PFVF_FUNC_MASK));
+}
+
+int rvu_sdp_init(struct rvu *rvu)
+{
+ struct pci_dev *pdev = NULL;
+ struct rvu_pfvf *pfvf;
+ u32 i = 0;
+
+ while ((i < MAX_SDP) && (pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OTX2_SDP_PF,
+ pdev)) != NULL) {
+ /* The RVU PF number is one less than bus number */
+ sdp_pf_num[i] = pdev->bus->number - 1;
+ pfvf = &rvu->pf[sdp_pf_num[i]];
+
+ pfvf->sdp_info = devm_kzalloc(rvu->dev,
+ sizeof(struct sdp_node_info),
+ GFP_KERNEL);
+ if (!pfvf->sdp_info)
+ return -ENOMEM;
+
+ dev_info(rvu->dev, "SDP PF number:%d\n", sdp_pf_num[i]);
+
+ put_device(&pdev->dev);
+ i++;
+ }
+
+ return 0;
+}
+
+int
+rvu_mbox_handler_set_sdp_chan_info(struct rvu *rvu,
+ struct sdp_chan_info_msg *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+
+ memcpy(pfvf->sdp_info, &req->info, sizeof(struct sdp_node_info));
+ dev_info(rvu->dev, "AF: SDP%d max_vfs %d num_pf_rings %d pf_srn %d\n",
+ req->info.node_id, req->info.max_vfs, req->info.num_pf_rings,
+ req->info.pf_srn);
+ return 0;
+}
+
+int
+rvu_mbox_handler_get_sdp_chan_info(struct rvu *rvu, struct msg_req *req,
+ struct sdp_get_chan_info_msg *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int blkaddr;
+
+ if (!hw->cap.programmable_chans) {
+ rsp->chan_base = NIX_CHAN_SDP_CH_START;
+ rsp->num_chan = NIX_CHAN_SDP_NUM_CHANS;
+ } else {
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ rsp->chan_base = hw->sdp_chan_base;
+ rsp->num_chan = rvu_read64(rvu, blkaddr, NIX_AF_CONST1) & 0xFFFUL;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_sso.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sso.c
new file mode 100644
index 000000000000..f3685901aaa1
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sso.c
@@ -0,0 +1,1661 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell.
+ *
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/types.h>
+
+#include "rvu_struct.h"
+
+#include "rvu_reg.h"
+#include "rvu.h"
+
+#define NPA_LF_AURA_OP_FREE0 0x20
+#define NPA_LF_AURA_OP_CNT 0x30
+
+#if defined(CONFIG_ARM64)
+#define rvu_sso_store_pair(val0, val1, addr) ({ \
+ __asm__ volatile("stp %x[x0], %x[x1], [%x[p1]]" \
+ : \
+ : \
+ [x0]"r"(val0), [x1]"r"(val1), [p1]"r"(addr)); \
+ })
+
+#define rvu_sso_ldadd(result, incr, ptr) ({ \
+ __asm__ volatile(".cpu generic+lse\n" \
+ "ldadd %x[i], %x[r], [%[b]]" \
+ : [r] "=r" (result), "+m" (*ptr) \
+ : [i] "r" (incr), [b] "r" (ptr) \
+ : "memory"); \
+ })
+#else
+#define rvu_sso_store_pair(val0, val1, addr) \
+ do { \
+ u64 *addr1 = (void *)addr; \
+ *addr1 = val0; \
+ *(u64 *)(((u8 *)addr1) + 8) = val1; \
+ } while (0)
+
+/* Non-atomic fallback for compile-test builds: mimic LDADD by returning the
+ * old value and then adding the increment.
+ */
+#define rvu_sso_ldadd(result, incr, ptr) \
+ do { \
+ (result) = *(ptr); \
+ *(ptr) += (incr); \
+ } while (0)
+#endif
+
+#define SSO_AF_INT_DIGEST_PRNT(reg) \
+ for (i = 0; i < block->lf.max / 64; i++) { \
+ reg0 = rvu_read64(rvu, blkaddr, reg##X(i)); \
+ dev_err_ratelimited(rvu->dev, #reg "(%d) : 0x%llx", i, \
+ reg0); \
+ rvu_write64(rvu, blkaddr, reg##X(i), reg0); \
+ }
+
+void rvu_sso_hwgrp_config_thresh(struct rvu *rvu, int blkaddr, int lf)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u64 add, grp_thr, grp_rsvd;
+ u64 reg;
+
+ /* Configure IAQ Thresholds */
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_IAQ_THR(lf));
+ grp_rsvd = reg & SSO_HWGRP_IAQ_RSVD_THR_MASK;
+ add = hw->sso.iaq_rsvd - grp_rsvd;
+
+ grp_thr = hw->sso.iaq_rsvd & SSO_HWGRP_IAQ_RSVD_THR_MASK;
+ grp_thr |= ((hw->sso.iaq_max & SSO_HWGRP_IAQ_MAX_THR_MASK) <<
+ SSO_HWGRP_IAQ_MAX_THR_SHIFT);
+
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_IAQ_THR(lf), grp_thr);
+
+ if (add)
+ rvu_write64(rvu, blkaddr, SSO_AF_AW_ADD,
+ (add & SSO_AF_AW_ADD_RSVD_FREE_MASK) <<
+ SSO_AF_AW_ADD_RSVD_FREE_SHIFT);
+
+ /* Configure TAQ Thresholds */
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_TAQ_THR(lf));
+ grp_rsvd = reg & SSO_HWGRP_TAQ_RSVD_THR_MASK;
+ add = hw->sso.taq_rsvd - grp_rsvd;
+
+ grp_thr = hw->sso.taq_rsvd & SSO_HWGRP_TAQ_RSVD_THR_MASK;
+ grp_thr |= ((hw->sso.taq_max & SSO_HWGRP_TAQ_MAX_THR_MASK) <<
+ SSO_HWGRP_TAQ_MAX_THR_SHIFT);
+
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_TAQ_THR(lf), grp_thr);
+
+ if (add)
+ rvu_write64(rvu, blkaddr, SSO_AF_TAQ_ADD,
+ (add & SSO_AF_TAQ_RSVD_FREE_MASK) <<
+ SSO_AF_TAQ_ADD_RSVD_FREE_SHIFT);
+}
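+
+/* Minimal sketch (assumption: helper names are illustrative, the function
+ * above programs the registers directly) of the SSO_AF_HWGRPX_IAQ_THR and
+ * SSO_AF_HWGRPX_TAQ_THR layout: the reserved threshold sits in the low bits
+ * and the maximum threshold is shifted up by the *_MAX_THR_SHIFT.
+ */
+static inline u64 sso_iaq_thr_val(u64 max_thr, u64 rsvd_thr)
+{
+ return (rsvd_thr & SSO_HWGRP_IAQ_RSVD_THR_MASK) |
+ ((max_thr & SSO_HWGRP_IAQ_MAX_THR_MASK) <<
+ SSO_HWGRP_IAQ_MAX_THR_SHIFT);
+}
+
+static inline u64 sso_taq_thr_val(u64 max_thr, u64 rsvd_thr)
+{
+ return (rsvd_thr & SSO_HWGRP_TAQ_RSVD_THR_MASK) |
+ ((max_thr & SSO_HWGRP_TAQ_MAX_THR_MASK) <<
+ SSO_HWGRP_TAQ_MAX_THR_SHIFT);
+}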
+
+static void rvu_sso_enable_aw_src(struct rvu *rvu, int lf_cnt, int sub_blkaddr,
+ u64 addr, int *lf_arr, u16 pcifunc, u8 shift,
+ u8 addr_off)
+{
+ u64 reg;
+ int lf;
+
+ for (lf = 0; lf < lf_cnt; lf++) {
+ reg = rvu_read64(rvu, sub_blkaddr, addr |
+ lf_arr[lf] << addr_off);
+
+ reg |= ((u64)pcifunc << shift);
+ rvu_write64(rvu, sub_blkaddr, addr |
+ lf_arr[lf] << addr_off, reg);
+ }
+}
+
+static int rvu_sso_disable_aw_src(struct rvu *rvu, int **lf_arr,
+ int sub_blkaddr, u8 shift, u8 addr_off,
+ u16 pcifunc, u64 addr)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int lf_cnt = 0, lf;
+ u64 reg;
+
+ if (sub_blkaddr >= 0) {
+ block = &hw->block[sub_blkaddr];
+ *lf_arr = kmalloc_array(block->lf.max, sizeof(int), GFP_KERNEL);
+ if (!*lf_arr)
+ return 0;
+
+ for (lf = 0; lf < block->lf.max; lf++) {
+ reg = rvu_read64(rvu, sub_blkaddr,
+ addr | lf << addr_off);
+ if (((reg >> shift) & 0xFFFFul) != pcifunc)
+ continue;
+
+ reg &= ~(0xFFFFul << shift);
+ rvu_write64(rvu, sub_blkaddr, addr | lf << addr_off,
+ reg);
+ (*lf_arr)[lf_cnt] = lf;
+ lf_cnt++;
+ }
+ }
+
+ return lf_cnt;
+}
+
+static void rvu_sso_ggrp_taq_flush(struct rvu *rvu, u16 pcifunc, int lf,
+ int slot, int ssow_lf, u64 blkaddr,
+ u64 ssow_blkaddr)
+{
+ int nix_lf_cnt, cpt_lf_cnt, tim_lf_cnt;
+ int *nix_lf, *cpt_lf, *tim_lf;
+ u64 reg, val;
+
+ /* Disable add work. */
+ rvu_write64(rvu, blkaddr, SSO_AF_BAR2_ALIASX(slot, SSO_LF_GGRP_QCTL),
+ 0);
+
+ /* Disable all sources of work. */
+ nix_lf = NULL;
+ nix_lf_cnt = rvu_sso_disable_aw_src(rvu, &nix_lf,
+ rvu_get_blkaddr(rvu, BLKTYPE_NIX,
+ pcifunc),
+ NIX_AF_LF_SSO_PF_FUNC_SHIFT,
+ NIX_AF_LF_CFG_SHIFT, pcifunc,
+ NIX_AF_LFX_CFG(0));
+
+ cpt_lf = NULL;
+ cpt_lf_cnt = rvu_sso_disable_aw_src(rvu, &cpt_lf,
+ rvu_get_blkaddr(rvu, BLKTYPE_CPT,
+ 0),
+ CPT_AF_LF_SSO_PF_FUNC_SHIFT,
+ CPT_AF_LF_CTL2_SHIFT, pcifunc,
+ CPT_AF_LFX_CTL2(0));
+
+ tim_lf = NULL;
+ tim_lf_cnt = rvu_sso_disable_aw_src(rvu, &tim_lf,
+ rvu_get_blkaddr(rvu, BLKTYPE_TIM,
+ 0),
+ TIM_AF_RING_SSO_PF_FUNC_SHIFT,
+ TIM_AF_RING_GMCTL_SHIFT, pcifunc,
+ TIM_AF_RINGX_GMCTL(0));
+
+ /* ZIP and DPI blocks not yet implemented. */
+
+ /* Enable add work. */
+ rvu_write64(rvu, blkaddr, SSO_AF_BAR2_ALIASX(slot, SSO_LF_GGRP_QCTL),
+ 0x1);
+
+ /* Make sure that all the in-flights are complete before invalidate. */
+ mb();
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_OP_GWC_INVAL), 0x0);
+ /* Prepare WS for GW operations. */
+ do {
+ reg = rvu_read64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_TAG));
+ } while (reg & BIT_ULL(63));
+
+ if (reg & BIT_ULL(62))
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_OP_DESCHED),
+ 0x0);
+ else if (((reg >> 32) & SSO_TT_EMPTY) != SSO_TT_EMPTY)
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_OP_SWTAG_FLUSH),
+ 0x0);
+
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_OP_GWC_INVAL), 0x0);
+ /* Drain TAQ. */
+ val = slot;
+ val |= BIT_ULL(18);
+ val |= BIT_ULL(16);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_TAQ_THR(lf));
+ while ((reg >> 48) & 0x7FF) {
+ rvu_write64(rvu, blkaddr,
+ SSO_AF_BAR2_ALIASX(slot, SSO_LF_GGRP_OP_ADD_WORK1),
+ 0x1 << 3);
+get_work:
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_OP_GET_WORK),
+ val);
+ do {
+ reg = rvu_read64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0,
+ SSOW_LF_GWS_TAG));
+ } while (reg & BIT_ULL(63));
+
+ if (!rvu_read64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_WQP)))
+ goto get_work;
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_TAQ_THR(lf));
+ }
+
+ reg = rvu_read64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_TAG));
+ if (((reg >> 32) & SSO_TT_EMPTY) != SSO_TT_EMPTY)
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_OP_SWTAG_FLUSH),
+ 0x0);
+
+ /* Disable add work. */
+ rvu_write64(rvu, blkaddr, SSO_AF_BAR2_ALIASX(slot, SSO_LF_GGRP_QCTL),
+ 0x0);
+
+ /* Restore all sources of work. */
+ rvu_sso_enable_aw_src(rvu, nix_lf_cnt, rvu_get_blkaddr(rvu, BLKTYPE_NIX,
+ pcifunc),
+ NIX_AF_LFX_CFG(0), nix_lf, pcifunc,
+ NIX_AF_LF_SSO_PF_FUNC_SHIFT,
+ NIX_AF_LF_CFG_SHIFT);
+ rvu_sso_enable_aw_src(rvu, cpt_lf_cnt, rvu_get_blkaddr(rvu, BLKTYPE_CPT,
+ 0),
+ CPT_AF_LFX_CTL2(0), cpt_lf, pcifunc,
+ CPT_AF_LF_SSO_PF_FUNC_SHIFT,
+ CPT_AF_LF_CTL2_SHIFT);
+ rvu_sso_enable_aw_src(rvu, tim_lf_cnt, rvu_get_blkaddr(rvu, BLKTYPE_TIM,
+ 0),
+ TIM_AF_RINGX_GMCTL(0), tim_lf, pcifunc,
+ TIM_AF_RING_SSO_PF_FUNC_SHIFT,
+ TIM_AF_RING_GMCTL_SHIFT);
+
+ kfree(nix_lf);
+ kfree(cpt_lf);
+ kfree(tim_lf);
+}
+
+static void rvu_sso_clean_nscheduled(struct rvu *rvu, int lf)
+{
+ struct sso_rsrc *sso = &rvu->hw->sso;
+ int blkaddr, ssow_blkaddr, iue;
+ u64 wqp, reg, op_clr_nsched;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ ssow_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSOW, 0);
+ op_clr_nsched = (ssow_blkaddr << 28) |
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_OP_CLR_NSCHED0);
+ for (iue = 0; iue < sso->sso_iue; iue++) {
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_IENTX_GRP(iue));
+ if (SSO_AF_HWGRPX_IUEX_NOSCHED(lf, reg)) {
+ wqp = rvu_read64(rvu, blkaddr, SSO_AF_IENTX_WQP(iue));
+ rvu_sso_store_pair(wqp, iue,
+ rvu->afreg_base + op_clr_nsched);
+ }
+ }
+}
+
+static void rvu_ssow_clean_prefetch(struct rvu *rvu, int slot)
+{
+ int ssow_blkaddr, err;
+ u64 val, reg;
+
+ ssow_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSOW, 0);
+
+ /* Make sure that all the in-flights are complete before invalidate. */
+ mb();
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(slot, SSOW_LF_GWS_OP_GWC_INVAL), 0x0);
+
+ err = rvu_poll_reg(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(slot, SSOW_LF_GWS_PRF_TAG),
+ SSOW_LF_GWS_TAG_PEND_GET_WORK, true);
+ if (err)
+ dev_warn(rvu->dev,
+ "SSOW_LF_GWS_PRF_TAG[PEND_GET_WORK] not cleared\n");
+
+ reg = rvu_read64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(slot, SSOW_LF_GWS_PRF_TAG));
+ if (((reg >> 32) & SSO_TT_EMPTY) != SSO_TT_EMPTY) {
+ val = 0x0;
+ val |= SSOW_LF_GWS_OP_GET_WORK_WAIT;
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(slot, SSOW_LF_GWS_OP_GET_WORK),
+ val);
+ err = rvu_poll_reg(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(slot,
+ SSOW_LF_GWS_TAG),
+ SSOW_LF_GWS_TAG_PEND_GET_WORK, true);
+ if (err)
+ dev_warn(rvu->dev,
+ "SSOW_LF_GWS_PENDSTATE[PEND_GET_WORK] not cleared\n");
+
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(slot,
+ SSOW_LF_GWS_OP_SWTAG_FLUSH),
+ 0x0);
+ }
+}
+
+void rvu_sso_lf_drain_queues(struct rvu *rvu, u16 pcifunc, int lf, int slot)
+{
+ bool has_prefetch, has_nsched, has_lsw;
+ int ssow_lf, blkaddr, ssow_blkaddr;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u64 aq_cnt, ds_cnt, cq_ds_cnt;
+ u64 reg, val;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return;
+
+ /* Read hardware capabilities */
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_CONST1);
+ has_lsw = !!(reg & SSO_AF_CONST1_LSW_PRESENT);
+ has_nsched = !(reg & SSO_AF_CONST1_NO_NSCHED);
+ has_prefetch = !!(reg & SSO_AF_CONST1_PRF_PRESENT);
+
+ ssow_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSOW, 0);
+ if (ssow_blkaddr < 0)
+ return;
+ /* Check whether an SSOW LF is attached in slot 0; if not, no HWS are attached. */
+ ssow_lf = rvu_get_lf(rvu, &hw->block[ssow_blkaddr], pcifunc, 0);
+ if (ssow_lf < 0)
+ return;
+
+ /* Enable BAR2 ALIAS for this pcifunc. */
+ reg = BIT_ULL(16) | pcifunc;
+ rvu_write64(rvu, blkaddr, SSO_AF_BAR2_SEL, reg);
+ rvu_write64(rvu, ssow_blkaddr, SSOW_AF_BAR2_SEL, reg);
+
+ /* Ignore all interrupts */
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_INT_ENA_W1C),
+ SSOW_LF_GWS_INT_MASK);
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_INT),
+ SSOW_LF_GWS_INT_MASK);
+
+ if (has_lsw)
+ rvu_write64(rvu, blkaddr, SSO_AF_HWSX_LSW_CFG(ssow_lf), 0x0);
+
+	/* Make sure that all the in-flights are complete before invalidating. */
+ mb();
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_OP_GWC_INVAL), 0x0);
+ /* Prepare WS for GW operations. */
+ rvu_poll_reg(rvu, ssow_blkaddr, SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_TAG),
+ SSOW_LF_GWS_TAG_PEND_GET_WORK, true);
+
+ reg = rvu_read64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_TAG));
+ if (reg & SSOW_LF_GWS_TAG_PEND_SWITCH)
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_OP_DESCHED), 0);
+ else if (((reg >> 32) & SSO_TT_EMPTY) != SSO_TT_EMPTY)
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_OP_SWTAG_FLUSH),
+ 0);
+
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_OP_GWC_INVAL), 0);
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_NW_TIM),
+ SSOW_LF_GWS_MAX_NW_TIM);
+
+ if (has_prefetch)
+ rvu_ssow_clean_prefetch(rvu, 0);
+
+ /* Disable add work. */
+ rvu_write64(rvu, blkaddr, SSO_AF_BAR2_ALIASX(slot, SSO_LF_GGRP_QCTL),
+ 0x0);
+
+ /* HRM 14.13.4 (4) */
+	/* Clean up nscheduled IENT entries and let the work flow. */
+ if (has_nsched)
+ rvu_sso_clean_nscheduled(rvu, lf);
+
+ /* HRM 14.13.4 (6) */
+ /* Drain all the work using grouped gw. */
+ aq_cnt = rvu_read64(rvu, blkaddr,
+ SSO_AF_BAR2_ALIASX(slot, SSO_LF_GGRP_AQ_CNT));
+ ds_cnt = rvu_read64(rvu, blkaddr,
+ SSO_AF_BAR2_ALIASX(slot, SSO_LF_GGRP_MISC_CNT));
+ cq_ds_cnt = rvu_read64(rvu, blkaddr,
+ SSO_AF_BAR2_ALIASX(slot, SSO_LF_GGRP_INT_CNT));
+ cq_ds_cnt &= SSO_LF_GGRP_INT_CNT_MASK;
+
+ val = slot; /* GGRP ID */
+ val |= SSOW_LF_GWS_OP_GET_WORK_GROUPED;
+ val |= SSOW_LF_GWS_OP_GET_WORK_WAIT;
+
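+	/* Keep issuing grouped GETWORK on this HWGRP, flushing any non-EMPTY
+	 * tag returned, until the AQ_CNT, MISC_CNT and INT_CNT work counters
+	 * all read zero.
+	 */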
+ while (aq_cnt || cq_ds_cnt || ds_cnt) {
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_OP_GET_WORK),
+ val);
+ do {
+ reg = rvu_read64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0,
+ SSOW_LF_GWS_TAG));
+ } while (reg & SSOW_LF_GWS_TAG_PEND_GET_WORK);
+ if (((reg >> 32) & SSO_TT_EMPTY) != SSO_TT_EMPTY)
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0,
+ SSOW_LF_GWS_OP_SWTAG_FLUSH),
+ 0x0);
+ aq_cnt = rvu_read64(rvu, blkaddr,
+ SSO_AF_BAR2_ALIASX(slot, SSO_LF_GGRP_AQ_CNT)
+ );
+ ds_cnt = rvu_read64(rvu, blkaddr,
+ SSO_AF_BAR2_ALIASX(slot,
+ SSO_LF_GGRP_MISC_CNT));
+ cq_ds_cnt = rvu_read64(rvu, blkaddr,
+ SSO_AF_BAR2_ALIASX(slot,
+ SSO_LF_GGRP_INT_CNT));
+ /* Extract cq and ds count */
+ cq_ds_cnt &= SSO_LF_GGRP_INT_CNT_MASK;
+ }
+
+	/* Due to Errata 35432, SSO doesn't release the partially consumed
+ * TAQ buffer used by HWGRP when HWGRP is reset. Use SW routine to
+ * drain it manually.
+ */
+ if (is_rvu_96xx_B0(rvu))
+ rvu_sso_ggrp_taq_flush(rvu, pcifunc, lf, slot, ssow_lf, blkaddr,
+ ssow_blkaddr);
+
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_NW_TIM), 0x0);
+
+ /* HRM 14.13.4 (7) */
+ reg = rvu_read64(rvu, blkaddr,
+ SSO_AF_BAR2_ALIASX(slot, SSO_LF_GGRP_XAQ_CNT))
+ & SSO_LF_GGRP_XAQ_CNT_MASK;
+ if (reg != 0)
+ dev_warn(rvu->dev,
+ "SSO_LF[%d]_GGRP_XAQ_CNT is %lld expected 0", lf, reg);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_PAGE_CNT(lf))
+ & SSO_AF_HWGRP_PAGE_CNT_MASK;
+ if (reg != 0)
+ dev_warn(rvu->dev,
+ "SSO_AF_HWGRP[%d]_PAGE_CNT is %lld expected 0", lf,
+ reg);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_IAQ_THR(lf))
+ >> SSO_HWGRP_IAQ_GRP_CNT_SHIFT;
+ reg &= SSO_HWGRP_IAQ_GRP_CNT_MASK;
+ if (reg != 0)
+ dev_warn(rvu->dev,
+ "SSO_AF_HWGRP[%d]_IAQ_THR is %lld expected 0", lf,
+ reg);
+ rvu_write64(rvu, blkaddr, SSO_AF_HWSX_INV(ssow_lf), 0x1);
+
+ rvu_write64(rvu, blkaddr, SSO_AF_BAR2_SEL, 0);
+ rvu_write64(rvu, ssow_blkaddr, SSOW_AF_BAR2_SEL, 0);
+}
+
+int rvu_sso_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot)
+{
+ u64 reg, add;
+ bool has_lsw;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ /* Read hardware capabilities */
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_CONST1);
+ has_lsw = !!(reg & SSO_AF_CONST1_LSW_PRESENT);
+
+ /* Enable BAR2 ALIAS for this pcifunc. */
+ reg = BIT_ULL(16) | pcifunc;
+ rvu_write64(rvu, blkaddr, SSO_AF_BAR2_SEL, reg);
+
+ rvu_write64(rvu, blkaddr,
+ SSO_AF_BAR2_ALIASX(slot, SSO_LF_GGRP_INT_THR), 0x0);
+ rvu_write64(rvu, blkaddr,
+ SSO_AF_BAR2_ALIASX(slot, SSO_LF_GGRP_AQ_THR),
+ SSO_LF_GGRP_AQ_THR_MASK);
+
+ rvu_write64(rvu, blkaddr,
+ SSO_AF_BAR2_ALIASX(slot, SSO_LF_GGRP_INT),
+ SSO_LF_GGRP_INT_MASK);
+ rvu_write64(rvu, blkaddr,
+ SSO_AF_BAR2_ALIASX(slot, SSO_LF_GGRP_INT_ENA_W1C),
+ SSO_LF_GGRP_INT_MASK);
+
+ rvu_write64(rvu, blkaddr, SSO_AF_BAR2_SEL, 0x0);
+
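+	/* If the latched unmap error info still refers to this pcifunc,
+	 * clear the corresponding ERR0/ERR2 summary bits so stale errors are
+	 * not reported against the next owner of the LF.
+	 */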
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_UNMAP_INFO);
+ if ((reg & 0xFFF) == pcifunc)
+ rvu_write64(rvu, blkaddr, SSO_AF_ERR0, SSO_AF_ERR0_MASK);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_UNMAP_INFO2);
+ if ((reg & 0xFFF) == pcifunc)
+ rvu_write64(rvu, blkaddr, SSO_AF_ERR2, SSO_AF_ERR2_MASK);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_UNMAP_INFO3);
+ if ((reg & 0xFFF) == pcifunc)
+ rvu_write64(rvu, blkaddr, SSO_AF_ERR2, SSO_AF_ERR2_MASK);
+
+ rvu_write64(rvu, blkaddr, SSO_AF_POISONX(lf / 64), lf % 64);
+ rvu_write64(rvu, blkaddr, SSO_AF_IU_ACCNTX_RST(lf), 0x1);
+
+ rvu_write64(rvu, blkaddr, SSO_AF_ERR0, ~0ULL);
+ /* Re-enable error reporting once we're finished */
+ rvu_write64(rvu, blkaddr, SSO_AF_ERR0_ENA_W1S, ~0ULL);
+
+ /* HRM 14.13.4 (13) */
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_AW_STATUS(lf), 0x0);
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_AW_CFG(lf),
+ SSO_HWGRP_AW_CFG_LDWB | SSO_HWGRP_AW_CFG_LDT |
+ SSO_HWGRP_AW_CFG_STT);
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_XAQ_AURA(lf), 0x0);
+ rvu_write64(rvu, blkaddr, SSO_AF_XAQX_GMCTL(lf), 0x0);
+ reg = (SSO_HWGRP_PRI_AFF_MASK << SSO_HWGRP_PRI_AFF_SHIFT) |
+ (SSO_HWGRP_PRI_WGT_MASK << SSO_HWGRP_PRI_WGT_SHIFT) |
+ (0x1 << SSO_HWGRP_PRI_WGT_SHIFT);
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_PRI(lf), reg);
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_WS_PC(lf), 0x0);
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_EXT_PC(lf), 0x0);
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_WA_PC(lf), 0x0);
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_TS_PC(lf), 0x0);
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_DS_PC(lf), 0x0);
+ if (has_lsw)
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_LS_PC(lf), 0x0);
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_XAQ_LIMIT(lf), 0x0);
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_IU_ACCNT(lf), 0x0);
+
+	/* The delta between the current and default thresholds
+	 * needs to be returned to the SSO.
+	 */
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_IAQ_THR(lf)) &
+ SSO_HWGRP_IAQ_RSVD_THR_MASK;
+ add = SSO_HWGRP_IAQ_RSVD_THR - reg;
+ reg = (SSO_HWGRP_IAQ_MAX_THR_MASK << SSO_HWGRP_IAQ_MAX_THR_SHIFT) |
+ SSO_HWGRP_IAQ_RSVD_THR;
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_IAQ_THR(lf), reg);
+
+ if (add)
+ rvu_write64(rvu, blkaddr, SSO_AF_AW_ADD,
+ (add & SSO_AF_AW_ADD_RSVD_FREE_MASK) <<
+ SSO_AF_AW_ADD_RSVD_FREE_SHIFT);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_TAQ_THR(lf)) &
+ SSO_HWGRP_TAQ_RSVD_THR_MASK;
+ add = SSO_HWGRP_TAQ_RSVD_THR - reg;
+ reg = (SSO_HWGRP_TAQ_MAX_THR_MASK << SSO_HWGRP_TAQ_MAX_THR_SHIFT) |
+ SSO_HWGRP_TAQ_RSVD_THR;
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_TAQ_THR(lf), reg);
+ if (add)
+ rvu_write64(rvu, blkaddr, SSO_AF_TAQ_ADD,
+ (add & SSO_AF_TAQ_RSVD_FREE_MASK) <<
+ SSO_AF_TAQ_ADD_RSVD_FREE_SHIFT);
+
+ rvu_write64(rvu, blkaddr, SSO_AF_XAQX_HEAD_PTR(lf), 0x0);
+ rvu_write64(rvu, blkaddr, SSO_AF_XAQX_TAIL_PTR(lf), 0x0);
+ rvu_write64(rvu, blkaddr, SSO_AF_XAQX_HEAD_NEXT(lf), 0x0);
+ rvu_write64(rvu, blkaddr, SSO_AF_XAQX_TAIL_NEXT(lf), 0x0);
+
+ return 0;
+}
+
+int rvu_ssow_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot)
+{
+ struct sso_rsrc *sso = &rvu->hw->sso;
+ bool has_prefetch, has_lsw;
+ int blkaddr, ssow_blkaddr;
+ u64 reg, grpmsk;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return SSOW_AF_ERR_LF_INVALID;
+
+ ssow_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSOW, 0);
+ if (ssow_blkaddr < 0)
+ return SSOW_AF_ERR_LF_INVALID;
+
+ /* Read hardware capabilities */
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_CONST1);
+ has_lsw = !!(reg & SSO_AF_CONST1_LSW_PRESENT);
+ has_prefetch = !!(reg & SSO_AF_CONST1_PRF_PRESENT);
+
+ /* Enable BAR2 alias access. */
+ reg = BIT_ULL(16) | pcifunc;
+ rvu_write64(rvu, ssow_blkaddr, SSOW_AF_BAR2_SEL, reg);
+
+ /* Ignore all interrupts */
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_INT_ENA_W1C),
+ SSOW_LF_GWS_INT_MASK);
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_INT),
+ SSOW_LF_GWS_INT_MASK);
+
+ if (has_lsw)
+ rvu_write64(rvu, blkaddr, SSO_AF_HWSX_LSW_CFG(lf), 0x0);
+
+	/* Make sure that all the in-flights are complete before invalidating. */
+ mb();
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(slot, SSOW_LF_GWS_OP_GWC_INVAL), 0x0);
+ /* HRM 14.13.4 (3) */
+ /* Wait till waitw/desched completes. */
+ rvu_poll_reg(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(slot, SSOW_LF_GWS_PENDSTATE),
+ SSOW_LF_GWS_TAG_PEND_GET_WORK |
+ SSOW_LF_GWS_TAG_PEND_DESCHED, true);
+
+ reg = rvu_read64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(slot, SSOW_LF_GWS_TAG));
+ /* Switch Tag Pending */
+ if (reg & SSOW_LF_GWS_TAG_PEND_SWITCH)
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(slot, SSOW_LF_GWS_OP_DESCHED),
+ 0x0);
+ /* Tag Type != EMPTY use swtag_flush to release tag-chain. */
+ else if (((reg >> 32) & SSO_TT_EMPTY) != SSO_TT_EMPTY)
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(slot,
+ SSOW_LF_GWS_OP_SWTAG_FLUSH),
+ 0x0);
+
+ /* Wait for desched to complete. */
+ rvu_poll_reg(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(slot, SSOW_LF_GWS_PENDSTATE),
+ SSOW_LF_GWS_TAG_PEND_DESCHED, true);
+
+ if (has_prefetch)
+ rvu_ssow_clean_prefetch(rvu, slot);
+
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_NW_TIM), 0x0);
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_OP_GWC_INVAL), 0x0);
+
+ /* set SAI_INVAL bit */
+ rvu_write64(rvu, blkaddr, SSO_AF_HWSX_INV(lf), 0x1);
+ rvu_write64(rvu, blkaddr, SSO_AF_HWSX_ARB(lf), 0x0);
+ rvu_write64(rvu, blkaddr, SSO_AF_HWSX_GMCTL(lf), 0x0);
+
+ /* Unset the HWS Hardware Group Mask. */
+ for (grpmsk = 0; grpmsk < (sso->sso_hwgrps / 64); grpmsk++) {
+ rvu_write64(rvu, blkaddr,
+ SSO_AF_HWSX_SX_GRPMSKX(lf, 0, grpmsk),
+ 0x0);
+ rvu_write64(rvu, blkaddr,
+ SSO_AF_HWSX_SX_GRPMSKX(lf, 1, grpmsk),
+ 0x0);
+ }
+
+ rvu_write64(rvu, ssow_blkaddr, SSOW_AF_BAR2_SEL, 0x0);
+
+ return 0;
+}
+
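+/* Poll NPA_LF_AURA_OP_CNT through the BAR2 alias using LDADD until the
+ * aura count drops to zero (or the result flags bit 42), giving up after
+ * roughly 20ms; one extra check after the timeout covers the case where
+ * this CPU was scheduled out past the deadline.
+ */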
+int rvu_sso_poll_aura_cnt(struct rvu *rvu, int npa_blkaddr, int aura)
+{
+ unsigned long timeout = jiffies + usecs_to_jiffies(20000);
+ bool twice = false;
+ u64 __iomem *addr;
+ u64 res, wdata;
+
+ wdata = (u64)aura << 44;
+ addr = rvu->afreg_base + ((npa_blkaddr << 28) |
+ NPA_AF_BAR2_ALIASX(0, NPA_LF_AURA_OP_CNT));
+again:
+ rvu_sso_ldadd(res, wdata, addr);
+ if (res & BIT_ULL(42))
+ return 0;
+ if (!(res & 0xFFFFFFFFF))
+ return 0;
+ if (time_before(jiffies, timeout)) {
+ usleep_range(1, 5);
+ goto again;
+ }
+	/* If the CPU was scheduled out before the 'time_before' check above
+	 * and resumed only after jiffies passed the timeout, check once more
+	 * whether the HW completed the operation in the meantime.
+	 */
+ if (!twice) {
+ twice = true;
+ goto again;
+ }
+ return -EBUSY;
+}
+
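+/* Stop XAQ allocation for this HWGRP, wait for in-flight buffers and NPA
+ * fetches to drain, then return any still-valid tail/tail-next pointers
+ * to the XAQ aura with a store-pair free before clearing them.
+ */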
+void rvu_sso_deinit_xaq_aura(struct rvu *rvu, int blkaddr, int npa_blkaddr,
+ int aura, int lf)
+{
+ void *free_addr;
+ u64 reg;
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_CONST1);
+ free_addr = rvu->afreg_base + ((npa_blkaddr << 28) |
+ NPA_AF_BAR2_ALIASX(0, NPA_LF_AURA_OP_FREE0));
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_AW_CFG(lf));
+ reg &= ~SSO_HWGRP_AW_CFG_RWEN;
+ reg |= SSO_HWGRP_AW_CFG_XAQ_ALLOC_DIS;
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_AW_CFG(lf), reg);
+
+ rvu_poll_reg(rvu, blkaddr, SSO_AF_HWGRPX_AW_STATUS(lf),
+ SSO_HWGRP_AW_STS_XAQ_BUFSC_MASK, true);
+ rvu_poll_reg(rvu, blkaddr, SSO_AF_HWGRPX_AW_STATUS(lf),
+ SSO_HWGRP_AW_STS_NPA_FETCH, true);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_AW_STATUS(lf));
+ if (reg & SSO_HWGRP_AW_STS_TPTR_NEXT_VLD) {
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_XAQX_TAIL_NEXT(lf));
+ reg &= ~0x7F;
+ if (npa_blkaddr && reg)
+ rvu_sso_store_pair(reg, (u64)aura, free_addr);
+
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_AW_STATUS(lf),
+ SSO_HWGRP_AW_STS_TPTR_NEXT_VLD);
+
+ rvu_write64(rvu, blkaddr, SSO_AF_XAQX_TAIL_NEXT(lf), 0x0);
+ }
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_AW_STATUS(lf));
+ if (reg & SSO_HWGRP_AW_STS_TPTR_VLD) {
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_XAQX_TAIL_PTR(lf));
+ reg &= ~0x7F;
+ if (npa_blkaddr && reg)
+ rvu_sso_store_pair(reg, (u64)aura, free_addr);
+
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_AW_STATUS(lf),
+ SSO_HWGRP_AW_STS_TPTR_VLD);
+
+ rvu_write64(rvu, blkaddr, SSO_AF_XAQX_TAIL_PTR(lf), 0x0);
+ }
+}
+
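+/* Release the XAQ buffers of all HWGRPs owned by this pcifunc: look up
+ * the NPA LF bound via SSO_AF_XAQ_GMCTL, free each HWGRP's outstanding
+ * XAQ pointers back to the aura, then wait for the aura count to drain
+ * before clearing the per-LF aura and GMCTL mappings.
+ */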
+int rvu_sso_cleanup_xaq_aura(struct rvu *rvu, u16 pcifunc, int nb_hwgrps)
+{
+ int hwgrp, lf, blkaddr, npa_blkaddr, npa_pcifunc, aura, err;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u64 reg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, pcifunc);
+ if (blkaddr < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ lf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+ if (lf < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_XAQX_GMCTL(lf));
+ npa_pcifunc = reg & 0xFFFF;
+ npa_blkaddr = 0;
+
+ if (npa_pcifunc) {
+ npa_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, npa_pcifunc);
+		if (npa_blkaddr < 0)
+ return SSO_AF_INVAL_NPA_PF_FUNC;
+
+ reg = BIT_ULL(16) | npa_pcifunc;
+ rvu_write64(rvu, npa_blkaddr, NPA_AF_BAR2_SEL, reg);
+ aura = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_XAQ_AURA(lf));
+ }
+
+ for (hwgrp = 0; hwgrp < nb_hwgrps; hwgrp++) {
+ lf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, hwgrp);
+ if (lf < 0) {
+ err = SSO_AF_ERR_LF_INVALID;
+ goto fail;
+ }
+
+ rvu_sso_deinit_xaq_aura(rvu, blkaddr, npa_blkaddr, aura, lf);
+ /* disable XAQ */
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_AW_CFG(lf),
+ SSO_HWGRP_AW_CFG_LDWB | SSO_HWGRP_AW_CFG_LDT |
+ SSO_HWGRP_AW_CFG_STT);
+ }
+
+ if (npa_pcifunc) {
+ err = rvu_sso_poll_aura_cnt(rvu, npa_blkaddr, aura);
+ if (err)
+			dev_err(rvu->dev, "[%d] Failed to free XAQs to aura[%d]\n",
+ __LINE__, aura);
+ }
+
+	for (hwgrp = 0; hwgrp < nb_hwgrps; hwgrp++) {
+		lf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, hwgrp);
+		if (lf < 0)
+			continue;
+		rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_XAQ_AURA(lf), 0);
+		rvu_write64(rvu, blkaddr, SSO_AF_XAQX_GMCTL(lf), 0);
+	}
+ err = 0;
+fail:
+ if (npa_pcifunc)
+ rvu_write64(rvu, npa_blkaddr, NPA_AF_BAR2_SEL, 0x0);
+ return err;
+}
+
+int rvu_mbox_handler_sso_hw_release_xaq_aura(struct rvu *rvu,
+ struct sso_release_xaq *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+
+ return rvu_sso_cleanup_xaq_aura(rvu, pcifunc, req->hwgrps);
+}
+
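+/* Mailbox handler to bind an NPA aura to the SSO XAQ: validate the
+ * requested SSO<->NPA PF_FUNC mapping, release any previously attached
+ * XAQ buffers, then program the aura and GMCTL for every requested HWGRP
+ * and wait for the hardware to acknowledge XAQ pointer initialization.
+ */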
+int rvu_mbox_handler_sso_hw_setconfig(struct rvu *rvu,
+ struct sso_hw_setconfig *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int hwgrp, lf, err, blkaddr;
+ u32 npa_aura_id;
+ u64 reg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, pcifunc);
+ if (blkaddr < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ npa_aura_id = req->npa_aura_id;
+
+ /* Check if requested 'SSOLF <=> NPALF' mapping is valid */
+ if (req->npa_pf_func) {
+ /* If default, use 'this' SSOLF's PFFUNC */
+ if (req->npa_pf_func == RVU_DEFAULT_PF_FUNC)
+ req->npa_pf_func = pcifunc;
+ if (!is_pffunc_map_valid(rvu, req->npa_pf_func, BLKTYPE_NPA))
+ return SSO_AF_INVAL_NPA_PF_FUNC;
+ }
+
+ err = rvu_sso_cleanup_xaq_aura(rvu, pcifunc, req->hwgrps);
+ if (err < 0)
+ return err;
+
+ /* Initialize XAQ ring */
+ for (hwgrp = 0; hwgrp < req->hwgrps; hwgrp++) {
+ lf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, hwgrp);
+ if (lf < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_XAQ_AURA(lf),
+ npa_aura_id);
+ rvu_write64(rvu, blkaddr, SSO_AF_XAQX_GMCTL(lf),
+ req->npa_pf_func);
+
+ /* enable XAQ */
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_AW_CFG(lf), 0xF);
+
+ /* Wait for ggrp to ack. */
+ err = rvu_poll_reg(rvu, blkaddr,
+ SSO_AF_HWGRPX_AW_STATUS(lf),
+ SSO_HWGRP_AW_STS_INIT_STS, false);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_AW_STATUS(lf));
+ if (err || (reg & BIT_ULL(4)) || !(reg & BIT_ULL(8))) {
+ dev_warn(rvu->dev, "SSO_HWGRP(%d) XAQ NPA pointer initialization failed",
+ lf);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+int rvu_mbox_handler_sso_grp_set_priority(struct rvu *rvu,
+ struct sso_grp_priority *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int lf, blkaddr;
+ u64 regval;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, pcifunc);
+ if (blkaddr < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ regval = (((u64)(req->weight & SSO_HWGRP_PRI_WGT_MASK)
+ << SSO_HWGRP_PRI_WGT_SHIFT) |
+ ((u64)(req->affinity & SSO_HWGRP_PRI_AFF_MASK)
+ << SSO_HWGRP_PRI_AFF_SHIFT) |
+ (req->priority & SSO_HWGRP_PRI_MASK));
+
+ lf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, req->grp);
+ if (lf < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_PRI(lf), regval);
+
+ return 0;
+}
+
+int rvu_mbox_handler_sso_grp_get_priority(struct rvu *rvu,
+ struct sso_info_req *req,
+ struct sso_grp_priority *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int lf, blkaddr;
+ u64 regval;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, pcifunc);
+ if (blkaddr < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ lf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, req->grp);
+ if (lf < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ regval = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_PRI(lf));
+
+ rsp->weight = (regval >> SSO_HWGRP_PRI_WGT_SHIFT)
+ & SSO_HWGRP_PRI_WGT_MASK;
+ rsp->affinity = (regval >> SSO_HWGRP_PRI_AFF_SHIFT)
+ & SSO_HWGRP_PRI_AFF_MASK;
+ rsp->priority = regval & SSO_HWGRP_PRI_MASK;
+
+ return 0;
+}
+
+int rvu_mbox_handler_sso_grp_qos_config(struct rvu *rvu,
+ struct sso_grp_qos_cfg *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ u64 regval, grp_rsvd;
+ int lf, blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, pcifunc);
+ if (blkaddr < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ lf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, req->grp);
+ if (lf < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ /* Check if GGRP has been active. */
+ regval = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_WA_PC(lf));
+ if (regval)
+ return SSO_AF_ERR_GRP_EBUSY;
+
+	/* Configure XAQ threshold */
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_XAQ_LIMIT(lf), req->xaq_limit);
+
+	/* Configure TAQ threshold */
+ regval = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_TAQ_THR(lf));
+ grp_rsvd = regval & SSO_HWGRP_TAQ_RSVD_THR_MASK;
+ if (req->taq_thr < grp_rsvd)
+ req->taq_thr = grp_rsvd;
+
+ regval = req->taq_thr & SSO_HWGRP_TAQ_MAX_THR_MASK;
+ regval = (regval << SSO_HWGRP_TAQ_MAX_THR_SHIFT) | grp_rsvd;
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_TAQ_THR(lf), regval);
+
+	/* Configure IAQ threshold */
+ regval = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_IAQ_THR(lf));
+ grp_rsvd = regval & SSO_HWGRP_IAQ_RSVD_THR_MASK;
+ if (req->iaq_thr < grp_rsvd + 4)
+ req->iaq_thr = grp_rsvd + 4;
+
+ regval = req->iaq_thr & SSO_HWGRP_IAQ_MAX_THR_MASK;
+ regval = (regval << SSO_HWGRP_IAQ_MAX_THR_SHIFT) | grp_rsvd;
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_IAQ_THR(lf), regval);
+
+ return 0;
+}
+
+int rvu_mbox_handler_sso_grp_get_stats(struct rvu *rvu,
+ struct sso_info_req *req,
+ struct sso_grp_stats *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int lf, blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, pcifunc);
+ if (blkaddr < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ lf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, req->grp);
+ if (lf < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ rsp->ws_pc = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_WS_PC(lf));
+ rsp->ext_pc = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_EXT_PC(lf));
+ rsp->wa_pc = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_WA_PC(lf));
+ rsp->ts_pc = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_TS_PC(lf));
+ rsp->ds_pc = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_DS_PC(lf));
+ rsp->dq_pc = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_DQ_PC(lf));
+ rsp->aw_status = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_AW_STATUS(lf));
+ rsp->page_cnt = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_PAGE_CNT(lf));
+
+ return 0;
+}
+
+int rvu_mbox_handler_sso_hws_get_stats(struct rvu *rvu,
+ struct sso_info_req *req,
+ struct sso_hws_stats *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int lf, blkaddr, ssow_blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, pcifunc);
+ if (blkaddr < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ ssow_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSOW, pcifunc);
+ if (ssow_blkaddr < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ lf = rvu_get_lf(rvu, &hw->block[ssow_blkaddr], pcifunc, req->hws);
+ if (lf < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ rsp->arbitration = rvu_read64(rvu, blkaddr, SSO_AF_HWSX_ARB(lf));
+
+ return 0;
+}
+
+int rvu_mbox_handler_sso_lf_alloc(struct rvu *rvu, struct sso_lf_alloc_req *req,
+ struct sso_lf_alloc_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int ssolf, uniq_ident, rc = 0;
+ struct rvu_pfvf *pfvf;
+ int hwgrp, blkaddr;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, pcifunc);
+ if (pfvf->sso <= 0 || blkaddr < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ if (!pfvf->sso_uniq_ident) {
+ uniq_ident = rvu_alloc_rsrc(&hw->sso.pfvf_ident);
+ if (uniq_ident < 0) {
+ rc = SSO_AF_ERR_AF_LF_ALLOC;
+ goto exit;
+ }
+ pfvf->sso_uniq_ident = uniq_ident;
+ } else {
+ uniq_ident = pfvf->sso_uniq_ident;
+ }
+
+	/* Set threshold for the In-Unit Accounting Index */
+ rvu_write64(rvu, blkaddr, SSO_AF_IU_ACCNTX_CFG(uniq_ident),
+ SSO_AF_HWGRP_IU_ACCNT_MAX_THR << 16);
+
+ for (hwgrp = 0; hwgrp < req->hwgrps; hwgrp++) {
+ ssolf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, hwgrp);
+ if (ssolf < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+		/* All groups assigned to a single SR-IOV function must be
+		 * assigned the same unique in-unit accounting index.
+		 */
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_IU_ACCNT(ssolf),
+ 0x10000 | uniq_ident);
+
+ /* Assign unique tagspace */
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_AW_TAGSPACE(ssolf),
+ uniq_ident);
+ }
+
+exit:
+ rsp->xaq_buf_size = hw->sso.sso_xaq_buf_size;
+ rsp->xaq_wq_entries = hw->sso.sso_xaq_num_works;
+ rsp->in_unit_entries = hw->sso.sso_iue;
+ rsp->hwgrps = hw->sso.sso_hwgrps;
+ return rc;
+}
+
+int rvu_mbox_handler_sso_lf_free(struct rvu *rvu, struct sso_lf_free_req *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int hwgrp, lf, err, blkaddr;
+ struct rvu_pfvf *pfvf;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, pcifunc);
+ if (blkaddr < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ for (hwgrp = 0; hwgrp < req->hwgrps; hwgrp++) {
+ lf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, hwgrp);
+ if (lf < 0)
+ continue;
+ rvu_sso_lf_drain_queues(rvu, pcifunc, lf, hwgrp);
+ }
+ rvu_sso_cleanup_xaq_aura(rvu, pcifunc, req->hwgrps);
+
+ /* Perform reset of SSO HW GRPs */
+ for (hwgrp = 0; hwgrp < req->hwgrps; hwgrp++) {
+ lf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, hwgrp);
+ if (lf < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ err = rvu_sso_lf_teardown(rvu, pcifunc, lf, hwgrp);
+ if (err)
+ return err;
+
+ /* Reset this SSO LF */
+ err = rvu_lf_reset(rvu, &hw->block[blkaddr], lf);
+ if (err)
+ dev_err(rvu->dev, "SSO%d free: failed to reset\n", lf);
+ /* Reset the IAQ and TAQ thresholds */
+ rvu_sso_hwgrp_config_thresh(rvu, blkaddr, lf);
+ }
+
+ if (pfvf->sso_uniq_ident) {
+ rvu_free_rsrc(&hw->sso.pfvf_ident, pfvf->sso_uniq_ident);
+ pfvf->sso_uniq_ident = 0;
+ }
+
+ return 0;
+}
+
+int rvu_mbox_handler_sso_ws_cache_inv(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ int num_lfs, ssowlf, hws, blkaddr;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_block *block;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSOW, pcifunc);
+ if (blkaddr < 0)
+ return SSOW_AF_ERR_LF_INVALID;
+
+ block = &hw->block[blkaddr];
+
+ num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
+ block->addr);
+ if (!num_lfs)
+ return SSOW_AF_ERR_LF_INVALID;
+
+ /* SSO HWS invalidate registers are part of SSO AF */
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, pcifunc);
+ if (blkaddr < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ for (hws = 0; hws < num_lfs; hws++) {
+ ssowlf = rvu_get_lf(rvu, block, pcifunc, hws);
+ if (ssowlf < 0)
+ return SSOW_AF_ERR_LF_INVALID;
+
+ /* Reset this SSO LF GWS cache */
+ rvu_write64(rvu, blkaddr, SSO_AF_HWSX_INV(ssowlf), 1);
+ }
+
+ return 0;
+}
+
+int rvu_mbox_handler_ssow_chng_mship(struct rvu *rvu,
+ struct ssow_chng_mship *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int ssolf, ssowlf, hwgrp;
+ u8 pos, bit;
+ int blkaddr;
+ u64 reg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSOW, pcifunc);
+ if (blkaddr < 0)
+ return SSOW_AF_ERR_LF_INVALID;
+
+ ssowlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, req->hws);
+ if (ssowlf < 0)
+ return SSO_AF_ERR_PARAM;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, pcifunc);
+ if (blkaddr < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ for (hwgrp = 0; hwgrp < req->nb_hwgrps; hwgrp++) {
+ ssolf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc,
+ req->hwgrps[hwgrp]);
+ if (ssolf < 0)
+ return SSO_AF_ERR_PARAM;
+
+ if (req->set > 1)
+ return SSO_AF_ERR_PARAM;
+ pos = ssolf / 64;
+ bit = ssolf % 64;
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWSX_SX_GRPMSKX(ssowlf,
+ req->set,
+ pos));
+ if (req->enable)
+ reg |= BIT_ULL(bit);
+ else
+ reg &= ~BIT_ULL(bit);
+
+ rvu_write64(rvu, blkaddr, SSO_AF_HWSX_SX_GRPMSKX(ssowlf,
+ req->set,
+ pos), reg);
+ }
+
+ return 0;
+}
+
+int rvu_mbox_handler_ssow_lf_alloc(struct rvu *rvu,
+ struct ssow_lf_alloc_req *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ if (pfvf->ssow <= 0)
+ return SSOW_AF_ERR_LF_INVALID;
+
+ return 0;
+}
+
+int rvu_mbox_handler_ssow_lf_free(struct rvu *rvu,
+ struct ssow_lf_free_req *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int ssowlf, hws, err, blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSOW, pcifunc);
+ if (blkaddr < 0)
+ return SSOW_AF_ERR_LF_INVALID;
+
+ for (hws = 0; hws < req->hws; hws++) {
+ ssowlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, hws);
+ if (ssowlf < 0)
+ return SSOW_AF_ERR_LF_INVALID;
+
+ err = rvu_ssow_lf_teardown(rvu, pcifunc, ssowlf, hws);
+ if (err)
+ return err;
+
+ /* Reset this SSO LF */
+ err = rvu_lf_reset(rvu, &hw->block[blkaddr], ssowlf);
+ if (err)
+ dev_err(rvu->dev, "SSOW%d free: failed to reset\n",
+ ssowlf);
+ }
+
+ return 0;
+}
+
+int rvu_mbox_handler_ssow_config_lsw(struct rvu *rvu,
+ struct ssow_config_lsw *req,
+ struct msg_rsp *rsp)
+{
+ int num_lfs, ssowlf, hws, blkaddr;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_block *block;
+ bool has_lsw;
+ u64 val;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSOW, pcifunc);
+ if (blkaddr < 0)
+ return SSOW_AF_ERR_LF_INVALID;
+
+ block = &hw->block[blkaddr];
+
+ num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
+ block->addr);
+ if (!num_lfs)
+ return SSOW_AF_ERR_LF_INVALID;
+
+ /* SSO HWS LSW config registers are part of SSO AF */
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, pcifunc);
+ if (blkaddr < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ val = rvu_read64(rvu, blkaddr, SSO_AF_CONST1);
+ has_lsw = !!(val & SSO_AF_CONST1_LSW_PRESENT);
+
+ if (!has_lsw || req->lsw_mode > SSOW_LSW_GW_IMM ||
+ req->wqe_release > SSOW_WQE_REL_IMM)
+ return SSOW_AF_ERR_INVALID_CFG;
+
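+	/* Program each HWS: the LSW mode occupies the low bits of
+	 * SSO_AF_HWS_LSW_CFG and the WQE release policy starts at bit 2.
+	 */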
+ for (hws = 0; hws < num_lfs; hws++) {
+ ssowlf = rvu_get_lf(rvu, block, pcifunc, hws);
+ if (ssowlf < 0)
+ return SSOW_AF_ERR_LF_INVALID;
+ val = req->wqe_release << 2;
+ val |= req->lsw_mode;
+ rvu_write64(rvu, blkaddr, SSO_AF_HWSX_LSW_CFG(ssowlf), val);
+ }
+
+ return 0;
+}
+
+static int rvu_sso_do_register_interrupt(struct rvu *rvu, int irq_offs,
+ irq_handler_t handler,
+ const char *name)
+{
+ int ret = 0;
+
+ ret = request_irq(pci_irq_vector(rvu->pdev, irq_offs), handler, 0,
+ name, rvu);
+ if (ret) {
+ dev_err(rvu->dev, "SSOAF: %s irq registration failed", name);
+ goto err;
+ }
+
+ WARN_ON(rvu->irq_allocated[irq_offs]);
+ rvu->irq_allocated[irq_offs] = true;
+err:
+ return ret;
+}
+
+static irqreturn_t rvu_sso_af_err0_intr_handler(int irq, void *ptr)
+{
+ struct rvu *rvu = (struct rvu *)ptr;
+ struct rvu_block *block;
+ int i, blkaddr;
+ u64 reg, reg0;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ block = &rvu->hw->block[blkaddr];
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_ERR0);
+ dev_err_ratelimited(rvu->dev, "Received SSO_AF_ERR0 irq : 0x%llx", reg);
+
+ if (reg & BIT_ULL(15)) {
+ dev_err_ratelimited(rvu->dev, "Received Bad-fill-packet NCB error");
+ SSO_AF_INT_DIGEST_PRNT(SSO_AF_POISON)
+ }
+
+ if (reg & BIT_ULL(14)) {
+ dev_err_ratelimited(rvu->dev, "An FLR was initiated, but SSO_LF_GGRP_AQ_CNT[AQ_CNT] != 0");
+ SSO_AF_INT_DIGEST_PRNT(SSO_AF_FLR_AQ_DIGEST)
+ }
+
+ if (reg & BIT_ULL(13)) {
+ dev_err_ratelimited(rvu->dev, "Add work dropped due to XAQ pointers not yet initialized.");
+ SSO_AF_INT_DIGEST_PRNT(SSO_AF_XAQDIS_DIGEST)
+ }
+
+ if (reg & (0xF << 9)) {
+ dev_err_ratelimited(rvu->dev, "PF_FUNC mapping error.");
+ dev_err_ratelimited(rvu->dev, "SSO_AF_UNMAP_INFO : 0x%llx",
+ rvu_read64(rvu, blkaddr, SSO_AF_UNMAP_INFO));
+ }
+
+ if (reg & BIT_ULL(8)) {
+ dev_err_ratelimited(rvu->dev, "Add work dropped due to QTL being disabled, 0x0");
+ SSO_AF_INT_DIGEST_PRNT(SSO_AF_QCTLDIS_DIGEST)
+ }
+
+ if (reg & BIT_ULL(7)) {
+ dev_err_ratelimited(rvu->dev, "Add work dropped due to WQP being 0x0");
+ SSO_AF_INT_DIGEST_PRNT(SSO_AF_WQP0_DIGEST)
+ }
+
+ if (reg & BIT_ULL(6))
+ dev_err_ratelimited(rvu->dev, "Add work dropped due to 64 bit write");
+
+ if (reg & BIT_ULL(5))
+ dev_err_ratelimited(rvu->dev, "Set when received add work with tag type is specified as EMPTY");
+
+ if (reg & BIT_ULL(4)) {
+ dev_err_ratelimited(rvu->dev, "Add work to disabled hardware group. An ADDWQ was received and dropped to a hardware group with SSO_AF_HWGRP(0..255)_IAQ_THR[RSVD_THR] = 0.");
+ SSO_AF_INT_DIGEST_PRNT(SSO_AF_GRPDIS_DIGEST)
+ }
+
+ if (reg & BIT_ULL(3)) {
+ dev_err_ratelimited(rvu->dev, "Bad-fill-packet NCB error");
+ SSO_AF_INT_DIGEST_PRNT(SSO_AF_BFPN_DIGEST)
+ }
+
+ if (reg & BIT_ULL(2)) {
+ dev_err_ratelimited(rvu->dev, "Bad-fill-packet error.");
+ SSO_AF_INT_DIGEST_PRNT(SSO_AF_BFP_DIGEST)
+ }
+
+ if (reg & BIT_ULL(1)) {
+ dev_err_ratelimited(rvu->dev, "The NPA returned an error indication");
+ SSO_AF_INT_DIGEST_PRNT(SSO_AF_NPA_DIGEST)
+ }
+
+ rvu_write64(rvu, blkaddr, SSO_AF_ERR0, reg);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rvu_sso_af_err2_intr_handler(int irq, void *ptr)
+{
+ struct rvu *rvu = (struct rvu *)ptr;
+ int blkaddr;
+ u64 reg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_ERR2);
+ dev_err_ratelimited(rvu->dev, "received SSO_AF_ERR2 irq : 0x%llx", reg);
+ rvu_write64(rvu, blkaddr, SSO_AF_ERR2, reg);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rvu_sso_af_ras_intr_handler(int irq, void *ptr)
+{
+ struct rvu *rvu = (struct rvu *)ptr;
+ struct rvu_block *block;
+ int i, blkaddr;
+ u64 reg, reg0;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ block = &rvu->hw->block[blkaddr];
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_RAS);
+ dev_err_ratelimited(rvu->dev, "received SSO_AF_RAS irq : 0x%llx", reg);
+ rvu_write64(rvu, blkaddr, SSO_AF_RAS, reg);
+ SSO_AF_INT_DIGEST_PRNT(SSO_AF_POISON)
+
+ return IRQ_HANDLED;
+}
+
+void rvu_sso_unregister_interrupts(struct rvu *rvu)
+{
+ int i, blkaddr, offs;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return;
+
+ offs = rvu_read64(rvu, blkaddr, SSO_PRIV_AF_INT_CFG) & 0x7FF;
+ if (!offs)
+ return;
+
+ rvu_write64(rvu, blkaddr, SSO_AF_RAS_ENA_W1C, ~0ULL);
+ rvu_write64(rvu, blkaddr, SSO_AF_ERR2_ENA_W1C, ~0ULL);
+ rvu_write64(rvu, blkaddr, SSO_AF_ERR0_ENA_W1C, ~0ULL);
+
+ for (i = 0; i < SSO_AF_INT_VEC_CNT; i++)
+ if (rvu->irq_allocated[offs + i]) {
+ free_irq(pci_irq_vector(rvu->pdev, offs + i), rvu);
+ rvu->irq_allocated[offs + i] = false;
+ }
+}
+
+int rvu_sso_register_interrupts(struct rvu *rvu)
+{
+ int blkaddr, offs, ret = 0;
+
+ if (!is_block_implemented(rvu->hw, BLKADDR_SSO))
+ return 0;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ offs = rvu_read64(rvu, blkaddr, SSO_PRIV_AF_INT_CFG) & 0x7FF;
+ if (!offs) {
+ dev_warn(rvu->dev,
+ "Failed to get SSO_AF_INT vector offsets\n");
+ return 0;
+ }
+
+ ret = rvu_sso_do_register_interrupt(rvu, offs + SSO_AF_INT_VEC_ERR0,
+ rvu_sso_af_err0_intr_handler,
+ "SSO_AF_ERR0");
+ if (ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, SSO_AF_ERR0_ENA_W1S, ~0ULL);
+
+ ret = rvu_sso_do_register_interrupt(rvu, offs + SSO_AF_INT_VEC_ERR2,
+ rvu_sso_af_err2_intr_handler,
+ "SSO_AF_ERR2");
+ if (ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, SSO_AF_ERR2_ENA_W1S, ~0ULL);
+
+ ret = rvu_sso_do_register_interrupt(rvu, offs + SSO_AF_INT_VEC_RAS,
+ rvu_sso_af_ras_intr_handler,
+ "SSO_AF_RAS");
+ if (ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, SSO_AF_RAS_ENA_W1S, ~0ULL);
+
+ return 0;
+err:
+ rvu_sso_unregister_interrupts(rvu);
+ return ret;
+}
+
+int rvu_sso_init(struct rvu *rvu)
+{
+ u64 iaq_free_cnt, iaq_rsvd, iaq_max, iaq_rsvd_cnt = 0;
+ u64 taq_free_cnt, taq_rsvd, taq_max, taq_rsvd_cnt = 0;
+ struct sso_rsrc *sso = &rvu->hw->sso;
+ int blkaddr, hwgrp, grpmsk, hws, err;
+ u64 reg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return 0;
+
+ if (!is_rvu_otx2(rvu))
+ rvu_sso_block_cn10k_init(rvu, blkaddr);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_CONST);
+ /* number of SSO hardware work slots */
+ sso->sso_hws = (reg >> 56) & 0xFF;
+ /* number of SSO hardware groups */
+ sso->sso_hwgrps = (reg & 0xFFFF);
+ /* number of SSO In-Unit entries */
+ sso->sso_iue = (reg >> 16) & 0xFFFF;
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_CONST1);
+ /* number of work entries in external admission queue (XAQ) */
+ sso->sso_xaq_num_works = (reg >> 16) & 0xFFFF;
+ /* number of bytes in a XAQ buffer */
+ sso->sso_xaq_buf_size = (reg & 0xFFFF);
+
+ /* Configure IAQ entries */
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_AW_WE);
+ iaq_free_cnt = reg & SSO_AF_IAQ_FREE_CNT_MASK;
+
+ /* Give out half of buffers fairly, rest left floating */
+ iaq_rsvd = iaq_free_cnt / sso->sso_hwgrps / 2;
+
+ /* Enforce minimum per hardware requirements */
+ if (iaq_rsvd < SSO_HWGRP_IAQ_RSVD_THR)
+ iaq_rsvd = SSO_HWGRP_IAQ_RSVD_THR;
+	/* To ensure full streaming performance, IAQ max should be at least 208. */
+ iaq_max = iaq_rsvd + SSO_HWGRP_IAQ_MAX_THR_STRM_PERF;
+
+ if (iaq_max >= (SSO_AF_IAQ_FREE_CNT_MAX + 1))
+ iaq_max = SSO_AF_IAQ_FREE_CNT_MAX;
+
+ /* Configure TAQ entries */
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_TAQ_CNT);
+ taq_free_cnt = reg & SSO_AF_TAQ_FREE_CNT_MASK;
+
+ /* Give out half of buffers fairly, rest left floating */
+ taq_rsvd = taq_free_cnt / sso->sso_hwgrps / 2;
+
+ /* Enforce minimum per hardware requirements */
+ if (taq_rsvd < SSO_HWGRP_TAQ_RSVD_THR)
+ taq_rsvd = SSO_HWGRP_TAQ_RSVD_THR;
+	/* To ensure full streaming performance, TAQ max should be at least 16. */
+ taq_max = taq_rsvd + SSO_HWGRP_TAQ_MAX_THR_STRM_PERF;
+
+ if (taq_max >= (SSO_AF_TAQ_FREE_CNT_MAX + 1))
+ taq_max = SSO_AF_TAQ_FREE_CNT_MAX;
+
+ /* Save thresholds to reprogram HWGRPs on reset */
+ sso->iaq_rsvd = iaq_rsvd;
+ sso->iaq_max = iaq_max;
+ sso->taq_rsvd = taq_rsvd;
+ sso->taq_max = taq_max;
+
+ for (hwgrp = 0; hwgrp < sso->sso_hwgrps; hwgrp++) {
+ rvu_sso_hwgrp_config_thresh(rvu, blkaddr, hwgrp);
+ iaq_rsvd_cnt += iaq_rsvd;
+ taq_rsvd_cnt += taq_rsvd;
+ }
+
+ /* Verify SSO_AW_WE[RSVD_FREE], TAQ_CNT[RSVD_FREE] are greater than
+	 * or equal to sum of IAQ[RSVD_THR], TAQ[RSVD_THR] fields.
+ */
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_AW_WE);
+ reg = (reg >> SSO_AF_IAQ_RSVD_FREE_SHIFT) & SSO_AF_IAQ_RSVD_FREE_MASK;
+ if (reg < iaq_rsvd_cnt) {
+ dev_warn(rvu->dev, "WARN: Wrong IAQ resource calculations %llx vs %llx\n",
+ reg, iaq_rsvd_cnt);
+ rvu_write64(rvu, blkaddr, SSO_AF_AW_WE,
+ (iaq_rsvd_cnt & SSO_AF_IAQ_RSVD_FREE_MASK) <<
+ SSO_AF_IAQ_RSVD_FREE_SHIFT);
+ }
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_TAQ_CNT);
+ reg = (reg >> SSO_AF_TAQ_RSVD_FREE_SHIFT) & SSO_AF_TAQ_RSVD_FREE_MASK;
+ if (reg < taq_rsvd_cnt) {
+ dev_warn(rvu->dev, "WARN: Wrong TAQ resource calculations %llx vs %llx\n",
+ reg, taq_rsvd_cnt);
+ rvu_write64(rvu, blkaddr, SSO_AF_TAQ_CNT,
+ (taq_rsvd_cnt & SSO_AF_TAQ_RSVD_FREE_MASK) <<
+ SSO_AF_TAQ_RSVD_FREE_SHIFT);
+ }
+
+ /* Unset the HWS Hardware Group Mask.
+ * The hardware group mask should be set by PF/VF
+ * using SSOW_LF_GWS_GRPMSK_CHG based on the LF allocations.
+ */
+ for (grpmsk = 0; grpmsk < (sso->sso_hwgrps / 64); grpmsk++) {
+ for (hws = 0; hws < sso->sso_hws; hws++) {
+ rvu_write64(rvu, blkaddr,
+ SSO_AF_HWSX_SX_GRPMSKX(hws, 0, grpmsk),
+ 0x0);
+ rvu_write64(rvu, blkaddr,
+ SSO_AF_HWSX_SX_GRPMSKX(hws, 1, grpmsk),
+ 0x0);
+ }
+ }
+
+	/* Allocate SSO_AF_CONST::HWS + 1, as the total number of PFs/VFs is
+	 * limited by the number of HWS available.
+ */
+ sso->pfvf_ident.max = sso->sso_hws + 1;
+ err = rvu_alloc_bitmap(&sso->pfvf_ident);
+ if (err)
+ return err;
+
+ /* Reserve one bit so that identifier starts from 1 */
+ rvu_alloc_rsrc(&sso->pfvf_ident);
+
+	/* Enable the SSO time counter by default with a period of 10us */
+ rvu_write64(rvu, blkaddr, SSO_AF_WQ_INT_PC, 0x28UL << 8);
+
+ return 0;
+}
+
+void rvu_sso_freemem(struct rvu *rvu)
+{
+ struct sso_rsrc *sso = &rvu->hw->sso;
+
+ kfree(sso->pfvf_ident.bmap);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
index a3ecb5de9000..d4ce01b46010 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
@@ -1,11 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Marvell OcteonTx2 RVU Admin Function driver
+/* Marvell RVU Admin Function driver
*
- * Copyright (C) 2018 Marvell International Ltd.
+ * Copyright (C) 2018 Marvell.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef RVU_STRUCT_H
@@ -14,24 +11,31 @@
/* RVU Block revision IDs */
#define RVU_BLK_RVUM_REVID 0x01
+#define RVU_MULTI_BLK_VER 0x7ULL
+
/* RVU Block Address Enumeration */
enum rvu_block_addr_e {
- BLKADDR_RVUM = 0x0ULL,
- BLKADDR_LMT = 0x1ULL,
- BLKADDR_MSIX = 0x2ULL,
- BLKADDR_NPA = 0x3ULL,
- BLKADDR_NIX0 = 0x4ULL,
- BLKADDR_NIX1 = 0x5ULL,
- BLKADDR_NPC = 0x6ULL,
- BLKADDR_SSO = 0x7ULL,
- BLKADDR_SSOW = 0x8ULL,
- BLKADDR_TIM = 0x9ULL,
- BLKADDR_CPT0 = 0xaULL,
- BLKADDR_CPT1 = 0xbULL,
- BLKADDR_NDC_NIX0_RX = 0xcULL,
- BLKADDR_NDC_NIX0_TX = 0xdULL,
- BLKADDR_NDC_NPA0 = 0xeULL,
- BLK_COUNT = 0xfULL,
+ BLKADDR_RVUM = 0x0ULL,
+ BLKADDR_LMT = 0x1ULL,
+ BLKADDR_MSIX = 0x2ULL,
+ BLKADDR_NPA = 0x3ULL,
+ BLKADDR_NIX0 = 0x4ULL,
+ BLKADDR_NIX1 = 0x5ULL,
+ BLKADDR_NPC = 0x6ULL,
+ BLKADDR_SSO = 0x7ULL,
+ BLKADDR_SSOW = 0x8ULL,
+ BLKADDR_TIM = 0x9ULL,
+ BLKADDR_CPT0 = 0xaULL,
+ BLKADDR_CPT1 = 0xbULL,
+ BLKADDR_NDC_NIX0_RX = 0xcULL,
+ BLKADDR_NDC_NIX0_TX = 0xdULL,
+ BLKADDR_NDC_NPA0 = 0xeULL,
+ BLKADDR_NDC_NIX1_RX = 0x10ULL,
+ BLKADDR_NDC_NIX1_TX = 0x11ULL,
+ BLKADDR_REE0 = 0x14ULL,
+ BLKADDR_REE1 = 0x15ULL,
+ BLKADDR_APR = 0x16ULL,
+ BLK_COUNT = 0x17ULL,
};
/* RVU Block Type Enumeration */
@@ -47,7 +51,8 @@ enum rvu_block_type_e {
BLKTYPE_TIM = 0x8,
BLKTYPE_CPT = 0x9,
BLKTYPE_NDC = 0xa,
- BLKTYPE_MAX = 0xa,
+ BLKTYPE_REE = 0xe,
+ BLKTYPE_MAX = 0xe,
};
/* RVU Admin function Interrupt Vector Enumeration */
@@ -60,6 +65,60 @@ enum rvu_af_int_vec_e {
RVU_AF_INT_VEC_CNT = 0x5,
};
+/* SSO Admin function Interrupt Vector Enumeration */
+enum sso_af_int_vec_e {
+ SSO_AF_INT_VEC_ERR0 = 0x0,
+ SSO_AF_INT_VEC_ERR2 = 0x1,
+ SSO_AF_INT_VEC_RAS = 0x2,
+ SSO_AF_INT_VEC_CNT = 0x3,
+};
+
+/* CPT Admin function Interrupt Vector Enumeration */
+enum cpt_af_int_vec_e {
+ CPT_AF_INT_VEC_FLT0 = 0x0,
+ CPT_AF_INT_VEC_FLT1 = 0x1,
+ CPT_AF_INT_VEC_RVU = 0x2,
+ CPT_AF_INT_VEC_RAS = 0x3,
+ CPT_AF_INT_VEC_CNT = 0x4,
+};
+
+enum cpt_10k_af_int_vec_e {
+ CPT_10K_AF_INT_VEC_FLT0 = 0x0,
+ CPT_10K_AF_INT_VEC_FLT1 = 0x1,
+ CPT_10K_AF_INT_VEC_FLT2 = 0x2,
+ CPT_10K_AF_INT_VEC_RVU = 0x3,
+ CPT_10K_AF_INT_VEC_RAS = 0x4,
+ CPT_10K_AF_INT_VEC_CNT = 0x5,
+};
+/* REE Admin function Interrupt Vector Enumeration */
+enum ree_af_int_vec_e {
+ REE_AF_INT_VEC_RAS = 0x0,
+ REE_AF_INT_VEC_RVU = 0x1,
+ REE_AF_INT_VEC_QUE_DONE = 0x2,
+ REE_AF_INT_VEC_AQ = 0x3,
+ REE_AF_INT_VEC_CNT = 0x4,
+};
+
+/* NPA Admin function Interrupt Vector Enumeration */
+enum npa_af_int_vec_e {
+ NPA_AF_INT_VEC_RVU = 0x0,
+ NPA_AF_INT_VEC_GEN = 0x1,
+ NPA_AF_INT_VEC_AQ_DONE = 0x2,
+ NPA_AF_INT_VEC_AF_ERR = 0x3,
+ NPA_AF_INT_VEC_POISON = 0x4,
+ NPA_AF_INT_VEC_CNT = 0x5,
+};
+
+/* NIX Admin function Interrupt Vector Enumeration */
+enum nix_af_int_vec_e {
+ NIX_AF_INT_VEC_RVU = 0x0,
+ NIX_AF_INT_VEC_GEN = 0x1,
+ NIX_AF_INT_VEC_AQ_DONE = 0x2,
+ NIX_AF_INT_VEC_AF_ERR = 0x3,
+ NIX_AF_INT_VEC_POISON = 0x4,
+ NIX_AF_INT_VEC_CNT = 0x5,
+};
+
/**
* RVU PF Interrupt Vector Enumeration
*/
@@ -100,65 +159,44 @@ enum npa_aq_instop {
NPA_AQ_INSTOP_UNLOCK = 0x5,
};
+/* ALLOC/FREE input queues Enumeration from coprocessors */
+enum npa_inpq {
+ NPA_INPQ_NIX0_RX = 0x0,
+ NPA_INPQ_NIX0_TX = 0x1,
+ NPA_INPQ_NIX1_RX = 0x2,
+ NPA_INPQ_NIX1_TX = 0x3,
+ NPA_INPQ_SSO = 0x4,
+ NPA_INPQ_TIM = 0x5,
+ NPA_INPQ_DPI = 0x6,
+ NPA_INPQ_AURA_OP = 0xe,
+ NPA_INPQ_INTERNAL_RSV = 0xf,
+};
+
/* NPA admin queue instruction structure */
struct npa_aq_inst_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 doneint : 1; /* W0 */
- u64 reserved_44_62 : 19;
- u64 cindex : 20;
- u64 reserved_17_23 : 7;
- u64 lf : 9;
- u64 ctype : 4;
- u64 op : 4;
-#else
- u64 op : 4;
+ u64 op : 4; /* W0 */
u64 ctype : 4;
u64 lf : 9;
u64 reserved_17_23 : 7;
u64 cindex : 20;
u64 reserved_44_62 : 19;
u64 doneint : 1;
-#endif
u64 res_addr; /* W1 */
};
/* NPA admin queue result structure */
struct npa_aq_res_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_17_63 : 47; /* W0 */
- u64 doneint : 1;
- u64 compcode : 8;
- u64 ctype : 4;
- u64 op : 4;
-#else
- u64 op : 4;
+ u64 op : 4; /* W0 */
u64 ctype : 4;
u64 compcode : 8;
u64 doneint : 1;
u64 reserved_17_63 : 47;
-#endif
u64 reserved_64_127; /* W1 */
};
struct npa_aura_s {
u64 pool_addr; /* W0 */
-#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */
- u64 avg_level : 8;
- u64 reserved_118_119 : 2;
- u64 shift : 6;
- u64 aura_drop : 8;
- u64 reserved_98_103 : 6;
- u64 bp_ena : 2;
- u64 aura_drop_ena : 1;
- u64 pool_drop_ena : 1;
- u64 reserved_93 : 1;
- u64 avg_con : 9;
- u64 pool_way_mask : 16;
- u64 pool_caching : 1;
- u64 reserved_65 : 2;
- u64 ena : 1;
-#else
- u64 ena : 1;
+ u64 ena : 1; /* W1 */
u64 reserved_65 : 2;
u64 pool_caching : 1;
u64 pool_way_mask : 16;
@@ -172,59 +210,24 @@ struct npa_aura_s {
u64 shift : 6;
u64 reserved_118_119 : 2;
u64 avg_level : 8;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W2 */
- u64 reserved_189_191 : 3;
- u64 nix1_bpid : 9;
- u64 reserved_177_179 : 3;
- u64 nix0_bpid : 9;
- u64 reserved_164_167 : 4;
- u64 count : 36;
-#else
- u64 count : 36;
+ u64 count : 36; /* W2 */
u64 reserved_164_167 : 4;
u64 nix0_bpid : 9;
u64 reserved_177_179 : 3;
u64 nix1_bpid : 9;
u64 reserved_189_191 : 3;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W3 */
- u64 reserved_252_255 : 4;
- u64 fc_hyst_bits : 4;
- u64 fc_stype : 2;
- u64 fc_up_crossing : 1;
- u64 fc_ena : 1;
- u64 reserved_240_243 : 4;
- u64 bp : 8;
- u64 reserved_228_231 : 4;
- u64 limit : 36;
-#else
- u64 limit : 36;
+ u64 limit : 36; /* W3 */
u64 reserved_228_231 : 4;
u64 bp : 8;
- u64 reserved_240_243 : 4;
+ u64 reserved_241_243 : 3;
+ u64 fc_be : 1;
u64 fc_ena : 1;
u64 fc_up_crossing : 1;
u64 fc_stype : 2;
u64 fc_hyst_bits : 4;
u64 reserved_252_255 : 4;
-#endif
u64 fc_addr; /* W4 */
-#if defined(__BIG_ENDIAN_BITFIELD) /* W5 */
- u64 reserved_379_383 : 5;
- u64 err_qint_idx : 7;
- u64 reserved_371 : 1;
- u64 thresh_qint_idx : 7;
- u64 reserved_363 : 1;
- u64 thresh_up : 1;
- u64 thresh_int_ena : 1;
- u64 thresh_int : 1;
- u64 err_int_ena : 8;
- u64 err_int : 8;
- u64 update_time : 16;
- u64 pool_drop : 8;
-#else
- u64 pool_drop : 8;
+ u64 pool_drop : 8; /* W5 */
u64 update_time : 16;
u64 err_int : 8;
u64 err_int_ena : 8;
@@ -236,31 +239,15 @@ struct npa_aura_s {
u64 reserved_371 : 1;
u64 err_qint_idx : 7;
u64 reserved_379_383 : 5;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W6 */
- u64 reserved_420_447 : 28;
- u64 thresh : 36;
-#else
- u64 thresh : 36;
- u64 reserved_420_447 : 28;
-#endif
+ u64 thresh : 36; /* W6*/
+ u64 rsvd_423_420 : 4;
+ u64 fc_msh_dst : 11;
+ u64 reserved_435_447 : 13;
u64 reserved_448_511; /* W7 */
};
struct npa_pool_s {
u64 stack_base; /* W0 */
-#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */
- u64 reserved_115_127 : 13;
- u64 buf_size : 11;
- u64 reserved_100_103 : 4;
- u64 buf_offset : 12;
- u64 stack_way_mask : 16;
- u64 reserved_70_71 : 3;
- u64 stack_caching : 1;
- u64 reserved_66_67 : 2;
- u64 nat_align : 1;
- u64 ena : 1;
-#else
u64 ena : 1;
u64 nat_align : 1;
u64 reserved_66_67 : 2;
@@ -271,36 +258,10 @@ struct npa_pool_s {
u64 reserved_100_103 : 4;
u64 buf_size : 11;
u64 reserved_115_127 : 13;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W2 */
- u64 stack_pages : 32;
- u64 stack_max_pages : 32;
-#else
u64 stack_max_pages : 32;
u64 stack_pages : 32;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W3 */
- u64 reserved_240_255 : 16;
- u64 op_pc : 48;
-#else
u64 op_pc : 48;
u64 reserved_240_255 : 16;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W4 */
- u64 reserved_316_319 : 4;
- u64 update_time : 16;
- u64 reserved_297_299 : 3;
- u64 fc_up_crossing : 1;
- u64 fc_hyst_bits : 4;
- u64 fc_stype : 2;
- u64 fc_ena : 1;
- u64 avg_con : 9;
- u64 avg_level : 8;
- u64 reserved_270_271 : 2;
- u64 shift : 6;
- u64 reserved_260_263 : 4;
- u64 stack_offset : 4;
-#else
u64 stack_offset : 4;
u64 reserved_260_263 : 4;
u64 shift : 6;
@@ -311,26 +272,13 @@ struct npa_pool_s {
u64 fc_stype : 2;
u64 fc_hyst_bits : 4;
u64 fc_up_crossing : 1;
- u64 reserved_297_299 : 3;
+ u64 fc_be : 1;
+ u64 reserved_298_299 : 2;
u64 update_time : 16;
u64 reserved_316_319 : 4;
-#endif
u64 fc_addr; /* W5 */
u64 ptr_start; /* W6 */
u64 ptr_end; /* W7 */
-#if defined(__BIG_ENDIAN_BITFIELD) /* W8 */
- u64 reserved_571_575 : 5;
- u64 err_qint_idx : 7;
- u64 reserved_563 : 1;
- u64 thresh_qint_idx : 7;
- u64 reserved_555 : 1;
- u64 thresh_up : 1;
- u64 thresh_int_ena : 1;
- u64 thresh_int : 1;
- u64 err_int_ena : 8;
- u64 err_int : 8;
- u64 reserved_512_535 : 24;
-#else
u64 reserved_512_535 : 24;
u64 err_int : 8;
u64 err_int_ena : 8;
@@ -342,14 +290,10 @@ struct npa_pool_s {
u64 reserved_563 : 1;
u64 err_qint_idx : 7;
u64 reserved_571_575 : 5;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W9 */
- u64 reserved_612_639 : 28;
- u64 thresh : 36;
-#else
u64 thresh : 36;
- u64 reserved_612_639 : 28;
-#endif
+ u64 rsvd_615_612 : 4;
+ u64 fc_msh_dst : 11;
+ u64 reserved_627_639 : 13;
u64 reserved_640_703; /* W10 */
u64 reserved_704_767; /* W11 */
u64 reserved_768_831; /* W12 */
@@ -377,6 +321,7 @@ enum nix_aq_ctype {
NIX_AQ_CTYPE_MCE = 0x3,
NIX_AQ_CTYPE_RSS = 0x4,
NIX_AQ_CTYPE_DYNO = 0x5,
+ NIX_AQ_CTYPE_BANDPROF = 0x6,
};
/* NIX admin queue instruction opcodes */
@@ -391,59 +336,29 @@ enum nix_aq_instop {
/* NIX admin queue instruction structure */
struct nix_aq_inst_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 doneint : 1; /* W0 */
- u64 reserved_44_62 : 19;
- u64 cindex : 20;
- u64 reserved_15_23 : 9;
- u64 lf : 7;
- u64 ctype : 4;
- u64 op : 4;
-#else
u64 op : 4;
u64 ctype : 4;
- u64 lf : 7;
- u64 reserved_15_23 : 9;
+ u64 lf : 9;
+ u64 reserved_17_23 : 7;
u64 cindex : 20;
u64 reserved_44_62 : 19;
u64 doneint : 1;
-#endif
u64 res_addr; /* W1 */
};
/* NIX admin queue result structure */
struct nix_aq_res_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_17_63 : 47; /* W0 */
- u64 doneint : 1;
- u64 compcode : 8;
- u64 ctype : 4;
- u64 op : 4;
-#else
u64 op : 4;
u64 ctype : 4;
u64 compcode : 8;
u64 doneint : 1;
u64 reserved_17_63 : 47;
-#endif
u64 reserved_64_127; /* W1 */
};
/* NIX Completion queue context structure */
struct nix_cq_ctx_s {
u64 base;
-#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */
- u64 wrptr : 20;
- u64 avg_con : 9;
- u64 cint_idx : 7;
- u64 cq_err : 1;
- u64 qint_idx : 7;
- u64 rsvd_81_83 : 3;
- u64 bpid : 9;
- u64 rsvd_69_71 : 3;
- u64 bp_ena : 1;
- u64 rsvd_64_67 : 4;
-#else
u64 rsvd_64_67 : 4;
u64 bp_ena : 1;
u64 rsvd_69_71 : 3;
@@ -454,31 +369,10 @@ struct nix_cq_ctx_s {
u64 cint_idx : 7;
u64 avg_con : 9;
u64 wrptr : 20;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W2 */
- u64 update_time : 16;
- u64 avg_level : 8;
- u64 head : 20;
- u64 tail : 20;
-#else
u64 tail : 20;
u64 head : 20;
u64 avg_level : 8;
u64 update_time : 16;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W3 */
- u64 cq_err_int_ena : 8;
- u64 cq_err_int : 8;
- u64 qsize : 4;
- u64 rsvd_233_235 : 3;
- u64 caching : 1;
- u64 substream : 20;
- u64 rsvd_210_211 : 2;
- u64 ena : 1;
- u64 drop_ena : 1;
- u64 drop : 8;
- u64 bp : 8;
-#else
u64 bp : 8;
u64 drop : 8;
u64 drop_ena : 1;
@@ -490,20 +384,161 @@ struct nix_cq_ctx_s {
u64 qsize : 4;
u64 cq_err_int : 8;
u64 cq_err_int_ena : 8;
-#endif
+};
+
+/* CN10K NIX Receive queue context structure */
+struct nix_cn10k_rq_ctx_s {
+ u64 ena : 1;
+ u64 sso_ena : 1;
+ u64 ipsech_ena : 1;
+ u64 ena_wqwd : 1;
+ u64 cq : 20;
+ u64 rsvd_36_24 : 13;
+ u64 lenerr_dis : 1;
+ u64 csum_il4_dis : 1;
+ u64 csum_ol4_dis : 1;
+ u64 len_il4_dis : 1;
+ u64 len_il3_dis : 1;
+ u64 len_ol4_dis : 1;
+ u64 len_ol3_dis : 1;
+ u64 wqe_aura : 20;
+ u64 spb_aura : 20;
+ u64 lpb_aura : 20;
+ u64 sso_grp : 10;
+ u64 sso_tt : 2;
+ u64 pb_caching : 2;
+ u64 wqe_caching : 1;
+ u64 xqe_drop_ena : 1;
+ u64 spb_drop_ena : 1;
+ u64 lpb_drop_ena : 1;
+ u64 pb_stashing : 1;
+ u64 ipsecd_drop_ena : 1;
+ u64 chi_ena : 1;
+ u64 rsvd_127_125 : 3;
+ u64 band_prof_id : 10; /* W2 */
+ u64 rsvd_138 : 1;
+ u64 policer_ena : 1;
+ u64 spb_sizem1 : 6;
+ u64 wqe_skip : 2;
+ u64 rsvd_150_148 : 3;
+ u64 spb_ena : 1;
+ u64 lpb_sizem1 : 12;
+ u64 first_skip : 7;
+ u64 rsvd_171 : 1;
+ u64 later_skip : 6;
+ u64 xqe_imm_size : 6;
+ u64 rsvd_189_184 : 6;
+ u64 xqe_imm_copy : 1;
+ u64 xqe_hdr_split : 1;
+ u64 xqe_drop : 8; /* W3 */
+ u64 xqe_pass : 8;
+ u64 wqe_pool_drop : 8;
+ u64 wqe_pool_pass : 8;
+ u64 spb_aura_drop : 8;
+ u64 spb_aura_pass : 8;
+ u64 spb_pool_drop : 8;
+ u64 spb_pool_pass : 8;
+ u64 lpb_aura_drop : 8; /* W4 */
+ u64 lpb_aura_pass : 8;
+ u64 lpb_pool_drop : 8;
+ u64 lpb_pool_pass : 8;
+ u64 rsvd_291_288 : 4;
+ u64 rq_int : 8;
+ u64 rq_int_ena : 8;
+ u64 qint_idx : 7;
+ u64 rsvd_319_315 : 5;
+ u64 ltag : 24; /* W5 */
+ u64 good_utag : 8;
+ u64 bad_utag : 8;
+ u64 flow_tagw : 6;
+ u64 ipsec_vwqe : 1;
+ u64 vwqe_ena : 1;
+ u64 vwqe_wait : 8;
+ u64 max_vsize_exp : 4;
+ u64 vwqe_skip : 2;
+ u64 rsvd_383_382 : 2;
+ u64 octs : 48; /* W6 */
+ u64 rsvd_447_432 : 16;
+ u64 pkts : 48; /* W7 */
+ u64 rsvd_511_496 : 16;
+ u64 drop_octs : 48; /* W8 */
+ u64 rsvd_575_560 : 16;
+ u64 drop_pkts : 48; /* W9 */
+ u64 rsvd_639_624 : 16;
+ u64 re_pkts : 48; /* W10 */
+ u64 rsvd_703_688 : 16;
+ u64 rsvd_767_704; /* W11 */
+ u64 rsvd_831_768; /* W12 */
+ u64 rsvd_895_832; /* W13 */
+ u64 rsvd_959_896; /* W14 */
+ u64 rsvd_1023_960; /* W15 */
+};
+
+/* CN10K NIX Send queue context structure */
+struct nix_cn10k_sq_ctx_s {
+ u64 ena : 1;
+ u64 qint_idx : 6;
+ u64 substream : 20;
+ u64 sdp_mcast : 1;
+ u64 cq : 20;
+ u64 sqe_way_mask : 16;
+ u64 smq : 10; /* W1 */
+ u64 cq_ena : 1;
+ u64 xoff : 1;
+ u64 sso_ena : 1;
+ u64 smq_rr_weight : 14;
+ u64 default_chan : 12;
+ u64 sqb_count : 16;
+ u64 rsvd_120_119 : 2;
+ u64 smq_rr_count_lb : 7;
+ u64 smq_rr_count_ub : 25; /* W2 */
+ u64 sqb_aura : 20;
+ u64 sq_int : 8;
+ u64 sq_int_ena : 8;
+ u64 sqe_stype : 2;
+ u64 rsvd_191 : 1;
+ u64 max_sqe_size : 2; /* W3 */
+ u64 cq_limit : 8;
+ u64 lmt_dis : 1;
+ u64 mnq_dis : 1;
+ u64 smq_next_sq : 20;
+ u64 smq_lso_segnum : 8;
+ u64 tail_offset : 6;
+ u64 smenq_offset : 6;
+ u64 head_offset : 6;
+ u64 smenq_next_sqb_vld : 1;
+ u64 smq_pend : 1;
+ u64 smq_next_sq_vld : 1;
+ u64 rsvd_255_253 : 3;
+ u64 next_sqb : 64; /* W4 */
+ u64 tail_sqb : 64; /* W5 */
+ u64 smenq_sqb : 64; /* W6 */
+ u64 smenq_next_sqb : 64; /* W7 */
+ u64 head_sqb : 64; /* W8 */
+ u64 rsvd_583_576 : 8; /* W9 */
+ u64 vfi_lso_total : 18;
+ u64 vfi_lso_sizem1 : 3;
+ u64 vfi_lso_sb : 8;
+ u64 vfi_lso_mps : 14;
+ u64 vfi_lso_vlan0_ins_ena : 1;
+ u64 vfi_lso_vlan1_ins_ena : 1;
+ u64 vfi_lso_vld : 1;
+ u64 rsvd_639_630 : 10;
+ u64 scm_lso_rem : 18; /* W10 */
+ u64 rsvd_703_658 : 46;
+ u64 octs : 48; /* W11 */
+ u64 rsvd_767_752 : 16;
+ u64 pkts : 48; /* W12 */
+ u64 rsvd_831_816 : 16;
+ u64 rsvd_895_832 : 64; /* W13 */
+ u64 dropped_octs : 48;
+ u64 rsvd_959_944 : 16;
+ u64 dropped_pkts : 48;
+ u64 rsvd_1023_1008 : 16;
};
/* NIX Receive queue context structure */
struct nix_rq_ctx_s {
-#if defined(__BIG_ENDIAN_BITFIELD) /* W0 */
- u64 wqe_aura : 20;
- u64 substream : 20;
- u64 cq : 20;
- u64 ena_wqwd : 1;
- u64 ipsech_ena : 1;
- u64 sso_ena : 1;
- u64 ena : 1;
-#else
u64 ena : 1;
u64 sso_ena : 1;
u64 ipsech_ena : 1;
@@ -511,19 +546,6 @@ struct nix_rq_ctx_s {
u64 cq : 20;
u64 substream : 20;
u64 wqe_aura : 20;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */
- u64 rsvd_127_122 : 6;
- u64 lpb_drop_ena : 1;
- u64 spb_drop_ena : 1;
- u64 xqe_drop_ena : 1;
- u64 wqe_caching : 1;
- u64 pb_caching : 2;
- u64 sso_tt : 2;
- u64 sso_grp : 10;
- u64 lpb_aura : 20;
- u64 spb_aura : 20;
-#else
u64 spb_aura : 20;
u64 lpb_aura : 20;
u64 sso_grp : 10;
@@ -534,23 +556,7 @@ struct nix_rq_ctx_s {
u64 spb_drop_ena : 1;
u64 lpb_drop_ena : 1;
u64 rsvd_127_122 : 6;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W2 */
- u64 xqe_hdr_split : 1;
- u64 xqe_imm_copy : 1;
- u64 rsvd_189_184 : 6;
- u64 xqe_imm_size : 6;
- u64 later_skip : 6;
- u64 rsvd_171 : 1;
- u64 first_skip : 7;
- u64 lpb_sizem1 : 12;
- u64 spb_ena : 1;
- u64 rsvd_150_148 : 3;
- u64 wqe_skip : 2;
- u64 spb_sizem1 : 6;
- u64 rsvd_139_128 : 12;
-#else
- u64 rsvd_139_128 : 12;
+ u64 rsvd_139_128 : 12; /* W2 */
u64 spb_sizem1 : 6;
u64 wqe_skip : 2;
u64 rsvd_150_148 : 3;
@@ -563,18 +569,7 @@ struct nix_rq_ctx_s {
u64 rsvd_189_184 : 6;
u64 xqe_imm_copy : 1;
u64 xqe_hdr_split : 1;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W3 */
- u64 spb_pool_pass : 8;
- u64 spb_pool_drop : 8;
- u64 spb_aura_pass : 8;
- u64 spb_aura_drop : 8;
- u64 wqe_pool_pass : 8;
- u64 wqe_pool_drop : 8;
- u64 xqe_pass : 8;
- u64 xqe_drop : 8;
-#else
- u64 xqe_drop : 8;
+ u64 xqe_drop : 8; /* W3*/
u64 xqe_pass : 8;
u64 wqe_pool_drop : 8;
u64 wqe_pool_pass : 8;
@@ -582,19 +577,7 @@ struct nix_rq_ctx_s {
u64 spb_aura_pass : 8;
u64 spb_pool_drop : 8;
u64 spb_pool_pass : 8;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W4 */
- u64 rsvd_319_315 : 5;
- u64 qint_idx : 7;
- u64 rq_int_ena : 8;
- u64 rq_int : 8;
- u64 rsvd_291_288 : 4;
- u64 lpb_pool_pass : 8;
- u64 lpb_pool_drop : 8;
- u64 lpb_aura_pass : 8;
- u64 lpb_aura_drop : 8;
-#else
- u64 lpb_aura_drop : 8;
+ u64 lpb_aura_drop : 8; /* W4 */
u64 lpb_aura_pass : 8;
u64 lpb_pool_drop : 8;
u64 lpb_pool_pass : 8;
@@ -603,55 +586,21 @@ struct nix_rq_ctx_s {
u64 rq_int_ena : 8;
u64 qint_idx : 7;
u64 rsvd_319_315 : 5;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W5 */
- u64 rsvd_383_366 : 18;
- u64 flow_tagw : 6;
- u64 bad_utag : 8;
- u64 good_utag : 8;
- u64 ltag : 24;
-#else
- u64 ltag : 24;
+ u64 ltag : 24; /* W5 */
u64 good_utag : 8;
u64 bad_utag : 8;
u64 flow_tagw : 6;
u64 rsvd_383_366 : 18;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W6 */
- u64 rsvd_447_432 : 16;
- u64 octs : 48;
-#else
- u64 octs : 48;
+ u64 octs : 48; /* W6 */
u64 rsvd_447_432 : 16;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W7 */
- u64 rsvd_511_496 : 16;
- u64 pkts : 48;
-#else
- u64 pkts : 48;
+ u64 pkts : 48; /* W7 */
u64 rsvd_511_496 : 16;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W8 */
+ u64 drop_octs : 48; /* W8 */
u64 rsvd_575_560 : 16;
- u64 drop_octs : 48;
-#else
- u64 drop_octs : 48;
- u64 rsvd_575_560 : 16;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W9 */
- u64 rsvd_639_624 : 16;
- u64 drop_pkts : 48;
-#else
- u64 drop_pkts : 48;
+ u64 drop_pkts : 48; /* W9 */
u64 rsvd_639_624 : 16;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W10 */
- u64 rsvd_703_688 : 16;
- u64 re_pkts : 48;
-#else
- u64 re_pkts : 48;
+ u64 re_pkts : 48; /* W10 */
u64 rsvd_703_688 : 16;
-#endif
u64 rsvd_767_704; /* W11 */
u64 rsvd_831_768; /* W12 */
u64 rsvd_895_832; /* W13 */
@@ -674,30 +623,12 @@ enum nix_stype {
/* NIX Send queue context structure */
struct nix_sq_ctx_s {
-#if defined(__BIG_ENDIAN_BITFIELD) /* W0 */
- u64 sqe_way_mask : 16;
- u64 cq : 20;
- u64 sdp_mcast : 1;
- u64 substream : 20;
- u64 qint_idx : 6;
- u64 ena : 1;
-#else
u64 ena : 1;
u64 qint_idx : 6;
u64 substream : 20;
u64 sdp_mcast : 1;
u64 cq : 20;
u64 sqe_way_mask : 16;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */
- u64 sqb_count : 16;
- u64 default_chan : 12;
- u64 smq_rr_quantum : 24;
- u64 sso_ena : 1;
- u64 xoff : 1;
- u64 cq_ena : 1;
- u64 smq : 9;
-#else
u64 smq : 9;
u64 cq_ena : 1;
u64 xoff : 1;
@@ -705,37 +636,12 @@ struct nix_sq_ctx_s {
u64 smq_rr_quantum : 24;
u64 default_chan : 12;
u64 sqb_count : 16;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W2 */
- u64 rsvd_191 : 1;
- u64 sqe_stype : 2;
- u64 sq_int_ena : 8;
- u64 sq_int : 8;
- u64 sqb_aura : 20;
- u64 smq_rr_count : 25;
-#else
u64 smq_rr_count : 25;
u64 sqb_aura : 20;
u64 sq_int : 8;
u64 sq_int_ena : 8;
u64 sqe_stype : 2;
u64 rsvd_191 : 1;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W3 */
- u64 rsvd_255_253 : 3;
- u64 smq_next_sq_vld : 1;
- u64 smq_pend : 1;
- u64 smenq_next_sqb_vld : 1;
- u64 head_offset : 6;
- u64 smenq_offset : 6;
- u64 tail_offset : 6;
- u64 smq_lso_segnum : 8;
- u64 smq_next_sq : 20;
- u64 mnq_dis : 1;
- u64 lmt_dis : 1;
- u64 cq_limit : 8;
- u64 max_sqe_size : 2;
-#else
u64 max_sqe_size : 2;
u64 cq_limit : 8;
u64 lmt_dis : 1;
@@ -749,23 +655,11 @@ struct nix_sq_ctx_s {
u64 smq_pend : 1;
u64 smq_next_sq_vld : 1;
u64 rsvd_255_253 : 3;
-#endif
u64 next_sqb : 64;/* W4 */
u64 tail_sqb : 64;/* W5 */
u64 smenq_sqb : 64;/* W6 */
u64 smenq_next_sqb : 64;/* W7 */
u64 head_sqb : 64;/* W8 */
-#if defined(__BIG_ENDIAN_BITFIELD) /* W9 */
- u64 rsvd_639_630 : 10;
- u64 vfi_lso_vld : 1;
- u64 vfi_lso_vlan1_ins_ena : 1;
- u64 vfi_lso_vlan0_ins_ena : 1;
- u64 vfi_lso_mps : 14;
- u64 vfi_lso_sb : 8;
- u64 vfi_lso_sizem1 : 3;
- u64 vfi_lso_total : 18;
- u64 rsvd_583_576 : 8;
-#else
u64 rsvd_583_576 : 8;
u64 vfi_lso_total : 18;
u64 vfi_lso_sizem1 : 3;
@@ -775,68 +669,28 @@ struct nix_sq_ctx_s {
u64 vfi_lso_vlan1_ins_ena : 1;
u64 vfi_lso_vld : 1;
u64 rsvd_639_630 : 10;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W10 */
- u64 rsvd_703_658 : 46;
- u64 scm_lso_rem : 18;
-#else
u64 scm_lso_rem : 18;
u64 rsvd_703_658 : 46;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W11 */
- u64 rsvd_767_752 : 16;
- u64 octs : 48;
-#else
u64 octs : 48;
u64 rsvd_767_752 : 16;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W12 */
- u64 rsvd_831_816 : 16;
- u64 pkts : 48;
-#else
u64 pkts : 48;
u64 rsvd_831_816 : 16;
-#endif
u64 rsvd_895_832 : 64;/* W13 */
-#if defined(__BIG_ENDIAN_BITFIELD) /* W14 */
- u64 rsvd_959_944 : 16;
- u64 dropped_octs : 48;
-#else
u64 dropped_octs : 48;
u64 rsvd_959_944 : 16;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W15 */
- u64 rsvd_1023_1008 : 16;
- u64 dropped_pkts : 48;
-#else
u64 dropped_pkts : 48;
u64 rsvd_1023_1008 : 16;
-#endif
};
/* NIX Receive side scaling entry structure*/
struct nix_rsse_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- uint32_t reserved_20_31 : 12;
- uint32_t rq : 20;
-#else
uint32_t rq : 20;
uint32_t reserved_20_31 : 12;
-#endif
};
/* NIX receive multicast/mirror entry structure */
struct nix_rx_mce_s {
-#if defined(__BIG_ENDIAN_BITFIELD) /* W0 */
- uint64_t next : 16;
- uint64_t pf_func : 16;
- uint64_t rsvd_31_24 : 8;
- uint64_t index : 20;
- uint64_t eol : 1;
- uint64_t rsvd_2 : 1;
- uint64_t op : 2;
-#else
uint64_t op : 2;
uint64_t rsvd_2 : 1;
uint64_t eol : 1;
@@ -844,7 +698,89 @@ struct nix_rx_mce_s {
uint64_t rsvd_31_24 : 8;
uint64_t pf_func : 16;
uint64_t next : 16;
-#endif
+};
+
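+/* NIX ingress policer (bandwidth profile) hierarchy layers */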
+enum nix_band_prof_layers {
+ BAND_PROF_LEAF_LAYER = 0,
+ BAND_PROF_INVAL_LAYER = 1,
+ BAND_PROF_MID_LAYER = 2,
+ BAND_PROF_TOP_LAYER = 3,
+ BAND_PROF_NUM_LAYERS = 4,
+};
+
+enum NIX_RX_BAND_PROF_ACTIONRESULT_E {
+ NIX_RX_BAND_PROF_ACTIONRESULT_PASS = 0x0,
+ NIX_RX_BAND_PROF_ACTIONRESULT_DROP = 0x1,
+ NIX_RX_BAND_PROF_ACTIONRESULT_RED = 0x2,
+};
+
+enum nix_band_prof_pc_mode {
+ NIX_RX_PC_MODE_VLAN = 0,
+ NIX_RX_PC_MODE_DSCP = 1,
+ NIX_RX_PC_MODE_GEN = 2,
+ NIX_RX_PC_MODE_RSVD = 3,
+};
+
+/* NIX ingress policer bandwidth profile structure */
+struct nix_bandprof_s {
+ uint64_t pc_mode : 2; /* W0 */
+ uint64_t icolor : 2;
+ uint64_t tnl_ena : 1;
+ uint64_t reserved_5_7 : 3;
+ uint64_t peir_exponent : 5;
+ uint64_t reserved_13_15 : 3;
+ uint64_t pebs_exponent : 5;
+ uint64_t reserved_21_23 : 3;
+ uint64_t cir_exponent : 5;
+ uint64_t reserved_29_31 : 3;
+ uint64_t cbs_exponent : 5;
+ uint64_t reserved_37_39 : 3;
+ uint64_t peir_mantissa : 8;
+ uint64_t pebs_mantissa : 8;
+ uint64_t cir_mantissa : 8;
+ uint64_t cbs_mantissa : 8; /* W1 */
+ uint64_t lmode : 1;
+ uint64_t l_sellect : 3;
+ uint64_t rdiv : 4;
+ uint64_t adjust_exponent : 5;
+ uint64_t reserved_85_86 : 2;
+ uint64_t adjust_mantissa : 9;
+ uint64_t gc_action : 2;
+ uint64_t yc_action : 2;
+ uint64_t rc_action : 2;
+ uint64_t meter_algo : 2;
+ uint64_t band_prof_id : 7;
+ uint64_t reserved_111_118 : 8;
+ uint64_t hl_en : 1;
+ uint64_t reserved_120_127 : 8;
+ uint64_t ts : 48; /* W2 */
+ uint64_t reserved_176_191 : 16;
+ uint64_t pe_accum : 32; /* W3 */
+ uint64_t c_accum : 32;
+ uint64_t green_pkt_pass : 48; /* W4 */
+ uint64_t reserved_304_319 : 16;
+ uint64_t yellow_pkt_pass : 48; /* W5 */
+ uint64_t reserved_368_383 : 16;
+ uint64_t red_pkt_pass : 48; /* W6 */
+ uint64_t reserved_432_447 : 16;
+ uint64_t green_octs_pass : 48; /* W7 */
+ uint64_t reserved_496_511 : 16;
+ uint64_t yellow_octs_pass : 48; /* W8 */
+ uint64_t reserved_560_575 : 16;
+ uint64_t red_octs_pass : 48; /* W9 */
+ uint64_t reserved_624_639 : 16;
+ uint64_t green_pkt_drop : 48; /* W10 */
+ uint64_t reserved_688_703 : 16;
+ uint64_t yellow_pkt_drop : 48; /* W11 */
+ uint64_t reserved_752_767 : 16;
+ uint64_t red_pkt_drop : 48; /* W12 */
+ uint64_t reserved_816_831 : 16;
+ uint64_t green_octs_drop : 48; /* W13 */
+ uint64_t reserved_880_895 : 16;
+ uint64_t yellow_octs_drop : 48; /* W14 */
+ uint64_t reserved_944_959 : 16;
+ uint64_t red_octs_drop : 48; /* W15 */
+ uint64_t reserved_1008_1023 : 16;
};
enum nix_lsoalg {
@@ -863,15 +799,6 @@ enum nix_txlayer {
};
struct nix_lso_format {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 rsvd_19_63 : 45;
- u64 alg : 3;
- u64 rsvd_14_15 : 2;
- u64 sizem1 : 2;
- u64 rsvd_10_11 : 2;
- u64 layer : 2;
- u64 offset : 8;
-#else
u64 offset : 8;
u64 layer : 2;
u64 rsvd_10_11 : 2;
@@ -879,24 +806,9 @@ struct nix_lso_format {
u64 rsvd_14_15 : 2;
u64 alg : 3;
u64 rsvd_19_63 : 45;
-#endif
};
struct nix_rx_flowkey_alg {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_35_63 :29;
- u64 ltype_match :4;
- u64 ltype_mask :4;
- u64 sel_chan :1;
- u64 ena :1;
- u64 reserved_24_24 :1;
- u64 lid :3;
- u64 bytesm1 :5;
- u64 hdr_offset :8;
- u64 fn_mask :1;
- u64 ln_mask :1;
- u64 key_offset :6;
-#else
u64 key_offset :6;
u64 ln_mask :1;
u64 fn_mask :1;
@@ -909,7 +821,6 @@ struct nix_rx_flowkey_alg {
u64 ltype_mask :4;
u64 ltype_match :4;
u64 reserved_35_63 :29;
-#endif
};
/* NIX VTAG size */
@@ -917,4 +828,36 @@ enum nix_vtag_size {
VTAGSIZE_T4 = 0x0,
VTAGSIZE_T8 = 0x1,
};
+
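+/* NIX TX VTAG operations */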
+enum nix_tx_vtag_op {
+ NOP = 0x0,
+ VTAG_INSERT = 0x1,
+ VTAG_REPLACE = 0x2,
+};
+
+/* NIX RX VTAG actions */
+#define VTAG_STRIP BIT_ULL(4)
+#define VTAG_CAPTURE BIT_ULL(5)
+
+/* REE admin queue instruction structure */
+struct ree_af_aq_inst_s {
+ u64 rof_ptr_addr;
+ u64 reserved_64_64 : 1;
+ u64 nc : 1;
+ u64 reserved_66_66 : 1;
+ u64 doneint : 1;
+ u64 reserved_68_95 : 28;
+ u64 length : 15;
+ u64 reserved_111_127 : 17;
+};
+
+/* REE ROF file entry structure */
+struct ree_rof_s {
+ u64 addr : 24;
+ u64 reserved_24_31 : 8;
+ u64 typ : 8;
+ u64 reserved_40_63 : 24;
+ u64 data;
+};
+
#endif /* RVU_STRUCT_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c
new file mode 100644
index 000000000000..6f2e7944381b
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c
@@ -0,0 +1,259 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2021 Marvell.
+ *
+ */
+
+#include <linux/bitfield.h>
+#include "rvu.h"
+
+static int rvu_switch_install_rx_rule(struct rvu *rvu, u16 pcifunc,
+ u16 chan_mask)
+{
+ struct npc_install_flow_req req = { 0 };
+ struct npc_install_flow_rsp rsp = { 0 };
+ struct rvu_pfvf *pfvf;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ /* If the pcifunc is not initialized then nothing to do.
+ * This same function will be called again via rvu_switch_update_rules
+ * after pcifunc is initialized.
+ */
+ if (!test_bit(NIXLF_INITIALIZED, &pfvf->flags))
+ return 0;
+
+ ether_addr_copy(req.packet.dmac, pfvf->mac_addr);
+ eth_broadcast_addr((u8 *)&req.mask.dmac);
+ req.hdr.pcifunc = 0; /* AF is requester */
+ req.vf = pcifunc;
+ req.features = BIT_ULL(NPC_DMAC);
+ req.channel = pfvf->rx_chan_base;
+ req.chan_mask = chan_mask;
+ req.intf = pfvf->nix_rx_intf;
+ req.op = NIX_RX_ACTION_DEFAULT;
+ req.default_rule = 1;
+
+ return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
+}
+
+static int rvu_switch_install_tx_rule(struct rvu *rvu, u16 pcifunc, u16 entry)
+{
+ struct npc_install_flow_req req = { 0 };
+ struct npc_install_flow_rsp rsp = { 0 };
+ struct rvu_pfvf *pfvf;
+ u8 lbkid;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ /* If the pcifunc is not initialized then nothing to do.
+ * This same function will be called again via rvu_switch_update_rules
+ * after pcifunc is initialized.
+ */
+ if (!test_bit(NIXLF_INITIALIZED, &pfvf->flags))
+ return 0;
+
+ lbkid = pfvf->nix_blkaddr == BLKADDR_NIX0 ? 0 : 1;
+ ether_addr_copy(req.packet.dmac, pfvf->mac_addr);
+ eth_broadcast_addr((u8 *)&req.mask.dmac);
+ req.hdr.pcifunc = 0; /* AF is requester */
+ req.vf = pcifunc;
+ req.entry = entry;
+ req.features = BIT_ULL(NPC_DMAC);
+ req.intf = pfvf->nix_tx_intf;
+ req.op = NIX_TX_ACTIONOP_UCAST_CHAN;
+ req.index = (lbkid << 8) | RVU_SWITCH_LBK_CHAN;
+ req.set_cntr = 1;
+
+ return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
+}
+
+static int rvu_switch_install_rules(struct rvu *rvu)
+{
+ struct rvu_switch *rswitch = &rvu->rswitch;
+ u16 start = rswitch->start_entry;
+ struct rvu_hwinfo *hw = rvu->hw;
+ int pf, vf, numvfs, hwvf;
+ u16 pcifunc, entry = 0;
+ int err;
+
+ for (pf = 1; pf < hw->total_pfs; pf++) {
+ if (!is_pf_cgxmapped(rvu, pf))
+ continue;
+
+ pcifunc = pf << 10;
+ /* rvu_get_nix_blkaddr sets up the corresponding NIX block
+ * address and NIX RX and TX interfaces for a pcifunc.
+ * Generally it is called during attach call of a pcifunc but it
+ * is called here since we are pre-installing rules before
+ * nixlfs are attached
+ */
+ rvu_get_nix_blkaddr(rvu, pcifunc);
+
+ /* MCAM RX rule for a PF/VF already exists as default unicast
+ * rules installed by AF. Hence change the channel in those
+ * rules to ignore channel so that packets with the required
+ * DMAC received from LBK (by other PF/VFs in the system) or from
+ * external world (from wire) are accepted.
+ */
+ err = rvu_switch_install_rx_rule(rvu, pcifunc, 0x0);
+ if (err) {
+ dev_err(rvu->dev, "RX rule for PF%d failed(%d)\n",
+ pf, err);
+ return err;
+ }
+
+ err = rvu_switch_install_tx_rule(rvu, pcifunc, start + entry);
+ if (err) {
+ dev_err(rvu->dev, "TX rule for PF%d failed(%d)\n",
+ pf, err);
+ return err;
+ }
+
+ rswitch->entry2pcifunc[entry++] = pcifunc;
+
+ rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
+ for (vf = 0; vf < numvfs; vf++, hwvf++) {
+ pcifunc = pf << 10 | ((vf + 1) & 0x3FF);
+ rvu_get_nix_blkaddr(rvu, pcifunc);
+
+ err = rvu_switch_install_rx_rule(rvu, pcifunc, 0x0);
+ if (err) {
+ dev_err(rvu->dev,
+ "RX rule for PF%dVF%d failed(%d)\n",
+ pf, vf, err);
+ return err;
+ }
+
+ err = rvu_switch_install_tx_rule(rvu, pcifunc,
+ start + entry);
+ if (err) {
+ dev_err(rvu->dev,
+ "TX rule for PF%dVF%d failed(%d)\n",
+ pf, vf, err);
+ return err;
+ }
+
+ rswitch->entry2pcifunc[entry++] = pcifunc;
+ }
+ }
+
+ return 0;
+}
+
+void rvu_switch_enable(struct rvu *rvu)
+{
+ struct npc_mcam_alloc_entry_req alloc_req = { 0 };
+ struct npc_mcam_alloc_entry_rsp alloc_rsp = { 0 };
+ struct npc_delete_flow_req uninstall_req = { 0 };
+ struct npc_mcam_free_entry_req free_req = { 0 };
+ struct rvu_switch *rswitch = &rvu->rswitch;
+ struct msg_rsp rsp;
+ int ret;
+
+ alloc_req.contig = true;
+ alloc_req.count = rvu->cgx_mapped_pfs + rvu->cgx_mapped_vfs;
+ ret = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &alloc_req,
+ &alloc_rsp);
+ if (ret) {
+ dev_err(rvu->dev,
+ "Unable to allocate MCAM entries\n");
+ goto exit;
+ }
+
+ if (alloc_rsp.count != alloc_req.count) {
+ dev_err(rvu->dev,
+ "Unable to allocate %d MCAM entries, got %d\n",
+ alloc_req.count, alloc_rsp.count);
+ goto free_entries;
+ }
+
+ rswitch->entry2pcifunc = kcalloc(alloc_req.count, sizeof(u16),
+ GFP_KERNEL);
+ if (!rswitch->entry2pcifunc)
+ goto free_entries;
+
+ rswitch->used_entries = alloc_rsp.count;
+ rswitch->start_entry = alloc_rsp.entry;
+
+ ret = rvu_switch_install_rules(rvu);
+ if (ret)
+ goto uninstall_rules;
+
+ return;
+
+uninstall_rules:
+ uninstall_req.start = rswitch->start_entry;
+ uninstall_req.end = rswitch->start_entry + rswitch->used_entries - 1;
+ rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &rsp);
+ kfree(rswitch->entry2pcifunc);
+free_entries:
+ free_req.all = 1;
+ rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, &rsp);
+exit:
+ return;
+}
+
+void rvu_switch_disable(struct rvu *rvu)
+{
+ struct npc_delete_flow_req uninstall_req = { 0 };
+ struct npc_mcam_free_entry_req free_req = { 0 };
+ struct rvu_switch *rswitch = &rvu->rswitch;
+ struct rvu_hwinfo *hw = rvu->hw;
+ int pf, vf, numvfs, hwvf;
+ struct msg_rsp rsp;
+ u16 pcifunc;
+ int err;
+
+ if (!rswitch->used_entries)
+ return;
+
+ for (pf = 1; pf < hw->total_pfs; pf++) {
+ if (!is_pf_cgxmapped(rvu, pf))
+ continue;
+
+ pcifunc = pf << 10;
+ err = rvu_switch_install_rx_rule(rvu, pcifunc, 0xFFF);
+ if (err)
+ dev_err(rvu->dev,
+ "Reverting RX rule for PF%d failed(%d)\n",
+ pf, err);
+
+ rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
+ for (vf = 0; vf < numvfs; vf++, hwvf++) {
+ pcifunc = pf << 10 | ((vf + 1) & 0x3FF);
+ err = rvu_switch_install_rx_rule(rvu, pcifunc, 0xFFF);
+ if (err)
+ dev_err(rvu->dev,
+ "Reverting RX rule for PF%dVF%d failed(%d)\n",
+ pf, vf, err);
+ }
+ }
+
+ uninstall_req.start = rswitch->start_entry;
+ uninstall_req.end = rswitch->start_entry + rswitch->used_entries - 1;
+ free_req.all = 1;
+ rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &rsp);
+ rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, &rsp);
+ rswitch->used_entries = 0;
+ kfree(rswitch->entry2pcifunc);
+}
+
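+/* Re-install switch RX/TX rules for a pcifunc once its NIXLF is initialized */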
+void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc)
+{
+ struct rvu_switch *rswitch = &rvu->rswitch;
+ u32 max = rswitch->used_entries;
+ u16 entry;
+
+ if (!rswitch->used_entries)
+ return;
+
+ for (entry = 0; entry < max; entry++) {
+ if (rswitch->entry2pcifunc[entry] == pcifunc)
+ break;
+ }
+
+ if (entry >= max)
+ return;
+
+ rvu_switch_install_tx_rule(rvu, pcifunc, rswitch->start_entry + entry);
+ rvu_switch_install_rx_rule(rvu, pcifunc, 0x0);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_tim.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_tim.c
new file mode 100644
index 000000000000..fa779cc3fe7a
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_tim.c
@@ -0,0 +1,461 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell.
+ *
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/types.h>
+#include <linux/bitfield.h>
+
+#include "rvu_struct.h"
+#include "rvu_reg.h"
+#include "rvu.h"
+
+#define TIM_CHUNKSIZE_MULTIPLE (16)
+#define TIM_CHUNKSIZE_MIN (TIM_CHUNKSIZE_MULTIPLE * 0x2)
+#define TIM_CHUNKSIZE_MAX (TIM_CHUNKSIZE_MULTIPLE * 0x1FFF)
+
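+/* Read the ARM generic timer virtual counter (CNTVCT_EL0) */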
+static inline u64 get_tenns_tsc(void)
+{
+ u64 tsc;
+
+#if defined(CONFIG_ARM64)
+ asm volatile("mrs %0, cntvct_el0" : "=r" (tsc));
+#endif
+ return tsc;
+}
+
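+/* Read the ARM generic timer counter frequency (CNTFRQ_EL0) */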
+static inline u64 get_tenns_clk(void)
+{
+ u64 tsc = 0;
+
+#if defined(CONFIG_ARM64)
+ asm volatile("mrs %0, cntfrq_el0" : "=r" (tsc));
+#endif
+ return tsc;
+}
+
+static inline int tim_block_cn10k_init(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int lf;
+
+ hw->tim.ring_intvls = kmalloc_array(hw->block[BLKTYPE_TIM].lf.max,
+ sizeof(enum tim_ring_interval),
+ GFP_KERNEL);
+ if (!hw->tim.ring_intvls)
+ return -ENOMEM;
+
+ for (lf = 0; lf < hw->block[BLKTYPE_TIM].lf.max; lf++)
+ hw->tim.ring_intvls[lf] = TIM_INTERVAL_INVAL;
+ hw->tim.rings_per_intvl[TIM_INTERVAL_1US] = 0;
+ hw->tim.rings_per_intvl[TIM_INTERVAL_10US] = 0;
+ hw->tim.rings_per_intvl[TIM_INTERVAL_1MS] = 0;
+
+ return 0;
+}
+
+static inline void tim_cn10k_clear_intvl(struct rvu *rvu, int lf)
+{
+ struct tim_rsrc *tim = &rvu->hw->tim;
+
+ if (tim->ring_intvls[lf] != TIM_INTERVAL_INVAL) {
+ tim->rings_per_intvl[tim->ring_intvls[lf]]--;
+ tim->ring_intvls[lf] = TIM_INTERVAL_INVAL;
+ }
+}
+
+static inline void tim_cn10k_record_intvl(struct rvu *rvu, int lf,
+ u64 intervalns)
+{
+ struct tim_rsrc *tim = &rvu->hw->tim;
+ enum tim_ring_interval intvl;
+
+ tim_cn10k_clear_intvl(rvu, lf);
+
+ if (intervalns < (u64)1E4)
+ intvl = TIM_INTERVAL_1US;
+ else if (intervalns < (u64)1E6)
+ intvl = TIM_INTERVAL_10US;
+ else
+ intvl = TIM_INTERVAL_1MS;
+
+ tim->ring_intvls[lf] = intvl;
+ tim->rings_per_intvl[tim->ring_intvls[lf]]++;
+}
+
+static inline int tim_get_min_intvl(struct rvu *rvu, u8 clocksource,
+ u64 clockfreq, u64 *intvl_ns,
+ u64 *intvl_cyc)
+{
+ struct tim_rsrc *tim = &rvu->hw->tim;
+ int intvl;
+
+ if (is_rvu_otx2(rvu)) {
+ switch (clocksource) {
+ case TIM_CLK_SRCS_TENNS:
+ case TIM_CLK_SRCS_GPIO:
+ intvl = 256;
+ break;
+ case TIM_CLK_SRCS_GTI:
+ case TIM_CLK_SRCS_PTP:
+ intvl = 300;
+ break;
+ default:
+ return TIM_AF_INVALID_CLOCK_SOURCE;
+ }
+
+ *intvl_cyc = (u64)intvl;
+ } else {
+ if (tim->rings_per_intvl[TIM_INTERVAL_1US] < 8)
+ intvl = (u64)1E3;
+ else if (tim->rings_per_intvl[TIM_INTERVAL_10US] < 8)
+ intvl = (u64)1E4;
+ else
+ intvl = (u64)1E6;
+
+ *intvl_cyc = (u64)DIV_ROUND_UP(clockfreq * (intvl), (u64)1E9);
+ }
+
+ *intvl_ns = (u64)DIV_ROUND_UP((*intvl_cyc) * (u64)1E9, clockfreq);
+
+ return 0;
+}
+
+static int rvu_tim_disable_lf(struct rvu *rvu, int lf, int blkaddr)
+{
+ u64 regval;
+
+ regval = rvu_read64(rvu, blkaddr, TIM_AF_RINGX_CTL1(lf));
+ if ((regval & TIM_AF_RINGX_CTL1_ENA) == 0)
+ return TIM_AF_RING_ALREADY_DISABLED;
+
+ /* Clear TIM_AF_RING(0..255)_CTL1[ENA]. */
+ regval = rvu_read64(rvu, blkaddr, TIM_AF_RINGX_CTL1(lf));
+ regval &= ~TIM_AF_RINGX_CTL1_ENA;
+ rvu_write64(rvu, blkaddr, TIM_AF_RINGX_CTL1(lf), regval);
+
+ /*
+ * Poll until the corresponding ring's
+ * TIM_AF_RING(0..255)_CTL1[RCF_BUSY] is clear.
+ */
+ rvu_poll_reg(rvu, blkaddr, TIM_AF_RINGX_CTL1(lf),
+ TIM_AF_RINGX_CTL1_RCF_BUSY, true);
+ if (!is_rvu_otx2(rvu))
+ tim_cn10k_clear_intvl(rvu, lf);
+
+ return 0;
+}
+
+int rvu_mbox_handler_tim_get_min_intvl(struct rvu *rvu,
+ struct tim_intvl_req *req,
+ struct tim_intvl_rsp *rsp)
+{
+ if (!req->clockfreq)
+ return TIM_AF_INVALID_CLOCK_SOURCE;
+
+ return tim_get_min_intvl(rvu, req->clocksource, req->clockfreq,
+ &rsp->intvl_ns, &rsp->intvl_cyc);
+}
+
+int rvu_mbox_handler_tim_lf_alloc(struct rvu *rvu,
+ struct tim_lf_alloc_req *req,
+ struct tim_lf_alloc_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ int lf, blkaddr;
+ u64 regval;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_TIM, pcifunc);
+ if (blkaddr < 0)
+ return TIM_AF_LF_INVALID;
+
+ lf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr], pcifunc, req->ring);
+ if (lf < 0)
+ return TIM_AF_LF_INVALID;
+
+ /* Check if requested 'TIMLF <=> NPALF' mapping is valid */
+ if (req->npa_pf_func) {
+ /* If default, use 'this' TIMLF's PFFUNC */
+ if (req->npa_pf_func == RVU_DEFAULT_PF_FUNC)
+ req->npa_pf_func = pcifunc;
+ if (!is_pffunc_map_valid(rvu, req->npa_pf_func, BLKTYPE_NPA))
+ return TIM_AF_INVAL_NPA_PF_FUNC;
+ }
+
+ /* Check if requested 'TIMLF <=> SSOLF' mapping is valid */
+ if (req->sso_pf_func) {
+ /* If default, use 'this' SSOLF's PFFUNC */
+ if (req->sso_pf_func == RVU_DEFAULT_PF_FUNC)
+ req->sso_pf_func = pcifunc;
+ if (!is_pffunc_map_valid(rvu, req->sso_pf_func, BLKTYPE_SSO))
+ return TIM_AF_INVAL_SSO_PF_FUNC;
+ }
+
+ regval = (((u64)req->npa_pf_func) << 16) |
+ ((u64)req->sso_pf_func);
+ rvu_write64(rvu, blkaddr, TIM_AF_RINGX_GMCTL(lf), regval);
+
+ rsp->tenns_clk = get_tenns_clk();
+
+ return 0;
+}
+
+int rvu_mbox_handler_tim_lf_free(struct rvu *rvu,
+ struct tim_ring_req *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ int lf, blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_TIM, pcifunc);
+ if (blkaddr < 0)
+ return TIM_AF_LF_INVALID;
+
+ lf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr], pcifunc, req->ring);
+ if (lf < 0)
+ return TIM_AF_LF_INVALID;
+
+ rvu_tim_lf_teardown(rvu, pcifunc, lf, req->ring);
+
+ return 0;
+}
+
+int rvu_mbox_handler_tim_config_ring(struct rvu *rvu,
+ struct tim_config_req *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ u64 intvl_cyc, intvl_ns;
+ int lf, blkaddr;
+ u64 regval;
+ int rc;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_TIM, pcifunc);
+ if (blkaddr < 0)
+ return TIM_AF_LF_INVALID;
+
+ lf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr], pcifunc, req->ring);
+ if (lf < 0)
+ return TIM_AF_LF_INVALID;
+
+ /* Check the inputs. */
+ /* bigendian can only be 1 or 0. */
+ if (req->bigendian & ~1)
+ return TIM_AF_INVALID_BIG_ENDIAN_VALUE;
+
+ /* enableperiodic can only be 1 or 0. */
+ if (req->enableperiodic & ~1)
+ return TIM_AF_INVALID_ENABLE_PERIODIC;
+
+ /* enabledontfreebuffer can only be 1 or 0. */
+ if (req->enabledontfreebuffer & ~1)
+ return TIM_AF_INVALID_ENABLE_DONTFREE;
+
+ /*
+ * enabledontfreebuffer needs to be true if enableperiodic
+ * is enabled.
+ */
+ if (req->enableperiodic && !req->enabledontfreebuffer)
+ return TIM_AF_ENA_DONTFRE_NSET_PERIODIC;
+
+ /* bucketsize needs to be between 2 and 1M (1 << 20). */
+ if (req->bucketsize < 2 || req->bucketsize > 1<<20)
+ return TIM_AF_INVALID_BSIZE;
+
+ if (req->chunksize % TIM_CHUNKSIZE_MULTIPLE)
+ return TIM_AF_CSIZE_NOT_ALIGNED;
+
+ if (req->chunksize < TIM_CHUNKSIZE_MIN)
+ return TIM_AF_CSIZE_TOO_SMALL;
+
+ if (req->chunksize > TIM_CHUNKSIZE_MAX)
+ return TIM_AF_CSIZE_TOO_BIG;
+
+ rc = tim_get_min_intvl(rvu, req->clocksource, req->clockfreq,
+ &intvl_ns, &intvl_cyc);
+ if (rc)
+ return rc;
+
+ if (req->interval < intvl_cyc || req->intervalns < intvl_ns)
+ return TIM_AF_INTERVAL_TOO_SMALL;
+
+ /* Configure edge of GPIO clock source */
+ if (req->clocksource == TIM_CLK_SRCS_GPIO &&
+ req->gpioedge < TIM_GPIO_INVALID) {
+ regval = rvu_read64(rvu, blkaddr, TIM_AF_FLAGS_REG);
+ if (FIELD_GET(TIM_AF_FLAGS_REG_GPIO_EDGE_MASK, regval) ==
+ TIM_GPIO_NO_EDGE && req->gpioedge == TIM_GPIO_NO_EDGE)
+ return TIM_AF_GPIO_CLK_SRC_NOT_ENABLED;
+ if (req->gpioedge != TIM_GPIO_NO_EDGE && req->gpioedge !=
+ FIELD_GET(TIM_AF_FLAGS_REG_GPIO_EDGE_MASK, regval)) {
+ dev_info(rvu->dev,
+ "Change edge of GPIO input to %d from %lld.\n",
+ (int)req->gpioedge,
+ FIELD_GET(TIM_AF_FLAGS_REG_GPIO_EDGE_MASK,
+ regval));
+ regval &= ~TIM_AF_FLAGS_REG_GPIO_EDGE_MASK;
+ regval |= FIELD_PREP(TIM_AF_FLAGS_REG_GPIO_EDGE_MASK,
+ req->gpioedge);
+ rvu_write64(rvu, blkaddr, TIM_AF_FLAGS_REG, regval);
+ }
+ }
+
+ if (!is_rvu_otx2(rvu))
+ tim_cn10k_record_intvl(rvu, lf, req->intervalns);
+
+ /* CTL0 */
+ /* EXPIRE_OFFSET = 0 and is set correctly when enabling. */
+ regval = req->interval;
+ rvu_write64(rvu, blkaddr, TIM_AF_RINGX_CTL0(lf), regval);
+
+ /* CTL1 */
+ regval = (((u64)req->bigendian) << 53) |
+ (1ull << 48) | /* LOCK_EN */
+ (((u64)req->enableperiodic) << 45) |
+ (((u64)(req->enableperiodic ^ 1)) << 44) | /* ENA_LDWB */
+ (((u64)req->enabledontfreebuffer) << 43) |
+ (u64)(req->bucketsize - 1);
+ if (is_rvu_otx2(rvu))
+ regval |= (((u64)req->clocksource) << 51);
+ else
+ regval |= (((u64)req->clocksource) << 40);
+
+ rvu_write64(rvu, blkaddr, TIM_AF_RINGX_CTL1(lf), regval);
+
+ /* CTL2 */
+ regval = ((u64)req->chunksize / TIM_CHUNKSIZE_MULTIPLE) << 40;
+ rvu_write64(rvu, blkaddr, TIM_AF_RINGX_CTL2(lf), regval);
+
+ return 0;
+}
+
+int rvu_mbox_handler_tim_enable_ring(struct rvu *rvu,
+ struct tim_ring_req *req,
+ struct tim_enable_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ int lf, blkaddr;
+ u64 regval;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_TIM, pcifunc);
+ if (blkaddr < 0)
+ return TIM_AF_LF_INVALID;
+
+ lf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr], pcifunc, req->ring);
+ if (lf < 0)
+ return TIM_AF_LF_INVALID;
+
+ /* Error out if the ring is already running. */
+ regval = rvu_read64(rvu, blkaddr, TIM_AF_RINGX_CTL1(lf));
+ if (regval & TIM_AF_RINGX_CTL1_ENA)
+ return TIM_AF_RING_STILL_RUNNING;
+
+ /* Enable the ring. */
+ regval = rvu_read64(rvu, blkaddr, TIM_AF_RINGX_CTL1(lf));
+ regval |= TIM_AF_RINGX_CTL1_ENA;
+ rvu_write64(rvu, blkaddr, TIM_AF_RINGX_CTL1(lf), regval);
+
+ rsp->timestarted = get_tenns_tsc();
+ rsp->currentbucket = (regval >> 20) & 0xfffff;
+
+ return 0;
+}
+
+int rvu_mbox_handler_tim_disable_ring(struct rvu *rvu,
+ struct tim_ring_req *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ int lf, blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_TIM, pcifunc);
+ if (blkaddr < 0)
+ return TIM_AF_LF_INVALID;
+
+ lf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr], pcifunc, req->ring);
+ if (lf < 0)
+ return TIM_AF_LF_INVALID;
+
+ return rvu_tim_disable_lf(rvu, lf, blkaddr);
+}
+
+int rvu_tim_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot)
+{
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_TIM, pcifunc);
+ if (blkaddr < 0)
+ return TIM_AF_LF_INVALID;
+
+ /* Ensure TIM ring is disabled prior to clearing the mapping */
+ rvu_tim_disable_lf(rvu, lf, blkaddr);
+
+ rvu_write64(rvu, blkaddr, TIM_AF_RINGX_GMCTL(lf), 0);
+
+ return 0;
+}
+
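+/* Iterate over all TIM LFs (rings) of the block */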
+#define FOR_EACH_TIM_LF(lf) \
+for (lf = 0; lf < hw->block[BLKTYPE_TIM].lf.max; lf++)
+
+int rvu_tim_init(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int lf, blkaddr, rc = 0;
+ u8 gpio_edge;
+ u64 regval;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_TIM, 0);
+ if (blkaddr < 0)
+ return 0;
+
+ if (!is_rvu_otx2(rvu))
+ rc = tim_block_cn10k_init(rvu);
+
+ regval = rvu_read64(rvu, blkaddr, TIM_AF_FLAGS_REG);
+
+ /* Disable the TIM block, if not already disabled. */
+ if (regval & TIM_AF_FLAGS_REG_ENA_TIM) {
+ /* Disable each ring(lf). */
+ FOR_EACH_TIM_LF(lf) {
+ regval = rvu_read64(rvu, blkaddr,
+ TIM_AF_RINGX_CTL1(lf));
+ if (!(regval & TIM_AF_RINGX_CTL1_ENA))
+ continue;
+
+ rvu_tim_disable_lf(rvu, lf, blkaddr);
+ }
+
+ /* Disable the TIM block. */
+ regval = rvu_read64(rvu, blkaddr, TIM_AF_FLAGS_REG);
+ regval &= ~TIM_AF_FLAGS_REG_ENA_TIM;
+ rvu_write64(rvu, blkaddr, TIM_AF_FLAGS_REG, regval);
+ }
+
+ /* Reset each LF. */
+ FOR_EACH_TIM_LF(lf) {
+ rvu_lf_reset(rvu, &hw->block[BLKTYPE_TIM], lf);
+ }
+
+ /* Reset the TIM block to get a clean slate. */
+ rvu_write64(rvu, blkaddr, TIM_AF_BLK_RST, 0x1);
+ rvu_poll_reg(rvu, blkaddr, TIM_AF_BLK_RST, BIT_ULL(63), true);
+
+ gpio_edge = TIM_GPIO_NO_EDGE;
+
+ /* Enable TIM block. */
+ regval = FIELD_PREP(TIM_AF_FLAGS_REG_GPIO_EDGE_MASK, gpio_edge) |
+ BIT_ULL(2) | /* RESET */
+ BIT_ULL(0); /* ENA_TIM */
+ rvu_write64(rvu, blkaddr, TIM_AF_FLAGS_REG, regval);
+
+ if (is_rvu_otx2(rvu))
+ rvu_tim_hw_fixes(rvu, blkaddr);
+
+ return rc;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c
index 56f90cf9c4c0..775fd4c35794 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
-/* Marvell OcteonTx2 RVU Admin Function driver tracepoints
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2020 Marvell.
*
- * Copyright (C) 2020 Marvell International Ltd.
*/
#define CREATE_TRACE_POINTS
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h
index e6609068e81b..6d19dde52189 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h
@@ -1,7 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Marvell OcteonTx2 RVU Admin Function driver tracepoints
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2020 Marvell.
*
- * Copyright (C) 2020 Marvell International Ltd.
*/
#undef TRACE_SYSTEM
@@ -14,6 +15,8 @@
#include <linux/tracepoint.h>
#include <linux/pci.h>
+#include "mbox.h"
+
TRACE_EVENT(otx2_msg_alloc,
TP_PROTO(const struct pci_dev *pdev, u16 id, u64 size),
TP_ARGS(pdev, id, size),
@@ -25,8 +28,8 @@ TRACE_EVENT(otx2_msg_alloc,
__entry->id = id;
__entry->size = size;
),
- TP_printk("[%s] msg:(0x%x) size:%lld\n", __get_str(dev),
- __entry->id, __entry->size)
+ TP_printk("[%s] msg:(%s) size:%lld\n", __get_str(dev),
+ otx2_mbox_id2name(__entry->id), __entry->size)
);
TRACE_EVENT(otx2_msg_send,
@@ -88,8 +91,8 @@ TRACE_EVENT(otx2_msg_process,
__entry->id = id;
__entry->err = err;
),
- TP_printk("[%s] msg:(0x%x) error:%d\n", __get_str(dev),
- __entry->id, __entry->err)
+ TP_printk("[%s] msg:(%s) error:%d\n", __get_str(dev),
+ otx2_mbox_id2name(__entry->id), __entry->err)
);
#endif /* __RVU_TRACE_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_validation.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_validation.c
new file mode 100644
index 000000000000..c9bb290e6072
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_validation.c
@@ -0,0 +1,984 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include "rvu.h"
+
+#define PCI_DEVID_OCTEONTX2_RVU_PF 0xA063
+#define PCI_DEVID_OCTEONTX2_SSO_RVU_PF 0xA0F9
+#define PCI_DEVID_OCTEONTX2_NPA_RVU_PF 0xA0FB
+#define PCI_DEVID_OCTEONTX2_CPT_RVU_PF 0xA0FD
+#define PCI_DEVID_OCTEONTX2_SDP_RVU_PF 0xA0F6
+#define PCI_DEVID_OCTEONTX2_CPT10_RVU_PF 0xA0F2
+
+static u64 quotas_get_sum(struct rvu_quotas *quotas)
+{
+ u64 lf_sum = 0;
+ int i;
+
+ for (i = 0; i < quotas->cnt; i++)
+ lf_sum += quotas->a[i].val;
+
+ return lf_sum;
+}
+
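+/* sysfs show handler: report the current quota value */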
+static ssize_t quota_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ struct rvu_quota *quota;
+ int val;
+
+ quota = container_of(attr, struct rvu_quota, sysfs);
+
+ if (quota->base->lock)
+ mutex_lock(quota->base->lock);
+ val = quota->val;
+ if (quota->base->lock)
+ mutex_unlock(quota->base->lock);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+
+static ssize_t quota_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int old_val, new_val, res = 0;
+ struct rvu_quota *quota;
+ struct rvu_quotas *base;
+ struct device *dev;
+ u64 lf_sum;
+
+ quota = container_of(attr, struct rvu_quota, sysfs);
+ dev = quota->dev;
+ base = quota->base;
+
+ if (kstrtoint(buf, 0, &new_val)) {
+ dev_err(dev, "Invalid %s quota: %s\n", attr->attr.name, buf);
+ return -EIO;
+ }
+ if (new_val < 0) {
+ dev_err(dev, "Invalid %s quota: %d < 0\n", attr->attr.name,
+ new_val);
+ return -EIO;
+ }
+
+ if (new_val > base->max) {
+ dev_err(dev, "Invalid %s quota: %d > %d\n", attr->attr.name,
+ new_val, base->max);
+ return -EIO;
+ }
+
+ if (base->lock)
+ mutex_lock(base->lock);
+ old_val = quota->val;
+
+ if (base->ops.pre_store)
+ res = base->ops.pre_store(quota->ops_arg, quota, new_val);
+
+ if (res != 0) {
+ res = -EIO;
+ goto unlock;
+ }
+
+ lf_sum = quotas_get_sum(quota->base);
+
+ if (lf_sum + new_val - quota->val > base->max_sum) {
+ dev_err(dev,
+ "Not enough resources for %s quota. Used: %lld, Max: %lld\n",
+ attr->attr.name, lf_sum, base->max_sum);
+ res = -EIO;
+ goto unlock;
+ }
+ quota->val = new_val;
+
+ if (base->ops.post_store)
+ base->ops.post_store(quota->ops_arg, quota, old_val);
+
+ res = count;
+
+unlock:
+ if (base->lock)
+ mutex_unlock(base->lock);
+ return res;
+}
+
+static int quota_sysfs_destroy(struct rvu_quota *quota)
+{
+ if (quota == NULL)
+ return -EINVAL;
+ if (quota->sysfs.attr.mode != 0) {
+ sysfs_remove_file(quota->parent, &quota->sysfs.attr);
+ quota->sysfs.attr.mode = 0;
+ }
+ return 0;
+}
+
+static struct rvu_quotas *quotas_alloc(u32 cnt, u32 max, u64 max_sum,
+ int init_val, struct mutex *lock,
+ struct rvu_quota_ops *ops)
+{
+ struct rvu_quotas *quotas;
+ u64 i;
+
+ if (cnt == 0)
+ return NULL;
+
+ quotas = kzalloc(sizeof(struct rvu_quotas) +
+ cnt * sizeof(struct rvu_quota), GFP_KERNEL);
+ if (quotas == NULL)
+ return NULL;
+
+ for (i = 0; i < cnt; i++) {
+ quotas->a[i].base = quotas;
+ quotas->a[i].val = init_val;
+ }
+
+ quotas->cnt = cnt;
+ quotas->max = max;
+ quotas->max_sum = max_sum;
+ if (ops) {
+ quotas->ops.pre_store = ops->pre_store;
+ quotas->ops.post_store = ops->post_store;
+ }
+ quotas->lock = lock;
+
+ return quotas;
+}
+
+static void quotas_free(struct rvu_quotas *quotas)
+{
+ u64 i;
+
+ if (quotas == NULL)
+ return;
+ WARN_ON(quotas->cnt == 0);
+
+ for (i = 0; i < quotas->cnt; i++)
+ quota_sysfs_destroy(&quotas->a[i]);
+
+ kfree(quotas);
+}
+
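+/* Create a sysfs file exposing a single quota value */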
+static int quota_sysfs_create(const char *name, struct kobject *parent,
+ struct device *log_dev, struct rvu_quota *quota,
+ void *ops_arg)
+{
+ int err;
+
+ if (name == NULL || quota == NULL || log_dev == NULL)
+ return -EINVAL;
+
+ quota->sysfs.show = quota_show;
+ quota->sysfs.store = quota_store;
+ quota->sysfs.attr.name = name;
+ quota->sysfs.attr.mode = 0644;
+ quota->parent = parent;
+ quota->dev = log_dev;
+ quota->ops_arg = ops_arg;
+
+ sysfs_attr_init(&quota->sysfs.attr);
+ err = sysfs_create_file(quota->parent, &quota->sysfs.attr);
+ if (err) {
+ dev_err(quota->dev,
+ "Failed to create '%s' quota sysfs for '%s'\n",
+ name, kobject_name(quota->parent));
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
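+/* Count LFs of a block owned by the pcifunc family selected by rshift */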
+static int rvu_blk_count_rsrc(struct rvu_block *block, u16 pcifunc, u8 rshift)
+{
+ int count = 0, lf;
+
+ for (lf = 0; lf < block->lf.max; lf++)
+ if ((block->fn_map[lf] >> rshift) == (pcifunc >> rshift) &&
+ block->fn_map[lf] != 0)
+ count++;
+
+ return count;
+}
+
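+/* Count TX schedulers at the given level owned by this pcifunc family */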
+static int rvu_txsch_count_rsrc(struct rvu *rvu, int lvl, u16 pcifunc,
+ u8 rshift, struct nix_hw *nix_hw)
+{
+ struct nix_txsch *txsch = &nix_hw->txsch[lvl];
+ int count = 0, schq;
+
+ if (lvl == NIX_TXSCH_LVL_TL1)
+ return 0;
+
+ for (schq = 0; schq < txsch->schq.max; schq++) {
+ if (TXSCH_MAP_FLAGS(txsch->pfvf_map[schq]) & NIX_TXSCHQ_FREE)
+ continue;
+ if ((TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) >> rshift) ==
+ (pcifunc >> rshift))
+ count++;
+ }
+
+ return count;
+}
+
+static int free_rsrc_cnt(struct rvu *rvu, struct msg_req *req,
+ struct free_rsrcs_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ struct nix_txsch *txsch;
+ struct nix_hw *nix_hw;
+
+ mutex_lock(&rvu->rsrc_lock);
+
+ block = &hw->block[BLKADDR_NPA];
+ rsp->npa = rvu_rsrc_free_count(&block->lf);
+
+ block = &hw->block[BLKADDR_NIX0];
+ rsp->nix = rvu_rsrc_free_count(&block->lf);
+
+ block = &hw->block[BLKADDR_NIX1];
+ rsp->nix1 = rvu_rsrc_free_count(&block->lf);
+
+ block = &hw->block[BLKADDR_SSO];
+ rsp->sso = rvu_rsrc_free_count(&block->lf);
+
+ block = &hw->block[BLKADDR_SSOW];
+ rsp->ssow = rvu_rsrc_free_count(&block->lf);
+
+ block = &hw->block[BLKADDR_TIM];
+ rsp->tim = rvu_rsrc_free_count(&block->lf);
+
+ block = &hw->block[BLKADDR_CPT0];
+ rsp->cpt = rvu_rsrc_free_count(&block->lf);
+
+ block = &hw->block[BLKADDR_CPT1];
+ rsp->cpt1 = rvu_rsrc_free_count(&block->lf);
+
+ block = &hw->block[BLKADDR_REE0];
+ rsp->ree0 = rvu_rsrc_free_count(&block->lf);
+
+ block = &hw->block[BLKADDR_REE1];
+ rsp->ree1 = rvu_rsrc_free_count(&block->lf);
+
+ if (rvu->hw->cap.nix_fixed_txschq_mapping) {
+ rsp->schq[NIX_TXSCH_LVL_SMQ] = 1;
+ rsp->schq[NIX_TXSCH_LVL_TL4] = 1;
+ rsp->schq[NIX_TXSCH_LVL_TL3] = 1;
+ rsp->schq[NIX_TXSCH_LVL_TL2] = 1;
+ /* NIX1 */
+ if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
+ goto out;
+ rsp->schq_nix1[NIX_TXSCH_LVL_SMQ] = 1;
+ rsp->schq_nix1[NIX_TXSCH_LVL_TL4] = 1;
+ rsp->schq_nix1[NIX_TXSCH_LVL_TL3] = 1;
+ rsp->schq_nix1[NIX_TXSCH_LVL_TL2] = 1;
+ } else {
+ nix_hw = get_nix_hw(hw, BLKADDR_NIX0);
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
+ rsp->schq[NIX_TXSCH_LVL_SMQ] =
+ rvu_rsrc_free_count(&txsch->schq);
+
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL4];
+ rsp->schq[NIX_TXSCH_LVL_TL4] =
+ rvu_rsrc_free_count(&txsch->schq);
+
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL3];
+ rsp->schq[NIX_TXSCH_LVL_TL3] =
+ rvu_rsrc_free_count(&txsch->schq);
+
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
+ rsp->schq[NIX_TXSCH_LVL_TL2] =
+ rvu_rsrc_free_count(&txsch->schq);
+
+ if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
+ goto out;
+
+ nix_hw = get_nix_hw(hw, BLKADDR_NIX1);
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
+ rsp->schq_nix1[NIX_TXSCH_LVL_SMQ] =
+ rvu_rsrc_free_count(&txsch->schq);
+
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL4];
+ rsp->schq_nix1[NIX_TXSCH_LVL_TL4] =
+ rvu_rsrc_free_count(&txsch->schq);
+
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL3];
+ rsp->schq_nix1[NIX_TXSCH_LVL_TL3] =
+ rvu_rsrc_free_count(&txsch->schq);
+
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
+ rsp->schq_nix1[NIX_TXSCH_LVL_TL2] =
+ rvu_rsrc_free_count(&txsch->schq);
+ }
+
+ rsp->schq_nix1[NIX_TXSCH_LVL_TL1] = 1;
+out:
+ rsp->schq[NIX_TXSCH_LVL_TL1] = 1;
+ mutex_unlock(&rvu->rsrc_lock);
+
+ return 0;
+}
+
+int rvu_mbox_handler_free_rsrc_cnt(struct rvu *rvu, struct msg_req *req,
+ struct free_rsrcs_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_block *block;
+ struct nix_txsch *txsch;
+ struct nix_hw *nix_hw;
+ int pf, curlfs;
+
+ if (!is_rvu_otx2(rvu))
+ return free_rsrc_cnt(rvu, req, rsp);
+
+ mutex_lock(&rvu->rsrc_lock);
+ pf = rvu_get_pf(pcifunc);
+
+ block = &hw->block[BLKADDR_NPA];
+ curlfs = rvu_blk_count_rsrc(block, pcifunc, RVU_PFVF_PF_SHIFT);
+ rsp->npa = rvu->pf_limits.npa->a[pf].val - curlfs;
+
+ block = &hw->block[BLKADDR_NIX0];
+ curlfs = rvu_blk_count_rsrc(block, pcifunc, RVU_PFVF_PF_SHIFT);
+ rsp->nix = rvu->pf_limits.nix->a[pf].val - curlfs;
+
+ block = &hw->block[BLKADDR_NIX1];
+ rsp->nix1 = rvu_rsrc_free_count(&block->lf);
+
+ block = &hw->block[BLKADDR_SSO];
+ curlfs = rvu_blk_count_rsrc(block, pcifunc, RVU_PFVF_PF_SHIFT);
+ rsp->sso = rvu->pf_limits.sso->a[pf].val - curlfs;
+
+ block = &hw->block[BLKADDR_SSOW];
+ curlfs = rvu_blk_count_rsrc(block, pcifunc, RVU_PFVF_PF_SHIFT);
+ rsp->ssow = rvu->pf_limits.ssow->a[pf].val - curlfs;
+
+ block = &hw->block[BLKADDR_TIM];
+ curlfs = rvu_blk_count_rsrc(block, pcifunc, RVU_PFVF_PF_SHIFT);
+ rsp->tim = rvu->pf_limits.tim->a[pf].val - curlfs;
+
+ block = &hw->block[BLKADDR_CPT0];
+ rsp->cpt = rvu_rsrc_free_count(&block->lf);
+
+ block = &hw->block[BLKADDR_CPT1];
+ rsp->cpt1 = rvu_rsrc_free_count(&block->lf);
+
+ block = &hw->block[BLKADDR_REE0];
+ rsp->ree0 = rvu_rsrc_free_count(&block->lf);
+
+ block = &hw->block[BLKADDR_REE1];
+ rsp->ree1 = rvu_rsrc_free_count(&block->lf);
+
+ if (rvu->hw->cap.nix_fixed_txschq_mapping) {
+ rsp->schq[NIX_TXSCH_LVL_SMQ] = 1;
+ rsp->schq[NIX_TXSCH_LVL_TL4] = 1;
+ rsp->schq[NIX_TXSCH_LVL_TL3] = 1;
+ rsp->schq[NIX_TXSCH_LVL_TL2] = 1;
+ /* NIX1 */
+ if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
+ goto out;
+ rsp->schq_nix1[NIX_TXSCH_LVL_SMQ] = 1;
+ rsp->schq_nix1[NIX_TXSCH_LVL_TL4] = 1;
+ rsp->schq_nix1[NIX_TXSCH_LVL_TL3] = 1;
+ rsp->schq_nix1[NIX_TXSCH_LVL_TL2] = 1;
+ } else {
+ nix_hw = get_nix_hw(hw, BLKADDR_NIX0);
+ if (!nix_hw)
+ goto err;
+
+ curlfs = rvu_txsch_count_rsrc(rvu, NIX_TXSCH_LVL_SMQ, pcifunc,
+ RVU_PFVF_PF_SHIFT, nix_hw);
+ rsp->schq[NIX_TXSCH_LVL_SMQ] =
+ rvu->pf_limits.smq->a[pf].val - curlfs;
+
+ curlfs = rvu_txsch_count_rsrc(rvu, NIX_TXSCH_LVL_TL4, pcifunc,
+ RVU_PFVF_PF_SHIFT, nix_hw);
+ rsp->schq[NIX_TXSCH_LVL_TL4] =
+ rvu->pf_limits.tl4->a[pf].val - curlfs;
+
+ curlfs = rvu_txsch_count_rsrc(rvu, NIX_TXSCH_LVL_TL3, pcifunc,
+ RVU_PFVF_PF_SHIFT, nix_hw);
+ rsp->schq[NIX_TXSCH_LVL_TL3] =
+ rvu->pf_limits.tl3->a[pf].val - curlfs;
+
+ curlfs = rvu_txsch_count_rsrc(rvu, NIX_TXSCH_LVL_TL2, pcifunc,
+ RVU_PFVF_PF_SHIFT, nix_hw);
+ rsp->schq[NIX_TXSCH_LVL_TL2] =
+ rvu->pf_limits.tl2->a[pf].val - curlfs;
+ /* NIX1 */
+ if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
+ goto out;
+ nix_hw = get_nix_hw(hw, BLKADDR_NIX1);
+ if (!nix_hw)
+ goto err;
+
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
+ rsp->schq_nix1[NIX_TXSCH_LVL_SMQ] =
+ rvu_rsrc_free_count(&txsch->schq);
+
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL4];
+ rsp->schq_nix1[NIX_TXSCH_LVL_TL4] =
+ rvu_rsrc_free_count(&txsch->schq);
+
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL3];
+ rsp->schq_nix1[NIX_TXSCH_LVL_TL3] =
+ rvu_rsrc_free_count(&txsch->schq);
+
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
+ rsp->schq_nix1[NIX_TXSCH_LVL_TL2] =
+ rvu_rsrc_free_count(&txsch->schq);
+ }
+
+ rsp->schq_nix1[NIX_TXSCH_LVL_TL1] = 1;
+out:
+ rsp->schq[NIX_TXSCH_LVL_TL1] = 1;
+err:
+ mutex_unlock(&rvu->rsrc_lock);
+
+ return 0;
+}
+
+int rvu_check_txsch_policy(struct rvu *rvu, struct nix_txsch_alloc_req *req,
+ u16 pcifunc)
+{
+ int lvl, req_schq, pf = rvu_get_pf(pcifunc);
+ int limit, familylfs, delta;
+ struct nix_txsch *txsch;
+ struct nix_hw *nix_hw;
+ int blkaddr;
+
+ if (!is_rvu_otx2(rvu))
+ return 0;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+
+ if (!nix_hw)
+ return -ENODEV;
+
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ txsch = &nix_hw->txsch[lvl];
+ req_schq = req->schq_contig[lvl] + req->schq[lvl];
+
+ switch (lvl) {
+ case NIX_TXSCH_LVL_SMQ:
+ limit = rvu->pf_limits.smq->a[pf].val;
+ break;
+ case NIX_TXSCH_LVL_TL4:
+ limit = rvu->pf_limits.tl4->a[pf].val;
+ break;
+ case NIX_TXSCH_LVL_TL3:
+ limit = rvu->pf_limits.tl3->a[pf].val;
+ break;
+ case NIX_TXSCH_LVL_TL2:
+ limit = rvu->pf_limits.tl2->a[pf].val;
+ break;
+ case NIX_TXSCH_LVL_TL1:
+ if (req_schq > 2)
+ return -ENOSPC;
+ continue;
+ }
+
+ familylfs = rvu_txsch_count_rsrc(rvu, lvl, pcifunc,
+ RVU_PFVF_PF_SHIFT, nix_hw);
+ delta = req_schq - rvu_txsch_count_rsrc(rvu, lvl, pcifunc,
+ 0, nix_hw);
+
+ if ((delta > 0) && /* always allow usage decrease */
+ ((limit < familylfs + delta) ||
+ (delta > rvu_rsrc_free_count(&txsch->schq))))
+ return -ENOSPC;
+ }
+
+ return 0;
+}
+
+int rvu_check_rsrc_policy(struct rvu *rvu, struct rsrc_attach *req,
+ u16 pcifunc)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ int free_lfs, mappedlfs, familylfs, limit, delta;
+ struct rvu_hwinfo *hw = rvu->hw;
+ int pf = rvu_get_pf(pcifunc);
+ struct rvu_block *block;
+
+ if (!is_rvu_otx2(rvu))
+ return 0;
+
+ /* Only one NIX LF can be attached */
+ if (req->nixlf) {
+ block = &hw->block[BLKADDR_NIX0];
+ free_lfs = rvu_rsrc_free_count(&block->lf);
+ limit = rvu->pf_limits.nix->a[pf].val;
+ familylfs = rvu_blk_count_rsrc(block, pcifunc,
+ RVU_PFVF_PF_SHIFT);
+ if (!free_lfs || (limit == familylfs))
+ goto fail;
+ }
+
+ if (req->sso) {
+ block = &hw->block[BLKADDR_SSO];
+ mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
+ free_lfs = rvu_rsrc_free_count(&block->lf);
+ limit = rvu->pf_limits.sso->a[pf].val;
+ familylfs = rvu_blk_count_rsrc(block, pcifunc,
+ RVU_PFVF_PF_SHIFT);
+ /* Check if additional resources are available */
+ delta = req->sso - mappedlfs;
+ if ((delta > 0) && /* always allow usage decrease */
+ ((limit < familylfs + delta) ||
+ (delta > free_lfs)))
+ goto fail;
+ }
+
+ if (req->ssow) {
+ block = &hw->block[BLKADDR_SSOW];
+ mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
+ free_lfs = rvu_rsrc_free_count(&block->lf);
+ limit = rvu->pf_limits.ssow->a[pf].val;
+ familylfs = rvu_blk_count_rsrc(block, pcifunc,
+ RVU_PFVF_PF_SHIFT);
+ /* Check if additional resources are available */
+ delta = req->ssow - mappedlfs;
+ if ((delta > 0) && /* always allow usage decrease */
+ ((limit < familylfs + delta) ||
+ (delta > free_lfs)))
+ goto fail;
+ }
+
+ if (req->timlfs) {
+ block = &hw->block[BLKADDR_TIM];
+ mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
+ free_lfs = rvu_rsrc_free_count(&block->lf);
+ limit = rvu->pf_limits.tim->a[pf].val;
+ familylfs = rvu_blk_count_rsrc(block, pcifunc,
+ RVU_PFVF_PF_SHIFT);
+ /* Check if additional resources are available */
+ delta = req->timlfs - mappedlfs;
+ if ((delta > 0) && /* always allow usage decrease */
+ ((limit < familylfs + delta) ||
+ (delta > free_lfs)))
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ dev_info(rvu->dev, "Request for %s failed\n", block->name);
+ return -ENOSPC;
+}
+
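+/* Reject quota changes while the PF/VF still has resources attached */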
+static int check_mapped_rsrcs(void *arg, struct rvu_quota *quota, int new_val)
+{
+ struct rvu_pfvf *pf = arg;
+ int addr;
+
+ for (addr = 0; addr < BLK_COUNT; addr++) {
+ if (rvu_get_rsrc_mapcount(pf, addr) > 0)
+ return 1;
+ }
+ return 0;
+}
+
+static struct rvu_quota_ops pf_limit_ops = {
+ .pre_store = check_mapped_rsrcs,
+};
+
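+/* Partition block LFs across PFs based on each PF's RVU device type */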
+static void rvu_set_default_limits(struct rvu *rvu)
+{
+ int i, nvfs, cpt_rvus, npa_rvus, sso_rvus, nix_rvus, nsso, nssow, ntim;
+ int total_cpt_lfs, ncptpf_cptlfs = 0, nssopf_cptlfs = 0;
+ int nnpa, nnix, nsmq = 0, ntl4 = 0, ntl3 = 0, ntl2 = 0;
+ unsigned short devid;
+
+ /* First pass, count number of SSO/TIM PFs. */
+ sso_rvus = 0;
+ nix_rvus = 0;
+ cpt_rvus = 0;
+ npa_rvus = 0;
+ for (i = 0; i < rvu->hw->total_pfs; i++) {
+ if (rvu->pf[i].pdev == NULL)
+ continue;
+ devid = rvu->pf[i].pdev->device;
+ if (devid == PCI_DEVID_OCTEONTX2_SSO_RVU_PF)
+ sso_rvus++;
+ else if (devid == PCI_DEVID_OCTEONTX2_RVU_PF ||
+ devid == PCI_DEVID_OCTEONTX2_RVU_AF ||
+ devid == PCI_DEVID_OCTEONTX2_SDP_RVU_PF)
+ nix_rvus++;
+ else if (devid == PCI_DEVID_OCTEONTX2_CPT_RVU_PF)
+ cpt_rvus++;
+ else if (devid == PCI_DEVID_OCTEONTX2_NPA_RVU_PF)
+ npa_rvus++;
+ }
+ /* Calculate default partitioning. */
+ nsso = rvu->pf_limits.sso->max_sum / sso_rvus;
+ nssow = rvu->pf_limits.ssow->max_sum / sso_rvus;
+ ntim = rvu->pf_limits.tim->max_sum / sso_rvus;
+ total_cpt_lfs = rvu->pf_limits.cpt->max_sum;
+ /* Divide CPT among SSO and CPT PFs since cores shouldn't be shared. */
+ if (total_cpt_lfs) {
+ /* One extra LF needed for inline ipsec inbound configuration */
+ ncptpf_cptlfs = num_online_cpus() + 1;
+ nssopf_cptlfs = (total_cpt_lfs - ncptpf_cptlfs) / sso_rvus;
+ }
+ /* NPA/NIX count depends on DTS VF config. Allocate until run out. */
+ nnpa = rvu->pf_limits.npa->max_sum;
+ nnix = rvu->pf_limits.nix->max_sum;
+ if (!rvu->hw->cap.nix_fixed_txschq_mapping) {
+ nsmq = rvu->pf_limits.smq->max_sum / nix_rvus;
+ ntl4 = rvu->pf_limits.tl4->max_sum / nix_rvus;
+ ntl3 = rvu->pf_limits.tl3->max_sum / nix_rvus;
+ ntl2 = rvu->pf_limits.tl2->max_sum / nix_rvus;
+ }
+
+ /* Second pass, set the default limit values. */
+ for (i = 0; i < rvu->hw->total_pfs; i++) {
+ if (rvu->pf[i].pdev == NULL)
+ continue;
+ nvfs = pci_sriov_get_totalvfs(rvu->pf[i].pdev);
+ switch (rvu->pf[i].pdev->device) {
+ case PCI_DEVID_OCTEONTX2_RVU_AF:
+ nnix -= nvfs;
+ nnpa -= nvfs;
+ rvu->pf_limits.nix->a[i].val = nnix > 0 ? nvfs : 0;
+ rvu->pf_limits.npa->a[i].val = nnpa > 0 ? nvfs : 0;
+ if (rvu->hw->cap.nix_fixed_txschq_mapping)
+ break;
+ rvu->pf_limits.smq->a[i].val = nsmq;
+ rvu->pf_limits.tl4->a[i].val = ntl4;
+ rvu->pf_limits.tl3->a[i].val = ntl3;
+ rvu->pf_limits.tl2->a[i].val = ntl2;
+ break;
+ case PCI_DEVID_OCTEONTX2_RVU_PF:
+ nnix -= 1 + nvfs;
+ nnpa -= 1 + nvfs;
+ rvu->pf_limits.nix->a[i].val = nnix > 0 ? 1 + nvfs : 0;
+ rvu->pf_limits.npa->a[i].val = nnpa > 0 ? 1 + nvfs : 0;
+ if (rvu->hw->cap.nix_fixed_txschq_mapping)
+ break;
+ rvu->pf_limits.smq->a[i].val = nsmq;
+ rvu->pf_limits.tl4->a[i].val = ntl4;
+ rvu->pf_limits.tl3->a[i].val = ntl3;
+ rvu->pf_limits.tl2->a[i].val = ntl2;
+ break;
+ case PCI_DEVID_OCTEONTX2_SSO_RVU_PF:
+ nnpa -= 1 + nvfs;
+ rvu->pf_limits.npa->a[i].val = nnpa > 0 ? 1 + nvfs : 0;
+ rvu->pf_limits.sso->a[i].val = nsso;
+ rvu->pf_limits.ssow->a[i].val = nssow;
+ rvu->pf_limits.tim->a[i].val = ntim;
+ rvu->pf_limits.cpt->a[i].val = nssopf_cptlfs;
+ break;
+ case PCI_DEVID_OCTEONTX2_NPA_RVU_PF:
+ nnpa -= 1 + nvfs;
+ rvu->pf_limits.npa->a[i].val = nnpa > 0 ? 1 + nvfs : 0;
+ break;
+ case PCI_DEVID_OCTEONTX2_CPT_RVU_PF:
+ case PCI_DEVID_OCTEONTX2_CPT10_RVU_PF:
+ nnpa -= 1;
+ rvu->pf_limits.npa->a[i].val = nnpa > 0 ? 1 : 0;
+ rvu->pf_limits.cpt->a[i].val = ncptpf_cptlfs;
+ break;
+ case PCI_DEVID_OCTEONTX2_SDP_RVU_PF:
+ nnix -= 1 + nvfs;
+ rvu->pf_limits.nix->a[i].val = nnix > 0 ? 1 + nvfs : 0;
+ if (rvu->hw->cap.nix_fixed_txschq_mapping)
+ break;
+ rvu->pf_limits.smq->a[i].val = nsmq;
+ rvu->pf_limits.tl4->a[i].val = ntl4;
+ rvu->pf_limits.tl3->a[i].val = ntl3;
+ rvu->pf_limits.tl2->a[i].val = ntl2;
+ break;
+ }
+ }
+}
+
+static int rvu_create_limits_sysfs(struct rvu *rvu)
+{
+ struct pci_dev *pdev;
+ struct rvu_pfvf *pf;
+ int i, err = 0;
+
+ for (i = 0; i < rvu->hw->total_pfs; i++) {
+ pf = &rvu->pf[i];
+ if (!pf->pdev)
+ continue;
+ pdev = pf->pdev;
+
+ pf->limits_kobj = kobject_create_and_add("limits",
+ &pdev->dev.kobj);
+
+ if (quota_sysfs_create("sso", pf->limits_kobj, rvu->dev,
+ &rvu->pf_limits.sso->a[i], pf)) {
+ dev_err(rvu->dev,
+ "Failed to allocate quota for sso on %s\n",
+ pci_name(pdev));
+ err = -EFAULT;
+ break;
+ }
+
+ if (quota_sysfs_create("ssow", pf->limits_kobj, rvu->dev,
+ &rvu->pf_limits.ssow->a[i], pf)) {
+ dev_err(rvu->dev,
+ "Failed to allocate quota for ssow, on %s\n",
+ pci_name(pdev));
+ err = -EFAULT;
+ break;
+ }
+
+ if (quota_sysfs_create("tim", pf->limits_kobj, rvu->dev,
+ &rvu->pf_limits.tim->a[i], pf)) {
+ dev_err(rvu->dev,
+ "Failed to allocate quota for tim, on %s\n",
+ pci_name(pdev));
+ err = -EFAULT;
+ break;
+ }
+
+ if (quota_sysfs_create("cpt", pf->limits_kobj, rvu->dev,
+ &rvu->pf_limits.cpt->a[i], pf)) {
+ dev_err(rvu->dev,
+ "Failed to allocate quota for cpt, on %s\n",
+ pci_name(pdev));
+ err = -EFAULT;
+ break;
+ }
+
+ if (quota_sysfs_create("npa", pf->limits_kobj, rvu->dev,
+ &rvu->pf_limits.npa->a[i], pf)) {
+ dev_err(rvu->dev,
+ "Failed to allocate quota for npa, on %s\n",
+ pci_name(pdev));
+ err = -EFAULT;
+ break;
+ }
+
+ if (quota_sysfs_create("nix", pf->limits_kobj, rvu->dev,
+ &rvu->pf_limits.nix->a[i], pf)) {
+ dev_err(rvu->dev,
+ "Failed to allocate quota for nix, on %s\n",
+ pci_name(pdev));
+ err = -EFAULT;
+ break;
+ }
+
+ /* In fixed TXSCHQ case each LF is assigned only 1 queue. */
+ if (rvu->hw->cap.nix_fixed_txschq_mapping)
+ continue;
+
+ if (quota_sysfs_create("smq", pf->limits_kobj, rvu->dev,
+ &rvu->pf_limits.smq->a[i], pf)) {
+ dev_err(rvu->dev, "Failed to allocate quota for smq on %s\n",
+ pci_name(pf->pdev));
+ err = -EFAULT;
+ break;
+ }
+
+ if (quota_sysfs_create("tl4", pf->limits_kobj, rvu->dev,
+ &rvu->pf_limits.tl4->a[i], pf)) {
+ dev_err(rvu->dev, "Failed to allocate quota for tl4 on %s\n",
+ pci_name(pf->pdev));
+ err = -EFAULT;
+ break;
+ }
+
+ if (quota_sysfs_create("tl3", pf->limits_kobj, rvu->dev,
+ &rvu->pf_limits.tl3->a[i], pf)) {
+ dev_err(rvu->dev, "Failed to allocate quota for tl3 on %s\n",
+ pci_name(pf->pdev));
+ err = -EFAULT;
+ break;
+ }
+
+ if (quota_sysfs_create("tl2", pf->limits_kobj, rvu->dev,
+ &rvu->pf_limits.tl2->a[i], pf)) {
+ dev_err(rvu->dev, "Failed to allocate quota for tl2 on %s\n",
+ pci_name(pf->pdev));
+ err = -EFAULT;
+ break;
+ }
+ }
+
+ return err;
+}
+
+void rvu_policy_destroy(struct rvu *rvu)
+{
+ struct rvu_pfvf *pf = NULL;
+ int i;
+
+ if (!is_rvu_otx2(rvu))
+ return;
+
+ quotas_free(rvu->pf_limits.sso);
+ quotas_free(rvu->pf_limits.ssow);
+ quotas_free(rvu->pf_limits.npa);
+ quotas_free(rvu->pf_limits.cpt);
+ quotas_free(rvu->pf_limits.tim);
+ quotas_free(rvu->pf_limits.nix);
+
+ rvu->pf_limits.sso = NULL;
+ rvu->pf_limits.ssow = NULL;
+ rvu->pf_limits.npa = NULL;
+ rvu->pf_limits.cpt = NULL;
+ rvu->pf_limits.tim = NULL;
+ rvu->pf_limits.nix = NULL;
+
+ /* txschq limits are allocated only when the mapping is not fixed */
+ if (!rvu->hw->cap.nix_fixed_txschq_mapping) {
+ quotas_free(rvu->pf_limits.smq);
+ quotas_free(rvu->pf_limits.tl4);
+ quotas_free(rvu->pf_limits.tl3);
+ quotas_free(rvu->pf_limits.tl2);
+
+ rvu->pf_limits.smq = NULL;
+ rvu->pf_limits.tl4 = NULL;
+ rvu->pf_limits.tl3 = NULL;
+ rvu->pf_limits.tl2 = NULL;
+ }
+
+ for (i = 0; i < rvu->hw->total_pfs; i++) {
+ pf = &rvu->pf[i];
+ kobject_del(pf->limits_kobj);
+ }
+}
+
+int rvu_policy_init(struct rvu *rvu)
+{
+ struct nix_hw *nix_hw = get_nix_hw(rvu->hw, BLKADDR_NIX0);
+ struct pci_dev *pdev = rvu->pdev;
+ struct rvu_hwinfo *hw = rvu->hw;
+ int err = -EINVAL, i = 0;
+ u32 max = 0;
+
+ if (!is_rvu_otx2(rvu))
+ return 0;
+
+ if (!nix_hw)
+ goto error;
+
+ max = hw->block[BLKADDR_SSO].lf.max;
+ rvu->pf_limits.sso = quotas_alloc(rvu->hw->total_pfs, max, max,
+ 0, &rvu->rsrc_lock, &pf_limit_ops);
+ if (!rvu->pf_limits.sso) {
+ dev_err(rvu->dev, "Failed to allocate sso limits\n");
+ err = -EFAULT;
+ goto error;
+ }
+
+ max = hw->block[BLKADDR_SSOW].lf.max;
+ rvu->pf_limits.ssow = quotas_alloc(rvu->hw->total_pfs, max, max,
+ 0, &rvu->rsrc_lock, &pf_limit_ops);
+ if (!rvu->pf_limits.ssow) {
+ dev_err(rvu->dev, "Failed to allocate ssow limits\n");
+ err = -EFAULT;
+ goto error;
+ }
+
+ max = hw->block[BLKADDR_TIM].lf.max;
+ rvu->pf_limits.tim = quotas_alloc(rvu->hw->total_pfs, max, max,
+ 0, &rvu->rsrc_lock, &pf_limit_ops);
+ if (!rvu->pf_limits.tim) {
+ dev_err(rvu->dev, "Failed to allocate tim limits\n");
+ err = -EFAULT;
+ goto error;
+ }
+
+ max = hw->block[BLKADDR_CPT0].lf.max;
+ rvu->pf_limits.cpt = quotas_alloc(rvu->hw->total_pfs, max, max,
+ 0, &rvu->rsrc_lock, &pf_limit_ops);
+ if (!rvu->pf_limits.cpt) {
+ dev_err(rvu->dev, "Failed to allocate cpt limits\n");
+ err = -EFAULT;
+ goto error;
+ }
+
+ /* Because the limits also track VFs under a PF, the maximum NPA LF
+ * limit for a single PF has to be max, not 1. Same for NIX below.
+ */
+ max = hw->block[BLKADDR_NPA].lf.max;
+ rvu->pf_limits.npa = quotas_alloc(rvu->hw->total_pfs, max, max,
+ 0, &rvu->rsrc_lock, &pf_limit_ops);
+ if (!rvu->pf_limits.npa) {
+ dev_err(rvu->dev, "Failed to allocate npa limits\n");
+ err = -EFAULT;
+ goto error;
+ }
+
+ max = hw->block[BLKADDR_NIX0].lf.max + hw->block[BLKADDR_NIX1].lf.max;
+ rvu->pf_limits.nix = quotas_alloc(rvu->hw->total_pfs, max, max,
+ 0, &rvu->rsrc_lock, &pf_limit_ops);
+ if (!rvu->pf_limits.nix) {
+ dev_err(rvu->dev, "Failed to allocate nix limits\n");
+ err = -EFAULT;
+ goto error;
+ }
+
+ if (rvu->hw->cap.nix_fixed_txschq_mapping)
+ goto skip_txschq_limits;
+
+ max = nix_hw->txsch[NIX_TXSCH_LVL_SMQ].schq.max;
+ rvu->pf_limits.smq = quotas_alloc(hw->total_pfs, max, max, 0,
+ &rvu->rsrc_lock, &pf_limit_ops);
+ if (!rvu->pf_limits.smq) {
+ dev_err(rvu->dev, "Failed to allocate SQM txschq limits\n");
+ err = -EFAULT;
+ goto error;
+ }
+
+ max = nix_hw->txsch[NIX_TXSCH_LVL_TL4].schq.max;
+ rvu->pf_limits.tl4 = quotas_alloc(hw->total_pfs, max, max, 0,
+ &rvu->rsrc_lock, &pf_limit_ops);
+ if (!rvu->pf_limits.tl4) {
+ dev_err(rvu->dev, "Failed to allocate TL4 txschq limits\n");
+ err = -EFAULT;
+ goto error;
+ }
+
+ max = nix_hw->txsch[NIX_TXSCH_LVL_TL3].schq.max;
+ rvu->pf_limits.tl3 = quotas_alloc(hw->total_pfs, max, max, 0,
+ &rvu->rsrc_lock, &pf_limit_ops);
+ if (!rvu->pf_limits.tl3) {
+ dev_err(rvu->dev, "Failed to allocate TL3 txschq limits\n");
+ err = -EFAULT;
+ goto error;
+ }
+
+ max = nix_hw->txsch[NIX_TXSCH_LVL_TL2].schq.max;
+ rvu->pf_limits.tl2 = quotas_alloc(hw->total_pfs, max, max, 0,
+ &rvu->rsrc_lock, &pf_limit_ops);
+ if (!rvu->pf_limits.tl2) {
+ dev_err(rvu->dev, "Failed to allocate TL2 txschq limits\n");
+ err = -EFAULT;
+ goto error;
+ }
+
+skip_txschq_limits:
+ for (i = 0; i < hw->total_pfs; i++)
+ rvu->pf[i].pdev =
+ pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
+ i + 1, 0);
+
+ rvu_set_default_limits(rvu);
+
+ err = rvu_create_limits_sysfs(rvu);
+ if (err) {
+ dev_err(rvu->dev, "Failed to create limits sysfs\n");
+ goto error;
+ }
+
+ return 0;
+
+error:
+ rvu_policy_destroy(rvu);
+ return err;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_validation.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_validation.h
new file mode 100644
index 000000000000..731dde3ab0f3
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_validation.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell.
+ *
+ */
+
+#ifndef RVU_VALIDATION_H
+#define RVU_VALIDATION_H
+
+struct rvu;
+struct rvu_quotas;
+
+struct rvu_quota {
+ struct kobj_attribute sysfs;
+ /* Device to scope logs to */
+ struct device *dev;
+ /* Kobject of the sysfs file */
+ struct kobject *parent;
+ /* Pointer to base structure */
+ struct rvu_quotas *base;
+ /* Argument passed to the quota_ops when this quota is modified */
+ void *ops_arg;
+ /* Value of the quota */
+ int val;
+};
+
+struct rvu_quota_ops {
+ /*
+ * Called before sysfs store(). store() proceeds only if this returns 0.
+ * It is called with struct rvu_quotas::lock taken.
+ */
+ int (*pre_store)(void *arg, struct rvu_quota *quota, int new_val);
+ /* Called after sysfs store(). */
+ void (*post_store)(void *arg, struct rvu_quota *quota, int old_val);
+};
+
+struct rvu_quotas {
+ struct rvu_quota_ops ops;
+ struct mutex *lock; /* lock taken for each sysfs operation */
+ u32 cnt; /* number of elements in arr */
+ u32 max; /* maximum value for a single quota */
+ u64 max_sum; /* maximum sum of all quotas */
+ struct rvu_quota a[]; /* array of quota assignments */
+};
+
+struct rvu_limits {
+ struct rvu_quotas *sso;
+ struct rvu_quotas *ssow;
+ struct rvu_quotas *tim;
+ struct rvu_quotas *cpt;
+ struct rvu_quotas *npa;
+ struct rvu_quotas *nix;
+ struct rvu_quotas *smq;
+ struct rvu_quotas *tl4;
+ struct rvu_quotas *tl3;
+ struct rvu_quotas *tl2;
+};
+
+int rvu_policy_init(struct rvu *rvu);
+void rvu_policy_destroy(struct rvu *rvu);
+int rvu_check_rsrc_policy(struct rvu *rvu,
+ struct rsrc_attach *req, u16 pcifunc);
+int rvu_check_txsch_policy(struct rvu *rvu, struct nix_txsch_alloc_req *req,
+ u16 pcifunc);
+
+int rvu_mbox_handler_free_rsrc_cnt(struct rvu *rvu, struct msg_req *req,
+ struct free_rsrcs_rsp *rsp);
+#endif /* RVU_VALIDATION_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/Makefile b/drivers/net/ethernet/marvell/octeontx2/bphy/Makefile
new file mode 100644
index 000000000000..a4dfa1b5c9d4
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for Marvell's OcteonTX2 BPHY RFOE netdev driver
+#
+
+obj-$(CONFIG_OCTEONTX2_BPHY_RFOE_NETDEV) += octeontx2_bphy_netdev.o
+
+#EXTRA_CFLAGS += -DDEBUG
+
+octeontx2_bphy_netdev-y := otx2_bphy_main.o otx2_rfoe.o otx2_rfoe_ethtool.o otx2_rfoe_ptp.o \
+ otx2_cpri.o otx2_cpri_ethtool.o otx2_bphy_debugfs.o \
+ cnf10k_rfoe.o cnf10k_rfoe_ethtool.o cnf10k_rfoe_ptp.o
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/bphy_common.h b/drivers/net/ethernet/marvell/octeontx2/bphy/bphy_common.h
new file mode 100644
index 000000000000..41018b33b07a
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/bphy_common.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell BPHY Netdev Driver
+ *
+ * Copyright (C) 2021 Marvell.
+ */
+
+#ifndef _BPHY_COMMON_H_
+#define _BPHY_COMMON_H_
+
+/* BPHY definitions */
+#define OTX2_BPHY_PCI_VENDOR_ID 0x177D
+#define OTX2_BPHY_PCI_DEVICE_ID 0xA089
+
+/* eCPRI ethertype */
+#define ETH_P_ECPRI 0xAEFE
+
+/* max ptp tx requests */
+extern int max_ptp_req;
+
+/* reg base address */
+extern void __iomem *bphy_reg_base;
+extern void __iomem *psm_reg_base;
+extern void __iomem *rfoe_reg_base;
+extern void __iomem *bcn_reg_base;
+extern void __iomem *ptp_reg_base;
+extern void __iomem *cpri_reg_base;
+
+enum port_link_state {
+ LINK_STATE_DOWN,
+ LINK_STATE_UP,
+};
+
+/* iova to kernel virtual addr */
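+/* assumes the IOVA is mapped in @domain and that the backing pages sit in the kernel linear map (phys_to_virt) */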
+static inline void *otx2_iova_to_virt(struct iommu_domain *domain, u64 iova)
+{
+ return phys_to_virt(iommu_iova_to_phys(domain, iova));
+}
+
+#endif
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/bphy_netdev_comm_if.h b/drivers/net/ethernet/marvell/octeontx2/bphy/bphy_netdev_comm_if.h
new file mode 100644
index 000000000000..9fdeba5be2a3
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/bphy_netdev_comm_if.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell BPHY Netdev Driver
+ *
+ * Copyright (C) 2021 Marvell.
+ */
+
+#ifndef _BPHY_NETDEV_COMM_IF_H_
+#define _BPHY_NETDEV_COMM_IF_H_
+
+/* Max LMAC's per RFOE MHAB */
+#define MAX_LMAC_PER_RFOE 4
+
+/* Max Lanes per CPRI MHAB */
+#define MAX_LANE_PER_CPRI 4
+
+#define MAX_PTP_MSG_PER_LMAC 4 /* 16 Per RFoE */
+#define MAX_OTH_MSG_PER_LMAC 16 /* 64 Per RFoE */
+/* 64 per RFoE; RFoE2 shall have 32 entries */
+#define MAX_OTH_MSG_PER_RFOE (MAX_OTH_MSG_PER_LMAC * MAX_LMAC_PER_RFOE)
+
+/**
+ * @enum bphy_netdev_if_type
+ * @brief BPHY Interface Types
+ *
+ */
+enum bphy_netdev_if_type {
+ IF_TYPE_ETHERNET = 0,
+ IF_TYPE_CPRI = 1,
+ IF_TYPE_NONE = 2,
+ IF_TYPE_MAX,
+};
+
+/**
+ * @enum bphy_netdev_packet_type
+ * @brief Packet types
+ *
+ */
+enum bphy_netdev_packet_type {
+ PACKET_TYPE_PTP = 0,
+ PACKET_TYPE_ECPRI = 1,
+ PACKET_TYPE_OTHER = 2,
+ PACKET_TYPE_MAX,
+};
+
+#endif
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_bphy_hw.h b/drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_bphy_hw.h
new file mode 100644
index 000000000000..9b2a7a02b564
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_bphy_hw.h
@@ -0,0 +1,482 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell CNF10K BPHY Netdev Driver
+ *
+ * Copyright (C) 2021 Marvell.
+ */
+
+#ifndef _CNF10K_BPHY_HW_H_
+#define _CNF10K_BPHY_HW_H_
+
+#include <linux/types.h>
+
+/* PSM register offsets */
+#define PSM_QUEUE_CMD_LO(a) (0x0 + (a) * 0x10)
+#define PSM_QUEUE_CMD_HI(a) (0x8 + (a) * 0x10)
+#define PSM_QUEUE_CFG(a) (0x1000 + (a) * 0x10)
+#define PSM_QUEUE_PTR(a) (0x2000 + (a) * 0x10)
+#define PSM_QUEUE_SPACE(a) (0x3000 + (a) * 0x10)
+#define PSM_QUEUE_TIMEOUT_CFG(a) (0x4000 + (a) * 0x10)
+#define PSM_QUEUE_INFO(a) (0x5000 + (a) * 0x10)
+#define PSM_QUEUE_ENA_W1S(a) (0x10000 + (a) * 0x8)
+#define PSM_QUEUE_ENA_W1C(a) (0x10100 + (a) * 0x8)
+#define PSM_QUEUE_FULL_STS(a) (0x10200 + (a) * 0x8)
+#define PSM_QUEUE_BUSY_STS(a) (0x10300 + (a) * 0x8)
+
+/* BPHY PSM GPINT register offsets */
+#define PSM_INT_GP_SUM_W1C(a) (0x10E0000 + (a) * 0x100)
+#define PSM_INT_GP_SUM_W1S(a) (0x10E0040 + (a) * 0x100)
+#define PSM_INT_GP_ENA_W1C(a) (0x10E0080 + (a) * 0x100)
+#define PSM_INT_GP_ENA_W1S(a) (0x10E00C0 + (a) * 0x100)
+
+/* RFOE MHAB register offsets */
+#define RFOEX_RX_CTL(a) (0x0818ULL | \
+ ((unsigned long)(a) << 24))
+#define RFOEX_RX_VLANX_CFG(a, b) (0x0870ULL | \
+ ((unsigned long)(a) << 24) | \
+ ((b) << 3))
+#define RFOEX_RX_INDIRECT_INDEX_OFFSET(a) (0x13F8ULL | \
+ ((unsigned long)(a) << 24))
+#define RFOEX_RX_IND_FTX_CFG(a, b) (0x1400ULL | \
+ (((unsigned long)(a) << 24)) + \
+ ((b) << 3))
+#define RFOEX_RX_IND_MBT_CFG(a) (0x1420ULL | \
+ ((unsigned long)(a) << 24))
+#define RFOEX_RX_IND_MBT_CFG2(a) (0x1428ULL | \
+ ((unsigned long)(a) << 24))
+#define RFOEX_RX_IND_MBT_ADDR(a) (0x1430ULL | \
+ ((unsigned long)(a) << 24))
+#define RFOEX_RX_IND_MBT_SEG_STATE(a) (0x1438ULL | \
+ ((unsigned long)(a) << 24))
+#define RFOEX_RX_IND_VLANX_FWD(a, b) (0x14D0ULL | \
+ ((unsigned long)(a) << 24) | \
+ ((b) << 3))
+#define RFOEX_RX_IND_JDT_CFG0(a) (0x1440ULL | \
+ ((unsigned long)(a) << 24))
+#define RFOEX_RX_IND_JDT_CFG1(a) (0x1448ULL | \
+ ((unsigned long)(a) << 24))
+#define RFOEX_RX_IND_JDT_CFG2(a) (0x1490ULL | \
+ ((unsigned long)(a) << 24))
+#define RFOEX_RX_IND_JDT_PTR(a) (0x1450ULL | \
+ ((unsigned long)(a) << 24))
+#define RFOEX_RX_IND_JDT_STATE(a) (0x1478ULL | \
+ ((unsigned long)(a) << 24))
+#define RFOEX_RX_IND_ECPRI_FT_CFG(a) (0x14C0ULL | \
+ ((unsigned long)(a) << 24))
+#define RFOEX_TX_PTP_TSTMP_W0(a, b) (0x7A0ULL | \
+ (((unsigned long)(a) << 24)) | \
+ ((b) << 3))
+#define RFOEX_TX_PTP_TSTMP_W1(a, b) (0x7C0ULL | \
+ (((unsigned long)(a) << 24)) | \
+ ((b) << 3))
+#define RFOEX_TX_PKT_STAT(a, b) (0x720ULL | \
+ (((unsigned long)(a) << 24)) | \
+ ((b) << 3))
+#define RFOEX_TX_OCTS_STAT(a, b) (0x740ULL | \
+ (((unsigned long)(a) << 24)) | \
+ ((b) << 3))
+#define RFOEX_RX_VLAN_DROP_STAT(a, b) (0x8A0ULL | \
+ (((unsigned long)(a) << 24)) | \
+ ((b) << 3))
+#define RFOEX_RX_RPM_PKT_STAT(a, b) (0x15C0ULL | \
+ (((unsigned long)(a) << 24)) | \
+ ((b) << 3))
+#define RFOEX_RX_RPM_OCTS_STAT(a, b) (0x15E0ULL | \
+ (((unsigned long)(a) << 24)) | \
+ ((b) << 3))
+
+/* BCN register offsets and definitions */
+#define BCN_CAPTURE_CFG 0x400
+#define BCN_CAPTURE_N1_N2 0x410
+#define BCN_CAPTURE_PTP 0x430
+
+/* BCN_CAPTURE_CFG register definitions */
+#define CAPT_EN BIT(0)
+#define CAPT_TRIG_SW (3UL << 8)
+
+/* CPRI register offsets */
+#define CPRIX_RXD_GMII_UL_CBUF_CFG1(a) (0x1000ULL | \
+ ((unsigned long)(a) << 24))
+#define CPRIX_RXD_GMII_UL_CBUF_CFG2(a) (0x1008ULL | \
+ ((unsigned long)(a) << 24))
+#define CPRIX_RXD_GMII_UL_RD_DOORBELL(a) (0x1010ULL | \
+ ((unsigned long)(a) << 24))
+#define CPRIX_RXD_GMII_UL_SW_RD_PTR(a) (0x1018ULL | \
+ ((unsigned long)(a) << 24))
+#define CPRIX_RXD_GMII_UL_NXT_WR_PTR(a) (0x1020ULL | \
+ ((unsigned long)(a) << 24))
+#define CPRIX_RXD_GMII_UL_PKT_COUNT(a) (0x1028ULL | \
+ ((unsigned long)(a) << 24))
+#define CPRIX_TXD_GMII_DL_CBUF_CFG1(a) (0x1100ULL | \
+ ((unsigned long)(a) << 24))
+#define CPRIX_TXD_GMII_DL_CBUF_CFG2(a) (0x1108ULL | \
+ ((unsigned long)(a) << 24))
+#define CPRIX_TXD_GMII_DL_WR_DOORBELL(a) (0x1110ULL | \
+ ((unsigned long)(a) << 24))
+#define CPRIX_TXD_GMII_DL_SW_WR_PTR(a) (0x1118ULL | \
+ ((unsigned long)(a) << 24))
+#define CPRIX_TXD_GMII_DL_NXT_RD_PTR(a) (0x1120ULL | \
+ ((unsigned long)(a) << 24))
+#define CPRIX_ETH_UL_INT(a) (0x280ULL | \
+ ((unsigned long)(a) << 24))
+#define CPRIX_ETH_UL_INT_ENA_W1S(a) (0x288ULL | \
+ ((unsigned long)(a) << 24))
+#define CPRIX_ETH_UL_INT_ENA_W1C(a) (0x290ULL | \
+ ((unsigned long)(a) << 24))
+#define CPRIX_ETH_UL_INT_W1S(a) (0x298ULL | \
+ ((unsigned long)(a) << 24))
+#define CPRIX_ETH_BAD_CRC_CNT(a, b) (0x400ULL | \
+ ((unsigned long)(a) << 24) | \
+ ((unsigned long)(b) << 11))
+#define CPRIX_ETH_UL_ERR_CNT(a, b) (0x408ULL | \
+ ((unsigned long)(a) << 24) | \
+ ((unsigned long)(b) << 11))
+#define CPRIX_ETH_UL_OSIZE_CNT(a, b) (0x410ULL | \
+ ((unsigned long)(a) << 24) | \
+ ((unsigned long)(b) << 11))
+#define CPRIX_ETH_UL_USIZE_CNT(a, b) (0x418ULL | \
+ ((unsigned long)(a) << 24) | \
+ ((unsigned long)(b) << 11))
+#define CPRIX_ETH_UL_FIFO_ORUN_CNT(a, b) (0x420ULL | \
+ ((unsigned long)(a) << 24) | \
+ ((unsigned long)(b) << 11))
+#define CPRIX_ETH_UL_GPKTS_CNT(a, b) (0x428ULL | \
+ ((unsigned long)(a) << 24) | \
+ ((unsigned long)(b) << 11))
+#define CPRIX_ETH_UL_BOCT_CNT(a, b) (0x430ULL | \
+ ((unsigned long)(a) << 24) | \
+ ((unsigned long)(b) << 11))
+#define CPRIX_ETH_UL_GOCT_CNT(a, b) (0x438ULL | \
+ ((unsigned long)(a) << 24) | \
+ ((unsigned long)(b) << 11))
+#define CPRIX_ETH_UL_MALFORMED_CNT(a, b) (0x440ULL | \
+ ((unsigned long)(a) << 24) | \
+ ((unsigned long)(b) << 11))
+#define CPRIX_ETH_DL_GOCTETS_CNT(a, b) (0x450ULL | \
+ ((unsigned long)(a) << 24) | \
+ ((unsigned long)(b) << 11))
+#define CPRIX_ETH_DL_GPKTS_CNT(a, b) (0x458ULL | \
+ ((unsigned long)(a) << 24) | \
+ ((unsigned long)(b) << 11))
+
+/* MHAB Structures */
+struct mhbw_jd_dma_cfg_word_0_s {
+ u64 dma_mode : 3;
+ u64 reserved1 : 1;
+ u64 dswap : 3;
+ u64 cmd_type : 2;
+ u64 reserved2 : 7;
+ u64 chunk_size : 16;
+ u64 block_size : 16;
+ u64 thread_id : 6;
+ u64 reserved3 : 2;
+ u64 group_id : 4;
+ u64 reserved4 : 4;
+};
+
+struct mhbw_jd_dma_cfg_word_1_s {
+ u64 start_addr : 53;
+ u64 reserved1 : 11;
+};
+
+struct rfoex_abx_slotx_configuration {
+ u64 pkt_mode : 2;
+ u64 da_sel : 3;
+ u64 sa_sel : 3;
+ u64 etype_sel : 3;
+ u64 flowid : 8;
+ u64 subtype : 8;
+ u64 reserved1 : 2;
+ u64 sample_mode : 1;
+ u64 sample_width : 5;
+ u64 sample_width_option : 1;
+ u64 sample_width_sat_bypass : 1;
+ u64 orderinfotype : 1;
+ u64 orderinfooffset : 5;
+ u64 antenna : 8;
+ u64 symbol : 8;
+ u64 sos : 1;
+ u64 eos : 1;
+ u64 orderinfo_insert : 1;
+ u64 custom_timestamp_insert : 1;
+ u64 rfoe_mode : 1;
+};
+
+struct rfoex_abx_slotx_configuration1 {
+ u64 rbmap_bytes : 8;
+ u64 reserved1 : 16;
+ u64 hdr_len : 8;
+ u64 presentation_time_offset : 29;
+ u64 reserved2 : 1;
+ u64 sof_mode : 2;
+};
+
+struct rfoex_abx_slotx_configuration2 {
+ u64 vlan_sel : 3;
+ u64 vlan_num : 2;
+ u64 ptp_mode : 1;
+ u64 ecpri_id_insert : 1;
+ u64 ecpri_seq_id_insert : 1;
+ u64 ecpri_rev : 8;
+ u64 ecpri_msgtype : 8;
+ u64 ecpri_id : 16;
+ u64 ecpri_seq_id : 16;
+ u64 cc_mac_sec_en : 1;
+ u64 ptp_ring_id : 2;
+ u64 reserved1 : 5;
+};
+
+struct rfoex_abx_slotx_configuration3 {
+ u64 pkt_len : 16;
+ u64 lmacid : 2;
+ u64 tx_err : 1;
+ u64 reserved : 45;
+};
+
+struct mhab_job_desc_cfg {
+ struct rfoex_abx_slotx_configuration cfg;
+ struct rfoex_abx_slotx_configuration1 cfg1;
+ struct rfoex_abx_slotx_configuration2 cfg2;
+ struct rfoex_abx_slotx_configuration3 cfg3;
+} __packed;
+
+/* PSM Enumerations */
+enum psm_opcode_e {
+ PSM_OP_NOP = 0x0,
+ PSM_OP_ADDJOB = 0x1,
+ PSM_OP_CONTJOB = 0x2,
+ PSM_OP_DJCNT = 0x10,
+ PSM_OP_GPINT = 0x11,
+ PSM_OP_WAIT = 0x12,
+ PSM_OP_ADDWORK = 0x13,
+ PSM_OP_FREE = 0x14,
+ PSM_OP_WRSTS = 0x15,
+ PSM_OP_WRMSG = 0x16,
+ PSM_OP_ADDNOTIF = 0x17,
+ PSM_OP_QRST = 0x20,
+ PSM_OP_QBLK = 0x21,
+ PSM_OP_QRUN = 0x22,
+ PSM_OP_BCAST = 0x3E,
+ PSM_OP_RSP = 0x3F,
+};
+
+/* PSM Structures */
+struct psm_cmd_addjob_s {
+ /* W0 */
+ u64 opcode : 6;
+ u64 rsrc_set : 2;
+ u64 qid : 8;
+ u64 waitcond : 8;
+ u64 jobtag : 16;
+ u64 reserved1 : 8;
+ u64 mabq : 1;
+ u64 reserved2 : 3;
+ u64 tmem : 1;
+ u64 reserved3 : 3;
+ u64 jobtype : 8;
+ /* W1 */
+ u64 jobptr : 53;
+ u64 reserved4 : 8;
+ u64 gm_id : 3;
+};
+
+/* RFOE Enumerations */
+enum rfoe_ecpri_hdr_err_type_e {
+ NONE = 0x0,
+ CONCATENATION = 0x1,
+ ILLEGAL_VERSION = 0x2,
+ ILLEGAL_RSVD = 0x3,
+ PC_ID = 0x4,
+};
+
+enum rfoe_ecpri_pcid_flowid_mode_e {
+ HASH = 0x0,
+ BASE = 0x1,
+ LMAC_TRUNCATE = 0x2,
+ SHIFT = 0x3,
+};
+
+enum rfoe_order_info_type_e {
+ SEQNUM = 0x0,
+ TIMESTAMP = 0x1,
+};
+
+enum rfoe_rx_dir_ctl_pkt_type_e {
+ ROE = 0x0,
+ CHI = 0x1,
+ ALT = 0x2,
+ ECPRI = 0x4,
+ GENERIC = 0x8,
+};
+
+enum rfoe_rx_pswt_e {
+ RSVD5 = 0x0,
+ ROE_BCN_TYPE = 0x1,
+ RSVD6 = 0x2,
+ ECPRI_BCN_TYPE = 0x3,
+};
+
+enum rfoe_rx_pkt_err_e {
+ RE_NONE = 0x0,
+ RE_PARTIAL = 0x1,
+ RE_JABBER = 0x2,
+ RE_FCS = 0x7,
+ RE_FCS_RCV = 0x8,
+ RE_TERMINATE = 0x9,
+ RE_RX_CTL = 0xB,
+ RE_SKIP = 0xC,
+};
+
+enum rfoe_rx_pkt_logger_idx_e {
+ RX_PKT = 0x0,
+ TX_PKT = 0x1,
+};
+
+/* RFOE Structures */
+struct ecpri_hdr_s {
+ u64 seq_id : 16;
+ u64 pc_id : 16;
+ u64 pyld_size : 16;
+ u64 msg_type : 8;
+ u64 concatenation : 1;
+ u64 reserved : 3;
+ u64 version : 4;
+};
+
+struct rfoe_ab_cfg_w3_s {
+ u64 pkt_len : 16;
+ u64 lmac_id : 2;
+ u64 tx_err : 1;
+ u64 reserved : 45;
+};
+
+struct rfoe_psw_s {
+ /* W0 */
+ u64 jd_ptr : 53;
+ u64 jd_ptr_tmem : 1;
+ u64 jd_ptr_type : 1;
+ u64 reserved1 : 1;
+ u64 gm_id : 3;
+ u64 reserved2 : 3;
+ u64 pswt : 2;
+ /* W1 */
+ u64 ethertype : 16;
+ u64 eindex : 5;
+ u64 reserved3 : 3;
+ u64 pkt_len : 16;
+ u64 mcs_err_sts : 8;
+ u64 mac_err_sts : 6;
+ u64 reserved4 : 2;
+ u64 pkt_type : 4;
+ u64 reserved5 : 4;
+ /* W2 */
+ u64 proto_sts_word;
+ /* W3 */
+ u64 rfoe_tstamp;
+ /* W4 */
+ u64 ptp_timestamp;
+ /* W5 */
+ u64 reserved6;
+ /* W6 */
+ u64 reserved7 : 24;
+ u64 dec_error : 8;
+ u64 dec_num_sections : 8;
+ u64 dec_num_syminc : 8;
+ u64 reserved8 : 16;
+ /* W7 */
+ u64 reserved9;
+};
+
+struct rfoe_psw_w0_s {
+ u64 jd_ptr : 53;
+ u64 jd_ptr_tmem : 1;
+ u64 jd_ptr_type : 1;
+ u64 reserved1 : 1;
+ u64 gm_id : 3;
+ u64 reserved2 : 3;
+ u64 pswt : 2;
+};
+
+struct rfoe_psw_w1_s {
+ u64 ethertype : 16;
+ u64 eindex : 5;
+ u64 reserved3 : 3;
+ u64 pkt_len : 16;
+ u64 mcs_err_sts : 8;
+ u64 mac_err_sts : 6;
+ u64 reserved4 : 2;
+ u64 pkt_type : 4;
+ u64 reserved5 : 4;
+};
+
+struct rfoe_psw_w2_ecpri_s {
+ u64 msg_type : 8;
+ u64 pc_id : 16;
+ u64 seq_id : 16;
+ u64 flow_id : 10;
+ u64 lmac_id : 2;
+ u64 rfoe_id : 4;
+ u64 sa_table_index : 7;
+ u64 reserved : 1;
+};
+
+struct rfoe_psw_w2_roe_s {
+ u64 subtype : 8;
+ u64 fd_symbol : 8;
+ u64 fd_antid : 8;
+ u64 reserved1 : 16;
+ u64 flowid : 8;
+ u64 reserved2 : 2;
+ u64 lmac_id : 2;
+ u64 rfoe_id : 4;
+ u64 sa_table_index : 7;
+ u64 reserved3 : 1;
+};
+
+struct rfoe_psw_w3_bcn_s {
+ u64 n2 : 24;
+ u64 n1 : 40;
+};
+
+struct rfoe_psw_w4_s {
+ u64 ptp_timestamp;
+};
+
+struct rfoe_rx_pkt_log_s {
+ u64 timestamp;
+ u64 psw_w2;
+ u64 psw_w1;
+ u64 psw_w0;
+};
+
+struct rfoe_timestamp_s {
+ u32 time_tick : 16;
+ u32 sf : 4;
+ u32 bfn : 12;
+};
+
+struct rfoe_tx_pkt_log_s {
+ u64 timestamp;
+ u64 lmac_id : 2;
+ u64 rfoe_id : 4;
+ u64 jobid : 16;
+ u64 drop : 1;
+ u64 tx_err : 1;
+ u64 reserved : 40;
+};
+
+struct rfoe_tx_ptp_tstmp_s {
+ u64 ptp_timestamp;
+ u64 reserved1 : 2;
+ u64 rfoe_id : 4;
+ u64 jobid : 16;
+ u64 drop : 1;
+ u64 tx_err : 1;
+ u64 reserved2 : 39;
+ u64 valid : 1;
+};
+
+struct rfoe_rx_ind_vlanx_fwd {
+ u64 fwd : 64;
+};
+
+#endif /* _CNF10K_BPHY_HW_H_ */
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_bphy_netdev_comm_if.h b/drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_bphy_netdev_comm_if.h
new file mode 100644
index 000000000000..f9307b1e489e
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_bphy_netdev_comm_if.h
@@ -0,0 +1,296 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell CNF10K BPHY Netdev Driver
+ *
+ * Copyright (C) 2021 Marvell.
+ */
+
+#ifndef _CNF10K_BPHY_NETDEV_COMM_IF_H_
+#define _CNF10K_BPHY_NETDEV_COMM_IF_H_
+
+#include <linux/etherdevice.h>
+#include "bphy_netdev_comm_if.h"
+
+#define BPHY_MAX_RFOE_MHAB 8 /* Max RFOE MHAB instances */
+#define BPHY_MAX_CPRI_MHAB 4 /* Max CPRI MHAB instances */
+
+#define MAX_PTP_RING 4 /* Max ptp rings per lmac */
+
+#define CNF10KB_VERSION 2 /* chip version */
+#define CNF10KA_VERSION 3 /* chip version */
+
+#define CHIP_CNF10KB(v) (((v) == CNF10KB_VERSION) ? 1 : 0)
+#define CHIP_CNF10KA(v) (((v) == CNF10KA_VERSION) ? 1 : 0)
+
+#define CHIP_CNF10K(v) ({ \
+ typeof(v) _v = (v); \
+ (CHIP_CNF10KB(_v) | CHIP_CNF10KA(_v)); \
+})
+
+/**
+ * @enum BPHY_NETDEV_CPRI_RX_GP_INT_e_
+ * @brief GP_INT numbers for CPRI Ethernet packet Rx notification
+ * by BPHY to netdev.
+ *
+ */
+enum bphy_netdev_cpri_rx_gp_int {
+ CNF10K_RX_GP_INT_CPRI0_ETH = 93, //PSM_GPINT93,
+ CNF10K_RX_GP_INT_CPRI1_ETH = 94, //PSM_GPINT94,
+ CNF10K_RX_GP_INT_CPRI2_ETH = 95, //PSM_GPINT95
+};
+
+/**
+ * @enum BPHY_NETDEV_TX_GP_INT_e_
+ * @brief GP_INT numbers for packet notification by netdev to BPHY.
+ *
+ */
+#ifdef CNF10KB
+enum bphy_netdev_tx_gp_int {
+ CNF10K_TX_GP_INT_RFOE0_LMAC0 = 32, //PSM_GPINT32,
+ CNF10K_TX_GP_INT_RFOE0_LMAC1 = 33, //PSM_GPINT33,
+
+ CNF10K_TX_GP_INT_RFOE1_LMAC2 = 34, //PSM_GPINT34,
+ CNF10K_TX_GP_INT_RFOE1_LMAC3 = 35, //PSM_GPINT35,
+
+ CNF10K_TX_GP_INT_RFOE2_LMAC0 = 36, //PSM_GPINT36,
+ CNF10K_TX_GP_INT_RFOE2_LMAC1 = 37, //PSM_GPINT37,
+
+ CNF10K_TX_GP_INT_RFOE3_LMAC2 = 38, //PSM_GPINT38,
+ CNF10K_TX_GP_INT_RFOE3_LMAC3 = 39, //PSM_GPINT39,
+
+ CNF10K_TX_GP_INT_RFOE4_LMAC0 = 40, //PSM_GPINT40,
+ CNF10K_TX_GP_INT_RFOE4_LMAC1 = 41, //PSM_GPINT41
+
+ CNF10K_TX_GP_INT_RFOE5_LMAC0 = 42, //PSM_GPINT42,
+ CNF10K_TX_GP_INT_RFOE5_LMAC1 = 43, //PSM_GPINT43,
+
+ CNF10K_TX_GP_INT_RFOE6_LMAC2 = 44, //PSM_GPINT44,
+ CNF10K_TX_GP_INT_RFOE6_LMAC3 = 45, //PSM_GPINT45,
+};
+#else
+enum bphy_netdev_tx_gp_int {
+ CNF10K_TX_GP_INT_RFOE0_LMAC0 = 32, //PSM_GPINT32,
+ CNF10K_TX_GP_INT_RFOE0_LMAC1 = 33, //PSM_GPINT33,
+ CNF10K_TX_GP_INT_RFOE0_LMAC2 = 34, //PSM_GPINT34,
+ CNF10K_TX_GP_INT_RFOE0_LMAC3 = 35, //PSM_GPINT35,
+
+ CNF10K_TX_GP_INT_RFOE1_LMAC0 = 36, //PSM_GPINT36,
+ CNF10K_TX_GP_INT_RFOE1_LMAC1 = 37, //PSM_GPINT37,
+ CNF10K_TX_GP_INT_RFOE1_LMAC2 = 38, //PSM_GPINT38,
+ CNF10K_TX_GP_INT_RFOE1_LMAC3 = 39, //PSM_GPINT39,
+};
+#endif
+
+/**
+ * @enum BPHY_NETDEV_CNF10K_RX_GP_INT_e_
+ * @brief GP_INT numbers for packet notification by BPHY to netdev.
+ *
+ */
+enum bphy_netdev_rx_gp_int {
+ CNF10K_RX_GP_INT_RFOE0_PTP = 63, //PSM_GPINT63,
+ CNF10K_RX_GP_INT_RFOE0_ECPRI = 62, //PSM_GPINT62,
+ CNF10K_RX_GP_INT_RFOE0_GENERIC = 61, //PSM_GPINT61,
+
+ CNF10K_RX_GP_INT_RFOE1_PTP = 60, //PSM_GPINT60,
+ CNF10K_RX_GP_INT_RFOE1_ECPRI = 59, //PSM_GPINT59,
+ CNF10K_RX_GP_INT_RFOE1_GENERIC = 58, //PSM_GPINT58,
+#ifdef CNF10KB
+ CNF10K_RX_GP_INT_RFOE2_PTP = 57, //PSM_GPINT57,
+ CNF10K_RX_GP_INT_RFOE2_ECPRI = 56, //PSM_GPINT56,
+ CNF10K_RX_GP_INT_RFOE2_GENERIC = 55, //PSM_GPINT55,
+
+ CNF10K_RX_GP_INT_RFOE3_PTP = 54, //PSM_GPINT54,
+ CNF10K_RX_GP_INT_RFOE3_ECPRI = 53, //PSM_GPINT53,
+ CNF10K_RX_GP_INT_RFOE3_GENERIC = 52, //PSM_GPINT52,
+
+ CNF10K_RX_GP_INT_RFOE4_PTP = 51, //PSM_GPINT51,
+ CNF10K_RX_GP_INT_RFOE4_ECPRI = 50, //PSM_GPINT50,
+ CNF10K_RX_GP_INT_RFOE4_GENERIC = 49, //PSM_GPINT49,
+
+ CNF10K_RX_GP_INT_RFOE5_PTP = 48, //PSM_GPINT48,
+ CNF10K_RX_GP_INT_RFOE5_ECPRI = 47, //PSM_GPINT47,
+ CNF10K_RX_GP_INT_RFOE5_GENERIC = 46, //PSM_GPINT46,
+
+ CNF10K_RX_GP_INT_RFOE6_PTP = 66, //PSM_GPINT66,
+ CNF10K_RX_GP_INT_RFOE6_ECPRI = 65, //PSM_GPINT65,
+ CNF10K_RX_GP_INT_RFOE6_GENERIC = 64, //PSM_GPINT64,
+#endif
+};
+
+/**
+ * @struct BPHY_NETDEV_RBUF_INFO_s
+ * @brief Information about the packet ring buffer which shall be used to
+ * send the packets from BPHY to netdev.
+ *
+ */
+struct cnf10k_bphy_ndev_rbuf_info {
+ enum bphy_netdev_packet_type pkt_type;
+ enum bphy_netdev_rx_gp_int gp_int_num;
+ u16 flow_id;
+ u16 mbt_index;
+ /**Maximum number of buffers in the Ring/Pool*/
+ u16 num_bufs;
+ /**MAX Buffer Size configured */
+ u16 buf_size; // TBC: 1536?
+ /**MBT buffer target memory*/
+ u8 mbt_target_mem;
+ /**Buffers starting address*/
+ u64 mbt_iova_addr;
+ u16 jdt_index;
+ /**Maximum number of JD buffers in the Ring/Pool*/
+ u16 num_jd;
+ /**MAX JD size configured */
+ u8 jd_size;
+ /**MBT buffer target memory*/
+ u8 jdt_target_mem;
+ /**Buffers starting address*/
+ u64 jdt_iova_addr;
+ u64 reserved[4];
+};
+
+/**
+ * @struct BPHY_NETDEV_TX_PSM_CMD_INFO_s
+ * @brief TX PSM command information definition to be shared with
+ * netdev for TX communication.
+ *
+ */
+struct cnf10k_bphy_ndev_tx_psm_cmd_info {
+ enum bphy_netdev_tx_gp_int gp_int_num; // Valid only for PTP messages
+ u64 jd_iova_addr;
+ u64 rd_dma_iova_addr;
+ u64 low_cmd;
+ u64 high_cmd;
+ u64 reserved[4];
+};
+
+/**
+ * @struct BPHY_NETDEV_TX_PTP_RING_INFO_s
+ * @brief TX PTP timestamp ring buffer configuration to be shared
+ * with netdev for reading ptp timestamp.
+ *
+ */
+struct cnf10k_bphy_ndev_tx_ptp_ring_info {
+ u8 is_enable;
+ u8 ring_idx;
+ /**Number of TX PTP timestamp entries in ring */
+ u8 ring_size;
+ /**PTP Ring buffer target memory*/
+ u8 ring_target_mem;
+ /**PTP Ring buffer byte swap mode when TMEM is LLC/DRAM*/
+ u8 dswap;
+ /**Stream ID*/
+ u8 gmid;
+ /**Buffers starting address*/
+ u64 ring_iova_addr;
+ u64 reserved[4];
+};
+
+/**
+ * @struct cnf10k_bphy_netdev_intf_info
+ * @brief LMAC lane number, mac address and status information
+ *
+ */
+struct cnf10k_bphy_ndev_intf_info {
+ u8 rfoe_num;
+ u8 lane_num;
+ /* Source mac address */
+ u8 eth_addr[ETH_ALEN];
+ /* LMAC interface status */
+ u8 status; //0-DOWN, 1-UP
+ /* Configuration valid status; this interface shall be
+ * invalid if this field is set to 0
+ */
+ u8 is_valid;
+ u64 reserved;
+};
+
+/**
+ * @struct BPHY_NETDEV_COMM_IF_s
+ * @brief The communication interface definitions which would be used
+ * by the netdev and bphy application.
+ *
+ */
+struct cnf10k_bphy_ndev_comm_if {
+ struct cnf10k_bphy_ndev_intf_info lmac_info;
+ struct cnf10k_bphy_ndev_rbuf_info rbuf_info[PACKET_TYPE_MAX];
+ /** Single array to handle both PTP and OTHER cmds info.
+ */
+ struct cnf10k_bphy_ndev_tx_psm_cmd_info
+ ptp_pkt_info[MAX_PTP_MSG_PER_LMAC];
+ struct cnf10k_bphy_ndev_tx_ptp_ring_info
+ ptp_ts_ring_info[MAX_PTP_RING];
+ u64 reserved[4];
+};
+
+/**
+ * @struct BPHY_NETDEV_CPRI_IF_s
+ * @brief Communication interface structure definition to be used by BPHY
+ * and NETDEV applications for CPRI Interface.
+ *
+ */
+struct cnf10k_bphy_ndev_cpri_intf_cfg {
+ u8 id; /**< CPRI_ID 0..2 */
+ u8 active_lane_mask; /**< Lane Id mask */
+ u8 ul_gp_int_num; /**< UL GP INT NUM */
+ u8 ul_int_threshold; /**< UL INT THRESHOLD */
+ u8 num_ul_buf; /**< Num UL Buffers */
+ u8 num_dl_buf; /**< Num DL Buffers */
+ u64 ul_circ_buf_iova_addr; /**< UL circular buffer base address */
+ u64 dl_circ_buf_iova_addr; /**< DL circular buffer base address */
+ u8 eth_addr[MAX_LANE_PER_CPRI][ETH_ALEN];
+ u64 reserved[4];
+};
+
+/**
+ * @struct BPHY_NETDEV_RFOE_10x_IF_s
+ * @brief New Communication interface structure definition to be used
+ * by BPHY and NETDEV applications for RFOE Interface.
+ *
+ */
+struct cnf10k_bphy_ndev_rfoe_if {
+ /**< Interface configuration */
+ struct cnf10k_bphy_ndev_comm_if if_cfg[MAX_LMAC_PER_RFOE];
+ /**TX JD cmds to send packets other than PTP;
+ * These are defined per RFoE and all LMACs can share
+ */
+ struct cnf10k_bphy_ndev_tx_psm_cmd_info
+ oth_pkt_info[MAX_OTH_MSG_PER_RFOE];
+ /**Packet types for which the RX flows are configured.*/
+ u8 pkt_type_mask;
+ u64 reserved[4];
+};
+
+/* hardware specific information */
+struct bphy_hw_params {
+ u32 chip_ver; /* (version << 4) | revision */
+ u32 reserved[15]; /* reserved for future extension */
+};
+
+/**
+ * @struct BPHY_NETDEV_COMM_INTF_CFG_s
+ * @brief ODP-NETDEV communication interface definition structure to
+ * share the RX/TX interface information.
+ *
+ */
+struct cnf10k_rfoe_ndev_comm_intf_cfg {
+ /**< BPHY Hardware parameters */
+ struct bphy_hw_params hw_params;
+ /**< RFOE Interface Configuration */
+ struct cnf10k_bphy_ndev_rfoe_if rfoe_if_cfg[BPHY_MAX_RFOE_MHAB];
+ u64 reserved[4];
+};
+
+/**
+ * @struct BPHY_CPRI_NETDEV_COMM_INTF_CFG_s
+ * @brief Main Communication interface structure definition to be used
+ * by BPHY and NETDEV applications for CPRI Interface.
+ *
+ */
+struct cnf10k_bphy_cpri_netdev_comm_intf_cfg {
+ /**< BPHY Hardware parameters */
+ struct bphy_hw_params hw_params;
+ /**< CPRI Interface Configuration */
+ struct cnf10k_bphy_ndev_cpri_intf_cfg cpri_if_cfg[BPHY_MAX_CPRI_MHAB];
+ u64 reserved[4];
+};
+
+#endif //_CNF10K_BPHY_NETDEV_COMM_IF_H_
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_rfoe.c b/drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_rfoe.c
new file mode 100644
index 000000000000..283ee7f51431
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_rfoe.c
@@ -0,0 +1,1427 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell CNF10K BPHY RFOE Netdev Driver
+ *
+ * Copyright (C) 2021 Marvell.
+ */
+
+#include "cnf10k_rfoe.h"
+#include "cnf10k_bphy_hw.h"
+
+/* global driver ctx */
+struct cnf10k_rfoe_drv_ctx cnf10k_rfoe_drv_ctx[CNF10K_RFOE_MAX_INTF];
+
+void cnf10k_bphy_intr_handler(struct otx2_bphy_cdev_priv *cdev_priv, u32 status)
+{
+ struct cnf10k_rfoe_drv_ctx *cnf10k_drv_ctx;
+ struct cnf10k_rfoe_ndev_priv *priv;
+ struct net_device *netdev;
+ int rfoe_num, i;
+ u32 intr_mask;
+
+ /* rx intr processing */
+ for (rfoe_num = 0; rfoe_num < cdev_priv->num_rfoe_mhab; rfoe_num++) {
+ intr_mask = CNF10K_RFOE_RX_INTR_MASK(rfoe_num);
+ if (status & intr_mask)
+ cnf10k_rfoe_rx_napi_schedule(rfoe_num, status);
+ }
+
+ /* tx intr processing */
+ for (i = 0; i < CNF10K_RFOE_MAX_INTF; i++) {
+ cnf10k_drv_ctx = &cnf10k_rfoe_drv_ctx[i];
+ if (cnf10k_drv_ctx->valid) {
+ netdev = cnf10k_drv_ctx->netdev;
+ priv = netdev_priv(netdev);
+ intr_mask = CNF10K_RFOE_TX_PTP_INTR_MASK(priv->rfoe_num,
+ priv->lmac_id,
+ cdev_priv->num_rfoe_lmac);
+ if ((status & intr_mask) && priv->ptp_tx_skb)
+ schedule_work(&priv->ptp_tx_work);
+ }
+ }
+}
+
+void cnf10k_rfoe_disable_intf(int rfoe_num)
+{
+ struct cnf10k_rfoe_drv_ctx *drv_ctx;
+ struct cnf10k_rfoe_ndev_priv *priv;
+ struct net_device *netdev;
+ int idx;
+
+ for (idx = 0; idx < CNF10K_RFOE_MAX_INTF; idx++) {
+ drv_ctx = &cnf10k_rfoe_drv_ctx[idx];
+ if (drv_ctx->rfoe_num == rfoe_num && drv_ctx->valid) {
+ netdev = drv_ctx->netdev;
+ priv = netdev_priv(netdev);
+ priv->if_type = IF_TYPE_NONE;
+ }
+ }
+}
+
+void cnf10k_bphy_rfoe_cleanup(void)
+{
+ struct cnf10k_rfoe_drv_ctx *drv_ctx = NULL;
+ struct cnf10k_rfoe_ndev_priv *priv;
+ struct cnf10k_rx_ft_cfg *ft_cfg;
+ struct net_device *netdev;
+ int i, idx;
+
+ for (i = 0; i < CNF10K_RFOE_MAX_INTF; i++) {
+ drv_ctx = &cnf10k_rfoe_drv_ctx[i];
+ if (drv_ctx->valid) {
+ netdev = drv_ctx->netdev;
+ priv = netdev_priv(netdev);
+ cnf10k_rfoe_ptp_destroy(priv);
+ unregister_netdev(netdev);
+ for (idx = 0; idx < PACKET_TYPE_MAX; idx++) {
+ if (!(priv->pkt_type_mask & (1U << idx)))
+ continue;
+ ft_cfg = &priv->rx_ft_cfg[idx];
+ netif_napi_del(&ft_cfg->napi);
+ }
+ --(priv->rfoe_common->refcnt);
+ if (priv->rfoe_common->refcnt == 0)
+ kfree(priv->rfoe_common);
+ free_netdev(netdev);
+ drv_ctx->valid = 0;
+ }
+ }
+}
+
+/* submit pending ptp tx requests */
+static void cnf10k_rfoe_ptp_submit_work(struct work_struct *work)
+{
+ struct cnf10k_rfoe_ndev_priv *priv = container_of(work,
+ struct cnf10k_rfoe_ndev_priv,
+ ptp_queue_work);
+ struct mhbw_jd_dma_cfg_word_0_s *jd_dma_cfg_word_0;
+ struct mhbw_jd_dma_cfg_word_1_s *jd_dma_cfg_word_1;
+ struct mhab_job_desc_cfg *jd_cfg_ptr;
+ struct rfoe_tx_ptp_tstmp_s *tx_tstmp;
+ struct psm_cmd_addjob_s *psm_cmd_lo;
+ struct tx_job_queue_cfg *job_cfg;
+ struct tx_job_entry *job_entry;
+ struct ptp_tstamp_skb *ts_skb;
+ u16 psm_queue_id, queue_space;
+ struct sk_buff *skb = NULL;
+ struct list_head *head;
+ u64 jd_cfg_ptr_iova;
+ unsigned long flags;
+ u64 regval;
+
+ job_cfg = &priv->tx_ptp_job_cfg;
+
+ spin_lock_irqsave(&job_cfg->lock, flags);
+
+ /* check pending ptp requests */
+ if (list_empty(&priv->ptp_skb_list.list)) {
+ netif_dbg(priv, tx_queued, priv->netdev,
+ "no pending ptp tx requests\n");
+ spin_unlock_irqrestore(&job_cfg->lock, flags);
+ return;
+ }
+
+ /* check psm queue space available */
+ psm_queue_id = job_cfg->psm_queue_id;
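+ /* the low 15 bits of PSM_QUEUE_SPACE appear to hold the free command slot count */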
+ regval = readq(priv->psm_reg_base + PSM_QUEUE_SPACE(psm_queue_id));
+ queue_space = regval & 0x7FFF;
+ if (queue_space < 1) {
+ netif_dbg(priv, tx_queued, priv->netdev,
+ "ptp tx psm queue %d full\n",
+ psm_queue_id);
+ /* reschedule to check later */
+ spin_unlock_irqrestore(&job_cfg->lock, flags);
+ schedule_work(&priv->ptp_queue_work);
+ return;
+ }
+
+ if (test_and_set_bit_lock(PTP_TX_IN_PROGRESS, &priv->state)) {
+ netif_dbg(priv, tx_queued, priv->netdev, "ptp tx ongoing\n");
+ spin_unlock_irqrestore(&job_cfg->lock, flags);
+ return;
+ }
+
+ head = &priv->ptp_skb_list.list;
+ ts_skb = list_entry(head->next, struct ptp_tstamp_skb, list);
+ skb = ts_skb->skb;
+ list_del(&ts_skb->list);
+ kfree(ts_skb);
+ priv->ptp_skb_list.count--;
+
+ netif_dbg(priv, tx_queued, priv->netdev,
+ "submitting ptp tx skb %pS\n", skb);
+
+ priv->last_tx_ptp_jiffies = jiffies;
+
+ /* ptp timestamp entry is 128-bit in size */
+ tx_tstmp = (struct rfoe_tx_ptp_tstmp_s *)
+ ((u8 *)priv->ptp_ring_cfg.ptp_ring_base +
+ (16 * priv->ptp_ring_cfg.ptp_ring_idx));
+ memset(tx_tstmp, 0, sizeof(struct rfoe_tx_ptp_tstmp_s));
+
+ /* get the tx job entry */
+ job_entry = (struct tx_job_entry *)
+ &job_cfg->job_entries[job_cfg->q_idx];
+
+ netif_dbg(priv, tx_queued, priv->netdev,
+ "rfoe=%d lmac=%d psm_queue=%d tx_job_entry %d job_cmd_lo=0x%llx job_cmd_high=0x%llx jd_iova_addr=0x%llx\n",
+ priv->rfoe_num, priv->lmac_id, psm_queue_id, job_cfg->q_idx,
+ job_entry->job_cmd_lo, job_entry->job_cmd_hi,
+ job_entry->jd_iova_addr);
+
+ priv->ptp_tx_skb = skb;
+ psm_cmd_lo = (struct psm_cmd_addjob_s *)&job_entry->job_cmd_lo;
+ priv->ptp_job_tag = psm_cmd_lo->jobtag;
+
+ /* update length and block size in jd dma cfg word */
+ jd_cfg_ptr_iova = *(u64 *)((u8 *)job_entry->jd_ptr + 8);
+ jd_cfg_ptr = otx2_iova_to_virt(priv->iommu_domain, jd_cfg_ptr_iova);
+ jd_cfg_ptr->cfg3.pkt_len = skb->len;
+ jd_dma_cfg_word_0 = (struct mhbw_jd_dma_cfg_word_0_s *)
+ job_entry->rd_dma_ptr;
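+ /* block_size is counted in 4-byte words (the rx path does block_size << 2), rounded up to a 16-byte boundary */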
+ jd_dma_cfg_word_0->block_size = (((skb->len + 15) >> 4) * 4);
+
+ /* copy packet data to rd_dma_ptr start addr */
+ jd_dma_cfg_word_1 = (struct mhbw_jd_dma_cfg_word_1_s *)
+ ((u8 *)job_entry->rd_dma_ptr + 8);
+ memcpy(otx2_iova_to_virt(priv->iommu_domain,
+ jd_dma_cfg_word_1->start_addr),
+ skb->data, skb->len);
+
+ /* make sure that all memory writes are completed */
+ dma_wmb();
+
+ /* submit PSM job */
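+ /* the 128-bit ADDJOB command is issued as two 64-bit writes, W0 then W1 */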
+ writeq(job_entry->job_cmd_lo,
+ priv->psm_reg_base + PSM_QUEUE_CMD_LO(psm_queue_id));
+ writeq(job_entry->job_cmd_hi,
+ priv->psm_reg_base + PSM_QUEUE_CMD_HI(psm_queue_id));
+
+ /* increment queue index */
+ job_cfg->q_idx++;
+ if (job_cfg->q_idx == job_cfg->num_entries)
+ job_cfg->q_idx = 0;
+
+ spin_unlock_irqrestore(&job_cfg->lock, flags);
+}
+
+#define OTX2_RFOE_PTP_TSTMP_POLL_CNT 100
+
+/* ptp interrupt processing bottom half */
+static void cnf10k_rfoe_ptp_tx_work(struct work_struct *work)
+{
+ struct cnf10k_rfoe_ndev_priv *priv = container_of(work,
+ struct cnf10k_rfoe_ndev_priv,
+ ptp_tx_work);
+ struct rfoe_tx_ptp_tstmp_s *tx_tstmp;
+ struct skb_shared_hwtstamps ts;
+ u64 timestamp;
+ u16 jobid;
+
+ if (!priv->ptp_tx_skb) {
+ netif_err(priv, tx_done, priv->netdev,
+ "ptp tx skb not found, something wrong!\n");
+ goto submit_next_req;
+ }
+
+ /* make sure that all memory writes by rfoe are completed */
+ dma_rmb();
+
+ /* ptp timestamp entry is 128-bit in size */
+ tx_tstmp = (struct rfoe_tx_ptp_tstmp_s *)
+ ((u8 *)priv->ptp_ring_cfg.ptp_ring_base +
+ (16 * priv->ptp_ring_cfg.ptp_ring_idx));
+
+ /* match job id */
+ jobid = tx_tstmp->jobid;
+ if (jobid != priv->ptp_job_tag) {
+ netif_err(priv, tx_done, priv->netdev,
+ "ptp job id doesn't match, job_id=0x%x skb->job_tag=0x%x\n",
+ jobid, priv->ptp_job_tag);
+ priv->stats.tx_hwtstamp_failures++;
+ goto submit_next_req;
+ }
+
+ if (tx_tstmp->drop || tx_tstmp->tx_err) {
+ netif_err(priv, tx_done, priv->netdev,
+ "ptp tx timstamp error\n");
+ priv->stats.tx_hwtstamp_failures++;
+ goto submit_next_req;
+ }
+
+ /* update timestamp value in skb */
+ timestamp = tx_tstmp->ptp_timestamp;
+
+ memset(&ts, 0, sizeof(ts));
+ ts.hwtstamp = ns_to_ktime(timestamp);
+ skb_tstamp_tx(priv->ptp_tx_skb, &ts);
+
+submit_next_req:
+ priv->ptp_ring_cfg.ptp_ring_idx++;
+ if (priv->ptp_ring_cfg.ptp_ring_idx >= priv->ptp_ring_cfg.ptp_ring_size)
+ priv->ptp_ring_cfg.ptp_ring_idx = 0;
+ if (priv->ptp_tx_skb)
+ dev_kfree_skb_any(priv->ptp_tx_skb);
+ priv->ptp_tx_skb = NULL;
+ clear_bit_unlock(PTP_TX_IN_PROGRESS, &priv->state);
+ schedule_work(&priv->ptp_queue_work);
+}
+
+/* psm queue timer callback to check queue space */
+static void cnf10k_rfoe_tx_timer_cb(struct timer_list *t)
+{
+ struct cnf10k_rfoe_ndev_priv *priv =
+ container_of(t, struct cnf10k_rfoe_ndev_priv, tx_timer);
+ u16 psm_queue_id, queue_space;
+ int reschedule = 0;
+ u64 regval;
+
+ /* check psm queue space for both ptp and oth packets */
+ if (netif_queue_stopped(priv->netdev)) {
+ psm_queue_id = priv->tx_ptp_job_cfg.psm_queue_id;
+ // check queue space
+ regval = readq(priv->psm_reg_base +
+ PSM_QUEUE_SPACE(psm_queue_id));
+ queue_space = regval & 0x7FFF;
+ if (queue_space > 1) {
+ netif_wake_queue(priv->netdev);
+ reschedule = 0;
+ } else {
+ reschedule = 1;
+ }
+
+ psm_queue_id = priv->rfoe_common->tx_oth_job_cfg.psm_queue_id;
+ // check queue space
+ regval = readq(priv->psm_reg_base +
+ PSM_QUEUE_SPACE(psm_queue_id));
+ queue_space = regval & 0x7FFF;
+ if (queue_space > 1) {
+ netif_wake_queue(priv->netdev);
+ reschedule = 0;
+ } else {
+ reschedule = 1;
+ }
+ }
+
+ if (reschedule)
+ mod_timer(&priv->tx_timer, jiffies + msecs_to_jiffies(100));
+}
+
+static void cnf10k_rfoe_process_rx_pkt(struct cnf10k_rfoe_ndev_priv *priv,
+ struct cnf10k_rx_ft_cfg *ft_cfg,
+ int mbt_buf_idx)
+{
+ struct otx2_bphy_cdev_priv *cdev_priv = priv->cdev_priv;
+ struct mhbw_jd_dma_cfg_word_0_s *jd_dma_cfg_word_0;
+ u64 tstamp = 0, mbt_state, jdt_iova_addr;
+ struct rfoe_psw_w2_ecpri_s *ecpri_psw_w2;
+ struct rfoe_psw_w2_roe_s *rfoe_psw_w2;
+ struct cnf10k_rfoe_ndev_priv *priv2;
+ struct cnf10k_rfoe_drv_ctx *drv_ctx;
+ int found = 0, idx, len, pkt_type;
+ unsigned int ptp_message_len = 0;
+ struct rfoe_psw_s *psw = NULL;
+ struct net_device *netdev;
+ u8 *buf_ptr, *jdt_ptr;
+ struct sk_buff *skb;
+ u8 lmac_id;
+
+ /* read mbt state */
+ spin_lock(&cdev_priv->mbt_lock);
+ writeq(mbt_buf_idx, (priv->rfoe_reg_base +
+ RFOEX_RX_INDIRECT_INDEX_OFFSET(priv->rfoe_num)));
+ mbt_state = readq(priv->rfoe_reg_base +
+ RFOEX_RX_IND_MBT_SEG_STATE(priv->rfoe_num));
+ spin_unlock(&cdev_priv->mbt_lock);
+
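+ /* bits [19:16] of the segment state look like a packet error code, bit 20 a DMA error flag */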
+ if ((mbt_state >> 16 & 0xf) != 0) {
+ pr_err("rx pkt error: mbt_buf_idx=%d, err=%d\n",
+ mbt_buf_idx, (u8)(mbt_state >> 16 & 0xf));
+ return;
+ }
+ if (mbt_state >> 20 & 0x1) {
+ pr_err("rx dma error: mbt_buf_idx=%d\n", mbt_buf_idx);
+ return;
+ }
+
+ buf_ptr = (u8 *)ft_cfg->mbt_virt_addr +
+ (ft_cfg->buf_size * mbt_buf_idx);
+
+ pkt_type = ft_cfg->pkt_type;
+
+ psw = (struct rfoe_psw_s *)buf_ptr;
+ if (psw->mac_err_sts || psw->mcs_err_sts) {
+ net_warn_ratelimited("%s: psw mac_err_sts = 0x%x, mcs_err_sts=0x%x\n",
+ priv->netdev->name,
+ psw->mac_err_sts,
+ psw->mcs_err_sts);
+ return;
+ }
+
+ if (pkt_type != PACKET_TYPE_ECPRI) {
+ /* check that the psw type is correct: */
+ if (unlikely(psw->pkt_type == ECPRI)) {
+ net_warn_ratelimited("%s: pswt is eCPRI for pkt_type = %d\n",
+ priv->netdev->name, pkt_type);
+ return;
+ }
+ jdt_iova_addr = (u64)psw->jd_ptr;
+ rfoe_psw_w2 = (struct rfoe_psw_w2_roe_s *)&psw->proto_sts_word;
+ lmac_id = rfoe_psw_w2->lmac_id;
+ tstamp = psw->ptp_timestamp;
+ } else {
+ /* check that the psw type is correct: */
+ if (unlikely(psw->pkt_type != ECPRI)) {
+ net_warn_ratelimited("%s: pswt is not eCPRI for pkt_type = %d\n",
+ priv->netdev->name, pkt_type);
+ return;
+ }
+ jdt_iova_addr = (u64)psw->jd_ptr;
+ ecpri_psw_w2 = (struct rfoe_psw_w2_ecpri_s *)
+ &psw->proto_sts_word;
+ lmac_id = ecpri_psw_w2->lmac_id;
+ tstamp = psw->ptp_timestamp;
+ }
+
+ netif_dbg(priv, rx_status, priv->netdev,
+ "Rx: rfoe=%d lmac=%d mbt_buf_idx=%d\n",
+ priv->rfoe_num, lmac_id, mbt_buf_idx);
+
+ /* read jd ptr from psw */
+ jdt_ptr = otx2_iova_to_virt(priv->iommu_domain, jdt_iova_addr);
+ jd_dma_cfg_word_0 = (struct mhbw_jd_dma_cfg_word_0_s *)
+ ((u8 *)jdt_ptr + ft_cfg->jd_rd_offset);
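+ /* rd_dma block_size is in 4-byte words, so convert it to a byte length */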
+ len = (jd_dma_cfg_word_0->block_size) << 2;
+ netif_dbg(priv, rx_status, priv->netdev, "jd rd_dma len = %d\n", len);
+
+ if (unlikely(netif_msg_pktdata(priv))) {
+ netdev_printk(KERN_DEBUG, priv->netdev, "RX MBUF DATA:");
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 4,
+ buf_ptr, len, true);
+ }
+
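+ /* advance past the packet status words; pkt_offset is in 16-byte units */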
+ buf_ptr += (ft_cfg->pkt_offset * 16);
+ len -= (ft_cfg->pkt_offset * 16);
+
+ for (idx = 0; idx < CNF10K_RFOE_MAX_INTF; idx++) {
+ drv_ctx = &cnf10k_rfoe_drv_ctx[idx];
+ if (drv_ctx->valid && drv_ctx->rfoe_num == priv->rfoe_num &&
+ drv_ctx->lmac_id == lmac_id) {
+ found = 1;
+ break;
+ }
+ }
+ if (found) {
+ netdev = cnf10k_rfoe_drv_ctx[idx].netdev;
+ priv2 = netdev_priv(netdev);
+ } else {
+ pr_err("netdev not found, something went wrong!\n");
+ return;
+ }
+
+ /* drop the packet if interface is down */
+ if (unlikely(!netif_carrier_ok(netdev))) {
+ netif_err(priv2, rx_err, netdev,
+ "%s {rfoe%d lmac%d} link down, drop pkt\n",
+ netdev->name, priv2->rfoe_num,
+ priv2->lmac_id);
+ /* update stats */
+ if (pkt_type == PACKET_TYPE_PTP) {
+ priv2->stats.ptp_rx_dropped++;
+ priv2->last_rx_ptp_dropped_jiffies = jiffies;
+ } else if (pkt_type == PACKET_TYPE_ECPRI) {
+ priv2->stats.ecpri_rx_dropped++;
+ priv2->last_rx_dropped_jiffies = jiffies;
+ } else {
+ priv2->stats.rx_dropped++;
+ priv2->last_rx_dropped_jiffies = jiffies;
+ }
+ return;
+ }
+
+ skb = netdev_alloc_skb_ip_align(netdev, len);
+ if (!skb) {
+ netif_err(priv2, rx_err, netdev, "Rx: alloc skb failed\n");
+ return;
+ }
+
+ memcpy(skb->data, buf_ptr, len);
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ /* remove trailing padding for ptp packets */
+ if (skb->protocol == htons(ETH_P_1588)) {
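+ /* the PTP header carries messageLength at byte offset 2 */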
+ ptp_message_len = skb->data[2] << 8 | skb->data[3];
+ skb_trim(skb, ptp_message_len);
+ }
+
+ if (priv2->rx_hw_tstamp_en)
+ skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(tstamp);
+
+ netif_receive_skb(skb);
+
+ /* update stats */
+ if (pkt_type == PACKET_TYPE_PTP) {
+ priv2->stats.ptp_rx_packets++;
+ priv2->last_rx_ptp_jiffies = jiffies;
+ } else if (pkt_type == PACKET_TYPE_ECPRI) {
+ priv2->stats.ecpri_rx_packets++;
+ priv2->last_rx_jiffies = jiffies;
+ } else {
+ priv2->stats.rx_packets++;
+ priv2->last_rx_jiffies = jiffies;
+ }
+ priv2->stats.rx_bytes += skb->len;
+}
+
+static int cnf10k_rfoe_process_rx_flow(struct cnf10k_rfoe_ndev_priv *priv,
+ int pkt_type, int budget)
+{
+ struct otx2_bphy_cdev_priv *cdev_priv = priv->cdev_priv;
+ int count = 0, processed_pkts = 0;
+ struct cnf10k_rx_ft_cfg *ft_cfg;
+ u64 mbt_cfg;
+ u16 nxt_buf;
+ int *mbt_last_idx = &priv->rfoe_common->rx_mbt_last_idx[pkt_type];
+ u16 *prv_nxt_buf = &priv->rfoe_common->nxt_buf[pkt_type];
+
+ ft_cfg = &priv->rx_ft_cfg[pkt_type];
+
+ spin_lock(&cdev_priv->mbt_lock);
+ /* read mbt nxt_buf */
+ writeq(ft_cfg->mbt_idx,
+ priv->rfoe_reg_base +
+ RFOEX_RX_INDIRECT_INDEX_OFFSET(priv->rfoe_num));
+ mbt_cfg = readq(priv->rfoe_reg_base +
+ RFOEX_RX_IND_MBT_CFG(priv->rfoe_num));
+ spin_unlock(&cdev_priv->mbt_lock);
+
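+ /* bits [47:32] of MBT_CFG appear to hold the hardware's next buffer index */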
+ nxt_buf = (mbt_cfg >> 32) & 0xffff;
+
+ /* no mbt entries to process */
+ if (nxt_buf == *prv_nxt_buf) {
+ netif_dbg(priv, rx_status, priv->netdev,
+ "no rx packets to process, rfoe=%d pkt_type=%d mbt_idx=%d nxt_buf=%d mbt_buf_sw_head=%d\n",
+ priv->rfoe_num, pkt_type, ft_cfg->mbt_idx, nxt_buf,
+ *mbt_last_idx);
+ return 0;
+ }
+
+ *prv_nxt_buf = nxt_buf;
+
+ /* get count of pkts to process, check ring wrap condition */
+ if (*mbt_last_idx > nxt_buf) {
+ count = ft_cfg->num_bufs - *mbt_last_idx;
+ count += nxt_buf;
+ } else {
+ count = nxt_buf - *mbt_last_idx;
+ }
+
+ netif_dbg(priv, rx_status, priv->netdev,
+ "rfoe=%d pkt_type=%d mbt_idx=%d nxt_buf=%d mbt_buf_sw_head=%d count=%d\n",
+ priv->rfoe_num, pkt_type, ft_cfg->mbt_idx, nxt_buf,
+ *mbt_last_idx, count);
+
+ while (likely((processed_pkts < budget) && (processed_pkts < count))) {
+ cnf10k_rfoe_process_rx_pkt(priv, ft_cfg, *mbt_last_idx);
+
+ (*mbt_last_idx)++;
+ if (*mbt_last_idx == ft_cfg->num_bufs)
+ *mbt_last_idx = 0;
+
+ processed_pkts++;
+ }
+
+ return processed_pkts;
+}
+
+/* napi poll routine */
+static int cnf10k_rfoe_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct cnf10k_rfoe_ndev_priv *priv;
+ struct otx2_bphy_cdev_priv *cdev_priv;
+ int workdone = 0, pkt_type;
+ struct cnf10k_rx_ft_cfg *ft_cfg;
+ u64 intr_en, regval;
+
+ ft_cfg = container_of(napi, struct cnf10k_rx_ft_cfg, napi);
+ priv = ft_cfg->priv;
+ cdev_priv = priv->cdev_priv;
+ pkt_type = ft_cfg->pkt_type;
+
+ /* pkt processing loop */
+ workdone += cnf10k_rfoe_process_rx_flow(priv, pkt_type, budget);
+
+ if (workdone < budget) {
+ napi_complete_done(napi, workdone);
+
+ /* Re enable the Rx interrupts */
+ intr_en = PKT_TYPE_TO_INTR(pkt_type) <<
+ CNF10K_RFOE_RX_INTR_SHIFT(priv->rfoe_num);
+ spin_lock(&cdev_priv->lock);
+ if (priv->rfoe_num < 6) {
+ regval = readq(bphy_reg_base + PSM_INT_GP_ENA_W1S(1));
+ regval |= intr_en;
+ writeq(regval, bphy_reg_base + PSM_INT_GP_ENA_W1S(1));
+ } else {
+ regval = readq(bphy_reg_base + PSM_INT_GP_ENA_W1S(2));
+ regval |= intr_en;
+ writeq(regval, bphy_reg_base + PSM_INT_GP_ENA_W1S(2));
+ }
+ spin_unlock(&cdev_priv->lock);
+ }
+
+ return workdone;
+}
+
+/* Rx GPINT napi schedule api */
+void cnf10k_rfoe_rx_napi_schedule(int rfoe_num, u32 status)
+{
+ enum bphy_netdev_packet_type pkt_type;
+ struct cnf10k_rfoe_drv_ctx *drv_ctx;
+ struct cnf10k_rfoe_ndev_priv *priv;
+ struct cnf10k_rx_ft_cfg *ft_cfg;
+ int intf, bit_idx;
+ u32 intr_sts;
+ u64 regval;
+
+ for (intf = 0; intf < CNF10K_RFOE_MAX_INTF; intf++) {
+ drv_ctx = &cnf10k_rfoe_drv_ctx[intf];
+ /* ignore lmac, one interrupt/pkt_type/rfoe */
+ if (!(drv_ctx->valid && drv_ctx->rfoe_num == rfoe_num))
+ continue;
+ /* check if i/f down, napi disabled */
+ priv = netdev_priv(drv_ctx->netdev);
+ if (test_bit(RFOE_INTF_DOWN, &priv->state))
+ continue;
+ /* check rx pkt type */
+ intr_sts = ((status >> CNF10K_RFOE_RX_INTR_SHIFT(rfoe_num)) &
+ RFOE_RX_INTR_EN);
+ for (bit_idx = 0; bit_idx < PACKET_TYPE_MAX; bit_idx++) {
+ if (!(intr_sts & BIT(bit_idx)))
+ continue;
+ pkt_type = INTR_TO_PKT_TYPE(bit_idx);
+ if (unlikely(!(priv->pkt_type_mask & (1U << pkt_type))))
+ continue;
+ /* clear intr enable bit, re-enable in napi handler */
+ regval = PKT_TYPE_TO_INTR(pkt_type) <<
+ CNF10K_RFOE_RX_INTR_SHIFT(rfoe_num);
+ if (rfoe_num < 6)
+ writeq(regval, bphy_reg_base + PSM_INT_GP_ENA_W1C(1));
+ else
+ writeq(regval, bphy_reg_base + PSM_INT_GP_ENA_W1C(2));
+ /* schedule napi */
+ ft_cfg = &drv_ctx->ft_cfg[pkt_type];
+ napi_schedule(&ft_cfg->napi);
+ }
+ /* napi scheduled per pkt_type, return */
+ return;
+ }
+}
+
+static void cnf10k_rfoe_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct cnf10k_rfoe_ndev_priv *priv = netdev_priv(netdev);
+ struct otx2_rfoe_stats *dev_stats = &priv->stats;
+
+ stats->rx_bytes = dev_stats->rx_bytes;
+ stats->rx_packets = dev_stats->rx_packets +
+ dev_stats->ptp_rx_packets +
+ dev_stats->ecpri_rx_packets;
+ stats->rx_dropped = dev_stats->rx_dropped +
+ dev_stats->ptp_rx_dropped +
+ dev_stats->ecpri_rx_dropped;
+
+ stats->tx_bytes = dev_stats->tx_bytes;
+ stats->tx_packets = dev_stats->tx_packets +
+ dev_stats->ptp_tx_packets +
+ dev_stats->ecpri_tx_packets;
+ stats->tx_dropped = dev_stats->tx_dropped +
+ dev_stats->ptp_tx_dropped +
+ dev_stats->ecpri_tx_dropped;
+}
+
+static int cnf10k_rfoe_config_hwtstamp(struct net_device *netdev,
+ struct ifreq *ifr)
+{
+ struct cnf10k_rfoe_ndev_priv *priv = netdev_priv(netdev);
+ struct hwtstamp_config config;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ /* reserved for future extensions */
+ if (config.flags)
+ return -EINVAL;
+
+ /* ptp hw timestamp is always enabled, mark the sw flags
+ * so that tx ptp requests are submitted to ptp psm queue
+ * and rx timestamp is copied to skb
+ */
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ priv->tx_hw_tstamp_en = 0;
+ break;
+ case HWTSTAMP_TX_ON:
+ priv->tx_hw_tstamp_en = 1;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ priv->rx_hw_tstamp_en = 0;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ priv->rx_hw_tstamp_en = 1;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
+ return -EFAULT;
+
+ return 0;
+}
+
+/* netdev ioctl */
+static int cnf10k_rfoe_ioctl(struct net_device *netdev, struct ifreq *req,
+ int cmd)
+{
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return cnf10k_rfoe_config_hwtstamp(netdev, req);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/* netdev xmit */
+static netdev_tx_t cnf10k_rfoe_eth_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct cnf10k_rfoe_ndev_priv *priv = netdev_priv(netdev);
+ struct mhbw_jd_dma_cfg_word_0_s *jd_dma_cfg_word_0;
+ struct mhbw_jd_dma_cfg_word_1_s *jd_dma_cfg_word_1;
+ struct mhab_job_desc_cfg *jd_cfg_ptr;
+ struct rfoe_tx_ptp_tstmp_s *tx_tstmp;
+ struct psm_cmd_addjob_s *psm_cmd_lo;
+ struct tx_job_queue_cfg *job_cfg;
+ struct tx_job_entry *job_entry;
+ struct ptp_tstamp_skb *ts_skb;
+ int psm_queue_id, queue_space;
+ u64 jd_cfg_ptr_iova, regval;
+ unsigned long flags;
+ struct ethhdr *eth;
+ int pkt_type = 0;
+
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+ if (!priv->tx_hw_tstamp_en) {
+ netif_dbg(priv, tx_queued, priv->netdev,
+ "skb HW timestamp requested but not enabled, this packet will not be timestamped\n");
+ job_cfg = &priv->rfoe_common->tx_oth_job_cfg;
+ pkt_type = PACKET_TYPE_OTHER;
+ } else {
+ job_cfg = &priv->tx_ptp_job_cfg;
+ pkt_type = PACKET_TYPE_PTP;
+ }
+ } else {
+ job_cfg = &priv->rfoe_common->tx_oth_job_cfg;
+ eth = (struct ethhdr *)skb->data;
+ if (ntohs(eth->h_proto) == ETH_P_ECPRI)
+ pkt_type = PACKET_TYPE_ECPRI;
+ else
+ pkt_type = PACKET_TYPE_OTHER;
+ }
+
+ spin_lock_irqsave(&job_cfg->lock, flags);
+
+ if (unlikely(priv->if_type != IF_TYPE_ETHERNET)) {
+ netif_err(priv, tx_queued, netdev,
+ "%s {rfoe%d lmac%d} invalid intf mode, drop pkt\n",
+ netdev->name, priv->rfoe_num, priv->lmac_id);
+ /* update stats */
+ priv->stats.tx_dropped++;
+ priv->last_tx_dropped_jiffies = jiffies;
+ goto exit;
+ }
+
+ if (unlikely(!netif_carrier_ok(netdev))) {
+ netif_err(priv, tx_err, netdev,
+ "%s {rfoe%d lmac%d} link down, drop pkt\n",
+ netdev->name, priv->rfoe_num,
+ priv->lmac_id);
+ /* update stats */
+ if (pkt_type == PACKET_TYPE_ECPRI) {
+ priv->stats.ecpri_tx_dropped++;
+ priv->last_tx_dropped_jiffies = jiffies;
+ } else if (pkt_type == PACKET_TYPE_PTP) {
+ priv->stats.ptp_tx_dropped++;
+ priv->last_tx_ptp_dropped_jiffies = jiffies;
+ } else {
+ priv->stats.tx_dropped++;
+ priv->last_tx_dropped_jiffies = jiffies;
+ }
+
+ goto exit;
+ }
+
+ if (unlikely(!(priv->pkt_type_mask & (1U << pkt_type)))) {
+ netif_err(priv, tx_queued, netdev,
+ "%s {rfoe%d lmac%d} pkt not supported, drop pkt\n",
+ netdev->name, priv->rfoe_num,
+ priv->lmac_id);
+ /* update stats */
+ if (pkt_type == PACKET_TYPE_ECPRI) {
+ priv->stats.ecpri_tx_dropped++;
+ priv->last_tx_dropped_jiffies = jiffies;
+ } else if (pkt_type == PACKET_TYPE_PTP) {
+ priv->stats.ptp_tx_dropped++;
+ priv->last_tx_ptp_dropped_jiffies = jiffies;
+ } else {
+ priv->stats.tx_dropped++;
+ priv->last_tx_dropped_jiffies = jiffies;
+ }
+
+ goto exit;
+ }
+
+ /* get psm queue number */
+ psm_queue_id = job_cfg->psm_queue_id;
+ netif_dbg(priv, tx_queued, priv->netdev,
+ "psm: queue(%d): cfg=0x%llx ptr=0x%llx space=0x%llx\n",
+ psm_queue_id,
+ readq(priv->psm_reg_base + PSM_QUEUE_CFG(psm_queue_id)),
+ readq(priv->psm_reg_base + PSM_QUEUE_PTR(psm_queue_id)),
+ readq(priv->psm_reg_base + PSM_QUEUE_SPACE(psm_queue_id)));
+
+ /* check psm queue space available */
+ regval = readq(priv->psm_reg_base + PSM_QUEUE_SPACE(psm_queue_id));
+ queue_space = regval & 0x7FFF;
+ if (queue_space < 1 && pkt_type != PACKET_TYPE_PTP) {
+ netif_err(priv, tx_err, netdev,
+ "no space in psm queue %d, dropping pkt\n",
+ psm_queue_id);
+ netif_stop_queue(netdev);
+ dev_kfree_skb_any(skb);
+ /* update stats */
+ if (pkt_type == PACKET_TYPE_ECPRI)
+ priv->stats.ecpri_tx_dropped++;
+ else
+ priv->stats.tx_dropped++;
+
+ priv->last_tx_dropped_jiffies = jiffies;
+
+ mod_timer(&priv->tx_timer, jiffies + msecs_to_jiffies(100));
+ spin_unlock_irqrestore(&job_cfg->lock, flags);
+ return NETDEV_TX_OK;
+ }
+
+ /* get the tx job entry */
+ job_entry = (struct tx_job_entry *)
+ &job_cfg->job_entries[job_cfg->q_idx];
+
+ netif_dbg(priv, tx_queued, priv->netdev,
+ "rfoe=%d lmac=%d psm_queue=%d tx_job_entry %d job_cmd_lo=0x%llx job_cmd_high=0x%llx jd_iova_addr=0x%llx\n",
+ priv->rfoe_num, priv->lmac_id, psm_queue_id, job_cfg->q_idx,
+ job_entry->job_cmd_lo, job_entry->job_cmd_hi,
+ job_entry->jd_iova_addr);
+
+ /* hw timestamp */
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ priv->tx_hw_tstamp_en) {
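+ /* no PTP frame in flight: claim the slot and submit this one now, otherwise queue it for the submit worker */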
+ if (list_empty(&priv->ptp_skb_list.list) &&
+ !test_and_set_bit_lock(PTP_TX_IN_PROGRESS, &priv->state)) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ priv->ptp_tx_skb = skb;
+ psm_cmd_lo = (struct psm_cmd_addjob_s *)
+ &job_entry->job_cmd_lo;
+ priv->ptp_job_tag = psm_cmd_lo->jobtag;
+
+ /* ptp timestamp entry is 128-bit in size */
+ tx_tstmp = (struct rfoe_tx_ptp_tstmp_s *)
+ ((u8 *)priv->ptp_ring_cfg.ptp_ring_base +
+ (16 * priv->ptp_ring_cfg.ptp_ring_idx));
+ memset(tx_tstmp, 0, sizeof(struct rfoe_tx_ptp_tstmp_s));
+ } else {
+ /* check ptp queue count */
+ if (priv->ptp_skb_list.count >= max_ptp_req) {
+ netif_err(priv, tx_err, netdev,
+ "ptp list full, dropping pkt\n");
+ priv->stats.ptp_tx_dropped++;
+ priv->last_tx_ptp_dropped_jiffies = jiffies;
+ goto exit;
+ }
+ /* allocate and add ptp req to queue */
+ ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
+ if (!ts_skb) {
+ priv->stats.ptp_tx_dropped++;
+ priv->last_tx_ptp_dropped_jiffies = jiffies;
+ goto exit;
+ }
+ ts_skb->skb = skb;
+ list_add_tail(&ts_skb->list, &priv->ptp_skb_list.list);
+ priv->ptp_skb_list.count++;
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ priv->stats.ptp_tx_packets++;
+ priv->stats.tx_bytes += skb->len;
+ /* sw timestamp */
+ skb_tx_timestamp(skb);
+ goto exit; /* submit the packet later */
+ }
+ }
+
+ /* sw timestamp */
+ skb_tx_timestamp(skb);
+
+ if (unlikely(netif_msg_pktdata(priv))) {
+ netdev_printk(KERN_DEBUG, priv->netdev, "Tx: skb %pS len=%d\n",
+ skb, skb->len);
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 4,
+ skb->data, skb->len, true);
+ }
+
+ /* update length and block size in jd dma cfg word */
+ jd_cfg_ptr_iova = *(u64 *)((u8 *)job_entry->jd_ptr + 8);
+ jd_cfg_ptr = otx2_iova_to_virt(priv->iommu_domain, jd_cfg_ptr_iova);
+ jd_cfg_ptr->cfg3.pkt_len = skb->len;
+ jd_dma_cfg_word_0 = (struct mhbw_jd_dma_cfg_word_0_s *)
+ job_entry->rd_dma_ptr;
+ jd_dma_cfg_word_0->block_size = (((skb->len + 15) >> 4) * 4);
+
+ /* update rfoe_mode and lmac id for non-ptp (shared) psm job entry */
+ if (pkt_type != PACKET_TYPE_PTP) {
+ jd_cfg_ptr->cfg3.lmacid = priv->lmac_id & 0x3;
+ if (pkt_type == PACKET_TYPE_ECPRI)
+ jd_cfg_ptr->cfg.rfoe_mode = 1;
+ else
+ jd_cfg_ptr->cfg.rfoe_mode = 0;
+ }
+
+ /* copy packet data to rd_dma_ptr start addr */
+ jd_dma_cfg_word_1 = (struct mhbw_jd_dma_cfg_word_1_s *)
+ ((u8 *)job_entry->rd_dma_ptr + 8);
+ memcpy(otx2_iova_to_virt(priv->iommu_domain,
+ jd_dma_cfg_word_1->start_addr),
+ skb->data, skb->len);
+
+ /* make sure that all memory writes are completed */
+ dma_wmb();
+
+ /* submit PSM job */
+ writeq(job_entry->job_cmd_lo,
+ priv->psm_reg_base + PSM_QUEUE_CMD_LO(psm_queue_id));
+ writeq(job_entry->job_cmd_hi,
+ priv->psm_reg_base + PSM_QUEUE_CMD_HI(psm_queue_id));
+
+ /* update stats */
+ if (pkt_type == PACKET_TYPE_ECPRI) {
+ priv->stats.ecpri_tx_packets++;
+ priv->last_tx_jiffies = jiffies;
+ } else if (pkt_type == PACKET_TYPE_PTP) {
+ priv->stats.ptp_tx_packets++;
+ priv->last_tx_ptp_jiffies = jiffies;
+ } else {
+ priv->stats.tx_packets++;
+ priv->last_tx_jiffies = jiffies;
+ }
+ priv->stats.tx_bytes += skb->len;
+
+ /* increment queue index */
+ job_cfg->q_idx++;
+ if (job_cfg->q_idx == job_cfg->num_entries)
+ job_cfg->q_idx = 0;
+exit:
+ if (!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
+ dev_kfree_skb_any(skb);
+
+ spin_unlock_irqrestore(&job_cfg->lock, flags);
+
+ return NETDEV_TX_OK;
+}
+
+/* netdev open */
+static int cnf10k_rfoe_eth_open(struct net_device *netdev)
+{
+ struct cnf10k_rfoe_ndev_priv *priv = netdev_priv(netdev);
+ int idx;
+
+ for (idx = 0; idx < PACKET_TYPE_MAX; idx++) {
+ if (!(priv->pkt_type_mask & (1U << idx)))
+ continue;
+ napi_enable(&priv->rx_ft_cfg[idx].napi);
+ }
+
+ priv->ptp_tx_skb = NULL;
+
+ netif_carrier_on(netdev);
+ netif_start_queue(netdev);
+
+ clear_bit(RFOE_INTF_DOWN, &priv->state);
+ priv->link_state = 1;
+
+ return 0;
+}
+
+/* netdev close */
+static int cnf10k_rfoe_eth_stop(struct net_device *netdev)
+{
+ struct cnf10k_rfoe_ndev_priv *priv = netdev_priv(netdev);
+ struct ptp_tstamp_skb *ts_skb, *ts_skb2;
+ int idx;
+
+ set_bit(RFOE_INTF_DOWN, &priv->state);
+
+ netif_stop_queue(netdev);
+ netif_carrier_off(netdev);
+ priv->link_state = 0;
+
+ for (idx = 0; idx < PACKET_TYPE_MAX; idx++) {
+ if (!(priv->pkt_type_mask & (1U << idx)))
+ continue;
+ napi_disable(&priv->rx_ft_cfg[idx].napi);
+ }
+
+ del_timer_sync(&priv->tx_timer);
+
+ /* cancel any pending ptp work item in progress */
+ cancel_work_sync(&priv->ptp_tx_work);
+ if (priv->ptp_tx_skb) {
+ dev_kfree_skb_any(priv->ptp_tx_skb);
+ priv->ptp_tx_skb = NULL;
+ clear_bit_unlock(PTP_TX_IN_PROGRESS, &priv->state);
+ }
+
+ /* clear ptp skb list */
+ cancel_work_sync(&priv->ptp_queue_work);
+ list_for_each_entry_safe(ts_skb, ts_skb2,
+ &priv->ptp_skb_list.list, list) {
+ list_del(&ts_skb->list);
+ kfree(ts_skb);
+ }
+ priv->ptp_skb_list.count = 0;
+
+ return 0;
+}
+
+static int cnf10k_rfoe_init(struct net_device *netdev)
+{
+ struct cnf10k_rfoe_ndev_priv *priv = netdev_priv(netdev);
+
+ /* Enable VLAN TPID match */
+ writeq(0x18100, (priv->rfoe_reg_base +
+ RFOEX_RX_VLANX_CFG(priv->rfoe_num, 0)));
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ return 0;
+}
+
+static int cnf10k_rfoe_vlan_rx_configure(struct net_device *netdev, u16 vid,
+ bool forward)
+{
+ struct cnf10k_rfoe_ndev_priv *priv = netdev_priv(netdev);
+ struct otx2_bphy_cdev_priv *cdev_priv = priv->cdev_priv;
+ struct rfoe_rx_ind_vlanx_fwd fwd;
+ unsigned long flags;
+ u64 mask, index;
+
+ if (vid >= VLAN_N_VID) {
+ netdev_err(netdev, "Invalid VLAN ID %d\n", vid);
+ return -EINVAL;
+ }
+
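+ /* each 64-bit VLANX_FWD register covers 64 VLAN IDs: index selects the register, mask the bit within it */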
+ mask = (0x1ll << (vid & 0x3F));
+ index = (vid >> 6) & 0x3F;
+
+ spin_lock_irqsave(&cdev_priv->mbt_lock, flags);
+
+ if (forward && priv->rfoe_common->rx_vlan_fwd_refcnt[vid]++)
+ goto out;
+
+ if (!forward && --priv->rfoe_common->rx_vlan_fwd_refcnt[vid])
+ goto out;
+
+ /* read current fwd mask */
+ writeq(index, (priv->rfoe_reg_base +
+ RFOEX_RX_INDIRECT_INDEX_OFFSET(priv->rfoe_num)));
+ fwd.fwd = readq(priv->rfoe_reg_base +
+ RFOEX_RX_IND_VLANX_FWD(priv->rfoe_num, 0));
+
+ if (forward)
+ fwd.fwd |= mask;
+ else
+ fwd.fwd &= ~mask;
+
+ /* write the new fwd mask */
+ writeq(index, (priv->rfoe_reg_base +
+ RFOEX_RX_INDIRECT_INDEX_OFFSET(priv->rfoe_num)));
+ writeq(fwd.fwd, (priv->rfoe_reg_base +
+ RFOEX_RX_IND_VLANX_FWD(priv->rfoe_num, 0)));
+
+out:
+ spin_unlock_irqrestore(&cdev_priv->mbt_lock, flags);
+
+ return 0;
+}
+
+static int cnf10k_rfoe_vlan_rx_add(struct net_device *netdev, __be16 proto,
+ u16 vid)
+{
+ return cnf10k_rfoe_vlan_rx_configure(netdev, vid, true);
+}
+
+static int cnf10k_rfoe_vlan_rx_kill(struct net_device *netdev, __be16 proto,
+ u16 vid)
+{
+ return cnf10k_rfoe_vlan_rx_configure(netdev, vid, false);
+}
+
+static const struct net_device_ops cnf10k_rfoe_netdev_ops = {
+ .ndo_init = cnf10k_rfoe_init,
+ .ndo_open = cnf10k_rfoe_eth_open,
+ .ndo_stop = cnf10k_rfoe_eth_stop,
+ .ndo_start_xmit = cnf10k_rfoe_eth_start_xmit,
+ .ndo_do_ioctl = cnf10k_rfoe_ioctl,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_get_stats64 = cnf10k_rfoe_get_stats64,
+ .ndo_vlan_rx_add_vid = cnf10k_rfoe_vlan_rx_add,
+ .ndo_vlan_rx_kill_vid = cnf10k_rfoe_vlan_rx_kill,
+};
+
+static void cnf10k_rfoe_dump_rx_ft_cfg(struct cnf10k_rfoe_ndev_priv *priv)
+{
+ struct cnf10k_rx_ft_cfg *ft_cfg;
+ int idx;
+
+ for (idx = 0; idx < PACKET_TYPE_MAX; idx++) {
+ if (!(priv->pkt_type_mask & (1U << idx)))
+ continue;
+ ft_cfg = &priv->rx_ft_cfg[idx];
+ pr_debug("rfoe=%d lmac=%d pkttype=%d flowid=%d mbt: idx=%d size=%d nbufs=%d iova=0x%llx jdt: idx=%d size=%d num_jd=%d iova=0x%llx\n",
+ priv->rfoe_num, priv->lmac_id, ft_cfg->pkt_type,
+ ft_cfg->flow_id, ft_cfg->mbt_idx, ft_cfg->buf_size,
+ ft_cfg->num_bufs, ft_cfg->mbt_iova_addr,
+ ft_cfg->jdt_idx, ft_cfg->jd_size, ft_cfg->num_jd,
+ ft_cfg->jdt_iova_addr);
+ }
+}
+
+static void cnf10k_rfoe_fill_rx_ft_cfg(struct cnf10k_rfoe_ndev_priv *priv,
+ struct cnf10k_bphy_ndev_comm_if *if_cfg)
+{
+ struct otx2_bphy_cdev_priv *cdev_priv = priv->cdev_priv;
+ struct cnf10k_bphy_ndev_rbuf_info *rbuf_info;
+ struct cnf10k_rx_ft_cfg *ft_cfg;
+ u64 jdt_cfg0, iova;
+ int idx;
+
+ /* RX flow table configuration */
+ for (idx = 0; idx < PACKET_TYPE_MAX; idx++) {
+ if (!(priv->pkt_type_mask & (1U << idx)))
+ continue;
+ ft_cfg = &priv->rx_ft_cfg[idx];
+ rbuf_info = &if_cfg->rbuf_info[idx];
+ ft_cfg->pkt_type = rbuf_info->pkt_type;
+ ft_cfg->gp_int_num = rbuf_info->gp_int_num;
+ ft_cfg->flow_id = rbuf_info->flow_id;
+ ft_cfg->mbt_idx = rbuf_info->mbt_index;
+ ft_cfg->buf_size = rbuf_info->buf_size * 16;
+ ft_cfg->num_bufs = rbuf_info->num_bufs;
+ ft_cfg->mbt_iova_addr = rbuf_info->mbt_iova_addr;
+ iova = ft_cfg->mbt_iova_addr;
+ ft_cfg->mbt_virt_addr = otx2_iova_to_virt(priv->iommu_domain,
+ iova);
+ ft_cfg->jdt_idx = rbuf_info->jdt_index;
+ ft_cfg->jd_size = rbuf_info->jd_size * 8;
+ ft_cfg->num_jd = rbuf_info->num_jd;
+ ft_cfg->jdt_iova_addr = rbuf_info->jdt_iova_addr;
+ iova = ft_cfg->jdt_iova_addr;
+ ft_cfg->jdt_virt_addr = otx2_iova_to_virt(priv->iommu_domain,
+ iova);
+ spin_lock(&cdev_priv->mbt_lock);
+ writeq(ft_cfg->jdt_idx,
+ (priv->rfoe_reg_base +
+ RFOEX_RX_INDIRECT_INDEX_OFFSET(priv->rfoe_num)));
+ jdt_cfg0 = readq(priv->rfoe_reg_base +
+ RFOEX_RX_IND_JDT_CFG0(priv->rfoe_num));
+ spin_unlock(&cdev_priv->mbt_lock);
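+ /* decode the JD read offset (in 8-byte words) and the packet data
+ * offset from the indirect JDT_CFG0 value read above
+ */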
+ ft_cfg->jd_rd_offset = ((jdt_cfg0 >> 27) & 0x3f) * 8;
+ ft_cfg->pkt_offset = (u8)((jdt_cfg0 >> 52) & 0x1f);
+ ft_cfg->priv = priv;
+ netif_napi_add(priv->netdev, &ft_cfg->napi,
+ cnf10k_rfoe_napi_poll,
+ NAPI_POLL_WEIGHT);
+ }
+}
+
+static void cnf10k_rfoe_fill_tx_job_entries(struct cnf10k_rfoe_ndev_priv *priv,
+ struct tx_job_queue_cfg *job_cfg,
+ struct cnf10k_bphy_ndev_tx_psm_cmd_info *tx_job,
+ int num_entries)
+{
+ struct tx_job_entry *job_entry;
+ u64 jd_cfg_iova, iova;
+ int i;
+
+ for (i = 0; i < num_entries; i++) {
+ job_entry = &job_cfg->job_entries[i];
+ job_entry->job_cmd_lo = tx_job->low_cmd;
+ job_entry->job_cmd_hi = tx_job->high_cmd;
+ job_entry->jd_iova_addr = tx_job->jd_iova_addr;
+ iova = job_entry->jd_iova_addr;
+ job_entry->jd_ptr = otx2_iova_to_virt(priv->iommu_domain, iova);
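+ /* the second 64-bit word of the job descriptor holds the IOVA of
+ * the JD configuration; resolve it to a virtual address as well
+ */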
+ jd_cfg_iova = *(u64 *)((u8 *)job_entry->jd_ptr + 8);
+ job_entry->jd_cfg_ptr = otx2_iova_to_virt(priv->iommu_domain,
+ jd_cfg_iova);
+ job_entry->rd_dma_iova_addr = tx_job->rd_dma_iova_addr;
+ iova = job_entry->rd_dma_iova_addr;
+ job_entry->rd_dma_ptr = otx2_iova_to_virt(priv->iommu_domain,
+ iova);
+ pr_debug("job_cmd_lo=0x%llx job_cmd_hi=0x%llx jd_iova_addr=0x%llx rd_dma_iova_addr=%llx\n",
+ tx_job->low_cmd, tx_job->high_cmd,
+ tx_job->jd_iova_addr, tx_job->rd_dma_iova_addr);
+ tx_job++;
+ }
+ /* the PSM queue id is bits 15:8 of the ADDJOB low command word */
+ job_entry = &job_cfg->job_entries[0];
+ job_cfg->psm_queue_id = (job_entry->job_cmd_lo >> 8) & 0xff;
+ job_cfg->q_idx = 0;
+ job_cfg->num_entries = num_entries;
+ spin_lock_init(&job_cfg->lock);
+}
+
+int cnf10k_rfoe_parse_and_init_intf(struct otx2_bphy_cdev_priv *cdev,
+ struct cnf10k_rfoe_ndev_comm_intf_cfg *cfg)
+{
+ int i, intf_idx = 0, num_entries, lmac, idx, ret;
+ struct cnf10k_bphy_ndev_tx_psm_cmd_info *tx_info;
+ struct cnf10k_bphy_ndev_tx_ptp_ring_info *info;
+ struct cnf10k_rfoe_drv_ctx *drv_ctx = NULL;
+ struct cnf10k_rfoe_ndev_priv *priv, *priv2;
+ struct cnf10k_bphy_ndev_rfoe_if *rfoe_cfg;
+ struct cnf10k_bphy_ndev_comm_if *if_cfg;
+ struct tx_ptp_ring_cfg *ptp_ring_cfg;
+ struct tx_job_queue_cfg *tx_cfg;
+ struct cnf10k_rx_ft_cfg *ft_cfg;
+ struct net_device *netdev;
+ u8 pkt_type_mask;
+
+ cdev->hw_version = cfg->hw_params.chip_ver;
+ dev_dbg(cdev->dev, "hw_version = 0x%x\n", cfg->hw_params.chip_ver);
+
+ if (CHIP_CNF10KB(cdev->hw_version)) {
+ cdev->num_rfoe_mhab = 7;
+ cdev->num_rfoe_lmac = 2;
+ cdev->tot_rfoe_intf = 14;
+ } else if (CHIP_CNF10KA(cdev->hw_version)) {
+ cdev->num_rfoe_mhab = 2;
+ cdev->num_rfoe_lmac = 4;
+ cdev->tot_rfoe_intf = 8;
+ } else {
+ dev_err(cdev->dev, "unsupported chip version\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < BPHY_MAX_RFOE_MHAB; i++) {
+ priv2 = NULL;
+ rfoe_cfg = &cfg->rfoe_if_cfg[i];
+ pkt_type_mask = rfoe_cfg->pkt_type_mask;
+ for (lmac = 0; lmac < MAX_LMAC_PER_RFOE; lmac++) {
+ if_cfg = &rfoe_cfg->if_cfg[lmac];
+ /* check if lmac is valid */
+ if (!if_cfg->lmac_info.is_valid) {
+ dev_dbg(cdev->dev,
+ "rfoe%d lmac%d invalid, skipping\n",
+ i, lmac);
+ continue;
+ }
+ if (lmac >= cdev->num_rfoe_lmac) {
+ dev_dbg(cdev->dev,
+ "rfoe%d, lmac%d not supported, skipping\n",
+ i, lmac);
+ continue;
+ }
+ netdev = alloc_etherdev(sizeof(*priv));
+ if (!netdev) {
+ dev_err(cdev->dev,
+ "error allocating net device\n");
+ ret = -ENOMEM;
+ goto err_exit;
+ }
+ priv = netdev_priv(netdev);
+ memset(priv, 0, sizeof(*priv));
+ if (!priv2) {
+ priv->rfoe_common =
+ kzalloc(sizeof(struct rfoe_common_cfg),
+ GFP_KERNEL);
+ if (!priv->rfoe_common) {
+ dev_err(cdev->dev, "kzalloc failed\n");
+ free_netdev(netdev);
+ ret = -ENOMEM;
+ goto err_exit;
+ }
+ priv->rfoe_common->refcnt = 1;
+ }
+ spin_lock_init(&priv->lock);
+ priv->netdev = netdev;
+ priv->cdev_priv = cdev;
+ priv->msg_enable = netif_msg_init(-1, 0);
+ spin_lock_init(&priv->stats.lock);
+ priv->rfoe_num = if_cfg->lmac_info.rfoe_num;
+ priv->lmac_id = if_cfg->lmac_info.lane_num;
+ priv->if_type = IF_TYPE_ETHERNET;
+ memcpy(priv->mac_addr, if_cfg->lmac_info.eth_addr,
+ ETH_ALEN);
+ if (is_valid_ether_addr(priv->mac_addr))
+ ether_addr_copy(netdev->dev_addr,
+ priv->mac_addr);
+ else
+ random_ether_addr(netdev->dev_addr);
+ priv->pdev = pci_get_device(OTX2_BPHY_PCI_VENDOR_ID,
+ OTX2_BPHY_PCI_DEVICE_ID,
+ NULL);
+ priv->iommu_domain =
+ iommu_get_domain_for_dev(&priv->pdev->dev);
+ priv->bphy_reg_base = bphy_reg_base;
+ priv->psm_reg_base = psm_reg_base;
+ priv->rfoe_reg_base = rfoe_reg_base;
+ priv->bcn_reg_base = bcn_reg_base;
+ priv->ptp_reg_base = ptp_reg_base;
+
+ /* Initialise PTP TX work queue */
+ INIT_WORK(&priv->ptp_tx_work, cnf10k_rfoe_ptp_tx_work);
+ INIT_WORK(&priv->ptp_queue_work,
+ cnf10k_rfoe_ptp_submit_work);
+
+ /* Initialise PTP skb list */
+ INIT_LIST_HEAD(&priv->ptp_skb_list.list);
+ priv->ptp_skb_list.count = 0;
+ timer_setup(&priv->tx_timer,
+ cnf10k_rfoe_tx_timer_cb, 0);
+
+ priv->pkt_type_mask = pkt_type_mask;
+ cnf10k_rfoe_fill_rx_ft_cfg(priv, if_cfg);
+ cnf10k_rfoe_dump_rx_ft_cfg(priv);
+
+ /* TX PTP job configuration */
+ if (priv->pkt_type_mask & (1U << PACKET_TYPE_PTP)) {
+ tx_cfg = &priv->tx_ptp_job_cfg;
+ tx_info = &if_cfg->ptp_pkt_info[0];
+ num_entries = MAX_PTP_MSG_PER_LMAC;
+ cnf10k_rfoe_fill_tx_job_entries(priv, tx_cfg,
+ tx_info,
+ num_entries);
+ /* fill ptp ring info */
+ ptp_ring_cfg = &priv->ptp_ring_cfg;
+ info = &if_cfg->ptp_ts_ring_info[0];
+ ptp_ring_cfg->ptp_ring_base =
+ otx2_iova_to_virt(priv->iommu_domain,
+ info->ring_iova_addr);
+ ptp_ring_cfg->ptp_ring_id = info->ring_idx;
+ ptp_ring_cfg->ptp_ring_size = info->ring_size;
+ ptp_ring_cfg->ptp_ring_idx = 0;
+ }
+
+ /* TX ECPRI/OTH job configuration (shared by all LMACs on this RFOE) */
+ if (!priv2 &&
+ ((priv->pkt_type_mask &
+ (1U << PACKET_TYPE_OTHER)) ||
+ (priv->pkt_type_mask &
+ (1U << PACKET_TYPE_ECPRI)))) {
+ num_entries = cdev->num_rfoe_lmac *
+ MAX_OTH_MSG_PER_LMAC;
+ tx_cfg = &priv->rfoe_common->tx_oth_job_cfg;
+ tx_info = &rfoe_cfg->oth_pkt_info[0];
+ cnf10k_rfoe_fill_tx_job_entries(priv, tx_cfg,
+ tx_info,
+ num_entries);
+ } else {
+ /* share rfoe_common data */
+ priv->rfoe_common = priv2->rfoe_common;
+ ++(priv->rfoe_common->refcnt);
+ }
+
+ /* remember the first valid (rfoe, lmac) priv so later LMACs share its rfoe_common */
+ if (!priv2)
+ priv2 = priv;
+
+ intf_idx = (i * cdev->num_rfoe_lmac) + lmac;
+ snprintf(netdev->name, sizeof(netdev->name),
+ "rfoe%d", intf_idx);
+ netdev->netdev_ops = &cnf10k_rfoe_netdev_ops;
+ cnf10k_rfoe_set_ethtool_ops(netdev);
+ cnf10k_rfoe_ptp_init(priv);
+ netdev->watchdog_timeo = (15 * HZ);
+ netdev->mtu = 1500U;
+ netdev->min_mtu = ETH_MIN_MTU;
+ netdev->max_mtu = 1500U;
+ ret = register_netdev(netdev);
+ if (ret < 0) {
+ dev_err(cdev->dev,
+ "failed to register net device %s\n",
+ netdev->name);
+ free_netdev(netdev);
+ ret = -ENODEV;
+ goto err_exit;
+ }
+ dev_dbg(cdev->dev, "net device %s registered\n",
+ netdev->name);
+
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+ set_bit(RFOE_INTF_DOWN, &priv->state);
+ priv->link_state = 0;
+
+ /* initialize global ctx */
+ drv_ctx = &cnf10k_rfoe_drv_ctx[intf_idx];
+ drv_ctx->rfoe_num = priv->rfoe_num;
+ drv_ctx->lmac_id = priv->lmac_id;
+ drv_ctx->valid = 1;
+ drv_ctx->netdev = netdev;
+ drv_ctx->ft_cfg = &priv->rx_ft_cfg[0];
+ }
+ }
+
+ return 0;
+
+err_exit:
+ for (i = 0; i < CNF10K_RFOE_MAX_INTF; i++) {
+ drv_ctx = &cnf10k_rfoe_drv_ctx[i];
+ if (drv_ctx->valid) {
+ netdev = drv_ctx->netdev;
+ priv = netdev_priv(netdev);
+ cnf10k_rfoe_ptp_destroy(priv);
+ unregister_netdev(netdev);
+ for (idx = 0; idx < PACKET_TYPE_MAX; idx++) {
+ if (!(priv->pkt_type_mask & (1U << idx)))
+ continue;
+ ft_cfg = &priv->rx_ft_cfg[idx];
+ netif_napi_del(&ft_cfg->napi);
+ }
+ --(priv->rfoe_common->refcnt);
+ if (priv->rfoe_common->refcnt == 0)
+ kfree(priv->rfoe_common);
+ free_netdev(netdev);
+ drv_ctx->valid = 0;
+ }
+ }
+
+ return ret;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_rfoe.h b/drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_rfoe.h
new file mode 100644
index 000000000000..215056a1c7ca
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_rfoe.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell CNF10K BPHY RFOE Netdev Driver
+ *
+ * Copyright (C) 2021 Marvell.
+ */
+
+#ifndef _CNF10K_RFOE_H_
+#define _CNF10K_RFOE_H_
+
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/iommu.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_ether.h>
+#include <linux/net_tstamp.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/if_vlan.h>
+
+#include "rfoe_common.h"
+#include "otx2_bphy.h"
+
+#define DEBUG
+
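+/* GP_INT bit layout as encoded below: each RFOE MHAB owns three RX
+ * interrupt bits; instances 0-5 are packed downward from bit 29 and
+ * instances 6 and above upward from bit 0
+ */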
+#define CNF10K_RFOE_RX_INTR_SHIFT(a) ({ \
+ typeof(a) _a = (a); \
+ ((_a) < 6) ? (32 - ((_a) + 1) * 3) : (((_a) - 6) * 3); \
+})
+#define CNF10K_RFOE_RX_INTR_MASK(a) (RFOE_RX_INTR_EN << \
+ CNF10K_RFOE_RX_INTR_SHIFT(a))
+#define CNF10K_RFOE_TX_PTP_INTR_MASK(a, b, n) (1UL << ((a) * (n) + (b)))
+
+#define CNF10K_RFOE_MAX_INTF 14
+
+/* global driver context */
+struct cnf10k_rfoe_drv_ctx {
+ u8 rfoe_num;
+ u8 lmac_id;
+ int valid;
+ struct net_device *netdev;
+ struct cnf10k_rx_ft_cfg *ft_cfg;
+ int tx_gpint_bit;
+};
+
+extern struct cnf10k_rfoe_drv_ctx cnf10k_rfoe_drv_ctx[CNF10K_RFOE_MAX_INTF];
+
+/* rx flow table configuration */
+struct cnf10k_rx_ft_cfg {
+ enum bphy_netdev_packet_type pkt_type; /* pkt_type for psw */
+ enum bphy_netdev_rx_gpint gp_int_num;
+ u16 flow_id; /* flow id */
+ u16 mbt_idx; /* mbt index */
+ u16 buf_size; /* mbt buf size */
+ u16 num_bufs; /* mbt num bufs */
+ u64 mbt_iova_addr;
+ void __iomem *mbt_virt_addr;
+ u16 jdt_idx; /* jdt index */
+ u8 jd_size; /* jd size */
+ u16 num_jd; /* num jd's */
+ u64 jdt_iova_addr;
+ void __iomem *jdt_virt_addr;
+ u8 jd_rd_offset; /* jd rd offset */
+ u8 pkt_offset;
+ struct napi_struct napi;
+ struct cnf10k_rfoe_ndev_priv *priv;
+};
+
+struct tx_ptp_ring_cfg {
+ u8 ptp_ring_id;
+ void __iomem *ptp_ring_base;
+ u8 ptp_ring_size;
+ u8 ptp_ring_idx;
+};
+
+/* netdev priv */
+struct cnf10k_rfoe_ndev_priv {
+ u8 rfoe_num;
+ u8 lmac_id;
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+ struct otx2_bphy_cdev_priv *cdev_priv;
+ u32 msg_enable;
+ void __iomem *bphy_reg_base;
+ void __iomem *psm_reg_base;
+ void __iomem *rfoe_reg_base;
+ void __iomem *bcn_reg_base;
+ void __iomem *ptp_reg_base;
+ struct iommu_domain *iommu_domain;
+ struct cnf10k_rx_ft_cfg rx_ft_cfg[PACKET_TYPE_MAX];
+ struct tx_job_queue_cfg tx_ptp_job_cfg;
+ struct tx_ptp_ring_cfg ptp_ring_cfg;
+ struct rfoe_common_cfg *rfoe_common;
+ u8 pkt_type_mask;
+ /* priv lock */
+ spinlock_t lock;
+ int rx_hw_tstamp_en;
+ int tx_hw_tstamp_en;
+ struct sk_buff *ptp_tx_skb;
+ u16 ptp_job_tag;
+ struct timer_list tx_timer;
+ unsigned long state;
+ struct work_struct ptp_tx_work;
+ struct work_struct ptp_queue_work;
+ struct ptp_tx_skb_list ptp_skb_list;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_clock_info;
+ struct otx2_rfoe_stats stats;
+ u8 mac_addr[ETH_ALEN];
+ s32 sec_bcn_offset;
+ int if_type;
+ u8 link_state;
+ unsigned long last_tx_jiffies;
+ unsigned long last_tx_ptp_jiffies;
+ unsigned long last_rx_jiffies;
+ unsigned long last_rx_ptp_jiffies;
+ unsigned long last_tx_dropped_jiffies;
+ unsigned long last_tx_ptp_dropped_jiffies;
+ unsigned long last_rx_dropped_jiffies;
+ unsigned long last_rx_ptp_dropped_jiffies;
+};
+
+void cnf10k_rfoe_rx_napi_schedule(int rfoe_num, u32 status);
+
+int cnf10k_rfoe_parse_and_init_intf(struct otx2_bphy_cdev_priv *cdev,
+ struct cnf10k_rfoe_ndev_comm_intf_cfg *cfg);
+
+void cnf10k_bphy_rfoe_cleanup(void);
+
+void cnf10k_rfoe_disable_intf(int rfoe_num);
+
+/* ethtool */
+void cnf10k_rfoe_set_ethtool_ops(struct net_device *netdev);
+
+/* ptp */
+int cnf10k_rfoe_ptp_init(struct cnf10k_rfoe_ndev_priv *priv);
+void cnf10k_rfoe_ptp_destroy(struct cnf10k_rfoe_ndev_priv *priv);
+
+void cnf10k_bphy_intr_handler(struct otx2_bphy_cdev_priv *cdev_priv,
+ u32 status);
+
+#endif
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_rfoe_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_rfoe_ethtool.c
new file mode 100644
index 000000000000..5d7bbd9fc82f
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_rfoe_ethtool.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell CNF10K BPHY RFOE Netdev Driver
+ *
+ * Copyright (C) 2021 Marvell.
+ */
+
+#include "cnf10k_rfoe.h"
+#include "cnf10k_bphy_hw.h"
+
+static const char ethtool_stat_strings[][ETH_GSTRING_LEN] = {
+ "oth_rx_packets",
+ "ptp_rx_packets",
+ "ecpri_rx_packets",
+ "rx_bytes",
+ "oth_rx_dropped",
+ "ptp_rx_dropped",
+ "ecpri_rx_dropped",
+ "oth_tx_packets",
+ "ptp_tx_packets",
+ "ecpri_tx_packets",
+ "tx_bytes",
+ "oth_tx_dropped",
+ "ptp_tx_dropped",
+ "ecpri_tx_dropped",
+ "ptp_tx_hwtstamp_failures",
+ "EthIfInFrames",
+ "EthIfInOctets",
+ "EthIfOutFrames",
+ "EthIfOutOctets",
+ "EthIfInUnknownVlan",
+};
+
+static void cnf10k_rfoe_get_strings(struct net_device *netdev, u32 sset,
+ u8 *data)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ memcpy(data, *ethtool_stat_strings,
+ sizeof(ethtool_stat_strings));
+ break;
+ }
+}
+
+static int cnf10k_rfoe_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(ethtool_stat_strings);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void cnf10k_rfoe_update_lmac_stats(struct cnf10k_rfoe_ndev_priv *priv)
+{
+ struct otx2_rfoe_stats *stats = &priv->stats;
+
+ stats->EthIfInFrames = readq(priv->rfoe_reg_base +
+ RFOEX_RX_RPM_PKT_STAT(priv->rfoe_num,
+ priv->lmac_id));
+ stats->EthIfInOctets = readq(priv->rfoe_reg_base +
+ RFOEX_RX_RPM_OCTS_STAT(priv->rfoe_num,
+ priv->lmac_id));
+ stats->EthIfOutFrames = readq(priv->rfoe_reg_base +
+ RFOEX_TX_PKT_STAT(priv->rfoe_num,
+ priv->lmac_id));
+ stats->EthIfOutOctets = readq(priv->rfoe_reg_base +
+ RFOEX_TX_OCTS_STAT(priv->rfoe_num,
+ priv->lmac_id));
+ stats->EthIfInUnknownVlan =
+ readq(priv->rfoe_reg_base +
+ RFOEX_RX_VLAN_DROP_STAT(priv->rfoe_num,
+ priv->lmac_id));
+}
+
+static void cnf10k_rfoe_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct cnf10k_rfoe_ndev_priv *priv = netdev_priv(netdev);
+
+ cnf10k_rfoe_update_lmac_stats(priv);
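+ /* this copy assumes struct otx2_rfoe_stats starts with one u64
+ * counter per entry of ethtool_stat_strings, in the same order
+ */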
+ spin_lock(&priv->stats.lock);
+ memcpy(data, &priv->stats,
+ ARRAY_SIZE(ethtool_stat_strings) * sizeof(u64));
+ spin_unlock(&priv->stats.lock);
+}
+
+static void cnf10k_rfoe_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *p)
+{
+ struct cnf10k_rfoe_ndev_priv *priv = netdev_priv(netdev);
+
+ snprintf(p->driver, sizeof(p->driver), "cnf10k_rfoe {rfoe%d lmac%d}",
+ priv->rfoe_num, priv->lmac_id);
+ strlcpy(p->bus_info, "platform", sizeof(p->bus_info));
+}
+
+static int cnf10k_rfoe_get_ts_info(struct net_device *netdev,
+ struct ethtool_ts_info *info)
+{
+ struct cnf10k_rfoe_ndev_priv *priv = netdev_priv(netdev);
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->phc_index = ptp_clock_index(priv->ptp_clock);
+
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
+static u32 cnf10k_rfoe_get_msglevel(struct net_device *netdev)
+{
+ struct cnf10k_rfoe_ndev_priv *priv = netdev_priv(netdev);
+
+ return priv->msg_enable;
+}
+
+static void cnf10k_rfoe_set_msglevel(struct net_device *netdev, u32 level)
+{
+ struct cnf10k_rfoe_ndev_priv *priv = netdev_priv(netdev);
+
+ priv->msg_enable = level;
+}
+
+static const struct ethtool_ops cnf10k_rfoe_ethtool_ops = {
+ .get_drvinfo = cnf10k_rfoe_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_ts_info = cnf10k_rfoe_get_ts_info,
+ .get_strings = cnf10k_rfoe_get_strings,
+ .get_sset_count = cnf10k_rfoe_get_sset_count,
+ .get_ethtool_stats = cnf10k_rfoe_get_ethtool_stats,
+ .get_msglevel = cnf10k_rfoe_get_msglevel,
+ .set_msglevel = cnf10k_rfoe_set_msglevel,
+};
+
+void cnf10k_rfoe_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &cnf10k_rfoe_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_rfoe_ptp.c b/drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_rfoe_ptp.c
new file mode 100644
index 000000000000..4ea2fc29ee71
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_rfoe_ptp.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell CNF10K BPHY RFOE Netdev Driver
+ *
+ * Copyright (C) 2021 Marvell.
+ */
+
+#include "cnf10k_rfoe.h"
+
+static int cnf10k_rfoe_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
+{
+ return -EOPNOTSUPP;
+}
+
+static int cnf10k_rfoe_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ return -EOPNOTSUPP;
+}
+
+static int cnf10k_rfoe_ptp_gettime(struct ptp_clock_info *ptp_info,
+ struct timespec64 *ts)
+{
+ struct cnf10k_rfoe_ndev_priv *priv = container_of(ptp_info,
+ struct cnf10k_rfoe_ndev_priv,
+ ptp_clock_info);
+ u64 nsec;
+
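+ /* MIO_PTP_CLOCK_HI is read back as a free-running nanosecond count */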
+ nsec = readq(priv->ptp_reg_base + MIO_PTP_CLOCK_HI);
+ *ts = ns_to_timespec64(nsec);
+
+ return 0;
+}
+
+static int cnf10k_rfoe_ptp_settime(struct ptp_clock_info *ptp_info,
+ const struct timespec64 *ts)
+{
+ return -EOPNOTSUPP;
+}
+
+static int cnf10k_rfoe_ptp_enable(struct ptp_clock_info *ptp_info,
+ struct ptp_clock_request *rq, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+static const struct ptp_clock_info cnf10k_rfoe_ptp_clock_info = {
+ .owner = THIS_MODULE,
+ .max_adj = 1000000000ull,
+ .n_ext_ts = 0,
+ .n_pins = 0,
+ .pps = 0,
+ .adjfine = cnf10k_rfoe_ptp_adjfine,
+ .adjtime = cnf10k_rfoe_ptp_adjtime,
+ .gettime64 = cnf10k_rfoe_ptp_gettime,
+ .settime64 = cnf10k_rfoe_ptp_settime,
+ .enable = cnf10k_rfoe_ptp_enable,
+};
+
+int cnf10k_rfoe_ptp_init(struct cnf10k_rfoe_ndev_priv *priv)
+{
+ int err;
+
+ priv->ptp_clock_info = cnf10k_rfoe_ptp_clock_info;
+ snprintf(priv->ptp_clock_info.name, 16, "%s", priv->netdev->name);
+ priv->ptp_clock = ptp_clock_register(&priv->ptp_clock_info,
+ &priv->pdev->dev);
+ if (IS_ERR_OR_NULL(priv->ptp_clock)) {
+ /* ptp_clock_register() returns NULL when PTP support is disabled */
+ err = priv->ptp_clock ? PTR_ERR(priv->ptp_clock) : -ENODEV;
+ priv->ptp_clock = NULL;
+ return err;
+ }
+
+ return 0;
+}
+
+void cnf10k_rfoe_ptp_destroy(struct cnf10k_rfoe_ndev_priv *priv)
+{
+ ptp_clock_unregister(priv->ptp_clock);
+ priv->ptp_clock = NULL;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy.h b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy.h
new file mode 100644
index 000000000000..5cb8a89eef0b
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTx2 BPHY RFOE/CPRI Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _OTX2_BPHY_H_
+#define _OTX2_BPHY_H_
+
+#include <linux/device.h>
+#include <linux/ioctl.h>
+#include <linux/cdev.h>
+#include <linux/io.h>
+#include <linux/iommu.h>
+
+#include "bphy_common.h"
+#include "rfoe_bphy_netdev_comm_if.h"
+#include "cnf10k_bphy_netdev_comm_if.h"
+
+#define DEVICE_NAME "otx2_rfoe"
+#define DRV_NAME "octeontx2-bphy-netdev"
+#define DRV_STRING "Marvell OcteonTX2 BPHY Ethernet Driver"
+
+/* char device ioctl numbers */
+#define OTX2_RFOE_IOCTL_BASE 0xCC /* Temporary */
+#define OTX2_RFOE_IOCTL_ODP_INTF_CFG _IOW(OTX2_RFOE_IOCTL_BASE, 0x01, \
+ struct bphy_netdev_comm_intf_cfg)
+#define OTX2_RFOE_IOCTL_ODP_DEINIT _IO(OTX2_RFOE_IOCTL_BASE, 0x02)
+#define OTX2_RFOE_IOCTL_RX_IND_CFG _IOWR(OTX2_RFOE_IOCTL_BASE, 0x03, \
+ struct otx2_rfoe_rx_ind_cfg)
+#define OTX2_RFOE_IOCTL_PTP_OFFSET _IOW(OTX2_RFOE_IOCTL_BASE, 0x04, \
+ struct ptp_clk_cfg)
+#define OTX2_RFOE_IOCTL_SEC_BCN_OFFSET _IOW(OTX2_RFOE_IOCTL_BASE, 0x05, \
+ struct bcn_sec_offset_cfg)
+#define OTX2_RFOE_IOCTL_MODE_CPRI _IOW(OTX2_RFOE_IOCTL_BASE, 0x06, \
+ int)
+#define OTX2_RFOE_IOCTL_LINK_EVENT _IOW(OTX2_RFOE_IOCTL_BASE, 0x07, \
+ struct otx2_rfoe_link_event)
+#define OTX2_CPRI_IOCTL_LINK_EVENT _IOW(OTX2_RFOE_IOCTL_BASE, 0x08, \
+ struct otx2_cpri_link_event)
+#define OTX2_IOCTL_RFOE_10x_CFG _IOW(OTX2_RFOE_IOCTL_BASE, 0x0A, \
+ uint64_t)
+#define OTX2_IOCTL_CPRI_INTF_CFG _IOW(OTX2_RFOE_IOCTL_BASE, 0x0B, \
+ uint64_t)
+
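+/* Illustrative userspace sketch (not part of the ABI documentation): the
+ * ioctls above are issued on the character device created from DEVICE_NAME,
+ * e.g.
+ *
+ *    struct bphy_netdev_comm_intf_cfg cfg[MAX_RFOE_INTF] = { 0 };
+ *    int fd = open("/dev/otx2_rfoe", O_RDWR);
+ *
+ *    if (fd >= 0 && ioctl(fd, OTX2_RFOE_IOCTL_ODP_INTF_CFG, cfg))
+ *        perror("intf cfg");
+ */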
+//#define ASIM /* ASIM environment */
+
+#define OTX2_BPHY_MHAB_INST 3
+
+/* char driver private data */
+struct otx2_bphy_cdev_priv {
+ struct device *dev;
+ struct cdev cdev;
+ dev_t devt;
+ int is_open;
+ int odp_intf_cfg;
+ int irq;
+ struct mutex mutex_lock; /* mutex */
+ spinlock_t lock; /* irq lock */
+ spinlock_t mbt_lock; /* mbt ind lock */
+ u8 mhab_mode[BPHY_MAX_RFOE_MHAB];
+ /* cnf10k specific information */
+ u32 hw_version;
+ u8 num_rfoe_mhab;
+ u8 num_rfoe_lmac;
+ u8 tot_rfoe_intf;
+ int gpint2_irq;
+};
+
+#endif
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy_debugfs.c
new file mode 100644
index 000000000000..a2d03352c89d
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy_debugfs.c
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 BPHY RFOE/CPRI Ethernet Driver
+ *
+ * Copyright (C) 2021 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+
+#include "otx2_bphy_debugfs.h"
+#include "otx2_bphy.h"
+
+#define OTX2_BPHY_DEBUGFS_MODE 0400
+
+struct otx2_bphy_debugfs_reader_info {
+ atomic_t refcnt;
+ size_t buffer_size;
+ void *priv;
+ otx2_bphy_debugfs_reader reader;
+ struct dentry *entry;
+ char buffer[1];
+};
+
+static struct dentry *otx2_bphy_debugfs;
+
+static int otx2_bphy_debugfs_open(struct inode *inode, struct file *file);
+
+static int otx2_bphy_debugfs_release(struct inode *inode, struct file *file);
+
+static ssize_t otx2_bphy_debugfs_read(struct file *file, char __user *buffer,
+ size_t count, loff_t *offset);
+
+static const struct file_operations otx2_bphy_debugfs_foper = {
+ .owner = THIS_MODULE,
+ .open = otx2_bphy_debugfs_open,
+ .release = otx2_bphy_debugfs_release,
+ .read = otx2_bphy_debugfs_read,
+};
+
+void __init otx2_bphy_debugfs_init(void)
+{
+ otx2_bphy_debugfs = debugfs_create_dir(DRV_NAME, NULL);
+ if (!otx2_bphy_debugfs)
+ pr_info("%s: debugfs is not enabled\n", DRV_NAME);
+}
+
+void *otx2_bphy_debugfs_add_file(const char *name,
+ size_t buffer_size,
+ void *priv,
+ otx2_bphy_debugfs_reader reader)
+{
+ struct otx2_bphy_debugfs_reader_info *info = NULL;
+ size_t total_size = 0;
+
+ if (!otx2_bphy_debugfs) {
+ pr_info("%s: debugfs not enabled, ignoring %s\n", DRV_NAME,
+ name);
+ goto out;
+ }
+
+ total_size = buffer_size +
+ offsetof(struct otx2_bphy_debugfs_reader_info,
+ buffer);
+
+ info = kzalloc(total_size, GFP_KERNEL);
+
+ if (!info)
+ goto out;
+
+ info->buffer_size = buffer_size;
+ info->priv = priv;
+ info->reader = reader;
+
+ atomic_set(&info->refcnt, 0);
+
+ info->entry = debugfs_create_file(name, OTX2_BPHY_DEBUGFS_MODE,
+ otx2_bphy_debugfs, info,
+ &otx2_bphy_debugfs_foper);
+
+ if (!info->entry) {
+ pr_err("%s: debugfs failed to add file %s\n", DRV_NAME, name);
+ kfree(info);
+ info = NULL;
+ goto out;
+ }
+
+ pr_info("%s: debugfs created successfully for %s\n", DRV_NAME, name);
+
+out:
+ return info;
+}
+
+void otx2_bphy_debugfs_remove_file(void *entry)
+{
+ struct otx2_bphy_debugfs_reader_info *info = entry;
+
+ debugfs_remove(info->entry);
+
+ kfree(info);
+}
+
+void __exit otx2_bphy_debugfs_exit(void)
+{
+ debugfs_remove_recursive(otx2_bphy_debugfs);
+}
+
+static int otx2_bphy_debugfs_open(struct inode *inode, struct file *file)
+{
+ struct otx2_bphy_debugfs_reader_info *info = NULL;
+
+ info = inode->i_private;
+
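+ /* allow only one reader at a time; refcnt acts as a simple open flag */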
+ if (!atomic_cmpxchg(&info->refcnt, 0, 1)) {
+ file->private_data = info;
+ return 0;
+ }
+
+ return -EBUSY;
+}
+
+static int otx2_bphy_debugfs_release(struct inode *inode, struct file *file)
+{
+ struct otx2_bphy_debugfs_reader_info *info = NULL;
+
+ info = inode->i_private;
+
+ atomic_cmpxchg(&info->refcnt, 1, 0);
+
+ return 0;
+}
+
+static ssize_t otx2_bphy_debugfs_read(struct file *file, char __user *buffer,
+ size_t count, loff_t *offset)
+{
+ struct otx2_bphy_debugfs_reader_info *info = NULL;
+ ssize_t retval = 0;
+
+ info = file->private_data;
+
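+ /* regenerate the snapshot only at the start of a read; later calls
+ * page through the cached buffer using *offset
+ */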
+ if (!(*offset))
+ info->reader(&info->buffer[0], info->buffer_size, info->priv);
+
+ if (*offset >= info->buffer_size)
+ goto out;
+
+ if (*offset + count > info->buffer_size)
+ count = info->buffer_size - *offset;
+
+ if (copy_to_user((void __user *)buffer, info->buffer + *offset,
+ count)) {
+ retval = -EFAULT;
+ goto out;
+ }
+
+ *offset += count;
+ retval = count;
+
+out:
+ return retval;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy_debugfs.h b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy_debugfs.h
new file mode 100644
index 000000000000..099290565bfa
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy_debugfs.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTx2 BPHY RFOE/CPRI Ethernet Driver
+ *
+ * Copyright (C) 2021 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+
+#ifndef _OTX2_BPHY_DEBUGFS_H_
+#define _OTX2_BPHY_DEBUGFS_H_
+
+typedef void (*otx2_bphy_debugfs_reader)(char *buffer, size_t buffer_size,
+ void *priv);
+
+void otx2_bphy_debugfs_init(void);
+
+void *otx2_bphy_debugfs_add_file(const char *name,
+ size_t buffer_size,
+ void *priv,
+ otx2_bphy_debugfs_reader reader);
+
+void otx2_bphy_debugfs_remove_file(void *entry);
+
+void otx2_bphy_debugfs_exit(void);
+
+#endif
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy_hw.h b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy_hw.h
new file mode 100644
index 000000000000..48bfd2017ea1
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy_hw.h
@@ -0,0 +1,381 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTx2 BPHY RFOE/CPRI Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _OTX2_BPHY_HW_H_
+#define _OTX2_BPHY_HW_H_
+
+#include <linux/types.h>
+
+/* PSM register offsets */
+#define PSM_QUEUE_CMD_LO(a) (0x0 + (a) * 0x10)
+#define PSM_QUEUE_CMD_HI(a) (0x8 + (a) * 0x10)
+#define PSM_QUEUE_CFG(a) (0x1000 + (a) * 0x10)
+#define PSM_QUEUE_PTR(a) (0x2000 + (a) * 0x10)
+#define PSM_QUEUE_SPACE(a) (0x3000 + (a) * 0x10)
+#define PSM_QUEUE_TIMEOUT_CFG(a) (0x4000 + (a) * 0x10)
+#define PSM_QUEUE_INFO(a) (0x5000 + (a) * 0x10)
+#define PSM_QUEUE_ENA_W1S(a) (0x10000 + (a) * 0x8)
+#define PSM_QUEUE_ENA_W1C(a) (0x10100 + (a) * 0x8)
+#define PSM_QUEUE_FULL_STS(a) (0x10200 + (a) * 0x8)
+#define PSM_QUEUE_BUSY_STS(a) (0x10300 + (a) * 0x8)
+
+/* BPHY PSM GPINT register offsets */
+#define PSM_INT_GP_SUM_W1C(a) (0x10E0000 + (a) * 0x100)
+#define PSM_INT_GP_SUM_W1S(a) (0x10E0040 + (a) * 0x100)
+#define PSM_INT_GP_ENA_W1C(a) (0x10E0080 + (a) * 0x100)
+#define PSM_INT_GP_ENA_W1S(a) (0x10E00C0 + (a) * 0x100)
+
+/* RFOE MHAB register offsets */
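+/* in the offsets below, (a) selects the RFOE MHAB instance via bits 36 and
+ * up, and (b), where present, selects the per-LMAC/slot register copy
+ */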
+#define RFOEX_RX_CTL(a) (0x0818ULL | \
+ ((unsigned long)(a) << 36))
+#define RFOEX_RX_VLANX_CFG(a, b) (0x0870ULL | \
+ ((unsigned long)(a) << 36) | \
+ ((b) << 3))
+#define RFOEX_RX_INDIRECT_INDEX_OFFSET(a) (0x13F8ULL | \
+ ((unsigned long)(a) << 36))
+#define RFOEX_RX_IND_FTX_CFG(a, b) (0x1400ULL | \
+ ((unsigned long)(a) << 36) | \
+ ((b) << 3))
+#define RFOEX_RX_IND_MBT_CFG(a) (0x1420ULL | \
+ ((unsigned long)(a) << 36))
+#define RFOEX_RX_IND_MBT_ADDR(a) (0x1428ULL | \
+ ((unsigned long)(a) << 36))
+#define RFOEX_RX_IND_MBT_SEG_STATE(a) (0x1430ULL | \
+ ((unsigned long)(a) << 36))
+#define RFOEX_RX_IND_VLANX_FWD(a, b) (0x14D0ULL | \
+ ((unsigned long)(a) << 36) | \
+ ((b) << 3))
+#define RFOEX_RX_IND_JDT_CFG0(a) (0x1440ULL | \
+ ((unsigned long)(a) << 36))
+#define RFOEX_RX_IND_JDT_CFG1(a) (0x1448ULL | \
+ ((unsigned long)(a) << 36))
+#define RFOEX_RX_IND_JDT_PTR(a) (0x1450ULL | \
+ ((unsigned long)(a) << 36))
+#define RFOEX_RX_IND_JDT_STATE(a) (0x1478ULL | \
+ ((unsigned long)(a) << 36))
+#define RFOEX_RX_IND_ECPRI_FT_CFG(a) (0x14C0ULL | \
+ ((unsigned long)(a) << 36))
+#define RFOEX_TX_PTP_TSTMP_W0(a, b) (0x7A0ULL | \
+ (((unsigned long)(a) << 36)) | \
+ ((b) << 3))
+#define RFOEX_TX_PTP_TSTMP_W1(a, b) (0x7C0ULL | \
+ (((unsigned long)(a) << 36)) | \
+ ((b) << 3))
+#define RFOEX_TX_PKT_STAT(a, b) (0x720ULL | \
+ (((unsigned long)(a) << 36)) | \
+ ((b) << 3))
+#define RFOEX_TX_OCTS_STAT(a, b) (0x740ULL | \
+ (((unsigned long)(a) << 36)) | \
+ ((b) << 3))
+#define RFOEX_RX_VLAN_DROP_STAT(a, b) (0x8A0ULL | \
+ (((unsigned long)(a) << 36)) | \
+ ((b) << 3))
+#define RFOEX_RX_CGX_PKT_STAT(a, b) (0x15C0ULL | \
+ (((unsigned long)(a) << 36)) | \
+ ((b) << 3))
+#define RFOEX_RX_CGX_OCTS_STAT(a, b) (0x15E0ULL | \
+ (((unsigned long)(a) << 36)) | \
+ ((b) << 3))
+
+/* BCN register offsets and definitions */
+#define BCN_CAPTURE_CFG 0x10400
+#define BCN_CAPTURE_N1_N2 0x10410
+#define BCN_CAPTURE_PTP 0x10430
+
+/* BCN_CAPTURE_CFG register definitions */
+#define CAPT_EN BIT(0)
+#define CAPT_TRIG_SW (3UL << 8)
+
+/* CPRI register offsets */
+#define CPRIX_RXD_GMII_UL_CBUF_CFG1(a) (0x1000ULL | \
+ ((unsigned long)(a) << 36))
+#define CPRIX_RXD_GMII_UL_CBUF_CFG2(a) (0x1008ULL | \
+ ((unsigned long)(a) << 36))
+#define CPRIX_RXD_GMII_UL_RD_DOORBELL(a) (0x1010ULL | \
+ ((unsigned long)(a) << 36))
+#define CPRIX_RXD_GMII_UL_SW_RD_PTR(a) (0x1018ULL | \
+ ((unsigned long)(a) << 36))
+#define CPRIX_RXD_GMII_UL_NXT_WR_PTR(a) (0x1020ULL | \
+ ((unsigned long)(a) << 36))
+#define CPRIX_RXD_GMII_UL_PKT_COUNT(a) (0x1028ULL | \
+ ((unsigned long)(a) << 36))
+#define CPRIX_TXD_GMII_DL_CBUF_CFG1(a) (0x1100ULL | \
+ ((unsigned long)(a) << 36))
+#define CPRIX_TXD_GMII_DL_CBUF_CFG2(a) (0x1108ULL | \
+ ((unsigned long)(a) << 36))
+#define CPRIX_TXD_GMII_DL_WR_DOORBELL(a) (0x1110ULL | \
+ ((unsigned long)(a) << 36))
+#define CPRIX_TXD_GMII_DL_SW_WR_PTR(a) (0x1118ULL | \
+ ((unsigned long)(a) << 36))
+#define CPRIX_TXD_GMII_DL_NXT_RD_PTR(a) (0x1120ULL | \
+ ((unsigned long)(a) << 36))
+#define CPRIX_ETH_UL_INT(a) (0x280ULL | \
+ ((unsigned long)(a) << 36))
+#define CPRIX_ETH_UL_INT_ENA_W1S(a) (0x288ULL | \
+ ((unsigned long)(a) << 36))
+#define CPRIX_ETH_UL_INT_ENA_W1C(a) (0x290ULL | \
+ ((unsigned long)(a) << 36))
+#define CPRIX_ETH_UL_INT_W1S(a) (0x298ULL | \
+ ((unsigned long)(a) << 36))
+#define CPRIX_ETH_BAD_CRC_CNT(a, b) (0x400ULL | \
+ ((unsigned long)(a) << 36) | \
+ ((unsigned long)(b) << 11))
+#define CPRIX_ETH_UL_ERR_CNT(a, b) (0x408ULL | \
+ ((unsigned long)(a) << 36) | \
+ ((unsigned long)(b) << 11))
+#define CPRIX_ETH_UL_OSIZE_CNT(a, b) (0x410ULL | \
+ ((unsigned long)(a) << 36) | \
+ ((unsigned long)(b) << 11))
+#define CPRIX_ETH_UL_USIZE_CNT(a, b) (0x418ULL | \
+ ((unsigned long)(a) << 36) | \
+ ((unsigned long)(b) << 11))
+#define CPRIX_ETH_UL_FIFO_ORUN_CNT(a, b) (0x420ULL | \
+ ((unsigned long)(a) << 36) | \
+ ((unsigned long)(b) << 11))
+#define CPRIX_ETH_UL_GPKTS_CNT(a, b) (0x428ULL | \
+ ((unsigned long)(a) << 36) | \
+ ((unsigned long)(b) << 11))
+#define CPRIX_ETH_UL_BOCT_CNT(a, b) (0x430ULL | \
+ ((unsigned long)(a) << 36) | \
+ ((unsigned long)(b) << 11))
+#define CPRIX_ETH_UL_GOCT_CNT(a, b) (0x438ULL | \
+ ((unsigned long)(a) << 36) | \
+ ((unsigned long)(b) << 11))
+#define CPRIX_ETH_DL_GOCTETS_CNT(a, b) (0x440ULL | \
+ ((unsigned long)(a) << 36) | \
+ ((unsigned long)(b) << 11))
+#define CPRIX_ETH_DL_GPKTS_CNT(a, b) (0x448ULL | \
+ ((unsigned long)(a) << 36) | \
+ ((unsigned long)(b) << 11))
+
+/* MHAB definitions */
+struct mhbw_jd_dma_cfg_word_0_s {
+ u64 dma_mode : 3;
+ u64 target_mem : 1;
+ u64 dswap : 3;
+ u64 cmd_type : 2;
+ u64 reserved1 : 7;
+ u64 chunk_size : 16;
+ u64 block_size : 16;
+ u64 thread_id : 6;
+ u64 reserved2 : 2;
+ u64 group_id : 4;
+ u64 reserved3 : 4;
+};
+
+struct mhbw_jd_dma_cfg_word_1_s {
+ u64 start_addr : 53;
+ u64 reserved1 : 11;
+};
+
+/* RFOE definitions */
+enum rfoe_rx_dir_ctl_pkt_type_e {
+ ROE = 0x0,
+ CHI = 0x1,
+ ALT = 0x2,
+ ECPRI = 0x3,
+ GENERIC = 0x8,
+};
+
+enum rfoe_rx_pswt_e {
+ ROE_TYPE = 0x0,
+ ECPRI_TYPE = 0x2,
+};
+
+enum rfoe_rx_pkt_err_e {
+ RE_NONE = 0x0,
+ RE_PARTIAL = 0x1,
+ RE_JABBER = 0x2,
+ RE_FCS = 0x7,
+ RE_FCS_RCV = 0x8,
+ RE_TERMINATE = 0x9,
+ RE_RX_CTL = 0xB,
+ RE_SKIP = 0xC,
+};
+
+enum rfoe_rx_pkt_logger_idx_e {
+ RX_PKT = 0x0,
+ TX_PKT = 0x1,
+};
+
+struct psm_cmd_addjob_s {
+ /* W0 */
+ u64 opcode : 6;
+ u64 rsrc_set : 2;
+ u64 qid : 8;
+ u64 waitcond : 8;
+ u64 jobtag : 16;
+ u64 reserved1 : 8;
+ u64 mabq : 1;
+ u64 reserved2 : 3;
+ u64 tmem : 1;
+ u64 reserved3 : 3;
+ u64 jobtype : 8;
+ /* W1 */
+ u64 jobptr : 53;
+ u64 reserved4 : 11;
+};
+
+struct rfoe_ecpri_psw0_s {
+ /* W0 */
+ u64 jd_ptr : 53;
+ u64 jd_ptr_tmem : 1;
+ u64 reserved1 : 2;
+ u64 src_id : 4;
+ u64 reserved2 : 2;
+ u64 pswt : 2;
+ /* W1 */
+ u64 msg_type : 8;
+ u64 ecpri_id : 16;
+ u64 flow_id : 8;
+ u64 reserved3 : 6;
+ u64 err_sts : 6;
+ u64 reserved4 : 2;
+ u64 seq_id : 16;
+};
+
+struct rfoe_ecpri_psw1_s {
+ /* W0 */
+ u64 ptp_timestamp;
+ /* W1 */
+ u64 ethertype : 16;
+ u64 eindex : 5;
+ u64 reserved1 : 3;
+ u64 dec_error : 8;
+ u64 dec_num_sections : 8;
+ u64 dec_num_syminc : 8;
+ u64 reserved2 : 8;
+ u64 ptype : 4;
+ u64 reserved3 : 4;
+};
+
+struct rfoe_psw0_s {
+ /* W0 */
+ u64 pkt_err_sts : 4;
+ u64 dma_error : 1;
+ u64 jd_ptr : 53;
+ u64 jd_target_mem : 1;
+ u64 orderinfo_status : 1;
+ u64 lmac_id : 2;
+ u64 pswt : 2;
+ /* W1 */
+ u64 roe_subtype : 8;
+ u64 roe_flowid : 8;
+ u64 fd_symbol : 8;
+ u64 fd_antid : 8;
+ u64 rfoe_timestamp : 32;
+};
+
+struct rfoe_psw1_s {
+ /* W0 */
+ u64 ptp_timestamp;
+ /* W1 */
+ u64 ethertype : 16;
+ u64 eindex : 5;
+ u64 reserved1 : 3;
+ u64 dec_error : 8;
+ u64 dec_num_sections : 8;
+ u64 dec_num_syminc : 8;
+ u64 reserved2 : 8;
+ u64 ptype : 4;
+ u64 reserved3 : 4;
+};
+
+struct rfoex_tx_ptp_tstmp_w1 {
+ u64 lmac_id : 2;
+ u64 rfoe_id : 2;
+ u64 jobid : 16;
+ u64 drop : 1;
+ u64 tx_err : 1;
+ u64 reserved1 : 41;
+ u64 valid : 1;
+};
+
+struct rfoex_abx_slotx_configuration {
+ u64 pkt_mode : 2;
+ u64 da_sel : 3;
+ u64 sa_sel : 3;
+ u64 etype_sel : 3;
+ u64 flowid : 8;
+ u64 subtype : 8;
+ u64 lmacid : 2;
+ u64 sample_mode : 1;
+ u64 sample_widt : 5;
+ u64 sample_width_option : 1;
+ u64 sample_width_sat_bypass : 1;
+ u64 orderinfotype : 1;
+ u64 orderinfooffset : 5;
+ u64 antenna : 8;
+ u64 symbol : 8;
+ u64 sos : 1;
+ u64 eos : 1;
+ u64 orderinfo_insert : 1;
+ u64 custom_timestamp_insert : 1;
+ u64 rfoe_mode : 1;
+};
+
+struct rfoex_abx_slotx_configuration1 {
+ u64 rbmap_bytes : 8;
+ u64 pkt_len : 16;
+ u64 hdr_len : 8;
+ u64 presentation_time_offset : 29;
+ u64 reserved1 : 1;
+ u64 sof_mode : 2;
+};
+
+struct rfoex_abx_slotx_configuration2 {
+ u64 vlan_sel : 3;
+ u64 vlan_num : 2;
+ u64 ptp_mode : 1;
+ u64 ecpri_id_insert : 1;
+ u64 ecpri_seq_id_insert : 1;
+ u64 ecpri_rev : 8;
+ u64 ecpri_msgtype : 8;
+ u64 ecpri_id : 16;
+ u64 ecpri_seq_id : 16;
+ u64 reserved1 : 8;
+};
+
+struct rfoe_rx_ind_vlanx_fwd {
+ u64 fwd : 64;
+};
+
+struct mhab_job_desc_cfg {
+ struct rfoex_abx_slotx_configuration cfg;
+ struct rfoex_abx_slotx_configuration1 cfg1;
+ struct rfoex_abx_slotx_configuration2 cfg2;
+} __packed;
+
+/* CPRI definitions */
+struct cpri_pkt_dl_wqe_hdr {
+ u64 lane_id : 2;
+ u64 reserved1 : 2;
+ u64 mhab_id : 2;
+ u64 reserved2 : 2;
+ u64 pkt_length : 11;
+ u64 reserved3 : 45;
+ u64 w1;
+};
+
+struct cpri_pkt_ul_wqe_hdr {
+ u64 lane_id : 2;
+ u64 reserved1 : 2;
+ u64 mhab_id : 2;
+ u64 reserved2 : 2;
+ u64 pkt_length : 11;
+ u64 reserved3 : 5;
+ u64 fcserr : 1;
+ u64 rsp_ferr : 1;
+ u64 rsp_nferr : 1;
+ u64 reserved4 : 37;
+ u64 w1;
+};
+
+#endif /* _OTX2_BPHY_HW_H_ */
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy_main.c b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy_main.c
new file mode 100644
index 000000000000..d0c222aeaa75
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy_main.c
@@ -0,0 +1,887 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 BPHY RFOE/CPRI Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/interrupt.h>
+
+#include "otx2_bphy.h"
+#include "otx2_rfoe.h"
+#include "otx2_cpri.h"
+#include "otx2_bphy_debugfs.h"
+#include "cnf10k_rfoe.h"
+
+MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_DESCRIPTION(DRV_STRING);
+MODULE_LICENSE("GPL v2");
+
+/* max ptp tx requests */
+int max_ptp_req = 16;
+module_param(max_ptp_req, int, 0644);
+MODULE_PARM_DESC(max_ptp_req, "Maximum PTP Tx requests");
+
+/* cdev */
+static struct class *otx2rfoe_class;
+
+/* reg base address */
+void __iomem *bphy_reg_base;
+void __iomem *psm_reg_base;
+void __iomem *rfoe_reg_base;
+void __iomem *bcn_reg_base;
+void __iomem *ptp_reg_base;
+void __iomem *cpri_reg_base;
+
+/* check if cpri block is available */
+#define cpri_available() ((cpri_reg_base) ? 1 : 0)
+
+/* GPINT(2) interrupt handler routine */
+static irqreturn_t cnf10k_gpint2_intr_handler(int irq, void *dev_id)
+{
+ struct otx2_bphy_cdev_priv *cdev_priv;
+ u32 status, intr_mask;
+ int rfoe_num;
+
+ cdev_priv = (struct otx2_bphy_cdev_priv *)dev_id;
+
+ /* clear interrupt status */
+ status = readq(bphy_reg_base + PSM_INT_GP_SUM_W1C(2)) & 0xFFFFFFFF;
+ writeq(status, bphy_reg_base + PSM_INT_GP_SUM_W1C(2));
+
+ pr_debug("gpint2 status = 0x%x\n", status);
+
+ /* rx intr processing */
+ for (rfoe_num = 0; rfoe_num < cdev_priv->num_rfoe_mhab; rfoe_num++) {
+ intr_mask = CNF10K_RFOE_RX_INTR_MASK(rfoe_num);
+ if (status & intr_mask)
+ cnf10k_rfoe_rx_napi_schedule(rfoe_num, status);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* GPINT(1) interrupt handler routine */
+static irqreturn_t otx2_bphy_intr_handler(int irq, void *dev_id)
+{
+ struct otx2_bphy_cdev_priv *cdev_priv;
+ struct otx2_rfoe_drv_ctx *drv_ctx;
+ struct otx2_rfoe_ndev_priv *priv;
+ struct net_device *netdev;
+ int rfoe_num, cpri_num, i;
+ u32 intr_mask, status;
+
+ cdev_priv = (struct otx2_bphy_cdev_priv *)dev_id;
+
+ /* clear interrupt status */
+ status = readq(bphy_reg_base + PSM_INT_GP_SUM_W1C(1)) & 0xFFFFFFFF;
+ writeq(status, bphy_reg_base + PSM_INT_GP_SUM_W1C(1));
+
+ pr_debug("gpint status = 0x%x\n", status);
+
+ /* CNF10K intr processing */
+ if (CHIP_CNF10K(cdev_priv->hw_version)) {
+ cnf10k_bphy_intr_handler(cdev_priv, status);
+ return IRQ_HANDLED;
+ }
+
+ /* CNF95 intr processing */
+ for (rfoe_num = 0; rfoe_num < MAX_RFOE_INTF; rfoe_num++) {
+ intr_mask = RFOE_RX_INTR_MASK(rfoe_num);
+ if (status & intr_mask)
+ otx2_rfoe_rx_napi_schedule(rfoe_num, status);
+ }
+
+ for (cpri_num = 0; cpri_num < OTX2_BPHY_CPRI_MAX_MHAB; cpri_num++) {
+ intr_mask = CPRI_RX_INTR_MASK(cpri_num);
+ if (status & intr_mask) {
+ /* clear UL ETH interrupt */
+ writeq(0x1, cpri_reg_base + CPRIX_ETH_UL_INT(cpri_num));
+ otx2_cpri_rx_napi_schedule(cpri_num, status);
+ }
+ }
+
+ /* tx intr processing */
+ for (i = 0; i < RFOE_MAX_INTF; i++) {
+ drv_ctx = &rfoe_drv_ctx[i];
+ if (drv_ctx->valid) {
+ netdev = drv_ctx->netdev;
+ priv = netdev_priv(netdev);
+ intr_mask = RFOE_TX_PTP_INTR_MASK(priv->rfoe_num,
+ priv->lmac_id);
+ if ((status & intr_mask) && priv->ptp_tx_skb)
+ schedule_work(&priv->ptp_tx_work);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static inline void msix_enable_ctrl(struct pci_dev *dev)
+{
+ u16 control;
+
+ pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
+ control |= PCI_MSIX_FLAGS_ENABLE;
+ pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control);
+}
+
+static long otx2_bphy_cdev_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct otx2_bphy_cdev_priv *cdev = filp->private_data;
+ int ret;
+
+ if (!cdev) {
+ pr_warn("ioctl: device not opened\n");
+ return -EIO;
+ }
+
+ mutex_lock(&cdev->mutex_lock);
+
+ switch (cmd) {
+ case OTX2_RFOE_IOCTL_ODP_INTF_CFG:
+ {
+ struct bphy_netdev_comm_intf_cfg *intf_cfg;
+ struct pci_dev *bphy_pdev;
+ int idx;
+
+ if (cdev->odp_intf_cfg) {
+ dev_info(cdev->dev, "odp interface cfg already done\n");
+ ret = -EBUSY;
+ goto out;
+ }
+
+ intf_cfg = kzalloc(MAX_RFOE_INTF * sizeof(*intf_cfg),
+ GFP_KERNEL);
+ if (!intf_cfg) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (copy_from_user(intf_cfg, (void __user *)arg,
+ (MAX_RFOE_INTF *
+ sizeof(struct bphy_netdev_comm_intf_cfg)))) {
+ dev_err(cdev->dev, "copy from user fault\n");
+ ret = -EFAULT;
+ goto out;
+ }
+
+ for (idx = 0; idx < OTX2_BPHY_MHAB_INST; idx++)
+ cdev->mhab_mode[idx] = intf_cfg[idx].if_type;
+
+ ret = otx2_rfoe_parse_and_init_intf(cdev, intf_cfg);
+ if (ret < 0) {
+ dev_err(cdev->dev, "odp <-> netdev parse error\n");
+ goto out;
+ }
+
+ if (cpri_available()) {
+ ret = otx2_cpri_parse_and_init_intf(cdev, intf_cfg);
+ if (ret < 0) {
+ dev_err(cdev->dev, "odp <-> netdev parse error\n");
+ goto out;
+ }
+ }
+
+ /* The MSIXEN bit gets cleared when the ODP BPHY driver resets
+ * the BPHY, so re-enable it here in the ioctl.
+ */
+ bphy_pdev = pci_get_device(OTX2_BPHY_PCI_VENDOR_ID,
+ OTX2_BPHY_PCI_DEVICE_ID, NULL);
+ if (!bphy_pdev) {
+ dev_err(cdev->dev, "Couldn't find BPHY PCI device %x\n",
+ OTX2_BPHY_PCI_DEVICE_ID);
+ kfree(intf_cfg);
+ ret = -ENODEV;
+ goto out;
+ }
+ msix_enable_ctrl(bphy_pdev);
+
+ /* Enable CPRI ETH UL INT */
+ for (idx = 0; idx < OTX2_BPHY_CPRI_MAX_MHAB; idx++) {
+ if (intf_cfg[idx].if_type == IF_TYPE_CPRI)
+ writeq(0x1, cpri_reg_base +
+ CPRIX_ETH_UL_INT_ENA_W1S(idx));
+ }
+
+ /* Enable GPINT Rx and Tx interrupts */
+ writeq(0xFFFFFFFF, bphy_reg_base + PSM_INT_GP_ENA_W1S(1));
+
+ cdev->odp_intf_cfg = 1;
+
+ kfree(intf_cfg);
+
+ ret = 0;
+ goto out;
+ }
+ case OTX2_RFOE_IOCTL_ODP_DEINIT:
+ {
+ u32 status;
+
+ /* Disable GPINT Rx and Tx interrupts */
+ writeq(0xFFFFFFFF, bphy_reg_base + PSM_INT_GP_ENA_W1C(1));
+
+ /* clear interrupt status */
+ status = readq(bphy_reg_base + PSM_INT_GP_SUM_W1C(1)) &
+ 0xFFFFFFFF;
+ writeq(status, bphy_reg_base + PSM_INT_GP_SUM_W1C(1));
+
+ otx2_bphy_rfoe_cleanup();
+ if (cpri_available())
+ otx2_bphy_cpri_cleanup();
+
+ cdev->odp_intf_cfg = 0;
+
+ ret = 0;
+ goto out;
+ }
+ case OTX2_RFOE_IOCTL_RX_IND_CFG:
+ {
+ struct otx2_rfoe_rx_ind_cfg cfg;
+ unsigned long flags;
+
+ if (copy_from_user(&cfg, (void __user *)arg,
+ sizeof(struct otx2_rfoe_rx_ind_cfg))) {
+ dev_err(cdev->dev, "copy from user fault\n");
+ ret = -EFAULT;
+ goto out;
+ }
+
+ spin_lock_irqsave(&cdev->mbt_lock, flags);
+ writeq(cfg.rx_ind_idx, (rfoe_reg_base +
+ RFOEX_RX_INDIRECT_INDEX_OFFSET(cfg.rfoe_num)));
+ if (cfg.dir == OTX2_RFOE_RX_IND_READ)
+ cfg.regval = readq(rfoe_reg_base + cfg.regoff);
+ else
+ writeq(cfg.regval, rfoe_reg_base + cfg.regoff);
+ spin_unlock_irqrestore(&cdev->mbt_lock, flags);
+ if (copy_to_user((void __user *)(unsigned long)arg, &cfg,
+ sizeof(struct otx2_rfoe_rx_ind_cfg))) {
+ dev_err(cdev->dev, "copy to user fault\n");
+ ret = -EFAULT;
+ goto out;
+ }
+ ret = 0;
+ goto out;
+ }
+ case OTX2_RFOE_IOCTL_PTP_OFFSET:
+ {
+ u64 bcn_n1, bcn_n2, bcn_n1_ns, bcn_n2_ps, ptp0_ns, regval;
+ struct otx2_rfoe_drv_ctx *drv_ctx = NULL;
+ struct otx2_rfoe_ndev_priv *priv;
+ struct ptp_bcn_off_cfg *ptp_cfg;
+ struct ptp_clk_cfg clk_cfg;
+ struct net_device *netdev;
+ struct ptp_bcn_ref ref;
+ unsigned long expires;
+ int idx;
+
+ if (!cdev->odp_intf_cfg) {
+ dev_info(cdev->dev, "odp interface cfg is not done\n");
+ ret = -EBUSY;
+ goto out;
+ }
+ if (copy_from_user(&clk_cfg, (void __user *)arg,
+ sizeof(struct ptp_clk_cfg))) {
+ dev_err(cdev->dev, "copy from user fault\n");
+ ret = -EFAULT;
+ goto out;
+ }
+ if (!(clk_cfg.clk_freq_ghz && clk_cfg.clk_freq_div)) {
+ dev_err(cdev->dev, "Invalid ptp clk parameters\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ for (idx = 0; idx < RFOE_MAX_INTF; idx++) {
+ drv_ctx = &rfoe_drv_ctx[idx];
+ if (drv_ctx->valid)
+ break;
+ }
+ if (idx >= RFOE_MAX_INTF) {
+ dev_err(cdev->dev, "drv ctx not found\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ netdev = drv_ctx->netdev;
+ priv = netdev_priv(netdev);
+ ptp_cfg = priv->ptp_cfg;
+ ptp_cfg->clk_cfg.clk_freq_ghz = clk_cfg.clk_freq_ghz;
+ ptp_cfg->clk_cfg.clk_freq_div = clk_cfg.clk_freq_div;
+ /* capture ptp and bcn timestamp using BCN_CAPTURE_CFG */
+ writeq((CAPT_EN | CAPT_TRIG_SW),
+ priv->bcn_reg_base + BCN_CAPTURE_CFG);
+ /* poll for capt_en to become 0 */
+ while ((readq(priv->bcn_reg_base + BCN_CAPTURE_CFG) & CAPT_EN))
+ cpu_relax();
+ ptp0_ns = readq(priv->bcn_reg_base + BCN_CAPTURE_PTP);
+ regval = readq(priv->bcn_reg_base + BCN_CAPTURE_N1_N2);
+ bcn_n1 = (regval >> 24) & 0xFFFFFFFFFF;
+ bcn_n2 = regval & 0xFFFFFF;
+ /* BCN N1 10 msec counter to nsec */
+ bcn_n1_ns = bcn_n1 * 10 * NSEC_PER_MSEC;
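+ /* BCN N1 counts from the GPS epoch; add the GPS->UTC epoch difference */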
+ bcn_n1_ns += UTC_GPS_EPOCH_DIFF * NSEC_PER_SEC;
+ /* BCN N2 clock period 0.813802083 nsec to pico secs */
+ bcn_n2_ps = (bcn_n2 * 813802083UL) / 1000000;
+ ref.ptp0_ns = ptp0_ns;
+ ref.bcn0_n1_ns = bcn_n1_ns;
+ ref.bcn0_n2_ps = bcn_n2_ps;
+ memcpy(&ptp_cfg->old_ref, &ref, sizeof(struct ptp_bcn_ref));
+ memcpy(&ptp_cfg->new_ref, &ref, sizeof(struct ptp_bcn_ref));
+ ptp_cfg->use_ptp_alg = 1;
+ expires = jiffies + PTP_OFF_RESAMPLE_THRESH * HZ;
+ mod_timer(&ptp_cfg->ptp_timer, expires);
+ ret = 0;
+ goto out;
+ }
+ case OTX2_RFOE_IOCTL_SEC_BCN_OFFSET:
+ {
+ struct otx2_rfoe_drv_ctx *drv_ctx = NULL;
+ struct otx2_rfoe_ndev_priv *priv;
+ struct bcn_sec_offset_cfg cfg;
+ struct net_device *netdev;
+ int idx;
+
+ if (!cdev->odp_intf_cfg) {
+ dev_info(cdev->dev, "odp interface cfg is not done\n");
+ ret = -EBUSY;
+ goto out;
+ }
+ if (copy_from_user(&cfg, (void __user *)arg,
+ sizeof(struct bcn_sec_offset_cfg))) {
+ dev_err(cdev->dev, "copy from user fault\n");
+ ret = -EFAULT;
+ goto out;
+ }
+ for (idx = 0; idx < RFOE_MAX_INTF; idx++) {
+ drv_ctx = &rfoe_drv_ctx[idx];
+ if (drv_ctx->valid &&
+ drv_ctx->rfoe_num == cfg.rfoe_num &&
+ drv_ctx->lmac_id == cfg.lmac_id)
+ break;
+ }
+ if (idx >= RFOE_MAX_INTF) {
+ dev_err(cdev->dev, "drv ctx not found\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ netdev = drv_ctx->netdev;
+ priv = netdev_priv(netdev);
+ priv->sec_bcn_offset = cfg.sec_bcn_offset;
+ ret = 0;
+ goto out;
+ }
+ case OTX2_RFOE_IOCTL_MODE_CPRI:
+ {
+ int id = 0;
+
+ if (!cdev->odp_intf_cfg) {
+ dev_info(cdev->dev, "odp interface cfg is not done\n");
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (copy_from_user(&id, (void __user *)arg, sizeof(int))) {
+ dev_err(cdev->dev, "copy from user fault\n");
+ ret = -EFAULT;
+ goto out;
+ }
+
+ if (cdev->mhab_mode[id] == IF_TYPE_ETHERNET) {
+ otx2_rfoe_disable_intf(id);
+ otx2_cpri_enable_intf(id);
+ cdev->mhab_mode[id] = IF_TYPE_CPRI;
+ }
+
+ ret = 0;
+ goto out;
+ }
+ case OTX2_RFOE_IOCTL_LINK_EVENT:
+ {
+ struct otx2_rfoe_drv_ctx *drv_ctx = NULL;
+ struct otx2_rfoe_link_event cfg;
+ struct net_device *netdev;
+ int idx;
+
+ if (!cdev->odp_intf_cfg) {
+ dev_info(cdev->dev, "odp interface cfg is not done\n");
+ ret = -EBUSY;
+ goto out;
+ }
+ if (copy_from_user(&cfg, (void __user *)arg,
+ sizeof(struct otx2_rfoe_link_event))) {
+ dev_err(cdev->dev, "copy from user fault\n");
+ ret = -EFAULT;
+ goto out;
+ }
+ for (idx = 0; idx < RFOE_MAX_INTF; idx++) {
+ drv_ctx = &rfoe_drv_ctx[idx];
+ if (drv_ctx->valid &&
+ drv_ctx->rfoe_num == cfg.rfoe_num &&
+ drv_ctx->lmac_id == cfg.lmac_id)
+ break;
+ }
+ if (idx >= RFOE_MAX_INTF) {
+ dev_err(cdev->dev, "drv ctx not found\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ netdev = drv_ctx->netdev;
+ otx2_rfoe_set_link_state(netdev, cfg.link_state);
+ ret = 0;
+ goto out;
+ }
+ case OTX2_CPRI_IOCTL_LINK_EVENT:
+ {
+ struct otx2_cpri_drv_ctx *drv_ctx = NULL;
+ struct otx2_cpri_link_event cfg;
+ struct net_device *netdev;
+ int idx;
+
+ if (!cdev->odp_intf_cfg) {
+ dev_info(cdev->dev, "odp interface cfg is not done\n");
+ ret = -EBUSY;
+ goto out;
+ }
+ if (copy_from_user(&cfg, (void __user *)arg,
+ sizeof(struct otx2_cpri_link_event))) {
+ dev_err(cdev->dev, "copy from user fault\n");
+ ret = -EFAULT;
+ goto out;
+ }
+ for (idx = 0; idx < OTX2_BPHY_CPRI_MAX_INTF; idx++) {
+ drv_ctx = &cpri_drv_ctx[idx];
+ if (drv_ctx->valid &&
+ drv_ctx->cpri_num == cfg.cpri_num &&
+ drv_ctx->lmac_id == cfg.lmac_id)
+ break;
+ }
+ if (idx >= OTX2_BPHY_CPRI_MAX_INTF) {
+ dev_err(cdev->dev, "drv ctx not found\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ netdev = drv_ctx->netdev;
+ otx2_cpri_set_link_state(netdev, cfg.link_state);
+ ret = 0;
+ goto out;
+ }
+ case OTX2_IOCTL_RFOE_10x_CFG:
+ {
+ struct cnf10k_rfoe_ndev_comm_intf_cfg *intf_cfg;
+ struct pci_dev *bphy_pdev;
+ int idx;
+
+ if (cdev->odp_intf_cfg) {
+ dev_info(cdev->dev, "odp interface cfg already done\n");
+ ret = -EBUSY;
+ goto out;
+ }
+
+ intf_cfg = kzalloc(BPHY_MAX_RFOE_MHAB * sizeof(*intf_cfg),
+ GFP_KERNEL);
+ if (!intf_cfg) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (copy_from_user(intf_cfg, (void __user *)arg,
+ (BPHY_MAX_RFOE_MHAB *
+ sizeof(*intf_cfg)))) {
+ dev_err(cdev->dev, "copy from user fault\n");
+ ret = -EFAULT;
+ goto out;
+ }
+
+ for (idx = 0; idx < BPHY_MAX_RFOE_MHAB; idx++)
+ cdev->mhab_mode[idx] = IF_TYPE_ETHERNET;
+
+ ret = cnf10k_rfoe_parse_and_init_intf(cdev, intf_cfg);
+ if (ret < 0) {
+ dev_err(cdev->dev, "odp <-> netdev parse error\n");
+ goto out;
+ }
+
+ /* The MSIXEN bit gets cleared when the ODP BPHY driver resets
+ * the BPHY, so re-enable it here in the ioctl.
+ */
+ bphy_pdev = pci_get_device(OTX2_BPHY_PCI_VENDOR_ID,
+ OTX2_BPHY_PCI_DEVICE_ID, NULL);
+ if (!bphy_pdev) {
+ dev_err(cdev->dev, "Couldn't find BPHY PCI device %x\n",
+ OTX2_BPHY_PCI_DEVICE_ID);
+ kfree(intf_cfg);
+ ret = -ENODEV;
+ goto out;
+ }
+ msix_enable_ctrl(bphy_pdev);
+
+ /* Enable GPINT Rx and Tx interrupts */
+ writeq(0xFFFFFFFF, bphy_reg_base + PSM_INT_GP_ENA_W1S(1));
+ if (cdev->gpint2_irq)
+ writeq(0xFFFFFFFF, bphy_reg_base + PSM_INT_GP_ENA_W1S(2));
+
+ cdev->odp_intf_cfg = 1;
+
+ kfree(intf_cfg);
+
+ ret = 0;
+ goto out;
+ }
+ default:
+ {
+ dev_info(cdev->dev, "ioctl: no match\n");
+ ret = -EINVAL;
+ }
+ }
+
+out:
+ mutex_unlock(&cdev->mutex_lock);
+ return ret;
+}
+
+static int otx2_bphy_cdev_open(struct inode *inode, struct file *filp)
+{
+ struct otx2_bphy_cdev_priv *cdev;
+ int status = 0;
+
+ cdev = container_of(inode->i_cdev, struct otx2_bphy_cdev_priv, cdev);
+
+ mutex_lock(&cdev->mutex_lock);
+
+ if (cdev->is_open) {
+ dev_err(cdev->dev, "failed to open the device\n");
+ status = -EBUSY;
+ goto error;
+ }
+ cdev->is_open = 1;
+ filp->private_data = cdev;
+
+error:
+ mutex_unlock(&cdev->mutex_lock);
+
+ return status;
+}
+
+static int otx2_bphy_cdev_release(struct inode *inode, struct file *filp)
+{
+ struct otx2_bphy_cdev_priv *cdev = filp->private_data;
+ u32 status;
+
+ mutex_lock(&cdev->mutex_lock);
+
+ if (!cdev->odp_intf_cfg)
+ goto cdev_release_exit;
+
+ /* Disable GPINT Rx and Tx interrupts */
+ writeq(0xFFFFFFFF, bphy_reg_base + PSM_INT_GP_ENA_W1C(1));
+ if (cdev->gpint2_irq)
+ writeq(0xFFFFFFFF, bphy_reg_base + PSM_INT_GP_ENA_W1C(2));
+
+ /* clear interrupt status */
+ status = readq(bphy_reg_base + PSM_INT_GP_SUM_W1C(1)) & 0xFFFFFFFF;
+ writeq(status, bphy_reg_base + PSM_INT_GP_SUM_W1C(1));
+ if (cdev->gpint2_irq) {
+ status = readq(bphy_reg_base + PSM_INT_GP_SUM_W1C(2)) &
+ 0xFFFFFFFF;
+ writeq(status, bphy_reg_base + PSM_INT_GP_SUM_W1C(2));
+ }
+
+ otx2_bphy_rfoe_cleanup();
+ if (cpri_available())
+ otx2_bphy_cpri_cleanup();
+
+ cdev->odp_intf_cfg = 0;
+
+cdev_release_exit:
+ cdev->is_open = 0;
+ mutex_unlock(&cdev->mutex_lock);
+
+ return 0;
+}
+
+static const struct file_operations otx2_bphy_cdev_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = otx2_bphy_cdev_ioctl,
+ .open = otx2_bphy_cdev_open,
+ .release = otx2_bphy_cdev_release,
+};
+
+static int otx2_bphy_probe(struct platform_device *pdev)
+{
+ struct otx2_bphy_cdev_priv *cdev_priv;
+ struct pci_dev *bphy_pdev;
+ struct resource *res;
+ int err = 0;
+ dev_t devt;
+
+ /* allocate priv structure */
+ cdev_priv = kzalloc(sizeof(*cdev_priv), GFP_KERNEL);
+ if (!cdev_priv) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /* BPHY is a PCI device and the kernel resets the MSIXEN bit during
+ * enumeration. So enable it back for interrupts to be generated.
+ */
+ bphy_pdev = pci_get_device(OTX2_BPHY_PCI_VENDOR_ID,
+ OTX2_BPHY_PCI_DEVICE_ID, NULL);
+ if (!bphy_pdev) {
+ dev_err(&pdev->dev, "Couldn't find BPHY PCI device %x\n",
+ OTX2_BPHY_PCI_DEVICE_ID);
+ err = -ENODEV;
+ goto free_cdev_priv;
+ }
+ msix_enable_ctrl(bphy_pdev);
+
+ /* bphy registers ioremap */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to get bphy resource\n");
+ err = -ENXIO;
+ goto free_cdev_priv;
+ }
+ bphy_reg_base = ioremap(res->start, resource_size(res));
+ if (!bphy_reg_base) {
+ dev_err(&pdev->dev, "failed to ioremap bphy registers\n");
+ err = -ENOMEM;
+ goto free_cdev_priv;
+ }
+ /* psm registers ioremap */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to get psm resource\n");
+ err = -ENXIO;
+ goto out_unmap_bphy_reg;
+ }
+ psm_reg_base = ioremap(res->start, resource_size(res));
+ if (!psm_reg_base) {
+ dev_err(&pdev->dev, "failed to ioremap psm registers\n");
+ err = -ENOMEM;
+ goto out_unmap_bphy_reg;
+ }
+ /* rfoe registers ioremap */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to get rfoe resource\n");
+ err = -ENXIO;
+ goto out_unmap_psm_reg;
+ }
+ rfoe_reg_base = ioremap(res->start, resource_size(res));
+ if (!rfoe_reg_base) {
+ dev_err(&pdev->dev, "failed to ioremap rfoe registers\n");
+ err = -ENOMEM;
+ goto out_unmap_psm_reg;
+ }
+ /* bcn register ioremap */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to get bcn resource\n");
+ err = -ENXIO;
+ goto out_unmap_rfoe_reg;
+ }
+ bcn_reg_base = ioremap(res->start, resource_size(res));
+ if (!bcn_reg_base) {
+ dev_err(&pdev->dev, "failed to ioremap bcn registers\n");
+ err = -ENOMEM;
+ goto out_unmap_rfoe_reg;
+ }
+ /* ptp register ioremap */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 4);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to get ptp resource\n");
+ err = -ENXIO;
+ goto out_unmap_bcn_reg;
+ }
+ ptp_reg_base = ioremap(res->start, resource_size(res));
+ if (!ptp_reg_base) {
+ dev_err(&pdev->dev, "failed to ioremap ptp registers\n");
+ err = -ENOMEM;
+ goto out_unmap_bcn_reg;
+ }
+ /* cpri registers ioremap */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 5);
+ if (!res) {
+ cpri_reg_base = NULL;
+ } else {
+ dev_info(&pdev->dev, "cpri mem resource found\n");
+ cpri_reg_base = ioremap(res->start, resource_size(res));
+ if (!cpri_reg_base) {
+ dev_err(&pdev->dev, "failed to ioremap cpri registers\n");
+ err = -ENOMEM;
+ goto out_unmap_ptp_reg;
+ }
+ }
+ /* get irq */
+ cdev_priv->irq = platform_get_irq(pdev, 0);
+ if (cdev_priv->irq <= 0) {
+ dev_err(&pdev->dev, "irq resource not found\n");
+ err = -ENXIO;
+ goto out_unmap_cpri_reg;
+ }
+ cdev_priv->gpint2_irq = platform_get_irq(pdev, 1);
+ if (cdev_priv->gpint2_irq < 0)
+ cdev_priv->gpint2_irq = 0;
+ else
+ dev_info(&pdev->dev, "gpint2 irq resource found\n");
+
+ /* create a character device */
+ err = alloc_chrdev_region(&devt, 0, 1, DEVICE_NAME);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to alloc chrdev device region\n");
+ goto out_unmap_cpri_reg;
+ }
+
+ otx2rfoe_class = class_create(THIS_MODULE, DEVICE_NAME);
+ if (IS_ERR(otx2rfoe_class)) {
+ dev_err(&pdev->dev, "couldn't create class %s\n", DEVICE_NAME);
+ err = PTR_ERR(otx2rfoe_class);
+ goto out_unregister_chrdev_region;
+ }
+
+ cdev_priv->devt = devt;
+ cdev_priv->is_open = 0;
+ spin_lock_init(&cdev_priv->lock);
+ spin_lock_init(&cdev_priv->mbt_lock);
+ mutex_init(&cdev_priv->mutex_lock);
+
+ cdev_init(&cdev_priv->cdev, &otx2_bphy_cdev_fops);
+ cdev_priv->cdev.owner = THIS_MODULE;
+
+ err = cdev_add(&cdev_priv->cdev, devt, 1);
+ if (err < 0) {
+ dev_err(&pdev->dev, "cdev_add() failed\n");
+ goto out_class_destroy;
+ }
+
+ cdev_priv->dev = device_create(otx2rfoe_class, &pdev->dev,
+ cdev_priv->cdev.dev, cdev_priv,
+ DEVICE_NAME);
+ if (IS_ERR(cdev_priv->dev)) {
+ dev_err(&pdev->dev, "device_create failed\n");
+ err = PTR_ERR(cdev_priv->dev);
+ goto out_cdev_del;
+ }
+
+ dev_info(&pdev->dev, "successfully registered char device, major=%d\n",
+ MAJOR(cdev_priv->cdev.dev));
+
+ err = request_irq(cdev_priv->irq, otx2_bphy_intr_handler, 0,
+ "otx2_bphy_int", cdev_priv);
+ if (err) {
+ dev_err(&pdev->dev, "can't assign irq %d\n", cdev_priv->irq);
+ goto out_device_destroy;
+ }
+
+ if (cdev_priv->gpint2_irq) {
+ err = request_irq(cdev_priv->gpint2_irq,
+ cnf10k_gpint2_intr_handler, 0,
+ "cn10k_bphy_int", cdev_priv);
+ if (err) {
+ dev_err(&pdev->dev, "can't assign irq %d\n",
+ cdev_priv->gpint2_irq);
+ goto free_irq;
+ }
+ }
+
+ platform_set_drvdata(pdev, cdev_priv);
+
+ err = 0;
+ goto out;
+
+free_irq:
+ free_irq(cdev_priv->irq, cdev_priv);
+out_device_destroy:
+ device_destroy(otx2rfoe_class, cdev_priv->cdev.dev);
+out_cdev_del:
+ cdev_del(&cdev_priv->cdev);
+out_class_destroy:
+ class_destroy(otx2rfoe_class);
+out_unregister_chrdev_region:
+ unregister_chrdev_region(devt, 1);
+out_unmap_cpri_reg:
+ iounmap(cpri_reg_base);
+out_unmap_ptp_reg:
+ iounmap(ptp_reg_base);
+out_unmap_bcn_reg:
+ iounmap(bcn_reg_base);
+out_unmap_rfoe_reg:
+ iounmap(rfoe_reg_base);
+out_unmap_psm_reg:
+ iounmap(psm_reg_base);
+out_unmap_bphy_reg:
+ iounmap(bphy_reg_base);
+free_cdev_priv:
+ kfree(cdev_priv);
+out:
+ return err;
+}
+
+static int otx2_bphy_remove(struct platform_device *pdev)
+{
+ struct otx2_bphy_cdev_priv *cdev_priv = dev_get_drvdata(&pdev->dev);
+
+ /* free irqs before unmapping the registers used by the handlers */
+ free_irq(cdev_priv->irq, cdev_priv);
+ if (cdev_priv->gpint2_irq)
+ free_irq(cdev_priv->gpint2_irq, cdev_priv);
+
+ /* unmap register regions */
+ iounmap(cpri_reg_base);
+ iounmap(ptp_reg_base);
+ iounmap(bcn_reg_base);
+ iounmap(rfoe_reg_base);
+ iounmap(psm_reg_base);
+ iounmap(bphy_reg_base);
+
+ /* char device cleanup */
+ device_destroy(otx2rfoe_class, cdev_priv->cdev.dev);
+ cdev_del(&cdev_priv->cdev);
+ class_destroy(otx2rfoe_class);
+ unregister_chrdev_region(cdev_priv->cdev.dev, 1);
+ kfree(cdev_priv);
+
+ return 0;
+}
+
+static const struct of_device_id otx2_bphy_of_match[] = {
+ { .compatible = "marvell,bphy-netdev" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, otx2_bphy_of_match);
+
+static struct platform_driver otx2_bphy_driver = {
+ .probe = otx2_bphy_probe,
+ .remove = otx2_bphy_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = otx2_bphy_of_match,
+ },
+};
+
+static int __init otx2_bphy_init(void)
+{
+ int ret;
+
+ pr_info("%s: %s\n", DRV_NAME, DRV_STRING);
+
+ ret = platform_driver_register(&otx2_bphy_driver);
+ if (ret < 0)
+ return ret;
+
+ otx2_bphy_debugfs_init();
+
+ return 0;
+}
+
+static void __exit otx2_bphy_exit(void)
+{
+ otx2_bphy_debugfs_exit();
+
+ platform_driver_unregister(&otx2_bphy_driver);
+}
+
+module_init(otx2_bphy_init);
+module_exit(otx2_bphy_exit);
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_cpri.c b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_cpri.c
new file mode 100644
index 000000000000..2fda900e22c9
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_cpri.c
@@ -0,0 +1,755 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 BPHY RFOE/CPRI Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "otx2_cpri.h"
+#include "otx2_bphy_debugfs.h"
+
+/* Theory of Operation
+ *
+ * I. General
+ *
+ * The BPHY CPRI netdev processes Ethernet packets which are received
+ * and transmitted by the CPRI MHAB. The ODP BPHY application shares the
+ * CPRI ETH UL/DL configuration information with the driver using ioctl.
+ * The Rx notification is sent to the netdev using a PSM GPINT.
+ *
+ * II. Driver Operation
+ *
+ * This driver registers a character device and provides ioctls for the
+ * ODP application to initialize the netdev(s) to process CPRI Ethernet
+ * packets. Each netdev instance created by the driver corresponds to
+ * a unique CPRI MHAB id and lane id. The ODP application shares
+ * information such as the CPRI ETH UL/DL circular buffers and the Rx
+ * GPINT number per CPRI MHAB; the UL/DL circular buffers are shared
+ * per CPRI MHAB id. The Rx/Tx packet memory (DDR) is also allocated
+ * by the ODP application. The GPINT is set up using the
+ * CPRI_ETH_UL_INT_PSM_MSG_W0 and CPRI_ETH_UL_INT_PSM_MSG_W1 registers.
+ *
+ * III. Transmit
+ *
+ * The driver xmit routine selects the DL circular buffer ring based on
+ * the MHAB id and, if a free entry is available, writes the WQE header
+ * and packet data to the DL entry and updates DL_WR_DOORBELL with the
+ * number of packets written for the hardware to process.
+ *
+ * IV. Receive
+ *
+ * The driver receives a GPINT interrupt notification per MHAB and
+ * invokes the NAPI handler. The NAPI handler reads the UL circular
+ * buffer ring parameters UL_SW_RD_PTR and UL_NXT_WR_PTR to get the
+ * count of packets to be processed (see the illustrative ring-occupancy
+ * sketch following this comment). For each packet received, the driver
+ * allocates an skb and copies the packet data into it, then updates the
+ * UL_RD_DOORBELL register with the count of packets processed.
+ *
+ * V. Miscellaneous
+ *
+ * Ethtool:
+ * The ethtool stats show packet statistics for each netdev instance.
+ *
+ */
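+
+/* Illustrative sketch (editorial aid, not used by the driver): how the Rx
+ * path derives the number of UL ring entries to process from the hardware
+ * write pointer and the driver's software read pointer. The helper name is
+ * hypothetical; the real logic lives in otx2_cpri_process_rx_pkts() below.
+ */
+static inline int otx2_cpri_example_ul_count(int sw_rd_ptr, u16 nxt_wr_ptr,
+ int num_entries)
+{
+ /* convert the 16-byte-unit HW pointer to a ring entry index */
+ int head = CIRC_BUF_ENTRY(nxt_wr_ptr);
+
+ /* HW head behind the SW read pointer means the ring has wrapped */
+ if (sw_rd_ptr > head)
+ return (num_entries - sw_rd_ptr) + head;
+
+ return head - sw_rd_ptr;
+}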
+
+/* global driver ctx */
+struct otx2_cpri_drv_ctx cpri_drv_ctx[OTX2_BPHY_CPRI_MAX_INTF];
+
+/* debugfs */
+static void otx2_cpri_debugfs_reader(char *buffer, size_t count, void *priv);
+static const char *otx2_cpri_debugfs_get_formatter(void);
+static size_t otx2_cpri_debugfs_get_buffer_size(void);
+static void otx2_cpri_debugfs_create(struct otx2_cpri_drv_ctx *ctx);
+static void otx2_cpri_debugfs_remove(struct otx2_cpri_drv_ctx *ctx);
+
+static struct net_device *otx2_cpri_get_netdev(int mhab_id, int lmac_id)
+{
+ struct net_device *netdev = NULL;
+ int idx;
+
+ for (idx = 0; idx < OTX2_BPHY_CPRI_MAX_INTF; idx++) {
+ if (cpri_drv_ctx[idx].cpri_num == mhab_id &&
+ cpri_drv_ctx[idx].lmac_id == lmac_id &&
+ cpri_drv_ctx[idx].valid) {
+ netdev = cpri_drv_ctx[idx].netdev;
+ break;
+ }
+ }
+
+ return netdev;
+}
+
+void otx2_cpri_enable_intf(int cpri_num)
+{
+ struct otx2_cpri_drv_ctx *drv_ctx;
+ struct otx2_cpri_ndev_priv *priv;
+ struct net_device *netdev;
+ int idx;
+
+ for (idx = 0; idx < OTX2_BPHY_CPRI_MAX_INTF; idx++) {
+ drv_ctx = &cpri_drv_ctx[idx];
+ if (drv_ctx->cpri_num == cpri_num && drv_ctx->valid) {
+ netdev = drv_ctx->netdev;
+ priv = netdev_priv(netdev);
+ priv->if_type = IF_TYPE_CPRI;
+ }
+ }
+}
+
+void otx2_bphy_cpri_cleanup(void)
+{
+ struct otx2_cpri_drv_ctx *drv_ctx = NULL;
+ struct otx2_cpri_ndev_priv *priv;
+ struct net_device *netdev;
+ int i;
+
+ for (i = 0; i < OTX2_BPHY_CPRI_MAX_INTF; i++) {
+ drv_ctx = &cpri_drv_ctx[i];
+ if (drv_ctx->valid) {
+ otx2_cpri_debugfs_remove(drv_ctx);
+ netdev = drv_ctx->netdev;
+ priv = netdev_priv(netdev);
+ unregister_netdev(netdev);
+ netif_napi_del(&priv->napi);
+ --(priv->cpri_common->refcnt);
+ if (priv->cpri_common->refcnt == 0)
+ kfree(priv->cpri_common);
+ free_netdev(netdev);
+ drv_ctx->valid = 0;
+ }
+ }
+
+ /* Disable CPRI ETH UL INT */
+ for (i = 0; i < OTX2_BPHY_CPRI_MAX_MHAB; i++)
+ writeq(0x1, cpri_reg_base +
+ CPRIX_ETH_UL_INT_ENA_W1C(i));
+}
+
+static int otx2_cpri_process_rx_pkts(struct otx2_cpri_ndev_priv *priv,
+ int budget)
+{
+ int count, head, processed_pkts = 0;
+ struct otx2_cpri_ndev_priv *priv2;
+ struct cpri_pkt_ul_wqe_hdr *wqe;
+ struct ul_cbuf_cfg *ul_cfg;
+ struct net_device *netdev;
+ u16 nxt_wr_ptr, len;
+ struct sk_buff *skb;
+ u8 *pkt_buf;
+
+ ul_cfg = &priv->cpri_common->ul_cfg;
+
+ nxt_wr_ptr = readq(priv->cpri_reg_base +
+ CPRIX_RXD_GMII_UL_NXT_WR_PTR(priv->cpri_num)) &
+ 0xFFFF;
+ /* get the HW head */
+ head = CIRC_BUF_ENTRY(nxt_wr_ptr);
+
+ if (ul_cfg->sw_rd_ptr > head) {
+ count = ul_cfg->num_entries - ul_cfg->sw_rd_ptr;
+ count += head;
+ } else {
+ count = head - ul_cfg->sw_rd_ptr;
+ }
+
+ while (likely((processed_pkts < budget) && (processed_pkts < count))) {
+ pkt_buf = (u8 *)ul_cfg->cbuf_virt_addr +
+ (OTX2_BPHY_CPRI_PKT_BUF_SIZE * ul_cfg->sw_rd_ptr);
+ wqe = (struct cpri_pkt_ul_wqe_hdr *)pkt_buf;
+ netdev = otx2_cpri_get_netdev(wqe->mhab_id, wqe->lane_id);
+ if (unlikely(!netdev)) {
+ net_err_ratelimited("CPRI Rx netdev not found, cpri%d lmac%d\n",
+ wqe->mhab_id, wqe->lane_id);
+ priv->stats.rx_dropped++;
+ priv->last_rx_dropped_jiffies = jiffies;
+ goto update_processed_pkts;
+ }
+ priv2 = netdev_priv(netdev);
+ if (wqe->fcserr || wqe->rsp_ferr || wqe->rsp_nferr) {
+ net_err_ratelimited("%s: CPRI Rx err,cpri%d lmac%d sw_rd_ptr=%d\n",
+ netdev->name,
+ wqe->mhab_id, wqe->lane_id,
+ ul_cfg->sw_rd_ptr);
+ priv2->stats.rx_dropped++;
+ priv2->last_rx_dropped_jiffies = jiffies;
+ goto update_processed_pkts;
+ }
+ if (unlikely(!netif_carrier_ok(netdev))) {
+ net_err_ratelimited("%s {cpri%d lmac%d} link down, drop pkt\n",
+ netdev->name, priv2->cpri_num,
+ priv2->lmac_id);
+ priv2->stats.rx_dropped++;
+ priv2->last_rx_dropped_jiffies = jiffies;
+ goto update_processed_pkts;
+ }
+
+ len = wqe->pkt_length;
+
+ if (unlikely(netif_msg_pktdata(priv2))) {
+ netdev_printk(KERN_DEBUG, priv2->netdev, "RX DATA:");
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16,
+ 4, pkt_buf,
+ len + OTX2_BPHY_CPRI_WQE_SIZE, true);
+ }
+
+ pkt_buf += OTX2_BPHY_CPRI_WQE_SIZE;
+
+ skb = netdev_alloc_skb_ip_align(netdev, len);
+ if (!skb) {
+ net_err_ratelimited("%s:CPRI Rx: alloc skb failed\n",
+ netdev->name);
+ priv2->stats.rx_dropped++;
+ priv2->last_rx_dropped_jiffies = jiffies;
+ goto update_processed_pkts;
+ }
+
+ memcpy(skb->data, pkt_buf, len);
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ netif_receive_skb(skb);
+
+ priv2->last_rx_jiffies = jiffies;
+
+update_processed_pkts:
+ processed_pkts++;
+ ul_cfg->sw_rd_ptr++;
+ if (ul_cfg->sw_rd_ptr == ul_cfg->num_entries)
+ ul_cfg->sw_rd_ptr = 0;
+
+ }
+
+ if (processed_pkts)
+ writeq(processed_pkts, priv->cpri_reg_base +
+ CPRIX_RXD_GMII_UL_RD_DOORBELL(priv->cpri_num));
+
+ return processed_pkts;
+}
+
+/* napi poll routine */
+static int otx2_cpri_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct otx2_bphy_cdev_priv *cdev_priv;
+ struct otx2_cpri_ndev_priv *priv;
+ u64 intr_en, regval;
+ int workdone = 0;
+
+ priv = container_of(napi, struct otx2_cpri_ndev_priv, napi);
+ cdev_priv = priv->cdev_priv;
+
+ /* pkt processing loop */
+ workdone += otx2_cpri_process_rx_pkts(priv, budget);
+
+ if (workdone < budget) {
+ napi_complete_done(napi, workdone);
+
+ /* Re enable the Rx interrupts */
+ intr_en = 1 << CPRI_RX_INTR_SHIFT(priv->cpri_num);
+ spin_lock(&cdev_priv->lock);
+ regval = readq(priv->bphy_reg_base + PSM_INT_GP_ENA_W1S(1));
+ regval |= intr_en;
+ writeq(regval, priv->bphy_reg_base + PSM_INT_GP_ENA_W1S(1));
+ spin_unlock(&cdev_priv->lock);
+ }
+
+ return workdone;
+}
+
+void otx2_cpri_rx_napi_schedule(int cpri_num, u32 status)
+{
+ struct otx2_cpri_drv_ctx *drv_ctx;
+ struct otx2_cpri_ndev_priv *priv;
+ u64 regval;
+ int idx;
+
+ for (idx = 0; idx < OTX2_BPHY_CPRI_MAX_INTF; idx++) {
+ drv_ctx = &cpri_drv_ctx[idx];
+ /* ignore lmac, one UL interrupt/cpri */
+ if (!(drv_ctx->valid && drv_ctx->cpri_num == cpri_num))
+ continue;
+ /* check if i/f down, napi disabled */
+ priv = netdev_priv(drv_ctx->netdev);
+ if (test_bit(CPRI_INTF_DOWN, &priv->state))
+ continue;
+ /* clear intr enable bit, re-enable in napi handler */
+ regval = 1 << CPRI_RX_INTR_SHIFT(cpri_num);
+ writeq(regval, priv->bphy_reg_base + PSM_INT_GP_ENA_W1C(1));
+ /* schedule napi */
+ napi_schedule(&priv->napi);
+ /* napi scheduled per MHAB, return */
+ return;
+ }
+}
+
+void otx2_cpri_update_stats(struct otx2_cpri_ndev_priv *priv)
+{
+ struct otx2_cpri_stats *dev_stats = &priv->stats;
+
+ dev_stats->rx_frames += readq(priv->cpri_reg_base +
+ CPRIX_ETH_UL_GPKTS_CNT(priv->cpri_num,
+ priv->lmac_id));
+ dev_stats->rx_octets += readq(priv->cpri_reg_base +
+ CPRIX_ETH_UL_GOCT_CNT(priv->cpri_num,
+ priv->lmac_id));
+ dev_stats->rx_err += readq(priv->cpri_reg_base +
+ CPRIX_ETH_UL_ERR_CNT(priv->cpri_num,
+ priv->lmac_id));
+ dev_stats->bad_crc += readq(priv->cpri_reg_base +
+ CPRIX_ETH_BAD_CRC_CNT(priv->cpri_num,
+ priv->lmac_id));
+ dev_stats->oversize += readq(priv->cpri_reg_base +
+ CPRIX_ETH_UL_OSIZE_CNT(priv->cpri_num,
+ priv->lmac_id));
+ dev_stats->undersize += readq(priv->cpri_reg_base +
+ CPRIX_ETH_UL_USIZE_CNT(priv->cpri_num,
+ priv->lmac_id));
+ dev_stats->fifo_ovr += readq(priv->cpri_reg_base +
+ CPRIX_ETH_UL_FIFO_ORUN_CNT(priv->cpri_num,
+ priv->lmac_id));
+ dev_stats->tx_frames += readq(priv->cpri_reg_base +
+ CPRIX_ETH_DL_GPKTS_CNT(priv->cpri_num,
+ priv->lmac_id));
+ dev_stats->tx_octets += readq(priv->cpri_reg_base +
+ CPRIX_ETH_DL_GOCTETS_CNT(priv->cpri_num,
+ priv->lmac_id));
+}
+
+static void otx2_cpri_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct otx2_cpri_ndev_priv *priv = netdev_priv(netdev);
+ struct otx2_cpri_stats *dev_stats = &priv->stats;
+
+ otx2_cpri_update_stats(priv);
+
+ stats->rx_bytes = dev_stats->rx_octets;
+ stats->rx_packets = dev_stats->rx_frames;
+ stats->rx_dropped = dev_stats->rx_dropped;
+ stats->rx_errors = dev_stats->rx_err;
+ stats->rx_crc_errors = dev_stats->bad_crc;
+ stats->rx_fifo_errors = dev_stats->fifo_ovr;
+ stats->rx_length_errors = dev_stats->oversize + dev_stats->undersize;
+
+ stats->tx_bytes = dev_stats->tx_octets;
+ stats->tx_packets = dev_stats->tx_frames;
+}
+
+/* netdev ioctl */
+static int otx2_cpri_ioctl(struct net_device *netdev, struct ifreq *req,
+ int cmd)
+{
+ return -EOPNOTSUPP;
+}
+
+/* netdev xmit */
+static netdev_tx_t otx2_cpri_eth_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct otx2_cpri_ndev_priv *priv = netdev_priv(netdev);
+ struct cpri_pkt_dl_wqe_hdr *wqe;
+ struct dl_cbuf_cfg *dl_cfg;
+ unsigned long flags;
+ u8 *buf_ptr;
+ int tail, count;
+ u16 nxt_rd_ptr;
+
+ dl_cfg = &priv->cpri_common->dl_cfg;
+
+ spin_lock_irqsave(&dl_cfg->lock, flags);
+
+ if (unlikely(priv->if_type != IF_TYPE_CPRI)) {
+ netif_err(priv, tx_queued, netdev,
+ "%s {cpri%d lmac%d} invalid intf mode, drop pkt\n",
+ netdev->name, priv->cpri_num, priv->lmac_id);
+ /* update stats */
+ priv->stats.tx_dropped++;
+ priv->last_tx_dropped_jiffies = jiffies;
+ goto exit;
+ }
+
+ if (unlikely(!netif_carrier_ok(netdev))) {
+ /* update stats */
+ priv->stats.tx_dropped++;
+ priv->last_tx_dropped_jiffies = jiffies;
+ goto exit;
+ }
+
+ /* Wait for CPRI(0..2)_TXD_GMII_DL_WR_DOORBELL to read back as 0 */
+ while ((readq(priv->cpri_reg_base +
+ CPRIX_TXD_GMII_DL_WR_DOORBELL(priv->cpri_num)) & 0xFF))
+ cpu_relax();
+
+ nxt_rd_ptr = readq(priv->cpri_reg_base +
+ CPRIX_TXD_GMII_DL_NXT_RD_PTR(priv->cpri_num)) &
+ 0xFFFF;
+ /* get the HW tail */
+ tail = CIRC_BUF_ENTRY(nxt_rd_ptr);
+ if (dl_cfg->sw_wr_ptr >= tail)
+ count = dl_cfg->num_entries - dl_cfg->sw_wr_ptr + tail;
+ else
+ count = tail - dl_cfg->sw_wr_ptr;
+
+ if (count == 0) {
+ spin_unlock_irqrestore(&dl_cfg->lock, flags);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (unlikely(netif_msg_pktdata(priv))) {
+ netdev_printk(KERN_DEBUG, priv->netdev, "Tx: skb %pS len=%d\n",
+ skb, skb->len);
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 4,
+ skb->data, skb->len, true);
+ }
+
+ buf_ptr = (u8 *)dl_cfg->cbuf_virt_addr +
+ (OTX2_BPHY_CPRI_PKT_BUF_SIZE * dl_cfg->sw_wr_ptr);
+ wqe = (struct cpri_pkt_dl_wqe_hdr *)buf_ptr;
+ wqe->mhab_id = priv->cpri_num;
+ wqe->lane_id = priv->lmac_id;
+ buf_ptr += OTX2_BPHY_CPRI_WQE_SIZE;
+ /* zero pad for short pkts, since there is no HW support */
+ if (skb->len < 64)
+ memset(buf_ptr, 0, 64);
+ memcpy(buf_ptr, skb->data, skb->len);
+ wqe->pkt_length = skb->len > 64 ? skb->len : 64;
+
+ /* ensure the memory is updated before ringing doorbell */
+ dma_wmb();
+ writeq(1, priv->cpri_reg_base +
+ CPRIX_TXD_GMII_DL_WR_DOORBELL(priv->cpri_num));
+
+ /* increment queue index */
+ dl_cfg->sw_wr_ptr++;
+ if (dl_cfg->sw_wr_ptr == dl_cfg->num_entries)
+ dl_cfg->sw_wr_ptr = 0;
+
+ priv->last_tx_jiffies = jiffies;
+exit:
+ dev_kfree_skb_any(skb);
+ spin_unlock_irqrestore(&dl_cfg->lock, flags);
+
+ return NETDEV_TX_OK;
+}
+
+/* netdev open */
+static int otx2_cpri_eth_open(struct net_device *netdev)
+{
+ struct otx2_cpri_ndev_priv *priv = netdev_priv(netdev);
+
+ napi_enable(&priv->napi);
+
+ spin_lock(&priv->lock);
+ clear_bit(CPRI_INTF_DOWN, &priv->state);
+ if (priv->link_state == LINK_STATE_UP) {
+ netif_carrier_on(netdev);
+ netif_start_queue(netdev);
+ }
+ spin_unlock(&priv->lock);
+
+ return 0;
+}
+
+/* netdev close */
+static int otx2_cpri_eth_stop(struct net_device *netdev)
+{
+ struct otx2_cpri_ndev_priv *priv = netdev_priv(netdev);
+
+ spin_lock(&priv->lock);
+ set_bit(CPRI_INTF_DOWN, &priv->state);
+
+ netif_stop_queue(netdev);
+ netif_carrier_off(netdev);
+ spin_unlock(&priv->lock);
+
+ napi_disable(&priv->napi);
+
+ return 0;
+}
+
+static const struct net_device_ops otx2_cpri_netdev_ops = {
+ .ndo_open = otx2_cpri_eth_open,
+ .ndo_stop = otx2_cpri_eth_stop,
+ .ndo_start_xmit = otx2_cpri_eth_start_xmit,
+ .ndo_do_ioctl = otx2_cpri_ioctl,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_get_stats64 = otx2_cpri_get_stats64,
+};
+
+static void otx2_cpri_dump_ul_cbuf(struct otx2_cpri_ndev_priv *priv)
+{
+ struct ul_cbuf_cfg *ul_cfg = &priv->cpri_common->ul_cfg;
+
+ pr_debug("%s: num_entries=%d iova=0x%llx\n",
+ __func__, ul_cfg->num_entries, ul_cfg->cbuf_iova_addr);
+}
+
+static void otx2_cpri_dump_dl_cbuf(struct otx2_cpri_ndev_priv *priv)
+{
+ struct dl_cbuf_cfg *dl_cfg = &priv->cpri_common->dl_cfg;
+
+ pr_debug("%s: num_entries=%d iova=0x%llx\n",
+ __func__, dl_cfg->num_entries, dl_cfg->cbuf_iova_addr);
+}
+
+static void otx2_cpri_fill_dl_ul_cfg(struct otx2_cpri_ndev_priv *priv,
+ struct bphy_netdev_cpri_if *cpri_cfg)
+{
+ struct dl_cbuf_cfg *dl_cfg;
+ struct ul_cbuf_cfg *ul_cfg;
+ u64 iova;
+
+ dl_cfg = &priv->cpri_common->dl_cfg;
+ dl_cfg->num_entries = cpri_cfg->num_dl_buf;
+ iova = cpri_cfg->dl_buf_iova_addr;
+ dl_cfg->cbuf_iova_addr = iova;
+ dl_cfg->cbuf_virt_addr = otx2_iova_to_virt(priv->iommu_domain, iova);
+ dl_cfg->sw_wr_ptr = 0;
+ spin_lock_init(&dl_cfg->lock);
+ otx2_cpri_dump_dl_cbuf(priv);
+
+ ul_cfg = &priv->cpri_common->ul_cfg;
+ ul_cfg->num_entries = cpri_cfg->num_ul_buf;
+ iova = cpri_cfg->ul_buf_iova_addr;
+ ul_cfg->cbuf_iova_addr = iova;
+ ul_cfg->cbuf_virt_addr = otx2_iova_to_virt(priv->iommu_domain, iova);
+ ul_cfg->sw_rd_ptr = 0;
+ spin_lock_init(&ul_cfg->lock);
+ otx2_cpri_dump_ul_cbuf(priv);
+}
+
+int otx2_cpri_parse_and_init_intf(struct otx2_bphy_cdev_priv *cdev,
+ struct bphy_netdev_comm_intf_cfg *cfg)
+{
+ struct otx2_cpri_drv_ctx *drv_ctx = NULL;
+ struct otx2_cpri_ndev_priv *priv, *priv2;
+ struct bphy_netdev_cpri_if *cpri_cfg;
+ int i, intf_idx = 0, lmac, ret;
+ struct net_device *netdev;
+
+ for (i = 0; i < OTX2_BPHY_CPRI_MAX_MHAB; i++) {
+ priv2 = NULL;
+ cpri_cfg = &cfg[i].cpri_if_cfg;
+ for (lmac = 0; lmac < OTX2_BPHY_CPRI_MAX_LMAC; lmac++) {
+ if (!(cpri_cfg->active_lane_mask & (1 << lmac)))
+ continue;
+ netdev =
+ alloc_etherdev(sizeof(struct otx2_cpri_ndev_priv));
+ if (!netdev) {
+ dev_err(cdev->dev,
+ "error allocating net device\n");
+ ret = -ENOMEM;
+ goto err_exit;
+ }
+ priv = netdev_priv(netdev);
+ memset(priv, 0, sizeof(*priv));
+ if (!priv2) {
+ priv->cpri_common =
+ kzalloc(sizeof(struct cpri_common_cfg),
+ GFP_KERNEL);
+ if (!priv->cpri_common) {
+ dev_err(cdev->dev, "kzalloc failed\n");
+ free_netdev(netdev);
+ ret = -ENOMEM;
+ goto err_exit;
+ }
+ priv->cpri_common->refcnt = 1;
+ }
+ spin_lock_init(&priv->lock);
+ priv->netdev = netdev;
+ priv->cdev_priv = cdev;
+ priv->msg_enable = netif_msg_init(-1, 0);
+ spin_lock_init(&priv->stats.lock);
+ priv->cpri_num = cpri_cfg->id;
+ priv->lmac_id = lmac;
+ priv->if_type = cfg[i].if_type;
+ memcpy(priv->mac_addr, &cpri_cfg->eth_addr[lmac],
+ ETH_ALEN);
+ if (is_valid_ether_addr(priv->mac_addr))
+ ether_addr_copy(netdev->dev_addr,
+ priv->mac_addr);
+ else
+ random_ether_addr(netdev->dev_addr);
+ priv->pdev = pci_get_device(OTX2_BPHY_PCI_VENDOR_ID,
+ OTX2_BPHY_PCI_DEVICE_ID,
+ NULL);
+ priv->iommu_domain =
+ iommu_get_domain_for_dev(&priv->pdev->dev);
+ priv->bphy_reg_base = bphy_reg_base;
+ priv->cpri_reg_base = cpri_reg_base;
+
+ if (!priv2) {
+ otx2_cpri_fill_dl_ul_cfg(priv, cpri_cfg);
+ } else {
+ /* share cpri_common data */
+ priv->cpri_common = priv2->cpri_common;
+ ++(priv->cpri_common->refcnt);
+ }
+
+ netif_napi_add(priv->netdev, &priv->napi,
+ otx2_cpri_napi_poll, NAPI_POLL_WEIGHT);
+
+ /* keep last (cpri + lmac) priv structure */
+ if (!priv2)
+ priv2 = priv;
+
+ intf_idx = (i * 4) + lmac;
+ snprintf(netdev->name, sizeof(netdev->name),
+ "cpri%d", intf_idx);
+ netdev->netdev_ops = &otx2_cpri_netdev_ops;
+ otx2_cpri_set_ethtool_ops(netdev);
+ netdev->mtu = 1500U;
+ netdev->min_mtu = ETH_MIN_MTU;
+ netdev->max_mtu = 1500U;
+ ret = register_netdev(netdev);
+ if (ret < 0) {
+ dev_err(cdev->dev,
+ "failed to register net device %s\n",
+ netdev->name);
+ free_netdev(netdev);
+ ret = -ENODEV;
+ goto err_exit;
+ }
+ dev_dbg(cdev->dev, "net device %s registered\n",
+ netdev->name);
+
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+ set_bit(CPRI_INTF_DOWN, &priv->state);
+ priv->link_state = LINK_STATE_UP;
+
+ /* initialize global ctx */
+ drv_ctx = &cpri_drv_ctx[intf_idx];
+ drv_ctx->cpri_num = priv->cpri_num;
+ drv_ctx->lmac_id = priv->lmac_id;
+ drv_ctx->valid = 1;
+ drv_ctx->netdev = netdev;
+
+ /* create debugfs entry */
+ otx2_cpri_debugfs_create(drv_ctx);
+ }
+ }
+
+ return 0;
+
+err_exit:
+ for (i = 0; i < OTX2_BPHY_CPRI_MAX_INTF; i++) {
+ drv_ctx = &cpri_drv_ctx[i];
+ if (drv_ctx->valid) {
+ otx2_cpri_debugfs_remove(drv_ctx);
+ netdev = drv_ctx->netdev;
+ priv = netdev_priv(netdev);
+ unregister_netdev(netdev);
+ netif_napi_del(&priv->napi);
+ --(priv->cpri_common->refcnt);
+ if (priv->cpri_common->refcnt == 0)
+ kfree(priv->cpri_common);
+ free_netdev(netdev);
+ drv_ctx->valid = 0;
+ }
+ }
+ return ret;
+}
+
+static void otx2_cpri_debugfs_reader(char *buffer, size_t count, void *priv)
+{
+ struct otx2_cpri_drv_ctx *ctx;
+ struct otx2_cpri_ndev_priv *ndev_priv;
+ u8 queue_stopped, state_up;
+ const char *formatter;
+
+ ctx = priv;
+ ndev_priv = netdev_priv(ctx->netdev);
+ queue_stopped = netif_queue_stopped(ctx->netdev);
+ state_up = ndev_priv->link_state;
+ formatter = otx2_cpri_debugfs_get_formatter();
+
+ snprintf(buffer, count, formatter,
+ queue_stopped,
+ state_up,
+ ndev_priv->last_tx_jiffies,
+ ndev_priv->last_tx_dropped_jiffies,
+ ndev_priv->last_rx_jiffies,
+ ndev_priv->last_rx_dropped_jiffies,
+ jiffies);
+}
+
+static const char *otx2_cpri_debugfs_get_formatter(void)
+{
+ static const char *buffer_format = "queue-stopped: %u\n"
+ "state-up: %u\n"
+ "last-tx-jiffies: %lu\n"
+ "last-tx-dropped-jiffies: %lu\n"
+ "last-rx-jiffies: %lu\n"
+ "last-rx-dropped-jiffies: %lu\n"
+ "current-jiffies: %lu\n";
+
+ return buffer_format;
+}
+
+static size_t otx2_cpri_debugfs_get_buffer_size(void)
+{
+ static size_t buffer_size;
+
+ if (!buffer_size) {
+ const char *formatter = otx2_cpri_debugfs_get_formatter();
+ u8 max_boolean = 1;
+ unsigned long max_jiffies = (unsigned long)-1;
+
+ buffer_size = snprintf(NULL, 0, formatter,
+ max_boolean,
+ max_boolean,
+ max_jiffies,
+ max_jiffies,
+ max_jiffies,
+ max_jiffies,
+ max_jiffies);
+ ++buffer_size;
+ }
+
+ return buffer_size;
+}
+
+static void otx2_cpri_debugfs_create(struct otx2_cpri_drv_ctx *ctx)
+{
+ size_t buffer_size = otx2_cpri_debugfs_get_buffer_size();
+
+ ctx->debugfs = otx2_bphy_debugfs_add_file(ctx->netdev->name,
+ buffer_size, ctx,
+ otx2_cpri_debugfs_reader);
+}
+
+static void otx2_cpri_debugfs_remove(struct otx2_cpri_drv_ctx *ctx)
+{
+ if (ctx->debugfs)
+ otx2_bphy_debugfs_remove_file(ctx->debugfs);
+}
+
+void otx2_cpri_set_link_state(struct net_device *netdev, u8 state)
+{
+ struct otx2_cpri_ndev_priv *priv;
+
+ priv = netdev_priv(netdev);
+
+ spin_lock(&priv->lock);
+ if (priv->link_state != state) {
+ priv->link_state = state;
+ if (state == LINK_STATE_DOWN) {
+ netdev_info(netdev, "Link DOWN\n");
+ if (netif_running(netdev)) {
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+ }
+ } else {
+ netdev_info(netdev, "Link UP\n");
+ if (netif_running(netdev)) {
+ netif_carrier_on(netdev);
+ netif_start_queue(netdev);
+ }
+ }
+ }
+ spin_unlock(&priv->lock);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_cpri.h b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_cpri.h
new file mode 100644
index 000000000000..e8b88384cd3d
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_cpri.h
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTx2 BPHY RFOE/CPRI Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _OTX2_CPRI_H_
+#define _OTX2_CPRI_H_
+
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/iommu.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_ether.h>
+#include <linux/net_tstamp.h>
+
+#include "otx2_bphy.h"
+#include "otx2_bphy_hw.h"
+#include "rfoe_bphy_netdev_comm_if.h"
+
+#define OTX2_BPHY_CPRI_MAX_MHAB 3
+#define OTX2_BPHY_CPRI_MAX_LMAC 4
+#define OTX2_BPHY_CPRI_MAX_INTF 10
+
+#define OTX2_BPHY_CPRI_PKT_BUF_SIZE 1664 /* wqe 128 bytes + 1536 bytes */
+#define OTX2_BPHY_CPRI_WQE_SIZE 128
+
+#define CPRI_RX_INTR_MASK(a) ((1UL << (a)) << 13)
+#define CPRI_RX_INTR_SHIFT(a) (13 + (a))
+
+/* HW ring pointers count in 16-byte units; each packet buffer entry
+ * spans 0x68 such units.
+ */
+#define CIRC_BUF_ENTRY(a) ((a) / 0x68)
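+/* Worked example (editorial): 0x68 units * 16 bytes = 1664 bytes, i.e. one
+ * OTX2_BPHY_CPRI_PKT_BUF_SIZE buffer, so a HW pointer value of 0x1A0 units
+ * (0x1A0 * 16 = 6656 bytes) maps to ring entry 0x1A0 / 0x68 = 4.
+ */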
+
+enum cpri_state {
+ CPRI_INTF_DOWN = 1,
+};
+
+/* CPRI support */
+struct otx2_cpri_drv_ctx {
+ u8 cpri_num;
+ u8 lmac_id;
+ int valid;
+ void *debugfs;
+ struct net_device *netdev;
+};
+
+extern struct otx2_cpri_drv_ctx cpri_drv_ctx[OTX2_BPHY_CPRI_MAX_INTF];
+
+struct otx2_cpri_stats {
+ /* Rx */
+ u64 rx_frames;
+ u64 rx_octets;
+ u64 rx_err;
+ u64 bad_crc;
+ u64 oversize;
+ u64 undersize;
+ u64 fifo_ovr;
+ u64 rx_dropped;
+ /* Tx */
+ u64 tx_frames;
+ u64 tx_octets;
+ u64 tx_dropped;
+ /* stats lock */
+ spinlock_t lock;
+};
+
+/* cpri dl cbuf cfg */
+struct dl_cbuf_cfg {
+ int num_entries;
+ u64 cbuf_iova_addr;
+ void __iomem *cbuf_virt_addr;
+ /* sw */
+ u64 sw_wr_ptr;
+ /* dl lock */
+ spinlock_t lock;
+};
+
+/* cpri ul cbuf cfg */
+struct ul_cbuf_cfg {
+ int num_entries;
+ u64 cbuf_iova_addr;
+ void __iomem *cbuf_virt_addr;
+ /* sw */
+ int sw_rd_ptr;
+ /* ul lock */
+ spinlock_t lock;
+};
+
+struct cpri_common_cfg {
+ struct dl_cbuf_cfg dl_cfg;
+ struct ul_cbuf_cfg ul_cfg;
+ u8 refcnt;
+};
+
+struct otx2_cpri_link_event {
+ u8 cpri_num;
+ u8 lmac_id;
+ u8 link_state;
+};
+
+/* cpri netdev priv */
+struct otx2_cpri_ndev_priv {
+ u8 cpri_num;
+ u8 lmac_id;
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+ struct otx2_bphy_cdev_priv *cdev_priv;
+ u32 msg_enable;
+ void __iomem *bphy_reg_base;
+ void __iomem *cpri_reg_base;
+ struct iommu_domain *iommu_domain;
+ struct cpri_common_cfg *cpri_common;
+ struct napi_struct napi;
+ unsigned long state;
+ struct otx2_cpri_stats stats;
+ u8 mac_addr[ETH_ALEN];
+ /* priv lock */
+ spinlock_t lock;
+ int if_type;
+ u8 link_state;
+ unsigned long last_tx_jiffies;
+ unsigned long last_rx_jiffies;
+ unsigned long last_tx_dropped_jiffies;
+ unsigned long last_rx_dropped_jiffies;
+};
+
+int otx2_cpri_parse_and_init_intf(struct otx2_bphy_cdev_priv *cdev,
+ struct bphy_netdev_comm_intf_cfg *cfg);
+
+void otx2_cpri_rx_napi_schedule(int cpri_num, u32 status);
+
+void otx2_cpri_update_stats(struct otx2_cpri_ndev_priv *priv);
+
+void otx2_bphy_cpri_cleanup(void);
+
+void otx2_cpri_enable_intf(int cpri_num);
+
+/* ethtool */
+void otx2_cpri_set_ethtool_ops(struct net_device *netdev);
+
+/* update carrier state */
+void otx2_cpri_set_link_state(struct net_device *netdev, u8 state);
+
+#endif
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_cpri_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_cpri_ethtool.c
new file mode 100644
index 000000000000..ae70cfa36043
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_cpri_ethtool.c
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 BPHY RFOE/CPRI Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/net_tstamp.h>
+
+#include "otx2_cpri.h"
+
+static const char ethtool_stat_strings[][ETH_GSTRING_LEN] = {
+ "rx_frames",
+ "rx_octets",
+ "rx_err",
+ "bad_crc",
+ "oversize",
+ "undersize",
+ "rx_fifo_overrun",
+ "rx_dropped",
+ "tx_frames",
+ "tx_octets",
+ "tx_dropped",
+};
+
+static void otx2_cpri_get_strings(struct net_device *netdev, u32 sset, u8 *data)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ memcpy(data, *ethtool_stat_strings,
+ sizeof(ethtool_stat_strings));
+ break;
+ }
+}
+
+static int otx2_cpri_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(ethtool_stat_strings);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void otx2_cpri_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct otx2_cpri_ndev_priv *priv = netdev_priv(netdev);
+
+ otx2_cpri_update_stats(priv);
+
+ spin_lock(&priv->stats.lock);
+ memcpy(data, &priv->stats,
+ ARRAY_SIZE(ethtool_stat_strings) * sizeof(u64));
+ spin_unlock(&priv->stats.lock);
+}
+
+static void otx2_cpri_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *p)
+{
+ struct otx2_cpri_ndev_priv *priv = netdev_priv(netdev);
+
+ snprintf(p->driver, sizeof(p->driver), "otx2_cpri {cpri%d lmac%d}",
+ priv->cpri_num, priv->lmac_id);
+ strlcpy(p->bus_info, "platform", sizeof(p->bus_info));
+}
+
+static u32 otx2_cpri_get_msglevel(struct net_device *netdev)
+{
+ struct otx2_cpri_ndev_priv *priv = netdev_priv(netdev);
+
+ return priv->msg_enable;
+}
+
+static void otx2_cpri_set_msglevel(struct net_device *netdev, u32 level)
+{
+ struct otx2_cpri_ndev_priv *priv = netdev_priv(netdev);
+
+ priv->msg_enable = level;
+}
+
+static const struct ethtool_ops otx2_cpri_ethtool_ops = {
+ .get_drvinfo = otx2_cpri_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_strings = otx2_cpri_get_strings,
+ .get_sset_count = otx2_cpri_get_sset_count,
+ .get_ethtool_stats = otx2_cpri_get_ethtool_stats,
+ .get_msglevel = otx2_cpri_get_msglevel,
+ .set_msglevel = otx2_cpri_set_msglevel,
+};
+
+void otx2_cpri_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &otx2_cpri_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_rfoe.c b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_rfoe.c
new file mode 100644
index 000000000000..0bf0d1a50024
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_rfoe.c
@@ -0,0 +1,1697 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 BPHY RFOE/CPRI Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "otx2_rfoe.h"
+#include "otx2_bphy_hw.h"
+#include "otx2_bphy_debugfs.h"
+
+/* Theory of Operation
+ *
+ * I. General
+ *
+ * The BPHY RFOE netdev driver handles packets such as eCPRI control,
+ * PTP and other Ethernet packets received from/sent to the BPHY RFOE
+ * MHAB in the Linux kernel. All other packets, such as ROE and eCPRI
+ * non-control, are handled by the ODP application in user space. The
+ * ODP application initializes the JDT/MBT/PSM queues used to process
+ * the Rx/Tx packets in the netdev and shares this information through
+ * driver ioctls. The Rx/Tx notification is sent to the netdev using one
+ * of the PSM GPINTs.
+ *
+ * II. Driver Operation
+ *
+ * This driver registers a character device and provides ioctls for the
+ * ODP application to initialize the netdev(s) to process eCPRI and
+ * other Ethernet packets. Each netdev corresponds to a unique RFOE
+ * index and LMAC id. The ODP application initializes the flow tables,
+ * Rx JDT and Rx MBT to process Rx packets. There is a separate flow
+ * table, JDT and MBT for processing eCPRI, PTP and other Ethernet
+ * packets. The Rx packet memory (DDR) is also allocated by ODP and
+ * configured in the MBT. All LMACs in a single RFOE MHAB share the Rx
+ * configuration tuple {Flow Id, JDT and MBT}. The Rx event is
+ * notified to the netdev via PSM GPINT1. Each PSM GPINT provides
+ * 32 bits that can be used as interrupt status bits. For each Rx packet
+ * type per RFOE, one PSM GPINT bit is reserved to notify the Rx event
+ * for that packet type. The ODP application configures PSM_CMD_GPINT_S
+ * in the JCE section of the JD for each packet. There are 32 JDT and
+ * MBT entries per packet type in total; these entries are reused when
+ * the JDT/MBT circular entries wrap around.
+ *
+ * On the Tx side, the ODP application creates preconfigured job
+ * commands for the driver's use. Each job command contains information
+ * such as the PSM cmd (ADDJOB) info and the JD iova address. The packet
+ * memory is also allocated by the ODP app, and the JD rd dma cfg
+ * section contains the memory address for packet DMA. There are two PSM
+ * queues per RFOE reserved for Tx: one queue handles PTP traffic and
+ * the other is used for eCPRI and regular Ethernet traffic. The PTP job
+ * descriptors (JD) are configured to generate a Tx completion event
+ * through the GPINT mechanism; one GPINT bit is reserved per LMAC/RFOE
+ * for this purpose. For eCPRI and other Ethernet traffic there is no
+ * GPINT event to signal Tx completion to the driver. The driver Tx
+ * interrupt handler reads the RFOE(0..2)_TX_PTP_TSTMP_W0 and
+ * RFOE(0..2)_TX_PTP_TSTMP_W1 registers for the PTP timestamp and fills
+ * the timestamp in the PTP skb. The number of preconfigured job
+ * commands is 64 for non-PTP, shared by all LMACs in an RFOE, and 4 for
+ * PTP per LMAC in an RFOE. The PTP job cmds are not shared because the
+ * timestamp registers are unique per LMAC.
+ *
+ * III. Transmit
+ *
+ * The driver xmit routine selects the PSM queue based on whether the
+ * packet needs to be timestamped in HW, by checking the SKBTX_HW_TSTAMP
+ * flag. For a PTP packet, if another PTP packet is already in progress,
+ * the driver adds the skb to a list and returns success. This list is
+ * processed after the previous PTP packet has been sent and its
+ * timestamp has been copied to the skb in the Tx interrupt handler.
+ *
+ * Once the PSM queue is selected, the driver checks whether there is
+ * enough space in that queue by reading the PSM_QUEUE(0..127)_SPACE
+ * register (see the illustrative job-submission sketch following this
+ * comment). If the PSM queue is not full, the driver gets the job entry
+ * associated with that queue, updates the length in JD DMA cfg word0
+ * and copies the packet data to the address in JD DMA cfg word1. For
+ * eCPRI/non-PTP packets, the driver also updates JD CFG RFOE_MODE.
+ *
+ * IV. Receive
+ *
+ * The driver receives an interrupt per pkt_type and invokes the NAPI
+ * handler. The NAPI handler reads the corresponding MBT cfg (nxt_buf)
+ * to find the number of packets to be processed. For each mbt_entry,
+ * the packet handler gets the corresponding MBT entry buffer address
+ * and, based on the packet type, reads PSW0/ECPRI_PSW0 to get the JD
+ * iova address for that MBT entry. The DMA block size is read from the
+ * JDT entry to know the number of bytes DMA'd, including the PSW bytes.
+ * The MBT entry buffer address is advanced by pkt_offset bytes and the
+ * length is decremented by pkt_offset to get the actual packet data and
+ * length. For each packet, an skb is allocated and the packet data is
+ * copied to skb->data. For PTP packets, PSW1 contains the PTP timestamp
+ * value, which is copied to the skb.
+ *
+ * V. Miscellaneous
+ *
+ * Ethtool:
+ * The ethtool stats show packet statistics for each packet type.
+ *
+ */
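+
+/* Illustrative sketch (editorial aid, not used by the driver): the basic
+ * shape of a PSM job submission as described above: check the queue space
+ * register, then post the preconfigured job command words. The helper name
+ * and the simplified error handling are hypothetical; the real path is
+ * otx2_rfoe_ptp_submit_work() below.
+ */
+static inline int otx2_rfoe_example_submit_job(struct otx2_rfoe_ndev_priv *priv,
+ u16 psm_queue_id,
+ struct tx_job_entry *job_entry)
+{
+ u64 regval;
+
+ /* PSM_QUEUE(0..127)_SPACE holds the free slot count in bits 14:0 */
+ regval = readq(priv->psm_reg_base + PSM_QUEUE_SPACE(psm_queue_id));
+ if ((regval & 0x7FFF) < 1)
+ return -EBUSY;
+
+ /* make sure JD/packet memory updates are visible before the job */
+ dma_wmb();
+
+ /* post the preconfigured ADDJOB command to the PSM queue */
+ writeq(job_entry->job_cmd_lo,
+ priv->psm_reg_base + PSM_QUEUE_CMD_LO(psm_queue_id));
+ writeq(job_entry->job_cmd_hi,
+ priv->psm_reg_base + PSM_QUEUE_CMD_HI(psm_queue_id));
+
+ return 0;
+}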
+
+/* global driver ctx */
+struct otx2_rfoe_drv_ctx rfoe_drv_ctx[RFOE_MAX_INTF];
+
+/* debugfs */
+static void otx2_rfoe_debugfs_reader(char *buffer, size_t count, void *priv);
+static const char *otx2_rfoe_debugfs_get_formatter(void);
+static size_t otx2_rfoe_debugfs_get_buffer_size(void);
+static void otx2_rfoe_debugfs_create(struct otx2_rfoe_drv_ctx *ctx);
+static void otx2_rfoe_debugfs_remove(struct otx2_rfoe_drv_ctx *ctx);
+
+void otx2_rfoe_disable_intf(int rfoe_num)
+{
+ struct otx2_rfoe_drv_ctx *drv_ctx;
+ struct otx2_rfoe_ndev_priv *priv;
+ struct net_device *netdev;
+ int idx;
+
+ for (idx = 0; idx < RFOE_MAX_INTF; idx++) {
+ drv_ctx = &rfoe_drv_ctx[idx];
+ if (drv_ctx->rfoe_num == rfoe_num && drv_ctx->valid) {
+ netdev = drv_ctx->netdev;
+ priv = netdev_priv(netdev);
+ priv->if_type = IF_TYPE_NONE;
+ }
+ }
+}
+
+void otx2_bphy_rfoe_cleanup(void)
+{
+ struct otx2_rfoe_drv_ctx *drv_ctx = NULL;
+ struct otx2_rfoe_ndev_priv *priv;
+ struct net_device *netdev;
+ struct rx_ft_cfg *ft_cfg;
+ int i, idx;
+
+ for (i = 0; i < RFOE_MAX_INTF; i++) {
+ drv_ctx = &rfoe_drv_ctx[i];
+ if (drv_ctx->valid) {
+ otx2_rfoe_debugfs_remove(drv_ctx);
+ netdev = drv_ctx->netdev;
+ priv = netdev_priv(netdev);
+ --(priv->ptp_cfg->refcnt);
+ if (!priv->ptp_cfg->refcnt) {
+ del_timer_sync(&priv->ptp_cfg->ptp_timer);
+ kfree(priv->ptp_cfg);
+ }
+ otx2_rfoe_ptp_destroy(priv);
+ unregister_netdev(netdev);
+ for (idx = 0; idx < PACKET_TYPE_MAX; idx++) {
+ if (!(priv->pkt_type_mask & (1U << idx)))
+ continue;
+ ft_cfg = &priv->rx_ft_cfg[idx];
+ netif_napi_del(&ft_cfg->napi);
+ }
+ --(priv->rfoe_common->refcnt);
+ if (priv->rfoe_common->refcnt == 0)
+ kfree(priv->rfoe_common);
+ free_netdev(netdev);
+ drv_ctx->valid = 0;
+ }
+ }
+}
+
+void otx2_rfoe_calc_ptp_ts(struct otx2_rfoe_ndev_priv *priv, u64 *ts)
+{
+ u64 ptp_diff_nsec, ptp_diff_psec;
+ struct ptp_bcn_off_cfg *ptp_cfg;
+ struct ptp_clk_cfg *clk_cfg;
+ struct ptp_bcn_ref *ref;
+ unsigned long flags;
+ u64 timestamp = *ts;
+
+ ptp_cfg = priv->ptp_cfg;
+ if (!ptp_cfg->use_ptp_alg)
+ return;
+ clk_cfg = &ptp_cfg->clk_cfg;
+
+ spin_lock_irqsave(&ptp_cfg->lock, flags);
+
+ if (likely(timestamp > ptp_cfg->new_ref.ptp0_ns))
+ ref = &ptp_cfg->new_ref;
+ else
+ ref = &ptp_cfg->old_ref;
+
+ /* calculate ptp timestamp diff in pico sec */
+ ptp_diff_psec = ((timestamp - ref->ptp0_ns) * PICO_SEC_PER_NSEC *
+ clk_cfg->clk_freq_div) / clk_cfg->clk_freq_ghz;
+ ptp_diff_nsec = (ptp_diff_psec + ref->bcn0_n2_ps + 500) /
+ PICO_SEC_PER_NSEC;
+ timestamp = ref->bcn0_n1_ns - priv->sec_bcn_offset + ptp_diff_nsec;
+
+ spin_unlock_irqrestore(&ptp_cfg->lock, flags);
+
+ *ts = timestamp;
+}
+
+static void otx2_rfoe_ptp_offset_timer(struct timer_list *t)
+{
+ struct ptp_bcn_off_cfg *ptp_cfg = from_timer(ptp_cfg, t, ptp_timer);
+ u64 mio_ptp_ts, ptp_ts_diff, ptp_diff_nsec, ptp_diff_psec;
+ struct ptp_clk_cfg *clk_cfg = &ptp_cfg->clk_cfg;
+ unsigned long expires, flags;
+
+ spin_lock_irqsave(&ptp_cfg->lock, flags);
+
+ memcpy(&ptp_cfg->old_ref, &ptp_cfg->new_ref,
+ sizeof(struct ptp_bcn_ref));
+
+ mio_ptp_ts = readq(ptp_reg_base + MIO_PTP_CLOCK_HI);
+ ptp_ts_diff = mio_ptp_ts - ptp_cfg->new_ref.ptp0_ns;
+ ptp_diff_psec = (ptp_ts_diff * PICO_SEC_PER_NSEC *
+ clk_cfg->clk_freq_div) / clk_cfg->clk_freq_ghz;
+ ptp_diff_nsec = ptp_diff_psec / PICO_SEC_PER_NSEC;
+ ptp_cfg->new_ref.ptp0_ns += ptp_ts_diff;
+ ptp_cfg->new_ref.bcn0_n1_ns += ptp_diff_nsec;
+ ptp_cfg->new_ref.bcn0_n2_ps += ptp_diff_psec -
+ (ptp_diff_nsec * PICO_SEC_PER_NSEC);
+
+ spin_unlock_irqrestore(&ptp_cfg->lock, flags);
+
+ expires = jiffies + PTP_OFF_RESAMPLE_THRESH * HZ;
+ mod_timer(&ptp_cfg->ptp_timer, expires);
+}
+
+/* submit pending ptp tx requests */
+static void otx2_rfoe_ptp_submit_work(struct work_struct *work)
+{
+ struct otx2_rfoe_ndev_priv *priv = container_of(work,
+ struct otx2_rfoe_ndev_priv,
+ ptp_queue_work);
+ struct mhbw_jd_dma_cfg_word_0_s *jd_dma_cfg_word_0;
+ struct mhbw_jd_dma_cfg_word_1_s *jd_dma_cfg_word_1;
+ struct mhab_job_desc_cfg *jd_cfg_ptr;
+ struct psm_cmd_addjob_s *psm_cmd_lo;
+ struct tx_job_queue_cfg *job_cfg;
+ struct tx_job_entry *job_entry;
+ struct ptp_tstamp_skb *ts_skb;
+ u16 psm_queue_id, queue_space;
+ struct sk_buff *skb = NULL;
+ struct list_head *head;
+ u64 jd_cfg_ptr_iova;
+ unsigned long flags;
+ u64 regval;
+
+ job_cfg = &priv->tx_ptp_job_cfg;
+
+ spin_lock_irqsave(&job_cfg->lock, flags);
+
+ /* check pending ptp requests */
+ if (list_empty(&priv->ptp_skb_list.list)) {
+ netif_dbg(priv, tx_queued, priv->netdev, "no pending ptp tx requests\n");
+ spin_unlock_irqrestore(&job_cfg->lock, flags);
+ return;
+ }
+
+ /* check psm queue space available */
+ psm_queue_id = job_cfg->psm_queue_id;
+ regval = readq(priv->psm_reg_base + PSM_QUEUE_SPACE(psm_queue_id));
+ queue_space = regval & 0x7FFF;
+ if (queue_space < 1) {
+ netif_dbg(priv, tx_queued, priv->netdev, "ptp tx psm queue %d full\n",
+ psm_queue_id);
+ /* reschedule to check later */
+ spin_unlock_irqrestore(&job_cfg->lock, flags);
+ schedule_work(&priv->ptp_queue_work);
+ return;
+ }
+
+ if (test_and_set_bit_lock(PTP_TX_IN_PROGRESS, &priv->state)) {
+ netif_dbg(priv, tx_queued, priv->netdev, "ptp tx ongoing\n");
+ spin_unlock_irqrestore(&job_cfg->lock, flags);
+ return;
+ }
+
+ head = &priv->ptp_skb_list.list;
+ ts_skb = list_entry(head->next, struct ptp_tstamp_skb, list);
+ skb = ts_skb->skb;
+ list_del(&ts_skb->list);
+ kfree(ts_skb);
+ priv->ptp_skb_list.count--;
+
+ netif_dbg(priv, tx_queued, priv->netdev,
+ "submitting ptp tx skb %pS\n", skb);
+
+ priv->last_tx_ptp_jiffies = jiffies;
+
+ /* get the tx job entry */
+ job_entry = (struct tx_job_entry *)
+ &job_cfg->job_entries[job_cfg->q_idx];
+
+ netif_dbg(priv, tx_queued, priv->netdev,
+ "rfoe=%d lmac=%d psm_queue=%d tx_job_entry %d job_cmd_lo=0x%llx job_cmd_high=0x%llx jd_iova_addr=0x%llx\n",
+ priv->rfoe_num, priv->lmac_id, psm_queue_id, job_cfg->q_idx,
+ job_entry->job_cmd_lo, job_entry->job_cmd_hi,
+ job_entry->jd_iova_addr);
+
+ priv->ptp_tx_skb = skb;
+ psm_cmd_lo = (struct psm_cmd_addjob_s *)&job_entry->job_cmd_lo;
+ priv->ptp_job_tag = psm_cmd_lo->jobtag;
+
+ /* update length and block size in jd dma cfg word */
+ jd_cfg_ptr_iova = *(u64 *)((u8 *)job_entry->jd_ptr + 8);
+ jd_cfg_ptr = otx2_iova_to_virt(priv->iommu_domain, jd_cfg_ptr_iova);
+ jd_cfg_ptr->cfg1.pkt_len = skb->len;
+ jd_dma_cfg_word_0 = (struct mhbw_jd_dma_cfg_word_0_s *)
+ job_entry->rd_dma_ptr;
+ jd_dma_cfg_word_0->block_size = (((skb->len + 15) >> 4) * 4);
+
+ /* copy packet data to rd_dma_ptr start addr */
+ jd_dma_cfg_word_1 = (struct mhbw_jd_dma_cfg_word_1_s *)
+ ((u8 *)job_entry->rd_dma_ptr + 8);
+ memcpy(otx2_iova_to_virt(priv->iommu_domain,
+ jd_dma_cfg_word_1->start_addr),
+ skb->data, skb->len);
+
+ /* make sure that all memory writes are completed */
+ dma_wmb();
+
+ /* submit PSM job */
+ writeq(job_entry->job_cmd_lo,
+ priv->psm_reg_base + PSM_QUEUE_CMD_LO(psm_queue_id));
+ writeq(job_entry->job_cmd_hi,
+ priv->psm_reg_base + PSM_QUEUE_CMD_HI(psm_queue_id));
+
+ /* increment queue index */
+ job_cfg->q_idx++;
+ if (job_cfg->q_idx == job_cfg->num_entries)
+ job_cfg->q_idx = 0;
+
+ spin_unlock_irqrestore(&job_cfg->lock, flags);
+}
+
+#define OTX2_RFOE_PTP_TSTMP_POLL_CNT 100
+
+/* ptp interrupt processing bottom half */
+static void otx2_rfoe_ptp_tx_work(struct work_struct *work)
+{
+ struct otx2_rfoe_ndev_priv *priv = container_of(work,
+ struct otx2_rfoe_ndev_priv,
+ ptp_tx_work);
+ struct skb_shared_hwtstamps ts;
+ u64 timestamp, tstmp_w1;
+ u16 jobid;
+ int cnt;
+
+ if (!priv->ptp_tx_skb) {
+ netif_err(priv, tx_done, priv->netdev,
+ "ptp tx skb not found, something wrong!\n");
+ goto submit_next_req;
+ }
+
+ /* poll for timestamp valid bit to go high */
+ for (cnt = 0; cnt < OTX2_RFOE_PTP_TSTMP_POLL_CNT; cnt++) {
+ /* read RFOE(0..2)_TX_PTP_TSTMP_W1(0..3) */
+ tstmp_w1 = readq(priv->rfoe_reg_base +
+ RFOEX_TX_PTP_TSTMP_W1(priv->rfoe_num,
+ priv->lmac_id));
+ /* check valid bit */
+ if (tstmp_w1 & (1ULL << 63))
+ break;
+ usleep_range(5, 10);
+ }
+
+ if (cnt >= OTX2_RFOE_PTP_TSTMP_POLL_CNT) {
+ netif_err(priv, tx_err, priv->netdev,
+ "ptp tx timestamp polling timeout, skb=%pS\n",
+ priv->ptp_tx_skb);
+ priv->stats.tx_hwtstamp_failures++;
+ goto submit_next_req;
+ }
+
+ /* check err or drop condition */
+ if ((tstmp_w1 & (1ULL << 21)) || (tstmp_w1 & (1ULL << 20))) {
+ netif_err(priv, tx_done, priv->netdev,
+ "ptp timestamp error tstmp_w1=0x%llx\n",
+ tstmp_w1);
+ goto submit_next_req;
+ }
+ /* match job id */
+ jobid = (tstmp_w1 >> 4) & 0xffff;
+ if (jobid != priv->ptp_job_tag) {
+ netif_err(priv, tx_done, priv->netdev,
+ "ptp job id doesn't match, tstmp_w1->job_id=0x%x skb->job_tag=0x%x\n",
+ jobid, priv->ptp_job_tag);
+ goto submit_next_req;
+ }
+ /* update timestamp value in skb */
+ timestamp = readq(priv->rfoe_reg_base +
+ RFOEX_TX_PTP_TSTMP_W0(priv->rfoe_num,
+ priv->lmac_id));
+ if (priv->pdev->subsystem_device == PCI_SUBSYS_DEVID_OCTX2_95XXN)
+ otx2_rfoe_calc_ptp_ts(priv, &timestamp);
+ else
+ timestamp = timecounter_cyc2time(&priv->time_counter, timestamp);
+
+ memset(&ts, 0, sizeof(ts));
+ ts.hwtstamp = ns_to_ktime(timestamp);
+ skb_tstamp_tx(priv->ptp_tx_skb, &ts);
+
+submit_next_req:
+ if (priv->ptp_tx_skb)
+ dev_kfree_skb_any(priv->ptp_tx_skb);
+ priv->ptp_tx_skb = NULL;
+ clear_bit_unlock(PTP_TX_IN_PROGRESS, &priv->state);
+ schedule_work(&priv->ptp_queue_work);
+}
+
+/* psm queue timer callback to check queue space */
+static void otx2_rfoe_tx_timer_cb(struct timer_list *t)
+{
+ struct otx2_rfoe_ndev_priv *priv =
+ container_of(t, struct otx2_rfoe_ndev_priv, tx_timer);
+ u16 psm_queue_id, queue_space;
+ int reschedule = 0;
+ u64 regval;
+
+ /* check psm queue space for both ptp and oth packets */
+ if (netif_queue_stopped(priv->netdev)) {
+ psm_queue_id = priv->tx_ptp_job_cfg.psm_queue_id;
+ // check queue space
+ regval = readq(priv->psm_reg_base +
+ PSM_QUEUE_SPACE(psm_queue_id));
+ queue_space = regval & 0x7FFF;
+ if (queue_space > 1) {
+ netif_wake_queue(priv->netdev);
+ reschedule = 0;
+ } else {
+ reschedule = 1;
+ }
+
+ psm_queue_id = priv->rfoe_common->tx_oth_job_cfg.psm_queue_id;
+ // check queue space
+ regval = readq(priv->psm_reg_base +
+ PSM_QUEUE_SPACE(psm_queue_id));
+ queue_space = regval & 0x7FFF;
+ if (queue_space > 1) {
+ netif_wake_queue(priv->netdev);
+ reschedule = 0;
+ } else {
+ reschedule = 1;
+ }
+ }
+
+ if (reschedule)
+ mod_timer(&priv->tx_timer, jiffies + msecs_to_jiffies(100));
+}
+
+static void otx2_rfoe_process_rx_pkt(struct otx2_rfoe_ndev_priv *priv,
+ struct rx_ft_cfg *ft_cfg, int mbt_buf_idx)
+{
+ struct otx2_bphy_cdev_priv *cdev_priv = priv->cdev_priv;
+ struct mhbw_jd_dma_cfg_word_0_s *jd_dma_cfg_word_0;
+ struct rfoe_ecpri_psw0_s *ecpri_psw0 = NULL;
+ struct rfoe_ecpri_psw1_s *ecpri_psw1 = NULL;
+ u64 tstamp = 0, mbt_state, jdt_iova_addr;
+ int found = 0, idx, len, pkt_type;
+ struct otx2_rfoe_ndev_priv *priv2;
+ struct otx2_rfoe_drv_ctx *drv_ctx;
+ unsigned int ptp_message_len = 0;
+ struct rfoe_psw0_s *psw0 = NULL;
+ struct rfoe_psw1_s *psw1 = NULL;
+ struct net_device *netdev;
+ u8 *buf_ptr, *jdt_ptr;
+ struct sk_buff *skb;
+ u8 lmac_id;
+
+ /* read mbt state */
+ spin_lock(&cdev_priv->mbt_lock);
+ writeq(mbt_buf_idx, (priv->rfoe_reg_base +
+ RFOEX_RX_INDIRECT_INDEX_OFFSET(priv->rfoe_num)));
+ mbt_state = readq(priv->rfoe_reg_base +
+ RFOEX_RX_IND_MBT_SEG_STATE(priv->rfoe_num));
+ spin_unlock(&cdev_priv->mbt_lock);
+
+ if ((mbt_state >> 16 & 0xf) != 0) {
+ pr_err("rx pkt error: mbt_buf_idx=%d, err=%d\n",
+ mbt_buf_idx, (u8)(mbt_state >> 16 & 0xf));
+ return;
+ }
+ if (mbt_state >> 20 & 0x1) {
+ pr_err("rx dma error: mbt_buf_idx=%d\n", mbt_buf_idx);
+ return;
+ }
+
+ buf_ptr = (u8 *)ft_cfg->mbt_virt_addr +
+ (ft_cfg->buf_size * mbt_buf_idx);
+
+ pkt_type = ft_cfg->pkt_type;
+#ifdef ASIM
+ // ASIM issue, all rx packets will hit eCPRI flow table
+ pkt_type = PACKET_TYPE_ECPRI;
+#endif
+ if (pkt_type != PACKET_TYPE_ECPRI) {
+ psw0 = (struct rfoe_psw0_s *)buf_ptr;
+ if (psw0->pkt_err_sts || psw0->dma_error) {
+ net_warn_ratelimited("%s: psw0 pkt_err_sts = 0x%x, dma_err=0x%x\n",
+ priv->netdev->name,
+ psw0->pkt_err_sts,
+ psw0->dma_error);
+ return;
+ }
+ /* check that the psw type is correct: */
+ if (unlikely(psw0->pswt == ECPRI_TYPE)) {
+ net_warn_ratelimited("%s: pswt is eCPRI for pkt_type = %d\n",
+ priv->netdev->name, pkt_type);
+ return;
+ }
+ lmac_id = psw0->lmac_id;
+ jdt_iova_addr = (u64)psw0->jd_ptr;
+ psw1 = (struct rfoe_psw1_s *)(buf_ptr + 16);
+ tstamp = psw1->ptp_timestamp;
+ } else {
+ ecpri_psw0 = (struct rfoe_ecpri_psw0_s *)buf_ptr;
+ if (ecpri_psw0->err_sts & 0x1F) {
+ net_warn_ratelimited("%s: ecpri_psw0 err_sts = 0x%x\n",
+ priv->netdev->name,
+ ecpri_psw0->err_sts);
+ return;
+ }
+ /* check that the psw type is correct: */
+ if (unlikely(ecpri_psw0->pswt != ECPRI_TYPE)) {
+ net_warn_ratelimited("%s: pswt is not eCPRI for pkt_type = %d\n",
+ priv->netdev->name, pkt_type);
+ return;
+ }
+ lmac_id = ecpri_psw0->src_id & 0x3;
+ jdt_iova_addr = (u64)ecpri_psw0->jd_ptr;
+ ecpri_psw1 = (struct rfoe_ecpri_psw1_s *)(buf_ptr + 16);
+ tstamp = ecpri_psw1->ptp_timestamp;
+ }
+
+ netif_dbg(priv, rx_status, priv->netdev,
+ "Rx: rfoe=%d lmac=%d mbt_buf_idx=%d psw0(w0)=0x%llx psw0(w1)=0x%llx psw1(w0)=0x%llx psw1(w1)=0x%llx jd:iova=0x%llx\n",
+ priv->rfoe_num, lmac_id, mbt_buf_idx,
+ *(u64 *)buf_ptr, *((u64 *)buf_ptr + 1),
+ *((u64 *)buf_ptr + 2), *((u64 *)buf_ptr + 3),
+ jdt_iova_addr);
+
+ /* read jd ptr from psw */
+ jdt_ptr = otx2_iova_to_virt(priv->iommu_domain, jdt_iova_addr);
+ jd_dma_cfg_word_0 = (struct mhbw_jd_dma_cfg_word_0_s *)
+ ((u8 *)jdt_ptr + ft_cfg->jd_rd_offset);
+ len = (jd_dma_cfg_word_0->block_size) << 2;
+ netif_dbg(priv, rx_status, priv->netdev, "jd rd_dma len = %d\n", len);
+
+ if (unlikely(netif_msg_pktdata(priv))) {
+ netdev_printk(KERN_DEBUG, priv->netdev, "RX MBUF DATA:");
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 4,
+ buf_ptr, len, true);
+ }
+
+ buf_ptr += (ft_cfg->pkt_offset * 16);
+ len -= (ft_cfg->pkt_offset * 16);
+
+ for (idx = 0; idx < RFOE_MAX_INTF; idx++) {
+ drv_ctx = &rfoe_drv_ctx[idx];
+ if (drv_ctx->valid && drv_ctx->rfoe_num == priv->rfoe_num &&
+ drv_ctx->lmac_id == lmac_id) {
+ found = 1;
+ break;
+ }
+ }
+ if (found) {
+ netdev = rfoe_drv_ctx[idx].netdev;
+ priv2 = netdev_priv(netdev);
+ } else {
+ pr_err("netdev not found, something went wrong!\n");
+ return;
+ }
+
+ /* drop the packet if interface is down */
+ if (unlikely(!netif_carrier_ok(netdev))) {
+ netif_err(priv2, rx_err, netdev,
+ "%s {rfoe%d lmac%d} link down, drop pkt\n",
+ netdev->name, priv2->rfoe_num,
+ priv2->lmac_id);
+ /* update stats */
+ if (pkt_type == PACKET_TYPE_PTP) {
+ priv2->stats.ptp_rx_dropped++;
+ priv2->last_rx_ptp_dropped_jiffies = jiffies;
+ } else if (pkt_type == PACKET_TYPE_ECPRI) {
+ priv2->stats.ecpri_rx_dropped++;
+ priv2->last_rx_dropped_jiffies = jiffies;
+ } else {
+ priv2->stats.rx_dropped++;
+ priv2->last_rx_dropped_jiffies = jiffies;
+ }
+ return;
+ }
+
+ skb = netdev_alloc_skb_ip_align(netdev, len);
+ if (!skb) {
+ netif_err(priv2, rx_err, netdev, "Rx: alloc skb failed\n");
+ return;
+ }
+
+ memcpy(skb->data, buf_ptr, len);
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ /* remove trailing padding for ptp packets */
+ if (skb->protocol == htons(ETH_P_1588)) {
+ ptp_message_len = skb->data[2] << 8 | skb->data[3];
+ skb_trim(skb, ptp_message_len);
+ }
+
+ if (priv2->rx_hw_tstamp_en) {
+ if (priv->pdev->subsystem_device == PCI_SUBSYS_DEVID_OCTX2_95XXN)
+ otx2_rfoe_calc_ptp_ts(priv, &tstamp);
+ else
+ tstamp = timecounter_cyc2time(&priv->time_counter, tstamp);
+
+ skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(tstamp);
+ }
+
+ netif_receive_skb(skb);
+
+ /* update stats */
+ if (pkt_type == PACKET_TYPE_PTP) {
+ priv2->stats.ptp_rx_packets++;
+ priv2->last_rx_ptp_jiffies = jiffies;
+ } else if (pkt_type == PACKET_TYPE_ECPRI) {
+ priv2->stats.ecpri_rx_packets++;
+ priv2->last_rx_jiffies = jiffies;
+ } else {
+ priv2->stats.rx_packets++;
+ priv2->last_rx_jiffies = jiffies;
+ }
+ priv2->stats.rx_bytes += skb->len;
+}
+
+static int otx2_rfoe_process_rx_flow(struct otx2_rfoe_ndev_priv *priv,
+ int pkt_type, int budget)
+{
+ struct otx2_bphy_cdev_priv *cdev_priv = priv->cdev_priv;
+ int count = 0, processed_pkts = 0;
+ struct rx_ft_cfg *ft_cfg;
+ u64 mbt_cfg;
+ u16 nxt_buf;
+ int *mbt_last_idx = &priv->rfoe_common->rx_mbt_last_idx[pkt_type];
+ u16 *prv_nxt_buf = &priv->rfoe_common->nxt_buf[pkt_type];
+
+ ft_cfg = &priv->rx_ft_cfg[pkt_type];
+
+ spin_lock(&cdev_priv->mbt_lock);
+ /* read mbt nxt_buf */
+ writeq(ft_cfg->mbt_idx,
+ priv->rfoe_reg_base +
+ RFOEX_RX_INDIRECT_INDEX_OFFSET(priv->rfoe_num));
+ mbt_cfg = readq(priv->rfoe_reg_base +
+ RFOEX_RX_IND_MBT_CFG(priv->rfoe_num));
+ spin_unlock(&cdev_priv->mbt_lock);
+
+ nxt_buf = (mbt_cfg >> 32) & 0xffff;
+
+ /* no mbt entries to process */
+ if (nxt_buf == *prv_nxt_buf) {
+ netif_dbg(priv, rx_status, priv->netdev,
+ "no rx packets to process, rfoe=%d pkt_type=%d mbt_idx=%d nxt_buf=%d mbt_buf_sw_head=%d\n",
+ priv->rfoe_num, pkt_type, ft_cfg->mbt_idx, nxt_buf,
+ *mbt_last_idx);
+ return 0;
+ }
+
+ *prv_nxt_buf = nxt_buf;
+
+ /* get count of pkts to process, check ring wrap condition */
+ if (*mbt_last_idx > nxt_buf) {
+ count = ft_cfg->num_bufs - *mbt_last_idx;
+ count += nxt_buf;
+ } else {
+ count = nxt_buf - *mbt_last_idx;
+ }
+
+ netif_dbg(priv, rx_status, priv->netdev,
+ "rfoe=%d pkt_type=%d mbt_idx=%d nxt_buf=%d mbt_buf_sw_head=%d count=%d\n",
+ priv->rfoe_num, pkt_type, ft_cfg->mbt_idx, nxt_buf,
+ *mbt_last_idx, count);
+
+ while (likely((processed_pkts < budget) && (processed_pkts < count))) {
+ otx2_rfoe_process_rx_pkt(priv, ft_cfg, *mbt_last_idx);
+
+ (*mbt_last_idx)++;
+ if (*mbt_last_idx == ft_cfg->num_bufs)
+ *mbt_last_idx = 0;
+
+ processed_pkts++;
+ }
+
+ return processed_pkts;
+}
+
+/* napi poll routine */
+static int otx2_rfoe_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct otx2_bphy_cdev_priv *cdev_priv;
+ struct otx2_rfoe_ndev_priv *priv;
+ int workdone = 0, pkt_type;
+ struct rx_ft_cfg *ft_cfg;
+ u64 intr_en, regval;
+
+ ft_cfg = container_of(napi, struct rx_ft_cfg, napi);
+ priv = ft_cfg->priv;
+ cdev_priv = priv->cdev_priv;
+ pkt_type = ft_cfg->pkt_type;
+
+ /* pkt processing loop */
+ workdone += otx2_rfoe_process_rx_flow(priv, pkt_type, budget);
+
+ if (workdone < budget) {
+ napi_complete_done(napi, workdone);
+
+ /* Re-enable the Rx interrupts */
+ intr_en = PKT_TYPE_TO_INTR(pkt_type) <<
+ RFOE_RX_INTR_SHIFT(priv->rfoe_num);
+ spin_lock(&cdev_priv->lock);
+ regval = readq(bphy_reg_base + PSM_INT_GP_ENA_W1S(1));
+ regval |= intr_en;
+ writeq(regval, bphy_reg_base + PSM_INT_GP_ENA_W1S(1));
+ spin_unlock(&cdev_priv->lock);
+ }
+
+ return workdone;
+}
+
+/* Rx GPINT napi schedule api */
+void otx2_rfoe_rx_napi_schedule(int rfoe_num, u32 status)
+{
+ enum bphy_netdev_packet_type pkt_type;
+ struct otx2_rfoe_drv_ctx *drv_ctx;
+ struct otx2_rfoe_ndev_priv *priv;
+ struct rx_ft_cfg *ft_cfg;
+ int intf, bit_idx;
+ u32 intr_sts;
+ u64 regval;
+
+ for (intf = 0; intf < RFOE_MAX_INTF; intf++) {
+ drv_ctx = &rfoe_drv_ctx[intf];
+ /* ignore lmac, one interrupt/pkt_type/rfoe */
+ if (!(drv_ctx->valid && drv_ctx->rfoe_num == rfoe_num))
+ continue;
+ /* check if i/f down, napi disabled */
+ priv = netdev_priv(drv_ctx->netdev);
+ if (test_bit(RFOE_INTF_DOWN, &priv->state))
+ continue;
+ /* check rx pkt type */
+ intr_sts = ((status >> RFOE_RX_INTR_SHIFT(rfoe_num)) &
+ RFOE_RX_INTR_EN);
+ for (bit_idx = 0; bit_idx < PACKET_TYPE_MAX; bit_idx++) {
+ if (!(intr_sts & BIT(bit_idx)))
+ continue;
+ pkt_type = INTR_TO_PKT_TYPE(bit_idx);
+ if (unlikely(!(priv->pkt_type_mask & (1U << pkt_type))))
+ continue;
+ /* clear intr enable bit, re-enable in napi handler */
+ regval = PKT_TYPE_TO_INTR(pkt_type) <<
+ RFOE_RX_INTR_SHIFT(rfoe_num);
+ writeq(regval, bphy_reg_base + PSM_INT_GP_ENA_W1C(1));
+ /* schedule napi */
+ ft_cfg = &drv_ctx->ft_cfg[pkt_type];
+ napi_schedule(&ft_cfg->napi);
+ }
+ /* napi scheduled per pkt_type, return */
+ return;
+ }
+}
+
+static void otx2_rfoe_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct otx2_rfoe_ndev_priv *priv = netdev_priv(netdev);
+ struct otx2_rfoe_stats *dev_stats = &priv->stats;
+
+ stats->rx_bytes = dev_stats->rx_bytes;
+ stats->rx_packets = dev_stats->rx_packets +
+ dev_stats->ptp_rx_packets +
+ dev_stats->ecpri_rx_packets;
+ stats->rx_dropped = dev_stats->rx_dropped +
+ dev_stats->ptp_rx_dropped +
+ dev_stats->ecpri_rx_dropped;
+
+ stats->tx_bytes = dev_stats->tx_bytes;
+ stats->tx_packets = dev_stats->tx_packets +
+ dev_stats->ptp_tx_packets +
+ dev_stats->ecpri_tx_packets;
+ stats->tx_dropped = dev_stats->tx_dropped +
+ dev_stats->ptp_tx_dropped +
+ dev_stats->ecpri_tx_dropped;
+}
+
+static int otx2_rfoe_config_hwtstamp(struct net_device *netdev,
+ struct ifreq *ifr)
+{
+ struct otx2_rfoe_ndev_priv *priv = netdev_priv(netdev);
+ struct hwtstamp_config config;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ /* reserved for future extensions */
+ if (config.flags)
+ return -EINVAL;
+
+ /* ptp hw timestamp is always enabled, mark the sw flags
+ * so that tx ptp requests are submitted to ptp psm queue
+ * and rx timestamp is copied to skb
+ */
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ priv->tx_hw_tstamp_en = 0;
+ break;
+ case HWTSTAMP_TX_ON:
+ priv->tx_hw_tstamp_en = 1;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ priv->rx_hw_tstamp_en = 0;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ priv->rx_hw_tstamp_en = 1;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
+ return -EFAULT;
+
+ return 0;
+}
+
+/* netdev ioctl */
+static int otx2_rfoe_ioctl(struct net_device *netdev, struct ifreq *req,
+ int cmd)
+{
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return otx2_rfoe_config_hwtstamp(netdev, req);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/* netdev xmit */
+static netdev_tx_t otx2_rfoe_eth_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct otx2_rfoe_ndev_priv *priv = netdev_priv(netdev);
+ struct mhbw_jd_dma_cfg_word_0_s *jd_dma_cfg_word_0;
+ struct mhbw_jd_dma_cfg_word_1_s *jd_dma_cfg_word_1;
+ struct mhab_job_desc_cfg *jd_cfg_ptr;
+ struct psm_cmd_addjob_s *psm_cmd_lo;
+ struct tx_job_queue_cfg *job_cfg;
+ u64 jd_cfg_ptr_iova, regval;
+ struct tx_job_entry *job_entry;
+ struct ptp_tstamp_skb *ts_skb;
+ int psm_queue_id, queue_space;
+ int pkt_type = 0;
+ unsigned long flags;
+ struct ethhdr *eth;
+
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+ if (!priv->tx_hw_tstamp_en) {
+ netif_dbg(priv, tx_queued, priv->netdev,
+ "skb HW timestamp requested but not enabled, this packet will not be timestamped\n");
+ job_cfg = &priv->rfoe_common->tx_oth_job_cfg;
+ pkt_type = PACKET_TYPE_OTHER;
+ } else {
+ job_cfg = &priv->tx_ptp_job_cfg;
+ pkt_type = PACKET_TYPE_PTP;
+ }
+ } else {
+ job_cfg = &priv->rfoe_common->tx_oth_job_cfg;
+ eth = (struct ethhdr *)skb->data;
+ if (ntohs(eth->h_proto) == ETH_P_ECPRI)
+ pkt_type = PACKET_TYPE_ECPRI;
+ else
+ pkt_type = PACKET_TYPE_OTHER;
+ }
+
+ spin_lock_irqsave(&job_cfg->lock, flags);
+
+ if (unlikely(priv->if_type != IF_TYPE_ETHERNET)) {
+ netif_err(priv, tx_queued, netdev,
+ "%s {rfoe%d lmac%d} invalid intf mode, drop pkt\n",
+ netdev->name, priv->rfoe_num, priv->lmac_id);
+ /* update stats */
+ priv->stats.tx_dropped++;
+ priv->last_tx_dropped_jiffies = jiffies;
+ goto exit;
+ }
+
+ if (unlikely(!netif_carrier_ok(netdev))) {
+ netif_err(priv, tx_err, netdev,
+ "%s {rfoe%d lmac%d} link down, drop pkt\n",
+ netdev->name, priv->rfoe_num,
+ priv->lmac_id);
+ /* update stats */
+ if (pkt_type == PACKET_TYPE_ECPRI) {
+ priv->stats.ecpri_tx_dropped++;
+ priv->last_tx_dropped_jiffies = jiffies;
+ } else if (pkt_type == PACKET_TYPE_PTP) {
+ priv->stats.ptp_tx_dropped++;
+ priv->last_tx_ptp_dropped_jiffies = jiffies;
+ } else {
+ priv->stats.tx_dropped++;
+ priv->last_tx_dropped_jiffies = jiffies;
+ }
+
+ goto exit;
+ }
+
+ if (unlikely(!(priv->pkt_type_mask & (1U << pkt_type)))) {
+ netif_err(priv, tx_queued, netdev,
+ "%s {rfoe%d lmac%d} pkt not supported, drop pkt\n",
+ netdev->name, priv->rfoe_num,
+ priv->lmac_id);
+ /* update stats */
+ if (pkt_type == PACKET_TYPE_ECPRI) {
+ priv->stats.ecpri_tx_dropped++;
+ priv->last_tx_dropped_jiffies = jiffies;
+ } else if (pkt_type == PACKET_TYPE_PTP) {
+ priv->stats.ptp_tx_dropped++;
+ priv->last_tx_ptp_dropped_jiffies = jiffies;
+ } else {
+ priv->stats.tx_dropped++;
+ priv->last_tx_dropped_jiffies = jiffies;
+ }
+
+ goto exit;
+ }
+
+ /* get psm queue number */
+ psm_queue_id = job_cfg->psm_queue_id;
+ netif_dbg(priv, tx_queued, priv->netdev,
+ "psm: queue(%d): cfg=0x%llx ptr=0x%llx space=0x%llx\n",
+ psm_queue_id,
+ readq(priv->psm_reg_base + PSM_QUEUE_CFG(psm_queue_id)),
+ readq(priv->psm_reg_base + PSM_QUEUE_PTR(psm_queue_id)),
+ readq(priv->psm_reg_base + PSM_QUEUE_SPACE(psm_queue_id)));
+
+ /* check psm queue space available */
+ regval = readq(priv->psm_reg_base + PSM_QUEUE_SPACE(psm_queue_id));
+ queue_space = regval & 0x7FFF;
+ if (queue_space < 1 && pkt_type != PACKET_TYPE_PTP) {
+ netif_err(priv, tx_err, netdev,
+ "no space in psm queue %d, dropping pkt\n",
+ psm_queue_id);
+ netif_stop_queue(netdev);
+ dev_kfree_skb_any(skb);
+ /* update stats */
+ if (pkt_type == PACKET_TYPE_ECPRI)
+ priv->stats.ecpri_tx_dropped++;
+ else
+ priv->stats.tx_dropped++;
+
+ priv->last_tx_dropped_jiffies = jiffies;
+
+ mod_timer(&priv->tx_timer, jiffies + msecs_to_jiffies(100));
+ spin_unlock_irqrestore(&job_cfg->lock, flags);
+ return NETDEV_TX_OK;
+ }
+
+ /* get the tx job entry */
+ job_entry = (struct tx_job_entry *)
+ &job_cfg->job_entries[job_cfg->q_idx];
+
+ netif_dbg(priv, tx_queued, priv->netdev,
+ "rfoe=%d lmac=%d psm_queue=%d tx_job_entry %d job_cmd_lo=0x%llx job_cmd_high=0x%llx jd_iova_addr=0x%llx\n",
+ priv->rfoe_num, priv->lmac_id, psm_queue_id, job_cfg->q_idx,
+ job_entry->job_cmd_lo, job_entry->job_cmd_hi,
+ job_entry->jd_iova_addr);
+
+ /* hw timestamp */
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ priv->tx_hw_tstamp_en) {
+ if (list_empty(&priv->ptp_skb_list.list) &&
+ !test_and_set_bit_lock(PTP_TX_IN_PROGRESS, &priv->state)) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ priv->ptp_tx_skb = skb;
+ psm_cmd_lo = (struct psm_cmd_addjob_s *)
+ &job_entry->job_cmd_lo;
+ priv->ptp_job_tag = psm_cmd_lo->jobtag;
+ } else {
+ /* check ptp queue count */
+ if (priv->ptp_skb_list.count >= max_ptp_req) {
+ netif_err(priv, tx_err, netdev,
+ "ptp list full, dropping pkt\n");
+ priv->stats.ptp_tx_dropped++;
+ priv->last_tx_ptp_dropped_jiffies = jiffies;
+ goto exit;
+ }
+ /* allocate and add ptp req to queue */
+ ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
+ if (!ts_skb) {
+ priv->stats.ptp_tx_dropped++;
+ priv->last_tx_ptp_dropped_jiffies = jiffies;
+ goto exit;
+ }
+ ts_skb->skb = skb;
+ list_add_tail(&ts_skb->list, &priv->ptp_skb_list.list);
+ priv->ptp_skb_list.count++;
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ priv->stats.ptp_tx_packets++;
+ priv->stats.tx_bytes += skb->len;
+ /* sw timestamp */
+ skb_tx_timestamp(skb);
+ goto exit; /* submit the packet later */
+ }
+ }
+
+ /* sw timestamp */
+ skb_tx_timestamp(skb);
+
+ if (unlikely(netif_msg_pktdata(priv))) {
+ netdev_printk(KERN_DEBUG, priv->netdev, "Tx: skb %pS len=%d\n",
+ skb, skb->len);
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 4,
+ skb->data, skb->len, true);
+ }
+
+ /* update length and block size in jd dma cfg word */
+ jd_cfg_ptr_iova = *(u64 *)((u8 *)job_entry->jd_ptr + 8);
+ jd_cfg_ptr = otx2_iova_to_virt(priv->iommu_domain, jd_cfg_ptr_iova);
+ jd_cfg_ptr->cfg1.pkt_len = skb->len;
+ jd_dma_cfg_word_0 = (struct mhbw_jd_dma_cfg_word_0_s *)
+ job_entry->rd_dma_ptr;
+ jd_dma_cfg_word_0->block_size = (((skb->len + 15) >> 4) * 4);
+
+ /* update rfoe_mode and lmac id for non-ptp (shared) psm job entry */
+ if (pkt_type != PACKET_TYPE_PTP) {
+ jd_cfg_ptr->cfg.lmacid = priv->lmac_id & 0x3;
+ if (pkt_type == PACKET_TYPE_ECPRI)
+ jd_cfg_ptr->cfg.rfoe_mode = 1;
+ else
+ jd_cfg_ptr->cfg.rfoe_mode = 0;
+ }
+
+ /* copy packet data to rd_dma_ptr start addr */
+ jd_dma_cfg_word_1 = (struct mhbw_jd_dma_cfg_word_1_s *)
+ ((u8 *)job_entry->rd_dma_ptr + 8);
+ memcpy(otx2_iova_to_virt(priv->iommu_domain,
+ jd_dma_cfg_word_1->start_addr),
+ skb->data, skb->len);
+
+ /* make sure that all memory writes are completed */
+ dma_wmb();
+
+ /* submit PSM job */
+ writeq(job_entry->job_cmd_lo,
+ priv->psm_reg_base + PSM_QUEUE_CMD_LO(psm_queue_id));
+ writeq(job_entry->job_cmd_hi,
+ priv->psm_reg_base + PSM_QUEUE_CMD_HI(psm_queue_id));
+
+ /* update stats */
+ if (pkt_type == PACKET_TYPE_ECPRI) {
+ priv->stats.ecpri_tx_packets++;
+ priv->last_tx_jiffies = jiffies;
+ } else if (pkt_type == PACKET_TYPE_PTP) {
+ priv->stats.ptp_tx_packets++;
+ priv->last_tx_ptp_jiffies = jiffies;
+ } else {
+ priv->stats.tx_packets++;
+ priv->last_tx_jiffies = jiffies;
+ }
+ priv->stats.tx_bytes += skb->len;
+
+ /* increment queue index */
+ job_cfg->q_idx++;
+ if (job_cfg->q_idx == job_cfg->num_entries)
+ job_cfg->q_idx = 0;
+exit:
+ if (!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
+ dev_kfree_skb_any(skb);
+
+ spin_unlock_irqrestore(&job_cfg->lock, flags);
+
+ return NETDEV_TX_OK;
+}
+
+/* netdev open */
+static int otx2_rfoe_eth_open(struct net_device *netdev)
+{
+ struct otx2_rfoe_ndev_priv *priv = netdev_priv(netdev);
+ int idx;
+
+ for (idx = 0; idx < PACKET_TYPE_MAX; idx++) {
+ if (!(priv->pkt_type_mask & (1U << idx)))
+ continue;
+ napi_enable(&priv->rx_ft_cfg[idx].napi);
+ }
+
+ priv->ptp_tx_skb = NULL;
+
+ spin_lock(&priv->lock);
+ clear_bit(RFOE_INTF_DOWN, &priv->state);
+
+ if (priv->link_state == LINK_STATE_UP) {
+ netif_carrier_on(netdev);
+ netif_start_queue(netdev);
+ }
+ spin_unlock(&priv->lock);
+
+ return 0;
+}
+
+/* netdev close */
+static int otx2_rfoe_eth_stop(struct net_device *netdev)
+{
+ struct otx2_rfoe_ndev_priv *priv = netdev_priv(netdev);
+ struct ptp_tstamp_skb *ts_skb, *ts_skb2;
+ int idx;
+
+ spin_lock(&priv->lock);
+ set_bit(RFOE_INTF_DOWN, &priv->state);
+
+ netif_stop_queue(netdev);
+ netif_carrier_off(netdev);
+
+ spin_unlock(&priv->lock);
+
+ for (idx = 0; idx < PACKET_TYPE_MAX; idx++) {
+ if (!(priv->pkt_type_mask & (1U << idx)))
+ continue;
+ napi_disable(&priv->rx_ft_cfg[idx].napi);
+ }
+
+ del_timer_sync(&priv->tx_timer);
+
+ /* cancel any pending ptp work item in progress */
+ cancel_work_sync(&priv->ptp_tx_work);
+ if (priv->ptp_tx_skb) {
+ dev_kfree_skb_any(priv->ptp_tx_skb);
+ priv->ptp_tx_skb = NULL;
+ clear_bit_unlock(PTP_TX_IN_PROGRESS, &priv->state);
+ }
+
+ /* clear ptp skb list */
+ cancel_work_sync(&priv->ptp_queue_work);
+ list_for_each_entry_safe(ts_skb, ts_skb2,
+ &priv->ptp_skb_list.list, list) {
+ list_del(&ts_skb->list);
+ kfree(ts_skb);
+ }
+ priv->ptp_skb_list.count = 0;
+
+ return 0;
+}
+
+static int otx2_rfoe_init(struct net_device *netdev)
+{
+ struct otx2_rfoe_ndev_priv *priv = netdev_priv(netdev);
+
+ /* Enable VLAN TPID match */
+ writeq(0x18100, (priv->rfoe_reg_base +
+ RFOEX_RX_VLANX_CFG(priv->rfoe_num, 0)));
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ return 0;
+}
+
+static int otx2_rfoe_vlan_rx_configure(struct net_device *netdev, u16 vid,
+ bool forward)
+{
+ struct rfoe_rx_ind_vlanx_fwd fwd;
+ struct otx2_rfoe_ndev_priv *priv = netdev_priv(netdev);
+ struct otx2_bphy_cdev_priv *cdev_priv = priv->cdev_priv;
+ u64 index = (vid >> 6) & 0x3F;
+ u64 mask = (0x1ll << (vid & 0x3F));
+ unsigned long flags;
+
+ if (vid >= VLAN_N_VID) {
+ netdev_err(netdev, "Invalid VLAN ID %d\n", vid);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&cdev_priv->mbt_lock, flags);
+
+ if (forward && priv->rfoe_common->rx_vlan_fwd_refcnt[vid]++)
+ goto out;
+
+ if (!forward && --priv->rfoe_common->rx_vlan_fwd_refcnt[vid])
+ goto out;
+
+ /* read current fwd mask */
+ writeq(index, (priv->rfoe_reg_base +
+ RFOEX_RX_INDIRECT_INDEX_OFFSET(priv->rfoe_num)));
+ fwd.fwd = readq(priv->rfoe_reg_base +
+ RFOEX_RX_IND_VLANX_FWD(priv->rfoe_num, 0));
+
+ if (forward)
+ fwd.fwd |= mask;
+ else
+ fwd.fwd &= ~mask;
+
+ /* write the new fwd mask */
+ writeq(index, (priv->rfoe_reg_base +
+ RFOEX_RX_INDIRECT_INDEX_OFFSET(priv->rfoe_num)));
+ writeq(fwd.fwd, (priv->rfoe_reg_base +
+ RFOEX_RX_IND_VLANX_FWD(priv->rfoe_num, 0)));
+
+out:
+ spin_unlock_irqrestore(&cdev_priv->mbt_lock, flags);
+
+ return 0;
+}
+
+static int otx2_rfoe_vlan_rx_add(struct net_device *netdev, __be16 proto,
+ u16 vid)
+{
+ return otx2_rfoe_vlan_rx_configure(netdev, vid, true);
+}
+
+static int otx2_rfoe_vlan_rx_kill(struct net_device *netdev, __be16 proto,
+ u16 vid)
+{
+ return otx2_rfoe_vlan_rx_configure(netdev, vid, false);
+}
+
+static const struct net_device_ops otx2_rfoe_netdev_ops = {
+ .ndo_init = otx2_rfoe_init,
+ .ndo_open = otx2_rfoe_eth_open,
+ .ndo_stop = otx2_rfoe_eth_stop,
+ .ndo_start_xmit = otx2_rfoe_eth_start_xmit,
+ .ndo_do_ioctl = otx2_rfoe_ioctl,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_get_stats64 = otx2_rfoe_get_stats64,
+ .ndo_vlan_rx_add_vid = otx2_rfoe_vlan_rx_add,
+ .ndo_vlan_rx_kill_vid = otx2_rfoe_vlan_rx_kill,
+};
+
+static void otx2_rfoe_dump_rx_ft_cfg(struct otx2_rfoe_ndev_priv *priv)
+{
+ struct rx_ft_cfg *ft_cfg;
+ int idx;
+
+ for (idx = 0; idx < PACKET_TYPE_MAX; idx++) {
+ if (!(priv->pkt_type_mask & (1U << idx)))
+ continue;
+ ft_cfg = &priv->rx_ft_cfg[idx];
+ pr_debug("rfoe=%d lmac=%d pkttype=%d flowid=%d mbt: idx=%d size=%d nbufs=%d iova=0x%llx jdt: idx=%d size=%d num_jd=%d iova=0x%llx\n",
+ priv->rfoe_num, priv->lmac_id, ft_cfg->pkt_type,
+ ft_cfg->flow_id, ft_cfg->mbt_idx, ft_cfg->buf_size,
+ ft_cfg->num_bufs, ft_cfg->mbt_iova_addr,
+ ft_cfg->jdt_idx, ft_cfg->jd_size, ft_cfg->num_jd,
+ ft_cfg->jdt_iova_addr);
+ }
+}
+
+static inline void otx2_rfoe_fill_rx_ft_cfg(struct otx2_rfoe_ndev_priv *priv,
+ struct bphy_netdev_comm_if *if_cfg)
+{
+ struct otx2_bphy_cdev_priv *cdev_priv = priv->cdev_priv;
+ struct bphy_netdev_rbuf_info *rbuf_info;
+ struct rx_ft_cfg *ft_cfg;
+ u64 jdt_cfg0, iova;
+ int idx;
+
+ /* RX flow table configuration */
+ for (idx = 0; idx < PACKET_TYPE_MAX; idx++) {
+ if (!(priv->pkt_type_mask & (1U << idx)))
+ continue;
+ ft_cfg = &priv->rx_ft_cfg[idx];
+ rbuf_info = &if_cfg->rbuf_info[idx];
+ ft_cfg->pkt_type = rbuf_info->pkt_type;
+ ft_cfg->gp_int_num = rbuf_info->gp_int_num;
+ ft_cfg->flow_id = rbuf_info->flow_id;
+ ft_cfg->mbt_idx = rbuf_info->mbt_index;
+ ft_cfg->buf_size = rbuf_info->buf_size * 16;
+ ft_cfg->num_bufs = rbuf_info->num_bufs;
+ ft_cfg->mbt_iova_addr = rbuf_info->mbt_iova_addr;
+ iova = ft_cfg->mbt_iova_addr;
+ ft_cfg->mbt_virt_addr = otx2_iova_to_virt(priv->iommu_domain,
+ iova);
+ ft_cfg->jdt_idx = rbuf_info->jdt_index;
+ ft_cfg->jd_size = rbuf_info->jd_size * 8;
+ ft_cfg->num_jd = rbuf_info->num_jd;
+ ft_cfg->jdt_iova_addr = rbuf_info->jdt_iova_addr;
+ iova = ft_cfg->jdt_iova_addr;
+ ft_cfg->jdt_virt_addr = otx2_iova_to_virt(priv->iommu_domain,
+ iova);
+ spin_lock(&cdev_priv->mbt_lock);
+ writeq(ft_cfg->jdt_idx,
+ (priv->rfoe_reg_base +
+ RFOEX_RX_INDIRECT_INDEX_OFFSET(priv->rfoe_num)));
+ jdt_cfg0 = readq(priv->rfoe_reg_base +
+ RFOEX_RX_IND_JDT_CFG0(priv->rfoe_num));
+ spin_unlock(&cdev_priv->mbt_lock);
+ ft_cfg->jd_rd_offset = ((jdt_cfg0 >> 28) & 0xf) * 8;
+ ft_cfg->pkt_offset = (u8)((jdt_cfg0 >> 52) & 0x7);
+ ft_cfg->priv = priv;
+ netif_napi_add(priv->netdev, &ft_cfg->napi,
+ otx2_rfoe_napi_poll,
+ NAPI_POLL_WEIGHT);
+ }
+}
+
+static void otx2_rfoe_fill_tx_job_entries(struct otx2_rfoe_ndev_priv *priv,
+ struct tx_job_queue_cfg *job_cfg,
+ struct bphy_netdev_tx_psm_cmd_info *tx_job,
+ int num_entries)
+{
+ struct tx_job_entry *job_entry;
+ u64 jd_cfg_iova, iova;
+ int i;
+
+ for (i = 0; i < num_entries; i++) {
+ job_entry = &job_cfg->job_entries[i];
+ job_entry->job_cmd_lo = tx_job->low_cmd;
+ job_entry->job_cmd_hi = tx_job->high_cmd;
+ job_entry->jd_iova_addr = tx_job->jd_iova_addr;
+ iova = job_entry->jd_iova_addr;
+ job_entry->jd_ptr = otx2_iova_to_virt(priv->iommu_domain, iova);
+ jd_cfg_iova = *(u64 *)((u8 *)job_entry->jd_ptr + 8);
+ job_entry->jd_cfg_ptr = otx2_iova_to_virt(priv->iommu_domain,
+ jd_cfg_iova);
+ job_entry->rd_dma_iova_addr = tx_job->rd_dma_iova_addr;
+ iova = job_entry->rd_dma_iova_addr;
+ job_entry->rd_dma_ptr = otx2_iova_to_virt(priv->iommu_domain,
+ iova);
+ pr_debug("job_cmd_lo=0x%llx job_cmd_hi=0x%llx jd_iova_addr=0x%llx rd_dma_iova_addr=%llx\n",
+ tx_job->low_cmd, tx_job->high_cmd,
+ tx_job->jd_iova_addr, tx_job->rd_dma_iova_addr);
+ tx_job++;
+ }
+ /* get psm queue id */
+ job_entry = &job_cfg->job_entries[0];
+ job_cfg->psm_queue_id = (job_entry->job_cmd_lo >> 8) & 0xff;
+ job_cfg->q_idx = 0;
+ job_cfg->num_entries = num_entries;
+ spin_lock_init(&job_cfg->lock);
+}
+
+int otx2_rfoe_parse_and_init_intf(struct otx2_bphy_cdev_priv *cdev,
+ struct bphy_netdev_comm_intf_cfg *cfg)
+{
+ int i, intf_idx = 0, num_entries, lmac, idx, ret;
+ struct bphy_netdev_tx_psm_cmd_info *tx_info;
+ struct otx2_rfoe_drv_ctx *drv_ctx = NULL;
+ struct otx2_rfoe_ndev_priv *priv, *priv2;
+ struct bphy_netdev_rfoe_if *rfoe_cfg;
+ struct bphy_netdev_comm_if *if_cfg;
+ struct tx_job_queue_cfg *tx_cfg;
+ struct ptp_bcn_off_cfg *ptp_cfg;
+ struct net_device *netdev;
+ struct rx_ft_cfg *ft_cfg;
+ u8 pkt_type_mask;
+
+ ptp_cfg = kzalloc(sizeof(*ptp_cfg), GFP_KERNEL);
+ if (!ptp_cfg)
+ return -ENOMEM;
+ timer_setup(&ptp_cfg->ptp_timer, otx2_rfoe_ptp_offset_timer, 0);
+ ptp_cfg->clk_cfg.clk_freq_ghz = PTP_CLK_FREQ_GHZ;
+ ptp_cfg->clk_cfg.clk_freq_div = PTP_CLK_FREQ_DIV;
+ spin_lock_init(&ptp_cfg->lock);
+
+ for (i = 0; i < MAX_RFOE_INTF; i++) {
+ priv2 = NULL;
+ rfoe_cfg = &cfg[i].rfoe_if_cfg;
+ pkt_type_mask = rfoe_cfg->pkt_type_mask;
+ for (lmac = 0; lmac < MAX_LMAC_PER_RFOE; lmac++) {
+ if_cfg = &rfoe_cfg->if_cfg[lmac];
+ /* check if lmac is valid */
+ if (!if_cfg->lmac_info.is_valid) {
+ dev_dbg(cdev->dev,
+ "rfoe%d lmac%d invalid\n", i, lmac);
+ continue;
+ }
+ netdev =
+ alloc_etherdev(sizeof(struct otx2_rfoe_ndev_priv));
+ if (!netdev) {
+ dev_err(cdev->dev,
+ "error allocating net device\n");
+ ret = -ENOMEM;
+ goto err_exit;
+ }
+ priv = netdev_priv(netdev);
+ memset(priv, 0, sizeof(*priv));
+ if (!priv2) {
+ priv->rfoe_common =
+ kzalloc(sizeof(struct rfoe_common_cfg),
+ GFP_KERNEL);
+ if (!priv->rfoe_common) {
+ dev_err(cdev->dev, "kzalloc failed\n");
+ free_netdev(netdev);
+ ret = -ENOMEM;
+ goto err_exit;
+ }
+ priv->rfoe_common->refcnt = 1;
+ }
+ spin_lock_init(&priv->lock);
+ priv->netdev = netdev;
+ priv->cdev_priv = cdev;
+ priv->msg_enable = netif_msg_init(-1, 0);
+ spin_lock_init(&priv->stats.lock);
+ priv->rfoe_num = if_cfg->lmac_info.rfoe_num;
+ priv->lmac_id = if_cfg->lmac_info.lane_num;
+ priv->if_type = cfg[i].if_type;
+ memcpy(priv->mac_addr, if_cfg->lmac_info.eth_addr,
+ ETH_ALEN);
+ if (is_valid_ether_addr(priv->mac_addr))
+ ether_addr_copy(netdev->dev_addr,
+ priv->mac_addr);
+ else
+ random_ether_addr(netdev->dev_addr);
+ priv->pdev = pci_get_device(OTX2_BPHY_PCI_VENDOR_ID,
+ OTX2_BPHY_PCI_DEVICE_ID,
+ NULL);
+ priv->iommu_domain =
+ iommu_get_domain_for_dev(&priv->pdev->dev);
+ priv->bphy_reg_base = bphy_reg_base;
+ priv->psm_reg_base = psm_reg_base;
+ priv->rfoe_reg_base = rfoe_reg_base;
+ priv->bcn_reg_base = bcn_reg_base;
+ priv->ptp_reg_base = ptp_reg_base;
+ priv->ptp_cfg = ptp_cfg;
+ ++(priv->ptp_cfg->refcnt);
+
+ /* Initialise PTP TX work queue */
+ INIT_WORK(&priv->ptp_tx_work, otx2_rfoe_ptp_tx_work);
+ INIT_WORK(&priv->ptp_queue_work,
+ otx2_rfoe_ptp_submit_work);
+
+ /* Initialise PTP skb list */
+ INIT_LIST_HEAD(&priv->ptp_skb_list.list);
+ priv->ptp_skb_list.count = 0;
+ timer_setup(&priv->tx_timer, otx2_rfoe_tx_timer_cb, 0);
+
+ priv->pkt_type_mask = pkt_type_mask;
+ otx2_rfoe_fill_rx_ft_cfg(priv, if_cfg);
+ otx2_rfoe_dump_rx_ft_cfg(priv);
+
+ /* TX PTP job configuration */
+ if (priv->pkt_type_mask & (1U << PACKET_TYPE_PTP)) {
+ tx_cfg = &priv->tx_ptp_job_cfg;
+ tx_info = &if_cfg->ptp_pkt_info[0];
+ num_entries = MAX_PTP_MSG_PER_LMAC;
+ otx2_rfoe_fill_tx_job_entries(priv, tx_cfg,
+ tx_info,
+ num_entries);
+ }
+
+ /* TX ECPRI/OTH(PTP) job configuration */
+ if (!priv2 &&
+ ((priv->pkt_type_mask &
+ (1U << PACKET_TYPE_OTHER)) ||
+ (priv->pkt_type_mask &
+ (1U << PACKET_TYPE_ECPRI)))) {
+ /* RFOE2 has only 2 LMACs */
+ num_entries = (priv->rfoe_num < 2) ?
+ MAX_OTH_MSG_PER_RFOE : 32;
+ tx_cfg = &priv->rfoe_common->tx_oth_job_cfg;
+ tx_info = &rfoe_cfg->oth_pkt_info[0];
+ otx2_rfoe_fill_tx_job_entries(priv, tx_cfg,
+ tx_info,
+ num_entries);
+ } else {
+ /* share rfoe_common data */
+ priv->rfoe_common = priv2->rfoe_common;
+ ++(priv->rfoe_common->refcnt);
+ }
+
+ /* keep the first valid lmac priv of this rfoe to share common data */
+ if (!priv2)
+ priv2 = priv;
+
+ intf_idx = (i * 4) + lmac;
+ snprintf(netdev->name, sizeof(netdev->name),
+ "rfoe%d", intf_idx);
+ netdev->netdev_ops = &otx2_rfoe_netdev_ops;
+ otx2_rfoe_set_ethtool_ops(netdev);
+ otx2_rfoe_ptp_init(priv);
+ netdev->watchdog_timeo = (15 * HZ);
+ netdev->mtu = 1500U;
+ netdev->min_mtu = ETH_MIN_MTU;
+ netdev->max_mtu = 1500U;
+ ret = register_netdev(netdev);
+ if (ret < 0) {
+ dev_err(cdev->dev,
+ "failed to register net device %s\n",
+ netdev->name);
+ free_netdev(netdev);
+ ret = -ENODEV;
+ goto err_exit;
+ }
+ dev_dbg(cdev->dev, "net device %s registered\n",
+ netdev->name);
+
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+ set_bit(RFOE_INTF_DOWN, &priv->state);
+ priv->link_state = LINK_STATE_UP;
+
+ /* initialize global ctx */
+ drv_ctx = &rfoe_drv_ctx[intf_idx];
+ drv_ctx->rfoe_num = priv->rfoe_num;
+ drv_ctx->lmac_id = priv->lmac_id;
+ drv_ctx->valid = 1;
+ drv_ctx->netdev = netdev;
+ drv_ctx->ft_cfg = &priv->rx_ft_cfg[0];
+
+ /* create debugfs entry */
+ otx2_rfoe_debugfs_create(drv_ctx);
+ }
+ }
+
+ return 0;
+
+err_exit:
+ for (i = 0; i < RFOE_MAX_INTF; i++) {
+ drv_ctx = &rfoe_drv_ctx[i];
+ if (drv_ctx->valid) {
+ otx2_rfoe_debugfs_remove(drv_ctx);
+ netdev = drv_ctx->netdev;
+ priv = netdev_priv(netdev);
+ otx2_rfoe_ptp_destroy(priv);
+ unregister_netdev(netdev);
+ for (idx = 0; idx < PACKET_TYPE_MAX; idx++) {
+ if (!(priv->pkt_type_mask & (1U << idx)))
+ continue;
+ ft_cfg = &priv->rx_ft_cfg[idx];
+ netif_napi_del(&ft_cfg->napi);
+ }
+ --(priv->rfoe_common->refcnt);
+ if (priv->rfoe_common->refcnt == 0)
+ kfree(priv->rfoe_common);
+ free_netdev(netdev);
+ drv_ctx->valid = 0;
+ }
+ }
+ del_timer_sync(&ptp_cfg->ptp_timer);
+ kfree(ptp_cfg);
+
+ return ret;
+}
+
+static void otx2_rfoe_debugfs_reader(char *buffer, size_t count, void *priv)
+{
+ struct otx2_rfoe_drv_ctx *ctx;
+ struct otx2_rfoe_ndev_priv *netdev;
+ u8 ptp_tx_in_progress;
+ unsigned int queued_ptp_reqs;
+ u8 queue_stopped, state_up;
+ u16 other_tx_psm_space, ptp_tx_psm_space, queue_id;
+ u64 regval;
+ const char *formatter;
+
+ ctx = priv;
+ netdev = netdev_priv(ctx->netdev);
+ ptp_tx_in_progress = test_bit(PTP_TX_IN_PROGRESS, &netdev->state);
+ queued_ptp_reqs = netdev->ptp_skb_list.count;
+ queue_stopped = netif_queue_stopped(ctx->netdev);
+ state_up = netdev->link_state;
+ formatter = otx2_rfoe_debugfs_get_formatter();
+
+ /* other tx psm space */
+ queue_id = netdev->rfoe_common->tx_oth_job_cfg.psm_queue_id;
+ regval = readq(netdev->psm_reg_base + PSM_QUEUE_SPACE(queue_id));
+ other_tx_psm_space = regval & 0x7FFF;
+
+ /* ptp tx psm space */
+ queue_id = netdev->tx_ptp_job_cfg.psm_queue_id;
+ regval = readq(netdev->psm_reg_base + PSM_QUEUE_SPACE(queue_id));
+ ptp_tx_psm_space = regval & 0x7FFF;
+
+ snprintf(buffer, count, formatter,
+ ptp_tx_in_progress,
+ queued_ptp_reqs,
+ queue_stopped,
+ state_up,
+ netdev->last_tx_jiffies,
+ netdev->last_tx_dropped_jiffies,
+ netdev->last_tx_ptp_jiffies,
+ netdev->last_tx_ptp_dropped_jiffies,
+ netdev->last_rx_jiffies,
+ netdev->last_rx_dropped_jiffies,
+ netdev->last_rx_ptp_jiffies,
+ netdev->last_rx_ptp_dropped_jiffies,
+ jiffies,
+ other_tx_psm_space,
+ ptp_tx_psm_space);
+}
+
+static const char *otx2_rfoe_debugfs_get_formatter(void)
+{
+ static const char *buffer_format = "ptp-tx-in-progress: %u\n"
+ "queued-ptp-reqs: %u\n"
+ "queue-stopped: %u\n"
+ "state-up: %u\n"
+ "last-tx-jiffies: %lu\n"
+ "last-tx-dropped-jiffies: %lu\n"
+ "last-tx-ptp-jiffies: %lu\n"
+ "last-tx-ptp-dropped-jiffies: %lu\n"
+ "last-rx-jiffies: %lu\n"
+ "last-rx-dropped-jiffies: %lu\n"
+ "last-rx-ptp-jiffies: %lu\n"
+ "last-rx-ptp-dropped-jiffies: %lu\n"
+ "current-jiffies: %lu\n"
+ "other-tx-psm-space: %u\n"
+ "ptp-tx-psm-space: %u\n";
+
+ return buffer_format;
+}
+
+static size_t otx2_rfoe_debugfs_get_buffer_size(void)
+{
+ static size_t buffer_size;
+
+ if (!buffer_size) {
+ const char *formatter = otx2_rfoe_debugfs_get_formatter();
+ u8 max_boolean = 1;
+ int max_ptp_req_count = max_ptp_req;
+ unsigned long max_jiffies = (unsigned long)-1;
+ u16 max_psm_space = (u16)-1;
+
+ buffer_size = snprintf(NULL, 0, formatter,
+ max_boolean,
+ max_ptp_req_count,
+ max_boolean,
+ max_boolean,
+ max_jiffies,
+ max_jiffies,
+ max_jiffies,
+ max_jiffies,
+ max_jiffies,
+ max_jiffies,
+ max_jiffies,
+ max_jiffies,
+ max_jiffies,
+ max_psm_space,
+ max_psm_space);
+ ++buffer_size;
+ }
+
+ return buffer_size;
+}
+
+static void otx2_rfoe_debugfs_create(struct otx2_rfoe_drv_ctx *ctx)
+{
+ size_t buffer_size = otx2_rfoe_debugfs_get_buffer_size();
+
+ ctx->debugfs = otx2_bphy_debugfs_add_file(ctx->netdev->name,
+ buffer_size, ctx,
+ otx2_rfoe_debugfs_reader);
+}
+
+static void otx2_rfoe_debugfs_remove(struct otx2_rfoe_drv_ctx *ctx)
+{
+ if (ctx->debugfs)
+ otx2_bphy_debugfs_remove_file(ctx->debugfs);
+}
+
+void otx2_rfoe_set_link_state(struct net_device *netdev, u8 state)
+{
+ struct otx2_rfoe_ndev_priv *priv;
+
+ priv = netdev_priv(netdev);
+
+ spin_lock(&priv->lock);
+ if (priv->link_state != state) {
+ priv->link_state = state;
+ if (state == LINK_STATE_DOWN) {
+ netdev_info(netdev, "Link DOWN\n");
+ if (netif_running(netdev)) {
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+ }
+ } else {
+ netdev_info(netdev, "Link UP\n");
+ if (netif_running(netdev)) {
+ netif_carrier_on(netdev);
+ netif_start_queue(netdev);
+ }
+ }
+ }
+ spin_unlock(&priv->lock);
+}
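
Not part of the patch: the SIOCSHWTSTAMP handler above follows the standard Linux timestamping UAPI, so hardware timestamps can be enabled from userspace in the usual way. A minimal illustrative sketch, assuming the interface is named rfoe0 as assigned in otx2_rfoe_parse_and_init_intf():

	/* enable TX/RX hardware timestamping on rfoe0 (illustrative only) */
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/net_tstamp.h>
	#include <linux/sockios.h>

	int main(void)
	{
		struct hwtstamp_config cfg = {
			.tx_type   = HWTSTAMP_TX_ON,
			.rx_filter = HWTSTAMP_FILTER_ALL,
		};
		struct ifreq ifr;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		if (fd < 0)
			return 1;
		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "rfoe0", IFNAMSIZ - 1);
		ifr.ifr_data = (char *)&cfg;
		if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
			perror("SIOCSHWTSTAMP");
		close(fd);
		return 0;
	}
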
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_rfoe.h b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_rfoe.h
new file mode 100644
index 000000000000..da26a77d3cc6
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_rfoe.h
@@ -0,0 +1,227 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTx2 BPHY RFOE/CPRI Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _OTX2_RFOE_H_
+#define _OTX2_RFOE_H_
+
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/iommu.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_ether.h>
+#include <linux/net_tstamp.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/if_vlan.h>
+
+#include "otx2_bphy.h"
+#include "rfoe_common.h"
+
+#define RFOE_RX_INTR_SHIFT(a) (32 - ((a) + 1) * 3)
+#define RFOE_RX_INTR_MASK(a) (RFOE_RX_INTR_EN << \
+ RFOE_RX_INTR_SHIFT(a))
+#define RFOE_TX_PTP_INTR_MASK(a, b) (1UL << ((a) * 4 + (b)))
+
+#define MAX_RFOE_INTF 3 /* Max RFOE instances */
+#define RFOE_MAX_INTF 10 /* 2 rfoe x 4 lmac + 1 rfoe x 2 lmac */
+#define PCI_SUBSYS_DEVID_OCTX2_95XXN 0xB400
+
+/* ethtool msg */
+#define OTX2_RFOE_MSG_DEFAULT (NETIF_MSG_DRV)
+
+/* PTP clock time operates by adding a constant increment every clock
+ * cycle. That increment is expressed (MIO_PTP_CLOCK_COMP) as a Q32.32
+ * number of nanoseconds (32 integer bits and 32 fractional bits). The
+ * value must equal the clock period, i.e. 1/(PTP clock frequency in GHz)
+ * nanoseconds. If the PTP clock freq is 1 GHz there is no issue, but for
+ * other input clock frequencies, for example 950 MHz (SCLK) or 153.6 MHz
+ * (bcn_clk/2), the MIO_PTP_CLOCK_COMP register value can't be expressed
+ * exactly and an error accumulates over time depending on the direction
+ * in which PTP_CLOCK_COMP is rounded. The accumulated error is around
+ * -70ps or +150ps per second in the 950 MHz case.
+ *
+ * To solve this issue, the driver calculates the PTP timestamps using the
+ * BCN clock as reference, as per the algorithm given below.
+ *
+ * Set PTP tick (= MIO_PTP_CLOCK_COMP) to 1.0 ns
+ * Sample once, at exactly the same time, BCN and PTP to (BCN0, PTP0).
+ * Calculate (applying BCN-to-PTP epoch difference and an OAM parameter
+ * secondaryBcnOffset)
+ * PTPbase[ns] = NanoSec(BCN0) + NanoSec(315964819[s]) - secondaryBcnOffset[ns]
+ * When reading packet timestamp (tick count) PTPn, convert it to nanoseconds.
+ * PTP pkt timestamp = PTPbase[ns] + (PTPn - PTP0) / (PTP Clock in GHz)
+ *
+ * The intermediate values need picosecond precision to achieve PTP
+ * accuracy < 1ns. The calculations must not overflow a 64-bit value at
+ * any time, so a timer periodically adjusts the PTP and BCN base values
+ * to avoid such overflow.
+ */
+#define PTP_CLK_FREQ_GHZ 95 /* Clock freq GHz dividend */
+#define PTP_CLK_FREQ_DIV 100 /* Clock freq GHz divisor */
+#define PTP_OFF_RESAMPLE_THRESH 1800 /* resample period in seconds */
+#define PICO_SEC_PER_NSEC 1000 /* pico seconds per nano sec */
+#define UTC_GPS_EPOCH_DIFF 315964819UL /* UTC - GPS epoch secs */
+
+/* global driver context */
+struct otx2_rfoe_drv_ctx {
+ u8 rfoe_num;
+ u8 lmac_id;
+ int valid;
+ struct net_device *netdev;
+ struct rx_ft_cfg *ft_cfg;
+ int tx_gpint_bit;
+ void *debugfs;
+};
+
+extern struct otx2_rfoe_drv_ctx rfoe_drv_ctx[RFOE_MAX_INTF];
+
+/* rx flow table configuration */
+struct rx_ft_cfg {
+ enum bphy_netdev_packet_type pkt_type; /* pkt_type for psw */
+ enum bphy_netdev_rx_gpint gp_int_num;
+ u16 flow_id; /* flow id */
+ u16 mbt_idx; /* mbt index */
+ u16 buf_size; /* mbt buf size */
+ u16 num_bufs; /* mbt num bufs */
+ u64 mbt_iova_addr;
+ void __iomem *mbt_virt_addr;
+ u16 jdt_idx; /* jdt index */
+ u8 jd_size; /* jd size */
+ u16 num_jd; /* num jd's */
+ u64 jdt_iova_addr;
+ void __iomem *jdt_virt_addr;
+ u8 jd_rd_offset; /* jd rd offset */
+ u8 pkt_offset;
+ struct napi_struct napi;
+ struct otx2_rfoe_ndev_priv *priv;
+};
+
+/* PTP clk freq in GHz, represented as a ratio of integers.
+ * This information is passed to the netdev by the ODP BPHY
+ * application via ioctl. The values are used in the PTP
+ * timestamp calculation algorithm.
+ *
+ * For a 950 MHz PTP clock (0.95 GHz), the values are:
+ * clk_freq_ghz = 95
+ * clk_freq_div = 100
+ *
+ * For a 153.6 MHz PTP clock (0.1536 GHz), the values are:
+ * clk_freq_ghz = 1536
+ * clk_freq_div = 10000
+ *
+ */
+struct ptp_clk_cfg {
+ int clk_freq_ghz; /* ptp clk freq */
+ int clk_freq_div; /* ptp clk divisor */
+};
+
+struct bcn_sec_offset_cfg {
+ u8 rfoe_num;
+ u8 lmac_id;
+ s32 sec_bcn_offset;
+};
+
+struct ptp_bcn_ref {
+ u64 ptp0_ns; /* PTP nanosec */
+ u64 bcn0_n1_ns; /* BCN N1 nanosec */
+ u64 bcn0_n2_ps; /* BCN N2 picosec */
+};
+
+struct ptp_bcn_off_cfg {
+ struct ptp_bcn_ref old_ref;
+ struct ptp_bcn_ref new_ref;
+ struct ptp_clk_cfg clk_cfg;
+ struct timer_list ptp_timer;
+ int use_ptp_alg;
+ u8 refcnt;
+ /* protection lock for updating ref */
+ spinlock_t lock;
+};
+
+/* netdev priv */
+struct otx2_rfoe_ndev_priv {
+ u8 rfoe_num;
+ u8 lmac_id;
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+ struct otx2_bphy_cdev_priv *cdev_priv;
+ u32 msg_enable;
+ u32 ptp_ext_clk_rate;
+ void __iomem *bphy_reg_base;
+ void __iomem *psm_reg_base;
+ void __iomem *rfoe_reg_base;
+ void __iomem *bcn_reg_base;
+ void __iomem *ptp_reg_base;
+ struct iommu_domain *iommu_domain;
+ struct rx_ft_cfg rx_ft_cfg[PACKET_TYPE_MAX];
+ struct tx_job_queue_cfg tx_ptp_job_cfg;
+ struct rfoe_common_cfg *rfoe_common;
+ u8 pkt_type_mask;
+ /* priv lock */
+ spinlock_t lock;
+ int rx_hw_tstamp_en;
+ int tx_hw_tstamp_en;
+ struct sk_buff *ptp_tx_skb;
+ u16 ptp_job_tag;
+ struct timer_list tx_timer;
+ unsigned long state;
+ struct work_struct ptp_tx_work;
+ struct work_struct ptp_queue_work;
+ struct ptp_tx_skb_list ptp_skb_list;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_clock_info;
+ struct cyclecounter cycle_counter;
+ struct timecounter time_counter;
+
+ struct delayed_work extts_work;
+ u64 last_extts;
+ u64 thresh;
+
+ struct ptp_pin_desc extts_config;
+ /* ptp lock */
+ struct mutex ptp_lock;
+ struct otx2_rfoe_stats stats;
+ u8 mac_addr[ETH_ALEN];
+ struct ptp_bcn_off_cfg *ptp_cfg;
+ s32 sec_bcn_offset;
+ int if_type;
+ u8 link_state;
+ unsigned long last_tx_jiffies;
+ unsigned long last_tx_ptp_jiffies;
+ unsigned long last_rx_jiffies;
+ unsigned long last_rx_ptp_jiffies;
+ unsigned long last_tx_dropped_jiffies;
+ unsigned long last_tx_ptp_dropped_jiffies;
+ unsigned long last_rx_dropped_jiffies;
+ unsigned long last_rx_ptp_dropped_jiffies;
+};
+
+void otx2_rfoe_rx_napi_schedule(int rfoe_num, u32 status);
+
+int otx2_rfoe_parse_and_init_intf(struct otx2_bphy_cdev_priv *cdev,
+ struct bphy_netdev_comm_intf_cfg *cfg);
+
+void otx2_bphy_rfoe_cleanup(void);
+
+void otx2_rfoe_disable_intf(int rfoe_num);
+
+/* ethtool */
+void otx2_rfoe_set_ethtool_ops(struct net_device *netdev);
+
+/* ptp */
+void otx2_rfoe_calc_ptp_ts(struct otx2_rfoe_ndev_priv *priv, u64 *ts);
+int otx2_rfoe_ptp_init(struct otx2_rfoe_ndev_priv *priv);
+void otx2_rfoe_ptp_destroy(struct otx2_rfoe_ndev_priv *priv);
+
+/* update carrier state */
+void otx2_rfoe_set_link_state(struct net_device *netdev, u8 state);
+
+#endif
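
Not part of the patch: to make the BCN-referenced formula in the header comment above concrete, the stand-alone sketch below walks the conversion "PTP pkt timestamp = PTPbase + (PTPn - PTP0) / (PTP clock in GHz)" using the 95/100 ratio from struct ptp_clk_cfg. The base and tick values are made up; the driver's real implementation is otx2_rfoe_calc_ptp_ts(), which is not shown in this hunk.

	/* illustrative conversion of a raw PTP tick count to nanoseconds */
	#include <stdint.h>
	#include <stdio.h>

	#define PICO_PER_NSEC	1000ULL

	int main(void)
	{
		/* assumed reference sample (BCN-derived base and PTP0 ticks) */
		uint64_t ptp_base_ns  = 1600000000000000000ULL;
		uint64_t ptp0_ticks   = 1000000000ULL;
		/* 0.95 GHz expressed as 95/100, as in struct ptp_clk_cfg */
		uint64_t clk_freq_ghz = 95, clk_freq_div = 100;
		/* made-up packet timestamp, 950000 ticks (1 ms) after PTP0 */
		uint64_t ptpn_ticks   = 1000950000ULL;

		/* (PTPn - PTP0) / (freq in GHz), kept in picoseconds so the
		 * rounding error stays below 1 ns before converting back */
		uint64_t delta_ps = (ptpn_ticks - ptp0_ticks) *
				    clk_freq_div * PICO_PER_NSEC / clk_freq_ghz;
		uint64_t ts_ns = ptp_base_ns + delta_ps / PICO_PER_NSEC;

		printf("timestamp: %llu ns\n", (unsigned long long)ts_ns);
		return 0;
	}
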
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_rfoe_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_rfoe_ethtool.c
new file mode 100644
index 000000000000..d697c2e27bec
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_rfoe_ethtool.c
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 BPHY RFOE/CPRI Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "otx2_rfoe.h"
+#include "otx2_bphy_hw.h"
+
+static const char ethtool_stat_strings[][ETH_GSTRING_LEN] = {
+ "oth_rx_packets",
+ "ptp_rx_packets",
+ "ecpri_rx_packets",
+ "rx_bytes",
+ "oth_rx_dropped",
+ "ptp_rx_dropped",
+ "ecpri_rx_dropped",
+ "oth_tx_packets",
+ "ptp_tx_packets",
+ "ecpri_tx_packets",
+ "tx_bytes",
+ "oth_tx_dropped",
+ "ptp_tx_dropped",
+ "ecpri_tx_dropped",
+ "ptp_tx_hwtstamp_failures",
+ "EthIfInFrames",
+ "EthIfInOctets",
+ "EthIfOutFrames",
+ "EthIfOutOctets",
+ "EthIfInUnknownVlan",
+};
+
+static void otx2_rfoe_get_strings(struct net_device *netdev, u32 sset, u8 *data)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ memcpy(data, *ethtool_stat_strings,
+ sizeof(ethtool_stat_strings));
+ break;
+ }
+}
+
+static int otx2_rfoe_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(ethtool_stat_strings);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void otx2_rfoe_update_lmac_stats(struct otx2_rfoe_ndev_priv *priv)
+{
+ struct otx2_rfoe_stats *stats = &priv->stats;
+
+ stats->EthIfInFrames = readq(priv->rfoe_reg_base +
+ RFOEX_RX_CGX_PKT_STAT(priv->rfoe_num,
+ priv->lmac_id));
+ stats->EthIfInOctets = readq(priv->rfoe_reg_base +
+ RFOEX_RX_CGX_OCTS_STAT(priv->rfoe_num,
+ priv->lmac_id));
+ stats->EthIfOutFrames = readq(priv->rfoe_reg_base +
+ RFOEX_TX_PKT_STAT(priv->rfoe_num,
+ priv->lmac_id));
+ stats->EthIfOutOctets = readq(priv->rfoe_reg_base +
+ RFOEX_TX_OCTS_STAT(priv->rfoe_num,
+ priv->lmac_id));
+ stats->EthIfInUnknownVlan =
+ readq(priv->rfoe_reg_base +
+ RFOEX_RX_VLAN_DROP_STAT(priv->rfoe_num,
+ priv->lmac_id));
+}
+
+static void otx2_rfoe_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct otx2_rfoe_ndev_priv *priv = netdev_priv(netdev);
+
+ otx2_rfoe_update_lmac_stats(priv);
+ spin_lock(&priv->stats.lock);
+ memcpy(data, &priv->stats,
+ ARRAY_SIZE(ethtool_stat_strings) * sizeof(u64));
+ spin_unlock(&priv->stats.lock);
+}
+
+static void otx2_rfoe_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *p)
+{
+ struct otx2_rfoe_ndev_priv *priv = netdev_priv(netdev);
+
+ snprintf(p->driver, sizeof(p->driver), "otx2_rfoe {rfoe%d lmac%d}",
+ priv->rfoe_num, priv->lmac_id);
+ strlcpy(p->bus_info, "platform", sizeof(p->bus_info));
+}
+
+static int otx2_rfoe_get_ts_info(struct net_device *netdev,
+ struct ethtool_ts_info *info)
+{
+ struct otx2_rfoe_ndev_priv *priv = netdev_priv(netdev);
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->phc_index = ptp_clock_index(priv->ptp_clock);
+
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
+static u32 otx2_rfoe_get_msglevel(struct net_device *netdev)
+{
+ struct otx2_rfoe_ndev_priv *priv = netdev_priv(netdev);
+
+ return priv->msg_enable;
+}
+
+static void otx2_rfoe_set_msglevel(struct net_device *netdev, u32 level)
+{
+ struct otx2_rfoe_ndev_priv *priv = netdev_priv(netdev);
+
+ priv->msg_enable = level;
+}
+
+static const struct ethtool_ops otx2_rfoe_ethtool_ops = {
+ .get_drvinfo = otx2_rfoe_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_ts_info = otx2_rfoe_get_ts_info,
+ .get_strings = otx2_rfoe_get_strings,
+ .get_sset_count = otx2_rfoe_get_sset_count,
+ .get_ethtool_stats = otx2_rfoe_get_ethtool_stats,
+ .get_msglevel = otx2_rfoe_get_msglevel,
+ .set_msglevel = otx2_rfoe_set_msglevel,
+};
+
+void otx2_rfoe_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &otx2_rfoe_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_rfoe_ptp.c b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_rfoe_ptp.c
new file mode 100644
index 000000000000..a9f58c3bd0ab
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_rfoe_ptp.c
@@ -0,0 +1,268 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell BPHY RFOE PTP PHC support.
+ *
+ * Copyright (C) 2020 Marvell.
+ */
+
+#include "otx2_rfoe.h"
+
+#define EXT_PTP_CLK_RATE (125 * 1000000) /* Ext PTP clk rate */
+
+static int otx2_rfoe_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
+{
+ struct otx2_rfoe_ndev_priv *priv = container_of(ptp_info,
+ struct
+ otx2_rfoe_ndev_priv,
+ ptp_clock_info);
+
+ if (priv->pdev->subsystem_device == PCI_SUBSYS_DEVID_OCTX2_95XXN)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&priv->ptp_lock);
+ timecounter_adjtime(&priv->time_counter, delta);
+ mutex_unlock(&priv->ptp_lock);
+
+ return 0;
+}
+
+static int otx2_rfoe_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct otx2_rfoe_ndev_priv *priv = container_of(ptp,
+ struct
+ otx2_rfoe_ndev_priv,
+ ptp_clock_info);
+ bool neg_adj = false;
+ u64 comp, adj;
+ s64 ppb;
+
+ if (priv->pdev->subsystem_device == PCI_SUBSYS_DEVID_OCTX2_95XXN)
+ return -EOPNOTSUPP;
+
+ if (scaled_ppm < 0) {
+ neg_adj = true;
+ scaled_ppm = -scaled_ppm;
+ }
+
+ /* The hardware adds the clock compensation value to the PTP clock
+ * on every coprocessor clock cycle. The typical convention is that it
+ * represents the number of nanoseconds between cycles. In this
+ * convention the compensation value is a 64-bit fixed-point number
+ * where the upper 32 bits are whole nanoseconds and the lower 32 bits
+ * are fractions of a nanosecond.
+ * The scaled_ppm value is the ratio, in "parts per million" scaled by
+ * 2^16, by which the compensation value should be corrected.
+ * To calculate the new compensation value we use 64-bit fixed-point
+ * arithmetic on the following formula
+ * comp = tbase + tbase * scaled_ppm / (1M * 2^16)
+ * where tbase is the base compensation value calculated
+ * initially in the probe function.
+ */
+ /* convert scaled_ppm to ppb */
+ ppb = 1 + scaled_ppm;
+ ppb *= 125;
+ ppb >>= 13;
+
+ comp = ((u64)1000000000ull << 32) / priv->ptp_ext_clk_rate;
+ adj = comp * ppb;
+ adj = div_u64(adj, 1000000000ull);
+ comp = neg_adj ? comp - adj : comp + adj;
+
+ writeq(comp, priv->ptp_reg_base + MIO_PTP_CLOCK_COMP);
+
+ return 0;
+}
+
+static u64 otx2_rfoe_ptp_cc_read(const struct cyclecounter *cc)
+{
+ struct otx2_rfoe_ndev_priv *priv = container_of(cc, struct
+ otx2_rfoe_ndev_priv,
+ cycle_counter);
+
+ return readq(priv->ptp_reg_base + MIO_PTP_CLOCK_HI);
+}
+
+static int otx2_rfoe_ptp_gettime(struct ptp_clock_info *ptp_info,
+ struct timespec64 *ts)
+{
+ struct otx2_rfoe_ndev_priv *priv = container_of(ptp_info,
+ struct
+ otx2_rfoe_ndev_priv,
+ ptp_clock_info);
+ u64 nsec;
+
+ mutex_lock(&priv->ptp_lock);
+
+ if (priv->pdev->subsystem_device == PCI_SUBSYS_DEVID_OCTX2_95XXN) {
+ nsec = readq(priv->ptp_reg_base + MIO_PTP_CLOCK_HI);
+ otx2_rfoe_calc_ptp_ts(priv, &nsec);
+ } else {
+ nsec = timecounter_read(&priv->time_counter);
+ }
+ mutex_unlock(&priv->ptp_lock);
+
+ *ts = ns_to_timespec64(nsec);
+
+ return 0;
+}
+
+static int otx2_rfoe_ptp_settime(struct ptp_clock_info *ptp_info,
+ const struct timespec64 *ts)
+{
+ struct otx2_rfoe_ndev_priv *priv = container_of(ptp_info,
+ struct
+ otx2_rfoe_ndev_priv,
+ ptp_clock_info);
+ u64 nsec;
+
+ if (priv->pdev->subsystem_device == PCI_SUBSYS_DEVID_OCTX2_95XXN)
+ return -EOPNOTSUPP;
+
+ nsec = timespec64_to_ns(ts);
+
+ mutex_lock(&priv->ptp_lock);
+ timecounter_init(&priv->time_counter, &priv->cycle_counter, nsec);
+ mutex_unlock(&priv->ptp_lock);
+
+ return 0;
+}
+
+static int otx2_rfoe_ptp_verify_pin(struct ptp_clock_info *ptp,
+ unsigned int pin,
+ enum ptp_pin_function func,
+ unsigned int chan)
+{
+ struct otx2_rfoe_ndev_priv *priv = container_of(ptp,
+ struct
+ otx2_rfoe_ndev_priv,
+ ptp_clock_info);
+
+ if (priv->pdev->subsystem_device == PCI_SUBSYS_DEVID_OCTX2_95XXN)
+ return -EOPNOTSUPP;
+
+ switch (func) {
+ case PTP_PF_NONE:
+ case PTP_PF_EXTTS:
+ break;
+ case PTP_PF_PEROUT:
+ case PTP_PF_PHYSYNC:
+ return -1;
+ }
+ return 0;
+}
+
+static void otx2_rfoe_ptp_extts_check(struct work_struct *work)
+{
+ struct otx2_rfoe_ndev_priv *priv = container_of(work, struct
+ otx2_rfoe_ndev_priv,
+ extts_work.work);
+ struct ptp_clock_event event;
+ u64 tstmp, new_thresh;
+
+ mutex_lock(&priv->ptp_lock);
+ tstmp = readq(priv->ptp_reg_base + MIO_PTP_TIMESTAMP);
+ mutex_unlock(&priv->ptp_lock);
+
+ if (tstmp != priv->last_extts) {
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = 0;
+ event.timestamp = timecounter_cyc2time(&priv->time_counter, tstmp);
+ ptp_clock_event(priv->ptp_clock, &event);
+ priv->last_extts = tstmp;
+
+ new_thresh = tstmp % 500000000;
+ if (priv->thresh != new_thresh) {
+ mutex_lock(&priv->ptp_lock);
+ writeq(new_thresh,
+ priv->ptp_reg_base + MIO_PTP_PPS_THRESH_HI);
+ mutex_unlock(&priv->ptp_lock);
+ priv->thresh = new_thresh;
+ }
+ }
+ schedule_delayed_work(&priv->extts_work, msecs_to_jiffies(200));
+}
+
+static int otx2_rfoe_ptp_enable(struct ptp_clock_info *ptp_info,
+ struct ptp_clock_request *rq, int on)
+{
+ struct otx2_rfoe_ndev_priv *priv = container_of(ptp_info,
+ struct
+ otx2_rfoe_ndev_priv,
+ ptp_clock_info);
+ int pin = -1;
+
+ if (priv->pdev->subsystem_device == PCI_SUBSYS_DEVID_OCTX2_95XXN)
+ return -EOPNOTSUPP;
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_EXTTS:
+ pin = ptp_find_pin(priv->ptp_clock, PTP_PF_EXTTS,
+ rq->extts.index);
+ if (pin < 0)
+ return -EBUSY;
+ if (on)
+ schedule_delayed_work(&priv->extts_work,
+ msecs_to_jiffies(200));
+ else
+ cancel_delayed_work_sync(&priv->extts_work);
+ return 0;
+ default:
+ break;
+ }
+ return -EOPNOTSUPP;
+}
+
+static const struct ptp_clock_info otx2_rfoe_ptp_clock_info = {
+ .owner = THIS_MODULE,
+ .name = "RFOE PTP",
+ .max_adj = 1000000000ull,
+ .n_ext_ts = 1,
+ .n_pins = 1,
+ .pps = 0,
+ .adjfine = otx2_rfoe_ptp_adjfine,
+ .adjtime = otx2_rfoe_ptp_adjtime,
+ .gettime64 = otx2_rfoe_ptp_gettime,
+ .settime64 = otx2_rfoe_ptp_settime,
+ .enable = otx2_rfoe_ptp_enable,
+ .verify = otx2_rfoe_ptp_verify_pin,
+};
+
+int otx2_rfoe_ptp_init(struct otx2_rfoe_ndev_priv *priv)
+{
+ struct cyclecounter *cc;
+ int err;
+
+ cc = &priv->cycle_counter;
+ cc->read = otx2_rfoe_ptp_cc_read;
+ cc->mask = CYCLECOUNTER_MASK(64);
+ cc->mult = 1;
+ cc->shift = 0;
+
+ timecounter_init(&priv->time_counter, &priv->cycle_counter,
+ ktime_to_ns(ktime_get_real()));
+ snprintf(priv->extts_config.name, sizeof(priv->extts_config.name),
+ "RFOE TSTAMP");
+ priv->extts_config.index = 0;
+ priv->extts_config.func = PTP_PF_NONE;
+ priv->ptp_clock_info = otx2_rfoe_ptp_clock_info;
+ priv->ptp_ext_clk_rate = EXT_PTP_CLK_RATE;
+ snprintf(priv->ptp_clock_info.name, 16, "%s", priv->netdev->name);
+ priv->ptp_clock_info.pin_config = &priv->extts_config;
+ INIT_DELAYED_WORK(&priv->extts_work, otx2_rfoe_ptp_extts_check);
+ priv->ptp_clock = ptp_clock_register(&priv->ptp_clock_info,
+ &priv->pdev->dev);
+ if (IS_ERR_OR_NULL(priv->ptp_clock)) {
+ err = priv->ptp_clock ? PTR_ERR(priv->ptp_clock) : -ENODEV;
+ priv->ptp_clock = NULL;
+ return err;
+ }
+
+ mutex_init(&priv->ptp_lock);
+
+ return 0;
+}
+
+void otx2_rfoe_ptp_destroy(struct otx2_rfoe_ndev_priv *priv)
+{
+ ptp_clock_unregister(priv->ptp_clock);
+ priv->ptp_clock = NULL;
+}
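
Not part of the patch: a quick arithmetic check of the adjfine() conversion above. scaled_ppm is ppm in 16.16 fixed point, so parts per billion is scaled_ppm * 1000 / 2^16; the driver folds this into the equivalent (scaled_ppm * 125) >> 13 and then scales the Q32.32 base compensation by ppb / 1e9. All values below are illustrative.

	/* check that *125 >> 13 equals *1000 / 65536, and derive the adj */
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		int64_t scaled_ppm = 3 << 16;		/* +3 ppm in 16.16 format */
		int64_t ppb_exact  = scaled_ppm * 1000 / 65536;
		int64_t ppb_folded = (scaled_ppm * 125) >> 13;

		/* 125 / 2^13 == 1000 / 2^16, so both print 3000 here */
		printf("exact=%lld folded=%lld\n",
		       (long long)ppb_exact, (long long)ppb_folded);

		/* Q32.32 ns-per-cycle for the assumed 125 MHz external PTP
		 * clock (EXT_PTP_CLK_RATE): 8.0 ns == 0x8_0000_0000 */
		uint64_t comp = (1000000000ULL << 32) / 125000000ULL;
		uint64_t adj  = comp * (uint64_t)ppb_folded / 1000000000ULL;

		printf("comp=0x%llx adj=0x%llx\n",
		       (unsigned long long)comp, (unsigned long long)adj);
		return 0;
	}
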
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/rfoe_bphy_netdev_comm_if.h b/drivers/net/ethernet/marvell/octeontx2/bphy/rfoe_bphy_netdev_comm_if.h
new file mode 100644
index 000000000000..06ce9660988f
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/rfoe_bphy_netdev_comm_if.h
@@ -0,0 +1,190 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTx2 BPHY RFOE/CPRI Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _RFOE_BPHY_NETDEV_COMM_IF_H_
+#define _RFOE_BPHY_NETDEV_COMM_IF_H_
+
+#include <linux/etherdevice.h>
+#include "bphy_netdev_comm_if.h"
+
+/**
+ * @enum bphy_netdev_tx_gpint
+ * @brief GP_INT numbers for packet notification by netdev to BPHY.
+ *
+ */
+enum bphy_netdev_tx_gpint {
+ TX_GP_INT_RFOE0_LMAC0 = 32, //PSM_GPINT32,
+ TX_GP_INT_RFOE0_LMAC1 = 33, //PSM_GPINT33,
+ TX_GP_INT_RFOE0_LMAC2 = 34, //PSM_GPINT34,
+ TX_GP_INT_RFOE0_LMAC3 = 35, //PSM_GPINT35,
+
+ TX_GP_INT_RFOE1_LMAC0 = 36, //PSM_GPINT36,
+ TX_GP_INT_RFOE1_LMAC1 = 37, //PSM_GPINT37,
+ TX_GP_INT_RFOE1_LMAC2 = 38, //PSM_GPINT38,
+ TX_GP_INT_RFOE1_LMAC3 = 39, //PSM_GPINT39,
+
+ TX_GP_INT_RFOE2_LMAC0 = 40, //PSM_GPINT40,
+ TX_GP_INT_RFOE2_LMAC1 = 41, //PSM_GPINT41
+};
+
+/**
+ * @enum bphy_netdev_rx_gpint
+ * @brief GP_INT numbers for packet notification by BPHY to netdev.
+ *
+ */
+enum bphy_netdev_rx_gpint {
+ RX_GP_INT_RFOE0_PTP = 63, //PSM_GPINT63,
+ RX_GP_INT_RFOE0_ECPRI = 62, //PSM_GPINT62,
+ RX_GP_INT_RFOE0_GENERIC = 61, //PSM_GPINT61,
+
+ RX_GP_INT_RFOE1_PTP = 60, //PSM_GPINT60,
+ RX_GP_INT_RFOE1_ECPRI = 59, //PSM_GPINT59,
+ RX_GP_INT_RFOE1_GENERIC = 58, //PSM_GPINT58,
+
+ RX_GP_INT_RFOE2_PTP = 57, //PSM_GPINT57,
+ RX_GP_INT_RFOE2_ECPRI = 56, //PSM_GPINT56,
+ RX_GP_INT_RFOE2_GENERIC = 55, //PSM_GPINT55
+};
+
+/**
+ * @enum bphy_netdev_cpri_rx_gpint
+ * @brief GP_INT numbers for CPRI Ethernet packet Rx notification to netdev.
+ *
+ */
+enum bphy_netdev_cpri_rx_gpint {
+ RX_GP_INT_CPRI0_ETH = 45, //PSM_GPINT45,
+ RX_GP_INT_CPRI1_ETH = 46, //PSM_GPINT46,
+ RX_GP_INT_CPRI2_ETH = 47, //PSM_GPINT47
+};
+
+/**
+ * @struct bphy_netdev_intf_info
+ * @brief LMAC lane number, mac address and status information
+ *
+ */
+struct bphy_netdev_intf_info {
+ u8 rfoe_num;
+ u8 lane_num;
+ /* Source mac address */
+ u8 eth_addr[ETH_ALEN];
+ /* LMAC interface status */
+ u8 status; //0-DOWN, 1-UP
+ /* Configuration valid status; This interface shall be
+ * invalid if this field is set to 0
+ */
+ u8 is_valid;
+};
+
+/**
+ * @struct bphy_netdev_rbuf_info
+ * @brief Information about the packet ring buffer used to send
+ * the packets from BPHY to netdev.
+ *
+ */
+struct bphy_netdev_rbuf_info {
+ enum bphy_netdev_packet_type pkt_type;
+ /* gp_int = 0 can be treated as pkt type not enabled */
+ enum bphy_netdev_rx_gpint gp_int_num;
+ u16 flow_id;
+ u16 mbt_index;
+ /* Maximum number of buffers in the Ring/Pool */
+ u16 num_bufs;
+ /* MAX Buffer Size configured */
+ u16 buf_size; // TBC: 1536?
+ /* MBT buffer target memory */
+ u8 mbt_target_mem;
+ u8 reserved;
+ /* Buffers starting address */
+ u64 mbt_iova_addr;
+ u16 jdt_index;
+ /* Maximum number of JD buffers in the Ring/Pool */
+ u16 num_jd;
+ /* MAX JD size configured */
+ u8 jd_size;
+ /* JDT buffer target memory */
+ u8 jdt_target_mem;
+ /* JD table starting address */
+ u64 jdt_iova_addr;
+};
+
+/**
+ * @struct bphy_netdev_tx_psm_cmd_info
+ * @brief PSM command information used by the netdev to submit TX jobs to BPHY.
+ *
+ */
+struct bphy_netdev_tx_psm_cmd_info {
+ enum bphy_netdev_tx_gpint gp_int_num; /* Valid only for PTP messages */
+ u64 jd_iova_addr;
+ u64 rd_dma_iova_addr;
+ u64 low_cmd;
+ u64 high_cmd;
+};
+
+/**
+ * @struct bphy_netdev_comm_if
+ * @brief The communication interface definitions used by
+ * the netdev and bphy application.
+ *
+ */
+struct bphy_netdev_comm_if {
+ struct bphy_netdev_intf_info lmac_info;
+ struct bphy_netdev_rbuf_info rbuf_info[PACKET_TYPE_MAX];
+ /* Defining single array to handle both PTP and OTHER cmds info */
+ struct bphy_netdev_tx_psm_cmd_info ptp_pkt_info[MAX_PTP_MSG_PER_LMAC];
+};
+
+/**
+ * @struct bphy_netdev_cpri_if
+ * @brief Communication interface structure definition used by the
+ * BPHY and NETDEV applications for the CPRI interface.
+ *
+ */
+struct bphy_netdev_cpri_if {
+ u8 id; /* CPRI ID 0..2 */
+ u8 active_lane_mask; /* lane mask */
+ u8 ul_gp_int_num; /* UL GP INT NUM */
+ u8 ul_int_threshold; /* UL INT THRESHOLD */
+ u8 num_ul_buf; /* Num UL Buffers */
+ u8 num_dl_buf; /* Num DL Buffers */
+ u8 reserved[2];
+ u64 ul_buf_iova_addr;
+ u64 dl_buf_iova_addr;
+ u8 eth_addr[MAX_LANE_PER_CPRI][ETH_ALEN];
+};
+
+/**
+ * @struct bphy_netdev_rfoe_if
+ * @brief Communication interface structure definition used by the
+ * BPHY and NETDEV applications for the RFOE interface.
+ *
+ */
+struct bphy_netdev_rfoe_if {
+ /* Interface configuration */
+ struct bphy_netdev_comm_if if_cfg[MAX_LMAC_PER_RFOE];
+ /* TX JD cmds to send packets other than PTP;
+ * These are defined per RFoE and all LMAC can share
+ */
+ struct bphy_netdev_tx_psm_cmd_info oth_pkt_info[MAX_OTH_MSG_PER_RFOE];
+ /* Packet types for which the RX flows are configured.*/
+ u8 pkt_type_mask;
+};
+
+/**
+ * @struct bphy_netdev_comm_intf_cfg
+ * @brief ODP-NETDEV communication interface definition structure used to
+ * share the RX/TX interface information.
+ *
+ */
+struct bphy_netdev_comm_intf_cfg {
+ enum bphy_netdev_if_type if_type; /* 0 --> ETHERNET, 1 --> CPRI */
+ struct bphy_netdev_rfoe_if rfoe_if_cfg; /* RFOE INTF configuration */
+ struct bphy_netdev_cpri_if cpri_if_cfg; /* CPRI INTF configuration */
+};
+
+#endif //_RFOE_BPHY_NETDEV_COMM_IF_H_
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/rfoe_common.h b/drivers/net/ethernet/marvell/octeontx2/bphy/rfoe_common.h
new file mode 100644
index 000000000000..6fb7c315bd0f
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/rfoe_common.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell BPHY RFOE Netdev Driver
+ *
+ * Copyright (C) 2021 Marvell.
+ */
+
+#ifndef _OTX2_RFOE_COMMON_H_
+#define _OTX2_RFOE_COMMON_H_
+
+#include <linux/netdevice.h>
+#include <linux/net_tstamp.h>
+
+#include "bphy_netdev_comm_if.h"
+
+/* PTP register offsets */
+#define MIO_PTP_CLOCK_HI 0x10
+#define MIO_PTP_TIMESTAMP 0x20
+#define MIO_PTP_PPS_THRESH_HI 0x58ULL
+#define MIO_PTP_CLOCK_COMP 0x18ULL
+
+/* max tx job entries */
+#define MAX_TX_JOB_ENTRIES 64
+
+/* GPINT(1) RFOE definitions */
+#define RX_PTP_INTR BIT(2) /* PTP packet intr */
+#define RX_ECPRI_INTR BIT(1) /* ECPRI packet intr */
+#define RX_GEN_INTR BIT(0) /* GENERIC packet intr */
+#define RFOE_RX_INTR_EN (RX_PTP_INTR | \
+ RX_ECPRI_INTR | \
+ RX_GEN_INTR)
+/* Interrupt processing definitions */
+#define INTR_TO_PKT_TYPE(a) (PACKET_TYPE_OTHER - (a))
+#define PKT_TYPE_TO_INTR(a) (1UL << (PACKET_TYPE_OTHER - (a)))
+
+enum state {
+ PTP_TX_IN_PROGRESS = 1,
+ RFOE_INTF_DOWN,
+};
+
+/* rfoe rx ind register configuration */
+struct otx2_rfoe_rx_ind_cfg {
+ u8 rfoe_num; /* rfoe idx */
+ u16 rx_ind_idx; /* RFOE(0..2)_RX_INDIRECT_INDEX */
+ u64 regoff; /* RFOE(0..2)_RX_IND_* reg offset */
+ u64 regval; /* input when write, output when read */
+#define OTX2_RFOE_RX_IND_READ 0
+#define OTX2_RFOE_RX_IND_WRITE 1
+ u8 dir; /* register access dir (read/write) */
+};
+
+/* tx job entry */
+struct tx_job_entry {
+ u64 job_cmd_lo;
+ u64 job_cmd_hi;
+ u64 jd_iova_addr;
+ u64 rd_dma_iova_addr;
+ void __iomem *jd_ptr;
+ void __iomem *rd_dma_ptr;
+ void __iomem *jd_cfg_ptr;
+};
+
+/* tx job queue */
+struct tx_job_queue_cfg {
+ u8 psm_queue_id;
+ struct tx_job_entry job_entries[MAX_TX_JOB_ENTRIES];
+ /* actual number of entries configured by ODP */
+ int num_entries;
+ /* queue index */
+ int q_idx;
+ /* lmac protection lock */
+ spinlock_t lock;
+};
+
+/* rfoe common (for all lmac's) */
+struct rfoe_common_cfg {
+ struct tx_job_queue_cfg tx_oth_job_cfg;
+ int rx_mbt_last_idx[PACKET_TYPE_MAX];
+ u16 nxt_buf[PACKET_TYPE_MAX];
+ u8 refcnt;
+ u8 rx_vlan_fwd_refcnt[VLAN_N_VID];
+};
+
+/* ptp pending skb list */
+struct ptp_tx_skb_list {
+ struct list_head list;
+ unsigned int count;
+};
+
+/* ptp skb list entry */
+struct ptp_tstamp_skb {
+ struct list_head list;
+ struct sk_buff *skb;
+};
+
+struct otx2_rfoe_stats {
+ /* rx */
+ u64 rx_packets; /* rx packets */
+ u64 ptp_rx_packets; /* ptp rx packets */
+ u64 ecpri_rx_packets; /* ecpri rx packets */
+ u64 rx_bytes; /* rx bytes count */
+ u64 rx_dropped; /* rx dropped */
+ u64 ptp_rx_dropped; /* ptp rx dropped */
+ u64 ecpri_rx_dropped; /* ecpri rx dropped */
+
+ /* tx */
+ u64 tx_packets; /* tx packets */
+ u64 ptp_tx_packets; /* ptp tx packets */
+ u64 ecpri_tx_packets; /* ecpri tx packets */
+ u64 tx_bytes; /* tx bytes count */
+ u64 tx_dropped; /* tx dropped */
+ u64 ptp_tx_dropped; /* ptp tx dropped */
+ u64 ecpri_tx_dropped; /* ecpri tx dropped */
+ u64 tx_hwtstamp_failures; /* ptp tx timestamp failures */
+
+ /* per LMAC stats */
+ u64 EthIfInFrames;
+ u64 EthIfInOctets;
+ u64 EthIfOutFrames;
+ u64 EthIfOutOctets;
+ u64 EthIfInUnknownVlan;
+
+ /* stats update lock */
+ spinlock_t lock;
+};
+
+struct otx2_rfoe_link_event {
+ u8 rfoe_num;
+ u8 lmac_id;
+ u8 link_state;
+};
+
+#endif
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
index b2c6385707c9..c42abc2593e0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -1,13 +1,17 @@
# SPDX-License-Identifier: GPL-2.0
#
-# Makefile for Marvell's OcteonTX2 ethernet device drivers
+# Makefile for Marvell's RVU Ethernet device drivers
#
-obj-$(CONFIG_OCTEONTX2_PF) += octeontx2_nicpf.o
-obj-$(CONFIG_OCTEONTX2_VF) += octeontx2_nicvf.o
+obj-$(CONFIG_OCTEONTX2_PF) += rvu_nicpf.o otx2_ptp.o
+obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o otx2_ptp.o
-octeontx2_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
- otx2_ptp.o
-octeontx2_nicvf-y := otx2_vf.o
+rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
+ otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o \
+ otx2_devlink.o
+rvu_nicvf-y := otx2_vf.o otx2_smqvf.o otx2_devlink.o
+
+rvu_nicpf-$(CONFIG_DCB) += otx2_dcbnl.o
+rvu_nicvf-$(CONFIG_DCB) += otx2_dcbnl.o
ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
new file mode 100644
index 000000000000..d844611cad83
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
@@ -0,0 +1,482 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2021 Marvell.
+ *
+ */
+
+#include "cn10k.h"
+#include "otx2_reg.h"
+#include "otx2_struct.h"
+
+static struct dev_hw_ops otx2_hw_ops = {
+ .sq_aq_init = otx2_sq_aq_init,
+ .sqe_flush = otx2_sqe_flush,
+ .aura_freeptr = otx2_aura_freeptr,
+ .refill_pool_ptrs = otx2_refill_pool_ptrs,
+};
+
+static struct dev_hw_ops cn10k_hw_ops = {
+ .sq_aq_init = cn10k_sq_aq_init,
+ .sqe_flush = cn10k_sqe_flush,
+ .aura_freeptr = cn10k_aura_freeptr,
+ .refill_pool_ptrs = cn10k_refill_pool_ptrs,
+};
+
+int cn10k_lmtst_init(struct otx2_nic *pfvf)
+{
+ struct lmtst_tbl_setup_req *req;
+ struct otx2_lmt_info *lmt_info;
+ int err, cpu;
+
+ if (!test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) {
+ pfvf->hw_ops = &otx2_hw_ops;
+ return 0;
+ }
+
+ pfvf->hw_ops = &cn10k_hw_ops;
+ /* Total LMTLINES = num_online_cpus() * 32 (for burst flush). */
+ pfvf->tot_lmt_lines = (num_online_cpus() * LMT_BURST_SIZE);
+ pfvf->hw.lmt_info = alloc_percpu(struct otx2_lmt_info);
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_lmtst_tbl_setup(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->use_local_lmt_region = true;
+
+ err = qmem_alloc(pfvf->dev, &pfvf->dync_lmt, pfvf->tot_lmt_lines,
+ LMT_LINE_SIZE);
+ if (err) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+ }
+ pfvf->hw.lmt_base = (u64 *)pfvf->dync_lmt->base;
+ req->lmt_iova = (u64)pfvf->dync_lmt->iova;
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+
+ for_each_possible_cpu(cpu) {
+ lmt_info = per_cpu_ptr(pfvf->hw.lmt_info, cpu);
+ lmt_info->lmt_addr = ((u64)pfvf->hw.lmt_base +
+ (cpu * LMT_BURST_SIZE * LMT_LINE_SIZE));
+ lmt_info->lmt_id = cpu * LMT_BURST_SIZE;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(cn10k_lmtst_init);
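Each CPU is handed a private window of LMT_BURST_SIZE lines out of the dynamically allocated region, so the LMT id and the LMT address advance in lock-step. A minimal userspace sketch of that carving (illustrative only; the base address is an assumed value, the two macros mirror the definitions added to otx2_common.h further below):

#include <stdio.h>
#include <stdint.h>

#define LMT_LINE_SIZE	128	/* bytes per LMT line */
#define LMT_BURST_SIZE	32	/* LMT lines reserved per CPU */

int main(void)
{
	uint64_t lmt_base = 0x100000000ULL;	/* assumed qmem base address */
	unsigned int cpu;

	for (cpu = 0; cpu < 4; cpu++)
		printf("cpu%u: lmt_id=%u lmt_addr=%#llx\n", cpu,
		       cpu * LMT_BURST_SIZE,
		       (unsigned long long)(lmt_base +
				(uint64_t)cpu * LMT_BURST_SIZE * LMT_LINE_SIZE));
	return 0;
}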
+
+int cn10k_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura)
+{
+ struct nix_cn10k_aq_enq_req *aq;
+ struct otx2_nic *pfvf = dev;
+
+ /* Get memory to put this msg */
+ aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
+ if (!aq)
+ return -ENOMEM;
+
+ aq->sq.cq = pfvf->hw.rx_queues + qidx;
+ aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */
+ aq->sq.cq_ena = 1;
+ aq->sq.ena = 1;
+ /* Only one SMQ is allocated, map all SQ's to that SMQ */
+ aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
+ aq->sq.smq_rr_weight = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
+ aq->sq.default_chan = pfvf->hw.tx_chan_base + chan_offset;
+ aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
+ aq->sq.sqb_aura = sqb_aura;
+ aq->sq.sq_int_ena = NIX_SQINT_BITS;
+ aq->sq.qint_idx = 0;
+ /* Due to pipelining impact, a minimum of 2000 unused SQ CQEs
+ * needs to be maintained to avoid CQ overflow.
+ */
+ aq->sq.cq_limit = ((SEND_CQ_SKID * 256) / (pfvf->qset.sqe_cnt));
+
+ /* Fill AQ info */
+ aq->qidx = qidx;
+ aq->ctype = NIX_AQ_CTYPE_SQ;
+ aq->op = NIX_AQ_INSTOP_INIT;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
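The cq_limit programmed above appears to be expressed in 1/256ths of the CQE ring, which is how SEND_CQ_SKID maps onto the "2000 unused CQEs" requirement. A small arithmetic check under that assumption (the unit interpretation is inferred from the skid comment, not taken from the HRM), for an assumed 4K-entry ring:

#include <stdio.h>

#define SEND_CQ_SKID	2000

int main(void)
{
	unsigned int sqe_cnt = 4096;	/* assumed SQE/CQE ring size */
	unsigned int cq_limit = (SEND_CQ_SKID * 256) / sqe_cnt;

	/* converting back: cq_limit/256 of the ring stays reserved */
	printf("cq_limit=%u -> ~%u CQEs kept free\n",
	       cq_limit, cq_limit * sqe_cnt / 256);
	return 0;
}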
+
+#define NPA_MAX_BURST 16
+void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
+{
+ struct otx2_nic *pfvf = dev;
+ u64 ptrs[NPA_MAX_BURST];
+ int num_ptrs = 1;
+ dma_addr_t bufptr;
+
+ /* Refill pool with new buffers */
+ while (cq->pool_ptrs) {
+ if (otx2_alloc_buffer(pfvf, cq, &bufptr)) {
+ if (num_ptrs--)
+ __cn10k_aura_freeptr(pfvf, cq->cq_idx, ptrs,
+ num_ptrs);
+ break;
+ }
+ cq->pool_ptrs--;
+ ptrs[num_ptrs] = (u64)bufptr + OTX2_HEAD_ROOM;
+ num_ptrs++;
+ if (num_ptrs == NPA_MAX_BURST || cq->pool_ptrs == 0) {
+ __cn10k_aura_freeptr(pfvf, cq->cq_idx, ptrs,
+ num_ptrs);
+ num_ptrs = 1;
+ }
+ }
+}
+
+void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx)
+{
+ struct otx2_lmt_info *lmt_info;
+ struct otx2_nic *pfvf = dev;
+ u64 val = 0, tar_addr = 0;
+
+ lmt_info = per_cpu_ptr(pfvf->hw.lmt_info, smp_processor_id());
+ /* FIXME: val[0:10] LMT_ID.
+ * [12:15] no of LMTST - 1 in the burst.
+ * [19:63] data size of each LMTST in the burst except first.
+ */
+ val = (lmt_info->lmt_id & 0x7FF);
+ /* Target address for LMTST flush tells HW how many 128bit
+ * words are present.
+ * tar_addr[6:4] holds the size of the first LMTST - 1, in units of 128b.
+ */
+ tar_addr |= sq->io_addr | (((size / 16) - 1) & 0x7) << 4;
+ dma_wmb();
+ memcpy((u64 *)lmt_info->lmt_addr, sq->sqe_base, size);
+ cn10k_lmt_flush(val, tar_addr);
+
+ sq->head++;
+ sq->head &= (sq->sqe_cnt - 1);
+}
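The val/tar_addr packing used by the flush is plain bit arithmetic, so it can be reproduced standalone. A sketch for a single 128-byte SQE, following the field layout given in the comments above (the I/O address and LMT id are assumed example values):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t io_addr = 0x840000000000ULL;	/* assumed NIX_LF_OP_SENDX address */
	uint64_t lmt_id = 5;			/* assumed per-CPU LMT id */
	unsigned int size = 128;		/* one SQE descriptor, in bytes */
	uint64_t val, tar_addr;

	val = lmt_id & 0x7FF;			/* [10:0] LMT_ID, single LMTST */
	/* [6:4] = number of 128-bit words in the first LMTST minus 1 */
	tar_addr = io_addr | ((((size / 16) - 1) & 0x7) << 4);

	printf("val=%#llx tar_addr=%#llx\n",
	       (unsigned long long)val, (unsigned long long)tar_addr);
	return 0;
}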
+
+int cn10k_free_all_ipolicers(struct otx2_nic *pfvf)
+{
+ struct nix_bandprof_free_req *req;
+ int rc;
+
+ if (is_dev_otx2(pfvf->pdev))
+ return 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_nix_bandprof_free(&pfvf->mbox);
+ if (!req) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Free all bandwidth profiles allocated */
+ req->free_all = true;
+
+ rc = otx2_sync_mbox_msg(&pfvf->mbox);
+out:
+ mutex_unlock(&pfvf->mbox.lock);
+ return rc;
+}
+
+int cn10k_alloc_leaf_profile(struct otx2_nic *pfvf, u16 *leaf)
+{
+ struct nix_bandprof_alloc_req *req;
+ struct nix_bandprof_alloc_rsp *rsp;
+ int rc;
+
+ req = otx2_mbox_alloc_msg_nix_bandprof_alloc(&pfvf->mbox);
+ if (!req)
+ return -ENOMEM;
+
+ req->prof_count[BAND_PROF_LEAF_LAYER] = 1;
+
+ rc = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (rc)
+ goto out;
+
+ rsp = (struct nix_bandprof_alloc_rsp *)
+ otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+ if (!rsp->prof_count[BAND_PROF_LEAF_LAYER]) {
+ rc = -EIO;
+ goto out;
+ }
+
+ *leaf = rsp->prof_idx[BAND_PROF_LEAF_LAYER][0];
+out:
+ if (rc) {
+ dev_warn(pfvf->dev,
+ "Failed to allocate ingress bandwidth policer\n");
+ }
+
+ return rc;
+}
+
+int cn10k_alloc_matchall_ipolicer(struct otx2_nic *pfvf)
+{
+ struct otx2_hw *hw = &pfvf->hw;
+ int ret;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ ret = cn10k_alloc_leaf_profile(pfvf, &hw->matchall_ipolicer);
+
+ mutex_unlock(&pfvf->mbox.lock);
+
+ return ret;
+}
+
+#define POLICER_TIMESTAMP 1 /* 1 second */
+#define MAX_RATE_EXP 22 /* Valid rate exponent range: 0 - 22 */
+
+static void cn10k_get_ingress_burst_cfg(u32 burst, u32 *burst_exp,
+ u32 *burst_mantissa)
+{
+ int tmp;
+
+ /* Burst is calculated as
+ * (1+[BURST_MANTISSA]/256)*2^[BURST_EXPONENT]
+ * This is the upper limit on the number of tokens (bytes) that
+ * can be accumulated in the bucket.
+ */
+ *burst_exp = ilog2(burst);
+ if (burst < 256) {
+ /* No float: can't express mantissa in this case */
+ *burst_mantissa = 0;
+ return;
+ }
+
+ if (*burst_exp > MAX_RATE_EXP)
+ *burst_exp = MAX_RATE_EXP;
+
+ /* Calculate mantissa
+ * Find remaining bytes 'burst - 2^burst_exp'
+ * mantissa = (remaining bytes) / 2^ (burst_exp - 8)
+ */
+ tmp = burst - rounddown_pow_of_two(burst);
+ *burst_mantissa = tmp / (1UL << (*burst_exp - 8));
+}
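A worked check of the burst encoding: reconstructing the burst from the exponent/mantissa pair with (1 + mantissa/256) * 2^exponent should land back on the requested value. This standalone sketch mirrors the burst >= 256 path above for an arbitrary example burst:

#include <stdio.h>

int main(void)
{
	unsigned int burst = 12000;	/* example burst in bytes */
	unsigned int exp = 0, mant, tmp;

	/* ilog2(): index of the highest set bit */
	for (tmp = burst; tmp > 1; tmp >>= 1)
		exp++;

	/* remaining bytes over 2^exp, in steps of 2^exp / 256 */
	mant = (burst - (1U << exp)) / (1U << (exp - 8));

	/* decode: (1 + mant/256) * 2^exp */
	printf("exp=%u mant=%u decoded=%u bytes\n",
	       exp, mant, (256 + mant) * (1U << exp) / 256);
	return 0;
}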
+
+static void cn10k_get_ingress_rate_cfg(u64 rate, u32 *rate_exp,
+ u32 *rate_mantissa, u32 *rdiv)
+{
+ u32 div = 0;
+ u32 exp = 0;
+ u64 tmp;
+
+ /* Figure out mantissa, exponent and divider from the given max pkt rate.
+ *
+ * To achieve the desired rate, HW adds
+ * (1+[RATE_MANTISSA]/256)*2^[RATE_EXPONENT] tokens (bytes) at every
+ * policer timeunit * 2^rdiv, i.e. 2 * 2^rdiv usecs, to the token bucket.
+ * Here the policer timeunit is 2 usecs and the rate is in bits per sec.
+ * Since floating point cannot be used, the algorithm below uses a
+ * 1000000 scale factor to support rates up to 100Gbps.
+ */
+ tmp = rate * 32 * 2;
+ if (tmp < 256000000) {
+ while (tmp < 256000000) {
+ tmp = tmp * 2;
+ div++;
+ }
+ } else {
+ for (exp = 0; tmp >= 512000000 && exp <= MAX_RATE_EXP; exp++)
+ tmp = tmp / 2;
+
+ if (exp > MAX_RATE_EXP)
+ exp = MAX_RATE_EXP;
+ }
+
+ *rate_mantissa = (tmp - 256000000) / 1000000;
+ *rate_exp = exp;
+ *rdiv = div;
+}
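The rate encoding can be checked the same way: encode an example rate with the scaling above, then decode it as "(1 + mantissa/256) * 2^exponent bytes every 2 * 2^rdiv usecs". A standalone sketch (the MAX_RATE_EXP clamp is omitted since the example rate never reaches it):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t rate = 1000000000ULL;	/* example: 1 Gbps */
	uint64_t tmp = rate * 32 * 2;	/* same scale factor as above */
	unsigned int exp = 0, div = 0, mant;

	if (tmp < 256000000ULL) {
		while (tmp < 256000000ULL) {
			tmp *= 2;
			div++;
		}
	} else {
		while (tmp >= 512000000ULL) {
			tmp /= 2;
			exp++;
		}
	}
	mant = (tmp - 256000000ULL) / 1000000;

	/* decode: (1 + mant/256) * 2^exp bytes every 2 * 2^div usecs */
	printf("exp=%u mant=%u rdiv=%u -> %llu bps\n", exp, mant, div,
	       (unsigned long long)((256 + mant) * (1ULL << exp) * 8ULL *
				    1000000 / (256 * 2 * (1ULL << div))));
	return 0;
}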
+
+int cn10k_map_unmap_rq_policer(struct otx2_nic *pfvf, int rq_idx,
+ u16 policer, bool map)
+{
+ struct nix_cn10k_aq_enq_req *aq;
+
+ aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
+ if (!aq)
+ return -ENOMEM;
+
+ /* Enable policing and set the bandwidth profile (policer) index */
+ if (map)
+ aq->rq.policer_ena = 1;
+ else
+ aq->rq.policer_ena = 0;
+ aq->rq_mask.policer_ena = 1;
+
+ aq->rq.band_prof_id = policer;
+ aq->rq_mask.band_prof_id = GENMASK(9, 0);
+
+ /* Fill AQ info */
+ aq->qidx = rq_idx;
+ aq->ctype = NIX_AQ_CTYPE_RQ;
+ aq->op = NIX_AQ_INSTOP_WRITE;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+
+int cn10k_free_leaf_profile(struct otx2_nic *pfvf, u16 leaf)
+{
+ struct nix_bandprof_free_req *req;
+
+ req = otx2_mbox_alloc_msg_nix_bandprof_free(&pfvf->mbox);
+ if (!req)
+ return -ENOMEM;
+
+ req->prof_count[BAND_PROF_LEAF_LAYER] = 1;
+ req->prof_idx[BAND_PROF_LEAF_LAYER][0] = leaf;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+
+int cn10k_free_matchall_ipolicer(struct otx2_nic *pfvf)
+{
+ struct otx2_hw *hw = &pfvf->hw;
+ int qidx, rc;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ /* Remove RQ's policer mapping */
+ for (qidx = 0; qidx < hw->rx_queues; qidx++)
+ cn10k_map_unmap_rq_policer(pfvf, qidx,
+ hw->matchall_ipolicer, false);
+
+ rc = cn10k_free_leaf_profile(pfvf, hw->matchall_ipolicer);
+
+ mutex_unlock(&pfvf->mbox.lock);
+ return rc;
+}
+
+int cn10k_set_ipolicer_rate(struct otx2_nic *pfvf, u16 profile,
+ u32 burst, u64 rate, bool pps)
+{
+ struct nix_cn10k_aq_enq_req *aq;
+ u32 burst_exp, burst_mantissa;
+ u32 rate_exp, rate_mantissa;
+ u32 rdiv;
+
+ /* Get exponent and mantissa values for the desired rate */
+ cn10k_get_ingress_burst_cfg(burst, &burst_exp, &burst_mantissa);
+ cn10k_get_ingress_rate_cfg(rate, &rate_exp, &rate_mantissa, &rdiv);
+
+ /* Init bandwidth profile */
+ aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
+ if (!aq)
+ return -ENOMEM;
+
+ /* Set initial color mode to blind */
+ aq->prof.icolor = 0x03;
+ aq->prof_mask.icolor = 0x03;
+
+ /* Set rate and burst values */
+ aq->prof.cir_exponent = rate_exp;
+ aq->prof_mask.cir_exponent = 0x1F;
+
+ aq->prof.cir_mantissa = rate_mantissa;
+ aq->prof_mask.cir_mantissa = 0xFF;
+
+ aq->prof.cbs_exponent = burst_exp;
+ aq->prof_mask.cbs_exponent = 0x1F;
+
+ aq->prof.cbs_mantissa = burst_mantissa;
+ aq->prof_mask.cbs_mantissa = 0xFF;
+
+ aq->prof.rdiv = rdiv;
+ aq->prof_mask.rdiv = 0xF;
+
+ if (pps) {
+ /* The number of tokens decremented is calculated according to
+ * the following equation:
+ * max([ LMODE ? 0 : (packet_length - LXPTR)] +
+ * ([ADJUST_MANTISSA]/256 - 1) * 2^[ADJUST_EXPONENT],
+ * 1/256)
+ * If LMODE is 1 then rate limiting is based on PPS,
+ * otherwise on bps.
+ * The aim of the ADJUST value is to specify a token cost per
+ * packet, as opposed to the packet length which specifies a
+ * cost per byte. To rate limit based on PPS, the adjust mantissa
+ * is set to 384 and the exponent to 1 so that the number of
+ * tokens decremented becomes 1, i.e. 1 token per packet.
+ */
+ aq->prof.adjust_exponent = 1;
+ aq->prof_mask.adjust_exponent = 0x1F;
+
+ aq->prof.adjust_mantissa = 384;
+ aq->prof_mask.adjust_mantissa = 0x1FF;
+
+ aq->prof.lmode = 0x1;
+ aq->prof_mask.lmode = 0x1;
+ }
+
+ /* Two rate three color marker
+ * With PEIR/EIR set to zero, color will be either green or red
+ */
+ aq->prof.meter_algo = 2;
+ aq->prof_mask.meter_algo = 0x3;
+
+ aq->prof.rc_action = NIX_RX_BAND_PROF_ACTIONRESULT_DROP;
+ aq->prof_mask.rc_action = 0x3;
+
+ aq->prof.yc_action = NIX_RX_BAND_PROF_ACTIONRESULT_PASS;
+ aq->prof_mask.yc_action = 0x3;
+
+ aq->prof.gc_action = NIX_RX_BAND_PROF_ACTIONRESULT_PASS;
+ aq->prof_mask.gc_action = 0x3;
+
+ /* Setting the exponent to 24 and the mantissa to 0 configures
+ * the bucket with zero values, making the bucket unused. The Peak
+ * Information Rate and Excess Information Rate buckets are
+ * unused here.
+ */
+ aq->prof.peir_exponent = 24;
+ aq->prof_mask.peir_exponent = 0x1F;
+
+ aq->prof.peir_mantissa = 0;
+ aq->prof_mask.peir_mantissa = 0xFF;
+
+ aq->prof.pebs_exponent = 24;
+ aq->prof_mask.pebs_exponent = 0x1F;
+
+ aq->prof.pebs_mantissa = 0;
+ aq->prof_mask.pebs_mantissa = 0xFF;
+
+ /* Fill AQ info */
+ aq->qidx = profile;
+ aq->ctype = NIX_AQ_CTYPE_BANDPROF;
+ aq->op = NIX_AQ_INSTOP_WRITE;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
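For the PPS case, the ADJUST constants can be verified with plain arithmetic: LMODE = 1 zeroes the packet-length term, and (384/256 - 1) * 2^1 leaves exactly one token per packet. A tiny check, scaled by 256 to stay in integer math:

#include <stdio.h>

int main(void)
{
	int adjust_mantissa = 384, adjust_exponent = 1;
	/* tokens*256 = (mantissa - 256) * 2^exponent, floored at 1/256 */
	int tokens_x256 = (adjust_mantissa - 256) * (1 << adjust_exponent);

	if (tokens_x256 < 1)
		tokens_x256 = 1;
	printf("tokens per packet = %d/256 = %d\n",
	       tokens_x256, tokens_x256 / 256);
	return 0;
}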
+
+int cn10k_set_matchall_ipolicer_rate(struct otx2_nic *pfvf,
+ u32 burst, u64 rate)
+{
+ struct otx2_hw *hw = &pfvf->hw;
+ int qidx, rc;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ rc = cn10k_set_ipolicer_rate(pfvf, hw->matchall_ipolicer, burst,
+ rate, false);
+ if (rc)
+ goto out;
+
+ for (qidx = 0; qidx < hw->rx_queues; qidx++) {
+ rc = cn10k_map_unmap_rq_policer(pfvf, qidx,
+ hw->matchall_ipolicer, true);
+ if (rc)
+ break;
+ }
+
+out:
+ mutex_unlock(&pfvf->mbox.lock);
+ return rc;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
new file mode 100644
index 000000000000..28b3b3275fe6
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2021 Marvell.
+ *
+ */
+
+#ifndef CN10K_H
+#define CN10K_H
+
+#include "otx2_common.h"
+
+static inline int mtu_to_dwrr_weight(struct otx2_nic *pfvf, int mtu)
+{
+ u32 weight;
+
+ /* On OTx2, since AF returns DWRR_MTU as '1', this logic
+ * will work on those silicons as well.
+ */
+ weight = mtu / pfvf->hw.dwrr_mtu;
+ if (mtu % pfvf->hw.dwrr_mtu)
+ weight += 1;
+
+ return weight;
+}
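A quick illustration of the round-up behaviour above; the DWRR MTU of 8192 is only an assumed example of what the AF might report:

#include <stdio.h>

int main(void)
{
	unsigned int dwrr_mtu = 8192;		/* assumed AF-reported DWRR MTU */
	unsigned int mtu[] = { 1518, 8192, 9216 };
	unsigned int i, weight;

	for (i = 0; i < 3; i++) {
		weight = mtu[i] / dwrr_mtu;
		if (mtu[i] % dwrr_mtu)
			weight += 1;
		printf("mtu %u -> weight %u\n", mtu[i], weight);
	}
	return 0;
}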
+
+void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
+void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx);
+int cn10k_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura);
+int cn10k_lmtst_init(struct otx2_nic *pfvf);
+int cn10k_free_all_ipolicers(struct otx2_nic *pfvf);
+int cn10k_alloc_matchall_ipolicer(struct otx2_nic *pfvf);
+int cn10k_free_matchall_ipolicer(struct otx2_nic *pfvf);
+int cn10k_set_matchall_ipolicer_rate(struct otx2_nic *pfvf,
+ u32 burst, u64 rate);
+int cn10k_map_unmap_rq_policer(struct otx2_nic *pfvf, int rq_idx,
+ u16 policer, bool map);
+int cn10k_alloc_leaf_profile(struct otx2_nic *pfvf, u16 *leaf);
+int cn10k_set_ipolicer_rate(struct otx2_nic *pfvf, u16 profile,
+ u32 burst, u64 rate, bool pps);
+int cn10k_free_leaf_profile(struct otx2_nic *pfvf, u16 leaf);
+#endif /* CN10K_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index b062ed06235d..58ddd0c54a79 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -1,11 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
-/* Marvell OcteonTx2 RVU Ethernet driver
+/* Marvell RVU Ethernet driver
*
- * Copyright (C) 2020 Marvell International Ltd.
+ * Copyright (C) 2020 Marvell.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/interrupt.h>
@@ -15,6 +12,7 @@
#include "otx2_reg.h"
#include "otx2_common.h"
#include "otx2_struct.h"
+#include "cn10k.h"
static void otx2_nix_rq_op_stats(struct queue_stats *stats,
struct otx2_nic *pfvf, int qidx)
@@ -60,6 +58,19 @@ void otx2_update_lmac_stats(struct otx2_nic *pfvf)
mutex_unlock(&pfvf->mbox.lock);
}
+void otx2_update_lmac_fec_stats(struct otx2_nic *pfvf)
+{
+ struct msg_req *req;
+
+ if (!netif_running(pfvf->netdev))
+ return;
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_fec_stats(&pfvf->mbox);
+ if (req)
+ otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+}
+
int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx)
{
struct otx2_rcv_queue *rq = &pfvf->qset.rq[qidx];
@@ -191,10 +202,18 @@ int otx2_set_mac_address(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- if (!otx2_hw_set_mac_addr(pfvf, addr->sa_data))
+ if (!otx2_hw_set_mac_addr(pfvf, addr->sa_data)) {
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
- else
+ /* update dmac field in vlan offload rule */
+ if (netif_running(netdev) &&
+ pfvf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
+ otx2_install_rxvlan_offload_flow(pfvf);
+ /* update dmac address in ntuple and DMAC filter list */
+ if (pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT)
+ otx2_dmacflt_update_pfmac_flow(pfvf);
+ } else {
return -EPERM;
+ }
return 0;
}
@@ -203,8 +222,11 @@ EXPORT_SYMBOL(otx2_set_mac_address);
int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu)
{
struct nix_frs_cfg *req;
+ u16 maxlen;
int err;
+ maxlen = otx2_get_max_mtu(pfvf) + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
+
mutex_lock(&pfvf->mbox.lock);
req = otx2_mbox_alloc_msg_nix_set_hw_frs(&pfvf->mbox);
if (!req) {
@@ -212,10 +234,18 @@ int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu)
return -ENOMEM;
}
- pfvf->max_frs = mtu + OTX2_ETH_HLEN;
- req->maxlen = pfvf->max_frs;
+ /* Add EDSA/HIGIG2 header length and timestamp length to maxlen */
+ req->maxlen = pfvf->netdev->mtu + OTX2_ETH_HLEN + pfvf->addl_mtu +
+ OTX2_HW_TIMESTAMP_LEN + pfvf->xtra_hdr;
+
+ if (is_otx2_lbkvf(pfvf->pdev))
+ req->maxlen = maxlen;
+
+ if (is_otx2_sdpvf(pfvf->pdev))
+ req->sdp_link = true;
err = otx2_sync_mbox_msg(&pfvf->mbox);
+
mutex_unlock(&pfvf->mbox.lock);
return err;
}
@@ -225,7 +255,7 @@ int otx2_config_pause_frm(struct otx2_nic *pfvf)
struct cgx_pause_frm_cfg *req;
int err;
- if (is_otx2_lbkvf(pfvf->pdev))
+ if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdpvf(pfvf->pdev))
return 0;
mutex_lock(&pfvf->mbox.lock);
@@ -244,10 +274,32 @@ unlock:
mutex_unlock(&pfvf->mbox.lock);
return err;
}
+EXPORT_SYMBOL(otx2_config_pause_frm);
+
+int otx2_config_serdes_link_state(struct otx2_nic *pfvf, bool en)
+{
+ struct cgx_set_link_state_msg *req;
+ int err;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_set_link_state(&pfvf->mbox);
+ if (!req) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ req->enable = !!en;
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+unlock:
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+}
+EXPORT_SYMBOL(otx2_config_serdes_link_state);
int otx2_set_flowkey_cfg(struct otx2_nic *pfvf)
{
struct otx2_rss_info *rss = &pfvf->hw.rss_info;
+ struct nix_rss_flowkey_cfg_rsp *rsp;
struct nix_rss_flowkey_cfg *req;
int err;
@@ -262,18 +314,33 @@ int otx2_set_flowkey_cfg(struct otx2_nic *pfvf)
req->group = DEFAULT_RSS_CONTEXT_GROUP;
err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err)
+ goto fail;
+
+ rsp = (struct nix_rss_flowkey_cfg_rsp *)
+ otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+ if (IS_ERR(rsp)) {
+ err = PTR_ERR(rsp);
+ goto fail;
+ }
+
+ pfvf->hw.flowkey_alg_idx = rsp->alg_idx;
+fail:
mutex_unlock(&pfvf->mbox.lock);
return err;
}
-int otx2_set_rss_table(struct otx2_nic *pfvf)
+int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id)
{
struct otx2_rss_info *rss = &pfvf->hw.rss_info;
+ const int index = rss->rss_size * ctx_id;
struct mbox *mbox = &pfvf->mbox;
+ struct otx2_rss_ctx *rss_ctx;
struct nix_aq_enq_req *aq;
int idx, err;
mutex_lock(&mbox->lock);
+ rss_ctx = rss->rss_ctx[ctx_id];
/* Get memory to put this msg */
for (idx = 0; idx < rss->rss_size; idx++) {
aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
@@ -293,10 +360,10 @@ int otx2_set_rss_table(struct otx2_nic *pfvf)
}
}
- aq->rss.rq = rss->ind_tbl[idx];
+ aq->rss.rq = rss_ctx->ind_tbl[idx];
/* Fill AQ info */
- aq->qidx = idx;
+ aq->qidx = index + idx;
aq->ctype = NIX_AQ_CTYPE_RSS;
aq->op = NIX_AQ_INSTOP_INIT;
}
@@ -331,9 +398,10 @@ void otx2_set_rss_key(struct otx2_nic *pfvf)
int otx2_rss_init(struct otx2_nic *pfvf)
{
struct otx2_rss_info *rss = &pfvf->hw.rss_info;
+ struct otx2_rss_ctx *rss_ctx;
int idx, ret = 0;
- rss->rss_size = sizeof(rss->ind_tbl);
+ rss->rss_size = sizeof(*rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP]);
/* Init RSS key if it is not setup already */
if (!rss->enable)
@@ -341,13 +409,19 @@ int otx2_rss_init(struct otx2_nic *pfvf)
otx2_set_rss_key(pfvf);
if (!netif_is_rxfh_configured(pfvf->netdev)) {
- /* Default indirection table */
+ /* Set RSS group 0 as default indirection table */
+ rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP] = kzalloc(rss->rss_size,
+ GFP_KERNEL);
+ if (!rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP])
+ return -ENOMEM;
+
+ rss_ctx = rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP];
for (idx = 0; idx < rss->rss_size; idx++)
- rss->ind_tbl[idx] =
+ rss_ctx->ind_tbl[idx] =
ethtool_rxfh_indir_default(idx,
pfvf->hw.rx_queues);
}
- ret = otx2_set_rss_table(pfvf);
+ ret = otx2_set_rss_table(pfvf, DEFAULT_RSS_CONTEXT_GROUP);
if (ret)
return ret;
@@ -355,7 +429,8 @@ int otx2_rss_init(struct otx2_nic *pfvf)
rss->flowkey_cfg = rss->enable ? rss->flowkey_cfg :
NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6 |
NIX_FLOW_KEY_TYPE_TCP | NIX_FLOW_KEY_TYPE_UDP |
- NIX_FLOW_KEY_TYPE_SCTP | NIX_FLOW_KEY_TYPE_VLAN;
+ NIX_FLOW_KEY_TYPE_SCTP | NIX_FLOW_KEY_TYPE_VLAN |
+ NIX_FLOW_KEY_TYPE_IPV4_PROTO;
ret = otx2_set_flowkey_cfg(pfvf);
if (ret)
@@ -468,34 +543,53 @@ void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx)
(pfvf->hw.cq_ecount_wait - 1));
}
-dma_addr_t __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool)
+int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
+ dma_addr_t *dma)
{
- dma_addr_t iova;
u8 *buf;
- buf = napi_alloc_frag(pool->rbsize + OTX2_ALIGN);
+ buf = napi_alloc_frag_align(pool->rbsize, OTX2_ALIGN);
if (unlikely(!buf))
return -ENOMEM;
- buf = PTR_ALIGN(buf, OTX2_ALIGN);
- iova = dma_map_single_attrs(pfvf->dev, buf, pool->rbsize,
+ *dma = dma_map_single_attrs(pfvf->dev, buf, pool->rbsize,
DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
- if (unlikely(dma_mapping_error(pfvf->dev, iova))) {
+ if (unlikely(dma_mapping_error(pfvf->dev, *dma))) {
page_frag_free(buf);
return -ENOMEM;
}
- return iova;
+ return 0;
}
-static dma_addr_t otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool)
+static int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
+ dma_addr_t *dma)
{
- dma_addr_t addr;
-
+ int ret;
local_bh_disable();
- addr = __otx2_alloc_rbuf(pfvf, pool);
+ ret = __otx2_alloc_rbuf(pfvf, pool, dma);
local_bh_enable();
- return addr;
+ return ret;
+}
+
+int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq,
+ dma_addr_t *dma)
+{
+ if (unlikely(__otx2_alloc_rbuf(pfvf, cq->rbpool, dma))) {
+ struct refill_work *work;
+ struct delayed_work *dwork;
+
+ work = &pfvf->refill_wrk[cq->cq_idx];
+ dwork = &work->pool_refill_work;
+ /* Schedule a task if no other task is running */
+ if (!cq->refill_task_sched) {
+ cq->refill_task_sched = true;
+ schedule_delayed_work(dwork,
+ msecs_to_jiffies(100));
+ }
+ return -ENOMEM;
+ }
+ return 0;
}
void otx2_tx_timeout(struct net_device *netdev, unsigned int txq)
@@ -521,28 +615,14 @@ void otx2_get_mac_from_af(struct net_device *netdev)
}
EXPORT_SYMBOL(otx2_get_mac_from_af);
-static int otx2_get_link(struct otx2_nic *pfvf)
-{
- int link = 0;
- u16 map;
-
- /* cgx lmac link */
- if (pfvf->hw.tx_chan_base >= CGX_CHAN_BASE) {
- map = pfvf->hw.tx_chan_base & 0x7FF;
- link = 4 * ((map >> 8) & 0xF) + ((map >> 4) & 0xF);
- }
- /* LBK channel */
- if (pfvf->hw.tx_chan_base < SDP_CHAN_BASE)
- link = 12;
-
- return link;
-}
-
int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
{
struct otx2_hw *hw = &pfvf->hw;
struct nix_txschq_config *req;
u64 schq, parent;
+ u64 dwrr_val;
+
+ dwrr_val = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox);
if (!req)
@@ -555,9 +635,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
/* Set topology e.t.c configuration */
if (lvl == NIX_TXSCH_LVL_SMQ) {
req->reg[0] = NIX_AF_SMQX_CFG(schq);
- req->regval[0] = ((OTX2_MAX_MTU + OTX2_ETH_HLEN) << 8) |
- OTX2_MIN_MTU;
-
+ req->regval[0] = ((u64)pfvf->tx_max_pktlen << 8) | OTX2_MIN_MTU;
req->regval[0] |= (0x20ULL << 51) | (0x80ULL << 39) |
(0x2ULL << 36);
req->num_regs++;
@@ -568,21 +646,26 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
req->num_regs++;
/* Set DWRR quantum */
req->reg[2] = NIX_AF_MDQX_SCHEDULE(schq);
- req->regval[2] = DFLT_RR_QTM;
+ req->regval[2] = dwrr_val;
} else if (lvl == NIX_TXSCH_LVL_TL4) {
parent = hw->txschq_list[NIX_TXSCH_LVL_TL3][0];
req->reg[0] = NIX_AF_TL4X_PARENT(schq);
req->regval[0] = parent << 16;
req->num_regs++;
req->reg[1] = NIX_AF_TL4X_SCHEDULE(schq);
- req->regval[1] = DFLT_RR_QTM;
+ req->regval[1] = dwrr_val;
+ if (is_otx2_sdpvf(pfvf->pdev)) {
+ req->num_regs++;
+ req->reg[2] = NIX_AF_TL4X_SDP_LINK_CFG(schq);
+ req->regval[2] = BIT_ULL(12);
+ }
} else if (lvl == NIX_TXSCH_LVL_TL3) {
parent = hw->txschq_list[NIX_TXSCH_LVL_TL2][0];
req->reg[0] = NIX_AF_TL3X_PARENT(schq);
req->regval[0] = parent << 16;
req->num_regs++;
req->reg[1] = NIX_AF_TL3X_SCHEDULE(schq);
- req->regval[1] = DFLT_RR_QTM;
+ req->regval[1] = dwrr_val;
} else if (lvl == NIX_TXSCH_LVL_TL2) {
parent = hw->txschq_list[NIX_TXSCH_LVL_TL1][0];
req->reg[0] = NIX_AF_TL2X_PARENT(schq);
@@ -590,20 +673,24 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
req->num_regs++;
req->reg[1] = NIX_AF_TL2X_SCHEDULE(schq);
- req->regval[1] = TXSCH_TL1_DFLT_RR_PRIO << 24 | DFLT_RR_QTM;
+ req->regval[1] = TXSCH_TL1_DFLT_RR_PRIO << 24 | dwrr_val;
- req->num_regs++;
- req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq,
- otx2_get_link(pfvf));
- /* Enable this queue and backpressure */
- req->regval[2] = BIT_ULL(13) | BIT_ULL(12);
+ if (!is_otx2_sdpvf(pfvf->pdev)) {
+ req->num_regs++;
+ req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link);
+ /* Enable this queue and backpressure */
+ req->regval[2] = BIT_ULL(13) | BIT_ULL(12);
+ }
} else if (lvl == NIX_TXSCH_LVL_TL1) {
/* Default config for TL1.
* For VF this is always ignored.
*/
- /* Set DWRR quantum */
+ /* On CN10K, if RR_WEIGHT is greater than 16384, HW will
+ * clip it to 16384, so configuring a 24bit max value
+ * will work on both OTx2 and CN10K.
+ */
req->reg[0] = NIX_AF_TL1X_SCHEDULE(schq);
req->regval[0] = TXSCH_TL1_DFLT_RR_QTM;
@@ -668,7 +755,7 @@ void otx2_sqb_flush(struct otx2_nic *pfvf)
int timeout = 1000;
ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
- for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
+ for (qidx = 0; qidx < pfvf->hw.tot_tx_queues; qidx++) {
incr = (u64)qidx << 32;
while (timeout) {
val = otx2_atomic64_add(incr, ptr);
@@ -698,9 +785,6 @@ void otx2_sqb_flush(struct otx2_nic *pfvf)
#define RQ_PASS_LVL_AURA (255 - ((95 * 256) / 100)) /* RED when 95% is full */
#define RQ_DROP_LVL_AURA (255 - ((99 * 256) / 100)) /* Drop when 99% is full */
-/* Send skid of 2000 packets required for CQ size of 4K CQEs. */
-#define SEND_CQ_SKID 2000
-
static int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura)
{
struct otx2_qset *qset = &pfvf->qset;
@@ -734,12 +818,50 @@ static int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura)
return otx2_sync_mbox_msg(&pfvf->mbox);
}
+int otx2_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura)
+{
+ struct otx2_nic *pfvf = dev;
+ struct otx2_snd_queue *sq;
+ struct nix_aq_enq_req *aq;
+
+ sq = &pfvf->qset.sq[qidx];
+ sq->lmt_addr = (__force u64 *)(pfvf->reg_base + LMT_LF_LMTLINEX(qidx));
+ /* Get memory to put this msg */
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
+ if (!aq)
+ return -ENOMEM;
+
+ aq->sq.cq = pfvf->hw.rx_queues + qidx;
+ aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */
+ aq->sq.cq_ena = 1;
+ aq->sq.ena = 1;
+ /* Only one SMQ is allocated, map all SQ's to that SMQ */
+ aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
+ aq->sq.smq_rr_quantum = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
+ aq->sq.default_chan = pfvf->hw.tx_chan_base + chan_offset;
+ aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
+ aq->sq.sqb_aura = sqb_aura;
+ aq->sq.sq_int_ena = NIX_SQINT_BITS;
+ aq->sq.qint_idx = 0;
+ /* Due to pipelining impact, a minimum of 2000 unused SQ CQEs
+ * needs to be maintained to avoid CQ overflow.
+ */
+ aq->sq.cq_limit = ((SEND_CQ_SKID * 256) / (pfvf->qset.sqe_cnt));
+
+ /* Fill AQ info */
+ aq->qidx = qidx;
+ aq->ctype = NIX_AQ_CTYPE_SQ;
+ aq->op = NIX_AQ_INSTOP_INIT;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+
static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
{
struct otx2_qset *qset = &pfvf->qset;
struct otx2_snd_queue *sq;
- struct nix_aq_enq_req *aq;
struct otx2_pool *pool;
+ u8 chan_offset;
int err;
pool = &pfvf->qset.pool[sqb_aura];
@@ -751,17 +873,19 @@ static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
if (err)
return err;
- err = qmem_alloc(pfvf->dev, &sq->tso_hdrs, qset->sqe_cnt,
- TSO_HEADER_SIZE);
- if (err)
- return err;
+ if (qidx < pfvf->hw.tx_queues) {
+ err = qmem_alloc(pfvf->dev, &sq->tso_hdrs, qset->sqe_cnt,
+ TSO_HEADER_SIZE);
+ if (err)
+ return err;
+ }
sq->sqe_base = sq->sqe->base;
sq->sg = kcalloc(qset->sqe_cnt, sizeof(struct sg_list), GFP_KERNEL);
if (!sq->sg)
return -ENOMEM;
- if (pfvf->ptp) {
+ if (pfvf->ptp && qidx < pfvf->hw.tx_queues) {
err = qmem_alloc(pfvf->dev, &sq->timestamps, qset->sqe_cnt,
sizeof(*sq->timestamps));
if (err)
@@ -775,59 +899,37 @@ static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
sq->sqe_thresh = ((sq->num_sqbs * sq->sqe_per_sqb) * 10) / 100;
sq->aura_id = sqb_aura;
sq->aura_fc_addr = pool->fc_addr->base;
- sq->lmt_addr = (__force u64 *)(pfvf->reg_base + LMT_LF_LMTLINEX(qidx));
sq->io_addr = (__force u64)otx2_get_regaddr(pfvf, NIX_LF_OP_SENDX(0));
sq->stats.bytes = 0;
sq->stats.pkts = 0;
- /* Get memory to put this msg */
- aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
- if (!aq)
- return -ENOMEM;
-
- aq->sq.cq = pfvf->hw.rx_queues + qidx;
- aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */
- aq->sq.cq_ena = 1;
- aq->sq.ena = 1;
- /* Only one SMQ is allocated, map all SQ's to that SMQ */
- aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
- aq->sq.smq_rr_quantum = DFLT_RR_QTM;
- aq->sq.default_chan = pfvf->hw.tx_chan_base;
- aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
- aq->sq.sqb_aura = sqb_aura;
- aq->sq.sq_int_ena = NIX_SQINT_BITS;
- aq->sq.qint_idx = 0;
- /* Due pipelining impact minimum 2000 unused SQ CQE's
- * need to maintain to avoid CQ overflow.
- */
- aq->sq.cq_limit = ((SEND_CQ_SKID * 256) / (sq->sqe_cnt));
-
- /* Fill AQ info */
- aq->qidx = qidx;
- aq->ctype = NIX_AQ_CTYPE_SQ;
- aq->op = NIX_AQ_INSTOP_INIT;
-
- return otx2_sync_mbox_msg(&pfvf->mbox);
+ chan_offset = qidx % pfvf->hw.tx_chan_cnt;
+ return pfvf->hw_ops->sq_aq_init(pfvf, qidx, chan_offset, sqb_aura);
}
static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
{
struct otx2_qset *qset = &pfvf->qset;
+ int err, pool_id, non_xdp_queues;
struct nix_aq_enq_req *aq;
struct otx2_cq_queue *cq;
- int err, pool_id;
cq = &qset->cq[qidx];
cq->cq_idx = qidx;
+ non_xdp_queues = pfvf->hw.rx_queues + pfvf->hw.tx_queues;
if (qidx < pfvf->hw.rx_queues) {
cq->cq_type = CQ_RX;
cq->cint_idx = qidx;
cq->cqe_cnt = qset->rqe_cnt;
- } else {
+ } else if (qidx < non_xdp_queues) {
cq->cq_type = CQ_TX;
cq->cint_idx = qidx - pfvf->hw.rx_queues;
cq->cqe_cnt = qset->sqe_cnt;
+ } else {
+ cq->cq_type = CQ_XDP;
+ cq->cint_idx = qidx - non_xdp_queues;
+ cq->cqe_cnt = qset->sqe_cnt;
}
cq->cqe_size = pfvf->qset.xqe_size;
@@ -845,6 +947,7 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
(pfvf->hw.rqpool_cnt != pfvf->hw.rx_queues)) ? 0 : qidx;
cq->rbpool = &qset->pool[pool_id];
cq->refill_task_sched = false;
+ cq->pend_cqe = 0;
/* Get memory to put this msg */
aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
@@ -864,12 +967,19 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
aq->cq.drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, cq->cqe_cnt);
aq->cq.drop_ena = 1;
- /* Enable receive CQ backpressure */
- aq->cq.bp_ena = 1;
- aq->cq.bpid = pfvf->bpid[0];
-
- /* Set backpressure level is same as cq pass level */
- aq->cq.bp = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
+ if (!is_otx2_lbkvf(pfvf->pdev)) {
+ /* Enable receive CQ backpressure */
+ aq->cq.bp_ena = 1;
+#ifdef CONFIG_DCB
+ aq->cq.bpid = pfvf->bpid[pfvf->queue_to_pfc_map[qidx]];
+#else
+ aq->cq.bpid = pfvf->bpid[0];
+#endif
+
+ /* Set backpressure level is same as cq pass level */
+ aq->cq.bp = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid,
+ qset->rqe_cnt);
+ }
}
/* Fill AQ info */
@@ -887,7 +997,7 @@ static void otx2_pool_refill_task(struct work_struct *work)
struct refill_work *wrk;
int qidx, free_ptrs = 0;
struct otx2_nic *pfvf;
- s64 bufptr;
+ dma_addr_t bufptr;
wrk = container_of(work, struct refill_work, pool_refill_work.work);
pfvf = wrk->pf;
@@ -897,8 +1007,7 @@ static void otx2_pool_refill_task(struct work_struct *work)
free_ptrs = cq->pool_ptrs;
while (cq->pool_ptrs) {
- bufptr = otx2_alloc_rbuf(pfvf, rbpool);
- if (bufptr <= 0) {
+ if (otx2_alloc_rbuf(pfvf, rbpool, &bufptr)) {
/* Schedule a WQ if we fails to free atleast half of the
* pointers else enable napi for this RQ.
*/
@@ -913,7 +1022,7 @@ static void otx2_pool_refill_task(struct work_struct *work)
}
return;
}
- otx2_aura_freeptr(pfvf, qidx, bufptr + OTX2_HEAD_ROOM);
+ pfvf->hw_ops->aura_freeptr(pfvf, qidx, bufptr + OTX2_HEAD_ROOM);
cq->pool_ptrs--;
}
cq->refill_task_sched = false;
@@ -933,7 +1042,7 @@ int otx2_config_nix_queues(struct otx2_nic *pfvf)
}
/* Initialize TX queues */
- for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
+ for (qidx = 0; qidx < pfvf->hw.tot_tx_queues; qidx++) {
u16 sqb_aura = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
err = otx2_sq_init(pfvf, qidx, sqb_aura);
@@ -948,6 +1057,8 @@ int otx2_config_nix_queues(struct otx2_nic *pfvf)
return err;
}
+ pfvf->cq_op_addr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_CQ_OP_STATUS);
+
/* Initialize work queue for receive buffer refill */
pfvf->refill_wrk = devm_kcalloc(pfvf->dev, pfvf->qset.cq_cnt,
sizeof(struct refill_work), GFP_KERNEL);
@@ -968,7 +1079,7 @@ int otx2_config_nix(struct otx2_nic *pfvf)
struct nix_lf_alloc_rsp *rsp;
int err;
- pfvf->qset.xqe_size = NIX_XQESZ_W16 ? 128 : 512;
+ pfvf->qset.xqe_size = pfvf->hw.xqe_size;
/* Get memory to put this msg */
nixlf = otx2_mbox_alloc_msg_nix_lf_alloc(&pfvf->mbox);
@@ -977,11 +1088,11 @@ int otx2_config_nix(struct otx2_nic *pfvf)
/* Set RQ/SQ/CQ counts */
nixlf->rq_cnt = pfvf->hw.rx_queues;
- nixlf->sq_cnt = pfvf->hw.tx_queues;
+ nixlf->sq_cnt = pfvf->hw.tot_tx_queues;
nixlf->cq_cnt = pfvf->qset.cq_cnt;
nixlf->rss_sz = MAX_RSS_INDIR_TBL_SIZE;
- nixlf->rss_grps = 1; /* Single RSS indir table supported, for now */
- nixlf->xqe_sz = NIX_XQESZ_W16;
+ nixlf->rss_grps = MAX_RSS_GROUPS;
+ nixlf->xqe_sz = pfvf->hw.xqe_size == 128 ? NIX_XQESZ_W16 : NIX_XQESZ_W64;
/* We don't know absolute NPA LF idx attached.
* AF will replace 'RVU_DEFAULT_PF_FUNC' with
* NPA LF attached to this RVU PF/VF.
@@ -1015,7 +1126,7 @@ void otx2_sq_free_sqbs(struct otx2_nic *pfvf)
int sqb, qidx;
u64 iova, pa;
- for (qidx = 0; qidx < hw->tx_queues; qidx++) {
+ for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) {
sq = &qset->sq[qidx];
if (!sq->sqb_ptrs)
continue;
@@ -1127,7 +1238,7 @@ static int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
aq->aura.fc_hyst_bits = 0; /* Store count on all updates */
/* Enable backpressure for RQ aura */
- if (aura_id < pfvf->hw.rqpool_cnt) {
+ if (aura_id < pfvf->hw.rqpool_cnt && !is_otx2_lbkvf(pfvf->pdev)) {
aq->aura.bp_ena = 0;
/* If NIX1 LF is attached then specify NIX1_RX.
*
@@ -1140,10 +1251,16 @@ static int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
* "NPA_AURA_S[BP_ENA](w1[33:32]) - Enable aura backpressure to
* NIX-RX based on [BP] level. One bit per NIX-RX; index
* enumerated by NPA_BPINTF_E."
+ * In the above description, 'One bit per NIX-RX' was presumably
+ * written by mistake in the HRM.
*/
if (pfvf->nix_blkaddr == BLKADDR_NIX1)
aq->aura.bp_ena = 1;
+#ifdef CONFIG_DCB
+ aq->aura.nix0_bpid = pfvf->bpid[pfvf->queue_to_pfc_map[aura_id]];
+#else
aq->aura.nix0_bpid = pfvf->bpid[0];
+#endif
/* Set backpressure level for RQ's Aura */
aq->aura.bp = RQ_BP_LVL_AURA;
@@ -1212,8 +1329,8 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
struct otx2_hw *hw = &pfvf->hw;
struct otx2_snd_queue *sq;
struct otx2_pool *pool;
+ dma_addr_t bufptr;
int err, ptr;
- s64 bufptr;
/* Calculate number of SQBs needed.
*
@@ -1227,7 +1344,7 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
stack_pages =
(num_sqbs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs;
- for (qidx = 0; qidx < hw->tx_queues; qidx++) {
+ for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) {
pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
/* Initialize aura context */
err = otx2_aura_init(pfvf, pool_id, pool_id, num_sqbs);
@@ -1247,26 +1364,30 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
goto fail;
/* Allocate pointers and free them to aura/pool */
- for (qidx = 0; qidx < hw->tx_queues; qidx++) {
+ for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) {
pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
pool = &pfvf->qset.pool[pool_id];
sq = &qset->sq[qidx];
sq->sqb_count = 0;
- sq->sqb_ptrs = kcalloc(num_sqbs, sizeof(u64 *), GFP_KERNEL);
- if (!sq->sqb_ptrs)
- return -ENOMEM;
+ sq->sqb_ptrs = kcalloc(num_sqbs, sizeof(*sq->sqb_ptrs), GFP_KERNEL);
+ if (!sq->sqb_ptrs) {
+ err = -ENOMEM;
+ goto err_mem;
+ }
for (ptr = 0; ptr < num_sqbs; ptr++) {
- bufptr = otx2_alloc_rbuf(pfvf, pool);
- if (bufptr <= 0)
- return bufptr;
- otx2_aura_freeptr(pfvf, pool_id, bufptr);
+ err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
+ if (err)
+ goto err_mem;
+ pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr);
sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr;
}
}
- return 0;
+err_mem:
+ return err ? -ENOMEM : 0;
+
fail:
otx2_mbox_reset(&pfvf->mbox.mbox, 0);
otx2_aura_pool_free(pfvf);
@@ -1279,7 +1400,7 @@ int otx2_rq_aura_pool_init(struct otx2_nic *pfvf)
int stack_pages, pool_id, rq;
struct otx2_pool *pool;
int err, ptr, num_ptrs;
- s64 bufptr;
+ dma_addr_t bufptr;
num_ptrs = pfvf->qset.rqe_cnt;
@@ -1309,15 +1430,15 @@ int otx2_rq_aura_pool_init(struct otx2_nic *pfvf)
for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) {
pool = &pfvf->qset.pool[pool_id];
for (ptr = 0; ptr < num_ptrs; ptr++) {
- bufptr = otx2_alloc_rbuf(pfvf, pool);
- if (bufptr <= 0)
- return bufptr;
- otx2_aura_freeptr(pfvf, pool_id,
- bufptr + OTX2_HEAD_ROOM);
+ err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
+ if (err)
+ goto err_mem;
+ pfvf->hw_ops->aura_freeptr(pfvf, pool_id,
+ bufptr + OTX2_HEAD_ROOM);
}
}
-
- return 0;
+err_mem:
+ return err ? -ENOMEM : 0;
fail:
otx2_mbox_reset(&pfvf->mbox.mbox, 0);
otx2_aura_pool_free(pfvf);
@@ -1472,11 +1593,18 @@ int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable)
return -ENOMEM;
req->chan_base = 0;
- req->chan_cnt = 1;
+#ifdef CONFIG_DCB
+ req->chan_cnt = pfvf->pfc_en ? IEEE_8021QAZ_MAX_TCS : 1;
+ req->bpid_per_chan = pfvf->pfc_en ? 1 : 0;
+#else
+ req->chan_cnt = 1;
req->bpid_per_chan = 0;
+#endif
+
return otx2_sync_mbox_msg(&pfvf->mbox);
}
+EXPORT_SYMBOL(otx2_nix_config_bp);
/* Mbox message handlers */
void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
@@ -1490,6 +1618,13 @@ void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
pfvf->hw.cgx_tx_stats[id] = rsp->tx_stats[id];
}
+void mbox_handler_cgx_fec_stats(struct otx2_nic *pfvf,
+ struct cgx_fec_stats_rsp *rsp)
+{
+ pfvf->hw.cgx_fec_corr_blks += rsp->fec_corr_blks;
+ pfvf->hw.cgx_fec_uncorr_blks += rsp->fec_uncorr_blks;
+}
+
void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
struct nix_txsch_alloc_rsp *rsp)
{
@@ -1517,8 +1652,13 @@ void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf,
pfvf->hw.sqb_size = rsp->sqb_size;
pfvf->hw.rx_chan_base = rsp->rx_chan_base;
pfvf->hw.tx_chan_base = rsp->tx_chan_base;
+ pfvf->hw.rx_chan_cnt = rsp->rx_chan_cnt;
+ pfvf->hw.tx_chan_cnt = rsp->tx_chan_cnt;
pfvf->hw.lso_tsov4_idx = rsp->lso_tsov4_idx;
pfvf->hw.lso_tsov6_idx = rsp->lso_tsov6_idx;
+ pfvf->hw.cgx_links = rsp->cgx_links;
+ pfvf->hw.lbk_links = rsp->lbk_links;
+ pfvf->hw.tx_link = rsp->tx_link;
}
EXPORT_SYMBOL(mbox_handler_nix_lf_alloc);
@@ -1583,6 +1723,101 @@ void otx2_set_cints_affinity(struct otx2_nic *pfvf)
}
}
+u16 otx2_get_max_mtu(struct otx2_nic *pfvf)
+{
+ struct nix_hw_info *rsp;
+ struct msg_req *req;
+ u16 max_mtu;
+ int rc;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_nix_get_hw_info(&pfvf->mbox);
+ if (!req) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ rc = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (!rc) {
+ rsp = (struct nix_hw_info *)
+ otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+
+ /* HW counts VLAN insertion bytes (8 for double tag)
+ * irrespective of whether the SQE requests VLAN insertion in
+ * the packet or not. Hence these 8 bytes have to be discounted
+ * from the max packet size, otherwise HW will throw SMQ errors.
+ */
+ max_mtu = rsp->max_mtu - 8 - OTX2_ETH_HLEN;
+
+ /* Also save DWRR MTU, needed for DWRR weight calculation */
+ pfvf->hw.dwrr_mtu = rsp->rpm_dwrr_mtu;
+ if (!pfvf->hw.dwrr_mtu)
+ pfvf->hw.dwrr_mtu = 1;
+ }
+
+out:
+ mutex_unlock(&pfvf->mbox.lock);
+ if (rc) {
+ dev_warn(pfvf->dev,
+ "Failed to get MTU from hardware setting default value(1500)\n");
+ max_mtu = 1500;
+ }
+ return max_mtu;
+}
+EXPORT_SYMBOL(otx2_get_max_mtu);
+
+int otx2_handle_ntuple_tc_features(struct net_device *netdev, netdev_features_t features)
+{
+ netdev_features_t changed = features ^ netdev->features;
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ bool ntuple = !!(features & NETIF_F_NTUPLE);
+ bool tc = !!(features & NETIF_F_HW_TC);
+
+ if ((changed & NETIF_F_NTUPLE) && !ntuple)
+ otx2_destroy_ntuple_flows(pfvf);
+
+ if ((changed & NETIF_F_NTUPLE) && ntuple) {
+ if (!pfvf->flow_cfg->max_flows) {
+ netdev_err(netdev,
+ "Can't enable NTUPLE, MCAM entries not allocated\n");
+ return -EINVAL;
+ }
+ }
+
+ if ((changed & NETIF_F_HW_TC) && tc) {
+ if (!pfvf->flow_cfg->max_flows) {
+ netdev_err(netdev,
+ "Can't enable TC, MCAM entries not allocated\n");
+ return -EINVAL;
+ }
+ }
+
+ if ((changed & NETIF_F_HW_TC) && !tc &&
+ pfvf->flow_cfg && pfvf->flow_cfg->nr_flows) {
+ netdev_err(netdev, "Can't disable TC hardware offload while flows are active\n");
+ return -EBUSY;
+ }
+
+ if ((changed & NETIF_F_NTUPLE) && ntuple &&
+ (netdev->features & NETIF_F_HW_TC) && !(changed & NETIF_F_HW_TC)) {
+ netdev_err(netdev,
+ "Can't enable NTUPLE when TC is active, disable TC and retry\n");
+ return -EINVAL;
+ }
+
+ if ((changed & NETIF_F_HW_TC) && tc &&
+ (netdev->features & NETIF_F_NTUPLE) && !(changed & NETIF_F_NTUPLE)) {
+ netdev_err(netdev,
+ "Can't enable TC when NTUPLE is active, disable NTUPLE and retry\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(otx2_handle_ntuple_tc_features);
+
#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
int __weak \
otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf, \
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index d6253f2a414d..1fc59d74318e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -1,11 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Marvell OcteonTx2 RVU Ethernet driver
+/* Marvell RVU Ethernet driver
*
- * Copyright (C) 2020 Marvell International Ltd.
+ * Copyright (C) 2020 Marvell.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef OTX2_COMMON_H
@@ -16,18 +13,26 @@
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
+#include <linux/soc/marvell/octeontx2/asm.h>
+#include <net/pkt_cls.h>
+#include <net/devlink.h>
+#include <linux/time64.h>
#include <mbox.h>
+#include <npc.h>
#include "otx2_reg.h"
#include "otx2_txrx.h"
+#include "otx2_devlink.h"
#include <rvu_trace.h>
/* PCI device IDs */
#define PCI_DEVID_OCTEONTX2_RVU_PF 0xA063
#define PCI_DEVID_OCTEONTX2_RVU_VF 0xA064
+#define PCI_DEVID_OCTEONTX2_SDP_VF 0xA0F7
#define PCI_DEVID_OCTEONTX2_RVU_AFVF 0xA0F8
#define PCI_SUBSYS_DEVID_96XX_RVU_PFVF 0xB200
+#define PCI_SUBSYS_DEVID_95XX_RVU_PFVF 0xB200
/* PCI BAR nos */
#define PCI_CFG_REG_BAR_NUM 2
@@ -47,14 +52,25 @@ enum arua_mapped_qtypes {
#define NIX_LF_ERR_VEC 0x81
#define NIX_LF_POISON_VEC 0x82
+/* Send skid of 2000 packets required for CQ size of 4K CQEs. */
+#define SEND_CQ_SKID 2000
+
+struct otx2_lmt_info {
+ u64 lmt_addr;
+ u16 lmt_id;
+};
/* RSS configuration */
+struct otx2_rss_ctx {
+ u8 ind_tbl[MAX_RSS_INDIR_TBL_SIZE];
+};
+
struct otx2_rss_info {
u8 enable;
u32 flowkey_cfg;
u16 rss_size;
- u8 ind_tbl[MAX_RSS_INDIR_TBL_SIZE];
#define RSS_HASH_KEY_SIZE 44 /* 352 bit key */
u8 key[RSS_HASH_KEY_SIZE];
+ struct otx2_rss_ctx *rss_ctx[MAX_RSS_GROUPS];
};
/* NIX (or NPC) RX errors */
@@ -157,10 +173,14 @@ struct otx2_hw {
struct otx2_rss_info rss_info;
u16 rx_queues;
u16 tx_queues;
+ u16 xdp_queues;
+ u16 tot_tx_queues;
u16 max_queues;
u16 pool_cnt;
u16 rqpool_cnt;
u16 sqpool_cnt;
+ u16 xqe_size;
+ u16 rbuf_fixed_size;
/* NPA */
u32 stack_pg_ptrs; /* No of ptrs per stack page */
@@ -169,10 +189,14 @@ struct otx2_hw {
/* NIX */
u16 txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
+ u16 matchall_ipolicer;
+ u32 dwrr_mtu;
/* HW settings, coalescing etc */
u16 rx_chan_base;
u16 tx_chan_base;
+ u8 rx_chan_cnt;
+ u8 tx_chan_cnt;
u16 cq_qcount_wait;
u16 cq_ecount_wait;
u16 rq_skid;
@@ -183,7 +207,9 @@ struct otx2_hw {
u8 lso_tsov6_idx;
u8 lso_udpv4_idx;
u8 lso_udpv6_idx;
- u8 hw_tso;
+
+ /* RSS */
+ u8 flowkey_alg_idx;
/* MSI-X */
u8 cint_cnt; /* CQ interrupt count */
@@ -197,12 +223,45 @@ struct otx2_hw {
struct otx2_drv_stats drv_stats;
u64 cgx_rx_stats[CGX_RX_STATS_COUNT];
u64 cgx_tx_stats[CGX_TX_STATS_COUNT];
+ u64 cgx_fec_corr_blks;
+ u64 cgx_fec_uncorr_blks;
+ u8 cgx_links; /* No. of CGX links present in HW */
+ u8 lbk_links; /* No. of LBK links present in HW */
+ u8 tx_link; /* Transmit channel link number */
+#define HW_TSO 0
+#define CN10K_MBOX 1
+#define CN10K_LMTST 2
+#define CN10K_RPM 3
+#define CN10K_PTP_ONESTEP 4
+ unsigned long cap_flag;
+
+#define LMT_LINE_SIZE 128
+#define LMT_BURST_SIZE 32 /* 32 LMTST lines for burst SQE flush */
+ u64 *lmt_base;
+ struct otx2_lmt_info __percpu *lmt_info;
+};
+
+struct vfvlan {
+ u16 vlan;
+ u16 proto;
+ u8 qos;
+};
+
+enum vfperm {
+ OTX2_RESET_VF_PERM,
+ OTX2_TRUSTED_VF,
};
struct otx2_vf_config {
struct otx2_nic *pf;
struct delayed_work link_event_work;
+ struct delayed_work ptp_info_work;
bool intf_down; /* interface was either configured or not */
+ u8 mac[ETH_ALEN];
+ u16 vlan;
+ int tx_vtag_idx;
+ struct vfvlan rule;
+ bool trusted;
};
struct flr_work {
@@ -220,26 +279,94 @@ struct otx2_ptp {
struct ptp_clock *ptp_clock;
struct otx2_nic *nic;
- struct cyclecounter cycle_counter;
- struct timecounter time_counter;
+ struct delayed_work extts_work;
+ u64 last_extts;
+ u64 thresh;
+
+ struct ptp_pin_desc extts_config;
+ bool ptp_en;
+ u64 (*convert_rx_ptp_tstmp)(u64 timestamp);
+ u64 (*convert_tx_ptp_tstmp)(u64 timestamp);
+ struct delayed_work synctstamp_work;
+ u64 tstamp;
+};
+
+struct otx2_mac_table {
+ u8 addr[ETH_ALEN];
+ u16 mcam_entry;
+ bool inuse;
+};
+
+struct otx2_flow_config {
+ u16 *flow_ent;
+ u16 *def_ent;
+ u16 nr_flows;
+#define OTX2_DEFAULT_FLOWCOUNT 16
+#define OTX2_MAX_UNICAST_FLOWS 8
+#define OTX2_MAX_VLAN_FLOWS 1
+#define OTX2_MAX_TC_FLOWS OTX2_DEFAULT_FLOWCOUNT
+#define OTX2_MCAM_COUNT (OTX2_DEFAULT_FLOWCOUNT + \
+ OTX2_MAX_UNICAST_FLOWS + \
+ OTX2_MAX_VLAN_FLOWS)
+ u16 unicast_offset;
+ u16 rx_vlan_offset;
+ u16 vf_vlan_offset;
+#define OTX2_PER_VF_VLAN_FLOWS 2 /* Rx + Tx per VF */
+#define OTX2_VF_VLAN_RX_INDEX 0
+#define OTX2_VF_VLAN_TX_INDEX 1
+ u16 max_flows;
+ u8 dmacflt_max_flows;
+ u8 *bmap_to_dmacindex;
+ unsigned long dmacflt_bmap;
+ struct list_head flow_list;
};
#define OTX2_HW_TIMESTAMP_LEN 8
+struct otx2_tc_info {
+ /* hash table to store TC offloaded flows */
+ struct rhashtable flow_table;
+ struct rhashtable_params flow_ht_params;
+ unsigned long *tc_entries_bitmap;
+};
+
+struct dev_hw_ops {
+ int (*sq_aq_init)(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura);
+ void (*sqe_flush)(void *dev, struct otx2_snd_queue *sq,
+ int size, int qidx);
+ void (*refill_pool_ptrs)(void *dev, struct otx2_cq_queue *cq);
+ void (*aura_freeptr)(void *dev, int aura, u64 buf);
+};
+
struct otx2_nic {
void __iomem *reg_base;
struct net_device *netdev;
+ struct dev_hw_ops *hw_ops;
void *iommu_domain;
- u16 max_frs;
+ u16 xtra_hdr;
+ u16 tx_max_pktlen;
u16 rbsize; /* Receive buffer size */
#define OTX2_FLAG_RX_TSTAMP_ENABLED BIT_ULL(0)
#define OTX2_FLAG_TX_TSTAMP_ENABLED BIT_ULL(1)
#define OTX2_FLAG_INTF_DOWN BIT_ULL(2)
+#define OTX2_FLAG_MCAM_ENTRIES_ALLOC BIT_ULL(3)
+#define OTX2_FLAG_NTUPLE_SUPPORT BIT_ULL(4)
+#define OTX2_FLAG_UCAST_FLTR_SUPPORT BIT_ULL(5)
+#define OTX2_FLAG_RX_VLAN_SUPPORT BIT_ULL(6)
+#define OTX2_FLAG_VF_VLAN_SUPPORT BIT_ULL(7)
+#define OTX2_FLAG_PF_SHUTDOWN BIT_ULL(8)
#define OTX2_FLAG_RX_PAUSE_ENABLED BIT_ULL(9)
#define OTX2_FLAG_TX_PAUSE_ENABLED BIT_ULL(10)
+#define OTX2_FLAG_TC_FLOWER_SUPPORT BIT_ULL(11)
+#define OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED BIT_ULL(12)
+#define OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED BIT_ULL(13)
+#define OTX2_FLAG_DMACFLTR_SUPPORT BIT_ULL(14)
+#define OTX2_FLAG_PTP_ONESTEP_SYNC BIT_ULL(15)
u64 flags;
+ u64 *cq_op_addr;
+ struct bpf_prog *xdp_prog;
struct otx2_qset qset;
struct otx2_hw hw;
struct pci_dev *pdev;
@@ -259,6 +386,10 @@ struct otx2_nic {
u64 reset_count;
struct work_struct reset_task;
+
+ /* NPC MCAM */
+ struct otx2_flow_config *flow_cfg;
+
struct workqueue_struct *flr_wq;
struct flr_work *flr_wrk;
struct refill_work *refill_wrk;
@@ -270,9 +401,50 @@ struct otx2_nic {
/* Block address of NIX either BLKADDR_NIX0 or BLKADDR_NIX1 */
int nix_blkaddr;
+ struct qmem *dync_lmt;
+ u16 tot_lmt_lines;
+ u16 npa_lmt_lines;
+ u32 nix_lmt_size;
+ unsigned long rq_bmap;
struct otx2_ptp *ptp;
struct hwtstamp_config tstamp;
+ struct otx2_mac_table *mac_table;
+ struct otx2_tc_info tc_info;
+ struct workqueue_struct *otx2_ndo_wq;
+ struct work_struct otx2_rx_mode_work;
+
+#define OTX2_PRIV_FLAG_PAM4 BIT(0)
+#define OTX2_PRIV_FLAG_EDSA_HDR BIT(1)
+#define OTX2_PRIV_FLAG_HIGIG2_HDR BIT(2)
+#define OTX2_PRIV_FLAG_FDSA_HDR BIT(3)
+#define OTX2_INTF_MOD_MASK GENMASK(3, 1)
+#define OTX2_PRIV_FLAG_DEF_MODE BIT(4)
+#define OTX2_IS_EDSA_ENABLED(flags) ((flags) & \
+ OTX2_PRIV_FLAG_EDSA_HDR)
+#define OTX2_IS_HIGIG2_ENABLED(flags) ((flags) & \
+ OTX2_PRIV_FLAG_HIGIG2_HDR)
+#define OTX2_IS_DEF_MODE_ENABLED(flags) ((flags) & \
+ OTX2_PRIV_FLAG_DEF_MODE)
+#define OTX2_IS_INTFMOD_SET(flags) hweight32((flags) & OTX2_INTF_MOD_MASK)
+
+ u32 ethtool_flags;
+
+ /* extended DSA and EDSA header lengths are 8/16 bytes
+ * so take max length 16 bytes here
+ */
+#define OTX2_EDSA_HDR_LEN 16
+#define OTX2_HIGIG2_HDR_LEN 16
+#define OTX2_FDSA_HDR_LEN 4
+ u32 addl_mtu;
+
+ /* Devlink */
+ struct otx2_devlink *dl;
+#ifdef CONFIG_DCB
+ /* PFC */
+ u8 pfc_en;
+ u8 *queue_to_pfc_map;
+#endif
};
static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
@@ -280,10 +452,19 @@ static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
return pdev->device == PCI_DEVID_OCTEONTX2_RVU_AFVF;
}
+static inline bool is_otx2_sdpvf(struct pci_dev *pdev)
+{
+ return pdev->device == PCI_DEVID_OCTEONTX2_SDP_VF;
+}
+
static inline bool is_96xx_A0(struct pci_dev *pdev)
{
- return (pdev->revision == 0x00) &&
- (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF);
+ return (pdev->revision == 0x00);
+}
+
+static inline bool is_95xx_A0(struct pci_dev *pdev)
+{
+ return (pdev->revision == 0x10) || (pdev->revision == 0x11);
}
static inline bool is_96xx_B0(struct pci_dev *pdev)
@@ -292,6 +473,26 @@ static inline bool is_96xx_B0(struct pci_dev *pdev)
(pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF);
}
+/* REVID for PCIe devices.
+ * Bits 0..1: minor pass, bit 3..2: major pass
+ * bits 7..4: midr id
+ */
+#define PCI_REVISION_ID_96XX 0x00
+#define PCI_REVISION_ID_95XX 0x10
+#define PCI_REVISION_ID_95XXN 0x20
+#define PCI_REVISION_ID_98XX 0x30
+#define PCI_REVISION_ID_95XXMM 0x40
+#define PCI_REVISION_ID_95XXO 0xE0
+
+static inline bool is_dev_otx2(struct pci_dev *pdev)
+{
+ u8 midr = pdev->revision & 0xF0;
+
+ return (midr == PCI_REVISION_ID_96XX || midr == PCI_REVISION_ID_95XX ||
+ midr == PCI_REVISION_ID_95XXN || midr == PCI_REVISION_ID_98XX ||
+ midr == PCI_REVISION_ID_95XXMM || midr == PCI_REVISION_ID_95XXO);
+}
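A standalone decoder for the REVID layout described in the comment above (bits 7..4 midr id, 3..2 major pass, 1..0 minor pass); the revision value is an arbitrary example:

#include <stdio.h>

int main(void)
{
	unsigned int revision = 0x30;	/* example: PCI_REVISION_ID_98XX, pass A0 */

	printf("midr=%#x major_pass=%u minor_pass=%u\n",
	       revision & 0xF0, (revision >> 2) & 0x3, revision & 0x3);
	return 0;
}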
+
static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
{
struct otx2_hw *hw = &pfvf->hw;
@@ -300,10 +501,10 @@ static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
pfvf->hw.cq_ecount_wait = CQ_CQE_THRESH_DEFAULT;
pfvf->hw.cq_qcount_wait = CQ_QCOUNT_DEFAULT;
- hw->hw_tso = true;
+ __set_bit(HW_TSO, &hw->cap_flag);
if (is_96xx_A0(pfvf->pdev)) {
- hw->hw_tso = false;
+ __clear_bit(HW_TSO, &hw->cap_flag);
/* Time based irq coalescing is not supported */
pfvf->hw.cq_qcount_wait = 0x0;
@@ -314,6 +515,24 @@ static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
pfvf->hw.rq_skid = 600;
pfvf->qset.rqe_cnt = Q_COUNT(Q_SIZE_1K);
}
+ if (is_96xx_A0(pfvf->pdev)) {
+ pfvf->hw.cq_qcount_wait = 0x0;
+
+ /* Due to a HW erratum there will be frequent stalls on the
+ * transmit side; instead of disabling the watchdog, set its
+ * timeout to a very high value.
+ */
+ pfvf->netdev->watchdog_timeo = 10000 * HZ;
+ }
+ if (is_96xx_B0(pfvf->pdev))
+ __clear_bit(HW_TSO, &hw->cap_flag);
+
+ if (!is_dev_otx2(pfvf->pdev)) {
+ __set_bit(CN10K_MBOX, &hw->cap_flag);
+ __set_bit(CN10K_LMTST, &hw->cap_flag);
+ __set_bit(CN10K_RPM, &hw->cap_flag);
+ __set_bit(CN10K_PTP_ONESTEP, &hw->cap_flag);
+ }
}
/* Register read/write APIs */
@@ -421,23 +640,53 @@ static inline u64 otx2_atomic64_add(u64 incr, u64 *ptr)
return result;
}
-static inline u64 otx2_lmt_flush(uint64_t addr)
-{
- u64 result = 0;
-
- __asm__ volatile(".cpu generic+lse\n"
- "ldeor xzr,%x[rf],[%[rs]]"
- : [rf]"=r"(result)
- : [rs]"r"(addr));
- return result;
-}
-
#else
-#define otx2_write128(lo, hi, addr)
+#define otx2_write128(lo, hi, addr) writeq((hi) | (lo), addr)
#define otx2_atomic64_add(incr, ptr) ({ *ptr += incr; })
-#define otx2_lmt_flush(addr) ({ 0; })
#endif
+static inline void __cn10k_aura_freeptr(struct otx2_nic *pfvf, u64 aura,
+ u64 *ptrs, u64 num_ptrs)
+{
+ struct otx2_lmt_info *lmt_info;
+ u64 size = 0, count_eot = 0;
+ u64 tar_addr, val = 0;
+
+ lmt_info = get_cpu_ptr(pfvf->hw.lmt_info);
+ tar_addr = (__force u64)otx2_get_regaddr(pfvf, NPA_LF_AURA_BATCH_FREE0);
+ /* LMTID is same as AURA Id */
+ val = (lmt_info->lmt_id & 0x7FF) | BIT_ULL(63);
+ /* Set if [127:64] of last 128bit word has a valid pointer */
+ count_eot = (num_ptrs % 2) ? 0ULL : 1ULL;
+ /* Set AURA ID to free pointer */
+ ptrs[0] = (count_eot << 32) | (aura & 0xFFFFF);
+ /* Target address for LMTST flush tells HW how many 128bit
+ * words are valid from NPA_LF_AURA_BATCH_FREE0.
+ *
+ * tar_addr[6:4] is LMTST size-1 in units of 128b.
+ */
+ if (num_ptrs > 2) {
+ size = (sizeof(u64) * num_ptrs) / 16;
+ if (!count_eot)
+ size++;
+ tar_addr |= ((size - 1) & 0x7) << 4;
+ }
+ dma_wmb();
+ memcpy((u64 *)lmt_info->lmt_addr, ptrs, sizeof(u64) * num_ptrs);
+ put_cpu_ptr(pfvf->hw.lmt_info);
+ /* Perform LMTST flush */
+ cn10k_lmt_flush(val, tar_addr);
+}
+
+static inline void cn10k_aura_freeptr(void *dev, int aura, u64 buf)
+{
+ struct otx2_nic *pfvf = dev;
+ u64 ptrs[2] = {0};
+
+ ptrs[1] = buf;
+ /* Free only one buffer at a time during init and teardown */
+ __cn10k_aura_freeptr(pfvf, aura, ptrs, 2);
+}
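For readers new to the CN10K LMTST scheme, here is a small user-space sketch (illustration only, not driver code) of how the size field in tar_addr[6:4] works out for a few pointer counts, mirroring the arithmetic in __cn10k_aura_freeptr() above:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the size/count_eot arithmetic above: the free pointers occupy
 * 128-bit words and tar_addr[6:4] carries (number of 128-bit words - 1). */
static uint64_t lmtst_size_bits(uint64_t num_ptrs)
{
	uint64_t count_eot = (num_ptrs % 2) ? 0 : 1;
	uint64_t size, bits = 0;

	if (num_ptrs > 2) {
		size = (sizeof(uint64_t) * num_ptrs) / 16;
		if (!count_eot)
			size++;
		bits = ((size - 1) & 0x7) << 4;
	}
	return bits;
}

int main(void)
{
	for (uint64_t n = 2; n <= 8; n++)
		printf("num_ptrs=%2llu -> tar_addr size bits 0x%02llx\n",
		       (unsigned long long)n,
		       (unsigned long long)lmtst_size_bits(n));
	return 0;
}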
/* Alloc pointer from pool/aura */
static inline u64 otx2_aura_allocptr(struct otx2_nic *pfvf, int aura)
{
@@ -449,11 +698,12 @@ static inline u64 otx2_aura_allocptr(struct otx2_nic *pfvf, int aura)
}
/* Free pointer to a pool/aura */
-static inline void otx2_aura_freeptr(struct otx2_nic *pfvf,
- int aura, s64 buf)
+static inline void otx2_aura_freeptr(void *dev, int aura, u64 buf)
{
- otx2_write128((u64)buf, (u64)aura | BIT_ULL(63),
- otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_FREE0));
+ struct otx2_nic *pfvf = dev;
+ void __iomem *addr = otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_FREE0);
+
+ otx2_write128(buf, (u64)aura | BIT_ULL(63), addr);
}
static inline int otx2_get_pool_idx(struct otx2_nic *pfvf, int type, int idx)
@@ -548,6 +798,11 @@ MBOX_UP_CGX_MESSAGES
#define RVU_PFVF_FUNC_SHIFT 0
#define RVU_PFVF_FUNC_MASK 0x3FF
+static inline bool is_otx2_vf(u16 pcifunc)
+{
+ return !!(pcifunc & RVU_PFVF_FUNC_MASK);
+}
+
static inline int rvu_get_pf(u16 pcifunc)
{
return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
@@ -585,6 +840,7 @@ void otx2_get_mac_from_af(struct net_device *netdev);
void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx);
int otx2_config_pause_frm(struct otx2_nic *pfvf);
void otx2_setup_segmentation(struct otx2_nic *pfvf);
+int otx2_config_serdes_link_state(struct otx2_nic *pfvf, bool en);
/* RVU block related APIs */
int otx2_attach_npa_nix(struct otx2_nic *pfvf);
@@ -601,18 +857,23 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl);
int otx2_txsch_alloc(struct otx2_nic *pfvf);
int otx2_txschq_stop(struct otx2_nic *pfvf);
void otx2_sqb_flush(struct otx2_nic *pfvf);
-dma_addr_t __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool);
+int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
+ dma_addr_t *dma);
int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable);
void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
+int otx2_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura);
+int cn10k_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura);
+int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq,
+ dma_addr_t *dma);
/* RSS configuration APIs*/
int otx2_rss_init(struct otx2_nic *pfvf);
int otx2_set_flowkey_cfg(struct otx2_nic *pfvf);
void otx2_set_rss_key(struct otx2_nic *pfvf);
-int otx2_set_rss_table(struct otx2_nic *pfvf);
+int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id);
/* Mbox handlers */
void mbox_handler_msix_offset(struct otx2_nic *pfvf,
@@ -625,6 +886,9 @@ void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
struct nix_txsch_alloc_rsp *rsp);
void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
struct cgx_stats_rsp *rsp);
+void mbox_handler_cgx_fec_stats(struct otx2_nic *pfvf,
+ struct cgx_fec_stats_rsp *rsp);
+void otx2_set_fec_stats_count(struct otx2_nic *pfvf);
void mbox_handler_nix_bp_enable(struct otx2_nic *pfvf,
struct nix_bp_cfg_rsp *rsp);
@@ -633,6 +897,7 @@ void otx2_get_dev_stats(struct otx2_nic *pfvf);
void otx2_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats);
void otx2_update_lmac_stats(struct otx2_nic *pfvf);
+void otx2_update_lmac_fec_stats(struct otx2_nic *pfvf);
int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx);
int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx);
void otx2_set_ethtool_ops(struct net_device *netdev);
@@ -642,4 +907,58 @@ int otx2_open(struct net_device *netdev);
int otx2_stop(struct net_device *netdev);
int otx2_set_real_num_queues(struct net_device *netdev,
int tx_queues, int rx_queues);
+int otx2_set_npc_parse_mode(struct otx2_nic *pfvf, bool unbind);
+int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd);
+int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr);
+
+/* MCAM filter related APIs */
+int otx2_mcam_flow_init(struct otx2_nic *pf);
+int otx2vf_mcam_flow_init(struct otx2_nic *pfvf);
+int otx2_alloc_mcam_entries(struct otx2_nic *pfvf, u16 count);
+void otx2_mcam_flow_del(struct otx2_nic *pf);
+int otx2_destroy_ntuple_flows(struct otx2_nic *pf);
+int otx2_destroy_mcam_flows(struct otx2_nic *pfvf);
+int otx2_get_flow(struct otx2_nic *pfvf,
+ struct ethtool_rxnfc *nfc, u32 location);
+int otx2_get_all_flows(struct otx2_nic *pfvf,
+ struct ethtool_rxnfc *nfc, u32 *rule_locs);
+int otx2_add_flow(struct otx2_nic *pfvf,
+ struct ethtool_rxnfc *nfc);
+int otx2_remove_flow(struct otx2_nic *pfvf, u32 location);
+int otx2_get_maxflows(struct otx2_flow_config *flow_cfg);
+void otx2_rss_ctx_flow_del(struct otx2_nic *pfvf, int ctx_id);
+int otx2_del_macfilter(struct net_device *netdev, const u8 *mac);
+int otx2_add_macfilter(struct net_device *netdev, const u8 *mac);
+int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable);
+int otx2_enable_vf_vlan(struct otx2_nic *pf);
+int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf);
+int otx2_do_set_vf_vlan(struct otx2_nic *pf, int vf, u16 vlan, u8 qos,
+ u16 proto);
+int otx2smqvf_probe(struct otx2_nic *vf);
+int otx2smqvf_remove(struct otx2_nic *vf);
+
+bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx);
+u16 otx2_get_max_mtu(struct otx2_nic *pfvf);
+int otx2_handle_ntuple_tc_features(struct net_device *netdev,
+ netdev_features_t features);
+/* tc support */
+int otx2_init_tc(struct otx2_nic *nic);
+void otx2_shutdown_tc(struct otx2_nic *nic);
+int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
+ void *type_data);
+int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic);
+/* CGX/RPM DMAC filters support */
+int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf);
+int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u8 bit_pos);
+int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac, u8 bit_pos);
+int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u8 bit_pos);
+void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf);
+void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf);
+
+#ifdef CONFIG_DCB
+/* DCB support*/
+void otx2_update_bpid_in_rqctx(struct otx2_nic *pfvf, int vlan_prio, int qidx, bool pfc_enable);
+int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf);
+int otx2_dcbnl_set_ops(struct net_device *dev);
+#endif
#endif /* OTX2_COMMON_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
new file mode 100644
index 000000000000..290f04436050
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2021 Marvell.
+ *
+ */
+
+#include "otx2_common.h"
+
+int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf)
+{
+ struct cgx_pfc_cfg *req;
+ struct cgx_pfc_rsp *rsp;
+ int err = 0;
+
+ if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdpvf(pfvf->pdev))
+ return 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_prio_flow_ctrl_cfg(&pfvf->mbox);
+ if (!req) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ if (pfvf->pfc_en) {
+ req->rx_pause = true;
+ req->tx_pause = true;
+ } else {
+ req->rx_pause = false;
+ req->tx_pause = false;
+ }
+ req->pfc_en = pfvf->pfc_en;
+
+ if (!otx2_sync_mbox_msg(&pfvf->mbox)) {
+ rsp = (struct cgx_pfc_rsp *)
+ otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+ if (req->rx_pause != rsp->rx_pause || req->tx_pause != rsp->tx_pause) {
+ dev_warn(pfvf->dev,
+ "Failed to config PFC\n");
+ err = -EPERM;
+ }
+ }
+unlock:
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+}
+
+void otx2_update_bpid_in_rqctx(struct otx2_nic *pfvf, int vlan_prio, int qidx,
+ bool pfc_enable)
+{
+ bool if_up = netif_running(pfvf->netdev);
+ struct npa_aq_enq_req *npa_aq;
+ struct nix_aq_enq_req *aq;
+ int err = 0;
+
+ if (pfvf->queue_to_pfc_map[qidx] && pfc_enable) {
+ dev_warn(pfvf->dev,
+ "PFC enable not permitted as Priority %d already mapped to Queue %d\n",
+ pfvf->queue_to_pfc_map[qidx], qidx);
+ return;
+ }
+
+ if (if_up) {
+ netif_tx_stop_all_queues(pfvf->netdev);
+ netif_carrier_off(pfvf->netdev);
+ }
+
+ pfvf->queue_to_pfc_map[qidx] = vlan_prio;
+
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
+ if (!aq) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ aq->cq.bpid = pfvf->bpid[vlan_prio];
+ aq->cq_mask.bpid = GENMASK(8, 0);
+
+ /* Fill AQ info */
+ aq->qidx = qidx;
+ aq->ctype = NIX_AQ_CTYPE_CQ;
+ aq->op = NIX_AQ_INSTOP_WRITE;
+
+ otx2_sync_mbox_msg(&pfvf->mbox);
+
+ npa_aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
+ if (!npa_aq) {
+ err = -ENOMEM;
+ goto out;
+ }
+ npa_aq->aura.nix0_bpid = pfvf->bpid[vlan_prio];
+ npa_aq->aura_mask.nix0_bpid = GENMASK(8, 0);
+
+ /* Fill NPA AQ info */
+ npa_aq->aura_id = qidx;
+ npa_aq->ctype = NPA_AQ_CTYPE_AURA;
+ npa_aq->op = NPA_AQ_INSTOP_WRITE;
+ otx2_sync_mbox_msg(&pfvf->mbox);
+
+out:
+ if (if_up) {
+ netif_carrier_on(pfvf->netdev);
+ netif_tx_start_all_queues(pfvf->netdev);
+ }
+
+ if (err)
+ dev_warn(pfvf->dev,
+ "Updating BPIDs in CQ and Aura contexts of RQ%d failed with err %d\n",
+ qidx, err);
+}
+
+static int otx2_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+
+ pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS;
+ pfc->pfc_en = pfvf->pfc_en;
+
+ return 0;
+}
+
+static int otx2_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+ int err;
+
+ /* Save PFC configuration to interface */
+ pfvf->pfc_en = pfc->pfc_en;
+
+ err = otx2_config_priority_flow_ctrl(pfvf);
+ if (err)
+ return err;
+
+ /* Request per-channel BPIDs */
+ if (pfc->pfc_en)
+ otx2_nix_config_bp(pfvf, true);
+
+ return 0;
+}
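The pfc_en value handed over by dcbnl is a per-priority bitmap; a tiny stand-alone sketch (illustration only) of how it decodes:

#include <stdio.h>

/* Illustration only: pfc_en as delivered by dcbnl is an 8-bit priority
 * bitmap; each set bit requests priority flow control on that priority. */
int main(void)
{
	unsigned char pfc_en = 0x09;	/* priorities 0 and 3 */
	int prio;

	for (prio = 0; prio < 8; prio++)
		if (pfc_en & (1 << prio))
			printf("PFC requested for priority %d\n", prio);
	return 0;
}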
+
+static u8 otx2_dcbnl_getdcbx(struct net_device __always_unused *dev)
+{
+ return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
+}
+
+static u8 otx2_dcbnl_setdcbx(struct net_device __always_unused *dev, u8 mode)
+{
+ return (mode != (DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE)) ? 1 : 0;
+}
+
+static const struct dcbnl_rtnl_ops otx2_dcbnl_ops = {
+ .ieee_getpfc = otx2_dcbnl_ieee_getpfc,
+ .ieee_setpfc = otx2_dcbnl_ieee_setpfc,
+ .getdcbx = otx2_dcbnl_getdcbx,
+ .setdcbx = otx2_dcbnl_setdcbx,
+};
+
+int otx2_dcbnl_set_ops(struct net_device *dev)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+
+ pfvf->queue_to_pfc_map = devm_kzalloc(pfvf->dev, pfvf->hw.rx_queues,
+ GFP_KERNEL);
+ if (!pfvf->queue_to_pfc_map)
+ return -ENOMEM;
+ dev->dcbnl_ops = &otx2_dcbnl_ops;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
new file mode 100644
index 000000000000..3284a2b353f0
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
@@ -0,0 +1,300 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU PF/VF Netdev Devlink
+ *
+ * Copyright (C) 2021 Marvell.
+ */
+
+#include "otx2_common.h"
+
+/* Devlink Params APIs */
+static int otx2_dl_mcam_count_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct otx2_devlink *otx2_dl = devlink_priv(devlink);
+ struct otx2_nic *pfvf = otx2_dl->pfvf;
+ struct otx2_flow_config *flow_cfg;
+
+ if (!pfvf->flow_cfg) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "pfvf->flow_cfg not initialized");
+ return -EINVAL;
+ }
+
+ flow_cfg = pfvf->flow_cfg;
+ if (flow_cfg && flow_cfg->nr_flows) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot modify count when there are active rules");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int otx2_dl_mcam_count_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct otx2_devlink *otx2_dl = devlink_priv(devlink);
+ struct otx2_nic *pfvf = otx2_dl->pfvf;
+
+ if (!pfvf->flow_cfg)
+ return 0;
+
+ otx2_alloc_mcam_entries(pfvf, ctx->val.vu16);
+ otx2_tc_alloc_ent_bitmap(pfvf);
+
+ return 0;
+}
+
+static int otx2_dl_mcam_count_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct otx2_devlink *otx2_dl = devlink_priv(devlink);
+ struct otx2_nic *pfvf = otx2_dl->pfvf;
+ struct otx2_flow_config *flow_cfg;
+
+ if (!pfvf->flow_cfg) {
+ ctx->val.vu16 = 0;
+ return 0;
+ }
+
+ flow_cfg = pfvf->flow_cfg;
+ ctx->val.vu16 = flow_cfg->max_flows;
+
+ return 0;
+}
+
+static int otx2_dl_rbuf_size_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ /* Hardware supports a maximum receive buffer size of 32K
+ * and 1536 bytes is the typical Ethernet frame size.
+ */
+ if (val.vu16 < 1536 || val.vu16 > 32768) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Receive buffer range is 1536 - 32768");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int otx2_dl_rbuf_size_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct otx2_devlink *otx2_dl = devlink_priv(devlink);
+ struct otx2_nic *pfvf = otx2_dl->pfvf;
+ struct net_device *netdev;
+ int err = 0;
+ bool if_up;
+
+ rtnl_lock();
+
+ netdev = pfvf->netdev;
+ if_up = netif_running(netdev);
+ if (if_up)
+ netdev->netdev_ops->ndo_stop(netdev);
+
+ pfvf->hw.rbuf_fixed_size = ALIGN(ctx->val.vu16, OTX2_ALIGN) +
+ OTX2_HEAD_ROOM;
+
+ if (if_up)
+ err = netdev->netdev_ops->ndo_open(netdev);
+
+ rtnl_unlock();
+
+ return err;
+}
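The stored rbuf_fixed_size is the requested value rounded up to the hardware alignment plus headroom. A user-space sketch of that rounding follows; OTX2_ALIGN and OTX2_HEAD_ROOM are assumed to be 128 and 256 bytes here purely for illustration, the real values come from otx2_common.h.

#include <stdio.h>

/* Illustration only: mirrors the ALIGN() + headroom computation above with
 * assumed constants. */
#define EX_ALIGN	128u
#define EX_HEAD_ROOM	256u
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int requested = 1700;
	unsigned int fixed = ALIGN_UP(requested, EX_ALIGN) + EX_HEAD_ROOM;

	printf("requested %u -> rbuf_fixed_size %u\n", requested, fixed);
	return 0;
}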
+
+static int otx2_dl_rbuf_size_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct otx2_devlink *otx2_dl = devlink_priv(devlink);
+ struct otx2_nic *pfvf = otx2_dl->pfvf;
+
+ ctx->val.vu16 = pfvf->hw.rbuf_fixed_size;
+
+ return 0;
+}
+
+static int otx2_dl_cqe_size_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ if (val.vu16 != 128 && val.vu16 != 512) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only 128 or 512 byte descriptor allowed");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int otx2_dl_cqe_size_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct otx2_devlink *otx2_dl = devlink_priv(devlink);
+ struct otx2_nic *pfvf = otx2_dl->pfvf;
+ struct net_device *netdev;
+ int err = 0;
+ bool if_up;
+
+ rtnl_lock();
+
+ netdev = pfvf->netdev;
+ if_up = netif_running(netdev);
+ if (if_up)
+ netdev->netdev_ops->ndo_stop(netdev);
+
+ pfvf->hw.xqe_size = ctx->val.vu16;
+
+ if (if_up)
+ err = netdev->netdev_ops->ndo_open(netdev);
+
+ rtnl_unlock();
+
+ return err;
+}
+
+static int otx2_dl_cqe_size_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct otx2_devlink *otx2_dl = devlink_priv(devlink);
+ struct otx2_nic *pfvf = otx2_dl->pfvf;
+
+ ctx->val.vu16 = pfvf->hw.xqe_size;
+
+ return 0;
+}
+
+static int otx2_dl_serdes_link_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct otx2_devlink *otx2_dl = devlink_priv(devlink);
+ struct otx2_nic *pfvf = otx2_dl->pfvf;
+
+ if (!is_otx2_vf(pfvf->pcifunc))
+ return otx2_config_serdes_link_state(pfvf, ctx->val.vbool);
+
+ return -EOPNOTSUPP;
+}
+
+static int otx2_dl_serdes_link_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct otx2_devlink *otx2_dl = devlink_priv(devlink);
+ struct otx2_nic *pfvf = otx2_dl->pfvf;
+
+ ctx->val.vbool = pfvf->linfo.link_up;
+
+ return 0;
+}
+
+enum otx2_dl_param_id {
+ OTX2_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ OTX2_DEVLINK_PARAM_ID_MCAM_COUNT,
+ OTX2_DEVLINK_PARAM_ID_CQE_SIZE,
+ OTX2_DEVLINK_PARAM_ID_RBUF_SIZE,
+ OTX2_DEVLINK_PARAM_ID_SERDES_LINK,
+};
+
+static const struct devlink_param otx2_dl_params[] = {
+ DEVLINK_PARAM_DRIVER(OTX2_DEVLINK_PARAM_ID_MCAM_COUNT,
+ "mcam_count", DEVLINK_PARAM_TYPE_U16,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ otx2_dl_mcam_count_get, otx2_dl_mcam_count_set,
+ otx2_dl_mcam_count_validate),
+ DEVLINK_PARAM_DRIVER(OTX2_DEVLINK_PARAM_ID_CQE_SIZE,
+ "completion_descriptor_size", DEVLINK_PARAM_TYPE_U16,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ otx2_dl_cqe_size_get, otx2_dl_cqe_size_set,
+ otx2_dl_cqe_size_validate),
+ DEVLINK_PARAM_DRIVER(OTX2_DEVLINK_PARAM_ID_RBUF_SIZE,
+ "receive_buffer_size", DEVLINK_PARAM_TYPE_U16,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ otx2_dl_rbuf_size_get, otx2_dl_rbuf_size_set,
+ otx2_dl_rbuf_size_validate),
+ DEVLINK_PARAM_DRIVER(OTX2_DEVLINK_PARAM_ID_SERDES_LINK,
+ "serdes_link", DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ otx2_dl_serdes_link_get, otx2_dl_serdes_link_set,
+ NULL),
+};
+
+/* Devlink OPs */
+static int otx2_devlink_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct otx2_devlink *otx2_dl = devlink_priv(devlink);
+ struct otx2_nic *pfvf = otx2_dl->pfvf;
+
+ if (is_otx2_vf(pfvf->pcifunc))
+ return devlink_info_driver_name_put(req, "rvu_nicvf");
+
+ return devlink_info_driver_name_put(req, "rvu_nicpf");
+}
+
+static const struct devlink_ops otx2_devlink_ops = {
+ .info_get = otx2_devlink_info_get,
+};
+
+int otx2_register_dl(struct otx2_nic *pfvf)
+{
+ struct otx2_devlink *otx2_dl;
+ struct devlink *dl;
+ int err;
+
+ dl = devlink_alloc(&otx2_devlink_ops, sizeof(struct otx2_devlink));
+ if (!dl) {
+ dev_warn(pfvf->dev, "devlink_alloc failed\n");
+ return -ENOMEM;
+ }
+
+ err = devlink_register(dl, pfvf->dev);
+ if (err) {
+ dev_err(pfvf->dev, "devlink register failed with error %d\n", err);
+ devlink_free(dl);
+ return err;
+ }
+
+ otx2_dl = devlink_priv(dl);
+ otx2_dl->dl = dl;
+ otx2_dl->pfvf = pfvf;
+ pfvf->dl = otx2_dl;
+
+ err = devlink_params_register(dl, otx2_dl_params,
+ ARRAY_SIZE(otx2_dl_params));
+ if (err) {
+ dev_err(pfvf->dev,
+ "devlink params register failed with error %d", err);
+ goto err_dl;
+ }
+
+ devlink_params_publish(dl);
+
+ return 0;
+
+err_dl:
+ devlink_unregister(dl);
+ devlink_free(dl);
+ return err;
+}
+
+void otx2_unregister_dl(struct otx2_nic *pfvf)
+{
+ struct otx2_devlink *otx2_dl = pfvf->dl;
+ struct devlink *dl;
+
+ if (!otx2_dl || !otx2_dl->dl)
+ return;
+
+ dl = otx2_dl->dl;
+
+ devlink_params_unregister(dl, otx2_dl_params,
+ ARRAY_SIZE(otx2_dl_params));
+
+ devlink_unregister(dl);
+ devlink_free(dl);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.h
new file mode 100644
index 000000000000..c7bd4f3c6c6b
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU PF/VF Netdev Devlink
+ *
+ * Copyright (C) 2021 Marvell.
+ *
+ */
+
+#ifndef OTX2_DEVLINK_H
+#define OTX2_DEVLINK_H
+
+struct otx2_devlink {
+ struct devlink *dl;
+ struct otx2_nic *pfvf;
+};
+
+/* Devlink APIs */
+int otx2_register_dl(struct otx2_nic *pfvf);
+void otx2_unregister_dl(struct otx2_nic *pfvf);
+
+#endif /* OTX2_DEVLINK_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c
new file mode 100644
index 000000000000..2ec800f741d8
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2021 Marvell.
+ *
+ */
+
+#include "otx2_common.h"
+
+static int otx2_dmacflt_do_add(struct otx2_nic *pf, const u8 *mac,
+ u8 *dmac_index)
+{
+ struct cgx_mac_addr_add_req *req;
+ struct cgx_mac_addr_add_rsp *rsp;
+ int err;
+
+ mutex_lock(&pf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_cgx_mac_addr_add(&pf->mbox);
+ if (!req) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ ether_addr_copy(req->mac_addr, mac);
+ err = otx2_sync_mbox_msg(&pf->mbox);
+
+ if (!err) {
+ rsp = (struct cgx_mac_addr_add_rsp *)
+ otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
+ *dmac_index = rsp->index;
+ }
+
+ mutex_unlock(&pf->mbox.lock);
+ return err;
+}
+
+static int otx2_dmacflt_add_pfmac(struct otx2_nic *pf)
+{
+ struct cgx_mac_addr_set_or_get *req;
+ int err;
+
+ mutex_lock(&pf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_cgx_mac_addr_set(&pf->mbox);
+ if (!req) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ ether_addr_copy(req->mac_addr, pf->netdev->dev_addr);
+ err = otx2_sync_mbox_msg(&pf->mbox);
+
+ mutex_unlock(&pf->mbox.lock);
+ return err;
+}
+
+int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u8 bit_pos)
+{
+ u8 *dmacindex;
+
+ /* Store the dmac index returned by the CGX/RPM driver; it is
+ * used later for MAC address update/remove.
+ */
+ dmacindex = &pf->flow_cfg->bmap_to_dmacindex[bit_pos];
+
+ if (ether_addr_equal(mac, pf->netdev->dev_addr))
+ return otx2_dmacflt_add_pfmac(pf);
+ else
+ return otx2_dmacflt_do_add(pf, mac, dmacindex);
+}
+
+static int otx2_dmacflt_do_remove(struct otx2_nic *pfvf, const u8 *mac,
+ u8 dmac_index)
+{
+ struct cgx_mac_addr_del_req *req;
+ int err;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_mac_addr_del(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->index = dmac_index;
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+
+ return err;
+}
+
+static int otx2_dmacflt_remove_pfmac(struct otx2_nic *pf)
+{
+ struct msg_req *req;
+ int err;
+
+ mutex_lock(&pf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_mac_addr_reset(&pf->mbox);
+ if (!req) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+
+ mutex_unlock(&pf->mbox.lock);
+ return err;
+}
+
+int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac,
+ u8 bit_pos)
+{
+ u8 dmacindex = pf->flow_cfg->bmap_to_dmacindex[bit_pos];
+
+ if (ether_addr_equal(mac, pf->netdev->dev_addr))
+ return otx2_dmacflt_remove_pfmac(pf);
+ else
+ return otx2_dmacflt_do_remove(pf, mac, dmacindex);
+}
+
+/* CGX/RPM blocks support a maximum of 32 unicast DMAC entries.
+ * In a typical configuration a MAC block is associated with
+ * 4 LMACs, so each LMAC gets 8 DMAC entries.
+ */
+int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf)
+{
+ struct cgx_max_dmac_entries_get_rsp *rsp;
+ struct msg_req *msg;
+ int err;
+
+ mutex_lock(&pf->mbox.lock);
+ msg = otx2_mbox_alloc_msg_cgx_mac_max_entries_get(&pf->mbox);
+
+ if (!msg) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ if (err)
+ goto out;
+
+ rsp = (struct cgx_max_dmac_entries_get_rsp *)
+ otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &msg->hdr);
+ pf->flow_cfg->dmacflt_max_flows = rsp->max_dmac_filters;
+
+out:
+ mutex_unlock(&pf->mbox.lock);
+ return err;
+}
+
+int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u8 bit_pos)
+{
+ struct cgx_mac_addr_update_req *req;
+ int rc;
+
+ mutex_lock(&pf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_cgx_mac_addr_update(&pf->mbox);
+
+ if (!req) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ ether_addr_copy(req->mac_addr, mac);
+ req->index = pf->flow_cfg->bmap_to_dmacindex[bit_pos];
+ rc = otx2_sync_mbox_msg(&pf->mbox);
+
+ mutex_unlock(&pf->mbox.lock);
+ return rc;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index c6d408de0605..94a4043d47a3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -1,11 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
-/* Marvell OcteonTx2 RVU Ethernet driver
+/* Marvell RVU Ethernet driver
*
- * Copyright (C) 2020 Marvell International Ltd.
+ * Copyright (C) 2020 Marvell.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/pci.h>
@@ -14,12 +11,20 @@
#include <linux/etherdevice.h>
#include <linux/log2.h>
#include <linux/net_tstamp.h>
+#include <linux/linkmode.h>
#include "otx2_common.h"
#include "otx2_ptp.h"
-#define DRV_NAME "octeontx2-nicpf"
-#define DRV_VF_NAME "octeontx2-nicvf"
+#define DRV_NAME "rvu-nicpf"
+#define DRV_VF_NAME "rvu-nicvf"
+
+static const char otx2_priv_flags_strings[][ETH_GSTRING_LEN] = {
+ "pam4",
+ "edsa",
+ "higig2",
+ "fdsa",
+};
struct otx2_stat {
char name[ETH_GSTRING_LEN];
@@ -32,6 +37,11 @@ struct otx2_stat {
.index = offsetof(struct otx2_dev_stats, stat) / sizeof(u64), \
}
+enum link_mode {
+ OTX2_MODE_SUPPORTED,
+ OTX2_MODE_ADVERTISED
+};
+
static const struct otx2_stat otx2_dev_stats[] = {
OTX2_DEV_STAT(rx_ucast_frames),
OTX2_DEV_STAT(rx_bcast_frames),
@@ -66,6 +76,8 @@ static const unsigned int otx2_n_dev_stats = ARRAY_SIZE(otx2_dev_stats);
static const unsigned int otx2_n_drv_stats = ARRAY_SIZE(otx2_drv_stats);
static const unsigned int otx2_n_queue_stats = ARRAY_SIZE(otx2_queue_stats);
+static struct cgx_fw_data *otx2_get_fwdata(struct otx2_nic *pfvf);
+
static void otx2_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
@@ -87,7 +99,7 @@ static void otx2_get_qset_strings(struct otx2_nic *pfvf, u8 **data, int qset)
*data += ETH_GSTRING_LEN;
}
}
- for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
+ for (qidx = 0; qidx < pfvf->hw.tot_tx_queues; qidx++) {
for (stats = 0; stats < otx2_n_queue_stats; stats++) {
sprintf(*data, "txq%d: %s", qidx + start_qidx,
otx2_queue_stats[stats].name);
@@ -101,6 +113,12 @@ static void otx2_get_strings(struct net_device *netdev, u32 sset, u8 *data)
struct otx2_nic *pfvf = netdev_priv(netdev);
int stats;
+ if (sset == ETH_SS_PRIV_FLAGS) {
+ memcpy(data, otx2_priv_flags_strings,
+ ARRAY_SIZE(otx2_priv_flags_strings) * ETH_GSTRING_LEN);
+ return;
+ }
+
if (sset != ETH_SS_STATS)
return;
@@ -116,18 +134,24 @@ static void otx2_get_strings(struct net_device *netdev, u32 sset, u8 *data)
otx2_get_qset_strings(pfvf, &data, 0);
- for (stats = 0; stats < CGX_RX_STATS_COUNT; stats++) {
- sprintf(data, "cgx_rxstat%d: ", stats);
- data += ETH_GSTRING_LEN;
- }
+ if (!test_bit(CN10K_RPM, &pfvf->hw.cap_flag)) {
+ for (stats = 0; stats < CGX_RX_STATS_COUNT; stats++) {
+ sprintf(data, "cgx_rxstat%d: ", stats);
+ data += ETH_GSTRING_LEN;
+ }
- for (stats = 0; stats < CGX_TX_STATS_COUNT; stats++) {
- sprintf(data, "cgx_txstat%d: ", stats);
- data += ETH_GSTRING_LEN;
+ for (stats = 0; stats < CGX_TX_STATS_COUNT; stats++) {
+ sprintf(data, "cgx_txstat%d: ", stats);
+ data += ETH_GSTRING_LEN;
+ }
}
strcpy(data, "reset_count");
data += ETH_GSTRING_LEN;
+ sprintf(data, "Fec Corrected Errors: ");
+ data += ETH_GSTRING_LEN;
+ sprintf(data, "Fec Uncorrected Errors: ");
+ data += ETH_GSTRING_LEN;
}
static void otx2_get_qset_stats(struct otx2_nic *pfvf,
@@ -148,7 +172,7 @@ static void otx2_get_qset_stats(struct otx2_nic *pfvf,
[otx2_queue_stats[stat].index];
}
- for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
+ for (qidx = 0; qidx < pfvf->hw.tot_tx_queues; qidx++) {
if (!otx2_update_sq_stats(pfvf, qidx)) {
for (stat = 0; stat < otx2_n_queue_stats; stat++)
*((*data)++) = 0;
@@ -160,11 +184,30 @@ static void otx2_get_qset_stats(struct otx2_nic *pfvf,
}
}
+static int otx2_get_phy_fec_stats(struct otx2_nic *pfvf)
+{
+ struct msg_req *req;
+ int rc = -ENOMEM;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_get_phy_fec_stats(&pfvf->mbox);
+ if (!req)
+ goto end;
+
+ if (!otx2_sync_mbox_msg(&pfvf->mbox))
+ rc = 0;
+end:
+ mutex_unlock(&pfvf->mbox.lock);
+ return rc;
+}
+
/* Get device and per queue statistics */
static void otx2_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
+ u64 fec_corr_blks, fec_uncorr_blks;
+ struct cgx_fw_data *rsp;
int stat;
otx2_get_dev_stats(pfvf);
@@ -177,27 +220,63 @@ static void otx2_get_ethtool_stats(struct net_device *netdev,
[otx2_drv_stats[stat].index]);
otx2_get_qset_stats(pfvf, stats, &data);
- otx2_update_lmac_stats(pfvf);
- for (stat = 0; stat < CGX_RX_STATS_COUNT; stat++)
- *(data++) = pfvf->hw.cgx_rx_stats[stat];
- for (stat = 0; stat < CGX_TX_STATS_COUNT; stat++)
- *(data++) = pfvf->hw.cgx_tx_stats[stat];
+
+ if (!test_bit(CN10K_RPM, &pfvf->hw.cap_flag)) {
+ otx2_update_lmac_stats(pfvf);
+ for (stat = 0; stat < CGX_RX_STATS_COUNT; stat++)
+ *(data++) = pfvf->hw.cgx_rx_stats[stat];
+ for (stat = 0; stat < CGX_TX_STATS_COUNT; stat++)
+ *(data++) = pfvf->hw.cgx_tx_stats[stat];
+ }
+
*(data++) = pfvf->reset_count;
+
+ fec_corr_blks = pfvf->hw.cgx_fec_corr_blks;
+ fec_uncorr_blks = pfvf->hw.cgx_fec_uncorr_blks;
+
+ rsp = otx2_get_fwdata(pfvf);
+ if (!IS_ERR(rsp) && rsp->fwdata.phy.misc.has_fec_stats &&
+ !otx2_get_phy_fec_stats(pfvf)) {
+ /* Fetch fwdata again because it's been recently populated with
+ * latest PHY FEC stats.
+ */
+ rsp = otx2_get_fwdata(pfvf);
+ if (!IS_ERR(rsp)) {
+ struct fec_stats_s *p = &rsp->fwdata.phy.fec_stats;
+
+ if (pfvf->linfo.fec == OTX2_FEC_BASER) {
+ fec_corr_blks = p->brfec_corr_blks;
+ fec_uncorr_blks = p->brfec_uncorr_blks;
+ } else {
+ fec_corr_blks = p->rsfec_corr_cws;
+ fec_uncorr_blks = p->rsfec_uncorr_cws;
+ }
+ }
+ }
+
+ *(data++) = fec_corr_blks;
+ *(data++) = fec_uncorr_blks;
}
static int otx2_get_sset_count(struct net_device *netdev, int sset)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
- int qstats_count;
+ int qstats_count, mac_stats = 0;
+
+ if (sset == ETH_SS_PRIV_FLAGS)
+ return ARRAY_SIZE(otx2_priv_flags_strings);
if (sset != ETH_SS_STATS)
return -EINVAL;
qstats_count = otx2_n_queue_stats *
- (pfvf->hw.rx_queues + pfvf->hw.tx_queues);
+ (pfvf->hw.rx_queues + pfvf->hw.tot_tx_queues);
+ if (!test_bit(CN10K_RPM, &pfvf->hw.cap_flag))
+ mac_stats = CGX_RX_STATS_COUNT + CGX_TX_STATS_COUNT;
+ otx2_update_lmac_fec_stats(pfvf);
return otx2_n_dev_stats + otx2_n_drv_stats + qstats_count +
- CGX_RX_STATS_COUNT + CGX_TX_STATS_COUNT + 1;
+ mac_stats + OTX2_FEC_STATS_CNT + 1;
}
/* Get no of queues device supports and current queue count */
@@ -224,6 +303,12 @@ static int otx2_set_channels(struct net_device *dev,
if (!channel->rx_count || !channel->tx_count)
return -EINVAL;
+ if (bitmap_weight(&pfvf->rq_bmap, pfvf->hw.rx_queues) > 1) {
+ netdev_err(dev,
+ "Receive queues are in use by TC police action\n");
+ return -EINVAL;
+ }
+
if (if_up)
dev->netdev_ops->ndo_stop(dev);
@@ -234,6 +319,9 @@ static int otx2_set_channels(struct net_device *dev,
pfvf->hw.rx_queues = channel->rx_count;
pfvf->hw.tx_queues = channel->tx_count;
+ if (pfvf->xdp_prog)
+ pfvf->hw.xdp_queues = channel->rx_count;
+ pfvf->hw.tot_tx_queues = pfvf->hw.tx_queues + pfvf->hw.xdp_queues;
pfvf->qset.cq_cnt = pfvf->hw.tx_queues + pfvf->hw.rx_queues;
if (if_up)
@@ -254,9 +342,12 @@ static void otx2_get_pauseparam(struct net_device *netdev,
if (is_otx2_lbkvf(pfvf->pdev))
return;
+ mutex_lock(&pfvf->mbox.lock);
req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(&pfvf->mbox);
- if (!req)
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
return;
+ }
if (!otx2_sync_mbox_msg(&pfvf->mbox)) {
rsp = (struct cgx_pause_frm_cfg *)
@@ -264,6 +355,7 @@ static void otx2_get_pauseparam(struct net_device *netdev,
pause->rx_pause = rsp->rx_pause;
pause->tx_pause = rsp->tx_pause;
}
+ mutex_unlock(&pfvf->mbox.lock);
}
static int otx2_set_pauseparam(struct net_device *netdev,
@@ -447,10 +539,14 @@ static int otx2_get_rss_hash_opts(struct otx2_nic *pfvf,
nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
break;
case AH_ESP_V4_FLOW:
+ case AH_ESP_V6_FLOW:
+ if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_ESP)
+ nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
case AH_V4_FLOW:
case ESP_V4_FLOW:
case IPV4_FLOW:
- case AH_ESP_V6_FLOW:
+ break;
case AH_V6_FLOW:
case ESP_V6_FLOW:
case IPV6_FLOW:
@@ -458,6 +554,7 @@ static int otx2_get_rss_hash_opts(struct otx2_nic *pfvf,
default:
return -EINVAL;
}
+
return 0;
}
@@ -526,6 +623,36 @@ static int otx2_set_rss_hash_opts(struct otx2_nic *pfvf,
return -EINVAL;
}
break;
+ case AH_ESP_V4_FLOW:
+ case AH_ESP_V6_FLOW:
+ switch (nfc->data & rxh_l4) {
+ case 0:
+ rss_cfg &= ~(NIX_FLOW_KEY_TYPE_ESP |
+ NIX_FLOW_KEY_TYPE_AH);
+ rss_cfg |= NIX_FLOW_KEY_TYPE_VLAN |
+ NIX_FLOW_KEY_TYPE_IPV4_PROTO;
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ /* If VLAN hashing is also requested for ESP then do not
+ * allow it because of the hardware's 40-byte flow key limit.
+ */
+ if (rss_cfg & NIX_FLOW_KEY_TYPE_VLAN) {
+ netdev_err(pfvf->netdev,
+ "RSS hash of ESP or AH with VLAN is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ rss_cfg |= NIX_FLOW_KEY_TYPE_ESP | NIX_FLOW_KEY_TYPE_AH;
+ /* Disable IPv4 proto hashing since IPv6 SA+DA (32 bytes)
+ * plus ESP SPI+sequence (8 bytes) already consumes the
+ * hardware's maximum 40-byte flow key.
+ */
+ rss_cfg &= ~NIX_FLOW_KEY_TYPE_IPV4_PROTO;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
case IPV4_FLOW:
case IPV6_FLOW:
rss_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
@@ -542,6 +669,7 @@ static int otx2_set_rss_hash_opts(struct otx2_nic *pfvf,
static int otx2_get_rxnfc(struct net_device *dev,
struct ethtool_rxnfc *nfc, u32 *rules)
{
+ bool ntuple = !!(dev->features & NETIF_F_NTUPLE);
struct otx2_nic *pfvf = netdev_priv(dev);
int ret = -EOPNOTSUPP;
@@ -550,6 +678,20 @@ static int otx2_get_rxnfc(struct net_device *dev,
nfc->data = pfvf->hw.rx_queues;
ret = 0;
break;
+ case ETHTOOL_GRXCLSRLCNT:
+ if (netif_running(dev) && ntuple) {
+ nfc->rule_cnt = pfvf->flow_cfg->nr_flows;
+ ret = 0;
+ }
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ if (netif_running(dev) && ntuple)
+ ret = otx2_get_flow(pfvf, nfc, nfc->fs.location);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ if (netif_running(dev) && ntuple)
+ ret = otx2_get_all_flows(pfvf, nfc, rules);
+ break;
case ETHTOOL_GRXFH:
return otx2_get_rss_hash_opts(pfvf, nfc);
default:
@@ -560,6 +702,7 @@ static int otx2_get_rxnfc(struct net_device *dev,
static int otx2_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc)
{
+ bool ntuple = !!(dev->features & NETIF_F_NTUPLE);
struct otx2_nic *pfvf = netdev_priv(dev);
int ret = -EOPNOTSUPP;
@@ -567,6 +710,14 @@ static int otx2_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc)
case ETHTOOL_SRXFH:
ret = otx2_set_rss_hash_opts(pfvf, nfc);
break;
+ case ETHTOOL_SRXCLSRLINS:
+ if (netif_running(dev) && ntuple)
+ ret = otx2_add_flow(pfvf, nfc);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ if (netif_running(dev) && ntuple)
+ ret = otx2_remove_flow(pfvf, nfc->fs.location);
+ break;
default:
break;
}
@@ -586,46 +737,59 @@ static u32 otx2_get_rxfh_key_size(struct net_device *netdev)
static u32 otx2_get_rxfh_indir_size(struct net_device *dev)
{
- struct otx2_nic *pfvf = netdev_priv(dev);
-
- return pfvf->hw.rss_info.rss_size;
+ return MAX_RSS_INDIR_TBL_SIZE;
}
-/* Get RSS configuration */
-static int otx2_get_rxfh(struct net_device *dev, u32 *indir,
- u8 *hkey, u8 *hfunc)
+static int otx2_rss_ctx_delete(struct otx2_nic *pfvf, int ctx_id)
{
- struct otx2_nic *pfvf = netdev_priv(dev);
- struct otx2_rss_info *rss;
- int idx;
+ struct otx2_rss_info *rss = &pfvf->hw.rss_info;
- rss = &pfvf->hw.rss_info;
+ otx2_rss_ctx_flow_del(pfvf, ctx_id);
+ kfree(rss->rss_ctx[ctx_id]);
+ rss->rss_ctx[ctx_id] = NULL;
- if (indir) {
- for (idx = 0; idx < rss->rss_size; idx++)
- indir[idx] = rss->ind_tbl[idx];
- }
+ return 0;
+}
- if (hkey)
- memcpy(hkey, rss->key, sizeof(rss->key));
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+static int otx2_rss_ctx_create(struct otx2_nic *pfvf,
+ u32 *rss_context)
+{
+ struct otx2_rss_info *rss = &pfvf->hw.rss_info;
+ u8 ctx;
+
+ for (ctx = 0; ctx < MAX_RSS_GROUPS; ctx++) {
+ if (!rss->rss_ctx[ctx])
+ break;
+ }
+ if (ctx == MAX_RSS_GROUPS)
+ return -EINVAL;
+
+ rss->rss_ctx[ctx] = kzalloc(sizeof(*rss->rss_ctx[ctx]), GFP_KERNEL);
+ if (!rss->rss_ctx[ctx])
+ return -ENOMEM;
+ *rss_context = ctx;
return 0;
}
-/* Configure RSS table and hash key */
-static int otx2_set_rxfh(struct net_device *dev, const u32 *indir,
- const u8 *hkey, const u8 hfunc)
+/* RSS context configuration */
+static int otx2_set_rxfh_context(struct net_device *dev, const u32 *indir,
+ const u8 *hkey, const u8 hfunc,
+ u32 *rss_context, bool delete)
{
struct otx2_nic *pfvf = netdev_priv(dev);
+ struct otx2_rss_ctx *rss_ctx;
struct otx2_rss_info *rss;
- int idx;
+ int ret, idx;
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
+ if (*rss_context != ETH_RXFH_CONTEXT_ALLOC &&
+ *rss_context >= MAX_RSS_GROUPS)
+ return -EINVAL;
+
rss = &pfvf->hw.rss_info;
if (!rss->enable) {
@@ -633,20 +797,83 @@ static int otx2_set_rxfh(struct net_device *dev, const u32 *indir,
return -EIO;
}
+ if (hkey) {
+ memcpy(rss->key, hkey, sizeof(rss->key));
+ otx2_set_rss_key(pfvf);
+ }
+ if (delete)
+ return otx2_rss_ctx_delete(pfvf, *rss_context);
+
+ if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
+ ret = otx2_rss_ctx_create(pfvf, rss_context);
+ if (ret)
+ return ret;
+ }
if (indir) {
+ rss_ctx = rss->rss_ctx[*rss_context];
for (idx = 0; idx < rss->rss_size; idx++)
- rss->ind_tbl[idx] = indir[idx];
+ rss_ctx->ind_tbl[idx] = indir[idx];
}
- if (hkey) {
- memcpy(rss->key, hkey, sizeof(rss->key));
- otx2_set_rss_key(pfvf);
+ return 0;
+}
+
+static int otx2_get_rxfh_context(struct net_device *dev, u32 *indir,
+ u8 *hkey, u8 *hfunc, u32 rss_context)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+ struct otx2_rss_ctx *rss_ctx;
+ struct otx2_rss_info *rss;
+ int idx, rx_queues;
+
+ rss = &pfvf->hw.rss_info;
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
+ if (!indir)
+ return 0;
+
+ if (!rss->enable && rss_context == DEFAULT_RSS_CONTEXT_GROUP) {
+ rx_queues = pfvf->hw.rx_queues;
+ for (idx = 0; idx < MAX_RSS_INDIR_TBL_SIZE; idx++)
+ indir[idx] = ethtool_rxfh_indir_default(idx, rx_queues);
+ return 0;
+ }
+ if (rss_context >= MAX_RSS_GROUPS)
+ return -ENOENT;
+
+ rss_ctx = rss->rss_ctx[rss_context];
+ if (!rss_ctx)
+ return -ENOENT;
+
+ if (indir) {
+ for (idx = 0; idx < rss->rss_size; idx++)
+ indir[idx] = rss_ctx->ind_tbl[idx];
}
+ if (hkey)
+ memcpy(hkey, rss->key, sizeof(rss->key));
- otx2_set_rss_table(pfvf);
return 0;
}
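When RSS is not yet enabled, the default context above reports the same round-robin table that ethtool_rxfh_indir_default() produces (slot index modulo the Rx queue count); a stand-alone sketch, for illustration only:

#include <stdio.h>

/* Stand-alone mirror of ethtool_rxfh_indir_default(): a round-robin spread
 * of indirection table slots across the available Rx queues. */
int main(void)
{
	unsigned int rx_queues = 4;
	unsigned int idx;

	for (idx = 0; idx < 16; idx++)	/* real table is larger; truncated here */
		printf("indir[%u] = %u\n", idx, idx % rx_queues);
	return 0;
}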
+/* Get RSS configuration */
+static int otx2_get_rxfh(struct net_device *dev, u32 *indir,
+ u8 *hkey, u8 *hfunc)
+{
+ return otx2_get_rxfh_context(dev, indir, hkey, hfunc,
+ DEFAULT_RSS_CONTEXT_GROUP);
+}
+
+/* Configure RSS table and hash key */
+static int otx2_set_rxfh(struct net_device *dev, const u32 *indir,
+ const u8 *hkey, const u8 hfunc)
+{
+ u32 rss_context = DEFAULT_RSS_CONTEXT_GROUP;
+
+ return otx2_set_rxfh_context(dev, indir, hkey, hfunc, &rss_context, 0);
+}
+
static u32 otx2_get_msglevel(struct net_device *netdev)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
@@ -688,15 +915,602 @@ static int otx2_get_ts_info(struct net_device *netdev,
info->phc_index = otx2_ptp_clock_index(pfvf);
- info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
+ if (test_bit(CN10K_PTP_ONESTEP, &pfvf->hw.cap_flag))
+ info->tx_types |= BIT(HWTSTAMP_TX_ONESTEP_SYNC);
+
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
+static struct cgx_fw_data *otx2_get_fwdata(struct otx2_nic *pfvf)
+{
+ struct cgx_fw_data *rsp = NULL;
+ struct msg_req *req;
+ int err = 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_get_aux_link_info(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (!err) {
+ rsp = (struct cgx_fw_data *)
+ otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+ } else {
+ rsp = ERR_PTR(err);
+ }
- info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
- (1 << HWTSTAMP_FILTER_ALL);
+ mutex_unlock(&pfvf->mbox.lock);
+ return rsp;
+}
+static int otx2_get_fecparam(struct net_device *netdev,
+ struct ethtool_fecparam *fecparam)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct cgx_fw_data *rsp;
+ const int fec[] = {
+ ETHTOOL_FEC_OFF,
+ ETHTOOL_FEC_BASER,
+ ETHTOOL_FEC_RS,
+ ETHTOOL_FEC_BASER | ETHTOOL_FEC_RS
+ };
+
+ if (pfvf->linfo.fec < ARRAY_SIZE(fec))
+ fecparam->active_fec = fec[pfvf->linfo.fec];
+
+ rsp = otx2_get_fwdata(pfvf);
+ if (IS_ERR(rsp))
+ return PTR_ERR(rsp);
+
+ if (rsp->fwdata.supported_fec < ARRAY_SIZE(fec)) {
+ if (!rsp->fwdata.supported_fec)
+ fecparam->fec = ETHTOOL_FEC_NONE;
+ else
+ fecparam->fec = fec[rsp->fwdata.supported_fec];
+ }
return 0;
}
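The firmware FEC fields behave as 2-bit masks (bit 0 = BASE-R, bit 1 = RS), so they index straight into the fec[] table above. A small stand-alone illustration:

#include <stdio.h>

/* Illustration of the fec[] lookup above: a 2-bit firmware FEC mask indexes
 * directly into the table of ethtool FEC modes. */
int main(void)
{
	static const char * const fec[] = {
		"OFF", "BASER", "RS", "BASER | RS"
	};
	int supported;

	for (supported = 0; supported < 4; supported++)
		printf("supported_fec=%d -> ETHTOOL_FEC_%s\n",
		       supported, fec[supported]);
	return 0;
}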
-static const struct ethtool_ops otx2_ethtool_ops = {
+static int otx2_set_fecparam(struct net_device *netdev,
+ struct ethtool_fecparam *fecparam)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct mbox *mbox = &pfvf->mbox;
+ struct fec_mode *req, *rsp;
+ int err = 0, fec = 0;
+
+ switch (fecparam->fec) {
+ /* Firmware does not support AUTO mode; treat it as FEC_OFF */
+ case ETHTOOL_FEC_OFF:
+ case ETHTOOL_FEC_AUTO:
+ fec = OTX2_FEC_OFF;
+ break;
+ case ETHTOOL_FEC_RS:
+ fec = OTX2_FEC_RS;
+ break;
+ case ETHTOOL_FEC_BASER:
+ fec = OTX2_FEC_BASER;
+ break;
+ default:
+ netdev_warn(pfvf->netdev, "Unsupported FEC mode: %d",
+ fecparam->fec);
+ return -EINVAL;
+ }
+
+ if (fec == pfvf->linfo.fec)
+ return 0;
+
+ mutex_lock(&mbox->lock);
+ req = otx2_mbox_alloc_msg_cgx_set_fec_param(&pfvf->mbox);
+ if (!req) {
+ err = -ENOMEM;
+ goto end;
+ }
+ req->fec = fec;
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err)
+ goto end;
+
+ rsp = (struct fec_mode *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
+ 0, &req->hdr);
+ if (rsp->fec >= 0)
+ pfvf->linfo.fec = rsp->fec;
+ else
+ err = rsp->fec;
+end:
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+}
+
+static void otx2_get_fec_info(u64 index, int req_mode,
+ struct ethtool_link_ksettings *link_ksettings)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(otx2_fec_modes) = { 0, };
+
+ switch (index) {
+ case OTX2_FEC_NONE:
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
+ otx2_fec_modes);
+ break;
+ case OTX2_FEC_BASER:
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
+ otx2_fec_modes);
+ break;
+ case OTX2_FEC_RS:
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
+ otx2_fec_modes);
+ break;
+ case OTX2_FEC_BASER | OTX2_FEC_RS:
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
+ otx2_fec_modes);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
+ otx2_fec_modes);
+ break;
+ }
+
+ /* Add fec modes to existing modes */
+ if (req_mode == OTX2_MODE_ADVERTISED)
+ linkmode_or(link_ksettings->link_modes.advertising,
+ link_ksettings->link_modes.advertising,
+ otx2_fec_modes);
+ else
+ linkmode_or(link_ksettings->link_modes.supported,
+ link_ksettings->link_modes.supported,
+ otx2_fec_modes);
+}
+
+static void otx2_get_link_mode_info(u64 link_mode_bmap,
+ bool req_mode,
+ struct ethtool_link_ksettings
+ *link_ksettings)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(otx2_link_modes) = { 0, };
+ const int otx2_sgmii_features[6] = {
+ ETHTOOL_LINK_MODE_10baseT_Half_BIT,
+ ETHTOOL_LINK_MODE_10baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+ ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ };
+ /* CGX link modes to Ethtool link mode mapping */
+ const int cgx_link_mode[38] = {
+ 0, /*SGMII Mode */
+ ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT,
+ ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
+ ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
+ ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseDR_Full_BIT,
+ ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
+ ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT,
+ ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT
+ };
+ u8 bit;
+
+ for_each_set_bit(bit, (unsigned long *)&link_mode_bmap,
+ ARRAY_SIZE(cgx_link_mode)) {
+ /* SGMII mode is set */
+ if (bit == 0)
+ linkmode_set_bit_array(otx2_sgmii_features,
+ ARRAY_SIZE(otx2_sgmii_features),
+ otx2_link_modes);
+ else
+ linkmode_set_bit(cgx_link_mode[bit], otx2_link_modes);
+ }
+
+ if (req_mode == OTX2_MODE_ADVERTISED)
+ linkmode_or(link_ksettings->link_modes.advertising,
+ link_ksettings->link_modes.advertising,
+ otx2_link_modes);
+ else
+ linkmode_or(link_ksettings->link_modes.supported,
+ link_ksettings->link_modes.supported,
+ otx2_link_modes);
+}
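A stand-alone sketch (illustration only) of how the firmware link-mode bitmap is walked: bit 0 expands to the whole SGMII speed family, while every other set bit maps one-to-one through cgx_link_mode[]:

#include <stdint.h>
#include <stdio.h>

/* Illustration only: walk a firmware link-mode bitmap the same way the loop
 * above does. */
int main(void)
{
	uint64_t link_mode_bmap = (1ULL << 0) | (1ULL << 3) | (1ULL << 12);
	unsigned int bit;

	for (bit = 0; bit < 38; bit++) {
		if (!(link_mode_bmap & (1ULL << bit)))
			continue;
		if (bit == 0)
			printf("bit 0 -> 10/100/1000BASE-T (SGMII) modes\n");
		else
			printf("bit %u -> cgx_link_mode[%u]\n", bit, bit);
	}
	return 0;
}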
+
+static int otx2_get_module_info(struct net_device *netdev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct cgx_fw_data *rsp;
+
+ rsp = otx2_get_fwdata(pfvf);
+ if (IS_ERR(rsp))
+ return PTR_ERR(rsp);
+
+ modinfo->type = rsp->fwdata.sfp_eeprom.sff_id;
+ modinfo->eeprom_len = SFP_EEPROM_SIZE;
+ return 0;
+}
+
+static int otx2_get_module_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *ee,
+ u8 *data)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct cgx_fw_data *rsp;
+
+ rsp = otx2_get_fwdata(pfvf);
+ if (IS_ERR(rsp))
+ return PTR_ERR(rsp);
+
+ memcpy(data, &rsp->fwdata.sfp_eeprom.buf, ee->len);
+
+ return 0;
+}
+
+static int otx2_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct cgx_fw_data *rsp = NULL;
+
+ cmd->base.duplex = pfvf->linfo.full_duplex;
+ cmd->base.speed = pfvf->linfo.speed;
+ cmd->base.autoneg = pfvf->linfo.an;
+
+ rsp = otx2_get_fwdata(pfvf);
+ if (IS_ERR(rsp))
+ return PTR_ERR(rsp);
+
+ if (rsp->fwdata.supported_an)
+ ethtool_link_ksettings_add_link_mode(cmd,
+ supported,
+ Autoneg);
+
+ otx2_get_link_mode_info(rsp->fwdata.advertised_link_modes,
+ OTX2_MODE_ADVERTISED, cmd);
+ otx2_get_fec_info(rsp->fwdata.advertised_fec,
+ OTX2_MODE_ADVERTISED, cmd);
+ otx2_get_link_mode_info(rsp->fwdata.supported_link_modes,
+ OTX2_MODE_SUPPORTED, cmd);
+ otx2_get_fec_info(rsp->fwdata.supported_fec,
+ OTX2_MODE_SUPPORTED, cmd);
+ return 0;
+}
+
+static void otx2_get_advertised_mode(const struct ethtool_link_ksettings *cmd,
+ u64 *mode)
+{
+ u32 bit_pos;
+
+ /* Firmware does not support requesting multiple advertised modes;
+ * return the first set bit.
+ */
+ bit_pos = find_first_bit(cmd->link_modes.advertising,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+ if (bit_pos != __ETHTOOL_LINK_MODE_MASK_NBITS)
+ *mode = bit_pos;
+}
+
+#define OTX2_OVERWRITE_DEF 1
+static int otx2_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ struct ethtool_link_ksettings cur_ks;
+ struct cgx_set_link_mode_req *req;
+ struct mbox *mbox = &pf->mbox;
+ int err = 0;
+
+ memset(&cur_ks, 0, sizeof(struct ethtool_link_ksettings));
+
+ if (!ethtool_validate_speed(cmd->base.speed) ||
+ !ethtool_validate_duplex(cmd->base.duplex))
+ return -EINVAL;
+
+ if (cmd->base.autoneg != AUTONEG_ENABLE &&
+ cmd->base.autoneg != AUTONEG_DISABLE)
+ return -EINVAL;
+
+ otx2_get_link_ksettings(netdev, &cur_ks);
+
+ /* Check requested modes against supported modes by hardware */
+ if (!bitmap_subset(cmd->link_modes.advertising,
+ cur_ks.link_modes.supported,
+ __ETHTOOL_LINK_MODE_MASK_NBITS))
+ return -EINVAL;
+
+ mutex_lock(&mbox->lock);
+ req = otx2_mbox_alloc_msg_cgx_set_link_mode(&pf->mbox);
+ if (!req) {
+ err = -ENOMEM;
+ goto end;
+ }
+
+ if (cmd->base.phy_address == OTX2_OVERWRITE_DEF) {
+ req->args.speed = cmd->base.speed;
+ /* Firmware expects 1 for half duplex and 0 for full duplex,
+ * hence the inversion.
+ */
+ req->args.duplex = cmd->base.duplex ^ 0x1;
+ req->args.an = cmd->base.autoneg;
+ } else {
+ req->args.speed = SPEED_UNKNOWN;
+ req->args.duplex = DUPLEX_UNKNOWN;
+ req->args.an = AUTONEG_UNKNOWN;
+ }
+
+ otx2_get_advertised_mode(cmd, &req->args.mode);
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+end:
+ mutex_unlock(&mbox->lock);
+ return err;
+}
+
+static u32 otx2_get_priv_flags(struct net_device *netdev)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct cgx_fw_data *rsp;
+
+ rsp = otx2_get_fwdata(pfvf);
+
+ if (IS_ERR(rsp)) {
+ pfvf->ethtool_flags &= ~OTX2_PRIV_FLAG_PAM4;
+ } else {
+ if (rsp->fwdata.phy.misc.mod_type)
+ pfvf->ethtool_flags |= OTX2_PRIV_FLAG_PAM4;
+ else
+ pfvf->ethtool_flags &= ~OTX2_PRIV_FLAG_PAM4;
+ }
+
+ return pfvf->ethtool_flags;
+}
+
+static int otx2_set_phy_mod_type(struct net_device *netdev, bool enable)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct cgx_phy_mod_type *req;
+ struct cgx_fw_data *fwd;
+ int rc = -EAGAIN;
+
+ fwd = otx2_get_fwdata(pfvf);
+ if (IS_ERR(fwd))
+ return -EAGAIN;
+
+ /* Return here if the PHY does not support this feature */
+ if (!fwd->fwdata.phy.misc.can_change_mod_type)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_set_phy_mod_type(&pfvf->mbox);
+ if (!req)
+ goto end;
+
+ req->mod = enable;
+
+ if (!otx2_sync_mbox_msg(&pfvf->mbox))
+ rc = 0;
+end:
+ mutex_unlock(&pfvf->mbox.lock);
+ return rc;
+}
+
+int otx2_set_npc_parse_mode(struct otx2_nic *pfvf, bool unbind)
+{
+ struct npc_set_pkind *req;
+ u32 interface_mode = 0;
+ int rc = -EAGAIN;
+
+ if (OTX2_IS_DEF_MODE_ENABLED(pfvf->ethtool_flags))
+ return 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_set_pkind(&pfvf->mbox);
+ if (!req)
+ goto end;
+
+ if (unbind) {
+ req->mode = OTX2_PRIV_FLAGS_DEFAULT;
+ interface_mode = OTX2_PRIV_FLAG_DEF_MODE;
+ } else if (OTX2_IS_HIGIG2_ENABLED(pfvf->ethtool_flags)) {
+ req->mode = OTX2_PRIV_FLAGS_HIGIG;
+ interface_mode = OTX2_PRIV_FLAG_HIGIG2_HDR;
+ } else if (OTX2_IS_EDSA_ENABLED(pfvf->ethtool_flags)) {
+ req->mode = OTX2_PRIV_FLAGS_EDSA;
+ interface_mode = OTX2_PRIV_FLAG_EDSA_HDR;
+ } else if (pfvf->ethtool_flags & OTX2_PRIV_FLAG_FDSA_HDR) {
+ req->mode = OTX2_PRIV_FLAGS_FDSA;
+ interface_mode = OTX2_PRIV_FLAG_FDSA_HDR;
+ } else {
+ req->mode = OTX2_PRIV_FLAGS_DEFAULT;
+ interface_mode = OTX2_PRIV_FLAG_DEF_MODE;
+ }
+
+ req->dir = PKIND_RX;
+
+ /* Request AF to change the pkind in both directions */
+ if (req->mode == OTX2_PRIV_FLAGS_HIGIG ||
+ req->mode == OTX2_PRIV_FLAGS_DEFAULT)
+ req->dir |= PKIND_TX;
+
+ if (!otx2_sync_mbox_msg(&pfvf->mbox))
+ rc = 0;
+ else
+ pfvf->ethtool_flags &= ~interface_mode;
+end:
+ mutex_unlock(&pfvf->mbox.lock);
+ return rc;
+}
+
+static int otx2_enable_addl_header(struct net_device *netdev, int bitpos,
+ u32 len, bool enable)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ bool if_up = netif_running(netdev);
+
+ if (enable) {
+ pfvf->ethtool_flags |= BIT(bitpos);
+ pfvf->ethtool_flags &= ~OTX2_PRIV_FLAG_DEF_MODE;
+ } else {
+ pfvf->ethtool_flags &= ~BIT(bitpos);
+ len = 0;
+ }
+
+ if (if_up)
+ otx2_stop(netdev);
+
+ /* Update max FRS so that additional hdrs are considered */
+ pfvf->addl_mtu = len;
+
+ /* In case HIGIG2 mode is set, the packet will have 16 bytes of
+ * extra header at the start which the stack does not need.
+ */
+ if (OTX2_IS_HIGIG2_ENABLED(pfvf->ethtool_flags))
+ pfvf->xtra_hdr = 16;
+ else
+ pfvf->xtra_hdr = 0;
+
+ /* NPC parse mode will be updated here */
+ if (if_up) {
+ otx2_open(netdev);
+
+ if (!enable)
+ pfvf->ethtool_flags |= OTX2_PRIV_FLAG_DEF_MODE;
+ }
+
+ return 0;
+}
+
+/* This function disables VF VLAN rules when FDSA is enabled
+ * and re-enables them when FDSA is disabled.
+ */
+static void otx2_endis_vfvlan_rules(struct otx2_nic *pfvf, bool enable)
+{
+ struct vfvlan *rule;
+ int vf;
+
+ for (vf = 0; vf < pci_num_vf(pfvf->pdev); vf++) {
+ /* pass vlan as 0 to disable rule */
+ if (enable) {
+ otx2_do_set_vf_vlan(pfvf, vf, 0, 0, 0);
+ } else {
+ rule = &pfvf->vf_configs[vf].rule;
+ otx2_do_set_vf_vlan(pfvf, vf, rule->vlan, rule->qos,
+ rule->proto);
+ }
+ }
+}
+
+static int otx2_set_priv_flags(struct net_device *netdev, u32 new_flags)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ bool enable = false;
+ int bitnr, rc = 0;
+ u32 chg_flags;
+
+ /* Get latest PAM4 settings */
+ otx2_get_priv_flags(netdev);
+
+ chg_flags = new_flags ^ pfvf->ethtool_flags;
+ if (!chg_flags)
+ return 0;
+
+ /* Some flags are mutually exclusive, so allow only one change at a time */
+ if (hweight32(chg_flags) != 1)
+ return -EINVAL;
+
+ bitnr = ffs(chg_flags) - 1;
+ if (new_flags & BIT(bitnr))
+ enable = true;
+
+ if ((BIT(bitnr) != OTX2_PRIV_FLAG_PAM4) && (pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED)) {
+ netdev_info(netdev, "Can't enable requested mode when PTP HW timestamping is ON\n");
+ return -EINVAL;
+ }
+
+ switch (BIT(bitnr)) {
+ case OTX2_PRIV_FLAG_PAM4:
+ rc = otx2_set_phy_mod_type(netdev, enable);
+ break;
+ case OTX2_PRIV_FLAG_EDSA_HDR:
+ /* EDSA is mutually exclusive with HIGIG2/FDSA */
+ if (enable && OTX2_IS_INTFMOD_SET(pfvf->ethtool_flags)) {
+ netdev_info(netdev,
+ "Disable mutually exclusive modes higig2/fdsa\n");
+ return -EINVAL;
+ }
+ return otx2_enable_addl_header(netdev, bitnr,
+ OTX2_EDSA_HDR_LEN, enable);
+ case OTX2_PRIV_FLAG_HIGIG2_HDR:
+ if (test_bit(CN10K_RPM, &pfvf->hw.cap_flag))
+ return -EOPNOTSUPP;
+
+ if (enable && OTX2_IS_INTFMOD_SET(pfvf->ethtool_flags)) {
+ netdev_info(netdev,
+ "Disable mutually exclusive modes edsa/fdsa\n");
+ return -EINVAL;
+ }
+ return otx2_enable_addl_header(netdev, bitnr,
+ OTX2_HIGIG2_HDR_LEN, enable);
+ case OTX2_PRIV_FLAG_FDSA_HDR:
+ if (enable && OTX2_IS_INTFMOD_SET(pfvf->ethtool_flags)) {
+ netdev_info(netdev,
+ "Disable mutually exclusive modes edsa/higig2\n");
+ return -EINVAL;
+ }
+ otx2_enable_addl_header(netdev, bitnr,
+ OTX2_FDSA_HDR_LEN, enable);
+ if (enable)
+ netdev_warn(netdev,
+ "Disabling VF VLAN rules as FDSA & VFVLAN are mutual exclusive\n");
+ otx2_endis_vfvlan_rules(pfvf, enable);
+ break;
+ default:
+ break;
+ }
+
+ /* save the change */
+ if (!rc) {
+ if (enable)
+ pfvf->ethtool_flags |= BIT(bitnr);
+ else
+ pfvf->ethtool_flags &= ~BIT(bitnr);
+ }
+
+ return rc;
+}
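The private-flag handler only accepts a single bit flip per call; below is a user-space mirror of that check (illustration only, assuming the flag bit positions follow otx2_priv_flags_strings[] order: bit 0 pam4, bit 1 edsa, bit 2 higig2, bit 3 fdsa):

#include <stdint.h>
#include <stdio.h>

/* Mirrors the "only one flag may change per call" check above. */
static int popcount32(uint32_t v)
{
	int n = 0;

	for (; v; v &= v - 1)
		n++;
	return n;
}

int main(void)
{
	uint32_t old_flags = 0x1;	/* pam4 already on */
	uint32_t new_flags = 0x5;	/* pam4 kept, higig2 toggled on */
	uint32_t chg = old_flags ^ new_flags;

	printf("changed bits: %d -> %s\n", popcount32(chg),
	       popcount32(chg) == 1 ? "allowed" : "rejected");
	return 0;
}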
+
+static struct ethtool_ops otx2_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES,
.get_link = otx2_get_link,
@@ -716,11 +1530,21 @@ static const struct ethtool_ops otx2_ethtool_ops = {
.get_rxfh_indir_size = otx2_get_rxfh_indir_size,
.get_rxfh = otx2_get_rxfh,
.set_rxfh = otx2_set_rxfh,
+ .get_rxfh_context = otx2_get_rxfh_context,
+ .set_rxfh_context = otx2_set_rxfh_context,
+ .get_ts_info = otx2_get_ts_info,
+ .get_link_ksettings = otx2_get_link_ksettings,
+ .set_link_ksettings = otx2_set_link_ksettings,
.get_msglevel = otx2_get_msglevel,
.set_msglevel = otx2_set_msglevel,
.get_pauseparam = otx2_get_pauseparam,
.set_pauseparam = otx2_set_pauseparam,
- .get_ts_info = otx2_get_ts_info,
+ .get_fecparam = otx2_get_fecparam,
+ .set_fecparam = otx2_set_fecparam,
+ .get_module_info = otx2_get_module_info,
+ .get_module_eeprom = otx2_get_module_eeprom,
+ .get_priv_flags = otx2_get_priv_flags,
+ .set_priv_flags = otx2_set_priv_flags,
};
void otx2_set_ethtool_ops(struct net_device *netdev)
@@ -795,6 +1619,20 @@ static int otx2vf_get_sset_count(struct net_device *netdev, int sset)
return otx2_n_dev_stats + otx2_n_drv_stats + qstats_count + 1;
}
+static int otx2vf_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+
+ if (pfvf->pdev->device == PCI_DEVID_OCTEONTX2_RVU_AFVF) {
+ cmd->base.duplex = DUPLEX_FULL;
+ cmd->base.speed = SPEED_100000;
+ } else {
+ return otx2_get_link_ksettings(netdev, cmd);
+ }
+ return 0;
+}
+
static const struct ethtool_ops otx2vf_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES,
@@ -811,6 +1649,8 @@ static const struct ethtool_ops otx2vf_ethtool_ops = {
.get_rxfh_indir_size = otx2_get_rxfh_indir_size,
.get_rxfh = otx2_get_rxfh,
.set_rxfh = otx2_set_rxfh,
+ .get_rxfh_context = otx2_get_rxfh_context,
+ .set_rxfh_context = otx2_set_rxfh_context,
.get_ringparam = otx2_get_ringparam,
.set_ringparam = otx2_set_ringparam,
.get_coalesce = otx2_get_coalesce,
@@ -819,6 +1659,8 @@ static const struct ethtool_ops otx2vf_ethtool_ops = {
.set_msglevel = otx2_set_msglevel,
.get_pauseparam = otx2_get_pauseparam,
.set_pauseparam = otx2_set_pauseparam,
+ .get_link_ksettings = otx2vf_get_link_ksettings,
+ .get_ts_info = otx2_get_ts_info,
};
void otx2vf_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
new file mode 100644
index 000000000000..2187ea798d05
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
@@ -0,0 +1,1478 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <net/ipv6.h>
+#include <linux/sort.h>
+
+#include "otx2_common.h"
+
+#define OTX2_DEFAULT_ACTION 0x1
+#define FDSA_MAX_SPORT 32
+#define FDSA_SPORT_MASK 0xf8
+
+static int otx2_mcam_entry_init(struct otx2_nic *pfvf);
+
+struct otx2_flow {
+ struct ethtool_rx_flow_spec flow_spec;
+ struct list_head list;
+ u32 location;
+ u16 entry;
+ bool is_vf;
+ u8 rss_ctx_id;
+#define DMAC_FILTER_RULE BIT(0)
+#define PFC_FLOWCTRL_RULE BIT(1)
+ u16 rule_type;
+ int vf;
+};
+
+enum dmac_req {
+ DMAC_ADDR_UPDATE,
+ DMAC_ADDR_DEL
+};
+
+static void otx2_clear_ntuple_flow_info(struct otx2_nic *pfvf, struct otx2_flow_config *flow_cfg)
+{
+ devm_kfree(pfvf->dev, flow_cfg->flow_ent);
+ flow_cfg->flow_ent = NULL;
+ flow_cfg->max_flows = 0;
+}
+
+static int otx2_free_ntuple_mcam_entries(struct otx2_nic *pfvf)
+{
+ struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
+ struct npc_mcam_free_entry_req *req;
+ int ent, err;
+
+ if (!flow_cfg->max_flows)
+ return 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+ for (ent = 0; ent < flow_cfg->max_flows; ent++) {
+ req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
+ if (!req)
+ break;
+
+ req->entry = flow_cfg->flow_ent[ent];
+
+ /* Send message to AF to free MCAM entries */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err)
+ break;
+ }
+ mutex_unlock(&pfvf->mbox.lock);
+ otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
+ return 0;
+}
+
+static int mcam_entry_cmp(const void *a, const void *b)
+{
+ return *(u16 *)a - *(u16 *)b;
+}
+
+int otx2_alloc_mcam_entries(struct otx2_nic *pfvf, u16 count)
+{
+ struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
+ struct npc_mcam_alloc_entry_req *req;
+ struct npc_mcam_alloc_entry_rsp *rsp;
+ int ent, allocated = 0;
+
+ /* Free current ones and allocate new ones with requested count */
+ otx2_free_ntuple_mcam_entries(pfvf);
+
+ if (!count)
+ return 0;
+
+ flow_cfg->flow_ent = devm_kmalloc_array(pfvf->dev, count,
+ sizeof(u16), GFP_KERNEL);
+ if (!flow_cfg->flow_ent) {
+ netdev_err(pfvf->netdev,
+ "%s: Unable to allocate memory for flow entries\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ /* A single request can allocate at most NPC_MAX_NONCONTIG_ENTRIES
+ * MCAM entries.
+ */
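+ /* Illustrative example (hypothetical per-request limit of 64):
+ * requesting 256 entries would take four mailbox round trips of
+ * 64 entries each.
+ */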
+ while (allocated < count) {
+ req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox);
+ if (!req)
+ goto exit;
+
+ req->contig = false;
+ req->count = (count - allocated) > NPC_MAX_NONCONTIG_ENTRIES ?
+ NPC_MAX_NONCONTIG_ENTRIES : count - allocated;
+
+ /* Allocate higher priority entries for PFs, so that VF's entries
+ * will be on top of PF.
+ */
+ if (!is_otx2_vf(pfvf->pcifunc)) {
+ req->priority = NPC_MCAM_HIGHER_PRIO;
+ req->ref_entry = flow_cfg->def_ent[0];
+ }
+
+ /* Send message to AF */
+ if (otx2_sync_mbox_msg(&pfvf->mbox))
+ goto exit;
+
+ rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
+ (&pfvf->mbox.mbox, 0, &req->hdr);
+
+ for (ent = 0; ent < rsp->count; ent++)
+ flow_cfg->flow_ent[ent + allocated] = rsp->entry_list[ent];
+
+ allocated += rsp->count;
+
+ /* If this request is not fulfilled, no need to send
+ * further requests.
+ */
+ if (rsp->count != req->count)
+ break;
+ }
+
+ /* Multiple MCAM entry alloc requests could result in non-sequential
+ * MCAM entries in the flow_ent[] array. Sort them in an ascending order,
+ * otherwise user installed ntuple filter index and MCAM entry index will
+ * not be in sync.
+ */
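+ /* e.g. two responses returning {34, 36} and {20, 21} would leave
+ * flow_ent[] = {34, 36, 20, 21}; sorting gives {20, 21, 34, 36} so
+ * ntuple filter location N maps to the Nth lowest MCAM entry
+ * (illustrative entry numbers).
+ */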
+ if (allocated)
+ sort(&flow_cfg->flow_ent[0], allocated,
+ sizeof(flow_cfg->flow_ent[0]), mcam_entry_cmp, NULL);
+
+exit:
+ mutex_unlock(&pfvf->mbox.lock);
+
+ flow_cfg->max_flows = allocated;
+
+ if (allocated) {
+ pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
+ pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
+ }
+
+ if (allocated != count)
+ netdev_info(pfvf->netdev,
+ "Unable to allocate %d MCAM entries, got only %d\n",
+ count, allocated);
+ return allocated;
+}
+EXPORT_SYMBOL(otx2_alloc_mcam_entries);
+
+static int otx2_mcam_entry_init(struct otx2_nic *pfvf)
+{
+ struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
+ struct npc_mcam_alloc_entry_req *req;
+ struct npc_mcam_alloc_entry_rsp *rsp;
+ int vf_vlan_max_flows;
+ int ent, count;
+
+ vf_vlan_max_flows = pfvf->total_vfs * OTX2_PER_VF_VLAN_FLOWS;
+ count = OTX2_MAX_UNICAST_FLOWS +
+ OTX2_MAX_VLAN_FLOWS + vf_vlan_max_flows;
+
+ flow_cfg->def_ent = devm_kmalloc_array(pfvf->dev, count,
+ sizeof(u16), GFP_KERNEL);
+ if (!flow_cfg->def_ent)
+ return -ENOMEM;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->contig = false;
+ req->count = count;
+
+ /* Send message to AF */
+ if (otx2_sync_mbox_msg(&pfvf->mbox)) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -EINVAL;
+ }
+
+ rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
+ (&pfvf->mbox.mbox, 0, &req->hdr);
+
+ if (rsp->count != req->count) {
+ netdev_info(pfvf->netdev,
+ "Unable to allocate MCAM entries for ucast, vlan and vf_vlan\n");
+ mutex_unlock(&pfvf->mbox.lock);
+ devm_kfree(pfvf->dev, flow_cfg->def_ent);
+ return 0;
+ }
+
+ for (ent = 0; ent < rsp->count; ent++)
+ flow_cfg->def_ent[ent] = rsp->entry_list[ent];
+
+ flow_cfg->vf_vlan_offset = 0;
+ flow_cfg->unicast_offset = vf_vlan_max_flows;
+ flow_cfg->rx_vlan_offset = flow_cfg->unicast_offset +
+ OTX2_MAX_UNICAST_FLOWS;
+ pfvf->flags |= OTX2_FLAG_UCAST_FLTR_SUPPORT;
+ pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT;
+ pfvf->flags |= OTX2_FLAG_VF_VLAN_SUPPORT;
+
+ pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
+ mutex_unlock(&pfvf->mbox.lock);
+
+ /* Allocate entries for Ntuple filters */
+ count = otx2_alloc_mcam_entries(pfvf, OTX2_DEFAULT_FLOWCOUNT);
+ if (count <= 0) {
+ otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
+ return 0;
+ }
+
+ pfvf->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT;
+
+ return 0;
+}
+
+int otx2vf_mcam_flow_init(struct otx2_nic *pfvf)
+{
+ struct otx2_flow_config *flow_cfg;
+
+ pfvf->flow_cfg = devm_kzalloc(pfvf->dev,
+ sizeof(struct otx2_flow_config),
+ GFP_KERNEL);
+ if (!pfvf->flow_cfg)
+ return -ENOMEM;
+
+ flow_cfg = pfvf->flow_cfg;
+ INIT_LIST_HEAD(&flow_cfg->flow_list);
+ flow_cfg->max_flows = 0;
+
+ return 0;
+}
+EXPORT_SYMBOL(otx2vf_mcam_flow_init);
+
+int otx2_mcam_flow_init(struct otx2_nic *pf)
+{
+ int err;
+
+ pf->flow_cfg = devm_kzalloc(pf->dev, sizeof(struct otx2_flow_config),
+ GFP_KERNEL);
+ if (!pf->flow_cfg)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&pf->flow_cfg->flow_list);
+
+ /* Allocate the bare minimum number of MCAM entries needed for
+ * unicast, VLAN and ntuple filters.
+ */
+ err = otx2_mcam_entry_init(pf);
+ if (err)
+ return err;
+
+ /* Check if MCAM entries are allocated or not */
+ if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
+ return 0;
+
+ pf->mac_table = devm_kzalloc(pf->dev, sizeof(struct otx2_mac_table)
+ * OTX2_MAX_UNICAST_FLOWS, GFP_KERNEL);
+
+ if (!pf->mac_table)
+ return -ENOMEM;
+
+ otx2_dmacflt_get_max_cnt(pf);
+
+ /* DMAC filters are not allocated */
+ if (!pf->flow_cfg->dmacflt_max_flows)
+ return 0;
+
+ pf->flow_cfg->bmap_to_dmacindex =
+ devm_kzalloc(pf->dev, sizeof(u8) *
+ pf->flow_cfg->dmacflt_max_flows,
+ GFP_KERNEL);
+
+ if (!pf->flow_cfg->bmap_to_dmacindex)
+ return -ENOMEM;
+
+ pf->flags |= OTX2_FLAG_DMACFLTR_SUPPORT;
+
+ return 0;
+}
+
+void otx2_mcam_flow_del(struct otx2_nic *pf)
+{
+ otx2_destroy_mcam_flows(pf);
+}
+EXPORT_SYMBOL(otx2_mcam_flow_del);
+
+ /* On success adds an MCAM entry
+ * On failure enables promiscuous mode
+ */
+static int otx2_do_add_macfilter(struct otx2_nic *pf, const u8 *mac)
+{
+ struct otx2_flow_config *flow_cfg = pf->flow_cfg;
+ struct npc_install_flow_req *req;
+ int err, i;
+
+ if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
+ return -ENOMEM;
+
+ /* don't have free MCAM entries or the UC list is greater than allotted */
+ if (netdev_uc_count(pf->netdev) > OTX2_MAX_UNICAST_FLOWS)
+ return -ENOMEM;
+
+ mutex_lock(&pf->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
+ if (!req) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ /* unicast entries start at offset 32; entries 0..31 are for ntuple filters */
+ for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
+ if (pf->mac_table[i].inuse)
+ continue;
+ ether_addr_copy(pf->mac_table[i].addr, mac);
+ pf->mac_table[i].inuse = true;
+ pf->mac_table[i].mcam_entry =
+ flow_cfg->def_ent[i + flow_cfg->unicast_offset];
+ req->entry = pf->mac_table[i].mcam_entry;
+ break;
+ }
+
+ ether_addr_copy(req->packet.dmac, mac);
+ eth_broadcast_addr((u8 *)&req->mask.dmac);
+ req->features = BIT_ULL(NPC_DMAC);
+ req->channel = pf->hw.rx_chan_base;
+ req->intf = NIX_INTF_RX;
+ req->op = NIX_RX_ACTION_DEFAULT;
+ req->set_cntr = 1;
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ mutex_unlock(&pf->mbox.lock);
+
+ return err;
+}
+
+int otx2_add_macfilter(struct net_device *netdev, const u8 *mac)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+
+ if (bitmap_weight(&pf->flow_cfg->dmacflt_bmap,
+ pf->flow_cfg->dmacflt_max_flows))
+ netdev_warn(netdev,
+ "Add %pM to CGX/RPM DMAC filters list as well\n",
+ mac);
+
+ return otx2_do_add_macfilter(pf, mac);
+}
+
+static bool otx2_get_mcamentry_for_mac(struct otx2_nic *pf, const u8 *mac,
+ int *mcam_entry)
+{
+ int i;
+
+ for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
+ if (!pf->mac_table[i].inuse)
+ continue;
+
+ if (ether_addr_equal(pf->mac_table[i].addr, mac)) {
+ *mcam_entry = pf->mac_table[i].mcam_entry;
+ pf->mac_table[i].inuse = false;
+ return true;
+ }
+ }
+ return false;
+}
+
+int otx2_del_macfilter(struct net_device *netdev, const u8 *mac)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ struct npc_delete_flow_req *req;
+ int err, mcam_entry;
+
+ /* check whether an MCAM entry exists for the given MAC */
+ if (!otx2_get_mcamentry_for_mac(pf, mac, &mcam_entry))
+ return 0;
+
+ mutex_lock(&pf->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
+ if (!req) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+ req->entry = mcam_entry;
+ /* Send message to AF */
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ mutex_unlock(&pf->mbox.lock);
+
+ return err;
+}
+
+static struct otx2_flow *otx2_find_flow(struct otx2_nic *pfvf, u32 location)
+{
+ struct otx2_flow *iter;
+
+ list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
+ if (iter->location == location)
+ return iter;
+ }
+
+ return NULL;
+}
+
+static void otx2_add_flow_to_list(struct otx2_nic *pfvf, struct otx2_flow *flow)
+{
+ struct list_head *head = &pfvf->flow_cfg->flow_list;
+ struct otx2_flow *iter;
+
+ list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
+ if (iter->location > flow->location)
+ break;
+ head = &iter->list;
+ }
+
+ list_add(&flow->list, head);
+}
+
+int otx2_get_maxflows(struct otx2_flow_config *flow_cfg)
+{
+ if (!flow_cfg)
+ return 0;
+
+ if (flow_cfg->nr_flows == flow_cfg->max_flows ||
+ bitmap_weight(&flow_cfg->dmacflt_bmap,
+ flow_cfg->dmacflt_max_flows))
+ return flow_cfg->max_flows + flow_cfg->dmacflt_max_flows;
+ else
+ return flow_cfg->max_flows;
+}
+EXPORT_SYMBOL(otx2_get_maxflows);
+
+int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
+ u32 location)
+{
+ struct otx2_flow *iter;
+
+ if (location >= otx2_get_maxflows(pfvf->flow_cfg))
+ return -EINVAL;
+
+ list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
+ if (iter->location == location) {
+ nfc->fs = iter->flow_spec;
+ nfc->rss_context = iter->rss_ctx_id;
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+
+int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
+ u32 *rule_locs)
+{
+ u32 rule_cnt = nfc->rule_cnt;
+ u32 location = 0;
+ int idx = 0;
+ int err = 0;
+
+ nfc->data = otx2_get_maxflows(pfvf->flow_cfg);
+ while ((!err || err == -ENOENT) && idx < rule_cnt) {
+ err = otx2_get_flow(pfvf, nfc, location);
+ if (!err)
+ rule_locs[idx++] = location;
+ location++;
+ }
+ nfc->rule_cnt = rule_cnt;
+
+ return err;
+}
+
+static void otx2_prepare_fdsa_flow_request(struct npc_install_flow_req *req,
+ bool is_vlan)
+{
+ struct flow_msg *pmask = &req->mask;
+ struct flow_msg *pkt = &req->packet;
+
+ /* In the FDSA tag the source port occupies bits 3..7 */
+ if (!is_vlan) {
+ pkt->vlan_tci <<= 3;
+ pmask->vlan_tci = cpu_to_be16(FDSA_SPORT_MASK);
+ }
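+ /* e.g. a user supplied source port of 5 becomes vlan_tci 5 << 3 = 0x28,
+ * matched against the 0xf8 source-port mask above (illustrative value).
+ */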
+ /* Strip FDSA tag */
+ req->features |= BIT_ULL(NPC_FDSA_VAL);
+ req->vtag0_valid = true;
+ req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE6;
+ req->op = NIX_RX_ACTION_DEFAULT;
+}
+
+static int otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp,
+ struct npc_install_flow_req *req,
+ u32 flow_type)
+{
+ struct ethtool_usrip4_spec *ipv4_usr_mask = &fsp->m_u.usr_ip4_spec;
+ struct ethtool_usrip4_spec *ipv4_usr_hdr = &fsp->h_u.usr_ip4_spec;
+ struct ethtool_tcpip4_spec *ipv4_l4_mask = &fsp->m_u.tcp_ip4_spec;
+ struct ethtool_tcpip4_spec *ipv4_l4_hdr = &fsp->h_u.tcp_ip4_spec;
+ struct ethtool_ah_espip4_spec *ah_esp_hdr = &fsp->h_u.ah_ip4_spec;
+ struct ethtool_ah_espip4_spec *ah_esp_mask = &fsp->m_u.ah_ip4_spec;
+ struct flow_msg *pmask = &req->mask;
+ struct flow_msg *pkt = &req->packet;
+
+ switch (flow_type) {
+ case IP_USER_FLOW:
+ if (ipv4_usr_mask->ip4src) {
+ memcpy(&pkt->ip4src, &ipv4_usr_hdr->ip4src,
+ sizeof(pkt->ip4src));
+ memcpy(&pmask->ip4src, &ipv4_usr_mask->ip4src,
+ sizeof(pmask->ip4src));
+ req->features |= BIT_ULL(NPC_SIP_IPV4);
+ }
+ if (ipv4_usr_mask->ip4dst) {
+ memcpy(&pkt->ip4dst, &ipv4_usr_hdr->ip4dst,
+ sizeof(pkt->ip4dst));
+ memcpy(&pmask->ip4dst, &ipv4_usr_mask->ip4dst,
+ sizeof(pmask->ip4dst));
+ req->features |= BIT_ULL(NPC_DIP_IPV4);
+ }
+ if (ipv4_usr_mask->tos) {
+ pkt->tos = ipv4_usr_hdr->tos;
+ pmask->tos = ipv4_usr_mask->tos;
+ req->features |= BIT_ULL(NPC_TOS);
+ }
+ if (ipv4_usr_mask->proto) {
+ switch (ipv4_usr_hdr->proto) {
+ case IPPROTO_ICMP:
+ req->features |= BIT_ULL(NPC_IPPROTO_ICMP);
+ break;
+ case IPPROTO_TCP:
+ req->features |= BIT_ULL(NPC_IPPROTO_TCP);
+ break;
+ case IPPROTO_UDP:
+ req->features |= BIT_ULL(NPC_IPPROTO_UDP);
+ break;
+ case IPPROTO_SCTP:
+ req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
+ break;
+ case IPPROTO_AH:
+ req->features |= BIT_ULL(NPC_IPPROTO_AH);
+ break;
+ case IPPROTO_ESP:
+ req->features |= BIT_ULL(NPC_IPPROTO_ESP);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+ pkt->etype = cpu_to_be16(ETH_P_IP);
+ pmask->etype = cpu_to_be16(0xFFFF);
+ req->features |= BIT_ULL(NPC_ETYPE);
+ break;
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ pkt->etype = cpu_to_be16(ETH_P_IP);
+ pmask->etype = cpu_to_be16(0xFFFF);
+ req->features |= BIT_ULL(NPC_ETYPE);
+ if (ipv4_l4_mask->ip4src) {
+ memcpy(&pkt->ip4src, &ipv4_l4_hdr->ip4src,
+ sizeof(pkt->ip4src));
+ memcpy(&pmask->ip4src, &ipv4_l4_mask->ip4src,
+ sizeof(pmask->ip4src));
+ req->features |= BIT_ULL(NPC_SIP_IPV4);
+ }
+ if (ipv4_l4_mask->ip4dst) {
+ memcpy(&pkt->ip4dst, &ipv4_l4_hdr->ip4dst,
+ sizeof(pkt->ip4dst));
+ memcpy(&pmask->ip4dst, &ipv4_l4_mask->ip4dst,
+ sizeof(pmask->ip4dst));
+ req->features |= BIT_ULL(NPC_DIP_IPV4);
+ }
+ if (ipv4_l4_mask->tos) {
+ pkt->tos = ipv4_l4_hdr->tos;
+ pmask->tos = ipv4_l4_mask->tos;
+ req->features |= BIT_ULL(NPC_TOS);
+ }
+ if (ipv4_l4_mask->psrc) {
+ memcpy(&pkt->sport, &ipv4_l4_hdr->psrc,
+ sizeof(pkt->sport));
+ memcpy(&pmask->sport, &ipv4_l4_mask->psrc,
+ sizeof(pmask->sport));
+ if (flow_type == UDP_V4_FLOW)
+ req->features |= BIT_ULL(NPC_SPORT_UDP);
+ else if (flow_type == TCP_V4_FLOW)
+ req->features |= BIT_ULL(NPC_SPORT_TCP);
+ else
+ req->features |= BIT_ULL(NPC_SPORT_SCTP);
+ }
+ if (ipv4_l4_mask->pdst) {
+ memcpy(&pkt->dport, &ipv4_l4_hdr->pdst,
+ sizeof(pkt->dport));
+ memcpy(&pmask->dport, &ipv4_l4_mask->pdst,
+ sizeof(pmask->dport));
+ if (flow_type == UDP_V4_FLOW)
+ req->features |= BIT_ULL(NPC_DPORT_UDP);
+ else if (flow_type == TCP_V4_FLOW)
+ req->features |= BIT_ULL(NPC_DPORT_TCP);
+ else
+ req->features |= BIT_ULL(NPC_DPORT_SCTP);
+ }
+ if (flow_type == UDP_V4_FLOW)
+ req->features |= BIT_ULL(NPC_IPPROTO_UDP);
+ else if (flow_type == TCP_V4_FLOW)
+ req->features |= BIT_ULL(NPC_IPPROTO_TCP);
+ else
+ req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
+ break;
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ pkt->etype = cpu_to_be16(ETH_P_IP);
+ pmask->etype = cpu_to_be16(0xFFFF);
+ req->features |= BIT_ULL(NPC_ETYPE);
+ if (ah_esp_mask->ip4src) {
+ memcpy(&pkt->ip4src, &ah_esp_hdr->ip4src,
+ sizeof(pkt->ip4src));
+ memcpy(&pmask->ip4src, &ah_esp_mask->ip4src,
+ sizeof(pmask->ip4src));
+ req->features |= BIT_ULL(NPC_SIP_IPV4);
+ }
+ if (ah_esp_mask->ip4dst) {
+ memcpy(&pkt->ip4dst, &ah_esp_hdr->ip4dst,
+ sizeof(pkt->ip4dst));
+ memcpy(&pmask->ip4dst, &ah_esp_mask->ip4dst,
+ sizeof(pmask->ip4dst));
+ req->features |= BIT_ULL(NPC_DIP_IPV4);
+ }
+ if (ah_esp_mask->tos) {
+ pkt->tos = ah_esp_hdr->tos;
+ pmask->tos = ah_esp_mask->tos;
+ req->features |= BIT_ULL(NPC_TOS);
+ }
+
+ /* NPC profile doesn't extract AH/ESP header fields */
+ if (ah_esp_mask->spi & ah_esp_hdr->spi)
+ return -EOPNOTSUPP;
+
+ if (flow_type == AH_V4_FLOW)
+ req->features |= BIT_ULL(NPC_IPPROTO_AH);
+ else
+ req->features |= BIT_ULL(NPC_IPPROTO_ESP);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp,
+ struct npc_install_flow_req *req,
+ u32 flow_type)
+{
+ struct ethtool_usrip6_spec *ipv6_usr_mask = &fsp->m_u.usr_ip6_spec;
+ struct ethtool_usrip6_spec *ipv6_usr_hdr = &fsp->h_u.usr_ip6_spec;
+ struct ethtool_tcpip6_spec *ipv6_l4_mask = &fsp->m_u.tcp_ip6_spec;
+ struct ethtool_tcpip6_spec *ipv6_l4_hdr = &fsp->h_u.tcp_ip6_spec;
+ struct ethtool_ah_espip6_spec *ah_esp_hdr = &fsp->h_u.ah_ip6_spec;
+ struct ethtool_ah_espip6_spec *ah_esp_mask = &fsp->m_u.ah_ip6_spec;
+ struct flow_msg *pmask = &req->mask;
+ struct flow_msg *pkt = &req->packet;
+
+ switch (flow_type) {
+ case IPV6_USER_FLOW:
+ if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6src)) {
+ memcpy(&pkt->ip6src, &ipv6_usr_hdr->ip6src,
+ sizeof(pkt->ip6src));
+ memcpy(&pmask->ip6src, &ipv6_usr_mask->ip6src,
+ sizeof(pmask->ip6src));
+ req->features |= BIT_ULL(NPC_SIP_IPV6);
+ }
+ if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6dst)) {
+ memcpy(&pkt->ip6dst, &ipv6_usr_hdr->ip6dst,
+ sizeof(pkt->ip6dst));
+ memcpy(&pmask->ip6dst, &ipv6_usr_mask->ip6dst,
+ sizeof(pmask->ip6dst));
+ req->features |= BIT_ULL(NPC_DIP_IPV6);
+ }
+ pkt->etype = cpu_to_be16(ETH_P_IPV6);
+ pmask->etype = cpu_to_be16(0xFFFF);
+ req->features |= BIT_ULL(NPC_ETYPE);
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ pkt->etype = cpu_to_be16(ETH_P_IPV6);
+ pmask->etype = cpu_to_be16(0xFFFF);
+ req->features |= BIT_ULL(NPC_ETYPE);
+ if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6src)) {
+ memcpy(&pkt->ip6src, &ipv6_l4_hdr->ip6src,
+ sizeof(pkt->ip6src));
+ memcpy(&pmask->ip6src, &ipv6_l4_mask->ip6src,
+ sizeof(pmask->ip6src));
+ req->features |= BIT_ULL(NPC_SIP_IPV6);
+ }
+ if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6dst)) {
+ memcpy(&pkt->ip6dst, &ipv6_l4_hdr->ip6dst,
+ sizeof(pkt->ip6dst));
+ memcpy(&pmask->ip6dst, &ipv6_l4_mask->ip6dst,
+ sizeof(pmask->ip6dst));
+ req->features |= BIT_ULL(NPC_DIP_IPV6);
+ }
+ if (ipv6_l4_mask->psrc) {
+ memcpy(&pkt->sport, &ipv6_l4_hdr->psrc,
+ sizeof(pkt->sport));
+ memcpy(&pmask->sport, &ipv6_l4_mask->psrc,
+ sizeof(pmask->sport));
+ if (flow_type == UDP_V6_FLOW)
+ req->features |= BIT_ULL(NPC_SPORT_UDP);
+ else if (flow_type == TCP_V6_FLOW)
+ req->features |= BIT_ULL(NPC_SPORT_TCP);
+ else
+ req->features |= BIT_ULL(NPC_SPORT_SCTP);
+ }
+ if (ipv6_l4_mask->pdst) {
+ memcpy(&pkt->dport, &ipv6_l4_hdr->pdst,
+ sizeof(pkt->dport));
+ memcpy(&pmask->dport, &ipv6_l4_mask->pdst,
+ sizeof(pmask->dport));
+ if (flow_type == UDP_V6_FLOW)
+ req->features |= BIT_ULL(NPC_DPORT_UDP);
+ else if (flow_type == TCP_V6_FLOW)
+ req->features |= BIT_ULL(NPC_DPORT_TCP);
+ else
+ req->features |= BIT_ULL(NPC_DPORT_SCTP);
+ }
+ if (flow_type == UDP_V6_FLOW)
+ req->features |= BIT_ULL(NPC_IPPROTO_UDP);
+ else if (flow_type == TCP_V6_FLOW)
+ req->features |= BIT_ULL(NPC_IPPROTO_TCP);
+ else
+ req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
+ break;
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ pkt->etype = cpu_to_be16(ETH_P_IPV6);
+ pmask->etype = cpu_to_be16(0xFFFF);
+ req->features |= BIT_ULL(NPC_ETYPE);
+ if (!ipv6_addr_any((struct in6_addr *)ah_esp_hdr->ip6src)) {
+ memcpy(&pkt->ip6src, &ah_esp_hdr->ip6src,
+ sizeof(pkt->ip6src));
+ memcpy(&pmask->ip6src, &ah_esp_mask->ip6src,
+ sizeof(pmask->ip6src));
+ req->features |= BIT_ULL(NPC_SIP_IPV6);
+ }
+ if (!ipv6_addr_any((struct in6_addr *)ah_esp_hdr->ip6dst)) {
+ memcpy(&pkt->ip6dst, &ah_esp_hdr->ip6dst,
+ sizeof(pkt->ip6dst));
+ memcpy(&pmask->ip6dst, &ah_esp_mask->ip6dst,
+ sizeof(pmask->ip6dst));
+ req->features |= BIT_ULL(NPC_DIP_IPV6);
+ }
+
+ /* NPC profile doesn't extract AH/ESP header fields */
+ if ((ah_esp_mask->spi & ah_esp_hdr->spi) ||
+ (ah_esp_mask->tclass & ah_esp_hdr->tclass))
+ return -EOPNOTSUPP;
+
+ if (flow_type == AH_V6_FLOW)
+ req->features |= BIT_ULL(NPC_IPPROTO_AH);
+ else
+ req->features |= BIT_ULL(NPC_IPPROTO_ESP);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
+ struct npc_install_flow_req *req,
+ struct otx2_nic *pfvf)
+{
+ struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
+ struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
+ struct flow_msg *pmask = &req->mask;
+ struct flow_msg *pkt = &req->packet;
+ u32 flow_type;
+ int ret;
+
+ flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
+ switch (flow_type) {
+ /* bits not set in mask are don't care */
+ case ETHER_FLOW:
+ if (!is_zero_ether_addr(eth_mask->h_source)) {
+ ether_addr_copy(pkt->smac, eth_hdr->h_source);
+ ether_addr_copy(pmask->smac, eth_mask->h_source);
+ req->features |= BIT_ULL(NPC_SMAC);
+ }
+ if (!is_zero_ether_addr(eth_mask->h_dest)) {
+ ether_addr_copy(pkt->dmac, eth_hdr->h_dest);
+ ether_addr_copy(pmask->dmac, eth_mask->h_dest);
+ req->features |= BIT_ULL(NPC_DMAC);
+ }
+ if (eth_hdr->h_proto) {
+ memcpy(&pkt->etype, &eth_hdr->h_proto,
+ sizeof(pkt->etype));
+ memcpy(&pmask->etype, &eth_mask->h_proto,
+ sizeof(pmask->etype));
+ req->features |= BIT_ULL(NPC_ETYPE);
+ }
+ break;
+ case IP_USER_FLOW:
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ ret = otx2_prepare_ipv4_flow(fsp, req, flow_type);
+ if (ret)
+ return ret;
+ break;
+ case IPV6_USER_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ ret = otx2_prepare_ipv6_flow(fsp, req, flow_type);
+ if (ret)
+ return ret;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ if (fsp->flow_type & FLOW_EXT) {
+ int skip_user_def = false;
+ u16 vlan_etype;
+
+ if (fsp->m_ext.vlan_etype) {
+ /* Partial masks not supported */
+ if (fsp->m_ext.vlan_etype != 0xFFFF)
+ return -EINVAL;
+
+ vlan_etype = be16_to_cpu(fsp->h_ext.vlan_etype);
+ /* Only ETH_P_8021Q and ETH_P_8021AD types supported */
+ if (vlan_etype != ETH_P_8021Q &&
+ vlan_etype != ETH_P_8021AD)
+ return -EINVAL;
+
+ memcpy(&pkt->vlan_etype, &fsp->h_ext.vlan_etype,
+ sizeof(pkt->vlan_etype));
+ memcpy(&pmask->vlan_etype, &fsp->m_ext.vlan_etype,
+ sizeof(pmask->vlan_etype));
+
+ if (vlan_etype == ETH_P_8021Q)
+ req->features |= BIT_ULL(NPC_VLAN_ETYPE_CTAG);
+ else
+ req->features |= BIT_ULL(NPC_VLAN_ETYPE_STAG);
+ }
+ if (fsp->m_ext.vlan_tci) {
+ memcpy(&pkt->vlan_tci, &fsp->h_ext.vlan_tci,
+ sizeof(pkt->vlan_tci));
+ memcpy(&pmask->vlan_tci, &fsp->m_ext.vlan_tci,
+ sizeof(pmask->vlan_tci));
+
+ if (pfvf->ethtool_flags & OTX2_PRIV_FLAG_FDSA_HDR) {
+ otx2_prepare_fdsa_flow_request(req, true);
+ skip_user_def = true;
+ } else {
+ req->features |= BIT_ULL(NPC_OUTER_VID);
+ }
+ }
+
+ if (fsp->m_ext.data[1] && !skip_user_def) {
+ if (pfvf->ethtool_flags & OTX2_PRIV_FLAG_FDSA_HDR) {
+ if (be32_to_cpu(fsp->h_ext.data[1]) >=
+ FDSA_MAX_SPORT)
+ return -EINVAL;
+
+ memcpy(&pkt->vlan_tci,
+ (u8 *)&fsp->h_ext.data[1] + 2,
+ sizeof(pkt->vlan_tci));
+ otx2_prepare_fdsa_flow_request(req, false);
+ } else if (fsp->h_ext.data[1] ==
+ cpu_to_be32(OTX2_DEFAULT_ACTION)) {
+ /* Neither drop nor direct to a queue; use the
+ * action from the default entry
+ */
+ req->op = NIX_RX_ACTION_DEFAULT;
+ }
+ }
+ }
+
+ if (fsp->flow_type & FLOW_MAC_EXT &&
+ !is_zero_ether_addr(fsp->m_ext.h_dest)) {
+ ether_addr_copy(pkt->dmac, fsp->h_ext.h_dest);
+ ether_addr_copy(pmask->dmac, fsp->m_ext.h_dest);
+ req->features |= BIT_ULL(NPC_DMAC);
+ }
+
+ if (!req->features)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int otx2_is_flow_rule_dmacfilter(struct otx2_nic *pfvf,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
+ struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
+ u64 ring_cookie = fsp->ring_cookie;
+ u32 flow_type;
+
+ if (!(pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT))
+ return false;
+
+ flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
+
+ /* CGX/RPM block DMAC filtering is configured for white listing,
+ * so check for any action other than DROP
+ */
+ if (flow_type == ETHER_FLOW && ring_cookie != RX_CLS_FLOW_DISC &&
+ !ethtool_get_flow_spec_ring_vf(ring_cookie)) {
+ if (is_zero_ether_addr(eth_mask->h_dest) &&
+ is_valid_ether_addr(eth_hdr->h_dest))
+ return true;
+ }
+
+ return false;
+}
+
+static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
+{
+ u64 ring_cookie = flow->flow_spec.ring_cookie;
+#ifdef CONFIG_DCB
+ int vlan_prio, qidx, pfc_rule = 0;
+#endif
+ struct npc_install_flow_req *req;
+ int err, vf = 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ err = otx2_prepare_flow_request(&flow->flow_spec, req, pfvf);
+ if (err) {
+ /* free the allocated msg above */
+ otx2_mbox_reset(&pfvf->mbox.mbox, 0);
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+ }
+
+ req->entry = flow->entry;
+ req->intf = NIX_INTF_RX;
+ req->set_cntr = 1;
+ req->channel = pfvf->hw.rx_chan_base;
+ if (ring_cookie == RX_CLS_FLOW_DISC) {
+ req->op = NIX_RX_ACTIONOP_DROP;
+ } else {
+ /* change to unicast only if action of default entry is not
+ * requested by user
+ */
+ if (flow->flow_spec.flow_type & FLOW_RSS) {
+ req->op = NIX_RX_ACTIONOP_RSS;
+ req->index = flow->rss_ctx_id;
+ req->flow_key_alg = pfvf->hw.flowkey_alg_idx;
+ } else {
+ req->op = NIX_RX_ACTIONOP_UCAST;
+ req->index = ethtool_get_flow_spec_ring(ring_cookie);
+ }
+ vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
+ if (vf > pci_num_vf(pfvf->pdev)) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -EINVAL;
+ }
+
+#ifdef CONFIG_DCB
+ /* Identify PFC rule if PFC enabled and ntuple rule is vlan */
+ if (!vf && (req->features & BIT_ULL(NPC_OUTER_VID)) &&
+ pfvf->pfc_en && req->op != NIX_RX_ACTIONOP_RSS) {
+ vlan_prio = ntohs(req->packet.vlan_tci) &
+ ntohs(req->mask.vlan_tci);
+
+ /* Get the priority */
+ vlan_prio >>= 13;
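+ /* VLAN TCI layout is PCP(3) | DEI(1) | VID(12), so the shift leaves
+ * only the 3-bit priority; e.g. a masked TCI of 0x6000 yields prio 3.
+ */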
+ flow->rule_type |= PFC_FLOWCTRL_RULE;
+ /* Check if PFC enabled for this priority */
+ if (pfvf->pfc_en & BIT(vlan_prio)) {
+ pfc_rule = true;
+ qidx = req->index;
+ }
+ }
+#endif
+ }
+
+ /* ethtool ring_cookie has (VF + 1) for VF */
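+ /* e.g. per that (VF + 1) encoding, a rule targeting VF0 carries 1 in
+ * the ring_cookie VF field, extracted by ethtool_get_flow_spec_ring_vf()
+ * above.
+ */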
+ if (vf) {
+ req->vf = vf;
+ flow->is_vf = true;
+ flow->vf = vf;
+ }
+
+ /* Send message to AF */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+
+#ifdef CONFIG_DCB
+ if (!err && pfc_rule)
+ otx2_update_bpid_in_rqctx(pfvf, vlan_prio, qidx, true);
+#endif
+
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+}
+
+static int otx2_add_flow_with_pfmac(struct otx2_nic *pfvf,
+ struct otx2_flow *flow)
+{
+ struct ethhdr *eth_hdr;
+ struct otx2_flow *pf_mac;
+
+ pf_mac = kzalloc(sizeof(*pf_mac), GFP_KERNEL);
+ if (!pf_mac)
+ return -ENOMEM;
+
+ pf_mac->entry = 0;
+ pf_mac->rule_type |= DMAC_FILTER_RULE;
+ pf_mac->location = pfvf->flow_cfg->max_flows;
+ memcpy(&pf_mac->flow_spec, &flow->flow_spec,
+ sizeof(struct ethtool_rx_flow_spec));
+ pf_mac->flow_spec.location = pf_mac->location;
+
+ /* Copy PF mac address */
+ eth_hdr = &pf_mac->flow_spec.h_u.ether_spec;
+ ether_addr_copy(eth_hdr->h_dest, pfvf->netdev->dev_addr);
+
+ /* Install DMAC filter with PF mac address */
+ otx2_dmacflt_add(pfvf, eth_hdr->h_dest, 0);
+
+ otx2_add_flow_to_list(pfvf, pf_mac);
+ pfvf->flow_cfg->nr_flows++;
+ set_bit(0, &pfvf->flow_cfg->dmacflt_bmap);
+
+ return 0;
+}
+
+int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
+{
+ struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
+ struct ethtool_rx_flow_spec *fsp = &nfc->fs;
+ struct otx2_flow *flow;
+ struct ethhdr *eth_hdr;
+ bool new = false;
+ int err = 0;
+ u32 ring;
+
+ if (!flow_cfg->max_flows) {
+ netdev_err(pfvf->netdev,
+ "Ntuple rule count is 0, allocate and retry\n");
+ return -EINVAL;
+ }
+
+ ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
+ if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
+ return -ENOMEM;
+
+ if (ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
+ return -EINVAL;
+
+ if (fsp->location >= otx2_get_maxflows(flow_cfg))
+ return -EINVAL;
+
+ flow = otx2_find_flow(pfvf, fsp->location);
+ if (!flow) {
+ flow = kzalloc(sizeof(*flow), GFP_KERNEL);
+ if (!flow)
+ return -ENOMEM;
+ flow->location = fsp->location;
+ flow->entry = flow_cfg->flow_ent[flow->location];
+ new = true;
+ }
+ /* struct copy */
+ flow->flow_spec = *fsp;
+
+ if (fsp->flow_type & FLOW_RSS)
+ flow->rss_ctx_id = nfc->rss_context;
+
+ if (otx2_is_flow_rule_dmacfilter(pfvf, &flow->flow_spec)) {
+ eth_hdr = &flow->flow_spec.h_u.ether_spec;
+
+ /* Sync dmac filter table with updated fields */
+ if (flow->rule_type & DMAC_FILTER_RULE)
+ return otx2_dmacflt_update(pfvf, eth_hdr->h_dest,
+ flow->entry);
+
+ if (bitmap_full(&flow_cfg->dmacflt_bmap,
+ flow_cfg->dmacflt_max_flows)) {
+ netdev_warn(pfvf->netdev,
+ "Can't insert the rule %d as max allowed dmac filters are %d\n",
+ flow->location +
+ flow_cfg->dmacflt_max_flows,
+ flow_cfg->dmacflt_max_flows);
+ err = -EINVAL;
+ if (new)
+ kfree(flow);
+ return err;
+ }
+
+ /* Install PF mac address to DMAC filter list */
+ if (!test_bit(0, &flow_cfg->dmacflt_bmap))
+ otx2_add_flow_with_pfmac(pfvf, flow);
+
+ flow->rule_type |= DMAC_FILTER_RULE;
+ flow->entry = find_first_zero_bit(&flow_cfg->dmacflt_bmap,
+ flow_cfg->dmacflt_max_flows);
+ fsp->location = flow_cfg->max_flows + flow->entry;
+ flow->flow_spec.location = fsp->location;
+ flow->location = fsp->location;
+
+ set_bit(flow->entry, &flow_cfg->dmacflt_bmap);
+ otx2_dmacflt_add(pfvf, eth_hdr->h_dest, flow->entry);
+
+ } else {
+ if (flow->location >= pfvf->flow_cfg->max_flows) {
+ netdev_warn(pfvf->netdev,
+ "Can't insert non dmac ntuple rule at %d, allowed range %d-0\n",
+ flow->location,
+ flow_cfg->max_flows - 1);
+ err = -EINVAL;
+ } else {
+ err = otx2_add_flow_msg(pfvf, flow);
+ }
+ }
+
+ if (err) {
+ if (err == MBOX_MSG_INVALID)
+ err = -EINVAL;
+ if (new)
+ kfree(flow);
+ return err;
+ }
+
+ /* add the new flow installed to list */
+ if (new) {
+ otx2_add_flow_to_list(pfvf, flow);
+ flow_cfg->nr_flows++;
+ }
+
+ return 0;
+}
+
+static int otx2_remove_flow_msg(struct otx2_nic *pfvf, u16 entry, bool all)
+{
+ struct npc_delete_flow_req *req;
+ int err;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->entry = entry;
+ if (all)
+ req->all = 1;
+
+ /* Send message to AF */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+}
+
+static void otx2_update_rem_pfmac(struct otx2_nic *pfvf, int req)
+{
+ struct otx2_flow *iter;
+ struct ethhdr *eth_hdr;
+ bool found = false;
+
+ list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
+ if ((iter->rule_type & DMAC_FILTER_RULE) && iter->entry == 0) {
+ eth_hdr = &iter->flow_spec.h_u.ether_spec;
+ if (req == DMAC_ADDR_DEL) {
+ otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
+ 0);
+ clear_bit(0, &pfvf->flow_cfg->dmacflt_bmap);
+ found = true;
+ } else {
+ ether_addr_copy(eth_hdr->h_dest,
+ pfvf->netdev->dev_addr);
+ otx2_dmacflt_update(pfvf, eth_hdr->h_dest, 0);
+ }
+ break;
+ }
+ }
+
+ if (found) {
+ list_del(&iter->list);
+ kfree(iter);
+ pfvf->flow_cfg->nr_flows--;
+ }
+}
+
+int otx2_remove_flow(struct otx2_nic *pfvf, u32 location)
+{
+ struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
+ struct otx2_flow *flow;
+ int err;
+
+ if (location >= otx2_get_maxflows(flow_cfg))
+ return -EINVAL;
+
+ flow = otx2_find_flow(pfvf, location);
+ if (!flow)
+ return -ENOENT;
+
+ if (flow->rule_type & DMAC_FILTER_RULE) {
+ struct ethhdr *eth_hdr = &flow->flow_spec.h_u.ether_spec;
+
+ /* the user is not allowed to remove the DMAC filter for the interface MAC */
+ if (ether_addr_equal(pfvf->netdev->dev_addr, eth_hdr->h_dest))
+ return -EPERM;
+
+ err = otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
+ flow->entry);
+ clear_bit(flow->entry, &flow_cfg->dmacflt_bmap);
+ /* If all dmac filters are removed delete macfilter with
+ * interface mac address and configure CGX/RPM block in
+ * promiscuous mode
+ */
+ if (bitmap_weight(&flow_cfg->dmacflt_bmap,
+ flow_cfg->dmacflt_max_flows) == 1)
+ otx2_update_rem_pfmac(pfvf, DMAC_ADDR_DEL);
+ } else {
+#ifdef CONFIG_DCB
+ if (flow->rule_type & PFC_FLOWCTRL_RULE)
+ otx2_update_bpid_in_rqctx(pfvf, 0,
+ flow->flow_spec.ring_cookie,
+ false);
+#endif
+
+ err = otx2_remove_flow_msg(pfvf, flow->entry, false);
+ }
+
+ if (err)
+ return err;
+
+ list_del(&flow->list);
+ kfree(flow);
+ flow_cfg->nr_flows--;
+
+ return 0;
+}
+
+void otx2_rss_ctx_flow_del(struct otx2_nic *pfvf, int ctx_id)
+{
+ struct otx2_flow *flow, *tmp;
+ int err;
+
+ list_for_each_entry_safe(flow, tmp, &pfvf->flow_cfg->flow_list, list) {
+ if (flow->rss_ctx_id != ctx_id)
+ continue;
+ err = otx2_remove_flow(pfvf, flow->location);
+ if (err)
+ netdev_warn(pfvf->netdev,
+ "Can't delete the rule %d associated with this rss group err:%d",
+ flow->location, err);
+ }
+}
+
+int otx2_destroy_ntuple_flows(struct otx2_nic *pfvf)
+{
+ struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
+ struct npc_delete_flow_req *req;
+ struct otx2_flow *iter, *tmp;
+ int err;
+
+ if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
+ return 0;
+
+ if (!flow_cfg->max_flows)
+ return 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->start = flow_cfg->flow_ent[0];
+ req->end = flow_cfg->flow_ent[flow_cfg->max_flows - 1];
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+
+ list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
+ list_del(&iter->list);
+ kfree(iter);
+ flow_cfg->nr_flows--;
+ }
+ return err;
+}
+
+int otx2_destroy_mcam_flows(struct otx2_nic *pfvf)
+{
+ struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
+ struct npc_mcam_free_entry_req *req;
+ struct otx2_flow *iter, *tmp;
+ int err;
+
+ if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC))
+ return 0;
+
+ /* remove all flows */
+ err = otx2_remove_flow_msg(pfvf, 0, true);
+ if (err)
+ return err;
+
+ list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
+ list_del(&iter->list);
+ kfree(iter);
+ flow_cfg->nr_flows--;
+ }
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->all = 1;
+ /* Send message to AF to free MCAM entries */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+ }
+
+ pfvf->flags &= ~OTX2_FLAG_MCAM_ENTRIES_ALLOC;
+ mutex_unlock(&pfvf->mbox.lock);
+
+ return 0;
+}
+
+int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf)
+{
+ struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
+ struct npc_install_flow_req *req;
+ int err;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->entry = flow_cfg->def_ent[flow_cfg->rx_vlan_offset];
+ req->intf = NIX_INTF_RX;
+ ether_addr_copy(req->packet.dmac, pfvf->netdev->dev_addr);
+ eth_broadcast_addr((u8 *)&req->mask.dmac);
+ req->channel = pfvf->hw.rx_chan_base;
+ req->op = NIX_RX_ACTION_DEFAULT;
+ req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC);
+ req->vtag0_valid = true;
+ req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE0;
+
+ /* Send message to AF */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+}
+
+static int otx2_delete_rxvlan_offload_flow(struct otx2_nic *pfvf)
+{
+ struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
+ struct npc_delete_flow_req *req;
+ int err;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->entry = flow_cfg->def_ent[flow_cfg->rx_vlan_offset];
+ /* Send message to AF */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+}
+
+int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable)
+{
+ struct nix_vtag_config *req;
+ struct mbox_msghdr *rsp_hdr;
+ int err;
+
+ /* Don't have enough MCAM entries */
+ if (!(pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT))
+ return -ENOMEM;
+
+ /* FDSA & RXVLAN are mutually exclusive */
+ if (pf->ethtool_flags & OTX2_PRIV_FLAG_FDSA_HDR)
+ enable = false;
+
+ if (enable) {
+ err = otx2_install_rxvlan_offload_flow(pf);
+ if (err)
+ return err;
+ } else {
+ err = otx2_delete_rxvlan_offload_flow(pf);
+ if (err)
+ return err;
+ }
+
+ mutex_lock(&pf->mbox.lock);
+ req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
+ if (!req) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ /* config strip, capture and size */
+ req->vtag_size = VTAGSIZE_T4;
+ req->cfg_type = 1; /* rx vlan cfg */
+ req->rx.vtag_type = NIX_AF_LFX_RX_VTAG_TYPE0;
+ req->rx.strip_vtag = enable;
+ req->rx.capture_vtag = enable;
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ if (err) {
+ mutex_unlock(&pf->mbox.lock);
+ return err;
+ }
+
+ rsp_hdr = otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
+ if (IS_ERR(rsp_hdr)) {
+ mutex_unlock(&pf->mbox.lock);
+ return PTR_ERR(rsp_hdr);
+ }
+
+ mutex_unlock(&pf->mbox.lock);
+ return rsp_hdr->rc;
+}
+
+void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf)
+{
+ struct otx2_flow *iter;
+ struct ethhdr *eth_hdr;
+
+ list_for_each_entry(iter, &pf->flow_cfg->flow_list, list) {
+ if (iter->rule_type & DMAC_FILTER_RULE) {
+ eth_hdr = &iter->flow_spec.h_u.ether_spec;
+ otx2_dmacflt_add(pf, eth_hdr->h_dest,
+ iter->entry);
+ }
+ }
+}
+
+void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf)
+{
+ otx2_update_rem_pfmac(pfvf, DMAC_ADDR_UPDATE);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 161174be51c3..7fac19c72c61 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -1,11 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
-/* Marvell OcteonTx2 RVU Physcial Function ethernet driver
+/* Marvell RVU Physical Function ethernet driver
*
- * Copyright (C) 2020 Marvell International Ltd.
+ * Copyright (C) 2020 Marvell.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/module.h>
@@ -16,16 +13,19 @@
#include <linux/if_vlan.h>
#include <linux/iommu.h>
#include <net/ip.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
#include "otx2_reg.h"
#include "otx2_common.h"
#include "otx2_txrx.h"
#include "otx2_struct.h"
#include "otx2_ptp.h"
+#include "cn10k.h"
#include <rvu_trace.h>
-#define DRV_NAME "octeontx2-nicpf"
-#define DRV_STRING "Marvell OcteonTX2 NIC Physical Function Driver"
+#define DRV_NAME "rvu_nicpf"
+#define DRV_STRING "Marvell RVU NIC Physical Function Driver"
/* Supported devices */
static const struct pci_device_id otx2_pf_id_table[] = {
@@ -38,6 +38,10 @@ MODULE_DESCRIPTION(DRV_STRING);
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, otx2_pf_id_table);
+static void otx2_vf_link_event_task(struct work_struct *work);
+static void otx2_vf_ptp_info_task(struct work_struct *work);
+static void otx2_do_set_rx_mode(struct otx2_nic *pf);
+
enum {
TYPE_PFAF,
TYPE_PFVF,
@@ -48,6 +52,7 @@ static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable);
static int otx2_change_mtu(struct net_device *netdev, int new_mtu)
{
+ struct otx2_nic *pf = netdev_priv(netdev);
bool if_up = netif_running(netdev);
int err = 0;
@@ -57,6 +62,10 @@ static int otx2_change_mtu(struct net_device *netdev, int new_mtu)
netdev_info(netdev, "Changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
+ /* Modify the receive buffer size based on the new MTU and stop
+ * using the previously configured fixed size.
+ */
+ pf->hw.rbuf_fixed_size = 0;
if (if_up)
err = otx2_open(netdev);
@@ -590,9 +599,17 @@ static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs)
if (!pf->mbox_pfvf_wq)
return -ENOMEM;
- base = readq((void __iomem *)((u64)pf->reg_base + RVU_PF_VF_BAR4_ADDR));
- hwbase = ioremap_wc(base, MBOX_SIZE * pf->total_vfs);
+ /* On CN10K platform, PF <-> VF mailbox region follows after
+ * PF <-> AF mailbox region.
+ */
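+ /* i.e. the PF <-> VF region begins MBOX_SIZE bytes into the mailbox
+ * BAR, and each VF then gets its own MBOX_SIZE-sized slot within the
+ * region ioremapped below.
+ */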
+ if (test_bit(CN10K_MBOX, &pf->hw.cap_flag))
+ base = pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM) +
+ MBOX_SIZE;
+ else
+ base = readq((void __iomem *)((u64)pf->reg_base +
+ RVU_PF_VF_BAR4_ADDR));
+ hwbase = ioremap_wc(base, MBOX_SIZE * pf->total_vfs);
if (!hwbase) {
err = -ENOMEM;
goto free_wq;
@@ -784,6 +801,9 @@ static void otx2_process_pfaf_mbox_msg(struct otx2_nic *pf,
case MBOX_MSG_CGX_STATS:
mbox_handler_cgx_stats(pf, (struct cgx_stats_rsp *)msg);
break;
+ case MBOX_MSG_CGX_FEC_STATS:
+ mbox_handler_cgx_fec_stats(pf, (struct cgx_fec_stats_rsp *)msg);
+ break;
default:
if (msg->rc)
dev_err(pf->dev,
@@ -867,6 +887,30 @@ int otx2_mbox_up_handler_cgx_link_event(struct otx2_nic *pf,
return 0;
}
+int otx2_mbox_up_handler_cgx_ptp_rx_info(struct otx2_nic *pf,
+ struct cgx_ptp_rx_info_msg *msg,
+ struct msg_rsp *rsp)
+{
+ int i;
+
+ if (!pf->ptp)
+ return 0;
+
+ pf->ptp->ptp_en = msg->ptp_en;
+
+ /* notify VFs about ptp event */
+ for (i = 0; i < pci_num_vf(pf->pdev); i++) {
+ struct otx2_vf_config *config = &pf->vf_configs[i];
+ struct delayed_work *dwork = &config->ptp_info_work;
+
+ if (config->intf_down)
+ continue;
+
+ schedule_delayed_work(dwork, msecs_to_jiffies(100));
+ }
+ return 0;
+}
+
static int otx2_process_mbox_msg_up(struct otx2_nic *pf,
struct mbox_msghdr *req)
{
@@ -1044,7 +1088,7 @@ static int otx2_pfaf_mbox_init(struct otx2_nic *pf)
* device memory to allow unaligned accesses.
*/
hwbase = ioremap_wc(pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM),
- pci_resource_len(pf->pdev, PCI_MBOX_BAR_NUM));
+ MBOX_SIZE);
if (!hwbase) {
dev_err(pf->dev, "Unable to map PFAF mailbox region\n");
err = -ENOMEM;
@@ -1101,6 +1145,11 @@ static int otx2_cgx_config_loopback(struct otx2_nic *pf, bool enable)
struct msg_req *msg;
int err;
+ if (enable && bitmap_weight(&pf->flow_cfg->dmacflt_bmap,
+ pf->flow_cfg->dmacflt_max_flows))
+ netdev_warn(pf->netdev,
+ "CGX/RPM internal loopback might not work as DMAC filters are active\n");
+
mutex_lock(&pf->mbox.lock);
if (enable)
msg = otx2_mbox_alloc_msg_cgx_intlbk_enable(&pf->mbox);
@@ -1169,7 +1218,7 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data)
}
/* SQ */
- for (qidx = 0; qidx < pf->hw.tx_queues; qidx++) {
+ for (qidx = 0; qidx < pf->hw.tot_tx_queues; qidx++) {
ptr = otx2_get_regaddr(pf, NIX_LF_SQ_OP_INT);
val = otx2_atomic64_add((qidx << 44), ptr);
otx2_write64(pf, NIX_LF_SQ_OP_INT, (qidx << 44) |
@@ -1272,17 +1321,48 @@ static void otx2_free_sq_res(struct otx2_nic *pf)
otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_SQ, false);
/* Free SQB pointers */
otx2_sq_free_sqbs(pf);
- for (qidx = 0; qidx < pf->hw.tx_queues; qidx++) {
+ for (qidx = 0; qidx < pf->hw.tot_tx_queues; qidx++) {
sq = &qset->sq[qidx];
qmem_free(pf->dev, sq->sqe);
qmem_free(pf->dev, sq->tso_hdrs);
kfree(sq->sg);
kfree(sq->sqb_ptrs);
+ qmem_free(pf->dev, sq->timestamps);
}
}
+static int otx2_get_rbuf_size(struct otx2_nic *pf, int mtu)
+{
+ int frame_size;
+ int total_size;
+ int rbuf_size;
+
+ if (pf->hw.rbuf_fixed_size)
+ return pf->hw.rbuf_fixed_size;
+
+ /* The data transferred by NIX to memory consists of actual packet
+ * plus additional data which has timestamp and/or EDSA/HIGIG2
+ * headers if interface is configured in corresponding modes.
+ * NIX transfers entire data using 6 segments/buffers and writes
+ * a CQE_RX descriptor with those segment addresses. First segment
* has additional data prepended to the packet. Software also reserves a
* headroom of 128 bytes in each segment. Hence the total size of
+ * memory needed to receive a packet with 'mtu' is:
+ * frame size = mtu + additional data;
+ * memory = frame_size + headroom * 6;
+ * each receive buffer size = memory / 6;
+ */
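+ /* Illustrative numbers only: with an MTU of 1500 and ~30 bytes of
+ * additional data, frame_size is ~1530, total_size is ~1530 + 128 * 6
+ * = 2298, so each receive buffer becomes ~383 bytes, aligned up to 2048.
+ */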
+ frame_size = mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN +
+ pf->addl_mtu + pf->xtra_hdr;
+ total_size = frame_size + OTX2_HEAD_ROOM * 6;
+ rbuf_size = total_size / 6;
+
+ return ALIGN(rbuf_size, 2048);
+}
+
static int otx2_init_hw_resources(struct otx2_nic *pf)
{
+ struct nix_lf_free_req *free_req;
struct mbox *mbox = &pf->mbox;
struct otx2_hw *hw = &pf->hw;
struct msg_req *req;
@@ -1293,12 +1373,13 @@ static int otx2_init_hw_resources(struct otx2_nic *pf)
* so, aura count = pool count.
*/
hw->rqpool_cnt = hw->rx_queues;
- hw->sqpool_cnt = hw->tx_queues;
+ hw->sqpool_cnt = hw->tot_tx_queues;
hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt;
- /* Get the size of receive buffers to allocate */
- pf->rbsize = RCV_FRAG_LEN(OTX2_HW_TIMESTAMP_LEN + pf->netdev->mtu +
- OTX2_ETH_HLEN);
+ /* Maximum hardware supported transmit length */
+ pf->tx_max_pktlen = pf->netdev->max_mtu + OTX2_ETH_HLEN;
+
+ pf->rbsize = otx2_get_rbuf_size(pf, pf->netdev->mtu);
mutex_lock(&mbox->lock);
/* NPA init */
@@ -1364,8 +1445,8 @@ err_free_rq_ptrs:
otx2_aura_pool_free(pf);
err_free_nix_lf:
mutex_lock(&mbox->lock);
- req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
- if (req) {
+ free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
+ if (free_req) {
if (otx2_sync_mbox_msg(mbox))
dev_err(pf->dev, "%s failed to free nixlf\n", __func__);
}
@@ -1384,6 +1465,7 @@ exit:
static void otx2_free_hw_resources(struct otx2_nic *pf)
{
struct otx2_qset *qset = &pf->qset;
+ struct nix_lf_free_req *free_req;
struct mbox *mbox = &pf->mbox;
struct otx2_cq_queue *cq;
struct msg_req *req;
@@ -1422,10 +1504,16 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
otx2_free_cq_res(pf);
+ /* Free all ingress bandwidth profiles allocated */
+ cn10k_free_all_ipolicers(pf);
+
mutex_lock(&mbox->lock);
/* Reset NIX LF */
- req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
- if (req) {
+ free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
+ if (free_req) {
+ free_req->flags = NIX_LF_DISABLE_FLOWS;
+ if (!(pf->flags & OTX2_FLAG_PF_SHUTDOWN))
+ free_req->flags |= NIX_LF_DONT_FREE_TX_VTAG;
if (otx2_sync_mbox_msg(mbox))
dev_err(pf->dev, "%s failed to free nixlf\n", __func__);
}
@@ -1456,7 +1544,7 @@ int otx2_open(struct net_device *netdev)
netif_carrier_off(netdev);
- pf->qset.cq_cnt = pf->hw.rx_queues + pf->hw.tx_queues;
+ pf->qset.cq_cnt = pf->hw.rx_queues + pf->hw.tot_tx_queues;
/* RQ and SQs are mapped to different CQs,
* so find out max CQ IRQs (i.e CINTs) needed.
*/
@@ -1476,7 +1564,7 @@ int otx2_open(struct net_device *netdev)
if (!qset->cq)
goto err_free_mem;
- qset->sq = kcalloc(pf->hw.tx_queues,
+ qset->sq = kcalloc(pf->hw.tot_tx_queues,
sizeof(struct otx2_snd_queue), GFP_KERNEL);
if (!qset->sq)
goto err_free_mem;
@@ -1497,11 +1585,20 @@ int otx2_open(struct net_device *netdev)
/* RQ0 & SQ0 are mapped to CINT0 and so on..
* 'cq_ids[0]' points to RQ's CQ and
* 'cq_ids[1]' points to SQ's CQ and
+ * 'cq_ids[2]' points to XDP's CQ and
*/
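+ /* e.g. with 4 RX and 4 TX queues and an XDP program loaded, CINT2
+ * would service CQ2 (RX), CQ6 (TX) and CQ10 (XDP); queue counts are
+ * illustrative.
+ */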
cq_poll->cq_ids[CQ_RX] =
(qidx < pf->hw.rx_queues) ? qidx : CINT_INVALID_CQ;
cq_poll->cq_ids[CQ_TX] = (qidx < pf->hw.tx_queues) ?
qidx + pf->hw.rx_queues : CINT_INVALID_CQ;
+ if (pf->xdp_prog)
+ cq_poll->cq_ids[CQ_XDP] = (qidx < pf->hw.xdp_queues) ?
+ (qidx + pf->hw.rx_queues +
+ pf->hw.tx_queues) :
+ CINT_INVALID_CQ;
+ else
+ cq_poll->cq_ids[CQ_XDP] = CINT_INVALID_CQ;
+
cq_poll->dev = (void *)pf;
netif_napi_add(netdev, &cq_poll->napi,
otx2_napi_handler, NAPI_POLL_WEIGHT);
@@ -1585,13 +1682,22 @@ int otx2_open(struct net_device *netdev)
if (pf->linfo.link_up && !(pf->pcifunc & RVU_PFVF_FUNC_MASK))
otx2_handle_link_event(pf);
- /* Restore pause frame settings */
- otx2_config_pause_frm(pf);
+ if (pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
+ otx2_enable_rxvlan(pf, true);
+
+ /* Set NPC parsing mode */
+ otx2_set_npc_parse_mode(pf, false);
+
+ /* Install DMAC Filters */
+ if (pf->flags & OTX2_FLAG_DMACFLTR_SUPPORT)
+ otx2_dmacflt_reinstall_flows(pf);
err = otx2_rxtx_enable(pf, true);
if (err)
goto err_tx_stop_queues;
+ otx2_do_set_rx_mode(pf);
+
return 0;
err_tx_stop_queues:
@@ -1696,7 +1802,7 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
/* Check for minimum and maximum packet length */
if (skb->len <= ETH_HLEN ||
- (!skb_shinfo(skb)->gso_size && skb->len > pf->max_frs)) {
+ (!skb_shinfo(skb)->gso_size && skb->len > pf->tx_max_pktlen)) {
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -1719,6 +1825,17 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
}
+static netdev_features_t otx2_fix_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ features |= NETIF_F_HW_VLAN_STAG_RX;
+ else
+ features &= ~NETIF_F_HW_VLAN_STAG_RX;
+
+ return features;
+}
+
static void otx2_set_rx_mode(struct net_device *netdev)
{
struct otx2_nic *pf = netdev_priv(netdev);
@@ -1726,15 +1843,24 @@ static void otx2_set_rx_mode(struct net_device *netdev)
queue_work(pf->otx2_wq, &pf->rx_mode_work);
}
-static void otx2_do_set_rx_mode(struct work_struct *work)
+static void otx2_do_set_rx_mode(struct otx2_nic *pf)
{
- struct otx2_nic *pf = container_of(work, struct otx2_nic, rx_mode_work);
struct net_device *netdev = pf->netdev;
struct nix_rx_mode *req;
+ bool promisc = false;
if (!(netdev->flags & IFF_UP))
return;
+ if ((netdev->flags & IFF_PROMISC) ||
+ (netdev_uc_count(netdev) > OTX2_MAX_UNICAST_FLOWS)) {
+ promisc = true;
+ }
+
+ /* Add unicast addresses to MCAM entries or delete them from the MCAM */
+ if (!promisc && netdev->priv_flags & IFF_UNICAST_FLT)
+ __dev_uc_sync(netdev, otx2_add_macfilter, otx2_del_macfilter);
+
mutex_lock(&pf->mbox.lock);
req = otx2_mbox_alloc_msg_nix_set_rx_mode(&pf->mbox);
if (!req) {
@@ -1744,16 +1870,24 @@ static void otx2_do_set_rx_mode(struct work_struct *work)
req->mode = NIX_RX_MODE_UCAST;
- /* We don't support MAC address filtering yet */
- if (netdev->flags & IFF_PROMISC)
+ if (promisc)
req->mode |= NIX_RX_MODE_PROMISC;
- else if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST))
+ if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST))
req->mode |= NIX_RX_MODE_ALLMULTI;
+ req->mode |= NIX_RX_MODE_USE_MCE;
+
otx2_sync_mbox_msg(&pf->mbox);
mutex_unlock(&pf->mbox.lock);
}
+static void otx2_rx_mode_wrk_handler(struct work_struct *work)
+{
+ struct otx2_nic *pf = container_of(work, struct otx2_nic, rx_mode_work);
+
+ otx2_do_set_rx_mode(pf);
+}
+
static int otx2_set_features(struct net_device *netdev,
netdev_features_t features)
{
@@ -1763,7 +1897,12 @@ static int otx2_set_features(struct net_device *netdev,
if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev))
return otx2_cgx_config_loopback(pf,
features & NETIF_F_LOOPBACK);
- return 0;
+
+ if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(netdev))
+ return otx2_enable_rxvlan(pf,
+ features & NETIF_F_HW_VLAN_CTAG_RX);
+
+ return otx2_handle_ntuple_tc_features(netdev, features);
}
static void otx2_reset_task(struct work_struct *work)
@@ -1845,7 +1984,7 @@ static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable)
return 0;
}
-static int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
+int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
struct hwtstamp_config config;
@@ -1860,10 +1999,24 @@ static int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
if (config.flags)
return -EINVAL;
+ if (OTX2_IS_INTFMOD_SET(pfvf->ethtool_flags)) {
+ netdev_info(netdev, "Can't support PTP HW timestamping when switch features are enabled\n");
+ return -EOPNOTSUPP;
+ }
+
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
+ if (pfvf->flags & OTX2_FLAG_PTP_ONESTEP_SYNC)
+ pfvf->flags &= ~OTX2_FLAG_PTP_ONESTEP_SYNC;
+
+ cancel_delayed_work(&pfvf->ptp->synctstamp_work);
otx2_config_hw_tx_tstamp(pfvf, false);
break;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ pfvf->flags |= OTX2_FLAG_PTP_ONESTEP_SYNC;
+ schedule_delayed_work(&pfvf->ptp->synctstamp_work,
+ msecs_to_jiffies(500));
+ /* fall through */
case HWTSTAMP_TX_ON:
otx2_config_hw_tx_tstamp(pfvf, true);
break;
@@ -1901,8 +2054,9 @@ static int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
return copy_to_user(ifr->ifr_data, &config,
sizeof(config)) ? -EFAULT : 0;
}
+EXPORT_SYMBOL(otx2_config_hwtstamp);
-static int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
+int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
struct hwtstamp_config *cfg = &pfvf->tstamp;
@@ -1917,11 +2071,417 @@ static int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
return -EOPNOTSUPP;
}
}
+EXPORT_SYMBOL(otx2_ioctl);
+
+static int otx2_do_set_vf_mac(struct otx2_nic *pf, int vf, const u8 *mac)
+{
+ struct npc_install_flow_req *req;
+ int err;
+
+ mutex_lock(&pf->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
+ if (!req) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ ether_addr_copy(req->packet.dmac, mac);
+ eth_broadcast_addr((u8 *)&req->mask.dmac);
+ req->features = BIT_ULL(NPC_DMAC);
+ req->channel = pf->hw.rx_chan_base;
+ req->intf = NIX_INTF_RX;
+ req->default_rule = 1;
+ req->append = 1;
+ req->vf = vf + 1;
+ req->op = NIX_RX_ACTION_DEFAULT;
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+out:
+ mutex_unlock(&pf->mbox.lock);
+ return err;
+}
+
+static int otx2_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ struct pci_dev *pdev = pf->pdev;
+ struct otx2_vf_config *config;
+ int ret;
+
+ if (!netif_running(netdev))
+ return -EAGAIN;
+
+ if (vf >= pf->total_vfs)
+ return -EINVAL;
+
+ if (!is_valid_ether_addr(mac))
+ return -EINVAL;
+
+ config = &pf->vf_configs[vf];
+ ether_addr_copy(config->mac, mac);
+
+ ret = otx2_do_set_vf_mac(pf, vf, mac);
+ if (ret == 0)
+ dev_info(&pdev->dev,
+ "Load/Reload VF driver\n");
+
+ return ret;
+}
+
+int otx2_do_set_vf_vlan(struct otx2_nic *pf, int vf, u16 vlan, u8 qos,
+ __be16 proto)
+{
+ struct otx2_flow_config *flow_cfg = pf->flow_cfg;
+ struct nix_vtag_config_rsp *vtag_rsp;
+ struct npc_delete_flow_req *del_req;
+ struct nix_vtag_config *vtag_req;
+ struct npc_install_flow_req *req;
+ struct otx2_vf_config *config;
+ int err = 0;
+ u32 idx;
+
+ config = &pf->vf_configs[vf];
+
+ if (!vlan && !config->vlan)
+ goto out;
+
+ mutex_lock(&pf->mbox.lock);
+
+ /* free old tx vtag entry */
+ if (config->vlan) {
+ vtag_req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
+ if (!vtag_req) {
+ err = -ENOMEM;
+ goto out;
+ }
+ vtag_req->cfg_type = 0;
+ vtag_req->tx.free_vtag0 = 1;
+ vtag_req->tx.vtag0_idx = config->tx_vtag_idx;
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ if (err)
+ goto out;
+ }
+
+ if (!vlan && config->vlan) {
+ /* rx */
+ del_req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
+ if (!del_req) {
+ err = -ENOMEM;
+ goto out;
+ }
+ idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_RX_INDEX);
+ del_req->entry =
+ flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ if (err)
+ goto out;
+
+ /* tx */
+ del_req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
+ if (!del_req) {
+ err = -ENOMEM;
+ goto out;
+ }
+ idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_TX_INDEX);
+ del_req->entry =
+ flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
+ err = otx2_sync_mbox_msg(&pf->mbox);
+
+ if (!(pf->ethtool_flags & OTX2_PRIV_FLAG_FDSA_HDR))
+ memset(&config->rule, 0, sizeof(config->rule));
+ goto out;
+ }
+
+ /* rx */
+ req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
+ if (!req) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_RX_INDEX);
+ req->entry = flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
+ req->packet.vlan_tci = htons(vlan);
+ req->mask.vlan_tci = htons(VLAN_VID_MASK);
+ /* af fills the destination mac addr */
+ eth_broadcast_addr((u8 *)&req->mask.dmac);
+ req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC);
+ req->channel = pf->hw.rx_chan_base;
+ req->intf = NIX_INTF_RX;
+ req->vf = vf + 1;
+ req->op = NIX_RX_ACTION_DEFAULT;
+ req->vtag0_valid = true;
+ req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7;
+ req->set_cntr = 1;
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ if (err)
+ goto out;
+
+ /* tx */
+ vtag_req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
+ if (!vtag_req) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /* configure tx vtag params */
+ vtag_req->vtag_size = VTAGSIZE_T4;
+ vtag_req->cfg_type = 0; /* tx vlan cfg */
+ vtag_req->tx.cfg_vtag0 = 1;
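+ /* vtag0 carries the tag to be inserted on Tx: TPID in bits 31:16 and
+ * VLAN ID in bits 15:0. For example, proto 0x8100 (802.1Q) with
+ * VLAN 100 encodes as 0x81000064.
+ */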
+ vtag_req->tx.vtag0 = (((u64)ntohs(proto)) << 16) | vlan;
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ if (err)
+ goto out;
+
+ vtag_rsp = (struct nix_vtag_config_rsp *)otx2_mbox_get_rsp
+ (&pf->mbox.mbox, 0, &vtag_req->hdr);
+ if (IS_ERR(vtag_rsp)) {
+ err = PTR_ERR(vtag_rsp);
+ goto out;
+ }
+ config->tx_vtag_idx = vtag_rsp->vtag0_idx;
+
+ req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
+ if (!req) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ eth_zero_addr((u8 *)&req->mask.dmac);
+ idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_TX_INDEX);
+ req->entry = flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
+ req->features = BIT_ULL(NPC_DMAC);
+ req->channel = pf->hw.tx_chan_base;
+ req->intf = NIX_INTF_TX;
+ req->vf = vf + 1;
+ req->op = NIX_TX_ACTIONOP_UCAST_DEFAULT;
+ req->vtag0_def = vtag_rsp->vtag0_idx;
+ req->vtag0_op = VTAG_INSERT;
+ req->set_cntr = 1;
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ /* Update these values to reinstall the vfvlan rule */
+ config->rule.vlan = vlan;
+ config->rule.proto = proto;
+ config->rule.qos = qos;
+out:
+ config->vlan = vlan;
+ mutex_unlock(&pf->mbox.lock);
+ return err;
+}
+
+static int otx2_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
+ __be16 proto)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ struct pci_dev *pdev = pf->pdev;
+
+ if (!netif_running(netdev))
+ return -EAGAIN;
+
+ if (vf >= pci_num_vf(pdev))
+ return -EINVAL;
+
+ /* qos is currently unsupported */
+ if (vlan >= VLAN_N_VID || qos)
+ return -EINVAL;
+
+ if (proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
+ if (!(pf->flags & OTX2_FLAG_VF_VLAN_SUPPORT))
+ return -EOPNOTSUPP;
+
+ if (pf->ethtool_flags & OTX2_PRIV_FLAG_FDSA_HDR)
+ return -EOPNOTSUPP;
+
+ return otx2_do_set_vf_vlan(pf, vf, vlan, qos, proto);
+}
+
+static int otx2_get_vf_config(struct net_device *netdev, int vf,
+ struct ifla_vf_info *ivi)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ struct pci_dev *pdev = pf->pdev;
+ struct otx2_vf_config *config;
+
+ if (!netif_running(netdev))
+ return -EAGAIN;
+
+ if (vf >= pci_num_vf(pdev))
+ return -EINVAL;
+
+ config = &pf->vf_configs[vf];
+ ivi->vf = vf;
+ ether_addr_copy(ivi->mac, config->mac);
+ ivi->vlan = config->vlan;
+ ivi->trusted = config->trusted;
+
+ return 0;
+}
+
+static int otx2_xdp_xmit_tx(struct otx2_nic *pf, struct xdp_frame *xdpf,
+ int qidx)
+{
+ struct page *page;
+ u64 dma_addr;
+ int err = 0;
+
+ dma_addr = otx2_dma_map_page(pf, virt_to_page(xdpf->data),
+ offset_in_page(xdpf->data), xdpf->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(pf->dev, dma_addr))
+ return -ENOMEM;
+
+ err = otx2_xdp_sq_append_pkt(pf, dma_addr, xdpf->len, qidx);
+ if (!err) {
+ otx2_dma_unmap_page(pf, dma_addr, xdpf->len, DMA_TO_DEVICE);
+ page = virt_to_page(xdpf->data);
+ put_page(page);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int otx2_xdp_xmit(struct net_device *netdev, int n,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ int qidx = smp_processor_id();
+ struct otx2_snd_queue *sq;
+ int drops = 0, i;
+
+ if (!netif_running(netdev))
+ return -ENETDOWN;
+
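+ /* XDP send queues are laid out right after the hw.tx_queues
+ * network-stack send queues (one per Rx queue, see otx2_xdp_setup()),
+ * so offset the CPU id by hw.tx_queues to pick this CPU's XDP SQ.
+ */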
+ qidx += pf->hw.tx_queues;
+ sq = pf->xdp_prog ? &pf->qset.sq[qidx] : NULL;
+
+ /* Abort xmit if the XDP queue is not set up */
+ if (unlikely(!sq))
+ return -ENXIO;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ for (i = 0; i < n; i++) {
+ struct xdp_frame *xdpf = frames[i];
+ int err;
+
+ err = otx2_xdp_xmit_tx(pf, xdpf, qidx);
+ if (err)
+ drops++;
+ }
+ return n - drops;
+}
+
+static int otx2_xdp_setup(struct otx2_nic *pf, struct bpf_prog *prog)
+{
+ bool if_up = netif_running(pf->netdev);
+ struct bpf_prog *old_prog;
+ int err = 0;
+
+ if (if_up)
+ otx2_stop(pf->netdev);
+
+ old_prog = xchg(&pf->xdp_prog, prog);
+
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ if (pf->xdp_prog) {
+ bpf_prog_add(pf->xdp_prog, pf->hw.rx_queues - 1);
+ if (IS_ERR(pf->xdp_prog))
+ err = PTR_ERR(pf->xdp_prog);
+ }
+ /* Network stack and XDP share the same Rx queues.
+ * Use separate Tx queues for XDP and the network stack.
+ */
+ if (pf->xdp_prog)
+ pf->hw.xdp_queues = pf->hw.rx_queues;
+ else
+ pf->hw.xdp_queues = 0;
+
+ pf->hw.tot_tx_queues += pf->hw.xdp_queues;
+
+ if (if_up)
+ otx2_open(pf->netdev);
+
+ return err;
+}
+
+static int otx2_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return otx2_xdp_setup(pf, xdp->prog);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int otx2_set_vf_permissions(struct otx2_nic *pf, int vf,
+ int req_perm)
+{
+ struct set_vf_perm *req;
+ int rc;
+
+ mutex_lock(&pf->mbox.lock);
+ req = otx2_mbox_alloc_msg_set_vf_perm(&pf->mbox);
+ if (!req) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Let AF reset VF permissions as sriov is disabled */
+ if (req_perm == OTX2_RESET_VF_PERM) {
+ req->flags |= RESET_VF_PERM;
+ } else if (req_perm == OTX2_TRUSTED_VF) {
+ if (pf->vf_configs[vf].trusted)
+ req->flags |= VF_TRUSTED;
+ }
+
+ req->vf = vf;
+ rc = otx2_sync_mbox_msg(&pf->mbox);
+out:
+ mutex_unlock(&pf->mbox.lock);
+ return rc;
+}
+
+static int otx2_ndo_set_vf_trust(struct net_device *netdev, int vf,
+ bool enable)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ struct pci_dev *pdev = pf->pdev;
+ int rc;
+
+ if (vf >= pci_num_vf(pdev))
+ return -EINVAL;
+
+ if (pf->vf_configs[vf].trusted == enable)
+ return 0;
+
+ pf->vf_configs[vf].trusted = enable;
+ rc = otx2_set_vf_permissions(pf, vf, OTX2_TRUSTED_VF);
+
+ if (rc)
+ pf->vf_configs[vf].trusted = !enable;
+ else
+ netdev_info(pf->netdev, "VF %d is %strusted\n",
+ vf, enable ? "" : "not ");
+ return rc;
+}
static const struct net_device_ops otx2_netdev_ops = {
.ndo_open = otx2_open,
.ndo_stop = otx2_stop,
.ndo_start_xmit = otx2_xmit,
+ .ndo_fix_features = otx2_fix_features,
.ndo_set_mac_address = otx2_set_mac_address,
.ndo_change_mtu = otx2_change_mtu,
.ndo_set_rx_mode = otx2_set_rx_mode,
@@ -1929,6 +2489,13 @@ static const struct net_device_ops otx2_netdev_ops = {
.ndo_tx_timeout = otx2_tx_timeout,
.ndo_get_stats64 = otx2_get_stats64,
.ndo_do_ioctl = otx2_ioctl,
+ .ndo_set_vf_mac = otx2_set_vf_mac,
+ .ndo_set_vf_vlan = otx2_set_vf_vlan,
+ .ndo_get_vf_config = otx2_get_vf_config,
+ .ndo_bpf = otx2_xdp,
+ .ndo_xdp_xmit = otx2_xdp_xmit,
+ .ndo_setup_tc = otx2_setup_tc,
+ .ndo_set_vf_trust = otx2_ndo_set_vf_trust,
};
static int otx2_wq_init(struct otx2_nic *pf)
@@ -1937,7 +2504,7 @@ static int otx2_wq_init(struct otx2_nic *pf)
if (!pf->otx2_wq)
return -ENOMEM;
- INIT_WORK(&pf->rx_mode_work, otx2_do_set_rx_mode);
+ INIT_WORK(&pf->rx_mode_work, otx2_rx_mode_wrk_handler);
INIT_WORK(&pf->reset_task, otx2_reset_task);
return 0;
}
@@ -1983,6 +2550,43 @@ static int otx2_realloc_msix_vectors(struct otx2_nic *pf)
return otx2_register_mbox_intr(pf, false);
}
+static int otx2_sriov_vfcfg_init(struct otx2_nic *pf)
+{
+ int i;
+
+ pf->vf_configs = devm_kcalloc(pf->dev, pf->total_vfs,
+ sizeof(struct otx2_vf_config),
+ GFP_KERNEL);
+ if (!pf->vf_configs)
+ return -ENOMEM;
+
+ for (i = 0; i < pf->total_vfs; i++) {
+ pf->vf_configs[i].pf = pf;
+ pf->vf_configs[i].intf_down = true;
+ pf->vf_configs[i].trusted = false;
+ INIT_DELAYED_WORK(&pf->vf_configs[i].link_event_work,
+ otx2_vf_link_event_task);
+ INIT_DELAYED_WORK(&pf->vf_configs[i].ptp_info_work,
+ otx2_vf_ptp_info_task);
+ }
+
+ return 0;
+}
+
+static void otx2_sriov_vfcfg_cleanup(struct otx2_nic *pf)
+{
+ int i;
+
+ if (!pf->vf_configs)
+ return;
+
+ for (i = 0; i < pf->total_vfs; i++) {
+ cancel_delayed_work_sync(&pf->vf_configs[i].link_event_work);
+ cancel_delayed_work_sync(&pf->vf_configs[i].ptp_info_work);
+ otx2_set_vf_permissions(pf, i, OTX2_RESET_VF_PERM);
+ }
+}
+
static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct device *dev = &pdev->dev;
@@ -2034,7 +2638,10 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
hw->pdev = pdev;
hw->rx_queues = qcount;
hw->tx_queues = qcount;
+ hw->tot_tx_queues = qcount;
hw->max_queues = qcount;
+ /* Use 128-byte CQE descriptors by default */
+ hw->xqe_size = 128;
num_vec = pci_msix_vec_count(pdev);
hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
@@ -2071,6 +2678,8 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_free_netdev;
}
+ otx2_setup_dev_hw_settings(pf);
+
/* Init PF <=> AF mailbox stuff */
err = otx2_pfaf_mbox_init(pf);
if (err)
@@ -2096,7 +2705,9 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_detach_rsrc;
- otx2_setup_dev_hw_settings(pf);
+ err = cn10k_lmtst_init(pf);
+ if (err)
+ goto err_detach_rsrc;
/* Assign default mac address */
otx2_get_mac_from_af(netdev);
@@ -2123,21 +2734,44 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
NETIF_F_GSO_UDP_L4);
netdev->features |= netdev->hw_features;
+ err = otx2_mcam_flow_init(pf);
+ if (err)
+ goto err_ptp_destroy;
+
+ if (pf->flags & OTX2_FLAG_NTUPLE_SUPPORT)
+ netdev->hw_features |= NETIF_F_NTUPLE;
+
+ if (pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT)
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
+ /* Support TSO on tag interface */
+ netdev->vlan_features |= netdev->features;
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX;
+ if (pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_STAG_RX;
+ netdev->features |= netdev->hw_features;
+
+ /* HW supports tc offload, but it is mutually exclusive with n-tuple filters */
+ if (pf->flags & OTX2_FLAG_TC_FLOWER_SUPPORT)
+ netdev->hw_features |= NETIF_F_HW_TC;
+
netdev->hw_features |= NETIF_F_LOOPBACK | NETIF_F_RXALL;
netdev->gso_max_segs = OTX2_MAX_GSO_SEGS;
- netdev->watchdog_timeo = OTX2_TX_TIMEOUT;
+ netdev->watchdog_timeo = netdev->watchdog_timeo ?
+ netdev->watchdog_timeo : OTX2_TX_TIMEOUT;
netdev->netdev_ops = &otx2_netdev_ops;
- /* MTU range: 64 - 9190 */
netdev->min_mtu = OTX2_MIN_MTU;
- netdev->max_mtu = OTX2_MAX_MTU;
+ netdev->max_mtu = otx2_get_max_mtu(pf);
err = register_netdev(netdev);
if (err) {
dev_err(dev, "Failed to register netdevice\n");
- goto err_ptp_destroy;
+ goto err_del_mcam_entries;
}
err = otx2_wq_init(pf);
@@ -2146,25 +2780,54 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
otx2_set_ethtool_ops(netdev);
+ err = otx2_init_tc(pf);
+ if (err)
+ goto err_mcam_flow_del;
+
+ err = otx2_register_dl(pf);
+ if (err)
+ goto err_mcam_flow_del;
+
+ /* Initialize SR-IOV resources */
+ err = otx2_sriov_vfcfg_init(pf);
+ if (err)
+ goto err_pf_sriov_init;
+
/* Enable link notifications */
otx2_cgx_config_linkevents(pf, true);
- /* Enable pause frames by default */
- pf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED;
- pf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED;
+ /* Set interface mode as Default */
+ pf->ethtool_flags |= OTX2_PRIV_FLAG_DEF_MODE;
+
+#ifdef CONFIG_DCB
+ err = otx2_dcbnl_set_ops(netdev);
+ if (err)
+ goto err_pf_sriov_init;
+#endif
return 0;
+err_pf_sriov_init:
+ otx2_shutdown_tc(pf);
+err_mcam_flow_del:
+ otx2_mcam_flow_del(pf);
err_unreg_netdev:
unregister_netdev(netdev);
+err_del_mcam_entries:
+ otx2_mcam_flow_del(pf);
err_ptp_destroy:
otx2_ptp_destroy(pf);
err_detach_rsrc:
+ if (pf->hw.lmt_info)
+ free_percpu(pf->hw.lmt_info);
+ if (test_bit(CN10K_LMTST, &pf->hw.cap_flag))
+ qmem_free(pf->dev, pf->dync_lmt);
otx2_detach_resources(&pf->mbox);
err_disable_mbox_intr:
otx2_disable_mbox_intr(pf);
err_mbox_destroy:
otx2_pfaf_mbox_destroy(pf);
+ otx2_pfvf_mbox_destroy(pf);
err_free_irq_vectors:
pci_free_irq_vectors(hw->pdev);
err_free_netdev:
@@ -2203,11 +2866,39 @@ static void otx2_vf_link_event_task(struct work_struct *work)
otx2_sync_mbox_up_msg(&pf->mbox_pfvf[0], vf_idx);
}
+static void otx2_vf_ptp_info_task(struct work_struct *work)
+{
+ struct cgx_ptp_rx_info_msg *req;
+ struct otx2_vf_config *config;
+ struct mbox_msghdr *msghdr;
+ struct otx2_nic *pf;
+ int vf_idx;
+
+ config = container_of(work, struct otx2_vf_config,
+ ptp_info_work.work);
+ vf_idx = config - config->pf->vf_configs;
+ pf = config->pf;
+
+ msghdr = otx2_mbox_alloc_msg_rsp(&pf->mbox_pfvf[0].mbox_up, vf_idx,
+ sizeof(*req), sizeof(struct msg_rsp));
+ if (!msghdr) {
+ dev_err(pf->dev, "Failed to create VF%d PTP info message\n", vf_idx);
+ return;
+ }
+
+ req = (struct cgx_ptp_rx_info_msg *)msghdr;
+ req->hdr.id = MBOX_MSG_CGX_PTP_RX_INFO;
+ req->hdr.sig = OTX2_MBOX_REQ_SIG;
+ req->ptp_en = pf->ptp->ptp_en;
+
+ otx2_sync_mbox_up_msg(&pf->mbox_pfvf[0], vf_idx);
+}
+
static int otx2_sriov_enable(struct pci_dev *pdev, int numvfs)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct otx2_nic *pf = netdev_priv(netdev);
- int ret, i;
+ int ret;
/* Init PF <=> VF mailbox stuff */
ret = otx2_pfvf_mbox_init(pf, numvfs);
@@ -2218,23 +2909,9 @@ static int otx2_sriov_enable(struct pci_dev *pdev, int numvfs)
if (ret)
goto free_mbox;
- pf->vf_configs = kcalloc(numvfs, sizeof(struct otx2_vf_config),
- GFP_KERNEL);
- if (!pf->vf_configs) {
- ret = -ENOMEM;
- goto free_intr;
- }
-
- for (i = 0; i < numvfs; i++) {
- pf->vf_configs[i].pf = pf;
- pf->vf_configs[i].intf_down = true;
- INIT_DELAYED_WORK(&pf->vf_configs[i].link_event_work,
- otx2_vf_link_event_task);
- }
-
ret = otx2_pf_flr_init(pf, numvfs);
if (ret)
- goto free_configs;
+ goto free_intr;
ret = otx2_register_flr_me_intr(pf, numvfs);
if (ret)
@@ -2249,8 +2926,6 @@ free_flr_intr:
otx2_disable_flr_me_intr(pf);
free_flr:
otx2_flr_wq_destroy(pf);
-free_configs:
- kfree(pf->vf_configs);
free_intr:
otx2_disable_pfvf_mbox_intr(pf, numvfs);
free_mbox:
@@ -2263,17 +2938,12 @@ static int otx2_sriov_disable(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct otx2_nic *pf = netdev_priv(netdev);
int numvfs = pci_num_vf(pdev);
- int i;
if (!numvfs)
return 0;
pci_disable_sriov(pdev);
- for (i = 0; i < pci_num_vf(pdev); i++)
- cancel_delayed_work_sync(&pf->vf_configs[i].link_event_work);
- kfree(pf->vf_configs);
-
otx2_disable_flr_me_intr(pf);
otx2_flr_wq_destroy(pf);
otx2_disable_pfvf_mbox_intr(pf, numvfs);
@@ -2299,23 +2969,49 @@ static void otx2_remove(struct pci_dev *pdev)
return;
pf = netdev_priv(netdev);
+ pf->flags |= OTX2_FLAG_PF_SHUTDOWN;
if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED)
otx2_config_hw_tx_tstamp(pf, false);
if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED)
otx2_config_hw_rx_tstamp(pf, false);
+ /* Disable 802.3x pause frames */
+ if (pf->flags & OTX2_FLAG_RX_PAUSE_ENABLED ||
+ (pf->flags & OTX2_FLAG_TX_PAUSE_ENABLED)) {
+ pf->flags &= ~OTX2_FLAG_RX_PAUSE_ENABLED;
+ pf->flags &= ~OTX2_FLAG_TX_PAUSE_ENABLED;
+ otx2_config_pause_frm(pf);
+ }
+
+#ifdef CONFIG_DCB
+ /* Disable PFC config */
+ if (pf->pfc_en) {
+ pf->pfc_en = 0;
+ otx2_config_priority_flow_ctrl(pf);
+ }
+#endif
+ otx2_set_npc_parse_mode(pf, true);
+
cancel_work_sync(&pf->reset_task);
/* Disable link notifications */
otx2_cgx_config_linkevents(pf, false);
+ otx2_unregister_dl(pf);
unregister_netdev(netdev);
otx2_sriov_disable(pf->pdev);
+ otx2_sriov_vfcfg_cleanup(pf);
if (pf->otx2_wq)
destroy_workqueue(pf->otx2_wq);
otx2_ptp_destroy(pf);
+ otx2_mcam_flow_del(pf);
+ otx2_shutdown_tc(pf);
otx2_detach_resources(&pf->mbox);
+ if (pf->hw.lmt_info)
+ free_percpu(pf->hw.lmt_info);
+ if (test_bit(CN10K_LMTST, &pf->hw.cap_flag))
+ qmem_free(pf->dev, pf->dync_lmt);
otx2_disable_mbox_intr(pf);
otx2_pfaf_mbox_destroy(pf);
pci_free_irq_vectors(pf->pdev);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
index 7bcf5246350f..d1c6fe4559cb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
@@ -1,9 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
-/* Marvell OcteonTx2 PTP support for ethernet driver
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2020 Marvell.
*
- * Copyright (C) 2020 Marvell International Ltd.
*/
+#include <linux/module.h>
#include "otx2_common.h"
#include "otx2_ptp.h"
@@ -12,7 +14,6 @@ static int otx2_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm)
struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
ptp_info);
struct ptp_req *req;
- int err;
if (!ptp->nic)
return -ENODEV;
@@ -24,16 +25,28 @@ static int otx2_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm)
req->op = PTP_OP_ADJFINE;
req->scaled_ppm = scaled_ppm;
- err = otx2_sync_mbox_msg(&ptp->nic->mbox);
- if (err)
- return err;
+ return otx2_sync_mbox_msg(&ptp->nic->mbox);
+}
- return 0;
+static int ptp_set_thresh(struct otx2_ptp *ptp, u64 thresh)
+{
+ struct ptp_req *req;
+
+ if (!ptp->nic)
+ return -ENODEV;
+
+ req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox);
+ if (!req)
+ return -ENOMEM;
+
+ req->op = PTP_OP_SET_THRESH;
+ req->thresh = thresh;
+
+ return otx2_sync_mbox_msg(&ptp->nic->mbox);
}
-static u64 ptp_cc_read(const struct cyclecounter *cc)
+static u64 ptp_tstmp_read(struct otx2_ptp *ptp)
{
- struct otx2_ptp *ptp = container_of(cc, struct otx2_ptp, cycle_counter);
struct ptp_req *req;
struct ptp_rsp *rsp;
int err;
@@ -45,7 +58,7 @@ static u64 ptp_cc_read(const struct cyclecounter *cc)
if (!req)
return 0;
- req->op = PTP_OP_GET_CLOCK;
+ req->op = PTP_OP_GET_TSTMP;
err = otx2_sync_mbox_msg(&ptp->nic->mbox);
if (err)
@@ -59,17 +72,50 @@ static u64 ptp_cc_read(const struct cyclecounter *cc)
return rsp->clk;
}
+static void otx2_get_ptpclock(struct otx2_nic *pfvf, u64 *tstamp)
+{
+ struct ptp_req *req;
+ struct ptp_rsp *rsp;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_ptp_op(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return;
+ }
+
+ req->op = PTP_OP_GET_CLOCK;
+
+ if (!otx2_sync_mbox_msg(&pfvf->mbox)) {
+ rsp = (struct ptp_rsp *)otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0,
+ &req->hdr);
+ *tstamp = rsp->clk;
+ }
+
+ mutex_unlock(&pfvf->mbox.lock);
+}
+
static int otx2_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
{
struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
ptp_info);
struct otx2_nic *pfvf = ptp->nic;
+ struct ptp_req *req;
+ int err;
mutex_lock(&pfvf->mbox.lock);
- timecounter_adjtime(&ptp->time_counter, delta);
+ req = otx2_mbox_alloc_msg_ptp_op(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+ req->op = PTP_OP_ADJ_CLOCK;
+ req->delta = delta;
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
mutex_unlock(&pfvf->mbox.lock);
+ return err;
- return 0;
}
static int otx2_ptp_gettime(struct ptp_clock_info *ptp_info,
@@ -78,13 +124,10 @@ static int otx2_ptp_gettime(struct ptp_clock_info *ptp_info,
struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
ptp_info);
struct otx2_nic *pfvf = ptp->nic;
- u64 nsec;
+ u64 tstamp;
- mutex_lock(&pfvf->mbox.lock);
- nsec = timecounter_read(&ptp->time_counter);
- mutex_unlock(&pfvf->mbox.lock);
-
- *ts = ns_to_timespec64(nsec);
+ otx2_get_ptpclock(pfvf, &tstamp);
+ *ts = ns_to_timespec64(tstamp);
return 0;
}
@@ -95,30 +138,113 @@ static int otx2_ptp_settime(struct ptp_clock_info *ptp_info,
struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
ptp_info);
struct otx2_nic *pfvf = ptp->nic;
- u64 nsec;
-
- nsec = timespec64_to_ns(ts);
+ struct ptp_req *req;
+ int err;
mutex_lock(&pfvf->mbox.lock);
- timecounter_init(&ptp->time_counter, &ptp->cycle_counter, nsec);
+ req = otx2_mbox_alloc_msg_ptp_op(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->op = PTP_OP_SET_CLOCK;
+ req->nsec = timespec64_to_ns(ts);
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
mutex_unlock(&pfvf->mbox.lock);
+ return err;
+}
+static int otx2_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ switch (func) {
+ case PTP_PF_NONE:
+ case PTP_PF_EXTTS:
+ break;
+ case PTP_PF_PEROUT:
+ case PTP_PF_PHYSYNC:
+ return -1;
+ }
return 0;
}
+static void otx2_ptp_extts_check(struct work_struct *work)
+{
+ struct otx2_ptp *ptp = container_of(work, struct otx2_ptp,
+ extts_work.work);
+ struct ptp_clock_event event;
+ u64 tstmp, new_thresh;
+
+ mutex_lock(&ptp->nic->mbox.lock);
+ tstmp = ptp_tstmp_read(ptp);
+ mutex_unlock(&ptp->nic->mbox.lock);
+
+ if (tstmp != ptp->last_extts) {
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = 0;
+ event.timestamp = ptp->convert_tx_ptp_tstmp(tstmp);
+ ptp_clock_event(ptp->ptp_clock, &event);
+ ptp->last_extts = tstmp;
+
+ new_thresh = tstmp % 500000000;
+ if (ptp->thresh != new_thresh) {
+ mutex_lock(&ptp->nic->mbox.lock);
+ ptp_set_thresh(ptp, new_thresh);
+ mutex_unlock(&ptp->nic->mbox.lock);
+ ptp->thresh = new_thresh;
+ }
+ }
+ schedule_delayed_work(&ptp->extts_work, msecs_to_jiffies(200));
+}
+
+static void otx2_sync_tstamp(struct work_struct *work)
+{
+ struct otx2_ptp *ptp = container_of(work, struct otx2_ptp,
+ synctstamp_work.work);
+
+ otx2_get_ptpclock(ptp->nic, &ptp->tstamp);
+ schedule_delayed_work(&ptp->synctstamp_work, msecs_to_jiffies(500));
+}
+
static int otx2_ptp_enable(struct ptp_clock_info *ptp_info,
struct ptp_clock_request *rq, int on)
{
+ struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
+ ptp_info);
+ int pin = -1;
+
+ if (!ptp->nic)
+ return -ENODEV;
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_EXTTS:
+ pin = ptp_find_pin(ptp->ptp_clock, PTP_PF_EXTTS,
+ rq->extts.index);
+ if (pin < 0)
+ return -EBUSY;
+ if (on)
+ schedule_delayed_work(&ptp->extts_work, msecs_to_jiffies(200));
+ else
+ cancel_delayed_work_sync(&ptp->extts_work);
+ return 0;
+ default:
+ break;
+ }
return -EOPNOTSUPP;
}
int otx2_ptp_init(struct otx2_nic *pfvf)
{
struct otx2_ptp *ptp_ptr;
- struct cyclecounter *cc;
struct ptp_req *req;
int err;
+ if (is_otx2_lbkvf(pfvf->pdev)) {
+ pfvf->ptp = NULL;
+ return 0;
+ }
+
mutex_lock(&pfvf->mbox.lock);
/* check if PTP block is available */
req = otx2_mbox_alloc_msg_ptp_op(&pfvf->mbox);
@@ -144,29 +270,28 @@ int otx2_ptp_init(struct otx2_nic *pfvf)
ptp_ptr->nic = pfvf;
- cc = &ptp_ptr->cycle_counter;
- cc->read = ptp_cc_read;
- cc->mask = CYCLECOUNTER_MASK(64);
- cc->mult = 1;
- cc->shift = 0;
-
- timecounter_init(&ptp_ptr->time_counter, &ptp_ptr->cycle_counter,
- ktime_to_ns(ktime_get_real()));
+ snprintf(ptp_ptr->extts_config.name, sizeof(ptp_ptr->extts_config.name), "TSTAMP");
+ ptp_ptr->extts_config.index = 0;
+ ptp_ptr->extts_config.func = PTP_PF_NONE;
ptp_ptr->ptp_info = (struct ptp_clock_info) {
.owner = THIS_MODULE,
.name = "OcteonTX2 PTP",
.max_adj = 1000000000ull,
- .n_ext_ts = 0,
- .n_pins = 0,
+ .n_ext_ts = 1,
+ .n_pins = 1,
.pps = 0,
+ .pin_config = &ptp_ptr->extts_config,
.adjfine = otx2_ptp_adjfine,
.adjtime = otx2_ptp_adjtime,
.gettime64 = otx2_ptp_gettime,
.settime64 = otx2_ptp_settime,
.enable = otx2_ptp_enable,
+ .verify = otx2_ptp_verify_pin,
};
+ INIT_DELAYED_WORK(&ptp_ptr->extts_work, otx2_ptp_extts_check);
+
ptp_ptr->ptp_clock = ptp_clock_register(&ptp_ptr->ptp_info, pfvf->dev);
if (IS_ERR_OR_NULL(ptp_ptr->ptp_clock)) {
err = ptp_ptr->ptp_clock ?
@@ -175,11 +300,22 @@ int otx2_ptp_init(struct otx2_nic *pfvf)
goto error;
}
+ if (is_dev_otx2(pfvf->pdev)) {
+ ptp_ptr->convert_rx_ptp_tstmp = &otx2_ptp_convert_rx_timestamp;
+ ptp_ptr->convert_tx_ptp_tstmp = &otx2_ptp_convert_tx_timestamp;
+ } else {
+ ptp_ptr->convert_rx_ptp_tstmp = &cn10k_ptp_convert_timestamp;
+ ptp_ptr->convert_tx_ptp_tstmp = &cn10k_ptp_convert_timestamp;
+ }
+
+ INIT_DELAYED_WORK(&ptp_ptr->synctstamp_work, otx2_sync_tstamp);
+
pfvf->ptp = ptp_ptr;
error:
return err;
}
+EXPORT_SYMBOL_GPL(otx2_ptp_init);
void otx2_ptp_destroy(struct otx2_nic *pfvf)
{
@@ -188,10 +324,13 @@ void otx2_ptp_destroy(struct otx2_nic *pfvf)
if (!ptp)
return;
+ cancel_delayed_work(&pfvf->ptp->synctstamp_work);
+
ptp_clock_unregister(ptp->ptp_clock);
kfree(ptp);
pfvf->ptp = NULL;
}
+EXPORT_SYMBOL_GPL(otx2_ptp_destroy);
int otx2_ptp_clock_index(struct otx2_nic *pfvf)
{
@@ -200,13 +339,8 @@ int otx2_ptp_clock_index(struct otx2_nic *pfvf)
return ptp_clock_index(pfvf->ptp->ptp_clock);
}
+EXPORT_SYMBOL_GPL(otx2_ptp_clock_index);
-int otx2_ptp_tstamp2time(struct otx2_nic *pfvf, u64 tstamp, u64 *tsns)
-{
- if (!pfvf->ptp)
- return -ENODEV;
-
- *tsns = timecounter_cyc2time(&pfvf->ptp->time_counter, tstamp);
-
- return 0;
-}
+MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
+MODULE_DESCRIPTION("Marvell RVU NIC PTP Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h
index 706d63a43ae1..7ff41927ceaf 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h
@@ -1,9 +1,28 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Marvell OcteonTx2 PTP support for ethernet driver */
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
#ifndef OTX2_PTP_H
#define OTX2_PTP_H
+static inline u64 otx2_ptp_convert_rx_timestamp(u64 timestamp)
+{
+ return be64_to_cpu(*(__be64 *)&timestamp);
+}
+
+static inline u64 otx2_ptp_convert_tx_timestamp(u64 timestamp)
+{
+ return timestamp;
+}
+
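+/* CN10K hardware reports the timestamp with seconds in the upper 32 bits
+ * and nanoseconds in the lower 32 bits. For example, (5ULL << 32) | 123456
+ * converts to 5 * NSEC_PER_SEC + 123456 = 5000123456 ns.
+ */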
+static inline u64 cn10k_ptp_convert_timestamp(u64 timestamp)
+{
+ return ((timestamp >> 32) * NSEC_PER_SEC) + (timestamp & 0xFFFFFFFFUL);
+}
+
int otx2_ptp_init(struct otx2_nic *pfvf);
void otx2_ptp_destroy(struct otx2_nic *pfvf);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
index 867f646e0802..6ef52051ab09 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
@@ -1,11 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Marvell OcteonTx2 RVU Ethernet driver
+/* Marvell RVU Ethernet driver
*
- * Copyright (C) 2020 Marvell International Ltd.
+ * Copyright (C) 2020 Marvell.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef OTX2_REG_H
@@ -44,6 +41,8 @@
#define RVU_PF_MSIX_VECX_ADDR(a) (0x000 | (a) << 4)
#define RVU_PF_MSIX_VECX_CTL(a) (0x008 | (a) << 4)
#define RVU_PF_MSIX_PBAX(a) (0xF0000 | (a) << 3)
+#define RVU_PF_VF_MBOX_ADDR (0xC40)
+#define RVU_PF_LMTLINE_ADDR (0xC48)
/* RVU VF registers */
#define RVU_VF_VFPF_MBOX0 (0x00000)
@@ -57,6 +56,7 @@
#define RVU_VF_MSIX_VECX_ADDR(a) (0x000 | (a) << 4)
#define RVU_VF_MSIX_VECX_CTL(a) (0x008 | (a) << 4)
#define RVU_VF_MSIX_PBAX(a) (0xF0000 | (a) << 3)
+#define RVU_VF_MBOX_REGION (0xC0000)
#define RVU_FUNC_BLKADDR_SHIFT 20
#define RVU_FUNC_BLKADDR_MASK 0x1FULL
@@ -91,6 +91,7 @@
#define NPA_LF_QINTX_INT_W1S(a) (NPA_LFBASE | 0x318 | (a) << 12)
#define NPA_LF_QINTX_ENA_W1S(a) (NPA_LFBASE | 0x320 | (a) << 12)
#define NPA_LF_QINTX_ENA_W1C(a) (NPA_LFBASE | 0x330 | (a) << 12)
+#define NPA_LF_AURA_BATCH_FREE0 (NPA_LFBASE | 0x400)
/* NIX LF registers */
#define NIX_LFBASE (BLKTYPE_NIX << RVU_FUNC_BLKADDR_SHIFT)
@@ -139,6 +140,7 @@
/* NIX AF transmit scheduler registers */
#define NIX_AF_SMQX_CFG(a) (0x700 | (a) << 16)
+#define NIX_AF_TL4X_SDP_LINK_CFG(a) (0xB10 | (a) << 16)
#define NIX_AF_TL1X_SCHEDULE(a) (0xC00 | (a) << 16)
#define NIX_AF_TL1X_CIR(a) (0xC20 | (a) << 16)
#define NIX_AF_TL1X_TOPOLOGY(a) (0xC80 | (a) << 16)
@@ -148,6 +150,7 @@
#define NIX_AF_TL3X_SCHEDULE(a) (0x1000 | (a) << 16)
#define NIX_AF_TL4X_PARENT(a) (0x1288 | (a) << 16)
#define NIX_AF_TL4X_SCHEDULE(a) (0x1200 | (a) << 16)
+#define NIX_AF_TL4X_PIR(a) (0x1230 | (a) << 16)
#define NIX_AF_MDQX_SCHEDULE(a) (0x1400 | (a) << 16)
#define NIX_AF_MDQX_PARENT(a) (0x1480 | (a) << 16)
#define NIX_AF_TL3_TL2X_LINKX_CFG(a, b) (0x1700 | (a) << 16 | (b) << 3)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_smqvf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_smqvf.c
new file mode 100644
index 000000000000..20f2ebb78945
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_smqvf.c
@@ -0,0 +1,282 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Virtual Function ethernet driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/etherdevice.h>
+#include <linux/pci.h>
+
+#include "otx2_common.h"
+#include "otx2_reg.h"
+#include "otx2_struct.h"
+#include "rvu_fixes.h"
+
+/* serialize device removal and xmit */
+DEFINE_MUTEX(remove_lock);
+
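+/* Prebuilt 64-byte Ethernet/IPv4/UDP frame (dst 00:0f:b7:11:a6:87,
+ * src 02:e0:28:a5:f6:00, ethertype 0x0800, IP protocol 0x11) that the
+ * SMQ VF transmits as dummy traffic.
+ */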
+static char pkt_data[64] = { 0x00, 0x0f, 0xb7, 0x11, 0xa6, 0x87, 0x02, 0xe0,
+ 0x28, 0xa5, 0xf6, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x32, 0x00, 0x00, 0x00, 0x00, 0x04, 0x11,
+ 0xee, 0x53, 0x50, 0x50, 0x50, 0x02, 0x14, 0x14,
+ 0x14, 0x02, 0x10, 0x00, 0x10, 0x01, 0x00, 0x1e,
+ 0x00, 0x00, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66,
+ 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e,
+ 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76 };
+
+static struct sk_buff *the_skb;
+static struct otx2_nic *the_smqvf;
+static u16 drop_entry = 0xFFFF;
+
+static bool is_otx2_smqvf(struct otx2_nic *vf)
+{
+ if (vf->pcifunc == RVU_SMQVF_PCIFUNC &&
+ (is_96xx_A0(vf->pdev) || is_95xx_A0(vf->pdev)))
+ return true;
+
+ return false;
+}
+
+static void __otx2_sqe_flush(struct otx2_snd_queue *sq, int size)
+{
+ u64 status;
+
+ /* Packet data stores should finish before SQE is flushed to HW */
+ dma_wmb();
+
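+ /* otx2_lmt_flush() returning 0 means the SQE was not accepted by
+ * hardware; keep retrying until it is.
+ */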
+ do {
+ memcpy(sq->lmt_addr, sq->sqe_base, size);
+ status = otx2_lmt_flush(sq->io_addr);
+ } while (status == 0);
+
+ sq->head++;
+ sq->head &= (sq->sqe_cnt - 1);
+}
+
+static int otx2_ctx_update(struct otx2_nic *vf, u16 qidx)
+{
+ struct nix_aq_enq_req *sq_aq, *rq_aq, *cq_aq;
+
+ /* Do not link CQ for SQ and disable RQ, CQ */
+ sq_aq = otx2_mbox_alloc_msg_nix_aq_enq(&vf->mbox);
+ if (!sq_aq)
+ return -ENOMEM;
+
+ sq_aq->sq.cq_ena = 0;
+ sq_aq->sq_mask.cq_ena = 1;
+ sq_aq->qidx = qidx;
+ sq_aq->ctype = NIX_AQ_CTYPE_SQ;
+ sq_aq->op = NIX_AQ_INSTOP_WRITE;
+
+ rq_aq = otx2_mbox_alloc_msg_nix_aq_enq(&vf->mbox);
+ if (!rq_aq)
+ return -ENOMEM;
+
+ rq_aq->rq.ena = 0;
+ rq_aq->rq_mask.ena = 1;
+ rq_aq->qidx = qidx;
+ rq_aq->ctype = NIX_AQ_CTYPE_RQ;
+ rq_aq->op = NIX_AQ_INSTOP_WRITE;
+
+ cq_aq = otx2_mbox_alloc_msg_nix_aq_enq(&vf->mbox);
+ if (!cq_aq)
+ return -ENOMEM;
+
+ cq_aq->cq.ena = 0;
+ cq_aq->cq_mask.ena = 1;
+ cq_aq->qidx = qidx;
+ cq_aq->ctype = NIX_AQ_CTYPE_CQ;
+ cq_aq->op = NIX_AQ_INSTOP_WRITE;
+
+ return otx2_sync_mbox_msg(&vf->mbox);
+}
+
+void otx2smqvf_xmit(void)
+{
+ struct otx2_snd_queue *sq;
+ int i, size;
+
+ mutex_lock(&remove_lock);
+
+ if (!the_smqvf) {
+ mutex_unlock(&remove_lock);
+ return;
+ }
+
+ sq = &the_smqvf->qset.sq[0];
+ /* Min. set of send descriptors required to send packets */
+ size = sizeof(struct nix_sqe_hdr_s) + sizeof(struct nix_sqe_sg_s) +
+ sizeof(struct nix_sqe_ext_s) + sizeof(u64);
+
+ for (i = 0; i < 256; i++)
+ __otx2_sqe_flush(sq, size);
+
+ mutex_unlock(&remove_lock);
+}
+EXPORT_SYMBOL(otx2smqvf_xmit);
+
+static int otx2smqvf_install_flow(struct otx2_nic *vf)
+{
+ struct npc_mcam_alloc_entry_req *alloc_req;
+ struct npc_mcam_free_entry_req *free_req;
+ struct npc_install_flow_req *install_req;
+ struct npc_mcam_alloc_entry_rsp *rsp;
+ struct msg_req *msg;
+ int err, qid;
+ size_t size;
+ void *data;
+
+ size = SKB_DATA_ALIGN(64 + OTX2_ALIGN) + NET_SKB_PAD +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+ err = -ENOMEM;
+
+ data = kzalloc(size, GFP_KERNEL);
+ if (!data)
+ return err;
+
+ memcpy(data, &pkt_data, 64);
+
+ the_skb = build_skb(data, 0);
+ if (!the_skb) {
+ kfree(data);
+ return err;
+ }
+ the_skb->len = 64;
+
+ for (qid = 0; qid < vf->hw.tx_queues; qid++) {
+ err = otx2_ctx_update(vf, qid);
+ /* If something is wrong with Q0, treat it as an error */
+ if (err && !qid)
+ goto err_free_mem;
+ }
+
+ mutex_lock(&vf->mbox.lock);
+
+ alloc_req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&vf->mbox);
+ if (!alloc_req) {
+ mutex_unlock(&vf->mbox.lock);
+ goto err_free_mem;
+ }
+ alloc_req->count = 1;
+ alloc_req->contig = true;
+
+ /* Send message to AF */
+ if (otx2_sync_mbox_msg(&vf->mbox)) {
+ err = -EINVAL;
+ mutex_unlock(&vf->mbox.lock);
+ goto err_free_mem;
+ }
+ mutex_unlock(&vf->mbox.lock);
+
+ rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
+ (&vf->mbox.mbox, 0, &alloc_req->hdr);
+ drop_entry = rsp->entry;
+
+ mutex_lock(&vf->mbox.lock);
+
+ /* Send messages to drop Tx packets at NPC and stop Rx traffic */
+ install_req = otx2_mbox_alloc_msg_npc_install_flow(&vf->mbox);
+ if (!install_req) {
+ err = -ENOMEM;
+ mutex_unlock(&vf->mbox.lock);
+ goto err_free_entry;
+ }
+
+ u64_to_ether_addr(0x0ull, install_req->mask.dmac);
+ install_req->entry = drop_entry;
+ install_req->features = BIT_ULL(NPC_DMAC);
+ install_req->intf = NIX_INTF_TX;
+ install_req->op = NIX_TX_ACTIONOP_DROP;
+ install_req->set_cntr = 1;
+
+ msg = otx2_mbox_alloc_msg_nix_lf_stop_rx(&vf->mbox);
+ if (!msg) {
+ mutex_unlock(&vf->mbox.lock);
+ goto err_free_entry;
+ }
+
+ /* Send message to AF */
+ if (otx2_sync_mbox_msg(&vf->mbox)) {
+ err = -EINVAL;
+ mutex_unlock(&vf->mbox.lock);
+ goto err_free_entry;
+ }
+ mutex_unlock(&vf->mbox.lock);
+
+ otx2_sq_append_skb(vf->netdev, &vf->qset.sq[0], the_skb, 0);
+
+ return 0;
+
+err_free_entry:
+ mutex_lock(&vf->mbox.lock);
+ free_req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&vf->mbox);
+ if (!free_req) {
+ dev_err(vf->dev, "Could not allocate msg for freeing entry\n");
+ } else {
+ free_req->entry = drop_entry;
+ WARN_ON(otx2_sync_mbox_msg(&vf->mbox));
+ }
+ mutex_unlock(&vf->mbox.lock);
+err_free_mem:
+ kfree_skb(the_skb);
+ drop_entry = 0xFFFF;
+ return err;
+}
+
+int otx2smqvf_probe(struct otx2_nic *vf)
+{
+ int err;
+
+ if (!is_otx2_smqvf(vf))
+ return -EPERM;
+
+ err = otx2_open(vf->netdev);
+ if (err)
+ return -EINVAL;
+
+ /* Disable QINT interrupts because we do not use a CQ for SQ and
+ * drop TX packets intentionally
+ */
+ otx2_write64(vf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0));
+
+ err = otx2smqvf_install_flow(vf);
+ if (err) {
+ otx2_stop(vf->netdev);
+ return -EINVAL;
+ }
+
+ the_smqvf = vf;
+
+ return 0;
+}
+
+int otx2smqvf_remove(struct otx2_nic *vf)
+{
+ struct npc_mcam_free_entry_req *free_req;
+ struct npc_delete_flow_req *del_req;
+
+ if (!is_otx2_smqvf(vf))
+ return -EPERM;
+
+ mutex_lock(&remove_lock);
+ kfree_skb(the_skb);
+ the_smqvf = NULL;
+ the_skb = NULL;
+ mutex_unlock(&remove_lock);
+
+ mutex_lock(&vf->mbox.lock);
+ del_req = otx2_mbox_alloc_msg_npc_delete_flow(&vf->mbox);
+ free_req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&vf->mbox);
+ if (!del_req || !free_req) {
+ dev_err(vf->dev, "Could not allocate msg for freeing entry\n");
+ } else {
+ del_req->entry = drop_entry;
+ free_req->entry = drop_entry;
+ WARN_ON(otx2_sync_mbox_msg(&vf->mbox));
+ }
+ mutex_unlock(&vf->mbox.lock);
+
+ otx2_stop(vf->netdev);
+ drop_entry = 0xFFFF;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
index cba59ddf71bb..aa205a0d158f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
@@ -1,11 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Marvell OcteonTx2 RVU Ethernet driver
+/* Marvell RVU Ethernet driver
*
- * Copyright (C) 2020 Marvell International Ltd.
+ * Copyright (C) 2020 Marvell.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef OTX2_STRUCT_H
@@ -142,7 +139,9 @@ struct nix_rx_parse_s {
u64 vtag0_ptr : 8; /* W5 */
u64 vtag1_ptr : 8;
u64 flow_key_alg : 5;
- u64 rsvd_383_341 : 43;
+ u64 rsvd_359_341 : 19;
+ u64 color : 2;
+ u64 rsvd_383_362 : 22;
u64 rsvd_447_384; /* W6 */
};
@@ -218,7 +217,8 @@ struct nix_sqe_ext_s {
u64 vlan1_ins_tci : 16;
u64 vlan0_ins_ena : 1;
u64 vlan1_ins_ena : 1;
- u64 rsvd_127_114 : 14;
+ u64 init_color : 2;
+ u64 rsvd_127_116 : 12;
};
struct nix_sqe_sg_s {
@@ -236,8 +236,16 @@ struct nix_sqe_sg_s {
/* NIX send memory subdescriptor structure */
struct nix_sqe_mem_s {
- u64 offset : 16; /* W0 */
- u64 rsvd_52_16 : 37;
+ u64 start_offset : 8;
+ u64 rsvd_11_8 : 4;
+ u64 rsvd_12 : 1;
+ u64 udp_csum_crt : 1;
+ u64 update64 : 1;
+ u64 rsvd_15_16 : 1;
+ u64 base_ns : 32;
+ u64 step_type : 1;
+ u64 rsvd_51_49 : 3;
+ u64 per_lso_seg : 1;
u64 wmem : 1;
u64 dsz : 2;
u64 alg : 4;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
new file mode 100644
index 000000000000..c6229b3593d7
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
@@ -0,0 +1,1065 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2021 Marvell.
+ *
+ */
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/rhashtable.h>
+#include <linux/bitfield.h>
+#include <net/flow_dissector.h>
+#include <net/pkt_cls.h>
+#include <net/tc_act/tc_gact.h>
+#include <net/tc_act/tc_mirred.h>
+#include <net/tc_act/tc_vlan.h>
+#include <net/ipv6.h>
+
+#include "cn10k.h"
+#include "otx2_common.h"
+
+/* Egress rate limiting definitions */
+#define MAX_BURST_EXPONENT 0x0FULL
+#define MAX_BURST_MANTISSA 0xFFULL
+#define MAX_BURST_SIZE 130816ULL
+#define MAX_RATE_DIVIDER_EXPONENT 12ULL
+#define MAX_RATE_EXPONENT 0x0FULL
+#define MAX_RATE_MANTISSA 0xFFULL
+
+/* Bitfields in NIX_TLX_PIR register */
+#define TLX_RATE_MANTISSA GENMASK_ULL(8, 1)
+#define TLX_RATE_EXPONENT GENMASK_ULL(12, 9)
+#define TLX_RATE_DIVIDER_EXPONENT GENMASK_ULL(16, 13)
+#define TLX_BURST_MANTISSA GENMASK_ULL(36, 29)
+#define TLX_BURST_EXPONENT GENMASK_ULL(40, 37)
+
+struct otx2_tc_flow_stats {
+ u64 bytes;
+ u64 pkts;
+ u64 used;
+};
+
+struct otx2_tc_flow {
+ struct rhash_head node;
+ unsigned long cookie;
+ unsigned int bitpos;
+ struct rcu_head rcu;
+ struct otx2_tc_flow_stats stats;
+ spinlock_t lock; /* lock for stats */
+ u16 rq;
+ u16 entry;
+ u16 leaf_profile;
+ bool is_act_police;
+};
+
+int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic)
+{
+ struct otx2_tc_info *tc = &nic->tc_info;
+
+ if (!nic->flow_cfg->max_flows)
+ return 0;
+
+ /* Max flows changed, free the existing bitmap */
+ kfree(tc->tc_entries_bitmap);
+
+ tc->tc_entries_bitmap =
+ kcalloc(BITS_TO_LONGS(nic->flow_cfg->max_flows),
+ sizeof(long), GFP_KERNEL);
+ if (!tc->tc_entries_bitmap) {
+ netdev_err(nic->netdev,
+ "Unable to alloc TC flow entries bitmap\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(otx2_tc_alloc_ent_bitmap);
+
+static void otx2_get_egress_burst_cfg(u32 burst, u32 *burst_exp,
+ u32 *burst_mantissa)
+{
+ unsigned int tmp;
+
+ /* Burst is calculated as
+ * ((256 + BURST_MANTISSA) << (1 + BURST_EXPONENT)) / 256
+ * Max supported burst size is 130,816 bytes.
+ */
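+ /* For example, burst = 6144 bytes gives burst_exp = 11 and
+ * burst_mantissa = 128: ((256 + 128) << (1 + 11)) / 256 = 6144.
+ */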
+ burst = min_t(u32, burst, MAX_BURST_SIZE);
+ if (burst) {
+ *burst_exp = ilog2(burst) ? ilog2(burst) - 1 : 0;
+ tmp = burst - rounddown_pow_of_two(burst);
+ if (burst < MAX_BURST_MANTISSA)
+ *burst_mantissa = tmp * 2;
+ else
+ *burst_mantissa = tmp / (1ULL << (*burst_exp - 7));
+ } else {
+ *burst_exp = MAX_BURST_EXPONENT;
+ *burst_mantissa = MAX_BURST_MANTISSA;
+ }
+}
+
+static void otx2_get_egress_rate_cfg(u32 maxrate, u32 *exp,
+ u32 *mantissa, u32 *div_exp)
+{
+ unsigned int tmp;
+
+ /* Rate calculation by hardware
+ *
+ * PIR_ADD = ((256 + mantissa) << exp) / 256
+ * rate = (2 * PIR_ADD) / ( 1 << div_exp)
+ * The resultant rate is in Mbps.
+ */
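+ /* For example, maxrate = 1000 Mbps gives exp = 8, mantissa = 244 and
+ * div_exp = 0: PIR_ADD = ((256 + 244) << 8) / 256 = 500 and
+ * rate = (2 * 500) / 1 = 1000 Mbps.
+ */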
+
+ /* 2Mbps to 100Gbps can be expressed with div_exp = 0.
+ * Setting this to '0' will ease the calculation of
+ * exponent and mantissa.
+ */
+ *div_exp = 0;
+
+ if (maxrate) {
+ *exp = ilog2(maxrate) ? ilog2(maxrate) - 1 : 0;
+ tmp = maxrate - rounddown_pow_of_two(maxrate);
+ if (maxrate < MAX_RATE_MANTISSA)
+ *mantissa = tmp * 2;
+ else
+ *mantissa = tmp / (1ULL << (*exp - 7));
+ } else {
+ /* Instead of disabling rate limiting, set all values to max */
+ *exp = MAX_RATE_EXPONENT;
+ *mantissa = MAX_RATE_MANTISSA;
+ }
+}
+
+static int otx2_set_matchall_egress_rate(struct otx2_nic *nic, u32 burst, u32 maxrate)
+{
+ struct otx2_hw *hw = &nic->hw;
+ struct nix_txschq_config *req;
+ u32 burst_exp, burst_mantissa;
+ u32 exp, mantissa, div_exp;
+ int txschq, err;
+
+ /* All SQs share the same TL4, so pick the first scheduler */
+ txschq = hw->txschq_list[NIX_TXSCH_LVL_TL4][0];
+
+ /* Get exponent and mantissa values from the desired rate */
+ otx2_get_egress_burst_cfg(burst, &burst_exp, &burst_mantissa);
+ otx2_get_egress_rate_cfg(maxrate, &exp, &mantissa, &div_exp);
+
+ mutex_lock(&nic->mbox.lock);
+ req = otx2_mbox_alloc_msg_nix_txschq_cfg(&nic->mbox);
+ if (!req) {
+ mutex_unlock(&nic->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->lvl = NIX_TXSCH_LVL_TL4;
+ req->num_regs = 1;
+ req->reg[0] = NIX_AF_TL4X_PIR(txschq);
+ req->regval[0] = FIELD_PREP(TLX_BURST_EXPONENT, burst_exp) |
+ FIELD_PREP(TLX_BURST_MANTISSA, burst_mantissa) |
+ FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
+ FIELD_PREP(TLX_RATE_EXPONENT, exp) |
+ FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
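+ /* The TLX_* masks above place the burst fields in bits 40:29 and the
+ * rate fields in bits 16:1 of NIX_AF_TL4X_PIR; bit 0 is assumed here
+ * to be the shaper enable bit.
+ */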
+
+ err = otx2_sync_mbox_msg(&nic->mbox);
+ mutex_unlock(&nic->mbox.lock);
+ return err;
+}
+
+static int otx2_tc_validate_flow(struct otx2_nic *nic,
+ struct flow_action *actions,
+ struct netlink_ext_ack *extack)
+{
+ if (nic->flags & OTX2_FLAG_INTF_DOWN) {
+ NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
+ return -EINVAL;
+ }
+
+ if (!flow_action_has_entries(actions)) {
+ NL_SET_ERR_MSG_MOD(extack, "MATCHALL offload called with no action");
+ return -EINVAL;
+ }
+
+ if (!flow_offload_has_one_action(actions)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Egress MATCHALL offload supports only 1 policing action");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int otx2_tc_egress_matchall_install(struct otx2_nic *nic,
+ struct tc_cls_matchall_offload *cls)
+{
+ struct netlink_ext_ack *extack = cls->common.extack;
+ struct flow_action *actions = &cls->rule->action;
+ struct flow_action_entry *entry;
+ u32 rate;
+ int err;
+
+ err = otx2_tc_validate_flow(nic, actions, extack);
+ if (err)
+ return err;
+
+ if (nic->flags & OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only one Egress MATCHALL ratelimiter can be offloaded");
+ return -ENOMEM;
+ }
+
+ entry = &cls->rule->action.entries[0];
+ switch (entry->id) {
+ case FLOW_ACTION_POLICE:
+ if (entry->police.rate_pkt_ps) {
+ NL_SET_ERR_MSG_MOD(extack, "QoS offload not support packets per second");
+ return -EOPNOTSUPP;
+ }
+ /* Convert bytes per second to Mbps */
+ rate = entry->police.rate_bytes_ps * 8;
+ rate = max_t(u32, rate / 1000000, 1);
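+ /* For example, 125000000 bytes/sec -> 1000000000 bits/sec
+ * -> 1000 Mbps; anything below 1 Mbps is clamped to 1 Mbps.
+ */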
+ err = otx2_set_matchall_egress_rate(nic, entry->police.burst, rate);
+ if (err)
+ return err;
+ nic->flags |= OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only police action is supported with Egress MATCHALL offload");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int otx2_tc_egress_matchall_delete(struct otx2_nic *nic,
+ struct tc_cls_matchall_offload *cls)
+{
+ struct netlink_ext_ack *extack = cls->common.extack;
+ int err;
+
+ if (nic->flags & OTX2_FLAG_INTF_DOWN) {
+ NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
+ return -EINVAL;
+ }
+
+ err = otx2_set_matchall_egress_rate(nic, 0, 0);
+ nic->flags &= ~OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED;
+ return err;
+}
+
+static int otx2_tc_act_set_police(struct otx2_nic *nic,
+ struct otx2_tc_flow *node,
+ struct flow_cls_offload *f,
+ u64 rate, u32 burst, u32 mark,
+ struct npc_install_flow_req *req, bool pps)
+{
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct otx2_hw *hw = &nic->hw;
+ int rq_idx, rc;
+
+ rq_idx = find_first_zero_bit(&nic->rq_bmap, hw->rx_queues);
+ if (rq_idx >= hw->rx_queues) {
+ NL_SET_ERR_MSG_MOD(extack, "Police action rules exceeded");
+ return -EINVAL;
+ }
+
+ mutex_lock(&nic->mbox.lock);
+
+ rc = cn10k_alloc_leaf_profile(nic, &node->leaf_profile);
+ if (rc) {
+ mutex_unlock(&nic->mbox.lock);
+ return rc;
+ }
+
+ rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile, burst, rate, pps);
+ if (rc)
+ goto free_leaf;
+
+ rc = cn10k_map_unmap_rq_policer(nic, rq_idx, node->leaf_profile, true);
+ if (rc)
+ goto free_leaf;
+
+ mutex_unlock(&nic->mbox.lock);
+
+ req->match_id = mark & 0xFFFFULL;
+ req->index = rq_idx;
+ req->op = NIX_RX_ACTIONOP_UCAST;
+ set_bit(rq_idx, &nic->rq_bmap);
+ node->is_act_police = true;
+ node->rq = rq_idx;
+
+ return 0;
+
+free_leaf:
+ if (cn10k_free_leaf_profile(nic, node->leaf_profile))
+ netdev_err(nic->netdev,
+ "Unable to free leaf bandwidth profile(%d)\n",
+ node->leaf_profile);
+ mutex_unlock(&nic->mbox.lock);
+ return rc;
+}
+
+static int otx2_tc_parse_actions(struct otx2_nic *nic,
+ struct flow_action *flow_action,
+ struct npc_install_flow_req *req,
+ struct flow_cls_offload *f,
+ struct otx2_tc_flow *node)
+{
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct flow_action_entry *act;
+ struct net_device *target;
+ struct otx2_nic *priv;
+ u32 burst, mark = 0;
+ u8 nr_police = 0;
+ bool pps = false;
+ u64 rate;
+ int i;
+
+ if (!flow_action_has_entries(flow_action)) {
+ NL_SET_ERR_MSG_MOD(extack, "no tc actions specified");
+ return -EINVAL;
+ }
+
+ flow_action_for_each(i, act, flow_action) {
+ switch (act->id) {
+ case FLOW_ACTION_DROP:
+ req->op = NIX_RX_ACTIONOP_DROP;
+ return 0;
+ case FLOW_ACTION_ACCEPT:
+ req->op = NIX_RX_ACTION_DEFAULT;
+ return 0;
+ case FLOW_ACTION_REDIRECT_INGRESS:
+ target = act->dev;
+ priv = netdev_priv(target);
+ /* npc_install_flow_req doesn't support passing a target pcifunc */
+ if (rvu_get_pf(nic->pcifunc) != rvu_get_pf(priv->pcifunc)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "can't redirect to other pf/vf");
+ return -EOPNOTSUPP;
+ }
+ req->vf = priv->pcifunc & RVU_PFVF_FUNC_MASK;
+ req->op = NIX_RX_ACTION_DEFAULT;
+ return 0;
+ case FLOW_ACTION_VLAN_POP:
+ req->vtag0_valid = true;
+ /* use RX_VTAG_TYPE7 which is initialized to strip vlan tag */
+ req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7;
+ break;
+ case FLOW_ACTION_POLICE:
+ /* Ingress ratelimiting is not supported on OcteonTx2 */
+ if (is_dev_otx2(nic->pdev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Ingress policing not supported on this platform");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.rate_bytes_ps > 0) {
+ rate = act->police.rate_bytes_ps * 8;
+ burst = act->police.burst;
+ } else if (act->police.rate_pkt_ps > 0) {
+ /* The algorithm used to calculate rate
+ * mantissa, exponent values for a given token
+ * rate (token can be byte or packet) requires
+ * token rate to be mutiplied by 8.
+ */
+ rate = act->police.rate_pkt_ps * 8;
+ burst = act->police.burst_pkt;
+ pps = true;
+ }
+ nr_police++;
+ break;
+ case FLOW_ACTION_MARK:
+ mark = act->mark;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
+ if (nr_police > 1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "rate limit police offload requires a single action");
+ return -EOPNOTSUPP;
+ }
+
+ if (nr_police)
+ return otx2_tc_act_set_police(nic, node, f, rate, burst,
+ mark, req, pps);
+
+ return 0;
+}
+
+static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
+ struct flow_cls_offload *f,
+ struct npc_install_flow_req *req)
+{
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct flow_msg *flow_spec = &req->packet;
+ struct flow_msg *flow_mask = &req->mask;
+ struct flow_dissector *dissector;
+ struct flow_rule *rule;
+ u8 ip_proto = 0;
+
+ rule = flow_cls_offload_flow_rule(f);
+ dissector = rule->match.dissector;
+
+ if ((dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_VLAN) |
+ BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT(FLOW_DISSECTOR_KEY_IP)))) {
+ netdev_info(nic->netdev, "unsupported flow used key 0x%x",
+ dissector->used_keys);
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+
+ flow_rule_match_basic(rule, &match);
+
+ /* All EtherTypes can be matched, no hw limitation */
+ flow_spec->etype = match.key->n_proto;
+ flow_mask->etype = match.mask->n_proto;
+ req->features |= BIT_ULL(NPC_ETYPE);
+
+ if (match.mask->ip_proto &&
+ (match.key->ip_proto != IPPROTO_TCP &&
+ match.key->ip_proto != IPPROTO_UDP &&
+ match.key->ip_proto != IPPROTO_SCTP &&
+ match.key->ip_proto != IPPROTO_ICMP &&
+ match.key->ip_proto != IPPROTO_ICMPV6)) {
+ netdev_info(nic->netdev,
+ "ip_proto=0x%x not supported\n",
+ match.key->ip_proto);
+ return -EOPNOTSUPP;
+ }
+ if (match.mask->ip_proto)
+ ip_proto = match.key->ip_proto;
+
+ if (ip_proto == IPPROTO_UDP)
+ req->features |= BIT_ULL(NPC_IPPROTO_UDP);
+ else if (ip_proto == IPPROTO_TCP)
+ req->features |= BIT_ULL(NPC_IPPROTO_TCP);
+ else if (ip_proto == IPPROTO_SCTP)
+ req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
+ else if (ip_proto == IPPROTO_ICMP)
+ req->features |= BIT_ULL(NPC_IPPROTO_ICMP);
+ else if (ip_proto == IPPROTO_ICMPV6)
+ req->features |= BIT_ULL(NPC_IPPROTO_ICMP6);
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+
+ flow_rule_match_eth_addrs(rule, &match);
+ if (!is_zero_ether_addr(match.mask->src)) {
+ NL_SET_ERR_MSG_MOD(extack, "src mac match not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (!is_zero_ether_addr(match.mask->dst)) {
+ ether_addr_copy(flow_spec->dmac, (u8 *)&match.key->dst);
+ ether_addr_copy(flow_mask->dmac,
+ (u8 *)&match.mask->dst);
+ req->features |= BIT_ULL(NPC_DMAC);
+ }
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
+ struct flow_match_ip match;
+
+ flow_rule_match_ip(rule, &match);
+ if ((ntohs(flow_spec->etype) != ETH_P_IP) &&
+ match.mask->tos) {
+ NL_SET_ERR_MSG_MOD(extack, "tos not supported");
+ return -EOPNOTSUPP;
+ }
+ if (match.mask->ttl) {
+ NL_SET_ERR_MSG_MOD(extack, "ttl not supported");
+ return -EOPNOTSUPP;
+ }
+ flow_spec->tos = match.key->tos;
+ flow_mask->tos = match.mask->tos;
+ req->features |= BIT_ULL(NPC_TOS);
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+ u16 vlan_tci, vlan_tci_mask;
+
+ flow_rule_match_vlan(rule, &match);
+
+ if (ntohs(match.key->vlan_tpid) != ETH_P_8021Q) {
+ netdev_err(nic->netdev, "vlan tpid 0x%x not supported\n",
+ ntohs(match.key->vlan_tpid));
+ return -EOPNOTSUPP;
+ }
+
+ if (match.mask->vlan_id ||
+ match.mask->vlan_dei ||
+ match.mask->vlan_priority) {
+ vlan_tci = match.key->vlan_id |
+ match.key->vlan_dei << 12 |
+ match.key->vlan_priority << 13;
+
+ vlan_tci_mask = match.mask->vlan_id |
+ match.mask->vlan_dei << 12 |
+ match.mask->vlan_priority << 13;
+
+ flow_spec->vlan_tci = htons(vlan_tci);
+ flow_mask->vlan_tci = htons(vlan_tci_mask);
+ req->features |= BIT_ULL(NPC_OUTER_VID);
+ }
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+ struct flow_match_ipv4_addrs match;
+
+ flow_rule_match_ipv4_addrs(rule, &match);
+
+ flow_spec->ip4dst = match.key->dst;
+ flow_mask->ip4dst = match.mask->dst;
+ req->features |= BIT_ULL(NPC_DIP_IPV4);
+
+ flow_spec->ip4src = match.key->src;
+ flow_mask->ip4src = match.mask->src;
+ req->features |= BIT_ULL(NPC_SIP_IPV4);
+ } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
+ struct flow_match_ipv6_addrs match;
+
+ flow_rule_match_ipv6_addrs(rule, &match);
+
+ if (ipv6_addr_loopback(&match.key->dst) ||
+ ipv6_addr_loopback(&match.key->src)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Flow matching IPv6 loopback addr not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (!ipv6_addr_any(&match.mask->dst)) {
+ memcpy(&flow_spec->ip6dst,
+ (struct in6_addr *)&match.key->dst,
+ sizeof(flow_spec->ip6dst));
+ memcpy(&flow_mask->ip6dst,
+ (struct in6_addr *)&match.mask->dst,
+ sizeof(flow_spec->ip6dst));
+ req->features |= BIT_ULL(NPC_DIP_IPV6);
+ }
+
+ if (!ipv6_addr_any(&match.mask->src)) {
+ memcpy(&flow_spec->ip6src,
+ (struct in6_addr *)&match.key->src,
+ sizeof(flow_spec->ip6src));
+ memcpy(&flow_mask->ip6src,
+ (struct in6_addr *)&match.mask->src,
+ sizeof(flow_spec->ip6src));
+ req->features |= BIT_ULL(NPC_SIP_IPV6);
+ }
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports match;
+
+ flow_rule_match_ports(rule, &match);
+
+ flow_spec->dport = match.key->dst;
+ flow_mask->dport = match.mask->dst;
+ if (ip_proto == IPPROTO_UDP)
+ req->features |= BIT_ULL(NPC_DPORT_UDP);
+ else if (ip_proto == IPPROTO_TCP)
+ req->features |= BIT_ULL(NPC_DPORT_TCP);
+ else if (ip_proto == IPPROTO_SCTP)
+ req->features |= BIT_ULL(NPC_DPORT_SCTP);
+
+ flow_spec->sport = match.key->src;
+ flow_mask->sport = match.mask->src;
+ if (ip_proto == IPPROTO_UDP)
+ req->features |= BIT_ULL(NPC_SPORT_UDP);
+ else if (ip_proto == IPPROTO_TCP)
+ req->features |= BIT_ULL(NPC_SPORT_TCP);
+ else if (ip_proto == IPPROTO_SCTP)
+ req->features |= BIT_ULL(NPC_SPORT_SCTP);
+ }
+
+ return otx2_tc_parse_actions(nic, &rule->action, req, f, node);
+}
+
+static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry)
+{
+ struct npc_delete_flow_req *req;
+ int err;
+
+ mutex_lock(&nic->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_delete_flow(&nic->mbox);
+ if (!req) {
+ mutex_unlock(&nic->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->entry = entry;
+
+ /* Send message to AF */
+ err = otx2_sync_mbox_msg(&nic->mbox);
+ if (err) {
+ netdev_err(nic->netdev, "Failed to delete MCAM flow entry %d\n",
+ entry);
+ mutex_unlock(&nic->mbox.lock);
+ return -EFAULT;
+ }
+ mutex_unlock(&nic->mbox.lock);
+
+ return 0;
+}
+
+static int otx2_tc_del_flow(struct otx2_nic *nic,
+ struct flow_cls_offload *tc_flow_cmd)
+{
+ struct otx2_flow_config *flow_cfg = nic->flow_cfg;
+ struct otx2_tc_info *tc_info = &nic->tc_info;
+ struct otx2_tc_flow *flow_node;
+ int err;
+
+ flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
+ &tc_flow_cmd->cookie,
+ tc_info->flow_ht_params);
+ if (!flow_node) {
+ netdev_err(nic->netdev, "tc flow not found for cookie 0x%lx\n",
+ tc_flow_cmd->cookie);
+ return -EINVAL;
+ }
+
+ if (flow_node->is_act_police) {
+ mutex_lock(&nic->mbox.lock);
+
+ err = cn10k_map_unmap_rq_policer(nic, flow_node->rq,
+ flow_node->leaf_profile, false);
+ if (err)
+ netdev_err(nic->netdev,
+ "Unmapping RQ %d & profile %d failed\n",
+ flow_node->rq, flow_node->leaf_profile);
+
+ err = cn10k_free_leaf_profile(nic, flow_node->leaf_profile);
+ if (err)
+ netdev_err(nic->netdev,
+ "Unable to free leaf bandwidth profile(%d)\n",
+ flow_node->leaf_profile);
+
+ __clear_bit(flow_node->rq, &nic->rq_bmap);
+
+ mutex_unlock(&nic->mbox.lock);
+ }
+
+ otx2_del_mcam_flow_entry(nic, flow_node->entry);
+
+ WARN_ON(rhashtable_remove_fast(&nic->tc_info.flow_table,
+ &flow_node->node,
+ nic->tc_info.flow_ht_params));
+ kfree_rcu(flow_node, rcu);
+
+ clear_bit(flow_node->bitpos, tc_info->tc_entries_bitmap);
+ flow_cfg->nr_flows--;
+
+ return 0;
+}
+
+static int otx2_tc_add_flow(struct otx2_nic *nic,
+ struct flow_cls_offload *tc_flow_cmd)
+{
+ struct netlink_ext_ack *extack = tc_flow_cmd->common.extack;
+ struct otx2_flow_config *flow_cfg = nic->flow_cfg;
+ struct otx2_tc_info *tc_info = &nic->tc_info;
+ struct otx2_tc_flow *new_node, *old_node;
+ struct npc_install_flow_req *req, dummy;
+ int rc, err;
+
+ if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT))
+ return -ENOMEM;
+
+ if (bitmap_full(tc_info->tc_entries_bitmap, flow_cfg->max_flows)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Free MCAM entry not available to add the flow");
+ return -ENOMEM;
+ }
+
+ /* allocate memory for the new flow and its node */
+ new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
+ if (!new_node)
+ return -ENOMEM;
+ spin_lock_init(&new_node->lock);
+ new_node->cookie = tc_flow_cmd->cookie;
+
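+ /* Parse the classifier into an on-stack scratch request first; the
+ * mbox message is allocated only if parsing succeeds and the scratch
+ * contents are copied over it while preserving the mbox header.
+ */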
+ memset(&dummy, 0, sizeof(struct npc_install_flow_req));
+
+ rc = otx2_tc_prepare_flow(nic, new_node, tc_flow_cmd, &dummy);
+ if (rc) {
+ kfree_rcu(new_node, rcu);
+ return rc;
+ }
+
+ /* If a flow exists with the same cookie, delete it */
+ old_node = rhashtable_lookup_fast(&tc_info->flow_table,
+ &tc_flow_cmd->cookie,
+ tc_info->flow_ht_params);
+ if (old_node)
+ otx2_tc_del_flow(nic, tc_flow_cmd);
+
+ mutex_lock(&nic->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
+ if (!req) {
+ mutex_unlock(&nic->mbox.lock);
+ rc = -ENOMEM;
+ goto free_leaf;
+ }
+
+ memcpy(&dummy.hdr, &req->hdr, sizeof(struct mbox_msghdr));
+ memcpy(req, &dummy, sizeof(struct npc_install_flow_req));
+
+ new_node->bitpos = find_first_zero_bit(tc_info->tc_entries_bitmap,
+ flow_cfg->max_flows);
+ req->channel = nic->hw.rx_chan_base;
+ req->entry = flow_cfg->flow_ent[flow_cfg->max_flows - new_node->bitpos - 1];
+ req->intf = NIX_INTF_RX;
+ req->set_cntr = 1;
+ new_node->entry = req->entry;
+
+ /* Send message to AF */
+ rc = otx2_sync_mbox_msg(&nic->mbox);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to install MCAM flow entry");
+ mutex_unlock(&nic->mbox.lock);
+ kfree_rcu(new_node, rcu);
+ goto free_leaf;
+ }
+ mutex_unlock(&nic->mbox.lock);
+
+ /* add new flow to flow-table */
+ rc = rhashtable_insert_fast(&nic->tc_info.flow_table, &new_node->node,
+ nic->tc_info.flow_ht_params);
+ if (rc) {
+ otx2_del_mcam_flow_entry(nic, req->entry);
+ kfree_rcu(new_node, rcu);
+ goto free_leaf;
+ }
+
+ set_bit(new_node->bitpos, tc_info->tc_entries_bitmap);
+ flow_cfg->nr_flows++;
+
+ return 0;
+
+free_leaf:
+ if (new_node->is_act_police) {
+ mutex_lock(&nic->mbox.lock);
+
+ err = cn10k_map_unmap_rq_policer(nic, new_node->rq,
+ new_node->leaf_profile, false);
+ if (err)
+ netdev_err(nic->netdev,
+ "Unmapping RQ %d & profile %d failed\n",
+ new_node->rq, new_node->leaf_profile);
+ err = cn10k_free_leaf_profile(nic, new_node->leaf_profile);
+ if (err)
+ netdev_err(nic->netdev,
+ "Unable to free leaf bandwidth profile(%d)\n",
+ new_node->leaf_profile);
+
+ __clear_bit(new_node->rq, &nic->rq_bmap);
+
+ mutex_unlock(&nic->mbox.lock);
+ }
+
+ return rc;
+}
+
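+/* Report hardware hit counters for an offloaded flow: read the MCAM
+ * entry stats via the AF mailbox and feed the packet delta since the
+ * last query into the tc flow stats.
+ */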
+static int otx2_tc_get_flow_stats(struct otx2_nic *nic,
+ struct flow_cls_offload *tc_flow_cmd)
+{
+ struct otx2_tc_info *tc_info = &nic->tc_info;
+ struct npc_mcam_get_stats_req *req;
+ struct npc_mcam_get_stats_rsp *rsp;
+ struct otx2_tc_flow_stats *stats;
+ struct otx2_tc_flow *flow_node;
+ int err;
+
+ flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
+ &tc_flow_cmd->cookie,
+ tc_info->flow_ht_params);
+ if (!flow_node) {
+ netdev_info(nic->netdev, "tc flow not found for cookie 0x%lx\n",
+ tc_flow_cmd->cookie);
+ return -EINVAL;
+ }
+
+ mutex_lock(&nic->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_npc_mcam_entry_stats(&nic->mbox);
+ if (!req) {
+ mutex_unlock(&nic->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->entry = flow_node->entry;
+
+ err = otx2_sync_mbox_msg(&nic->mbox);
+ if (err) {
+ netdev_err(nic->netdev, "Failed to get stats for MCAM flow entry %d\n",
+ req->entry);
+ mutex_unlock(&nic->mbox.lock);
+ return -EFAULT;
+ }
+
+ rsp = (struct npc_mcam_get_stats_rsp *)otx2_mbox_get_rsp
+ (&nic->mbox.mbox, 0, &req->hdr);
+ if (IS_ERR(rsp)) {
+ mutex_unlock(&nic->mbox.lock);
+ return PTR_ERR(rsp);
+ }
+
+ mutex_unlock(&nic->mbox.lock);
+
+ if (!rsp->stat_ena)
+ return -EINVAL;
+
+ stats = &flow_node->stats;
+
+ spin_lock(&flow_node->lock);
+ flow_stats_update(&tc_flow_cmd->stats, 0x0, rsp->stat - stats->pkts,
+ 0x0, 0x0, FLOW_ACTION_HW_STATS_IMMEDIATE);
+ stats->pkts = rsp->stat;
+ spin_unlock(&flow_node->lock);
+
+ return 0;
+}
+
+static int otx2_setup_tc_cls_flower(struct otx2_nic *nic,
+ struct flow_cls_offload *cls_flower)
+{
+ switch (cls_flower->command) {
+ case FLOW_CLS_REPLACE:
+ return otx2_tc_add_flow(nic, cls_flower);
+ case FLOW_CLS_DESTROY:
+ return otx2_tc_del_flow(nic, cls_flower);
+ case FLOW_CLS_STATS:
+ return otx2_tc_get_flow_stats(nic, cls_flower);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
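+/* Offload an ingress matchall police action: allocate a matchall ingress
+ * policer on CN10K and program it with the requested burst and rate
+ * (converted from bytes/sec to bits/sec). Only one such offload is
+ * allowed per interface.
+ */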
+static int otx2_tc_ingress_matchall_install(struct otx2_nic *nic,
+ struct tc_cls_matchall_offload *cls)
+{
+ struct netlink_ext_ack *extack = cls->common.extack;
+ struct flow_action *actions = &cls->rule->action;
+ struct flow_action_entry *entry;
+ u64 rate;
+ int err;
+
+ err = otx2_tc_validate_flow(nic, actions, extack);
+ if (err)
+ return err;
+
+ if (nic->flags & OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only one ingress MATCHALL ratelimitter can be offloaded");
+ return -ENOMEM;
+ }
+
+ entry = &cls->rule->action.entries[0];
+ switch (entry->id) {
+ case FLOW_ACTION_POLICE:
+ /* Ingress ratelimiting is not supported on OcteonTx2 */
+ if (is_dev_otx2(nic->pdev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Ingress policing not supported on this platform");
+ return -EOPNOTSUPP;
+ }
+
+ err = cn10k_alloc_matchall_ipolicer(nic);
+ if (err)
+ return err;
+
+ /* Convert to bits per second */
+ rate = entry->police.rate_bytes_ps * 8;
+ err = cn10k_set_matchall_ipolicer_rate(nic, entry->police.burst, rate);
+ if (err)
+ return err;
+ nic->flags |= OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only police action supported with Ingress MATCHALL offload");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int otx2_tc_ingress_matchall_delete(struct otx2_nic *nic,
+ struct tc_cls_matchall_offload *cls)
+{
+ struct netlink_ext_ack *extack = cls->common.extack;
+ int err;
+
+ if (nic->flags & OTX2_FLAG_INTF_DOWN) {
+ NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
+ return -EINVAL;
+ }
+
+ err = cn10k_free_matchall_ipolicer(nic);
+ nic->flags &= ~OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED;
+ return err;
+}
+
+static int otx2_setup_tc_ingress_matchall(struct otx2_nic *nic,
+ struct tc_cls_matchall_offload *cls_matchall)
+{
+ switch (cls_matchall->command) {
+ case TC_CLSMATCHALL_REPLACE:
+ return otx2_tc_ingress_matchall_install(nic, cls_matchall);
+ case TC_CLSMATCHALL_DESTROY:
+ return otx2_tc_ingress_matchall_delete(nic, cls_matchall);
+ case TC_CLSMATCHALL_STATS:
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int otx2_setup_tc_block_ingress_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ struct otx2_nic *nic = cb_priv;
+
+ if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return otx2_setup_tc_cls_flower(nic, type_data);
+ case TC_SETUP_CLSMATCHALL:
+ return otx2_setup_tc_ingress_matchall(nic, type_data);
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int otx2_setup_tc_egress_matchall(struct otx2_nic *nic,
+ struct tc_cls_matchall_offload *cls_matchall)
+{
+ switch (cls_matchall->command) {
+ case TC_CLSMATCHALL_REPLACE:
+ return otx2_tc_egress_matchall_install(nic, cls_matchall);
+ case TC_CLSMATCHALL_DESTROY:
+ return otx2_tc_egress_matchall_delete(nic, cls_matchall);
+ case TC_CLSMATCHALL_STATS:
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int otx2_setup_tc_block_egress_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ struct otx2_nic *nic = cb_priv;
+
+ if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSMATCHALL:
+ return otx2_setup_tc_egress_matchall(nic, type_data);
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static LIST_HEAD(otx2_block_cb_list);
+
+static int otx2_setup_tc_block(struct net_device *netdev,
+ struct flow_block_offload *f)
+{
+ struct otx2_nic *nic = netdev_priv(netdev);
+ flow_setup_cb_t *cb;
+ bool ingress;
+
+ if (f->block_shared)
+ return -EOPNOTSUPP;
+
+ if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
+ cb = otx2_setup_tc_block_ingress_cb;
+ ingress = true;
+ } else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
+ cb = otx2_setup_tc_block_egress_cb;
+ ingress = false;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ return flow_block_cb_setup_simple(f, &otx2_block_cb_list, cb,
+ nic, nic, ingress);
+}
+
+int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return otx2_setup_tc_block(netdev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+EXPORT_SYMBOL(otx2_setup_tc);
+
+static const struct rhashtable_params tc_flow_ht_params = {
+ .head_offset = offsetof(struct otx2_tc_flow, node),
+ .key_offset = offsetof(struct otx2_tc_flow, cookie),
+ .key_len = sizeof(((struct otx2_tc_flow *)0)->cookie),
+ .automatic_shrinking = true,
+};
+
+int otx2_init_tc(struct otx2_nic *nic)
+{
+ struct otx2_tc_info *tc = &nic->tc_info;
+ int err;
+
+ /* Exclude receive queue 0 from being used for police action */
+ set_bit(0, &nic->rq_bmap);
+
+ if (!nic->flow_cfg) {
+ netdev_err(nic->netdev,
+ "Can't init TC, nic->flow_cfg is not setup\n");
+ return -EINVAL;
+ }
+
+ err = otx2_tc_alloc_ent_bitmap(nic);
+ if (err)
+ return err;
+
+ tc->flow_ht_params = tc_flow_ht_params;
+ return rhashtable_init(&tc->flow_table, &tc->flow_ht_params);
+}
+EXPORT_SYMBOL(otx2_init_tc);
+
+void otx2_shutdown_tc(struct otx2_nic *nic)
+{
+ struct otx2_tc_info *tc = &nic->tc_info;
+
+ kfree(tc->tc_entries_bitmap);
+ rhashtable_destroy(&tc->flow_table);
+}
+EXPORT_SYMBOL(otx2_shutdown_tc);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index d5d7a2f37493..04e6cce8709a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -1,24 +1,61 @@
// SPDX-License-Identifier: GPL-2.0
-/* Marvell OcteonTx2 RVU Ethernet driver
+/* Marvell RVU Ethernet driver
*
- * Copyright (C) 2020 Marvell International Ltd.
+ * Copyright (C) 2020 Marvell.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/etherdevice.h>
#include <net/ip.h>
#include <net/tso.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
#include "otx2_reg.h"
#include "otx2_common.h"
#include "otx2_struct.h"
#include "otx2_txrx.h"
#include "otx2_ptp.h"
+#include "cn10k.h"
#define CQE_ADDR(CQ, idx) ((CQ)->cqe_base + ((CQ)->cqe_size * (idx)))
+#define PTP_PORT 0x13F
+/* The original timestamp field starts at byte 34 of a PTP Sync packet and
+ * is divided into a 6 byte seconds field and a 4 byte nanoseconds field.
+ * The silicon supports only a 4 byte seconds field, so adjust the seconds
+ * field offset by 2.
+ */
+#define PTP_SYNC_SEC_OFFSET 36
+#define PTP_SYNC_NSEC_OFFSET 40
+
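+/* Read the CQ operation status register to learn the current head and
+ * tail pointers and derive the number of pending CQEs. Returns -EINVAL
+ * if the hardware reports a CQ error.
+ */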
+static inline int otx2_nix_cq_op_status(struct otx2_nic *pfvf,
+ struct otx2_cq_queue *cq)
+{
+ u64 incr = (u64)(cq->cq_idx) << 32;
+ u64 status;
+
+ status = otx2_atomic64_fetch_add(incr, pfvf->cq_op_addr);
+
+ if (unlikely(status & BIT_ULL(CQ_OP_STAT_OP_ERR) ||
+ status & BIT_ULL(CQ_OP_STAT_CQ_ERR))) {
+ dev_err(pfvf->dev, "CQ stopped due to error\n");
+ return -EINVAL;
+ }
+
+ cq->cq_tail = status & 0xFFFFF;
+ cq->cq_head = (status >> 20) & 0xFFFFF;
+ if (cq->cq_tail < cq->cq_head)
+ cq->pend_cqe = (cq->cqe_cnt - cq->cq_head) +
+ cq->cq_tail;
+ else
+ cq->pend_cqe = cq->cq_tail - cq->cq_head;
+
+ return 0;
+}
+
+static inline bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
+ struct nix_cqe_rx_s *cqe,
+ struct otx2_cq_queue *cq);
static struct nix_cqe_hdr_s *otx2_get_next_cqe(struct otx2_cq_queue *cq)
{
@@ -75,6 +112,24 @@ static void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg)
sg->num_segs = 0;
}
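+/* Completion handler for XDP TX SQEs: unmap the transmitted buffer and
+ * release its page back to the allocator.
+ */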
+static void otx2_xdp_snd_pkt_handler(struct otx2_nic *pfvf,
+ struct otx2_snd_queue *sq,
+ struct nix_cqe_tx_s *cqe)
+{
+ struct nix_send_comp_s *snd_comp = &cqe->comp;
+ struct sg_list *sg;
+ struct page *page;
+ u64 pa;
+
+ sg = &sq->sg[snd_comp->sqe_id];
+
+ pa = otx2_iova_to_phys(pfvf->iommu_domain, sg->dma_addr[0]);
+ otx2_dma_unmap_page(pfvf, sg->dma_addr[0],
+ sg->size[0], DMA_TO_DEVICE);
+ page = virt_to_page(phys_to_virt(pa));
+ put_page(page);
+}
+
static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
struct otx2_cq_queue *cq,
struct otx2_snd_queue *sq,
@@ -84,9 +139,8 @@ static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
struct nix_send_comp_s *snd_comp = &cqe->comp;
struct skb_shared_hwtstamps ts;
struct sk_buff *skb = NULL;
- u64 timestamp, tsns;
struct sg_list *sg;
- int err;
+ u64 timestamp;
if (unlikely(snd_comp->status) && netif_msg_tx_err(pfvf))
net_err_ratelimited("%s: TX%d: Error in send CQ status:%x\n",
@@ -101,12 +155,10 @@ static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
timestamp = ((u64 *)sq->timestamps->base)[snd_comp->sqe_id];
if (timestamp != 1) {
- err = otx2_ptp_tstamp2time(pfvf, timestamp, &tsns);
- if (!err) {
- memset(&ts, 0, sizeof(ts));
- ts.hwtstamp = ns_to_ktime(tsns);
- skb_tstamp_tx(skb, &ts);
- }
+ timestamp = pfvf->ptp->convert_tx_ptp_tstmp(timestamp);
+ memset(&ts, 0, sizeof(ts));
+ ts.hwtstamp = ns_to_ktime(timestamp);
+ skb_tstamp_tx(skb, &ts);
}
}
@@ -120,22 +172,19 @@ static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
static void otx2_set_rxtstamp(struct otx2_nic *pfvf,
struct sk_buff *skb, void *data)
{
- u64 tsns;
- int err;
+ u64 timestamp;
if (!(pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED))
return;
/* The first 8 bytes is the timestamp */
- err = otx2_ptp_tstamp2time(pfvf, be64_to_cpu(*(__be64 *)data), &tsns);
- if (err)
- return;
-
- skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(tsns);
+ timestamp = pfvf->ptp->convert_rx_ptp_tstmp(*(u64 *)data);
+ skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(timestamp);
}
-static void otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb,
- u64 iova, int len, struct nix_rx_parse_s *parse)
+static bool otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb,
+ u64 iova, int len, struct nix_rx_parse_s *parse,
+ int qidx)
{
struct page *page;
int off = 0;
@@ -156,11 +205,22 @@ static void otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb,
}
page = virt_to_page(va);
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
- va - page_address(page) + off, len - off, pfvf->rbsize);
+ if (likely(skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)) {
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ va - page_address(page) + off,
+ len - off, pfvf->rbsize);
+
+ otx2_dma_unmap_page(pfvf, iova - OTX2_HEAD_ROOM,
+ pfvf->rbsize, DMA_FROM_DEVICE);
+ return true;
+ }
- otx2_dma_unmap_page(pfvf, iova - OTX2_HEAD_ROOM,
- pfvf->rbsize, DMA_FROM_DEVICE);
+ /* If more than MAX_SKB_FRAGS fragments are received then
+ * give back those buffer pointers to hardware for reuse.
+ */
+ pfvf->hw_ops->aura_freeptr(pfvf, qidx, iova & ~0x07ULL);
+
+ return false;
}
static void otx2_set_rxhash(struct otx2_nic *pfvf,
@@ -199,7 +259,8 @@ static void otx2_free_rcv_seg(struct otx2_nic *pfvf, struct nix_cqe_rx_s *cqe,
sg = (struct nix_rx_sg_s *)start;
seg_addr = &sg->seg_addr;
for (seg = 0; seg < sg->segs; seg++, seg_addr++)
- otx2_aura_freeptr(pfvf, qidx, *seg_addr & ~0x07ULL);
+ pfvf->hw_ops->aura_freeptr(pfvf, qidx,
+ *seg_addr & ~0x07ULL);
start += sizeof(*sg);
}
}
@@ -255,12 +316,11 @@ static bool otx2_check_rcv_errors(struct otx2_nic *pfvf,
/* For now ignore all the NPC parser errors and
* pass the packets to stack.
*/
- if (cqe->sg.segs == 1)
- return false;
+ return false;
}
/* If RXALL is enabled pass on packets to stack. */
- if (cqe->sg.segs == 1 && (pfvf->netdev->features & NETIF_F_RXALL))
+ if (pfvf->netdev->features & NETIF_F_RXALL)
return false;
/* Free buffer back to pool */
@@ -275,19 +335,39 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
struct nix_cqe_rx_s *cqe)
{
struct nix_rx_parse_s *parse = &cqe->parse;
+ struct nix_rx_sg_s *sg = &cqe->sg;
struct sk_buff *skb = NULL;
+ void *end, *start;
+ u64 *seg_addr;
+ u16 *seg_size;
+ int seg;
- if (unlikely(parse->errlev || parse->errcode || cqe->sg.segs > 1)) {
+ if (unlikely(parse->errlev || parse->errcode)) {
if (otx2_check_rcv_errors(pfvf, cqe, cq->cq_idx))
return;
}
+ if (pfvf->xdp_prog)
+ if (otx2_xdp_rcv_pkt_handler(pfvf, cqe, cq))
+ return;
+
skb = napi_get_frags(napi);
if (unlikely(!skb))
return;
- otx2_skb_add_frag(pfvf, skb, cqe->sg.seg_addr, cqe->sg.seg_size, parse);
- cq->pool_ptrs++;
+ start = (void *)sg;
+ end = start + ((cqe->parse.desc_sizem1 + 1) * 16);
+ while (start < end) {
+ sg = (struct nix_rx_sg_s *)start;
+ seg_addr = &sg->seg_addr;
+ seg_size = (void *)sg;
+ for (seg = 0; seg < sg->segs; seg++, seg_addr++) {
+ if (otx2_skb_add_frag(pfvf, skb, *seg_addr,
+ seg_size[seg], parse, cq->cq_idx))
+ cq->pool_ptrs++;
+ }
+ start += sizeof(*sg);
+ }
otx2_set_rxhash(pfvf, cqe, skb);
@@ -295,6 +375,18 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
if (pfvf->netdev->features & NETIF_F_RXCSUM)
skb->ip_summed = CHECKSUM_UNNECESSARY;
+ /* This holds true when RX VLAN offloads are enabled and an
+ * 802.1AD or 802.1Q VLAN tag was found in the frame.
+ */
+ if (parse->vtag0_gone) {
+ if (skb->protocol == htons(ETH_P_8021Q))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD),
+ parse->vtag0_tci);
+ else
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ parse->vtag0_tci);
+ }
+
napi_gro_frags(napi);
}
@@ -304,9 +396,15 @@ static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
{
struct nix_cqe_rx_s *cqe;
int processed_cqe = 0;
- s64 bufptr;
- while (likely(processed_cqe < budget)) {
+ if (cq->pend_cqe >= budget)
+ goto process_cqe;
+
+ if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
+ return 0;
+
+process_cqe:
+ while (likely(processed_cqe < budget) && cq->pend_cqe) {
cqe = (struct nix_cqe_rx_s *)CQE_ADDR(cq, cq->cq_head);
if (cqe->hdr.cqe_type == NIX_XQE_TYPE_INVALID ||
!cqe->sg.seg_addr) {
@@ -322,58 +420,63 @@ static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
cqe->sg.seg_addr = 0x00;
processed_cqe++;
+ cq->pend_cqe--;
}
/* Free CQEs to HW */
otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
((u64)cq->cq_idx << 32) | processed_cqe);
- if (unlikely(!cq->pool_ptrs))
- return 0;
+ return processed_cqe;
+}
+
+void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
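+/* Refill the receive buffer pool with as many fresh buffers as were
+ * consumed while processing the RX CQ; stop early if allocation fails.
+ */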
+{
+ struct otx2_nic *pfvf = dev;
+ dma_addr_t bufptr;
/* Refill pool with new buffers */
while (cq->pool_ptrs) {
- bufptr = __otx2_alloc_rbuf(pfvf, cq->rbpool);
- if (unlikely(bufptr <= 0)) {
- struct refill_work *work;
- struct delayed_work *dwork;
-
- work = &pfvf->refill_wrk[cq->cq_idx];
- dwork = &work->pool_refill_work;
- /* Schedule a task if no other task is running */
- if (!cq->refill_task_sched) {
- cq->refill_task_sched = true;
- schedule_delayed_work(dwork,
- msecs_to_jiffies(100));
- }
+ if (otx2_alloc_buffer(pfvf, cq, &bufptr))
break;
- }
otx2_aura_freeptr(pfvf, cq->cq_idx, bufptr + OTX2_HEAD_ROOM);
cq->pool_ptrs--;
}
-
- return processed_cqe;
}
static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
struct otx2_cq_queue *cq, int budget)
{
- int tx_pkts = 0, tx_bytes = 0;
+ int tx_pkts = 0, tx_bytes = 0, qidx;
struct nix_cqe_tx_s *cqe;
int processed_cqe = 0;
- while (likely(processed_cqe < budget)) {
+ if (cq->pend_cqe >= budget)
+ goto process_cqe;
+
+ if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
+ return 0;
+
+process_cqe:
+ while (likely(processed_cqe < budget) && cq->pend_cqe) {
cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq);
if (unlikely(!cqe)) {
if (!processed_cqe)
return 0;
break;
}
- otx2_snd_pkt_handler(pfvf, cq, &pfvf->qset.sq[cq->cint_idx],
- cqe, budget, &tx_pkts, &tx_bytes);
-
+ if (cq->cq_type == CQ_XDP) {
+ qidx = cq->cq_idx - pfvf->hw.rx_queues;
+ otx2_xdp_snd_pkt_handler(pfvf, &pfvf->qset.sq[qidx],
+ cqe);
+ } else {
+ otx2_snd_pkt_handler(pfvf, cq,
+ &pfvf->qset.sq[cq->cint_idx],
+ cqe, budget, &tx_pkts, &tx_bytes);
+ }
cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
processed_cqe++;
+ cq->pend_cqe--;
}
/* Free CQEs to HW */
@@ -396,6 +499,7 @@ static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
int otx2_napi_handler(struct napi_struct *napi, int budget)
{
+ struct otx2_cq_queue *rx_cq = NULL;
struct otx2_cq_poll *cq_poll;
int workdone = 0, cq_idx, i;
struct otx2_cq_queue *cq;
@@ -406,17 +510,13 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
pfvf = (struct otx2_nic *)cq_poll->dev;
qset = &pfvf->qset;
- for (i = CQS_PER_CINT - 1; i >= 0; i--) {
+ for (i = 0; i < CQS_PER_CINT; i++) {
cq_idx = cq_poll->cq_ids[i];
if (unlikely(cq_idx == CINT_INVALID_CQ))
continue;
cq = &qset->cq[cq_idx];
if (cq->cq_type == CQ_RX) {
- /* If the RQ refill WQ task is running, skip napi
- * scheduler for this queue.
- */
- if (cq->refill_task_sched)
- continue;
+ rx_cq = cq;
workdone += otx2_rx_napi_handler(pfvf, napi,
cq, budget);
} else {
@@ -424,6 +524,8 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
}
}
+ if (rx_cq && rx_cq->pool_ptrs)
+ pfvf->hw_ops->refill_pool_ptrs(pfvf, rx_cq);
/* Clear the IRQ */
otx2_write64(pfvf, NIX_LF_CINTX_INT(cq_poll->cint_idx), BIT_ULL(0));
@@ -439,7 +541,8 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
return workdone;
}
-static void otx2_sqe_flush(struct otx2_snd_queue *sq, int size)
+void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq,
+ int size, int qidx)
{
u64 status;
@@ -556,11 +659,25 @@ static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
ext->tstmp = 1;
}
+#define OTX2_VLAN_PTR_OFFSET (ETH_HLEN - ETH_TLEN)
+ if (skb_vlan_tag_present(skb)) {
+ if (skb->vlan_proto == htons(ETH_P_8021Q)) {
+ ext->vlan1_ins_ena = 1;
+ ext->vlan1_ins_ptr = OTX2_VLAN_PTR_OFFSET;
+ ext->vlan1_ins_tci = skb_vlan_tag_get(skb);
+ } else if (skb->vlan_proto == htons(ETH_P_8021AD)) {
+ ext->vlan0_ins_ena = 1;
+ ext->vlan0_ins_ptr = OTX2_VLAN_PTR_OFFSET;
+ ext->vlan0_ins_tci = skb_vlan_tag_get(skb);
+ }
+ }
+
*offset += sizeof(*ext);
}
static void otx2_sqe_add_mem(struct otx2_snd_queue *sq, int *offset,
- int alg, u64 iova)
+ int alg, u64 iova, int ptp_offset,
+ u64 base_ns, int udp_csum)
{
struct nix_sqe_mem_s *mem;
@@ -570,6 +687,13 @@ static void otx2_sqe_add_mem(struct otx2_snd_queue *sq, int *offset,
mem->wmem = 1; /* wait for the memory operation */
mem->addr = iova;
+ if (ptp_offset) {
+ mem->start_offset = ptp_offset;
+ mem->udp_csum_crt = udp_csum;
+ mem->base_ns = base_ns;
+ mem->step_type = 1;
+ }
+
*offset += sizeof(*mem);
}
@@ -784,7 +908,7 @@ static void otx2_sq_append_tso(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
sqe_hdr->sizem1 = (offset / 16) - 1;
/* Flush SQE to HW */
- otx2_sqe_flush(sq, offset);
+ pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
}
}
@@ -793,16 +917,17 @@ static bool is_hw_tso_supported(struct otx2_nic *pfvf,
{
int payload_len, last_seg_size;
- if (!pfvf->hw.hw_tso)
+ if (test_bit(HW_TSO, &pfvf->hw.cap_flag))
+ return true;
+
+ /* On 96xx A0, HW TSO not supported */
+ if (!is_96xx_B0(pfvf->pdev))
return false;
/* HW has an issue due to which when the payload of the last LSO
* segment is shorter than 16 bytes, some header fields may not
* be correctly modified, hence don't offload such TSO segments.
*/
- if (!is_96xx_B0(pfvf->pdev))
- return true;
-
payload_len = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
last_seg_size = payload_len % skb_shinfo(skb)->gso_size;
if (last_seg_size && last_seg_size < 16)
@@ -824,16 +949,105 @@ static int otx2_get_sqe_count(struct otx2_nic *pfvf, struct sk_buff *skb)
return skb_shinfo(skb)->gso_segs;
}
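+/* Return true if the packet carries PTP over UDP, i.e. both the UDP
+ * source and destination ports are the PTP event port (319).
+ */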
+static bool otx2_validate_network_transport(struct sk_buff *skb)
+{
+ if ((ip_hdr(skb)->protocol == IPPROTO_UDP) ||
+ (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)) {
+ struct udphdr *udph = udp_hdr(skb);
+
+ if (udph->source == htons(PTP_PORT) &&
+ udph->dest == htons(PTP_PORT))
+ return true;
+ }
+
+ return false;
+}
+
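+/* Locate the PTP header in the outgoing frame (accounting for offloaded
+ * and stacked VLANs) and return true if the PTP messageType is SYNC.
+ * *offset is set to the PTP header offset and *udp_csum indicates
+ * whether a UDP checksum correction is needed.
+ */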
+static bool otx2_ptp_is_sync(struct sk_buff *skb, int *offset, int *udp_csum)
+{
+ struct ethhdr *eth = (struct ethhdr *)(skb->data);
+ u16 nix_offload_hlen = 0, inner_vhlen = 0;
+ u8 *data = skb->data, *msgtype;
+ u16 proto = eth->h_proto;
+ int network_depth = 0;
+
+ /* NIX is programmed to offload outer VLAN header
+ * in case of single vlan protocol field holds Network header ETH_IP/V6
+ * in case of stacked vlan protocol field holds Inner vlan (8100)
+ */
+ if (skb->dev->features & NETIF_F_HW_VLAN_CTAG_TX &&
+ skb->dev->features & NETIF_F_HW_VLAN_STAG_TX) {
+ if (skb->vlan_proto == htons(ETH_P_8021AD)) {
+ /* Get vlan protocol */
+ proto = __vlan_get_protocol(skb, eth->h_proto, NULL);
+ /* SKB APIs like skb_transport_offset() do not include the
+ * offloaded VLAN header length, so add it explicitly.
+ */
+ nix_offload_hlen = VLAN_HLEN;
+ inner_vhlen = VLAN_HLEN;
+ } else if (skb->vlan_proto == htons(ETH_P_8021Q)) {
+ nix_offload_hlen = VLAN_HLEN;
+ }
+ } else if (eth_type_vlan(eth->h_proto)) {
+ proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);
+ }
+
+ switch (ntohs(proto)) {
+ case ETH_P_1588:
+ if (network_depth)
+ *offset = network_depth;
+ else
+ *offset = ETH_HLEN + nix_offload_hlen +
+ inner_vhlen;
+ break;
+ case ETH_P_IP:
+ case ETH_P_IPV6:
+ if (!otx2_validate_network_transport(skb))
+ return false;
+
+ *udp_csum = 1;
+ *offset = nix_offload_hlen + skb_transport_offset(skb) +
+ sizeof(struct udphdr);
+ }
+
+ msgtype = data + *offset;
+
+ /* Check whether the PTP messageId is SYNC */
+ return (*msgtype & 0xf) == 0;
+}
+
static void otx2_set_txtstamp(struct otx2_nic *pfvf, struct sk_buff *skb,
struct otx2_snd_queue *sq, int *offset)
{
- u64 iova;
+ int ptp_offset = 0, udp_csum = 0;
+ struct timespec64 ts;
+ u64 iova, sec, nsec;
if (!skb_shinfo(skb)->gso_size &&
skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ if (pfvf->flags & OTX2_FLAG_PTP_ONESTEP_SYNC &&
+ otx2_ptp_is_sync(skb, &ptp_offset, &udp_csum)) {
+ ts = ns_to_timespec64(pfvf->ptp->tstamp);
+ sec = ntohl(ts.tv_sec);
+ nsec = ntohl(ts.tv_nsec);
+
+ memcpy((u8 *)skb->data + ptp_offset + PTP_SYNC_SEC_OFFSET,
+ &sec, 4);
+ memcpy((u8 *)skb->data + ptp_offset + PTP_SYNC_NSEC_OFFSET,
+ &nsec, 4);
+ /* Point to correction field in PTP packet */
+ ptp_offset += 8;
+ } else {
+ ptp_offset = 0;
+ }
+
+ if (!(pfvf->flags & OTX2_FLAG_PTP_ONESTEP_SYNC))
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
iova = sq->timestamps->iova + (sq->head * sizeof(u64));
- otx2_sqe_add_mem(sq, offset, NIX_SENDMEMALG_E_SETTSTMP, iova);
+ otx2_sqe_add_mem(sq, offset, NIX_SENDMEMALG_E_SETTSTMP, iova,
+ ptp_offset, ts.tv_nsec, udp_csum);
} else {
skb_tx_timestamp(skb);
}
@@ -871,6 +1085,9 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
}
if (skb_shinfo(skb)->gso_size && !is_hw_tso_supported(pfvf, skb)) {
+ /* Insert vlan tag before giving pkt to tso */
+ if (skb_vlan_tag_present(skb))
+ skb = __vlan_hwaccel_push_inside(skb);
otx2_sq_append_tso(pfvf, sq, skb, qidx);
return true;
}
@@ -899,7 +1116,7 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
netdev_tx_sent_queue(txq, skb->len);
/* Flush SQE to HW */
- otx2_sqe_flush(sq, offset);
+ pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
return true;
}
@@ -911,10 +1128,16 @@ void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
int processed_cqe = 0;
u64 iova, pa;
- while ((cqe = (struct nix_cqe_rx_s *)otx2_get_next_cqe(cq))) {
- if (!cqe->sg.subdc)
- continue;
+ if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
+ return;
+
+ while (cq->pend_cqe) {
+ cqe = (struct nix_cqe_rx_s *)otx2_get_next_cqe(cq);
processed_cqe++;
+ cq->pend_cqe--;
+
+ if (!cqe)
+ continue;
if (cqe->sg.segs > 1) {
otx2_free_rcv_seg(pfvf, cqe, cq->cq_idx);
continue;
@@ -940,7 +1163,16 @@ void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
sq = &pfvf->qset.sq[cq->cint_idx];
- while ((cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq))) {
+ if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
+ return;
+
+ while (cq->pend_cqe) {
+ cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq);
+ processed_cqe++;
+ cq->pend_cqe--;
+
+ if (!cqe)
+ continue;
sg = &sq->sg[cqe->comp.sqe_id];
skb = (struct sk_buff *)sg->skb;
if (skb) {
@@ -948,7 +1180,6 @@ void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
dev_kfree_skb_any(skb);
sg->skb = (u64)NULL;
}
- processed_cqe++;
}
/* Free CQEs to HW */
@@ -976,3 +1207,115 @@ int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable)
mutex_unlock(&pfvf->mbox.lock);
return err;
}
+
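+/* Append a single-segment scatter/gather subdescriptor to an XDP TX SQE
+ * and remember the DMA address so it can be unmapped on completion.
+ */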
+static inline void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, u64 dma_addr,
+ int len, int *offset)
+{
+ struct nix_sqe_sg_s *sg = NULL;
+ u64 *iova = NULL;
+
+ sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset);
+ sg->ld_type = NIX_SEND_LDTYPE_LDD;
+ sg->subdc = NIX_SUBDC_SG;
+ sg->segs = 1;
+ sg->seg1_size = len;
+ iova = (void *)sg + sizeof(*sg);
+ *iova = dma_addr;
+ *offset += sizeof(*sg) + sizeof(u64);
+
+ sq->sg[sq->head].dma_addr[0] = dma_addr;
+ sq->sg[sq->head].size[0] = len;
+ sq->sg[sq->head].num_segs = 1;
+}
+
+bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx)
+{
+ struct nix_sqe_hdr_s *sqe_hdr;
+ struct otx2_snd_queue *sq;
+ int offset, free_sqe;
+
+ sq = &pfvf->qset.sq[qidx];
+ free_sqe = (sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb;
+ if (free_sqe < sq->sqe_thresh)
+ return false;
+
+ memset(sq->sqe_base + 8, 0, sq->sqe_size - 8);
+
+ sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base);
+
+ if (!sqe_hdr->total) {
+ sqe_hdr->aura = sq->aura_id;
+ sqe_hdr->df = 1;
+ sqe_hdr->sq = qidx;
+ sqe_hdr->pnc = 1;
+ }
+ sqe_hdr->total = len;
+ sqe_hdr->sqe_id = sq->head;
+
+ offset = sizeof(*sqe_hdr);
+
+ otx2_xdp_sqe_add_sg(sq, iova, len, &offset);
+ sqe_hdr->sizem1 = (offset / 16) - 1;
+ pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
+
+ return true;
+}
+
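+/* Run the attached XDP program on a received packet and act on its
+ * verdict (PASS, TX, REDIRECT, DROP). Returns true if the packet was
+ * consumed here, false if it should continue up the normal RX path.
+ */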
+static inline bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
+ struct nix_cqe_rx_s *cqe,
+ struct otx2_cq_queue *cq)
+{
+ struct bpf_prog *xdp_prog;
+ int qidx = cq->cq_idx;
+ struct xdp_buff xdp;
+ struct page *page;
+ u64 iova, pa;
+ u32 act;
+ int err;
+
+ iova = cqe->sg.seg_addr;
+ pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
+ page = virt_to_page(phys_to_virt(pa));
+
+ xdp.data = phys_to_virt(pa);
+ xdp.data_hard_start = page_address(page) + OTX2_HEAD_ROOM;
+ xdp.data_end = xdp.data + cqe->sg.seg_size;
+
+ rcu_read_lock();
+ xdp_prog = READ_ONCE(pfvf->xdp_prog);
+ act = bpf_prog_run_xdp(xdp_prog, &xdp);
+ rcu_read_unlock();
+
+ switch (act) {
+ case XDP_PASS:
+ break;
+ case XDP_TX:
+ qidx += pfvf->hw.tx_queues;
+ cq->pool_ptrs++;
+ return otx2_xdp_sq_append_pkt(pfvf, iova,
+ cqe->sg.seg_size, qidx);
+ case XDP_REDIRECT:
+ cq->pool_ptrs++;
+ err = xdp_do_redirect(pfvf->netdev, &xdp, xdp_prog);
+
+ otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
+ DMA_FROM_DEVICE);
+ if (!err)
+ return true;
+ put_page(page);
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ break;
+ case XDP_ABORTED:
+ trace_xdp_exception(pfvf->netdev, xdp_prog, act);
+ break;
+ case XDP_DROP:
+ otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
+ DMA_FROM_DEVICE);
+ put_page(page);
+ cq->pool_ptrs++;
+ return true;
+ }
+ return false;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
index 73af15685657..96a7b7f3ccde 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -1,11 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Marvell OcteonTx2 RVU Ethernet driver
+/* Marvell RVU Ethernet driver
*
- * Copyright (C) 2020 Marvell International Ltd.
+ * Copyright (C) 2020 Marvell.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef OTX2_TXRX_H
@@ -24,7 +21,6 @@
#define OTX2_ETH_HLEN (VLAN_ETH_HLEN + VLAN_HLEN)
#define OTX2_MIN_MTU 64
-#define OTX2_MAX_MTU (9212 - OTX2_ETH_HLEN)
#define OTX2_MAX_GSO_SEGS 255
#define OTX2_MAX_FRAGS_IN_SQE 9
@@ -40,9 +36,7 @@
#define RCV_FRAG_LEN(x) \
((RCV_FRAG_LEN1(x) < 2048) ? 2048 : RCV_FRAG_LEN1(x))
-#define DMA_BUFFER_LEN(x) \
- ((x) - OTX2_HEAD_ROOM - \
- OTX2_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define DMA_BUFFER_LEN(x) ((x) - OTX2_HEAD_ROOM)
/* IRQ triggered when NIX_LF_CINTX_CNT[ECOUNT]
* is equal to this value.
@@ -60,6 +54,9 @@
*/
#define CQ_QCOUNT_DEFAULT 1
+#define CQ_OP_STAT_OP_ERR 63
+#define CQ_OP_STAT_CQ_ERR 46
+
struct queue_stats {
u64 bytes;
u64 pkts;
@@ -100,7 +97,8 @@ struct otx2_snd_queue {
enum cq_type {
CQ_RX,
CQ_TX,
- CQS_PER_CINT = 2, /* RQ + SQ */
+ CQ_XDP,
+ CQS_PER_CINT = 3, /* RQ + SQ + XDP */
};
struct otx2_cq_poll {
@@ -126,6 +124,8 @@ struct otx2_cq_queue {
u16 pool_ptrs;
u32 cqe_cnt;
u32 cq_head;
+ u32 cq_tail;
+ u32 pend_cqe;
void *cqe_base;
struct qmem *cqe;
struct otx2_pool *rbpool;
@@ -156,4 +156,10 @@ static inline u64 otx2_iova_to_phys(void *iommu_domain, dma_addr_t dma_addr)
int otx2_napi_handler(struct napi_struct *napi, int budget);
bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
struct sk_buff *skb, u16 qidx);
+void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq,
+ int size, int qidx);
+void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq,
+ int size, int qidx);
+void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
+void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
#endif /* OTX2_TXRX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 67fabf265fe6..7aaec37f52a7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -1,19 +1,27 @@
// SPDX-License-Identifier: GPL-2.0
-/* Marvell OcteonTx2 RVU Virtual Function ethernet driver */
+/* Marvell RVU Virtual Function ethernet driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/net_tstamp.h>
#include "otx2_common.h"
#include "otx2_reg.h"
+#include "otx2_ptp.h"
+#include "cn10k.h"
-#define DRV_NAME "octeontx2-nicvf"
-#define DRV_STRING "Marvell OcteonTX2 NIC Virtual Function Driver"
+#define DRV_NAME "rvu_nicvf"
+#define DRV_STRING "Marvell RVU NIC Virtual Function Driver"
static const struct pci_device_id otx2_vf_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AFVF) },
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_VF) },
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_SDP_VF) },
{ }
};
@@ -108,9 +116,6 @@ static void otx2vf_vfaf_mbox_handler(struct work_struct *work)
static int otx2vf_process_mbox_msg_up(struct otx2_nic *vf,
struct mbox_msghdr *req)
{
- struct msg_rsp *rsp;
- int err;
-
/* Check if valid, if not reply with a invalid msg */
if (req->sig != OTX2_MBOX_REQ_SIG) {
otx2_reply_invalid_msg(&vf->mbox.mbox_up, 0, 0, req->id);
@@ -118,20 +123,29 @@ static int otx2vf_process_mbox_msg_up(struct otx2_nic *vf,
}
switch (req->id) {
- case MBOX_MSG_CGX_LINK_EVENT:
- rsp = (struct msg_rsp *)otx2_mbox_alloc_msg(
- &vf->mbox.mbox_up, 0,
- sizeof(struct msg_rsp));
- if (!rsp)
- return -ENOMEM;
-
- rsp->hdr.id = MBOX_MSG_CGX_LINK_EVENT;
- rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
- rsp->hdr.pcifunc = 0;
- rsp->hdr.rc = 0;
- err = otx2_mbox_up_handler_cgx_link_event(
- vf, (struct cgx_link_info_msg *)req, rsp);
- return err;
+#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
+ case _id: { \
+ struct _rsp_type *rsp; \
+ int err; \
+ \
+ rsp = (struct _rsp_type *)otx2_mbox_alloc_msg( \
+ &vf->mbox.mbox_up, 0, \
+ sizeof(struct _rsp_type)); \
+ if (!rsp) \
+ return -ENOMEM; \
+ \
+ rsp->hdr.id = _id; \
+ rsp->hdr.sig = OTX2_MBOX_RSP_SIG; \
+ rsp->hdr.pcifunc = 0; \
+ rsp->hdr.rc = 0; \
+ \
+ err = otx2_mbox_up_handler_ ## _fn_name( \
+ vf, (struct _req_type *)req, rsp); \
+ return err; \
+ }
+MBOX_UP_CGX_MESSAGES
+#undef M
+ break;
default:
otx2_reply_invalid_msg(&vf->mbox.mbox_up, 0, 0, req->id);
return -ENODEV;
@@ -277,7 +291,7 @@ static void otx2vf_vfaf_mbox_destroy(struct otx2_nic *vf)
vf->mbox_wq = NULL;
}
- if (mbox->mbox.hwbase)
+ if (mbox->mbox.hwbase && !test_bit(CN10K_MBOX, &vf->hw.cap_flag))
iounmap((void __iomem *)mbox->mbox.hwbase);
otx2_mbox_destroy(&mbox->mbox);
@@ -297,16 +311,25 @@ static int otx2vf_vfaf_mbox_init(struct otx2_nic *vf)
if (!vf->mbox_wq)
return -ENOMEM;
- /* Mailbox is a reserved memory (in RAM) region shared between
- * admin function (i.e PF0) and this VF, shouldn't be mapped as
- * device memory to allow unaligned accesses.
- */
- hwbase = ioremap_wc(pci_resource_start(vf->pdev, PCI_MBOX_BAR_NUM),
- pci_resource_len(vf->pdev, PCI_MBOX_BAR_NUM));
- if (!hwbase) {
- dev_err(vf->dev, "Unable to map VFAF mailbox region\n");
- err = -ENOMEM;
- goto exit;
+ if (test_bit(CN10K_MBOX, &vf->hw.cap_flag)) {
+ /* For cn10k platform, VF mailbox region is in its BAR2
+ * register space
+ */
+ hwbase = vf->reg_base + RVU_VF_MBOX_REGION;
+ } else {
+ /* Mailbox is a reserved memory (in RAM) region shared between
+ * admin function (i.e PF0) and this VF, shouldn't be mapped as
+ * device memory to allow unaligned accesses.
+ */
+ hwbase = ioremap_wc(pci_resource_start(vf->pdev,
+ PCI_MBOX_BAR_NUM),
+ pci_resource_len(vf->pdev,
+ PCI_MBOX_BAR_NUM));
+ if (!hwbase) {
+ dev_err(vf->dev, "Unable to map VFAF mailbox region\n");
+ err = -ENOMEM;
+ goto exit;
+ }
}
err = otx2_mbox_init(&mbox->mbox, hwbase, vf->pdev, vf->reg_base,
@@ -329,6 +352,8 @@ static int otx2vf_vfaf_mbox_init(struct otx2_nic *vf)
return 0;
exit:
+ if (hwbase && !test_bit(CN10K_MBOX, &vf->hw.cap_flag))
+ iounmap(hwbase);
destroy_workqueue(vf->mbox_wq);
return err;
}
@@ -344,7 +369,7 @@ static int otx2vf_open(struct net_device *netdev)
/* LBKs do not receive link events so tell everyone we are up here */
vf = netdev_priv(netdev);
- if (is_otx2_lbkvf(vf->pdev)) {
+ if (is_otx2_lbkvf(vf->pdev) || is_otx2_sdpvf(vf->pdev)) {
pr_info("%s NIC Link is UP\n", netdev->name);
netif_carrier_on(netdev);
netif_tx_start_all_queues(netdev);
@@ -383,8 +408,45 @@ static netdev_tx_t otx2vf_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
}
+static void otx2vf_set_rx_mode(struct net_device *netdev)
+{
+ struct otx2_nic *vf = netdev_priv(netdev);
+
+ queue_work(vf->otx2_wq, &vf->rx_mode_work);
+}
+
+static void otx2vf_do_set_rx_mode(struct work_struct *work)
+{
+ struct otx2_nic *vf = container_of(work, struct otx2_nic, rx_mode_work);
+ struct net_device *netdev = vf->netdev;
+ unsigned int flags = netdev->flags;
+ struct nix_rx_mode *req;
+
+ mutex_lock(&vf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_nix_set_rx_mode(&vf->mbox);
+ if (!req) {
+ mutex_unlock(&vf->mbox.lock);
+ return;
+ }
+
+ req->mode = NIX_RX_MODE_UCAST;
+
+ if (flags & IFF_PROMISC)
+ req->mode |= NIX_RX_MODE_PROMISC;
+ if (flags & (IFF_ALLMULTI | IFF_MULTICAST))
+ req->mode |= NIX_RX_MODE_ALLMULTI;
+
+ req->mode |= NIX_RX_MODE_USE_MCE;
+
+ otx2_sync_mbox_msg(&vf->mbox);
+
+ mutex_unlock(&vf->mbox.lock);
+}
+
static int otx2vf_change_mtu(struct net_device *netdev, int new_mtu)
{
+ struct otx2_nic *vf = netdev_priv(netdev);
bool if_up = netif_running(netdev);
int err = 0;
@@ -394,6 +456,10 @@ static int otx2vf_change_mtu(struct net_device *netdev, int new_mtu)
netdev_info(netdev, "Changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
+ /* Modify receive buffer size based on MTU and do not
+ * use the previously set fixed size.
+ */
+ vf->hw.rbuf_fixed_size = 0;
if (if_up)
err = otx2vf_open(netdev);
@@ -416,16 +482,37 @@ static void otx2vf_reset_task(struct work_struct *work)
rtnl_unlock();
}
+static int otx2vf_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ return otx2_handle_ntuple_tc_features(netdev, features);
+}
+
static const struct net_device_ops otx2vf_netdev_ops = {
.ndo_open = otx2vf_open,
.ndo_stop = otx2vf_stop,
.ndo_start_xmit = otx2vf_xmit,
+ .ndo_set_rx_mode = otx2vf_set_rx_mode,
.ndo_set_mac_address = otx2_set_mac_address,
.ndo_change_mtu = otx2vf_change_mtu,
+ .ndo_set_features = otx2vf_set_features,
.ndo_get_stats64 = otx2_get_stats64,
.ndo_tx_timeout = otx2_tx_timeout,
+ .ndo_do_ioctl = otx2_ioctl,
+ .ndo_setup_tc = otx2_setup_tc,
};
+static int otx2_wq_init(struct otx2_nic *vf)
+{
+ vf->otx2_wq = create_singlethread_workqueue("otx2vf_wq");
+ if (!vf->otx2_wq)
+ return -ENOMEM;
+
+ INIT_WORK(&vf->rx_mode_work, otx2vf_do_set_rx_mode);
+ INIT_WORK(&vf->reset_task, otx2vf_reset_task);
+ return 0;
+}
+
static int otx2vf_realloc_msix_vectors(struct otx2_nic *vf)
{
struct otx2_hw *hw = &vf->hw;
@@ -496,6 +583,9 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
hw->rx_queues = qcount;
hw->tx_queues = qcount;
hw->max_queues = qcount;
+ hw->tot_tx_queues = qcount;
+ /* Use CQE of 128 byte descriptor size by default */
+ hw->xqe_size = 128;
hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
GFP_KERNEL);
@@ -525,6 +615,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_free_irq_vectors;
}
+ otx2_setup_dev_hw_settings(vf);
/* Init VF <=> PF mailbox stuff */
err = otx2vf_vfaf_mbox_init(vf);
if (err)
@@ -548,7 +639,18 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_detach_rsrc;
- otx2_setup_dev_hw_settings(vf);
+ err = cn10k_lmtst_init(vf);
+ if (err)
+ goto err_detach_rsrc;
+
+ err = otx2smqvf_probe(vf);
+ if (!err)
+ return 0;
+ else if (err == -EINVAL)
+ goto err_detach_rsrc;
+
+ /* Don't check for error. Proceed without ptp */
+ otx2_ptp_init(vf);
/* Assign default mac address */
otx2_get_mac_from_af(netdev);
@@ -558,17 +660,23 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_GSO_UDP_L4;
netdev->features = netdev->hw_features;
+ /* Support TSO on tag interface */
+ netdev->vlan_features |= netdev->features;
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX;
+ netdev->features |= netdev->hw_features;
+
+ netdev->hw_features |= NETIF_F_NTUPLE;
+ netdev->hw_features |= NETIF_F_RXALL;
+ netdev->hw_features |= NETIF_F_HW_TC;
netdev->gso_max_segs = OTX2_MAX_GSO_SEGS;
netdev->watchdog_timeo = OTX2_TX_TIMEOUT;
netdev->netdev_ops = &otx2vf_netdev_ops;
- /* MTU range: 68 - 9190 */
netdev->min_mtu = OTX2_MIN_MTU;
- netdev->max_mtu = OTX2_MAX_MTU;
-
- INIT_WORK(&vf->reset_task, otx2vf_reset_task);
+ netdev->max_mtu = otx2_get_max_mtu(vf);
/* To distinguish, for LBK VFs set netdev name explicitly */
if (is_otx2_lbkvf(vf->pdev)) {
@@ -580,21 +688,63 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
snprintf(netdev->name, sizeof(netdev->name), "lbk%d", n);
}
+ /* To distinguish, for SDP VFs set netdev name explicitly */
+ if (is_otx2_sdpvf(vf->pdev)) {
+ int n;
+
+ n = (vf->pcifunc >> RVU_PFVF_FUNC_SHIFT) & RVU_PFVF_FUNC_MASK;
+ /* Need to subtract 1 to get proper VF number */
+ n -= 1;
+ snprintf(netdev->name, sizeof(netdev->name), "sdp%d-%d", pdev->bus->number, n);
+ }
+
err = register_netdev(netdev);
if (err) {
dev_err(dev, "Failed to register netdevice\n");
- goto err_detach_rsrc;
+ goto err_ptp_destroy;
}
- otx2vf_set_ethtool_ops(netdev);
+ err = otx2_wq_init(vf);
+ if (err)
+ goto err_unreg_netdev;
- /* Enable pause frames by default */
- vf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED;
- vf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED;
+ if (!is_otx2_sdpvf(vf->pdev))
+ otx2vf_set_ethtool_ops(netdev);
+
+ err = otx2vf_mcam_flow_init(vf);
+ if (err)
+ goto err_unreg_netdev;
+
+ err = otx2_init_tc(vf);
+ if (err)
+ goto err_unreg_netdev;
+
+ err = otx2_register_dl(vf);
+ if (err)
+ goto err_shutdown_tc;
+
+ /* Set interface mode as Default */
+ vf->ethtool_flags |= OTX2_PRIV_FLAG_DEF_MODE;
+
+#ifdef CONFIG_DCB
+ err = otx2_dcbnl_set_ops(netdev);
+ if (err)
+ goto err_shutdown_tc;
+#endif
return 0;
+err_shutdown_tc:
+ otx2_shutdown_tc(vf);
+err_unreg_netdev:
+ unregister_netdev(netdev);
+err_ptp_destroy:
+ otx2_ptp_destroy(vf);
err_detach_rsrc:
+ if (vf->hw.lmt_info)
+ free_percpu(vf->hw.lmt_info);
+ if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
+ qmem_free(vf->dev, vf->dync_lmt);
otx2_detach_resources(&vf->mbox);
err_disable_mbox_intr:
otx2vf_disable_mbox_intr(vf);
@@ -620,11 +770,38 @@ static void otx2vf_remove(struct pci_dev *pdev)
vf = netdev_priv(netdev);
- cancel_work_sync(&vf->reset_task);
- unregister_netdev(netdev);
- otx2vf_disable_mbox_intr(vf);
+ /* Disable 802.3x pause frames */
+ if (vf->flags & OTX2_FLAG_RX_PAUSE_ENABLED ||
+ (vf->flags & OTX2_FLAG_TX_PAUSE_ENABLED)) {
+ vf->flags &= ~OTX2_FLAG_RX_PAUSE_ENABLED;
+ vf->flags &= ~OTX2_FLAG_TX_PAUSE_ENABLED;
+ otx2_config_pause_frm(vf);
+ }
+
+#ifdef CONFIG_DCB
+ /* Disable PFC config */
+ if (vf->pfc_en) {
+ vf->pfc_en = 0;
+ otx2_config_priority_flow_ctrl(vf);
+ }
+#endif
+ if (otx2smqvf_remove(vf)) {
+ otx2_unregister_dl(vf);
+ cancel_work_sync(&vf->reset_task);
+ unregister_netdev(netdev);
+ }
+
+ if (vf->otx2_wq)
+ destroy_workqueue(vf->otx2_wq);
+
+ otx2_ptp_destroy(vf);
+ otx2vf_disable_mbox_intr(vf);
otx2_detach_resources(&vf->mbox);
+ if (vf->hw.lmt_info)
+ free_percpu(vf->hw.lmt_info);
+ if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
+ qmem_free(vf->dev, vf->dync_lmt);
otx2vf_vfaf_mbox_destroy(vf);
pci_free_irq_vectors(vf->pdev);
pci_set_drvdata(pdev, NULL);
diff --git a/drivers/net/mdio/mdio-cavium.h b/drivers/net/mdio/mdio-cavium.h
index a2245d436f5d..ed4c48d8a38b 100644
--- a/drivers/net/mdio/mdio-cavium.h
+++ b/drivers/net/mdio/mdio-cavium.h
@@ -92,6 +92,7 @@ struct cavium_mdiobus {
struct mii_bus *mii_bus;
void __iomem *register_base;
enum cavium_mdiobus_mode mode;
+ u32 clk_freq;
};
#ifdef CONFIG_CAVIUM_OCTEON_SOC
diff --git a/drivers/net/mdio/mdio-thunder.c b/drivers/net/mdio/mdio-thunder.c
index 822d2cdd2f35..adf2aa1d36e5 100644
--- a/drivers/net/mdio/mdio-thunder.c
+++ b/drivers/net/mdio/mdio-thunder.c
@@ -19,6 +19,46 @@ struct thunder_mdiobus_nexus {
struct cavium_mdiobus *buses[4];
};
+#define _calc_clk_freq(_phase) (100000000U / (2 * (_phase)))
+#define _calc_sample(_phase) (2 * (_phase) - 3)
+
+#define PHASE_MIN 3
+#define PHASE_DFLT 16
+#define DFLT_CLK_FREQ _calc_clk_freq(PHASE_DFLT)
+#define MAX_CLK_FREQ _calc_clk_freq(PHASE_MIN)
+
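+/* Pick the SMI clock phase whose resulting bus frequency is closest to
+ * the requested one (clamped to the supported range) and return that
+ * frequency along with the phase and sample values to program.
+ */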
+static inline u32 _config_clk(u32 req_freq, u32 *phase, u32 *sample)
+{
+ unsigned int p;
+ u32 freq = 0, freq_prev;
+
+ for (p = PHASE_MIN; p < PHASE_DFLT; p++) {
+ freq_prev = freq;
+ freq = _calc_clk_freq(p);
+
+ if (req_freq >= freq)
+ break;
+ }
+
+ if (p == PHASE_DFLT)
+ freq = DFLT_CLK_FREQ;
+
+ if (p == PHASE_MIN || p == PHASE_DFLT)
+ goto out;
+
+ /* Check which clock value from the identified range
+ * is closer to the requested value
+ */
+ if ((freq_prev - req_freq) < (req_freq - freq)) {
+ p = p - 1;
+ freq = freq_prev;
+ }
+out:
+ *phase = p;
+ *sample = _calc_sample(p);
+ return freq;
+}
+
static int thunder_mdiobus_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -59,6 +99,8 @@ static int thunder_mdiobus_pci_probe(struct pci_dev *pdev,
struct mii_bus *mii_bus;
struct cavium_mdiobus *bus;
union cvmx_smix_en smi_en;
+ union cvmx_smix_clk smi_clk;
+ u32 req_clk_freq;
/* If it is not an OF node we cannot handle it yet, so
* exit the loop.
@@ -87,9 +129,33 @@ static int thunder_mdiobus_pci_probe(struct pci_dev *pdev,
bus->register_base = nexus->bar0 +
r.start - pci_resource_start(pdev, 0);
+ smi_clk.u64 = oct_mdio_readq(bus->register_base + SMI_CLK);
+ smi_clk.s.clk_idle = 1;
+
+ if (!of_property_read_u32(node, "clock-freq", &req_clk_freq)) {
+ u32 phase, sample;
+
+ dev_info(&pdev->dev, "requested bus clock frequency=%d\n",
+ req_clk_freq);
+
+ bus->clk_freq = _config_clk(req_clk_freq,
+ &phase, &sample);
+
+ smi_clk.s.phase = phase;
+ smi_clk.s.sample_hi = (sample >> 4) & 0x1f;
+ smi_clk.s.sample = sample & 0xf;
+ } else {
+ bus->clk_freq = DFLT_CLK_FREQ;
+ }
+
+ oct_mdio_writeq(smi_clk.u64, bus->register_base + SMI_CLK);
+ dev_info(&pdev->dev, "bus clock frequency set to %d\n",
+ bus->clk_freq);
+
smi_en.u64 = 0;
smi_en.s.en = 1;
oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN);
+
bus->mii_bus->name = KBUILD_MODNAME;
snprintf(bus->mii_bus->id, MII_BUS_ID_SIZE, "%llx", r.start);
bus->mii_bus->parent = &pdev->dev;
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index 2b64318efdba..76af6615cdbd 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -32,6 +32,21 @@
#define MV_PHY_ALASKA_NBT_QUIRK_MASK 0xfffffffe
#define MV_PHY_ALASKA_NBT_QUIRK_REV (MARVELL_PHY_ID_88X3310 | 0xa)
+#define MV_COPPER_PHY_STATUS_LINK 0x0400
+#define MV_PHY_AN_ADVERTISE 0x10
+#define MV_PHY_AN_LPA 0x13
+#define MV_PHY_STATUS_DUPLEX 0x2000
+#define MV_PHY_STATUS_SPD_MASK 0xc00c
+#define MV_PHY_STATUS_10000 0xc000
+#define MV_PHY_STATUS_5000 0xc008
+#define MV_PHY_STATUS_2500 0xc004
+#define MV_PHY_STATUS_1000 0x8000
+#define MV_PHY_STATUS_100 0x4000
+
+#define MV_MGBASET_AN_FS_RETRAIN_10G 0x2
+#define MV_MGBASET_AN_FS_RETRAIN_5G 0x40
+#define MV_MGBASET_AN_FS_RETRAIN_2_5G 0x20
+
enum {
MV_PMA_FW_VER0 = 0xc011,
MV_PMA_FW_VER1 = 0xc012,
@@ -68,6 +83,7 @@ enum {
/* Temperature read register (88E2110 only) */
MV_PCS_TEMP = 0x8042,
+ MV_PCS_COPPER_STATUS = 0x8008,
/* These registers appear at 0x800X and 0xa00X - the 0xa00X control
* registers appear to set themselves to the 0x800X when AN is
@@ -425,6 +441,64 @@ static int mv3310_suspend(struct phy_device *phydev)
return mv3310_power_down(phydev);
}
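+/* Read-modify-write helper for an MMD register: returns a negative error
+ * code on failure, 0 if the value was already as requested and 1 if the
+ * register was updated.
+ */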
+static int mv3310_modify(struct phy_device *phydev, int devad, u16 reg,
+ u16 mask, u16 bits)
+{
+ int old, val, ret;
+
+ old = phy_read_mmd(phydev, devad, reg);
+ if (old < 0)
+ return old;
+
+ val = (old & ~mask) | (bits & mask);
+ if (val == old)
+ return 0;
+
+ ret = phy_write_mmd(phydev, devad, reg, val);
+
+ return ret < 0 ? ret : 1;
+}
+
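+/* Clear the NBASE-T advertisement and fast-retrain bits, then re-enable
+ * them according to the configured interface mode; the switch cases
+ * intentionally fall through so the lower speeds are advertised as well.
+ */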
+static void mv_set_adv_config_init(struct phy_device *phydev)
+{
+ u32 mask = MDIO_AN_10GBT_CTRL_ADV10G | MV_MGBASET_AN_FS_RETRAIN_10G |
+ MDIO_AN_10GBT_CTRL_ADV5G | MV_MGBASET_AN_FS_RETRAIN_5G |
+ MDIO_AN_10GBT_CTRL_ADV2_5G | MV_MGBASET_AN_FS_RETRAIN_2_5G;
+
+ mv3310_modify(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL, mask, 0);
+
+ switch (phydev->interface) {
+ case PHY_INTERFACE_MODE_10GKR:
+ case PHY_INTERFACE_MODE_RXAUI:
+ case PHY_INTERFACE_MODE_XAUI:
+ mv3310_modify(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
+ MDIO_AN_10GBT_CTRL_ADV10G,
+ MDIO_AN_10GBT_CTRL_ADV10G);
+ mv3310_modify(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
+ MV_MGBASET_AN_FS_RETRAIN_10G,
+ MV_MGBASET_AN_FS_RETRAIN_10G);
+ /* Fall-through */
+ case PHY_INTERFACE_MODE_5GKR:
+ mv3310_modify(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
+ MDIO_AN_10GBT_CTRL_ADV5G,
+ MDIO_AN_10GBT_CTRL_ADV5G);
+ mv3310_modify(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
+ MV_MGBASET_AN_FS_RETRAIN_5G,
+ MV_MGBASET_AN_FS_RETRAIN_5G);
+ /* Fall-through */
+ case PHY_INTERFACE_MODE_2500BASET:
+ mv3310_modify(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
+ MDIO_AN_10GBT_CTRL_ADV2_5G,
+ MDIO_AN_10GBT_CTRL_ADV2_5G);
+ mv3310_modify(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
+ MV_MGBASET_AN_FS_RETRAIN_2_5G,
+ MV_MGBASET_AN_FS_RETRAIN_2_5G);
+ break;
+ default:
+ return;
+ }
+}
+
static int mv3310_resume(struct phy_device *phydev)
{
int ret;
@@ -462,11 +536,15 @@ static int mv3310_config_init(struct phy_device *phydev)
/* Check that the PHY interface type is compatible */
if (phydev->interface != PHY_INTERFACE_MODE_SGMII &&
phydev->interface != PHY_INTERFACE_MODE_2500BASEX &&
+ phydev->interface != PHY_INTERFACE_MODE_2500BASET &&
phydev->interface != PHY_INTERFACE_MODE_XAUI &&
phydev->interface != PHY_INTERFACE_MODE_RXAUI &&
- phydev->interface != PHY_INTERFACE_MODE_10GBASER)
+ phydev->interface != PHY_INTERFACE_MODE_10GBASER &&
+ phydev->interface != PHY_INTERFACE_MODE_5GKR)
return -ENODEV;
+ mv_set_adv_config_init(phydev);
+
phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
/* Power up so reset works */
@@ -584,7 +662,7 @@ static int mv3310_aneg_done(struct phy_device *phydev)
return genphy_c45_aneg_done(phydev);
}
-static void mv3310_update_interface(struct phy_device *phydev)
+static void mv_update_interface(struct phy_device *phydev)
{
struct mv3310_priv *priv = dev_get_drvdata(&phydev->mdio.dev);
@@ -599,7 +677,9 @@ static void mv3310_update_interface(struct phy_device *phydev)
if ((phydev->interface == PHY_INTERFACE_MODE_SGMII ||
phydev->interface == PHY_INTERFACE_MODE_2500BASEX ||
- phydev->interface == PHY_INTERFACE_MODE_10GBASER) &&
+ phydev->interface == PHY_INTERFACE_MODE_10GBASER ||
+ phydev->interface == PHY_INTERFACE_MODE_5GKR ||
+ phydev->interface == PHY_INTERFACE_MODE_2500BASET) &&
phydev->link) {
/* The PHY automatically switches its serdes interface (and
* active PHYXS instance) between Cisco SGMII, 10GBase-R and
@@ -611,8 +691,11 @@ static void mv3310_update_interface(struct phy_device *phydev)
case SPEED_10000:
phydev->interface = PHY_INTERFACE_MODE_10GBASER;
break;
+ case SPEED_5000:
+ phydev->interface = PHY_INTERFACE_MODE_5GKR;
+ break;
case SPEED_2500:
- phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
+ phydev->interface = PHY_INTERFACE_MODE_2500BASET;
break;
case SPEED_1000:
case SPEED_100:
@@ -625,6 +708,35 @@ static void mv3310_update_interface(struct phy_device *phydev)
}
}
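+/* Decode link speed and duplex from the copper-side PHY status value. */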
+static void mv_set_speed_duplex(struct phy_device *phydev, int status)
+{
+ switch (status & MV_PHY_STATUS_SPD_MASK) {
+ case MV_PHY_STATUS_10000:
+ phydev->speed = SPEED_10000;
+ break;
+ case MV_PHY_STATUS_5000:
+ phydev->speed = SPEED_5000;
+ break;
+ case MV_PHY_STATUS_2500:
+ phydev->speed = SPEED_2500;
+ break;
+ case MV_PHY_STATUS_1000:
+ phydev->speed = SPEED_1000;
+ break;
+ case MV_PHY_STATUS_100:
+ phydev->speed = SPEED_100;
+ break;
+ default:
+ phydev->speed = SPEED_10;
+ break;
+ }
+
+ if (status & MV_PHY_STATUS_DUPLEX)
+ phydev->duplex = DUPLEX_FULL;
+ else
+ phydev->duplex = DUPLEX_HALF;
+}
+
/* 10GBASE-ER,LR,LRM,SR do not support autonegotiation. */
static int mv3310_read_status_10gbaser(struct phy_device *phydev)
{
@@ -716,7 +828,7 @@ static int mv3310_read_status_copper(struct phy_device *phydev)
static int mv3310_read_status(struct phy_device *phydev)
{
- int err, val;
+ int err, val, status;
phydev->speed = SPEED_UNKNOWN;
phydev->duplex = DUPLEX_UNKNOWN;
@@ -737,8 +849,63 @@ static int mv3310_read_status(struct phy_device *phydev)
if (err < 0)
return err;
- if (phydev->link)
- mv3310_update_interface(phydev);
+ if (phydev->link) {
+ status = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_COPPER_STATUS);
+ if (status < 0)
+ return status;
+
+ mv_set_speed_duplex(phydev, status);
+ mv_update_interface(phydev);
+ }
+
+ return 0;
+}
+
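+/* Read link state, speed and duplex from the copper status register and,
+ * when autonegotiation is enabled, resolve pause settings from the link
+ * partner and local advertisements.
+ */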
+static int m88e2110_read_status(struct phy_device *phydev)
+{
+ int adv, lpa, lpagb, status;
+
+ status = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_COPPER_STATUS);
+ if (status < 0)
+ return status;
+
+ if (!(status & MV_COPPER_PHY_STATUS_LINK)) {
+ phydev->link = 0;
+ return 0;
+ }
+
+ phydev->link = 1;
+ mv_set_speed_duplex(phydev, status);
+
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+
+ mv_update_interface(phydev);
+
+ if (phydev->autoneg == AUTONEG_ENABLE) {
+ lpa = genphy_c45_read_lpa(phydev);
+ if (lpa < 0)
+ return lpa;
+
+ lpagb = phy_read_mmd(phydev, MDIO_MMD_AN, MV_AN_STAT1000);
+ if (lpagb < 0)
+ return lpagb;
+
+ adv = phy_read_mmd(phydev, MDIO_MMD_AN, MV_PHY_AN_ADVERTISE);
+ if (adv < 0)
+ return adv;
+
+ mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising, lpagb);
+
+ lpa &= adv;
+
+ if (phydev->duplex == DUPLEX_FULL) {
+ phydev->pause = lpa & LPA_PAUSE_CAP ? 1 : 0;
+ phydev->asym_pause = lpa & LPA_PAUSE_ASYM ? 1 : 0;
+ }
+ } else {
+ linkmode_zero(phydev->lp_advertising);
+ }
return 0;
}
@@ -792,7 +959,7 @@ static struct phy_driver mv3310_drivers[] = {
.config_init = mv3310_config_init,
.config_aneg = mv3310_config_aneg,
.aneg_done = mv3310_aneg_done,
- .read_status = mv3310_read_status,
+ .read_status = m88e2110_read_status,
.get_tunable = mv3310_get_tunable,
.set_tunable = mv3310_set_tunable,
.remove = mv3310_remove,
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 57b1b138522e..7b4fc8543138 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -306,6 +306,28 @@ static int phylink_parse_mode(struct phylink *pl, struct fwnode_handle *fwnode)
phylink_set(pl->supported, 2500baseX_Full);
break;
+ case PHY_INTERFACE_MODE_2500BASET:
+ phylink_set(pl->supported, 10baseT_Half);
+ phylink_set(pl->supported, 10baseT_Full);
+ phylink_set(pl->supported, 100baseT_Half);
+ phylink_set(pl->supported, 100baseT_Full);
+ phylink_set(pl->supported, 1000baseT_Half);
+ phylink_set(pl->supported, 1000baseT_Full);
+ phylink_set(pl->supported, 2500baseT_Full);
+ break;
+
+ case PHY_INTERFACE_MODE_5GKR:
+ phylink_set(pl->supported, 10baseT_Half);
+ phylink_set(pl->supported, 10baseT_Full);
+ phylink_set(pl->supported, 100baseT_Half);
+ phylink_set(pl->supported, 100baseT_Full);
+ phylink_set(pl->supported, 1000baseT_Half);
+ phylink_set(pl->supported, 1000baseT_Full);
+ phylink_set(pl->supported, 1000baseX_Full);
+ phylink_set(pl->supported, 2500baseT_Full);
+ phylink_set(pl->supported, 5000baseT_Full);
+ break;
+
case PHY_INTERFACE_MODE_USXGMII:
case PHY_INTERFACE_MODE_10GKR:
case PHY_INTERFACE_MODE_10GBASER:
diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
index fe64430b438a..de32e094c2f0 100644
--- a/drivers/of/dynamic.c
+++ b/drivers/of/dynamic.c
@@ -328,7 +328,7 @@ void of_node_release(struct kobject *kobj)
/* We should never be releasing nodes that haven't been detached. */
if (!of_node_check_flag(node, OF_DETACHED)) {
- pr_err("ERROR: Bad of_node_put() on %pOF\n", node);
+ pr_err("ERROR: Bad of_node_put() on %s\n", node->full_name);
dump_stack();
return;
}
@@ -339,8 +339,8 @@ void of_node_release(struct kobject *kobj)
if (!of_node_check_flag(node, OF_OVERLAY_FREE_CSET)) {
/* premature refcount of zero, do not free memory */
- pr_err("ERROR: memory leak before free overlay changeset, %pOF\n",
- node);
+ pr_err("ERROR: memory leak before free overlay changeset, %s\n",
+ node->full_name);
return;
}
@@ -350,8 +350,8 @@ void of_node_release(struct kobject *kobj)
* yet been removed, or by a non-overlay mechanism.
*/
if (node->properties)
- pr_err("ERROR: %s(), unexpected properties in %pOF\n",
- __func__, node);
+ pr_err("ERROR: %s(), unexpected properties in %s\n",
+ __func__, node->full_name);
}
property_list_free(node->properties);
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index 64e2f5e379aa..bdaffe652712 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -197,6 +197,14 @@ config PCI_HOST_THUNDER_PEM
help
Say Y here if you want PCIe support for CN88XX Cavium Thunder SoCs.
+config PCI_HOST_OCTEONTX2_PEM
+ bool "Marvell OcteonTX2 PCIe controller to off-chip devices"
+ depends on ARM64
+ depends on OF
+ select PCI_HOST_COMMON
+ help
+ Say Y here if you want PCIe support for CN9XXX Marvell OcteonTX2 SoCs.
+
config PCI_HOST_THUNDER_ECAM
bool "Cavium Thunder ECAM controller to on-chip devices on pass-1.x silicon"
depends on ARM64 || COMPILE_TEST
@@ -205,6 +213,14 @@ config PCI_HOST_THUNDER_ECAM
help
Say Y here if you want ECAM support for CN88XX-Pass-1.x Cavium Thunder SoCs.
+config PCI_OCTEON_PEM
+ bool "Marvell Octeon PEM (PCIe MAC) controller"
+ depends on ARM64 || COMPILE_TEST
+ depends on PCI
+ depends on PCI_MSI_IRQ_DOMAIN
+ help
+ Say Y here if you want PEM controller support for Marvell ARM64 Octeon SoCs.
+
config PCIE_ROCKCHIP
bool
depends on PCI
diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile
index 04c6edc285c5..a9e1d9deb4e0 100644
--- a/drivers/pci/controller/Makefile
+++ b/drivers/pci/controller/Makefile
@@ -50,5 +50,7 @@ obj-y += mobiveil/
ifdef CONFIG_PCI
obj-$(CONFIG_ARM64) += pci-thunder-ecam.o
obj-$(CONFIG_ARM64) += pci-thunder-pem.o
+obj-$(CONFIG_ARM64) += pci-octeontx2-pem.o
+obj-$(CONFIG_ARM64) += pci-octeon-pem.o
obj-$(CONFIG_ARM64) += pci-xgene.o
endif
diff --git a/drivers/pci/controller/pci-octeon-pem.c b/drivers/pci/controller/pci-octeon-pem.c
new file mode 100644
index 000000000000..b5b3e894c185
--- /dev/null
+++ b/drivers/pci/controller/pci-octeon-pem.c
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Octeon PEM driver
+ *
+ * Copyright (C) 2021 Marvell.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/sysfs.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#define DRV_NAME "octeon-pem"
+#define DRV_VERSION "1.0"
+
+#define PCI_DEVID_OCTEON_PEM 0xA06C
+
+#define ID_SHIFT 36
+#define DOMAIN_OFFSET 0x3
+#define RST_INT_OFFSET 0x300
+#define RST_INT_ENA_W1C_OFFSET 0x310
+#define RST_INT_ENA_W1S_OFFSET 0x318
+#define RST_INT_LINKDOWN BIT(1)
+
+struct pem_ctlr {
+ int index;
+ char irq_name[32];
+ void __iomem *base;
+ struct pci_dev *pdev;
+ struct work_struct recover_rc_work;
+};
+
+static void pem_recover_rc_link(struct work_struct *ws)
+{
+ struct pem_ctlr *pem = container_of(ws, struct pem_ctlr,
+ recover_rc_work);
+ struct pci_dev *pem_dev = pem->pdev;
+ struct pci_dev *root_port;
+ struct pci_bus *bus;
+ int rc_domain;
+
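+ /* The root port behind each PEM lives in its own PCI domain, starting at DOMAIN_OFFSET */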
+ rc_domain = pem->index + DOMAIN_OFFSET;
+
+ root_port = pci_get_domain_bus_and_slot(rc_domain, 0, 0);
+ if (!root_port) {
+ dev_err(&pem_dev->dev, "failed to get root port\n");
+ return;
+ }
+
+ pci_lock_rescan_remove();
+
+ /* Clean-up device and RC bridge */
+ pci_stop_and_remove_bus_device(root_port);
+
+ /*
+ * With auto-mode in use, hardware resets and re-initializes the
+ * config space of the RC bridge on every link-down event.
+ * Re-scanning after the removal sets the RC bridge up cleanly in
+ * the kernel, ready for the next link-up event.
+ */
+ bus = NULL;
+ while ((bus = pci_find_next_bus(bus)) != NULL)
+ if (bus->domain_nr == rc_domain)
+ pci_rescan_bus(bus);
+ pci_unlock_rescan_remove();
+ pci_dev_put(root_port);
+
+ /* Ack interrupt */
+ writeq(RST_INT_LINKDOWN, pem->base + RST_INT_OFFSET);
+ /* Enable RST_INT[LINKDOWN] interrupt */
+ writeq(RST_INT_LINKDOWN, pem->base + RST_INT_ENA_W1S_OFFSET);
+}
+
+irqreturn_t pem_irq_handler(int irq, void *dev_id)
+{
+ struct pem_ctlr *pem = (struct pem_ctlr *)dev_id;
+
+ /* Disable RST_INT[LINKDOWN] interrupt */
+ writeq(RST_INT_LINKDOWN, pem->base + RST_INT_ENA_W1C_OFFSET);
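+ /* Defer link recovery to a workqueue; the bus rescan takes sleeping locks and cannot run in hard IRQ context */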
+ schedule_work(&pem->recover_rc_work);
+
+ return IRQ_HANDLED;
+}
+
+static int pem_register_interrupts(struct pci_dev *pdev)
+{
+ struct pem_ctlr *pem = pci_get_drvdata(pdev);
+ int nvec, err;
+
+ nvec = pci_msix_vec_count(pdev);
+ /* Some earlier silicon versions do not support the RST vector,
+ * so check the MSI-X table size before registering; otherwise
+ * return with an info message.
+ */
+ if (nvec != 10) {
+ dev_info(&pdev->dev,
+ "No RST MSI-X vector support on silicon\n");
+ return 0;
+ }
+ err = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
+ if (err < 0) {
+ dev_err(&pdev->dev, "pci_alloc_irq_vectors() failed %d\n",
+ err);
+ return -ENOSPC;
+ }
+
+ snprintf(pem->irq_name, 32, "PEM%d RST_INT", pem->index);
+
+ /* Register handler for RST_INT, delivered on the last (10th) MSI-X vector */
+ return devm_request_irq(&pdev->dev, pci_irq_vector(pdev, 9),
+ pem_irq_handler, 0,
+ pem->irq_name, pem);
+}
+
+static int pem_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ struct pem_ctlr *pem;
+ int err;
+
+ pem = devm_kzalloc(dev, sizeof(struct pem_ctlr), GFP_KERNEL);
+ if (pem == NULL)
+ return -ENOMEM;
+
+ pem->pdev = pdev;
+ pci_set_drvdata(pdev, pem);
+
+ err = pcim_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ goto enable_failed;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ goto region_failed;
+ }
+
+ pci_set_master(pdev);
+
+ /* CSR Space mapping */
+ pem->base = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0));
+ if (!pem->base) {
+ dev_err(&pdev->dev, "Unable to map BAR0\n");
+ err = -ENODEV;
+ goto bar0_map_failed;
+ }
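+ /* The PEM index is encoded in bits [39:36] of the BAR0 physical address */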
+ pem->index = ((u64)pci_resource_start(pdev, 0) >> ID_SHIFT) & 0xf;
+
+ err = pem_register_interrupts(pdev);
+ if (err < 0) {
+ dev_err(dev, "Register interrupt failed\n");
+ goto irq_failed;
+ }
+
+ INIT_WORK(&pem->recover_rc_work, pem_recover_rc_link);
+
+ /* Enable RST_INT[LINKDOWN] interrupt */
+ writeq(RST_INT_LINKDOWN, pem->base + RST_INT_ENA_W1S_OFFSET);
+
+ dev_info(&pdev->dev, "PEM%d probed\n", pem->index);
+ return 0;
+
+irq_failed:
+bar0_map_failed:
+ pci_release_regions(pdev);
+region_failed:
+enable_failed:
+ pci_set_drvdata(pdev, NULL);
+ return err;
+}
+
+static void pem_remove(struct pci_dev *pdev)
+{
+ pci_release_regions(pdev);
+}
+
+/* Supported devices */
+static const struct pci_device_id pem_id_table[] = {
+ {PCI_VDEVICE(CAVIUM, PCI_DEVID_OCTEON_PEM)},
+ {0} /* end of table */
+};
+
+static struct pci_driver pem_driver = {
+ .name = DRV_NAME,
+ .id_table = pem_id_table,
+ .probe = pem_probe,
+ .remove = pem_remove,
+};
+
+module_pci_driver(pem_driver);
+
+MODULE_AUTHOR("Marvell Inc.");
+MODULE_DESCRIPTION("Marvell Octeon PEM Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, pem_id_table);
diff --git a/drivers/pci/controller/pci-octeontx2-pem.c b/drivers/pci/controller/pci-octeontx2-pem.c
new file mode 100644
index 000000000000..71e727c6962e
--- /dev/null
+++ b/drivers/pci/controller/pci-octeontx2-pem.c
@@ -0,0 +1,481 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 PCIe host controller
+ *
+ * Copyright (C) 2019 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/pci-acpi.h>
+#include <linux/pci-ecam.h>
+#include <linux/platform_device.h>
+#include "../pci.h"
+
+#if defined(CONFIG_PCI_HOST_OCTEONTX2_PEM)
+
+/* Bridge config space reads/writes done using
+ * these registers.
+ */
+#define PEM_CFG_WR 0x18
+#define PEM_CFG_RD 0x20
+
+#define PCIERC_RAS_EINJ_EN 0x348
+#define PCIERC_RAS_EINJ_CTL6CMPP0 0x364
+#define PCIERC_RAS_EINJ_CTL6CMPV0 0x374
+#define PCIERC_RAS_EINJ_CTL6CHGP1 0x388
+#define PCIERC_RAS_EINJ_CTL6CHGV1 0x398
+#define PCIERC_RAS_EINJ_CTL6PE 0x3A4
+#define PCIERC_RASDP_EP_CTL 0x420
+#define PCIERC_RASDP_DE_ME 0x440
+
+struct octeontx2_pem_pci {
+ u32 ea_entry[3];
+ void __iomem *pem_reg_base;
+};
+
+static int octeontx2_pem_bridge_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+ struct octeontx2_pem_pci *pem_pci;
+ u64 read_val;
+
+ if (devfn != 0 || where >= 2048) {
+ *val = ~0;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+ pem_pci = (struct octeontx2_pem_pci *)cfg->priv;
+
+ /*
+ * 32-bit accesses only. Write the address to the low order
+ * bits of PEM_CFG_RD, then trigger the read by reading back.
+ * The config data lands in the upper 32-bits of PEM_CFG_RD.
+ */
+ read_val = where & ~3ull;
+ writeq(read_val, pem_pci->pem_reg_base + PEM_CFG_RD);
+ read_val = readq(pem_pci->pem_reg_base + PEM_CFG_RD);
+ read_val >>= 32;
+
+ /* The HW reset values at a few config space locations are
+ * garbage, fix them up.
+ */
+ switch (where & ~3) {
+ case 0x00: /* DevID & VenID */
+ read_val = 0xA02D177D;
+ break;
+ case 0x04:
+ read_val = 0x00100006;
+ break;
+ case 0x08:
+ read_val = 0x06040100;
+ break;
+ case 0x0c:
+ read_val = 0x00010000;
+ break;
+ case 0x18:
+ read_val = 0x00010100;
+ break;
+ case 0x40:
+ read_val &= 0xffff00ff;
+ read_val |= 0x00005000; /* In RC mode, point to EA capability */
+ break;
+ case 0x5c: /* EA_ENTRY2 */
+ read_val = pem_pci->ea_entry[0];
+ break;
+ case 0x60: /* EA_ENTRY3 */
+ read_val = pem_pci->ea_entry[1];
+ break;
+ case 0x64: /* EA_ENTRY4 */
+ read_val = pem_pci->ea_entry[2];
+ break;
+ case 0x70: /* Express Cap */
+ /* HW reset value is '0', set PME interrupt vector to 1 */
+ if (!(read_val & (0x1f << 25)))
+ read_val |= (1u << 25);
+ break;
+ default:
+ break;
+ }
+ read_val >>= (8 * (where & 3));
+ switch (size) {
+ case 1:
+ read_val &= 0xff;
+ break;
+ case 2:
+ read_val &= 0xffff;
+ break;
+ default:
+ break;
+ }
+ *val = read_val;
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int octeontx2_pem_config_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+
+ if (bus->number < cfg->busr.start ||
+ bus->number > cfg->busr.end)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ /*
+ * The first device on the bus is the PEM PCIe bridge.
+ * Special case its config access.
+ */
+ if (bus->number == cfg->busr.start)
+ return octeontx2_pem_bridge_read(bus, devfn, where, size, val);
+
+ return pci_generic_config_read(bus, devfn, where, size, val);
+}
+
+/*
+ * Some of the w1c_bits below also include read-only or non-writable
+ * reserved bits, this makes the code simpler and is OK as the bits
+ * are not affected by writing zeros to them.
+ */
+static u32 octeontx2_pem_bridge_w1c_bits(u64 where_aligned)
+{
+ u32 w1c_bits = 0;
+
+ switch (where_aligned) {
+ case 0x04: /* Command/Status */
+ case 0x1c: /* Base and I/O Limit/Secondary Status */
+ w1c_bits = 0xff000000;
+ break;
+ case 0x44: /* Power Management Control and Status */
+ w1c_bits = 0xfffffe00;
+ break;
+ case 0x78: /* Device Control/Device Status */
+ case 0x80: /* Link Control/Link Status */
+ case 0x88: /* Slot Control/Slot Status */
+ case 0x90: /* Root Status */
+ case 0xa0: /* Link Control 2 Registers/Link Status 2 */
+ w1c_bits = 0xffff0000;
+ break;
+ case 0x104: /* Uncorrectable Error Status */
+ case 0x110: /* Correctable Error Status */
+ case 0x130: /* Error Status */
+ case 0x180: /* Lane error status */
+ w1c_bits = 0xffffffff;
+ break;
+ default:
+ break;
+ }
+ return w1c_bits;
+}
+
+/* Some bits must be written to one so they appear to be read-only. */
+static u32 octeontx2_pem_bridge_w1_bits(u64 where_aligned)
+{
+ u32 w1_bits;
+
+ switch (where_aligned) {
+ case 0x1c: /* I/O Base / I/O Limit, Secondary Status */
+ /* Force 32-bit I/O addressing. */
+ w1_bits = 0x0101;
+ break;
+ case 0x24: /* Prefetchable Memory Base / Prefetchable Memory Limit */
+ /* Force 64-bit addressing */
+ w1_bits = 0x00010001;
+ break;
+ default:
+ w1_bits = 0;
+ break;
+ }
+ return w1_bits;
+}
+
+static int octeontx2_pem_bridge_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+ struct octeontx2_pem_pci *pem_pci;
+ u64 where_aligned = where & ~3ull;
+ u64 write_val, read_val;
+ u32 mask = 0;
+
+
+ if (devfn != 0 || where >= 2048)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ pem_pci = (struct octeontx2_pem_pci *)cfg->priv;
+
+ /*
+ * 32-bit accesses only. If the write is for a size smaller
+ * than 32-bits, we must first read the 32-bit value and merge
+ * in the desired bits and then write the whole 32-bits back
+ * out.
+ */
+ switch (size) {
+ case 1:
+ writeq(where_aligned, pem_pci->pem_reg_base + PEM_CFG_RD);
+ read_val = readq(pem_pci->pem_reg_base + PEM_CFG_RD);
+ read_val >>= 32;
+ mask = ~(0xff << (8 * (where & 3)));
+ read_val &= mask;
+ val = (val & 0xff) << (8 * (where & 3));
+ val |= (u32)read_val;
+ break;
+ case 2:
+ writeq(where_aligned, pem_pci->pem_reg_base + PEM_CFG_RD);
+ read_val = readq(pem_pci->pem_reg_base + PEM_CFG_RD);
+ read_val >>= 32;
+ mask = ~(0xffff << (8 * (where & 3)));
+ read_val &= mask;
+ val = (val & 0xffff) << (8 * (where & 3));
+ val |= (u32)read_val;
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * By expanding the write width to 32 bits, we may
+ * inadvertently hit some W1C bits that were not intended to
+ * be written. Calculate the mask that must be applied to the
+ * data to be written to avoid these cases.
+ */
+ if (mask) {
+ u32 w1c_bits = octeontx2_pem_bridge_w1c_bits(where);
+
+ if (w1c_bits) {
+ mask &= w1c_bits;
+ val &= ~mask;
+ }
+ }
+
+ /*
+ * Some bits must be read-only with value of one. Since the
+ * access method allows these to be cleared if a zero is
+ * written, force them to one before writing.
+ */
+ val |= octeontx2_pem_bridge_w1_bits(where_aligned);
+
+ /*
+ * Low order bits are the config address, the high order 32
+ * bits are the data to be written.
+ */
+ write_val = (((u64)val) << 32) | where_aligned;
+ writeq(write_val, pem_pci->pem_reg_base + PEM_CFG_WR);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static void octeontx2_be_workaround_init(struct pci_bus *bus)
+{
+ u32 val;
+
+ /* Ensure that PCIERC_RASDP_DE_ME.ERR_MODE is set to 0 */
+ octeontx2_pem_bridge_read(bus, 0x00,
+ PCIERC_RASDP_DE_ME, 4, &val);
+ octeontx2_pem_bridge_write(bus, 0x00,
+ PCIERC_RASDP_DE_ME, 4, val & ~BIT(0));
+
+ /* Disable parity error correction */
+ octeontx2_pem_bridge_read(bus, 0x00,
+ PCIERC_RASDP_EP_CTL, 4, &val);
+ octeontx2_pem_bridge_write(bus, 0x00,
+ PCIERC_RASDP_EP_CTL, 4, val | BIT(0));
+
+ /* Enable RAS to change header
+ * PCIERC_RAS_EINJ_EN.EINJ0_EN.set(0);
+ * PCIERC_RAS_EINJ_EN.EINJ1_EN.set(0);
+ * PCIERC_RAS_EINJ_EN.EINJ2_EN.set(0);
+ * PCIERC_RAS_EINJ_EN.EINJ3_EN.set(0);
+ * PCIERC_RAS_EINJ_EN.EINJ4_EN.set(0);
+ * PCIERC_RAS_EINJ_EN.EINJ5_EN.set(0);
+ * PCIERC_RAS_EINJ_EN.EINJ6_EN.set(1);
+ */
+ octeontx2_pem_bridge_write(bus, 0x00,
+ PCIERC_RAS_EINJ_EN, 4, BIT(6));
+
+ /* Set the error injection count to 1, set the type to TLP,
+ * and keep INV_CNTRL at 0.
+ */
+ octeontx2_pem_bridge_write(bus, 0x00,
+ PCIERC_RAS_EINJ_CTL6PE, 4, 1);
+
+ /* Set up compare point to compare Fmt/Type field in TLP Header word 0
+ * Where bits[31:0] = tlp_dw[7:0], tlp_dw[15:8],
+ * tlp_dw[23:16], tlp_dw[31:24].
+ *
+ * PCIERC_RAS_EINJ_CTL6CMPP0.EINJ6_COM_PT_H0.set(32'hfe00_0000);
+ */
+ octeontx2_pem_bridge_write(bus, 0x00,
+ PCIERC_RAS_EINJ_CTL6CMPP0, 4, 0xFE000000);
+
+ /* Set up the value to compare against,
+ * look for Fmt/Type to indicate CfgRd/CfgWr - both type 0 or 1.
+ * Where bits[31:0] = tlp_dw[7:0], tlp_dw[15:8],
+ * tlp_dw[23:16], tlp_dw[31:24]
+ */
+ octeontx2_pem_bridge_write(bus, 0x00,
+ PCIERC_RAS_EINJ_CTL6CMPV0, 4, 0x44000000);
+
+ /* Set up the bit position in TLP Header word 1 to replace
+ * (LBE is bits 7:4, FBE is bits 3:0).
+ *
+ * Where bits[31:0] = tlp_dw[7:0], tlp_dw[15:8],
+ * tlp_dw[23:16], tlp_dw[31:24].
+ */
+ octeontx2_pem_bridge_write(bus, 0x00,
+ PCIERC_RAS_EINJ_CTL6CHGP1, 4, 0xFF);
+}
+
+static void octeontx2_be_workaround(struct pci_bus *bus, int where,
+ int size, u32 val)
+{
+ struct pci_host_bridge *rc;
+ u32 reg, be = 0;
+
+ rc = pci_find_host_bridge(bus);
+
+ /* Setup RAS to inject one error */
+ octeontx2_be_workaround_init(rc->bus);
+
+ /* Get byte-enable to inject into TLP */
+ where &= 0x03;
+ switch (size) {
+ case 1:
+ be = 1 << where;
+ break;
+ case 2:
+ be = 3 << where;
+ break;
+ case 4:
+ be = 0xF;
+ }
+
+ /* Set up the value you'd like to use for FBE (Cfg ops must have LBE==0)
+ * Where bits[31:0] = tlp_dw[7:0], tlp_dw[15:8],
+ * tlp_dw[23:16], tlp_dw[31:24].
+ */
+ octeontx2_pem_bridge_write(rc->bus, 0x00,
+ PCIERC_RAS_EINJ_CTL6CHGV1, 4, be);
+
+ /* To be absolutely sure that the ECAM access does not get to
+ * the MAC prior to the PCIERC register updates that are setting
+ * up for that ECAM access, SW should read back one of the
+ * registers it wrote before launching the ECAM access.
+ */
+ octeontx2_pem_bridge_read(rc->bus, 0x00,
+ PCIERC_RAS_EINJ_CTL6CHGV1, 4, &reg);
+}
+
+static int octeontx2_pem_config_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+
+ if (bus->number < cfg->busr.start ||
+ bus->number > cfg->busr.end)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ /*
+ * The first device on the bus is the PEM PCIe bridge.
+ * Special case its config access.
+ */
+ if (bus->number == cfg->busr.start)
+ return octeontx2_pem_bridge_write(bus, devfn, where, size, val);
+
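+ /* Fix up the byte enables of the generated config TLP via RAS error injection before the ECAM access */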
+ octeontx2_be_workaround(bus, where, size, val);
+
+ return pci_generic_config_write(bus, devfn, where, size, val);
+}
+
+static int octeontx2_pem_init(struct device *dev, struct pci_config_window *cfg,
+ struct resource *res_pem)
+{
+ struct octeontx2_pem_pci *pem_pci;
+ resource_size_t bar4_start;
+
+ pem_pci = devm_kzalloc(dev, sizeof(*pem_pci), GFP_KERNEL);
+ if (!pem_pci)
+ return -ENOMEM;
+
+ pem_pci->pem_reg_base = devm_ioremap(dev, res_pem->start, 0x10000);
+ if (!pem_pci->pem_reg_base)
+ return -ENOMEM;
+
+ /*
+ * The MSI-X BAR for the PEM and AER interrupts is located at
+ * a fixed offset from the PEM register base. Generate a
+ * fragment of the synthesized Enhanced Allocation capability
+ * structure here for the BAR.
+ */
+ bar4_start = res_pem->start + 0xf00000000;
+ pem_pci->ea_entry[0] = (u32)bar4_start | 2;
+ pem_pci->ea_entry[1] = (u32)(res_pem->end - bar4_start) & ~3u;
+ pem_pci->ea_entry[2] = (u32)(bar4_start >> 32);
+
+ cfg->priv = pem_pci;
+ return 0;
+}
+
+static int octeontx2_pem_platform_init(struct pci_config_window *cfg)
+{
+ struct device *dev = cfg->parent;
+ struct platform_device *pdev;
+ struct resource *res_pem;
+
+ if (!dev->of_node)
+ return -EINVAL;
+
+ pdev = to_platform_device(dev);
+
+ /*
+ * The second register range is the PEM bridge to the PCIe
+ * bus. It has a different config access method than those
+ * devices behind the bridge.
+ */
+ res_pem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res_pem) {
+ dev_err(dev, "missing \"reg[1]\"property\n");
+ return -EINVAL;
+ }
+
+ return octeontx2_pem_init(dev, cfg, res_pem);
+}
+
+static struct pci_ecam_ops pci_octeontx2_pem_ops = {
+ .bus_shift = 20,
+ .init = octeontx2_pem_platform_init,
+ .pci_ops = {
+ .map_bus = pci_ecam_map_bus,
+ .read = octeontx2_pem_config_read,
+ .write = octeontx2_pem_config_write,
+ }
+};
+
+static const struct of_device_id octeontx2_pem_of_match[] = {
+ {
+ .compatible = "marvell,pci-host-octeontx2-pem",
+ .data = &pci_octeontx2_pem_ops,
+ },
+ { },
+};
+
+static int octeontx2_pem_probe(struct platform_device *pdev)
+{
+ return pci_host_common_probe(pdev);
+}
+
+static struct platform_driver octeontx2_pem_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = octeontx2_pem_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = octeontx2_pem_probe,
+};
+builtin_platform_driver(octeontx2_pem_driver);
+
+#endif
diff --git a/drivers/pci/endpoint/Kconfig b/drivers/pci/endpoint/Kconfig
index 17bbdc9bbde0..58bd2a3b18b1 100644
--- a/drivers/pci/endpoint/Kconfig
+++ b/drivers/pci/endpoint/Kconfig
@@ -28,6 +28,13 @@ config PCI_ENDPOINT_CONFIGFS
configure the endpoint function and used to bind the
function with a endpoint controller.
+config PCIE_ARMADA_DW_EP
+ bool "Armada DesignWare PCI End-point driver "
+ depends on OF && HAS_IOMEM
+ help
+ Enable this configuration option to support the configurable Armada
+ PCIe endpoint controller.
+
source "drivers/pci/endpoint/functions/Kconfig"
endmenu
diff --git a/drivers/pci/endpoint/Makefile b/drivers/pci/endpoint/Makefile
index 95b2fe47e3b0..6d51b3e38dde 100644
--- a/drivers/pci/endpoint/Makefile
+++ b/drivers/pci/endpoint/Makefile
@@ -6,3 +6,4 @@
obj-$(CONFIG_PCI_ENDPOINT_CONFIGFS) += pci-ep-cfs.o
obj-$(CONFIG_PCI_ENDPOINT) += pci-epc-core.o pci-epf-core.o\
pci-epc-mem.o functions/
+obj-$(CONFIG_PCIE_ARMADA_DW_EP) += pcie-armada-dw-ep.o
diff --git a/drivers/pci/endpoint/pcie-armada-dw-ep.c b/drivers/pci/endpoint/pcie-armada-dw-ep.c
new file mode 100644
index 000000000000..391e70397d05
--- /dev/null
+++ b/drivers/pci/endpoint/pcie-armada-dw-ep.c
@@ -0,0 +1,403 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe end point controller driver for Marvell Armada
+ *
+ * Armada PCIe Glue Layer Source Code
+ *
+ * Based on Armada-SP2 PCIe end-point driver
+ */
+#define MODULE_NAME "armada-pcie-ep"
+
+#include <linux/armada-pcie-ep.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/resource.h>
+#include <linux/of_pci.h>
+#include <linux/of_irq.h>
+#include <uapi/linux/pci_regs.h>
+#include <linux/memory.h>
+
+#define PCIE_GLOBAL_CTRL 0x0
+#define PCIE_GLOBAL_CTRL_CRS_EN (1 << 9)
+#define PCIE_GLOBAL_CTRL_TYPE_OFF 4
+#define PCIE_GLOBAL_CTRL_TYPE_MASK 0xF
+#define PCIE_GLOBAL_CTRL_TYPE_RC (0x4)
+
+#define PCIE_ATU_VIEWPORT 0x900
+#define PCIE_ATU_REGION_INBOUND (0x1 << 31)
+#define PCIE_ATU_REGION_OUTBOUND (0x0 << 31)
+#define PCIE_ATU_REGION_INDEX1 (0x1 << 0)
+#define PCIE_ATU_REGION_INDEX0 (0x0 << 0)
+#define PCIE_ATU_CR1 0x904
+#define PCIE_ATU_CR1_FUNC_OFF 20
+#define PCIE_ATU_CR1_FUNC_MASK 0x1F
+#define PCIE_ATU_TYPE_MEM (0x0 << 0)
+#define PCIE_ATU_TYPE_IO (0x2 << 0)
+#define PCIE_ATU_TYPE_CFG0 (0x4 << 0)
+#define PCIE_ATU_TYPE_CFG1 (0x5 << 0)
+#define PCIE_ATU_CR2 0x908
+#define PCIE_ATU_CR2_REGION_EN (0x1 << 31)
+#define PCIE_ATU_CR2_BAR_EN (0x1 << 30)
+#define PCIE_ATU_CR2_FUNC_EN (0x1 << 19)
+#define PCIE_ATU_CR2_BAR_OFF 8
+#define PCIE_ATU_LOWER_BASE 0x90C
+#define PCIE_ATU_UPPER_BASE 0x910
+#define PCIE_ATU_LIMIT 0x914
+#define PCIE_ATU_LOWER_TARGET 0x918
+#define PCIE_ATU_BUS(x) (((x) & 0xff) << 24)
+#define PCIE_ATU_DEV(x) (((x) & 0x1f) << 19)
+#define PCIE_ATU_FUNC(x) (((x) & 0x7) << 16)
+#define PCIE_ATU_UPPER_TARGET 0x91C
+
+#define PCIE_CAP_MSI_OFFSET 0x50
+#define PCIE_MSI_MSG_CTL 0x2
+#define PCIE_MSI_MSG_ADDR_L 0x4
+#define PCIE_MSI_MSG_ADDR_H 0x8
+#define PCI_MSI_FLAGS_QSIZE_OFF 4
+#define PCIE_MSI_MSG_DATA(is_64) (is_64 ? 0xC : 0x8)
+
+#define PCIE_SRIOV_DEVID_OFFSET 0x192
+
+#define PCIE_RESBAR_EXT_CAP_HDR_REG 0x25c
+#define PCIE_RESBAR_EXT_CAP_REG(bar) (PCIE_RESBAR_EXT_CAP_HDR_REG + 4 + \
+ (((bar) / 2 + (bar) % 2) & 0x3) * 8)
+#define PCIE_RESBAR_EXT_CAP_REG_MASK 0x000fffff
+#define PCIE_RESBAR_EXT_CAP_REG_SHIFT 4
+
+#define PCIE_BAR_IS_RESIZABLE(bar) ((bar) == 5 || (bar) == 4 || \
+ (bar) == 2 || (bar) == 0)
+#define MAX_ATU_REGIONS 16
+#define MAX_ATU_SIZE (4ul * SZ_1G)
+
+#define BAR_ENABLE_OFFSET 0
+#define BAR_ENABLE_MASK (1 << BAR_ENABLE_OFFSET)
+
+struct armada_pcie_ep {
+ void __iomem *regs;
+ void __iomem *shadow_regs;
+ void __iomem *lm_regs;
+ void __iomem *pl_regs; /*port logical register only PF0*/
+ struct device *dev;
+ struct clk *clk;
+};
+
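+/* Per-function config space views are laid out at 4KB intervals */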
+#define cfg_space_addr(func_id) (0x1000 * (func_id))
+
+#define cfg_func_base(ep, func_id, off) \
+ ((ep)->regs + cfg_space_addr(func_id) + (off))
+
+#define cfg_shadow_func_base(ep, func_id, off) \
+ ((ep)->shadow_regs + cfg_space_addr(func_id) + (off))
+
+
+#define get_out_region_idx(func_id, id) (func_id + id)
+#define get_in_region_idx(func_id, bar) (func_id + bar)
+
+struct armada_pcie_ep *armada_ep;
+
+void armada_pcie_ep_setup_bar(void *ep_hdl, int func_id, u32 bar_num, u32 props,
+ u64 sz)
+{
+ struct armada_pcie_ep *ep = (struct armada_pcie_ep *)ep_hdl;
+ u32 space_type = props & PCI_BASE_ADDRESS_SPACE;
+ u32 sz_type = (props & PCI_BASE_ADDRESS_MEM_TYPE_MASK);
+ u32 v = 0;
+ void __iomem *resbar = ep->pl_regs + PCIE_RESBAR_EXT_CAP_REG(bar_num);
+ void __iomem *bar = cfg_func_base(ep, func_id,
+ PCI_BASE_ADDRESS_0 + (bar_num * 4));
+ void __iomem *bar_mask = cfg_shadow_func_base(ep, func_id,
+ PCI_BASE_ADDRESS_0 + (bar_num * 4));
+
+ dev_dbg(ep->dev, "func%d: BAR%d size=0x%llx set requested\n",
+ func_id, bar_num, sz);
+ if (space_type == PCI_BASE_ADDRESS_SPACE_IO) {
+ v = props & (~PCI_BASE_ADDRESS_IO_MASK);
+ writel_relaxed(v, bar);
+ } else {
+ /* clear the top 32 bits of the size */
+ if (sz_type == PCI_BASE_ADDRESS_MEM_TYPE_64)
+ writel_relaxed(0, bar + 4);
+
+ v = props & (~PCI_BASE_ADDRESS_MEM_MASK);
+ writel_relaxed(v, bar);
+ }
+
+ /*
+ * Set the BAR size using the Resizable BAR capability registers.
+ * The minimum (and the default) BAR size is 1MB. Once the
+ * Resizable BAR capability register is set, the Resizable BAR
+ * control register at the next offset gets updated automatically.
+ */
+ if (sz > SZ_1M && PCIE_BAR_IS_RESIZABLE(bar_num)) {
+ /* BAR size should be power of 2 already */
+ v = ((sz >> 20) & PCIE_RESBAR_EXT_CAP_REG_MASK);
+ v <<= PCIE_RESBAR_EXT_CAP_REG_SHIFT;
+ writel_relaxed(v, resbar);
+ }
+
+ /* Enable bar */
+ writel_relaxed(BAR_ENABLE_MASK, bar_mask);
+
+}
+EXPORT_SYMBOL(armada_pcie_ep_setup_bar);
+
+void armada_pcie_ep_disable_bars(void *ep_hdl, int func_id, u16 mask)
+{
+ struct armada_pcie_ep *ep = (struct armada_pcie_ep *)ep_hdl;
+ void __iomem *bar_mask = cfg_shadow_func_base(ep, func_id,
+ PCI_BASE_ADDRESS_0);
+ int bar;
+
+ dev_dbg(ep->dev, "func%d: disable BARs 0x%x\n", func_id, mask);
+ mask &= PCIE_EP_ALL_BARS;
+ for (bar = 0; mask; mask >>= 1, bar++) {
+ if (mask & 1)
+ writel_relaxed(0, bar_mask + bar * 4);
+ }
+}
+EXPORT_SYMBOL(armada_pcie_ep_disable_bars);
+
+int armada_pcie_ep_get_msi(void *ep_hdl, int func_id, int vec_id,
+ struct msi_msg *msg)
+{
+ struct armada_pcie_ep *ep = (struct armada_pcie_ep *)ep_hdl;
+ void __iomem *p = cfg_func_base(ep, func_id, PCIE_CAP_MSI_OFFSET);
+ u16 flags, vec_cnt;
+
+ /* check if MSI is enabled and there are enough vectors
+ * QSIZE field indicates log2 of the amount of MSI vectors
+ */
+ flags = readw(p + PCI_MSI_FLAGS);
+ vec_cnt =
+ 1 << ((flags & PCI_MSI_FLAGS_QSIZE) >> PCI_MSI_FLAGS_QSIZE_OFF);
+ if (!(flags & PCI_MSI_FLAGS_ENABLE) || (vec_id > vec_cnt))
+ return -EINVAL;
+
+ dev_dbg(ep->dev, "func%d: get msi vector id/counter 0x%x/%d\n",
+ func_id, vec_id, vec_cnt);
+ msg->address_lo = readl(p + PCI_MSI_ADDRESS_LO);
+ if (flags & PCI_MSI_FLAGS_64BIT) {
+ msg->address_hi = readl(p + PCI_MSI_ADDRESS_HI);
+ msg->data = readl(p + PCI_MSI_DATA_64) + vec_id;
+ } else {
+ msg->address_hi = 0;
+ msg->data = readl(p + PCI_MSI_DATA_32) + vec_id;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(armada_pcie_ep_get_msi);
+
+void armada_pcie_ep_cfg_enable(void *ep_hdl, int func_id)
+{
+ struct armada_pcie_ep *ep = (struct armada_pcie_ep *)ep_hdl;
+ u32 v;
+
+ dev_dbg(ep->dev, "func%d: config enable\n", func_id);
+ v = readl_relaxed(ep->lm_regs + PCIE_GLOBAL_CTRL);
+ v &= ~PCIE_GLOBAL_CTRL_CRS_EN;
+ writel_relaxed(v, ep->lm_regs + PCIE_GLOBAL_CTRL);
+}
+EXPORT_SYMBOL(armada_pcie_ep_cfg_enable);
+
+/*
+ * Remap the host memory space to the local memory space.
+ * By default the memory spaces conflict so we must offset the
+ * host memory space in our local memory space
+ */
+int armada_pcie_ep_remap_host(void *ep_hdl, u32 func_id, u64 local_base,
+ u64 host_base, u64 size)
+{
+ struct armada_pcie_ep *ep = (struct armada_pcie_ep *)ep_hdl;
+ void __iomem *pl_regs = ep->pl_regs;
+ u32 v, region = 0;
+ u64 remain_size = size;
+
+ /* ATU window size must be power of 2 */
+ if (!is_power_of_2(size))
+ return -EINVAL;
+
+ dev_dbg(ep->dev, "func%d: remap local:host(size) %llx:%llx(%llx)\n",
+ func_id, local_base, host_base, size);
+
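+ /* Split the mapping into outbound ATU windows of at most 4GB each */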
+ while (remain_size > 0) {
+ if (region > MAX_ATU_REGIONS) {
+ dev_err(ep->dev,
+ "Insufficient ATU regions to map hosts\n");
+ return -1;
+ }
+
+ v = PCIE_ATU_REGION_OUTBOUND;
+ v |= get_out_region_idx(func_id, region);
+ writel_relaxed(v, pl_regs + PCIE_ATU_VIEWPORT);
+
+ writel_relaxed(local_base & U32_MAX,
+ pl_regs + PCIE_ATU_LOWER_BASE);
+ writel_relaxed(local_base >> 32, pl_regs + PCIE_ATU_UPPER_BASE);
+ writel_relaxed(host_base & U32_MAX,
+ pl_regs + PCIE_ATU_LOWER_TARGET);
+ writel_relaxed(host_base >> 32,
+ pl_regs + PCIE_ATU_UPPER_TARGET);
+
+ if (remain_size > MAX_ATU_SIZE)
+ v = MAX_ATU_SIZE - 1;
+ else
+ v = remain_size - 1;
+ writel_relaxed(v, pl_regs + PCIE_ATU_LIMIT);
+
+ v = (func_id & PCIE_ATU_CR1_FUNC_MASK) << PCIE_ATU_CR1_FUNC_OFF;
+ writel_relaxed(v, pl_regs + PCIE_ATU_CR1);
+
+ v = PCIE_ATU_CR2_REGION_EN;
+ writel_relaxed(v, pl_regs + PCIE_ATU_CR2);
+
+ region++;
+ local_base += MAX_ATU_SIZE;
+ host_base += MAX_ATU_SIZE;
+ remain_size -= MAX_ATU_SIZE;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(armada_pcie_ep_remap_host);
+
+/* Set up the internal target for the BAR. When the PCIe host accesses the
+ * BAR it will reach the space defined by "addr" and "size".
+ */
+void armada_pcie_ep_bar_map(void *ep_hdl, u32 func_id, int bar,
+ phys_addr_t addr, u64 size)
+{
+ struct armada_pcie_ep *ep = (struct armada_pcie_ep *)ep_hdl;
+ void __iomem *pl_regs = ep->pl_regs;
+ u32 region_indx = get_in_region_idx(func_id, bar);
+ u32 v;
+
+ v = PCIE_ATU_REGION_INBOUND | region_indx;
+ writel_relaxed(v, pl_regs + PCIE_ATU_VIEWPORT);
+
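+ /* Align the target address down to the BAR size boundary */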
+ addr = addr & ~(size - 1);
+ v = lower_32_bits(addr);
+ writel_relaxed(v, pl_regs + PCIE_ATU_LOWER_TARGET);
+
+ v = upper_32_bits(addr);
+ writel_relaxed(v, pl_regs + PCIE_ATU_UPPER_TARGET);
+
+ v = (func_id & PCIE_ATU_CR1_FUNC_MASK) << PCIE_ATU_CR1_FUNC_OFF;
+ writel_relaxed(v, pl_regs + PCIE_ATU_CR1);
+
+ v = PCIE_ATU_CR2_REGION_EN |
+ PCIE_ATU_CR2_BAR_EN |
+ (bar << PCIE_ATU_CR2_BAR_OFF);
+ writel_relaxed(v, pl_regs + PCIE_ATU_CR2);
+ dev_dbg(ep->dev, "func%d: BAR%d map size@addr %llx@%llx\n",
+ func_id, bar, addr, size);
+}
+EXPORT_SYMBOL(armada_pcie_ep_bar_map);
+
+void *armada_pcie_ep_get(void)
+{
+ return (void *)armada_ep;
+}
+EXPORT_SYMBOL(armada_pcie_ep_get);
+
+static int armada_pcie_ep_probe(struct platform_device *pdev)
+{
+ struct armada_pcie_ep *ep;
+ struct device *dev = &pdev->dev;
+ struct resource *base;
+ void __iomem *p;
+ int ret = 0;
+
+ ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
+ if (!ep)
+ return -ENOMEM;
+
+ ep->clk = devm_clk_get(dev, NULL);
+ if (PTR_ERR(ep->clk) == -EPROBE_DEFER) {
+ dev_info(dev, "PCIE EP probe deferred\n");
+ return -EPROBE_DEFER;
+ }
+ if (IS_ERR(ep->clk)) {
+ dev_err(dev, "can't find clock node\n");
+ return -ENODEV;
+ }
+
+ ret = clk_prepare_enable(ep->clk);
+ if (ret) {
+ dev_err(dev, "couldn't enable clock\n");
+ return ret;
+ }
+
+ ep->dev = dev;
+ platform_set_drvdata(pdev, ep);
+
+ /* Get registers bases and remap */
+ base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lm");
+ p = devm_ioremap_resource(dev, base);
+ if (IS_ERR(p)) {
+ dev_err(dev, "couldn't remap lm regs base %pR\n", base);
+ return PTR_ERR(p);
+ }
+ ep->lm_regs = p;
+ dev_dbg(dev, "reg-%s va:pa(sz) %llx:%llx(%llx)\n",
+ "lm ", (phys_addr_t)p, base->start,
+ base->end - base->start);
+
+ base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core");
+ p = devm_ioremap_resource(dev, base);
+ if (IS_ERR(p)) {
+ dev_err(dev, "couldn't remap core regs base %pR\n", base);
+ return PTR_ERR(p);
+ }
+ ep->regs = p;
+ ep->pl_regs = p;
+ dev_dbg(dev, "reg-%s va:pa(sz) %llx:%llx(%llx)\n",
+ "core ", (phys_addr_t)p, base->start,
+ base->end - base->start);
+
+ base = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "shadow_core");
+ p = devm_ioremap_resource(dev, base);
+ if (IS_ERR(p)) {
+ dev_err(dev, "%s: couldn't remap shadow regs base %pR\n",
+ MODULE_NAME, base);
+ return PTR_ERR(p);
+ }
+ ep->shadow_regs = p;
+ dev_dbg(dev, "reg-%s va:pa(sz) %llx:%llx(%llx)\n",
+ "shadow", (phys_addr_t)p, base->start,
+ base->end - base->start);
+
+ armada_ep = ep;
+
+ return 0;
+}
+
+static const struct of_device_id armada_pcie_ep_of_match[] = {
+ { .compatible = "marvell,armada-pcie-ep", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, armada_pcie_ep_of_match);
+
+static struct platform_driver armada_pcie_ep_driver = {
+ .probe = armada_pcie_ep_probe,
+ .driver = {
+ .name = "armada-pcie-ep",
+ .of_match_table = of_match_ptr(armada_pcie_ep_of_match),
+ },
+};
+
+module_platform_driver(armada_pcie_ep_driver);
+
+MODULE_DESCRIPTION("Armada PCIe EP controller driver");
+MODULE_AUTHOR("Gang Chen <gangc@marvell.com>");
+MODULE_AUTHOR("Yehuda Yitshcak <yehuday@marvell.com>");
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 3779b264dbec..a785019eb46b 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -221,7 +221,16 @@ static int get_port_device_capability(struct pci_dev *dev)
}
#ifdef CONFIG_PCIEAER
+ /*
+ * Some AER interrupt capability registers may not be present on
+ * non-Root Ports, and there is no way to check for the presence of
+ * the ROOT_ERR_COMMAND and ROOT_ERR_STATUS registers, so allow the
+ * AER service only on Root Ports (refer PCIe r5.0 v1.0 spec, 7.8.4).
+ * Otherwise the AER interrupt message number is read incorrectly,
+ * causing MSI-X vector registration to fail and fall back to legacy
+ * interrupts.
+ */
if (dev->aer_cap && pci_aer_available() &&
+ pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT &&
(pcie_ports_native || host->native_aer)) {
services |= PCIE_PORT_SERVICE_AER;
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index ece90a23936d..62797b6d803e 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1362,6 +1362,21 @@ static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
/* We need to blast all three values with a single write */
pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);
+ /* When the Marvell PCI bridge links up at Gen1 speed, a
+ * back-to-back write to the primary bus register followed by an
+ * immediate scan for devices on the secondary bus will not reach
+ * the end-point devices. Before the write takes effect in
+ * hardware, a read of the vendor & device ID on the endpoint may
+ * return 0xffff, as the bus numbers were set to 0 by the earlier
+ * write to the primary bus register.
+ * To work around this issue, perform a read of the primary bus
+ * register after the write, which ensures the write has gone
+ * through; do this only for this bridge.
+ */
+ if (dev->vendor == PCI_VENDOR_ID_CAVIUM && dev->device == 0xa02d)
+ pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
+
if (!is_cardbus) {
child->bridge_ctl = bctl;
max = pci_scan_child_bus_extend(child, available_buses);
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 1be2894ada70..d274e07b3386 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -4497,7 +4497,7 @@ static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags)
static bool pci_quirk_cavium_acs_match(struct pci_dev *dev)
{
- if (!pci_is_pcie(dev) || pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
+ if (!pci_is_pcie(dev))
return false;
switch (dev->device) {
@@ -5777,3 +5777,42 @@ static void nvidia_ion_ahci_fixup(struct pci_dev *pdev)
pdev->dev_flags |= PCI_DEV_FLAGS_HAS_MSI_MASKING;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0ab8, nvidia_ion_ahci_fixup);
+
+/* Marvell cnf10ka (0xba00) requires a fix for the device at slot 0xe, func 0x0.
+ * Wrong values for BAR0 and BAR4 are fetched from config space.
+ * Some devices do not require fixing, so the fix is not always applied;
+ * the deciding factor is the current value of BAR0/BAR4.
+ * Config space for cnf10ka is read-only, so changing the registers isn't
+ * possible.
+ */
+#define CAVIUM_XCP0_ADDR_OK 0x000082c000000000ULL /* Correct PCI BAR base */
+#define CAVIUM_XCP0_FIX_MASK 0xffffffff00000000ULL
+#define CAVIUM_XCP0_SHOULD_FIX(addr) \
+ (((addr) & CAVIUM_XCP0_FIX_MASK) != CAVIUM_XCP0_ADDR_OK)
+#define CAVIUM_XCP0_FIX_ADDR(addr) \
+ (((addr) & (~CAVIUM_XCP0_FIX_MASK)) | CAVIUM_XCP0_ADDR_OK)
+
+static void quirk_cavium_xcp0_bar_fixup(struct pci_dev *dev)
+{
+ int i;
+
+ if (dev->subsystem_device == 0xba00 && dev->devfn == 0xe0) {
+ for (i = 0; i < PCI_STD_RESOURCE_END; i++) {
+
+ struct resource *r = &dev->resource[i];
+ int ret;
+
+ if (!(r->flags & IORESOURCE_MEM))
+ continue;
+
+ /* There are HW revisions that do not need the fixup */
+ if (CAVIUM_XCP0_SHOULD_FIX(r->start)) {
+ r->start = CAVIUM_XCP0_FIX_ADDR(r->start);
+ r->end = CAVIUM_XCP0_FIX_ADDR(r->end);
+ ret = pci_claim_resource(dev, i);
+ pci_info(dev, "Fixup (%d) %llx - %llx/%lx. (%d)\n",
+ i, r->start, r->end, r->flags, ret);
+ }
+ }
+ }
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CAVIUM, 0xa067, quirk_cavium_xcp0_bar_fixup);
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index 130327ff0b0e..e9aed072d8f0 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -62,7 +62,7 @@ config ARM_PMU_ACPI
config ARM_SMMU_V3_PMU
tristate "ARM SMMUv3 Performance Monitors Extension"
- depends on ARM64 && ACPI && ARM_SMMU_V3
+ depends on ARM64 && ARM_SMMU_V3
help
Provides support for the ARM SMMUv3 Performance Monitor Counter
Groups (PMCG), which provide monitoring of transactions passing
@@ -130,6 +130,19 @@ config ARM_SPE_PMU
Extension, which provides periodic sampling of operations in
the CPU pipeline and reports this via the perf AUX interface.
+config MARVELL_CN10K_TAD_PMU
+ tristate "Marvell CN10K LLC-TAD PMU"
+ depends on ARM64
+ help
+ Provides support for Last-Level cache Tag-and-data Units (LLC-TAD)
+ performance monitors on CN10K family silicons.
+
+config MARVELL_CN10K_DDR_PMU
+ tristate "Marvell CN10K DRAM Subsystem (DSS) PMU support"
+ depends on ARM64
+ help
+ Enable support for the Marvell DRAM Subsystem (DSS) performance
+ monitor events on CN10K platforms.
+
source "drivers/perf/hisilicon/Kconfig"
endmenu
diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
index 5365fd56f88f..99a89a7afc6c 100644
--- a/drivers/perf/Makefile
+++ b/drivers/perf/Makefile
@@ -13,3 +13,5 @@ obj-$(CONFIG_QCOM_L3_PMU) += qcom_l3_pmu.o
obj-$(CONFIG_THUNDERX2_PMU) += thunderx2_pmu.o
obj-$(CONFIG_XGENE_PMU) += xgene_pmu.o
obj-$(CONFIG_ARM_SPE_PMU) += arm_spe_pmu.o
+obj-$(CONFIG_MARVELL_CN10K_TAD_PMU) += marvell_cn10k_tad_pmu.o
+obj-$(CONFIG_MARVELL_CN10K_DDR_PMU) += marvell_cn10k_ddr_pmu.o
diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c
index afa8efbdad8f..730c8bb0bf58 100644
--- a/drivers/perf/arm_smmuv3_pmu.c
+++ b/drivers/perf/arm_smmuv3_pmu.c
@@ -804,7 +804,8 @@ static int smmu_pmu_probe(struct platform_device *pdev)
return -EINVAL;
}
- smmu_pmu_get_acpi_options(smmu_pmu);
+ if (!dev->of_node)
+ smmu_pmu_get_acpi_options(smmu_pmu);
/* Pick one CPU to be the preferred one to use */
smmu_pmu->on_cpu = raw_smp_processor_id();
@@ -858,9 +859,16 @@ static void smmu_pmu_shutdown(struct platform_device *pdev)
smmu_pmu_disable(&smmu_pmu->pmu);
}
+static const struct of_device_id smmu_pmu_of_match[] = {
+ { .compatible = "arm,smmu-pmu-v3", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, smmu_pmu_of_match);
+
static struct platform_driver smmu_pmu_driver = {
.driver = {
.name = "arm-smmu-v3-pmcg",
+ .of_match_table = of_match_ptr(smmu_pmu_of_match),
.suppress_bind_attrs = true,
},
.probe = smmu_pmu_probe,
diff --git a/drivers/perf/marvell_cn10k_ddr_pmu.c b/drivers/perf/marvell_cn10k_ddr_pmu.c
new file mode 100644
index 000000000000..3890bfe10c6f
--- /dev/null
+++ b/drivers/perf/marvell_cn10k_ddr_pmu.c
@@ -0,0 +1,766 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell CN10K DRAM Subsystem (DSS) Performance Monitor Driver
+ *
+ * Copyright (C) 2021 Marvell.
+ */
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/perf_event.h>
+#include <linux/hrtimer.h>
+
+/* Performance Counters Operating Mode Control Registers */
+#define DDRC_PERF_CNT_OP_MODE_CTRL 0x8020
+#define OP_MODE_CTRL_VAL_MANNUAL 0x1
+
+/* Performance Counters Start Operation Control Registers */
+#define DDRC_PERF_CNT_START_OP_CTRL 0x8028
+#define START_OP_CTRL_VAL_START 0x1ULL
+#define START_OP_CTRL_VAL_ACTIVE 0x2
+
+/* Performance Counters End Operation Control Registers */
+#define DDRC_PERF_CNT_END_OP_CTRL 0x8030
+#define END_OP_CTRL_VAL_END 0x1ULL
+
+/* Performance Counters End Status Registers */
+#define DDRC_PERF_CNT_END_STATUS 0x8038
+#define END_STATUS_VAL_END_TIMER_MODE_END 0x1
+
+/* Performance Counters Configuration Registers */
+#define DDRC_PERF_CFG_BASE 0x8040
+
+/* 8 Generic event counter + 2 fixed event counters */
+#define DDRC_PERF_NUM_GEN_COUNTERS 8
+#define DDRC_PERF_NUM_FIX_COUNTERS 2
+#define DDRC_PERF_READ_COUNTER_IDX DDRC_PERF_NUM_GEN_COUNTERS
+#define DDRC_PERF_WRITE_COUNTER_IDX (DDRC_PERF_NUM_GEN_COUNTERS + 1)
+#define DDRC_PERF_NUM_COUNTERS (DDRC_PERF_NUM_GEN_COUNTERS + \
+ DDRC_PERF_NUM_FIX_COUNTERS)
+
+/* Generic event counter registers */
+#define DDRC_PERF_CFG(n) (DDRC_PERF_CFG_BASE + 8 * (n))
+#define EVENT_ENABLE BIT_ULL(63)
+
+/* Two dedicated event counters for DDR reads and writes */
+#define EVENT_DDR_READS 101
+#define EVENT_DDR_WRITES 100
+
+/*
+ * Programmable event IDs for the programmable event counters.
+ * DO NOT change these event-id numbers; they are used to
+ * program the event bitmap in h/w.
+ */
+#define EVENT_OP_IS_ZQLATCH 55
+#define EVENT_OP_IS_ZQSTART 54
+#define EVENT_OP_IS_TCR_MRR 53
+#define EVENT_OP_IS_DQSOSC_MRR 52
+#define EVENT_OP_IS_DQSOSC_MPC 51
+#define EVENT_VISIBLE_WIN_LIMIT_REACHED_WR 50
+#define EVENT_VISIBLE_WIN_LIMIT_REACHED_RD 49
+#define EVENT_BSM_STARVATION 48
+#define EVENT_BSM_ALLOC 47
+#define EVENT_LPR_REQ_WITH_NOCREDIT 46
+#define EVENT_HPR_REQ_WITH_NOCREDIT 45
+#define EVENT_OP_IS_ZQCS 44
+#define EVENT_OP_IS_ZQCL 43
+#define EVENT_OP_IS_LOAD_MODE 42
+#define EVENT_OP_IS_SPEC_REF 41
+#define EVENT_OP_IS_CRIT_REF 40
+#define EVENT_OP_IS_REFRESH 39
+#define EVENT_OP_IS_ENTER_MPSM 35
+#define EVENT_OP_IS_ENTER_POWERDOWN 31
+#define EVENT_OP_IS_ENTER_SELFREF 27
+#define EVENT_WAW_HAZARD 26
+#define EVENT_RAW_HAZARD 25
+#define EVENT_WAR_HAZARD 24
+#define EVENT_WRITE_COMBINE 23
+#define EVENT_RDWR_TRANSITIONS 22
+#define EVENT_PRECHARGE_FOR_OTHER 21
+#define EVENT_PRECHARGE_FOR_RDWR 20
+#define EVENT_OP_IS_PRECHARGE 19
+#define EVENT_OP_IS_MWR 18
+#define EVENT_OP_IS_WR 17
+#define EVENT_OP_IS_RD 16
+#define EVENT_OP_IS_RD_ACTIVATE 15
+#define EVENT_OP_IS_RD_OR_WR 14
+#define EVENT_OP_IS_ACTIVATE 13
+#define EVENT_WR_XACT_WHEN_CRITICAL 12
+#define EVENT_LPR_XACT_WHEN_CRITICAL 11
+#define EVENT_HPR_XACT_WHEN_CRITICAL 10
+#define EVENT_DFI_RD_DATA_CYCLES 9
+#define EVENT_DFI_WR_DATA_CYCLES 8
+#define EVENT_ACT_BYPASS 7
+#define EVENT_READ_BYPASS 6
+#define EVENT_HIF_HI_PRI_RD 5
+#define EVENT_HIF_RMW 4
+#define EVENT_HIF_RD 3
+#define EVENT_HIF_WR 2
+#define EVENT_HIF_RD_OR_WR 1
+
+/* Event counter value registers */
+#define DDRC_PERF_CNT_VALUE_BASE 0x8080
+#define DDRC_PERF_CNT_VALUE(n) (DDRC_PERF_CNT_VALUE_BASE + 8 * (n))
+
+/* Fixed event counter enable/disable register */
+#define DDRC_PERF_CNT_FREERUN_EN 0x80C0
+#define DDRC_PERF_FREERUN_WRITE_EN 0x1
+#define DDRC_PERF_FREERUN_READ_EN 0x2
+
+/* Fixed event counter control register */
+#define DDRC_PERF_CNT_FREERUN_CTRL 0x80C8
+#define DDRC_FREERUN_WRITE_CNT_CLR 0x1
+#define DDRC_FREERUN_READ_CNT_CLR 0x2
+
+/* Fixed event counter value register */
+#define DDRC_PERF_CNT_VALUE_WR_OP 0x80D0
+#define DDRC_PERF_CNT_VALUE_RD_OP 0x80D8
+#define DDRC_PERF_CNT_VALUE_OVERFLOW BIT_ULL(48)
+#define DDRC_PERF_CNT_MAX_VALUE GENMASK_ULL(48, 0)
+
+struct cn10k_ddr_pmu {
+ struct pmu pmu;
+ int id;
+ void __iomem *base;
+ unsigned int cpu;
+ struct device *dev;
+ int active_events;
+ struct perf_event *events[DDRC_PERF_NUM_COUNTERS];
+ struct hrtimer hrtimer;
+ struct hlist_node node;
+};
+
+#define to_cn10k_ddr_pmu(p) container_of(p, struct cn10k_ddr_pmu, pmu)
+
+static ssize_t cn10k_ddr_pmu_event_show(struct device *dev,
+ struct device_attribute *attr,
+ char *page)
+{
+ struct perf_pmu_events_attr *pmu_attr;
+
+ pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
+ return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
+}
+
+#define CN10K_DDR_PMU_EVENT_ATTR(_name, _id) \
+ (&((struct perf_pmu_events_attr[]) { \
+ { .attr = __ATTR(_name, 0444, cn10k_ddr_pmu_event_show, NULL), \
+ .id = _id, } \
+ })[0].attr.attr)
+
+static struct attribute *cn10k_ddr_perf_events_attrs[] = {
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_rd_or_wr_access, EVENT_HIF_RD_OR_WR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_wr_access, EVENT_HIF_WR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_rd_access, EVENT_HIF_RD),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_rmw_access, EVENT_HIF_RMW),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_pri_rdaccess, EVENT_HIF_HI_PRI_RD),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_rd_bypass_access, EVENT_READ_BYPASS),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_act_bypass_access, EVENT_ACT_BYPASS),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_dif_wr_data_access, EVENT_DFI_WR_DATA_CYCLES),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_dif_rd_data_access, EVENT_DFI_RD_DATA_CYCLES),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_hpri_sched_rd_crit_access,
+ EVENT_HPR_XACT_WHEN_CRITICAL),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_lpri_sched_rd_crit_access,
+ EVENT_LPR_XACT_WHEN_CRITICAL),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_wr_trxn_crit_access,
+ EVENT_WR_XACT_WHEN_CRITICAL),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_active_access, EVENT_OP_IS_ACTIVATE),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_rd_or_wr_access, EVENT_OP_IS_RD_OR_WR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_rd_active_access, EVENT_OP_IS_RD_ACTIVATE),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_read, EVENT_OP_IS_RD),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_write, EVENT_OP_IS_WR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_mwr, EVENT_OP_IS_MWR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_precharge, EVENT_OP_IS_PRECHARGE),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_precharge_for_rdwr, EVENT_PRECHARGE_FOR_RDWR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_precharge_for_other,
+ EVENT_PRECHARGE_FOR_OTHER),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_rdwr_transitions, EVENT_RDWR_TRANSITIONS),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_write_combine, EVENT_WRITE_COMBINE),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_war_hazard, EVENT_WAR_HAZARD),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_raw_hazard, EVENT_RAW_HAZARD),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_waw_hazard, EVENT_WAW_HAZARD),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_enter_selfref, EVENT_OP_IS_ENTER_SELFREF),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_enter_powerdown, EVENT_OP_IS_ENTER_POWERDOWN),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_enter_mpsm, EVENT_OP_IS_ENTER_MPSM),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_refresh, EVENT_OP_IS_REFRESH),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_crit_ref, EVENT_OP_IS_CRIT_REF),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_spec_ref, EVENT_OP_IS_SPEC_REF),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_load_mode, EVENT_OP_IS_LOAD_MODE),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_zqcl, EVENT_OP_IS_ZQCL),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_wr_access, EVENT_OP_IS_ZQCS),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_hpr_req_with_nocredit,
+ EVENT_HPR_REQ_WITH_NOCREDIT),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_lpr_req_with_nocredit,
+ EVENT_LPR_REQ_WITH_NOCREDIT),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_bsm_alloc, EVENT_BSM_ALLOC),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_bsm_starvation, EVENT_BSM_STARVATION),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_win_limit_reached_rd,
+ EVENT_VISIBLE_WIN_LIMIT_REACHED_RD),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_win_limit_reached_wr,
+ EVENT_VISIBLE_WIN_LIMIT_REACHED_WR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_dqsosc_mpc, EVENT_OP_IS_DQSOSC_MPC),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_dqsosc_mrr, EVENT_OP_IS_DQSOSC_MRR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_tcr_mrr, EVENT_OP_IS_TCR_MRR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_zqstart, EVENT_OP_IS_ZQSTART),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_zqlatch, EVENT_OP_IS_ZQLATCH),
+ /* Free run event counters */
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_ddr_reads, EVENT_DDR_READS),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_ddr_writes, EVENT_DDR_WRITES),
+ NULL,
+};
+
+static struct attribute_group cn10k_ddr_perf_events_attr_group = {
+ .name = "events",
+ .attrs = cn10k_ddr_perf_events_attrs,
+};
+
+PMU_FORMAT_ATTR(event, "config:0-8");
+
+static struct attribute *cn10k_ddr_perf_format_attrs[] = {
+ &format_attr_event.attr,
+ NULL,
+};
+
+static struct attribute_group cn10k_ddr_perf_format_attr_group = {
+ .name = "format",
+ .attrs = cn10k_ddr_perf_format_attrs,
+};
+
+static ssize_t cn10k_ddr_perf_cpumask_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cn10k_ddr_pmu *pmu = dev_get_drvdata(dev);
+
+ return cpumap_print_to_pagebuf(true, buf, cpumask_of(pmu->cpu));
+}
+
+static struct device_attribute cn10k_ddr_perf_cpumask_attr =
+ __ATTR(cpumask, 0444, cn10k_ddr_perf_cpumask_show, NULL);
+
+static struct attribute *cn10k_ddr_perf_cpumask_attrs[] = {
+ &cn10k_ddr_perf_cpumask_attr.attr,
+ NULL,
+};
+
+static struct attribute_group cn10k_ddr_perf_cpumask_attr_group = {
+ .attrs = cn10k_ddr_perf_cpumask_attrs,
+};
+
+static const struct attribute_group *cn10k_attr_groups[] = {
+ &cn10k_ddr_perf_events_attr_group,
+ &cn10k_ddr_perf_format_attr_group,
+ &cn10k_ddr_perf_cpumask_attr_group,
+ NULL,
+};
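+
+/*
+ * Usage sketch (illustration only): the events above appear under
+ * /sys/bus/event_source/devices/<pmu>/events/ once the PMU is registered
+ * (the instance name is assigned later in this driver and is not shown
+ * here), e.g.:
+ *
+ *   perf stat -e <pmu>/ddr_hif_rd_or_wr_access/ -a sleep 1
+ */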
+
+/* The default poll period is 100 sec. The 48-bit counter per DDR
+ * controller, incremented at a maximum of 5.6 GT/s, takes many hours
+ * to overflow, so this is more than sufficient.
+ */
+static unsigned long cn10k_ddr_pmu_poll_period_sec = 100;
+module_param_named(poll_period_sec, cn10k_ddr_pmu_poll_period_sec, ulong, 0644);
+
+static ktime_t cn10k_ddr_pmu_timer_period(void)
+{
+ return ms_to_ktime((u64)cn10k_ddr_pmu_poll_period_sec * 1000000ULL);
+}
+
+static uint64_t ddr_perf_get_event_bitmap(int eventid)
+{
+ uint64_t event_bitmap = 0;
+
+ switch (eventid) {
+ case EVENT_HIF_RD_OR_WR ... EVENT_WAW_HAZARD:
+ case EVENT_OP_IS_REFRESH ... EVENT_OP_IS_ZQLATCH:
+ event_bitmap = (1ULL << (eventid - 1));
+ break;
+
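+ /* Mode-entry events program a 4-bit wide field in the event bitmap */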
+ case EVENT_OP_IS_ENTER_SELFREF:
+ case EVENT_OP_IS_ENTER_POWERDOWN:
+ case EVENT_OP_IS_ENTER_MPSM:
+ event_bitmap = (0xFULL << (eventid - 1));
+ break;
+ default:
+ pr_err("%s Invalid eventid %d\n", __func__, eventid);
+ break;
+ }
+
+ return event_bitmap;
+}
+
+static int cn10k_ddr_perf_alloc_counter(struct cn10k_ddr_pmu *pmu,
+ struct perf_event *event)
+{
+ uint8_t config = event->attr.config;
+ int i;
+
+ /* DDR read free-run counter index */
+ if (config == EVENT_DDR_READS) {
+ pmu->events[DDRC_PERF_READ_COUNTER_IDX] = event;
+ return DDRC_PERF_READ_COUNTER_IDX;
+ }
+
+ /* DDR write free-run counter index */
+ if (config == EVENT_DDR_WRITES) {
+ pmu->events[DDRC_PERF_WRITE_COUNTER_IDX] = event;
+ return DDRC_PERF_WRITE_COUNTER_IDX;
+ }
+
+ /* Allocate DDR generic counters */
+ for (i = 0; i < DDRC_PERF_NUM_GEN_COUNTERS; i++) {
+ if (pmu->events[i] == NULL) {
+ pmu->events[i] = event;
+ return i;
+ }
+ }
+
+ return -ENOENT;
+}
+
+static void cn10k_ddr_perf_free_counter(struct cn10k_ddr_pmu *pmu, int counter)
+{
+ pmu->events[counter] = NULL;
+}
+
+static int cn10k_ddr_perf_event_init(struct perf_event *event)
+{
+ struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ if (is_sampling_event(event)) {
+ dev_info(pmu->dev, "Sampling not supported!\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (event->cpu < 0) {
+ dev_warn(pmu->dev, "Can't provide per-task data!\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* We must NOT create groups containing mixed PMUs */
+ if (event->group_leader->pmu != event->pmu &&
+ !is_software_event(event->group_leader))
+ return -EINVAL;
+
+ /* Set ownership of the event to one CPU; the same event cannot be
+ * observed on multiple CPUs at the same time.
+ */
+ event->cpu = pmu->cpu;
+ hwc->idx = -1;
+ return 0;
+}
+
+static void cn10k_ddr_perf_counter_enable(struct cn10k_ddr_pmu *pmu,
+ int counter, bool enable)
+{
+ uint32_t reg;
+ uint64_t val;
+
+ if (counter > DDRC_PERF_NUM_COUNTERS) {
+ pr_err("Error: unsupported counter %d\n", counter);
+ return;
+ }
+
+ if (counter < DDRC_PERF_NUM_GEN_COUNTERS) {
+ reg = DDRC_PERF_CFG(counter);
+ val = readq(pmu->base + reg);
+
+ if (enable)
+ val |= EVENT_ENABLE;
+ else
+ val &= ~EVENT_ENABLE;
+
+ writeq(val, pmu->base + reg);
+ } else {
+ val = readq(pmu->base + DDRC_PERF_CNT_FREERUN_EN);
+ if (enable) {
+ if (counter == DDRC_PERF_READ_COUNTER_IDX)
+ val |= DDRC_PERF_FREERUN_READ_EN;
+ else
+ val |= DDRC_PERF_FREERUN_WRITE_EN;
+ } else {
+ if (counter == DDRC_PERF_READ_COUNTER_IDX)
+ val &= ~DDRC_PERF_FREERUN_READ_EN;
+ else
+ val &= ~DDRC_PERF_FREERUN_WRITE_EN;
+ }
+ writeq(val, pmu->base + DDRC_PERF_CNT_FREERUN_EN);
+ }
+}
+
+static uint64_t cn10k_ddr_perf_read_counter(struct cn10k_ddr_pmu *pmu,
+ int counter)
+{
+ uint64_t val;
+
+ if (counter == DDRC_PERF_READ_COUNTER_IDX)
+ return readq(pmu->base + DDRC_PERF_CNT_VALUE_RD_OP);
+
+ if (counter == DDRC_PERF_WRITE_COUNTER_IDX)
+ return readq(pmu->base + DDRC_PERF_CNT_VALUE_WR_OP);
+
+ val = readq(pmu->base + DDRC_PERF_CNT_VALUE(counter));
+ return val;
+}
+
+static void cn10k_ddr_perf_event_update(struct perf_event *event)
+{
+ struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ uint64_t prev_count, new_count, mask;
+
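+ /* Retry if prev_count was updated concurrently between the read and the exchange */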
+ do {
+ prev_count = local64_read(&hwc->prev_count);
+ new_count = cn10k_ddr_perf_read_counter(pmu, hwc->idx);
+ } while (local64_xchg(&hwc->prev_count, new_count) != prev_count);
+
+ mask = DDRC_PERF_CNT_MAX_VALUE;
+
+ local64_add((new_count - prev_count) & mask, &event->count);
+}
+
+static void cn10k_ddr_perf_event_start(struct perf_event *event, int flags)
+{
+ struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int counter = hwc->idx;
+
+ local64_set(&hwc->prev_count, 0);
+
+ cn10k_ddr_perf_counter_enable(pmu, counter, true);
+
+ hwc->state = 0;
+}
+
+static int cn10k_ddr_perf_event_add(struct perf_event *event, int flags)
+{
+ struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ uint8_t config = event->attr.config;
+ uint32_t reg_offset;
+ uint64_t val;
+ int counter;
+
+ counter = cn10k_ddr_perf_alloc_counter(pmu, event);
+ if (counter < 0) {
+ dev_dbg(pmu->dev, "There are not enough counters\n");
+ return -EOPNOTSUPP;
+ }
+
+ pmu->active_events++;
+ hwc->idx = counter;
+
+ if (pmu->active_events == 1)
+ hrtimer_start(&pmu->hrtimer, cn10k_ddr_pmu_timer_period(),
+ HRTIMER_MODE_REL_PINNED);
+
+ if (counter < DDRC_PERF_NUM_GEN_COUNTERS) {
+ /* Generic counters, configure event id */
+ reg_offset = DDRC_PERF_CFG(counter);
+ val = ddr_perf_get_event_bitmap(config);
+ writeq(val, pmu->base + reg_offset);
+ } else {
+ /* fixed event counter, clear counter value */
+ if (counter == DDRC_PERF_READ_COUNTER_IDX)
+ val = DDRC_FREERUN_READ_CNT_CLR;
+ else
+ val = DDRC_FREERUN_WRITE_CNT_CLR;
+
+ writeq(val, pmu->base + DDRC_PERF_CNT_FREERUN_CTRL);
+ }
+
+ hwc->state |= PERF_HES_STOPPED;
+
+ if (flags & PERF_EF_START)
+ cn10k_ddr_perf_event_start(event, flags);
+
+ return 0;
+}
+
+static void cn10k_ddr_perf_event_stop(struct perf_event *event, int flags)
+{
+ struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int counter = hwc->idx;
+
+ cn10k_ddr_perf_counter_enable(pmu, counter, false);
+
+ if (flags & PERF_EF_UPDATE)
+ cn10k_ddr_perf_event_update(event);
+
+ hwc->state |= PERF_HES_STOPPED;
+}
+
+static void cn10k_ddr_perf_event_del(struct perf_event *event, int flags)
+{
+ struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int counter = hwc->idx;
+
+ cn10k_ddr_perf_event_stop(event, PERF_EF_UPDATE);
+
+ cn10k_ddr_perf_free_counter(pmu, counter);
+ pmu->active_events--;
+ hwc->idx = -1;
+
+ /* Cancel timer when no events to capture */
+ if (pmu->active_events == 0)
+ hrtimer_cancel(&pmu->hrtimer);
+}
+
+static void cn10k_ddr_perf_pmu_enable(struct pmu *pmu)
+{
+ struct cn10k_ddr_pmu *ddr_pmu = to_cn10k_ddr_pmu(pmu);
+
+ writeq(START_OP_CTRL_VAL_START, ddr_pmu->base +
+ DDRC_PERF_CNT_START_OP_CTRL);
+}
+
+static void cn10k_ddr_perf_pmu_disable(struct pmu *pmu)
+{
+ struct cn10k_ddr_pmu *ddr_pmu = to_cn10k_ddr_pmu(pmu);
+
+ writeq(END_OP_CTRL_VAL_END, ddr_pmu->base +
+ DDRC_PERF_CNT_END_OP_CTRL);
+}
+
+static void cn10k_ddr_perf_event_update_all(struct cn10k_ddr_pmu *pmu)
+{
+ struct hw_perf_event *hwc;
+ int i;
+
+ for (i = 0; i < DDRC_PERF_NUM_GEN_COUNTERS; i++) {
+ if (pmu->events[i] == NULL)
+ continue;
+
+ cn10k_ddr_perf_event_update(pmu->events[i]);
+ }
+
+	/* Reset previous count as h/w counters are reset */
+ for (i = 0; i < DDRC_PERF_NUM_GEN_COUNTERS; i++) {
+ if (pmu->events[i] == NULL)
+ continue;
+
+ hwc = &pmu->events[i]->hw;
+ local64_set(&hwc->prev_count, 0);
+ }
+}
+
+static irqreturn_t cn10k_ddr_pmu_overflow_handler(struct cn10k_ddr_pmu *pmu)
+{
+ struct perf_event *event;
+ struct hw_perf_event *hwc;
+ uint64_t prev_count, new_count;
+ uint64_t value;
+ int i;
+
+ event = pmu->events[DDRC_PERF_READ_COUNTER_IDX];
+ if (event) {
+ hwc = &event->hw;
+ prev_count = local64_read(&hwc->prev_count);
+ new_count = cn10k_ddr_perf_read_counter(pmu, hwc->idx);
+
+		/* The overflow condition is when the new count is
+		 * less than the previous count
+		 */
+ if (new_count < prev_count)
+ cn10k_ddr_perf_event_update(event);
+ }
+
+ event = pmu->events[DDRC_PERF_WRITE_COUNTER_IDX];
+ if (event) {
+ hwc = &event->hw;
+ prev_count = local64_read(&hwc->prev_count);
+ new_count = cn10k_ddr_perf_read_counter(pmu, hwc->idx);
+
+		/* The overflow condition is when the new count is
+		 * less than the previous count
+		 */
+ if (new_count < prev_count)
+ cn10k_ddr_perf_event_update(event);
+ }
+
+ for (i = 0; i < DDRC_PERF_NUM_GEN_COUNTERS; i++) {
+ if (pmu->events[i] == NULL)
+ continue;
+
+ value = cn10k_ddr_perf_read_counter(pmu, i);
+ if (value == DDRC_PERF_CNT_MAX_VALUE) {
+ pr_info("Counter-(%d) reached max value\n", i);
+ cn10k_ddr_perf_event_update_all(pmu);
+ cn10k_ddr_perf_pmu_disable(&pmu->pmu);
+ cn10k_ddr_perf_pmu_enable(&pmu->pmu);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static enum hrtimer_restart cn10k_ddr_pmu_timer_handler(struct hrtimer *hrtimer)
+{
+ struct cn10k_ddr_pmu *pmu = container_of(hrtimer, struct cn10k_ddr_pmu,
+ hrtimer);
+ unsigned long flags;
+
+ local_irq_save(flags);
+ cn10k_ddr_pmu_overflow_handler(pmu);
+ local_irq_restore(flags);
+
+ hrtimer_forward_now(hrtimer, cn10k_ddr_pmu_timer_period());
+ return HRTIMER_RESTART;
+}
+
+static int cn10k_ddr_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
+{
+ struct cn10k_ddr_pmu *pmu = hlist_entry_safe(node, struct cn10k_ddr_pmu,
+ node);
+ unsigned int target;
+
+ if (cpu != pmu->cpu)
+ return 0;
+
+ target = cpumask_any_but(cpu_online_mask, cpu);
+ if (target >= nr_cpu_ids)
+ return 0;
+
+ perf_pmu_migrate_context(&pmu->pmu, cpu, target);
+ pmu->cpu = target;
+ return 0;
+}
+
+static int cn10k_ddr_perf_probe(struct platform_device *pdev)
+{
+ struct cn10k_ddr_pmu *ddr_pmu;
+ struct resource *res;
+ void __iomem *base;
+ static int index;
+ char *name;
+ int ret;
+
+ ddr_pmu = devm_kzalloc(&pdev->dev, sizeof(*ddr_pmu), GFP_KERNEL);
+ if (!ddr_pmu)
+ return -ENOMEM;
+
+ ddr_pmu->dev = &pdev->dev;
+ platform_set_drvdata(pdev, ddr_pmu);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ ddr_pmu->base = base;
+
+	/* Set up the PMU counters to work in manual mode */
+ writeq(OP_MODE_CTRL_VAL_MANNUAL, ddr_pmu->base +
+ DDRC_PERF_CNT_OP_MODE_CTRL);
+
+ ddr_pmu->pmu = (struct pmu) {
+ .module = THIS_MODULE,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
+ .task_ctx_nr = perf_invalid_context,
+ .attr_groups = cn10k_attr_groups,
+ .event_init = cn10k_ddr_perf_event_init,
+ .add = cn10k_ddr_perf_event_add,
+ .del = cn10k_ddr_perf_event_del,
+ .start = cn10k_ddr_perf_event_start,
+ .stop = cn10k_ddr_perf_event_stop,
+ .read = cn10k_ddr_perf_event_update,
+ .pmu_enable = cn10k_ddr_perf_pmu_enable,
+ .pmu_disable = cn10k_ddr_perf_pmu_disable,
+ };
+
+ /* Choose this cpu to collect perf data */
+ ddr_pmu->cpu = raw_smp_processor_id();
+
+ name = devm_kasprintf(ddr_pmu->dev, GFP_KERNEL, "mrvl_ddr_pmu@%llx",
+ res->start);
+ if (!name)
+ return -ENOMEM;
+
+ hrtimer_init(&ddr_pmu->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ ddr_pmu->hrtimer.function = cn10k_ddr_pmu_timer_handler;
+
+ cpuhp_state_add_instance_nocalls(
+ CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE,
+ &ddr_pmu->node);
+
+ ret = perf_pmu_register(&ddr_pmu->pmu, name, -1);
+ if (ret)
+ goto error;
+
+ ddr_pmu->id = index++;
+ pr_info("CN10K DDR PMU Driver for ddrc@%llx - id-%d\n",
+ res->start, ddr_pmu->id);
+ return 0;
+error:
+ cpuhp_state_remove_instance_nocalls(
+ CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE,
+ &ddr_pmu->node);
+ return ret;
+}
+
+static int cn10k_ddr_perf_remove(struct platform_device *pdev)
+{
+ struct cn10k_ddr_pmu *ddr_pmu = platform_get_drvdata(pdev);
+
+ cpuhp_state_remove_instance_nocalls(
+ CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE,
+ &ddr_pmu->node);
+
+ perf_pmu_unregister(&ddr_pmu->pmu);
+ return 0;
+}
+
+static const struct of_device_id cn10k_ddr_pmu_of_match[] = {
+ { .compatible = "marvell,cn10k-ddr-pmu", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, cn10k_ddr_pmu_of_match);
+
+static struct platform_driver cn10k_ddr_pmu_driver = {
+ .driver = {
+ .name = "cn10k-ddr-pmu",
+ .of_match_table = cn10k_ddr_pmu_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = cn10k_ddr_perf_probe,
+ .remove = cn10k_ddr_perf_remove,
+};
+
+static int __init cn10k_ddr_pmu_init(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state_multi(
+ CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE,
+ "perf/marvell/cn10k/ddr:online", NULL,
+ cn10k_ddr_pmu_offline_cpu);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&cn10k_ddr_pmu_driver);
+ if (ret)
+ cpuhp_remove_multi_state(
+ CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE);
+ return ret;
+}
+
+static void __exit cn10k_ddr_pmu_exit(void)
+{
+ platform_driver_unregister(&cn10k_ddr_pmu_driver);
+ cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE);
+}
+
+module_init(cn10k_ddr_pmu_init);
+module_exit(cn10k_ddr_pmu_exit);
+
+MODULE_AUTHOR("Bharat Bhushan <bbhushan2@marvell.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/perf/marvell_cn10k_tad_pmu.c b/drivers/perf/marvell_cn10k_tad_pmu.c
new file mode 100644
index 000000000000..99878de481f0
--- /dev/null
+++ b/drivers/perf/marvell_cn10k_tad_pmu.c
@@ -0,0 +1,428 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell CN10K LLC-TAD perf driver
+ *
+ * Copyright (C) 2021 Marvell.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) "tad_pmu: " fmt
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/cpuhotplug.h>
+#include <linux/perf_event.h>
+#include <linux/platform_device.h>
+#include <linux/arm-smccc.h>
+
+#define TAD_PFC_OFFSET 0x0800
+#define TAD_PFC(counter) (TAD_PFC_OFFSET | (counter << 3))
+#define TAD_PRF_OFFSET 0x0900
+#define TAD_PRF(counter) (TAD_PRF_OFFSET | (counter << 3))
+#define TAD_PRF_CNTSEL_MASK 0xFF
+#define TAD_MAX_COUNTERS 8
+
+#define to_tad_pmu(p) (container_of(p, struct tad_pmu, pmu))
+
+struct tad_region {
+ void __iomem *base;
+};
+
+struct tad_pmu {
+ struct pmu pmu;
+ struct tad_region *regions;
+ u32 region_cnt;
+ unsigned int cpu;
+ struct hlist_node node;
+ struct perf_event *events[TAD_MAX_COUNTERS];
+ DECLARE_BITMAP(counters_map, TAD_MAX_COUNTERS);
+};
+
+static int tad_pmu_cpuhp_state;
+
+static void tad_pmu_event_counter_read(struct perf_event *event)
+{
+ struct tad_pmu *tad_pmu = to_tad_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ u32 counter_idx = hwc->idx;
+ u64 delta, prev, new;
+ int i;
+
+ do {
+ prev = local64_read(&hwc->prev_count);
+ for (i = 0, new = 0; i < tad_pmu->region_cnt; i++)
+ new += readq(tad_pmu->regions[i].base +
+ TAD_PFC(counter_idx));
+ } while (local64_cmpxchg(&hwc->prev_count, prev, new) != prev);
+
+ delta = (new - prev) & GENMASK_ULL(63, 0);
+ local64_add(delta, &event->count);
+}
+
+static void tad_pmu_event_counter_stop(struct perf_event *event, int flags)
+{
+ struct tad_pmu *tad_pmu = to_tad_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ u32 counter_idx = hwc->idx;
+ int i;
+
+	/* TAD()_PFC() stops counting on the write
+	 * that sets TAD()_PRF()[CNTSEL] == 0
+	 */
+ for (i = 0; i < tad_pmu->region_cnt; i++)
+ writeq(0, tad_pmu->regions[i].base + TAD_PRF(counter_idx));
+
+ tad_pmu_event_counter_read(event);
+ hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+}
+
+static void tad_pmu_event_counter_start(struct perf_event *event, int flags)
+{
+ struct tad_pmu *tad_pmu = to_tad_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ u32 event_idx = event->attr.config;
+ u32 counter_idx = hwc->idx;
+ u64 reg_val;
+ int i;
+
+ hwc->state = 0;
+
+ /* Typically TAD_PFC() are zeroed to start counting */
+ for (i = 0; i < tad_pmu->region_cnt; i++)
+ writeq(0, tad_pmu->regions[i].base + TAD_PFC(counter_idx));
+
+	/* TAD()_PFC() starts counting on the write
+	 * that sets TAD()_PRF()[CNTSEL] != 0
+	 */
+ for (i = 0; i < tad_pmu->region_cnt; i++) {
+ reg_val = readq(tad_pmu->regions[i].base +
+ TAD_PRF(counter_idx));
+ reg_val |= (event_idx & 0xFF);
+ writeq(reg_val, tad_pmu->regions[i].base +
+ TAD_PRF(counter_idx));
+ }
+}
+
+static void tad_pmu_event_counter_del(struct perf_event *event, int flags)
+{
+ struct tad_pmu *tad_pmu = to_tad_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ tad_pmu_event_counter_stop(event, flags | PERF_EF_UPDATE);
+ tad_pmu->events[idx] = NULL;
+ clear_bit(idx, tad_pmu->counters_map);
+}
+
+static int tad_pmu_event_counter_add(struct perf_event *event, int flags)
+{
+ struct tad_pmu *tad_pmu = to_tad_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx;
+
+ /* Get a free counter for this event */
+ idx = find_first_zero_bit(tad_pmu->counters_map, TAD_MAX_COUNTERS);
+ if (idx == TAD_MAX_COUNTERS)
+ return -EAGAIN;
+
+ set_bit(idx, tad_pmu->counters_map);
+
+ hwc->idx = idx;
+ hwc->state = PERF_HES_STOPPED;
+ tad_pmu->events[idx] = event;
+
+ if (flags & PERF_EF_START)
+ tad_pmu_event_counter_start(event, flags);
+
+ return 0;
+}
+
+static int tad_pmu_event_init(struct perf_event *event)
+{
+ struct tad_pmu *tad_pmu = to_tad_pmu(event->pmu);
+
+ if (!event->attr.disabled)
+ return -EINVAL;
+
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ if (event->state != PERF_EVENT_STATE_OFF)
+ return -EINVAL;
+
+ event->cpu = tad_pmu->cpu;
+ event->hw.idx = -1;
+ event->hw.config_base = event->attr.config;
+
+ return 0;
+}
+
+static ssize_t tad_pmu_event_show(struct device *dev,
+ struct device_attribute *attr, char *page)
+{
+ struct perf_pmu_events_attr *pmu_attr;
+
+ pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
+ return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
+}
+
+#define TAD_PMU_EVENT_ATTR(_name, _id) \
+ (&((struct perf_pmu_events_attr[]) { \
+ { .attr = __ATTR(_name, 0444, tad_pmu_event_show, NULL),\
+ .id = _id, } \
+ })[0].attr.attr)
+
+static struct attribute *tad_pmu_event_attrs[] = {
+ TAD_PMU_EVENT_ATTR(tad_none, 0x0),
+ TAD_PMU_EVENT_ATTR(tad_req_msh_in_any, 0x1),
+ TAD_PMU_EVENT_ATTR(tad_req_msh_in_mn, 0x2),
+ TAD_PMU_EVENT_ATTR(tad_req_msh_in_exlmn, 0x3),
+ TAD_PMU_EVENT_ATTR(tad_rsp_msh_in_any, 0x4),
+ TAD_PMU_EVENT_ATTR(tad_rsp_msh_in_mn, 0x5),
+ TAD_PMU_EVENT_ATTR(tad_rsp_msh_in_exlmn, 0x6),
+ TAD_PMU_EVENT_ATTR(tad_rsp_msh_in_dss, 0x7),
+ TAD_PMU_EVENT_ATTR(tad_rsp_msh_in_retry_dss, 0x8),
+ TAD_PMU_EVENT_ATTR(tad_dat_msh_in_any, 0x9),
+ TAD_PMU_EVENT_ATTR(tad_dat_msh_in_dss, 0xa),
+ TAD_PMU_EVENT_ATTR(tad_req_msh_out_any, 0xb),
+ TAD_PMU_EVENT_ATTR(tad_req_msh_out_dss_rd, 0xc),
+ TAD_PMU_EVENT_ATTR(tad_req_msh_out_dss_wr, 0xd),
+ TAD_PMU_EVENT_ATTR(tad_req_msh_out_evict, 0xe),
+ TAD_PMU_EVENT_ATTR(tad_rsp_msh_out_any, 0xf),
+ TAD_PMU_EVENT_ATTR(tad_rsp_msh_out_retry_exlmn, 0x10),
+ TAD_PMU_EVENT_ATTR(tad_rsp_msh_out_retry_mn, 0x11),
+ TAD_PMU_EVENT_ATTR(tad_rsp_msh_out_exlmn, 0x12),
+ TAD_PMU_EVENT_ATTR(tad_rsp_msh_out_mn, 0x13),
+ TAD_PMU_EVENT_ATTR(tad_snp_msh_out_any, 0x14),
+ TAD_PMU_EVENT_ATTR(tad_snp_msh_out_mn, 0x15),
+ TAD_PMU_EVENT_ATTR(tad_snp_msh_out_exlmn, 0x16),
+ TAD_PMU_EVENT_ATTR(tad_dat_msh_out_any, 0x17),
+ TAD_PMU_EVENT_ATTR(tad_dat_msh_out_fill, 0x18),
+ TAD_PMU_EVENT_ATTR(tad_dat_msh_out_dss, 0x19),
+ TAD_PMU_EVENT_ATTR(tad_alloc_dtg, 0x1a),
+ TAD_PMU_EVENT_ATTR(tad_alloc_ltg, 0x1b),
+ TAD_PMU_EVENT_ATTR(tad_alloc_any, 0x1c),
+ TAD_PMU_EVENT_ATTR(tad_hit_dtg, 0x1d),
+ TAD_PMU_EVENT_ATTR(tad_hit_ltg, 0x1e),
+ TAD_PMU_EVENT_ATTR(tad_hit_any, 0x1f),
+ TAD_PMU_EVENT_ATTR(tad_tag_rd, 0x20),
+ TAD_PMU_EVENT_ATTR(tad_dat_rd, 0x21),
+ TAD_PMU_EVENT_ATTR(tad_dat_rd_byp, 0x22),
+ TAD_PMU_EVENT_ATTR(tad_ifb_occ, 0x23),
+ TAD_PMU_EVENT_ATTR(tad_req_occ, 0x24),
+ NULL,
+};
+
+static const struct attribute_group tad_pmu_events_attr_group = {
+ .name = "events",
+ .attrs = tad_pmu_event_attrs,
+};
+
+PMU_FORMAT_ATTR(event, "config:0-7");
+
+static struct attribute *tad_pmu_format_attrs[] = {
+ &format_attr_event.attr,
+ NULL,
+};
+
+static struct attribute_group tad_pmu_format_attr_group = {
+ .name = "format",
+ .attrs = tad_pmu_format_attrs,
+};
+
+static ssize_t tad_pmu_cpumask_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tad_pmu *tad_pmu = to_tad_pmu(dev_get_drvdata(dev));
+
+ return cpumap_print_to_pagebuf(true, buf, cpumask_of(tad_pmu->cpu));
+}
+
+static DEVICE_ATTR(cpumask, 0444, tad_pmu_cpumask_show, NULL);
+
+static struct attribute *tad_pmu_cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL,
+};
+
+static struct attribute_group tad_pmu_cpumask_attr_group = {
+ .attrs = tad_pmu_cpumask_attrs,
+};
+
+static const struct attribute_group *tad_pmu_attr_groups[] = {
+ &tad_pmu_events_attr_group,
+ &tad_pmu_format_attr_group,
+ &tad_pmu_cpumask_attr_group,
+ NULL,
+};
+
+static int tad_pmu_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct tad_region *regions;
+ struct tad_pmu *tad_pmu;
+ struct resource *res;
+ u32 tad_pmu_page_size;
+ u32 tad_page_size;
+ u32 tad_cnt;
+ int i, ret;
+ char *name;
+
+ tad_pmu = devm_kzalloc(&pdev->dev, sizeof(*tad_pmu), GFP_KERNEL);
+ if (!tad_pmu)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, tad_pmu);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Mem resource not found\n");
+ return -ENODEV;
+ }
+
+ ret = of_property_read_u32(node, "tad-page-size", &tad_page_size);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't find tad-page-size property\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(node, "tad-pmu-page-size",
+ &tad_pmu_page_size);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't find tad-pmu-page-size property\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(node, "tad-cnt", &tad_cnt);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't find tad-cnt property\n");
+ return ret;
+ }
+
+	regions = devm_kcalloc(&pdev->dev, tad_cnt, sizeof(*regions),
+			       GFP_KERNEL);
+ if (!regions)
+ return -ENOMEM;
+
+ /* ioremap the distributed TAD pmu regions */
+ for (i = 0; i < tad_cnt && res->start < res->end; i++) {
+ regions[i].base = devm_ioremap(&pdev->dev,
+ res->start,
+ tad_pmu_page_size);
+		if (!regions[i].base) {
+ dev_err(&pdev->dev, "TAD%d ioremap fail\n", i);
+ return -ENOMEM;
+ }
+ res->start += tad_page_size;
+ }
+
+ tad_pmu->regions = regions;
+ tad_pmu->region_cnt = tad_cnt;
+
+ tad_pmu->pmu = (struct pmu) {
+
+ .module = THIS_MODULE,
+ .attr_groups = tad_pmu_attr_groups,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
+ .task_ctx_nr = perf_invalid_context,
+
+ .event_init = tad_pmu_event_init,
+ .add = tad_pmu_event_counter_add,
+ .del = tad_pmu_event_counter_del,
+ .start = tad_pmu_event_counter_start,
+ .stop = tad_pmu_event_counter_stop,
+ .read = tad_pmu_event_counter_read,
+ };
+
+ tad_pmu->cpu = raw_smp_processor_id();
+
+ /* Register pmu instance for cpu hotplug */
+ ret = cpuhp_state_add_instance_nocalls(tad_pmu_cpuhp_state,
+ &tad_pmu->node);
+ if (ret) {
+ dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
+ return ret;
+ }
+
+ name = "tad";
+ ret = perf_pmu_register(&tad_pmu->pmu, name, -1);
+ if (ret)
+ cpuhp_state_remove_instance_nocalls(tad_pmu_cpuhp_state,
+ &tad_pmu->node);
+
+ return ret;
+}
+
+static int tad_pmu_remove(struct platform_device *pdev)
+{
+ struct tad_pmu *pmu = platform_get_drvdata(pdev);
+
+ cpuhp_state_remove_instance_nocalls(tad_pmu_cpuhp_state,
+ &pmu->node);
+ perf_pmu_unregister(&pmu->pmu);
+
+ return 0;
+}
+
+static const struct of_device_id tad_pmu_of_match[] = {
+ { .compatible = "marvell,cn10k-tad-pmu", },
+ {},
+};
+
+static struct platform_driver tad_pmu_driver = {
+ .driver = {
+ .name = "cn10k_tad_pmu",
+ .of_match_table = of_match_ptr(tad_pmu_of_match),
+ .suppress_bind_attrs = true,
+ },
+ .probe = tad_pmu_probe,
+ .remove = tad_pmu_remove,
+};
+
+static int tad_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
+{
+ struct tad_pmu *pmu = hlist_entry_safe(node, struct tad_pmu, node);
+ unsigned int target;
+
+ if (cpu != pmu->cpu)
+ return 0;
+
+ target = cpumask_any_but(cpu_online_mask, cpu);
+ if (target >= nr_cpu_ids)
+ return 0;
+
+ perf_pmu_migrate_context(&pmu->pmu, cpu, target);
+ pmu->cpu = target;
+
+ return 0;
+}
+
+static int __init tad_pmu_init(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+ "perf/cn10k/tadpmu:online",
+ NULL,
+ tad_pmu_offline_cpu);
+ if (ret < 0)
+ return ret;
+ tad_pmu_cpuhp_state = ret;
+ return platform_driver_register(&tad_pmu_driver);
+}
+
+static void __exit tad_pmu_exit(void)
+{
+ platform_driver_unregister(&tad_pmu_driver);
+ cpuhp_remove_multi_state(tad_pmu_cpuhp_state);
+}
+
+module_init(tad_pmu_init);
+module_exit(tad_pmu_exit);
+
+MODULE_DESCRIPTION("Marvell CN10K LLC-TAD Perf driver");
+MODULE_AUTHOR("Bhaskara Budiredla <bbudiredla@marvell.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/marvell/Kconfig b/drivers/phy/marvell/Kconfig
index 8ab9031c9894..209249ea7f1c 100644
--- a/drivers/phy/marvell/Kconfig
+++ b/drivers/phy/marvell/Kconfig
@@ -67,6 +67,14 @@ config PHY_MVEBU_CP110_COMPHY
lanes can be used by various controllers (Ethernet, sata, usb,
PCIe...).
+config PHY_MVEBU_CP110_UTMI
+ tristate "Marvell CP110 UTMI driver"
+ depends on ARCH_MVEBU || COMPILE_TEST
+ depends on OF
+ select GENERIC_PHY
+ help
+	  Enable this to support the Marvell CP110 UTMI PHY driver.
+
config PHY_MVEBU_SATA
def_bool y
depends on ARCH_DOVE || MACH_DOVE || MACH_KIRKWOOD
diff --git a/drivers/phy/marvell/Makefile b/drivers/phy/marvell/Makefile
index 5a106b1549f4..0265b9f677b4 100644
--- a/drivers/phy/marvell/Makefile
+++ b/drivers/phy/marvell/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_PHY_MVEBU_A3700_COMPHY) += phy-mvebu-a3700-comphy.o
obj-$(CONFIG_PHY_MVEBU_A3700_UTMI) += phy-mvebu-a3700-utmi.o
obj-$(CONFIG_PHY_MVEBU_A38X_COMPHY) += phy-armada38x-comphy.o
obj-$(CONFIG_PHY_MVEBU_CP110_COMPHY) += phy-mvebu-cp110-comphy.o
+obj-$(CONFIG_PHY_MVEBU_CP110_UTMI) += phy-mvebu-cp110-utmi.o
obj-$(CONFIG_PHY_MVEBU_SATA) += phy-mvebu-sata.o
obj-$(CONFIG_PHY_PXA_28NM_HSIC) += phy-pxa-28nm-hsic.o
obj-$(CONFIG_PHY_PXA_28NM_USB2) += phy-pxa-28nm-usb2.o
diff --git a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
index 53ad127b100f..5f115ab32a91 100644
--- a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
+++ b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
@@ -138,10 +138,11 @@
#define COMPHY_FW_SPEED_OFFSET 2
#define COMPHY_FW_SPEED_MASK GENMASK(7, 2)
#define COMPHY_FW_SPEED_MAX COMPHY_FW_SPEED_MASK
-#define COMPHY_FW_SPEED_1250 0
-#define COMPHY_FW_SPEED_3125 2
+#define COMPHY_FW_SPEED_1250 0 /* SGMII 1G */
+#define COMPHY_FW_SPEED_3125 2 /* SGMII 2.5G */
#define COMPHY_FW_SPEED_5000 3
-#define COMPHY_FW_SPEED_103125 6
+#define COMPHY_FW_SPEED_515625 4 /* XFI 5G */
+#define COMPHY_FW_SPEED_103125 6 /* XFI 10G */
#define COMPHY_FW_PORT_OFFSET 8
#define COMPHY_FW_PORT_MASK GENMASK(11, 8)
#define COMPHY_FW_MODE_OFFSET 12
@@ -208,6 +209,7 @@ static const struct mvebu_comphy_conf mvebu_comphy_cp110_modes[] = {
GEN_CONF(0, 0, PHY_MODE_PCIE, COMPHY_FW_MODE_PCIE),
ETH_CONF(0, 1, PHY_INTERFACE_MODE_SGMII, 0x1, COMPHY_FW_MODE_SGMII),
ETH_CONF(0, 1, PHY_INTERFACE_MODE_2500BASEX, 0x1, COMPHY_FW_MODE_HS_SGMII),
+ ETH_CONF(0, 1, PHY_INTERFACE_MODE_2500BASET, 0x1, COMPHY_FW_MODE_HS_SGMII),
GEN_CONF(0, 1, PHY_MODE_SATA, COMPHY_FW_MODE_SATA),
/* lane 1 */
GEN_CONF(1, 0, PHY_MODE_USB_HOST_SS, COMPHY_FW_MODE_USB3H),
@@ -216,11 +218,14 @@ static const struct mvebu_comphy_conf mvebu_comphy_cp110_modes[] = {
GEN_CONF(1, 0, PHY_MODE_PCIE, COMPHY_FW_MODE_PCIE),
ETH_CONF(1, 2, PHY_INTERFACE_MODE_SGMII, 0x1, COMPHY_FW_MODE_SGMII),
ETH_CONF(1, 2, PHY_INTERFACE_MODE_2500BASEX, 0x1, COMPHY_FW_MODE_HS_SGMII),
+ ETH_CONF(1, 2, PHY_INTERFACE_MODE_2500BASET, 0x1, COMPHY_FW_MODE_HS_SGMII),
/* lane 2 */
ETH_CONF(2, 0, PHY_INTERFACE_MODE_SGMII, 0x1, COMPHY_FW_MODE_SGMII),
ETH_CONF(2, 0, PHY_INTERFACE_MODE_2500BASEX, 0x1, COMPHY_FW_MODE_HS_SGMII),
+ ETH_CONF(2, 0, PHY_INTERFACE_MODE_2500BASET, 0x1, COMPHY_FW_MODE_HS_SGMII),
ETH_CONF(2, 0, PHY_INTERFACE_MODE_RXAUI, 0x1, COMPHY_FW_MODE_RXAUI),
ETH_CONF(2, 0, PHY_INTERFACE_MODE_10GBASER, 0x1, COMPHY_FW_MODE_XFI),
+ ETH_CONF(2, 0, PHY_INTERFACE_MODE_5GKR, 0x1, COMPHY_FW_MODE_XFI),
GEN_CONF(2, 0, PHY_MODE_USB_HOST_SS, COMPHY_FW_MODE_USB3H),
GEN_CONF(2, 0, PHY_MODE_SATA, COMPHY_FW_MODE_SATA),
GEN_CONF(2, 0, PHY_MODE_PCIE, COMPHY_FW_MODE_PCIE),
@@ -228,13 +233,16 @@ static const struct mvebu_comphy_conf mvebu_comphy_cp110_modes[] = {
GEN_CONF(3, 0, PHY_MODE_PCIE, COMPHY_FW_MODE_PCIE),
ETH_CONF(3, 1, PHY_INTERFACE_MODE_SGMII, 0x2, COMPHY_FW_MODE_SGMII),
ETH_CONF(3, 1, PHY_INTERFACE_MODE_2500BASEX, 0x2, COMPHY_FW_MODE_HS_SGMII),
+ ETH_CONF(3, 1, PHY_INTERFACE_MODE_2500BASET, 0x2, COMPHY_FW_MODE_HS_SGMII),
ETH_CONF(3, 1, PHY_INTERFACE_MODE_RXAUI, 0x1, COMPHY_FW_MODE_RXAUI),
GEN_CONF(3, 1, PHY_MODE_USB_HOST_SS, COMPHY_FW_MODE_USB3H),
GEN_CONF(3, 1, PHY_MODE_SATA, COMPHY_FW_MODE_SATA),
/* lane 4 */
ETH_CONF(4, 0, PHY_INTERFACE_MODE_SGMII, 0x2, COMPHY_FW_MODE_SGMII),
ETH_CONF(4, 0, PHY_INTERFACE_MODE_2500BASEX, 0x2, COMPHY_FW_MODE_HS_SGMII),
+ ETH_CONF(4, 0, PHY_INTERFACE_MODE_2500BASET, 0x2, COMPHY_FW_MODE_HS_SGMII),
ETH_CONF(4, 0, PHY_INTERFACE_MODE_10GBASER, 0x2, COMPHY_FW_MODE_XFI),
+ ETH_CONF(4, 0, PHY_INTERFACE_MODE_5GKR, 0x2, COMPHY_FW_MODE_XFI),
ETH_CONF(4, 0, PHY_INTERFACE_MODE_RXAUI, 0x2, COMPHY_FW_MODE_RXAUI),
GEN_CONF(4, 0, PHY_MODE_USB_DEVICE_SS, COMPHY_FW_MODE_USB3D),
GEN_CONF(4, 1, PHY_MODE_USB_HOST_SS, COMPHY_FW_MODE_USB3H),
@@ -242,11 +250,13 @@ static const struct mvebu_comphy_conf mvebu_comphy_cp110_modes[] = {
ETH_CONF(4, 1, PHY_INTERFACE_MODE_SGMII, 0x1, COMPHY_FW_MODE_SGMII),
ETH_CONF(4, 1, PHY_INTERFACE_MODE_2500BASEX, -1, COMPHY_FW_MODE_HS_SGMII),
ETH_CONF(4, 1, PHY_INTERFACE_MODE_10GBASER, -1, COMPHY_FW_MODE_XFI),
+ ETH_CONF(4, 1, PHY_INTERFACE_MODE_5GKR, -1, COMPHY_FW_MODE_XFI),
/* lane 5 */
ETH_CONF(5, 1, PHY_INTERFACE_MODE_RXAUI, 0x2, COMPHY_FW_MODE_RXAUI),
GEN_CONF(5, 1, PHY_MODE_SATA, COMPHY_FW_MODE_SATA),
ETH_CONF(5, 2, PHY_INTERFACE_MODE_SGMII, 0x1, COMPHY_FW_MODE_SGMII),
ETH_CONF(5, 2, PHY_INTERFACE_MODE_2500BASEX, 0x1, COMPHY_FW_MODE_HS_SGMII),
+ ETH_CONF(5, 2, PHY_INTERFACE_MODE_2500BASET, 0x1, COMPHY_FW_MODE_HS_SGMII),
GEN_CONF(5, 2, PHY_MODE_PCIE, COMPHY_FW_MODE_PCIE),
};
@@ -360,6 +370,7 @@ static int mvebu_comphy_ethernet_init_reset(struct mvebu_comphy_lane *lane)
MVEBU_COMPHY_SERDES_CFG0_RXAUI_MODE;
break;
case PHY_INTERFACE_MODE_2500BASEX:
+ case PHY_INTERFACE_MODE_2500BASET:
val |= MVEBU_COMPHY_SERDES_CFG0_GEN_RX(0x8) |
MVEBU_COMPHY_SERDES_CFG0_GEN_TX(0x8) |
MVEBU_COMPHY_SERDES_CFG0_HALF_BUS;
@@ -786,7 +797,8 @@ static int mvebu_comphy_power_on(struct phy *phy)
fw_speed = COMPHY_FW_SPEED_1250;
break;
case PHY_INTERFACE_MODE_2500BASEX:
- dev_dbg(priv->dev, "set lane %d to 2500BASE-X mode\n",
+ case PHY_INTERFACE_MODE_2500BASET:
+ dev_dbg(priv->dev, "set lane %d to 2500BASE mode\n",
lane->id);
fw_speed = COMPHY_FW_SPEED_3125;
break;
@@ -795,6 +807,11 @@ static int mvebu_comphy_power_on(struct phy *phy)
lane->id);
fw_speed = COMPHY_FW_SPEED_103125;
break;
+ case PHY_INTERFACE_MODE_5GKR:
+ dev_dbg(priv->dev, "set lane %d to 5G-KR mode\n",
+ lane->id);
+ fw_speed = COMPHY_FW_SPEED_515625;
+ break;
default:
dev_err(priv->dev, "unsupported Ethernet mode (%d)\n",
lane->submode);
diff --git a/drivers/phy/marvell/phy-mvebu-cp110-utmi.c b/drivers/phy/marvell/phy-mvebu-cp110-utmi.c
new file mode 100644
index 000000000000..08d178a4dc13
--- /dev/null
+++ b/drivers/phy/marvell/phy-mvebu-cp110-utmi.c
@@ -0,0 +1,384 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Marvell
+ *
+ * Authors:
+ * Konstantin Porotchkin <kostap@marvell.com>
+ *
+ * Marvell CP110 UTMI PHY driver
+ */
+
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/usb/of.h>
+#include <linux/usb/otg.h>
+
+#define UTMI_PHY_PORTS 2
+
+/* CP110 UTMI register macro definitions */
+#define SYSCON_USB_CFG_REG 0x420
+#define USB_CFG_DEVICE_EN_MASK BIT(0)
+#define USB_CFG_DEVICE_MUX_OFFSET 1
+#define USB_CFG_DEVICE_MUX_MASK BIT(1)
+#define USB_CFG_PLL_MASK BIT(25)
+
+#define SYSCON_UTMI_CFG_REG(id) (0x440 + (id) * 4)
+#define UTMI_PHY_CFG_PU_MASK BIT(5)
+
+#define UTMI_PLL_CTRL_REG 0x0
+#define PLL_REFDIV_OFFSET 0
+#define PLL_REFDIV_MASK GENMASK(6, 0)
+#define PLL_REFDIV_VAL 0x5
+#define PLL_FBDIV_OFFSET 16
+#define PLL_FBDIV_MASK GENMASK(24, 16)
+#define PLL_FBDIV_VAL 0x60
+#define PLL_SEL_LPFR_MASK GENMASK(29, 28)
+#define PLL_RDY BIT(31)
+#define UTMI_CAL_CTRL_REG 0x8
+#define IMPCAL_VTH_OFFSET 8
+#define IMPCAL_VTH_MASK GENMASK(10, 8)
+#define IMPCAL_VTH_VAL 0x7
+#define IMPCAL_DONE BIT(23)
+#define PLLCAL_DONE BIT(31)
+#define UTMI_TX_CH_CTRL_REG 0xC
+#define DRV_EN_LS_OFFSET 12
+#define DRV_EN_LS_MASK GENMASK(15, 12)
+#define IMP_SEL_LS_OFFSET 16
+#define IMP_SEL_LS_MASK GENMASK(19, 16)
+#define TX_AMP_OFFSET 20
+#define TX_AMP_MASK GENMASK(22, 20)
+#define TX_AMP_VAL 0x4
+#define UTMI_RX_CH_CTRL0_REG 0x14
+#define SQ_DET_EN BIT(15)
+#define SQ_ANA_DTC_SEL BIT(28)
+#define UTMI_RX_CH_CTRL1_REG 0x18
+#define SQ_AMP_CAL_OFFSET 0
+#define SQ_AMP_CAL_MASK GENMASK(2, 0)
+#define SQ_AMP_CAL_VAL 1
+#define SQ_AMP_CAL_EN BIT(3)
+#define UTMI_CTRL_STATUS0_REG 0x24
+#define SUSPENDM BIT(22)
+#define TEST_SEL BIT(25)
+#define UTMI_CHGDTC_CTRL_REG 0x38
+#define VDAT_OFFSET 8
+#define VDAT_MASK GENMASK(9, 8)
+#define VDAT_VAL 1
+#define VSRC_OFFSET 10
+#define VSRC_MASK GENMASK(11, 10)
+#define VSRC_VAL 1
+
+#define PLL_LOCK_DELAY_US 10000
+#define PLL_LOCK_TIMEOUT_US 1000000
+
+#define PORT_REGS(p) ((p)->priv->regs + (p)->id * 0x1000)
+
+/**
+ * struct mvebu_cp110_utmi - PHY driver data
+ *
+ * @regs: PHY registers
+ * @syscon: Regmap with system controller registers
+ * @dev: device driver handle
+ * @ops: PHY operations
+ */
+struct mvebu_cp110_utmi {
+ void __iomem *regs;
+ struct regmap *syscon;
+ struct device *dev;
+ const struct phy_ops *ops;
+};
+
+/**
+ * struct mvebu_cp110_utmi_port - PHY port data
+ *
+ * @priv: PHY driver data
+ * @id: PHY port ID
+ * @dr_mode: PHY connection: USB_DR_MODE_HOST or USB_DR_MODE_PERIPHERAL
+ */
+struct mvebu_cp110_utmi_port {
+ struct mvebu_cp110_utmi *priv;
+ u32 id;
+ enum usb_dr_mode dr_mode;
+};
+
+static void mvebu_cp110_utmi_port_setup(struct mvebu_cp110_utmi_port *port)
+{
+ u32 reg;
+
+ /*
+ * Setup PLL.
+ * The reference clock is the frequency of quartz resonator
+ * connected to pins REFCLK_XIN and REFCLK_XOUT of the SoC.
+	 * Register init values match the 40MHz default clock.
+ * The crystal used for all platform boards is now 25MHz.
+ * See the functional specification for details.
+ */
+ reg = readl(PORT_REGS(port) + UTMI_PLL_CTRL_REG);
+ reg &= ~(PLL_REFDIV_MASK | PLL_FBDIV_MASK | PLL_SEL_LPFR_MASK);
+ reg |= (PLL_REFDIV_VAL << PLL_REFDIV_OFFSET) |
+ (PLL_FBDIV_VAL << PLL_FBDIV_OFFSET);
+ writel(reg, PORT_REGS(port) + UTMI_PLL_CTRL_REG);
+
+ /* Impedance Calibration Threshold Setting */
+ reg = readl(PORT_REGS(port) + UTMI_CAL_CTRL_REG);
+ reg &= ~IMPCAL_VTH_MASK;
+ reg |= IMPCAL_VTH_VAL << IMPCAL_VTH_OFFSET;
+ writel(reg, PORT_REGS(port) + UTMI_CAL_CTRL_REG);
+
+ /* Set LS TX driver strength coarse control */
+ reg = readl(PORT_REGS(port) + UTMI_TX_CH_CTRL_REG);
+ reg &= ~TX_AMP_MASK;
+ reg |= TX_AMP_VAL << TX_AMP_OFFSET;
+ writel(reg, PORT_REGS(port) + UTMI_TX_CH_CTRL_REG);
+
+ /* Disable SQ and enable analog squelch detect */
+ reg = readl(PORT_REGS(port) + UTMI_RX_CH_CTRL0_REG);
+ reg &= ~SQ_DET_EN;
+ reg |= SQ_ANA_DTC_SEL;
+ writel(reg, PORT_REGS(port) + UTMI_RX_CH_CTRL0_REG);
+
+ /*
+ * Set External squelch calibration number and
+ * enable the External squelch calibration
+ */
+ reg = readl(PORT_REGS(port) + UTMI_RX_CH_CTRL1_REG);
+ reg &= ~SQ_AMP_CAL_MASK;
+ reg |= (SQ_AMP_CAL_VAL << SQ_AMP_CAL_OFFSET) | SQ_AMP_CAL_EN;
+ writel(reg, PORT_REGS(port) + UTMI_RX_CH_CTRL1_REG);
+
+ /*
+ * Set Control VDAT Reference Voltage - 0.325V and
+ * Control VSRC Reference Voltage - 0.6V
+ */
+ reg = readl(PORT_REGS(port) + UTMI_CHGDTC_CTRL_REG);
+ reg &= ~(VDAT_MASK | VSRC_MASK);
+ reg |= (VDAT_VAL << VDAT_OFFSET) | (VSRC_VAL << VSRC_OFFSET);
+ writel(reg, PORT_REGS(port) + UTMI_CHGDTC_CTRL_REG);
+}
+
+static int mvebu_cp110_utmi_phy_power_off(struct phy *phy)
+{
+ struct mvebu_cp110_utmi_port *port = phy_get_drvdata(phy);
+ struct mvebu_cp110_utmi *utmi = port->priv;
+ int i;
+
+ /* Power down UTMI PHY port */
+ regmap_clear_bits(utmi->syscon, SYSCON_UTMI_CFG_REG(port->id),
+ UTMI_PHY_CFG_PU_MASK);
+
+ for (i = 0; i < UTMI_PHY_PORTS; i++) {
+ int test = regmap_test_bits(utmi->syscon,
+ SYSCON_UTMI_CFG_REG(i),
+ UTMI_PHY_CFG_PU_MASK);
+ /* skip PLL shutdown if there are active UTMI PHY ports */
+ if (test != 0)
+ return 0;
+ }
+
+ /* PLL Power down if all UTMI PHYs are down */
+ regmap_clear_bits(utmi->syscon, SYSCON_USB_CFG_REG, USB_CFG_PLL_MASK);
+
+ return 0;
+}
+
+static int mvebu_cp110_utmi_phy_power_on(struct phy *phy)
+{
+ struct mvebu_cp110_utmi_port *port = phy_get_drvdata(phy);
+ struct mvebu_cp110_utmi *utmi = port->priv;
+ struct device *dev = &phy->dev;
+ int ret;
+ u32 reg;
+
+ /* It is necessary to power off UTMI before configuration */
+ ret = mvebu_cp110_utmi_phy_power_off(phy);
+ if (ret) {
+ dev_err(dev, "UTMI power OFF before power ON failed\n");
+ return ret;
+ }
+
+ /*
+ * If UTMI port is connected to USB Device controller,
+ * configure the USB MUX prior to UTMI PHY initialization.
+ * The single USB device controller can be connected
+ * to UTMI0 or to UTMI1 PHY port, but not to both.
+ */
+ if (port->dr_mode == USB_DR_MODE_PERIPHERAL) {
+ regmap_update_bits(utmi->syscon, SYSCON_USB_CFG_REG,
+ USB_CFG_DEVICE_EN_MASK | USB_CFG_DEVICE_MUX_MASK,
+ USB_CFG_DEVICE_EN_MASK |
+ (port->id << USB_CFG_DEVICE_MUX_OFFSET));
+ }
+
+ /* Set Test suspendm mode and enable Test UTMI select */
+ reg = readl(PORT_REGS(port) + UTMI_CTRL_STATUS0_REG);
+ reg |= SUSPENDM | TEST_SEL;
+ writel(reg, PORT_REGS(port) + UTMI_CTRL_STATUS0_REG);
+
+ /* Wait for UTMI power down */
+ mdelay(1);
+
+ /* PHY port setup first */
+ mvebu_cp110_utmi_port_setup(port);
+
+ /* Power UP UTMI PHY */
+ regmap_set_bits(utmi->syscon, SYSCON_UTMI_CFG_REG(port->id),
+ UTMI_PHY_CFG_PU_MASK);
+
+ /* Disable Test UTMI select */
+ reg = readl(PORT_REGS(port) + UTMI_CTRL_STATUS0_REG);
+ reg &= ~TEST_SEL;
+ writel(reg, PORT_REGS(port) + UTMI_CTRL_STATUS0_REG);
+
+ /* Wait for impedance calibration */
+ ret = readl_poll_timeout(PORT_REGS(port) + UTMI_CAL_CTRL_REG, reg,
+ reg & IMPCAL_DONE,
+ PLL_LOCK_DELAY_US, PLL_LOCK_TIMEOUT_US);
+ if (ret) {
+ dev_err(dev, "Failed to end UTMI impedance calibration\n");
+ return ret;
+ }
+
+ /* Wait for PLL calibration */
+ ret = readl_poll_timeout(PORT_REGS(port) + UTMI_CAL_CTRL_REG, reg,
+ reg & PLLCAL_DONE,
+ PLL_LOCK_DELAY_US, PLL_LOCK_TIMEOUT_US);
+ if (ret) {
+ dev_err(dev, "Failed to end UTMI PLL calibration\n");
+ return ret;
+ }
+
+ /* Wait for PLL ready */
+ ret = readl_poll_timeout(PORT_REGS(port) + UTMI_PLL_CTRL_REG, reg,
+ reg & PLL_RDY,
+ PLL_LOCK_DELAY_US, PLL_LOCK_TIMEOUT_US);
+ if (ret) {
+ dev_err(dev, "PLL is not ready\n");
+ return ret;
+ }
+
+ /* PLL Power up */
+ regmap_set_bits(utmi->syscon, SYSCON_USB_CFG_REG, USB_CFG_PLL_MASK);
+
+ return 0;
+}
+
+static const struct phy_ops mvebu_cp110_utmi_phy_ops = {
+ .power_on = mvebu_cp110_utmi_phy_power_on,
+ .power_off = mvebu_cp110_utmi_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static const struct of_device_id mvebu_cp110_utmi_of_match[] = {
+ { .compatible = "marvell,cp110-utmi-phy" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mvebu_cp110_utmi_of_match);
+
+static int mvebu_cp110_utmi_phy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mvebu_cp110_utmi *utmi;
+ struct phy_provider *provider;
+ struct device_node *child;
+ u32 usb_devices = 0;
+
+ utmi = devm_kzalloc(dev, sizeof(*utmi), GFP_KERNEL);
+ if (!utmi)
+ return -ENOMEM;
+
+ utmi->dev = dev;
+
+ /* Get system controller region */
+ utmi->syscon = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "marvell,system-controller");
+ if (IS_ERR(utmi->syscon)) {
+ dev_err(dev, "Missing UTMI system controller\n");
+ return PTR_ERR(utmi->syscon);
+ }
+
+ /* Get UTMI memory region */
+ utmi->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(utmi->regs))
+ return PTR_ERR(utmi->regs);
+
+ for_each_available_child_of_node(dev->of_node, child) {
+ struct mvebu_cp110_utmi_port *port;
+ struct phy *phy;
+ int ret;
+ u32 port_id;
+
+ ret = of_property_read_u32(child, "reg", &port_id);
+ if ((ret < 0) || (port_id >= UTMI_PHY_PORTS)) {
+ dev_err(dev,
+ "invalid 'reg' property on child %pOF\n",
+ child);
+ continue;
+ }
+
+ port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+ if (!port) {
+ of_node_put(child);
+ return -ENOMEM;
+ }
+
+ port->dr_mode = of_usb_get_dr_mode_by_phy(child, -1);
+ if ((port->dr_mode != USB_DR_MODE_HOST) &&
+ (port->dr_mode != USB_DR_MODE_PERIPHERAL)) {
+ dev_err(&pdev->dev,
+ "Missing dual role setting of the port%d, will use HOST mode\n",
+ port_id);
+ port->dr_mode = USB_DR_MODE_HOST;
+ }
+
+ if (port->dr_mode == USB_DR_MODE_PERIPHERAL) {
+ usb_devices++;
+ if (usb_devices > 1) {
+ dev_err(dev,
+ "Single USB device allowed! Port%d will use HOST mode\n",
+ port_id);
+ port->dr_mode = USB_DR_MODE_HOST;
+ }
+ }
+
+ /* Retrieve PHY capabilities */
+ utmi->ops = &mvebu_cp110_utmi_phy_ops;
+
+ /* Instantiate the PHY */
+ phy = devm_phy_create(dev, child, utmi->ops);
+ if (IS_ERR(phy)) {
+ dev_err(dev, "Failed to create the UTMI PHY\n");
+ of_node_put(child);
+ return PTR_ERR(phy);
+ }
+
+ port->priv = utmi;
+ port->id = port_id;
+ phy_set_drvdata(phy, port);
+
+ /* Ensure the PHY is powered off */
+ mvebu_cp110_utmi_phy_power_off(phy);
+ }
+
+ dev_set_drvdata(dev, utmi);
+ provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(provider);
+}
+
+static struct platform_driver mvebu_cp110_utmi_driver = {
+ .probe = mvebu_cp110_utmi_phy_probe,
+ .driver = {
+ .name = "mvebu-cp110-utmi-phy",
+ .of_match_table = mvebu_cp110_utmi_of_match,
+ },
+};
+module_platform_driver(mvebu_cp110_utmi_driver);
+
+MODULE_AUTHOR("Konstatin Porotchkin <kostap@marvell.com>");
+MODULE_DESCRIPTION("Marvell Armada CP110 UTMI PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-cp110.c b/drivers/pinctrl/mvebu/pinctrl-armada-cp110.c
index 17491b27e487..8ba8f3e9121f 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-cp110.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-cp110.c
@@ -519,13 +519,13 @@ static struct mvebu_mpp_mode armada_cp110_mpp_modes[] = {
MPP_FUNCTION(4, "synce1", "clk"),
MPP_FUNCTION(8, "led", "data"),
MPP_FUNCTION(10, "sdio", "hw_rst"),
- MPP_FUNCTION(11, "sdio", "wr_protect")),
+ MPP_FUNCTION(11, "sdio_wp", "wr_protect")),
MPP_MODE(55,
MPP_FUNCTION(0, "gpio", NULL),
MPP_FUNCTION(1, "ge1", "rxctl_rxdv"),
MPP_FUNCTION(3, "ptp", "pulse"),
MPP_FUNCTION(10, "sdio", "led"),
- MPP_FUNCTION(11, "sdio", "card_detect")),
+ MPP_FUNCTION(11, "sdio_cd", "card_detect")),
MPP_MODE(56,
MPP_FUNCTION(0, "gpio", NULL),
MPP_FUNCTION(4, "tdm", "drx"),
diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig
index 425ab6f7e375..acedbcab5d9f 100644
--- a/drivers/soc/Kconfig
+++ b/drivers/soc/Kconfig
@@ -9,6 +9,7 @@ source "drivers/soc/bcm/Kconfig"
source "drivers/soc/fsl/Kconfig"
source "drivers/soc/imx/Kconfig"
source "drivers/soc/ixp4xx/Kconfig"
+source "drivers/soc/marvell/Kconfig"
source "drivers/soc/mediatek/Kconfig"
source "drivers/soc/qcom/Kconfig"
source "drivers/soc/renesas/Kconfig"
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index 36452bed86ef..c60dc3de3fa0 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_ARCH_IXP4XX) += ixp4xx/
obj-$(CONFIG_SOC_XWAY) += lantiq/
obj-y += mediatek/
obj-y += amlogic/
+obj-y += marvell/
obj-y += qcom/
obj-y += renesas/
obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/
diff --git a/drivers/soc/marvell/Kconfig b/drivers/soc/marvell/Kconfig
new file mode 100644
index 000000000000..b5f518a0c804
--- /dev/null
+++ b/drivers/soc/marvell/Kconfig
@@ -0,0 +1,308 @@
+#
+# MARVELL SoC drivers
+#
+
+menu "Marvell SoC drivers"
+
+config OCTEONTX2_RM
+ tristate "OcteonTX2 RVU Resource Manager driver"
+ depends on OCTEONTX2_AF
+ help
+ This driver offers resource management interfaces for Marvell's
+ OcteonTX2 Resource Virtualization Unit SSO/TIM PFs which are used
+ for interfacing with non-NIC hardware offload units.
+
+config OCTEONTX2_RM_DOM_SYSFS
+ bool "OcteonTX2 RVU Resource Manager domain sysfs"
+ depends on OCTEONTX2_RM
+ help
+ Enable Application Domain sysfs which simplifies management of
+ SSO/TIM VFs and OcteonTX2 RVU based NIC devices by the system
+ administrator. This interface consists of the following files:
+
+ I. /sys/bus/pci/drivers/octeontx2-rm/0*/create_domain
+
+ Writing to this file will:
+ 1. Create a domain directory in /sys/bus/pci/drivers/octeontx2-rm/0*
+ with the domain name
+ 2. Reserve one of SSO/TIM VFs for this domain and set its limits
+ according to the specification passed in write string
+ 3. Create symlinks to all devices that will be part of the domain
+ in the directory created in point 1
+ 4. Create domain_id file returning the ID assigned to this domain
+ (effectively the domain name)
+ 5. Create domain_in_use file which reports state of domain's
+ SSO/TIM device's in_use file to indicate when domain is used
+ by an application.
+
+ The syntax for writing into this file is:
+
+ name;param:val(;param:val)*
+
+ * name - domain name
+ * param - parameter name, based on the parameter, its value 'val'
+ has to have a different format:
+ * sso, ssow, npa, tim, cpt - 'val' is an integer value of the
+ number of LFs to assign to the domain
+ * port - 'val' is in 'DDDD:BB:DD.F' format and specifies device
+ representing a port.
+
+ There are the following rules when creating a domain:
+
+ 1. Domain names must be unique
+ 2. Each domain must have at least 1 NPA and 1 SSOW LF
+ 3. One port may be only assigned to a single domain
+
+ II. /sys/bus/pci/drivers/octeontx2-rm/0*/destroy_domain
+
+ Writing domain name to this file will cause given domain to be
+ removed from the sysfs. This includes:
+ 1. Setting all limits of domain's SSO/TIM device to 0
+ 2. Removing all sysfs structures for this domain
+ 3. Removing all ports in this application domain from the list of
+ ports in use.
+
+ Removal of the domain is disabled while domain is in use, that
+ is while the 'in_use' flag of the domain's SSO/TIM device is set.
+ User/admin may query the status of this flag using the
+ 'domain_in_use' file in the domain's sysfs directory.
+
+config OCTEONTX2_DPI_PF
+ tristate "OcteonTX2 DPI-DMA PF driver"
+ depends on ARM64 && PCI
+ help
+ Select this option to enable DPI PF driver support.
+ DPI (DMA packet interface) provides DMA support for MAC.
+ This driver intializes dpi PF device and enables VF's for supporting
+ different types of DMA transfers.
+
+config OCTEONTX2_SDP_PF
+ tristate "OcteonTX2 SDP PF driver"
+ depends on ARM64 && PCI && OCTEONTX2_AF && OCTEONTX2_MBOX
+ default y
+ help
+ Select this option to enable SDP PF driver support.
+
+config OCTEONTX2_NPA_PF
+ tristate "OcteonTX2 NPA PF driver"
+ depends on ARM64 && PCI && OCTEONTX2_AF && OCTEONTX2_MBOX
+ default m
+ help
+ Select this option to enable NPA PF driver support.
+
+ NPA stands for Network Pool Allocator. This is a hardware unit on
+ OCTEONTX2 that can maintain pools of buffer pointers. Either software
+ or any other hardware can allocate and free pointers to buffer pools
+ maintained by NPA.
+
+config OCTEONTX2_PCI_CONSOLE
+ tristate "OcteonTX2 PCI Endpoint console driver"
+ depends on ARM64 && PCI
+ default y
+ help
+ Select this option to enable the PCI console driver when OcteonTX2
+ is configured as an Endpoint. This allows the host [PC] to connect
+ to the Linux console via PCI (using the host remote console utility).
+ To enable this console, specify 'console=pci0' on the kernel
+ command line.
+ To compile this as a module, choose M here. However, when used
+ as a module, some kernel boot messages may be missing from the
+ console.
+
+config OCTEONTX2_PCI_CONSOLE_DEBUG
+ bool "OcteonTX2 PCI Endpoint console driver verbose debug messages"
+ depends on OCTEONTX2_PCI_CONSOLE
+ help
+ Say Y here if you want the OcteonTX2 PCI console driver
+ to write verbose debug messages to the system log. Select this
+ if you are having a problem with the OcteonTX2 PCI console
+ and want to see more details of the console's operation.
+
+ If you are unsure about this, say N here.
+
+config GTI_WATCHDOG
+ tristate "GTI Watchdog driver"
+ depends on ARM64
+ help
+ GTI Watchdog driver, provides ioctl's for applications to
+ setup and enable the NMI Watchdog and also provides
+ an mmap() interface to map GTI device memory into process
+ address space for the application to directly poke the
+ GTI watchdog without any kernel support and entry.
+
+config OCTEONTX2_LLC
+ bool "OcteonTX2 LLC Manager driver"
+ depends on ARM64
+ default y
+ help
+ This driver offers cache management interface for
+ Marvell's OcteonTX2 LLC cache Lock and Unlock operations.
+
+config OCTEONTX_INFO
+ tristate "OcteonTX Info driver"
+ depends on PROC_FS
+ help
+	  Provides a procfs interface to board information such as the board
+	  name, board revision, MAC addresses, board serial number and more.
+	  These are boards housing Marvell OcteonTX family chipsets.
+
+config MVMDIO_UIO
+ tristate "Marvell MDIO interface driver"
+ default m
+ depends on MDIO_THUNDER
+ help
+ Select this option if you want to provide an interface for userspace
+ PHY drivers to access Marvell's MDIO bus.
+
+config MRVL_PHY_DIAGNOSTICS
+ tristate "Marvell PHY diagnostics debugfs driver"
+ default m
+ depends on DEBUG_FS
+ help
+ Select this option if you want an additional interface for
+ extended diagnostics functions for the PHY. This complements
+ functionality offered by ethtool. Requires PHY driver support in ATF.
+
+config MARVELL_CN10K_SERDES_DIAGNOSTICS
+ tristate "Marvell SerDes diagnostics debugfs driver"
+ default m
+ depends on DEBUG_FS
+ help
+ Select this option if you want to enable SerDes diagnostics
+ interface for performing PRBS, setting Loopback mode or
+ reading/writing Tx/Rx tuning parameters.
+
+config HW_CSR_ACCESS
+ tristate "Access HW Device Config Space"
+ default m
+ depends on OCTEONTX2_AF
+ help
+	  Provides an interface that enables a user space utility to read from
+	  or write to HW device config space registers, contexts and memory.
+
+config OCTEONTX_SERDES
+ tristate "OcteonTX2 serdes debug commands"
+ default m
+ depends on ARM64 && HAVE_ARM_SMCCC && DEBUG_FS
+ help
+	  Provides debugfs commands which allow collecting serdes information,
+	  capturing the eye, using PRBS, configuring serdes in loopback mode,
+	  and reading/writing TX tuning parameters.
+
+config MARVELL_CN10K_MAC_MGMT
+ tristate "Marvell CN10K MAC management support"
+ depends on ARM64
+ depends on DEBUG_FS
+ help
+	  Provides a debugfs entry which allows setting the MAC address for
+	  ports present in the system from userspace. The module creates the
+	  special file /sys/kernel/debug/marvell_mac_mngt/set_mac.
+	  The file has a write-only interface; reading from it returns usage
+	  information for the functionality.
+	  The driver is supported only on CN10K platforms.
+
+config MARVELL_CN10K_RPRAM
+ tristate "Octeontx RPRAM debugfs interface"
+ depends on HAVE_ARM_SMCCC
+ default n
+ help
+	  This option enables the user to change the persistent data updated by
+	  secure firmware. The kernel invokes a secure monitor call to make the
+	  change requested by the user. Enable this option if you are building
+	  for OcteonTX CN10K platforms and the memory preservation feature is used.
+
+config MARVELL_CN10K_SWUP
+ tristate "CN10K firmware secure update"
+ depends on ARM64
+ help
+ Provide debugfs interface for firmware update.
+ Supported on cn10k family only.
+ Currently supported operations:
+ -> TIM Version check
+ -> TIM Hash verification
+
+config OCTEONTX2_SDEI_GHES
+ bool "OcteonTX2 Generic Hardware Error Source (GHES) support"
+ select ARM_SDE_INTERFACE
+ select ACPI_APEI
+ select ACPI_APEI_GHES
+ select CRASH_DUMP
+ help
+ Select this option to enable support for RAS Generic Hardware Error
+ Source (GHES) reporting.
+	  This will allow RAS errors that are detected by the Marvell SoC to
+	  be reported using kernel logging.
+
+config OCTEONTX2_SDEI_GHES_DEBUG
+ bool "OcteonTX2 Generic Hardware Error Source (GHES) verbose debug msgs"
+ depends on OCTEONTX2_SDEI_GHES
+ default n
+ help
+ Say Y here if you want the Marvell GHES support to
+ write verbose debug messages to the system log. Select this
+ if you are having a problem with the Marvell GHES support
+ and want to see more details.
+ If you are unsure about this, say N here.
+
+config OCTEONTX2_EINJ
+ tristate "OcteonTX2 EDAC ECC Injection"
+ depends on OCTEONTX2_SDEI_GHES
+ help
+ Provides support for error injection to OcteonTX2
+ memory controllers (LMC) and cache blocks.
+ This facilitates testing of the memory controller RAS features and
+ is intended to be used by test personnel when conducting system tests.
+
+ To compile this as a module, choose M here. The module will be
+ called otx2-einj.
+ Unless testing, say N here.
+
+config OCTEONTX2_GHES_BERT
+ bool "OcteonTX2 GHES Boot Error Record Table (BERT) support"
+ depends on OCTEONTX2_SDEI_GHES
+ default y
+ help
+ Select this option to enable reporting of fatal Marvell GHES
+ errors on subsequent Linux boot. Normally, fatal errors will
+ reset the system. Enabling support here will allow such errors
+ to be reported when Linux is started again.
+
+config OCTEONTX2_CCU
+ tristate "OcteonTX2 Cache Controller Unit (CCU) driver"
+ depends on ARM64
+ default n
+ help
+ This driver supports configuring and monitoring the OcteonTX2 Cache
+ Controller Unit. The driver provides debugfs files to mask LLC ways
+ for a single or multiple cpus. A README file is included which
+ provides details on how to configure the LLC ways.
+
+ To compile as a module choose M here.
+
+config MARVELL_CN10K_FWLOG
+ tristate "CN10K firmware log interface"
+ depends on ARM64
+ help
+	  Provides an interface to read the firmware boot logs. Firmware boot
+	  logs are saved in memory that is accessible from the kernel. Enabling
+	  this option allows the user to dump the firmware boot logs.
+
+	  Say Y here if you want to dump the firmware boot logs from the Linux
+	  shell. This option is available only for CN10K platforms.
+
+config MARVELL_CN10K_EINJ
+ tristate "CN10K ECC Injection"
+ depends on OCTEONTX2_SDEI_GHES
+ help
+ Provides support for error injection to CN10K
+ Pseudo-fault Generation processor error,
+ dram memory subsystem (DSS).
+ This facilitates testing of the memory controller RAS features and
+ is intended to be used by test personnel when conducting system tests.
+
+ To compile this as a module, choose M here. The module will be
+ called cn10k-einj.
+ Unless testing, say N here.
+
+
+endmenu
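To make the create_domain syntax in the OCTEONTX2_RM_DOM_SYSFS help above concrete, here is a small user-space sketch that writes one domain specification and reads back its domain_id. The PCI addresses and the LF limits are placeholders; only the path layout and the "name;param:val" format come from the help text.

/* Sketch only: sysfs paths follow the layout described in the
 * OCTEONTX2_RM_DOM_SYSFS help text; device addresses and limits are
 * placeholders.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* domain "dom0": 1 SSO, 1 SSOW, 1 NPA, 1 TIM LF and one port */
	const char *spec = "dom0;sso:1;ssow:1;npa:1;tim:1;port:0002:02:00.1";
	char id[32];
	ssize_t n;
	int fd;

	fd = open("/sys/bus/pci/drivers/octeontx2-rm/0002:01:00.0/create_domain",
		  O_WRONLY);
	if (fd < 0)
		return 1;
	if (write(fd, spec, strlen(spec)) < 0)
		perror("create_domain");
	close(fd);

	fd = open("/sys/bus/pci/drivers/octeontx2-rm/0002:01:00.0/dom0/domain_id",
		  O_RDONLY);
	if (fd >= 0) {
		n = read(fd, id, sizeof(id) - 1);
		if (n > 0) {
			id[n] = '\0';
			printf("domain_id: %s", id);
		}
		close(fd);
	}
	return 0;
}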
diff --git a/drivers/soc/marvell/Makefile b/drivers/soc/marvell/Makefile
new file mode 100644
index 000000000000..ebc7f272d6bf
--- /dev/null
+++ b/drivers/soc/marvell/Makefile
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-y += gti/
+obj-y += octeontx2-rm/
+obj-y += octeontx2-dpi/
+obj-y += octeontx2-sdp/
+obj-y += octeontx2-npa/
+obj-y += octeontx2-llc/
+obj-y += octeontx2-ccu/
+obj-y += hw-access/
+obj-y += octeontx2-serdes/
+obj-y += cn10ka-swup/
+obj-$(CONFIG_OCTEONTX_INFO) += octeontx_info.o
+obj-$(CONFIG_MVMDIO_UIO) += mvmdio_uio.o
+obj-$(CONFIG_MRVL_PHY_DIAGNOSTICS) += phy_diag.o
+obj-$(CONFIG_MARVELL_CN10K_SERDES_DIAGNOSTICS) += cn10k_serdes_diag.o
+obj-$(CONFIG_MARVELL_CN10K_MAC_MGMT) += marvell_mac_mgmt.o
+obj-$(CONFIG_MARVELL_CN10K_RPRAM) += cn10k-rpram.o
+obj-$(CONFIG_OCTEONTX2_SDEI_GHES) += octeontx2-ghes/
+obj-$(CONFIG_OCTEONTX2_PCI_CONSOLE) += octeontx2-pcicons/
+obj-$(CONFIG_MARVELL_CN10K_FWLOG) += cn10k-fwlog.o
+obj-$(CONFIG_MARVELL_CN10K_EINJ) += cn10k-einj.o
+
diff --git a/drivers/soc/marvell/cn10k-einj.c b/drivers/soc/marvell/cn10k-einj.c
new file mode 100644
index 000000000000..8a4a53d003cc
--- /dev/null
+++ b/drivers/soc/marvell/cn10k-einj.c
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Marvell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/arm-smccc.h>
+
+#define PLAT_OCTEONTX_INJECT_ERROR (0xc2000b10)
+
+#define PLAT_OCTEONTX_EINJ_DSS (0xd)
+
+#define EINJ_MAX_PARAMS 7
+
+static int einj_setup(const char *val, const struct kernel_param *kp);
+
+static const struct kernel_param_ops einj_ops = {
+ .set = einj_setup,
+ .get = param_get_ullong,
+};
+
+static u64 params[EINJ_MAX_PARAMS];
+module_param_cb(smc, &einj_ops, &params, 0644);
+MODULE_PARM_DESC(smc, "Setup error injection parameters "
+ " 0xd: Injecting error to DSS controller"
+ " address: Physical Address to corrupt"
+ " flags:"
+ " [0:7] bit position to corrupt"
+ " [8] error type 0 = DED (double), 1 = SEC (single)"
+ " echo \"0xd,0x3fffff000,0x101\" > /sys/module/cn10k_einj/parameters/smc");
+
+static int einj_setup(const char *val, const struct kernel_param *kp)
+{
+ struct arm_smccc_res res;
+ char *str = (char *) val;
+ int rc = 0;
+ int i = 0;
+
+ if (!str)
+ return -EINVAL;
+
+ for (i = 0; i < EINJ_MAX_PARAMS; i++)
+ params[i] = 0;
+
+ for (i = 0; i < EINJ_MAX_PARAMS && *str; i++) {
+
+ int len = strcspn(str, ",");
+ char *nxt = len ? str + len + 1 : "";
+
+ if (len)
+ str[len] = '\0';
+
+ rc = kstrtoull(str, 0, &params[i]);
+
+ pr_debug("%s: (%s/%s) smc_params[%d]=%llx e?%d\n", __func__, str, nxt,
+ i, params[i], rc);
+ if (len)
+ str[len] = ',';
+ str = nxt;
+ }
+
+ switch (params[0]) {
+ case PLAT_OCTEONTX_EINJ_DSS:
+ params[3] = params[2];
+ params[2] >>= 8;
+ params[2] &= 1;
+ params[3] &= 0xFF;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ pr_debug("%s %llx %llx %llx %llx %llx %llx %llx\n", __func__, params[0],
+ params[1], params[2], params[3], params[4], params[5], params[6]);
+
+ arm_smccc_smc(PLAT_OCTEONTX_INJECT_ERROR, params[0], params[1], params[2],
+ params[3], params[4], params[5], params[6], &res);
+
+ if (kp)
+ WRITE_ONCE(*(ulong *)kp->arg, res.a0);
+
+ return 0;
+}
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Marvell Ink");
+MODULE_DESCRIPTION("Marvell CN10K ECC Injector");
diff --git a/drivers/soc/marvell/cn10k-fwlog.c b/drivers/soc/marvell/cn10k-fwlog.c
new file mode 100644
index 000000000000..745bafe736d1
--- /dev/null
+++ b/drivers/soc/marvell/cn10k-fwlog.c
@@ -0,0 +1,207 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+
+#define DEFAULT_FWLOG_MEMBASE (48 * 1024 * 1024)
+#define DEFAULT_FWLOG_MEMSIZE (4 * 1024 * 1024)
+
+static uint64_t fwlog_mem_base, fwlog_mem_size;
+static char *fwlog_buf, *fwlog_mem;
+static int dev_major;
+
+struct fw_logbuf_header {
+ uint64_t fwlog_base;
+ uint64_t fwlog_end;
+ uint64_t fwlog_ptr;
+ uint64_t wraparound;
+} __packed;
+
+static ssize_t fwlogs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
+{
+ ssize_t ret = 0;
+ char *rdbuf;
+ int rdlen;
+ struct fw_logbuf_header *fwlog_hdr = (struct fw_logbuf_header *) fwlog_mem;
+
+ if (!fwlog_hdr->wraparound) {
+ rdlen = fwlog_hdr->fwlog_ptr - fwlog_hdr->fwlog_base;
+ if (*ppos >= rdlen)
+ return 0;
+ rdbuf = fwlog_buf + *ppos;
+ } else {
+		/* If the buffer has wrapped around, rdlen is at most the full buffer size */
+ if (*ppos >= (fwlog_hdr->fwlog_end - fwlog_hdr->fwlog_base))
+ return 0;
+
+ rdlen = fwlog_hdr->fwlog_end - fwlog_hdr->fwlog_base - *ppos;
+ if (rdlen <= 0)
+ return 0;
+
+ /* adjust the bytes left to read */
+		if ((fwlog_hdr->fwlog_ptr + *ppos) == fwlog_hdr->fwlog_end)
+ rdbuf = fwlog_buf;
+ else
+ rdbuf = (char *)(fwlog_buf +
+ (fwlog_hdr->fwlog_ptr - fwlog_hdr->fwlog_base) +
+ *ppos);
+
+ if ((uint64_t) (*ppos + rdlen) > (fwlog_hdr->fwlog_end - fwlog_hdr->fwlog_base))
+ rdlen = fwlog_hdr->fwlog_end - (fwlog_hdr->fwlog_ptr + *ppos);
+ }
+
+ count = min_t(size_t, count, rdlen);
+ count = min_t(ssize_t, count, PAGE_SIZE);
+
+ if (copy_to_user(buf, rdbuf, count))
+ return -EFAULT;
+
+ *ppos += count;
+
+ return count;
+}
+
+static int fwlogs_open(struct inode *inode, struct file *filep)
+{
+ struct fw_logbuf_header *fwlog_hdr;
+
+ fwlog_mem = memremap(fwlog_mem_base, fwlog_mem_size, MEMREMAP_WB);
+ if (!fwlog_mem) {
+ pr_err("Could not map FWLOG Memory\n");
+ return -ENOMEM;
+ }
+
+ fwlog_hdr = (struct fw_logbuf_header *) fwlog_mem;
+ fwlog_buf = fwlog_mem + sizeof(struct fw_logbuf_header);
+
+ return 0;
+}
+
+static int fwlogs_release(struct inode *inode, struct file *filep)
+{
+	if (fwlog_mem)
+		memunmap((void *) fwlog_mem);
+
+ return 0;
+}
+
+const struct file_operations fwlogs_ops = {
+ .open = fwlogs_open,
+ .read = fwlogs_read,
+ .release = fwlogs_release,
+};
+
+struct fwifdev {
+ const char *name;
+ umode_t mode;
+ const struct file_operations *fops;
+};
+
+static const struct fwifdev fwlog_dev = {
+ .name = "fwlogs",
+ .mode = 0644,
+ .fops = &fwlogs_ops,
+};
+
+static int fwlog_dev_open(struct inode *inode, struct file *filp)
+{
+ int minor;
+ const struct fwifdev *dev;
+
+ minor = iminor(inode);
+ if (minor < 0)
+ return -ENXIO;
+
+ dev = &fwlog_dev;
+ filp->f_op = dev->fops;
+
+ if (dev->fops->open)
+ return dev->fops->open(inode, filp);
+
+ return 0;
+}
+
+static const struct file_operations fwlog_dev_fops = {
+ .open = fwlog_dev_open,
+ .llseek = noop_llseek,
+};
+
+static char *fwlog_devnode(struct device *dev, umode_t *mode)
+{
+ if (mode && fwlog_dev.mode)
+ *mode = fwlog_dev.mode;
+ return NULL;
+}
+
+static struct class *fwif_class;
+
+static int __init fwlog_dev_init(void)
+{
+ struct device_node *parent, *node;
+
+ dev_major = register_chrdev(0, "fwif", &fwlog_dev_fops);
+
+ parent = of_find_node_by_path("/reserved-memory");
+ if (!parent) {
+ unregister_chrdev(dev_major, "fwif");
+ } else {
+ for_each_child_of_node(parent, node) {
+ const __be32 *prop;
+ u64 size;
+
+ if (of_node_name_prefix(node, "fwlogs")) {
+ prop = of_get_property(node, "reg", NULL);
+ if (!prop)
+ break;
+ fwlog_mem_base = be32_to_cpu(prop[1]) |
+ (unsigned long long)be32_to_cpu(prop[0]) << 32;
+ size = be32_to_cpu(prop[3]) |
+ (unsigned long long)be32_to_cpu(prop[2]) << 32;
+ if (fwlog_mem_base == 0ULL)
+ fwlog_mem_base = DEFAULT_FWLOG_MEMBASE;
+
+ if (size == 0ULL)
+ size = DEFAULT_FWLOG_MEMSIZE;
+
+				fwlog_mem_size = size;	/* 'reg' size is in bytes */
+ }
+ }
+ }
+
+ fwif_class = class_create(THIS_MODULE, "fwif");
+ if (IS_ERR(fwif_class)) {
+ unregister_chrdev(dev_major, "fwif");
+ return PTR_ERR(fwif_class);
+ }
+
+ fwif_class->devnode = fwlog_devnode;
+
+ device_create(fwif_class, NULL, MKDEV(dev_major, 0),
+ NULL, fwlog_dev.name);
+
+ return 0;
+}
+
+static void __exit fwlog_dev_exit(void)
+{
+ unregister_chrdev(dev_major, "fwif");
+}
+
+module_init(fwlog_dev_init);
+module_exit(fwlog_dev_exit);
+
+MODULE_DESCRIPTION("Marvell CN10K firmware boot log interface");
+MODULE_LICENSE("GPL v2");
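The driver above exposes the log buffer through a character device registered under the "fwif" class; with typical udev rules this shows up as /dev/fwlogs (an assumption based on fwlog_dev.name). A minimal sketch of dumping it from user space:

/* Minimal sketch: stream the firmware boot log from the character device.
 * The /dev/fwlogs node name is an assumption, not defined by this patch.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd;

	fd = open("/dev/fwlogs", O_RDONLY);
	if (fd < 0) {
		perror("open /dev/fwlogs");
		return 1;
	}

	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);

	close(fd);
	return 0;
}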
diff --git a/drivers/soc/marvell/cn10k-rpram.c b/drivers/soc/marvell/cn10k-rpram.c
new file mode 100644
index 000000000000..37f30426862d
--- /dev/null
+++ b/drivers/soc/marvell/cn10k-rpram.c
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/arm-smccc.h>
+#include <soc/marvell/octeontx/octeontx_smc.h>
+
+/* Minimum size in MB; 0 means the region is disabled.
+ * 16 MB is the minimum size when it is enabled.
+ */
+#define MIN_USERDEF_PRESERVE_MEMSZ 0
+
+/* Maximum size is 1GB */
+#define MAX_USERDEF_PRESERVE_MEMSZ 1024
+
+/* Region size must be multiples of 16 MB */
+#define PRESERVE_MEMSZ_ALIGN 16
+
+/* SMC function id to check the platform type is CN10K */
+#define ARM_SMC_SVC_UID 0xc200ff01
+/* SMC function id to update persistent memory */
+#define PLAT_OCTEONTX_PERSIST_DATA_COMMAND 0xc2000b0d
+/* Arg 0: UPDATE_USERDEF_PRESERVE_MEMSZ, Update user defined
+ * preserve memory size
+ * Arg 1: Size of the preserved memory size
+ */
+#define UPDATE_USERDEF_PRESERVE_MEMSZ 1
+
+static u32 current_rpram_size;
+static u32 nextboot_rpram_size;
+static u64 current_rpram_base;
+static struct dentry *preserve_mem_root;
+static const size_t len = PAGE_SIZE;
+
+static ssize_t cn10k_rpram_info_read(struct file *f, char __user *user_buf,
+ size_t count, loff_t *off)
+{
+ ssize_t out, pos = 0;
+ unsigned long addr = get_zeroed_page(GFP_KERNEL);
+ char *buf = (char *)addr;
+
+ if (!buf)
+ return -ENOMEM;
+
+ pos += snprintf(buf+pos, len - pos, "RPRAM size %d MB @0x%llx\n",
+ current_rpram_size, (unsigned long long) current_rpram_base);
+
+ out = simple_read_from_buffer(user_buf, count, off,
+ buf, pos);
+
+ free_page(addr);
+ return out;
+}
+
+static ssize_t cn10k_rpram_info_write(struct file *f, const char __user *user_buf,
+ size_t count, loff_t *off)
+{
+	return count;
+}
+
+static ssize_t cn10k_rpram_config_read(struct file *f, char __user *user_buf,
+ size_t count, loff_t *off)
+{
+ ssize_t out, pos = 0;
+ unsigned long addr = get_zeroed_page(GFP_KERNEL);
+ char *buf = (char *)addr;
+
+ if (!buf)
+ return -ENOMEM;
+
+ pos += snprintf(buf+pos, len - pos, "Next boot RPRAM size %d MB\n",
+ nextboot_rpram_size);
+
+ out = simple_read_from_buffer(user_buf, count, off,
+ buf, pos);
+
+ free_page(addr);
+ return out;
+}
+
+static ssize_t cn10k_rpram_config_write(struct file *f, const char __user *user_buf,
+ size_t count, loff_t *off)
+{
+ struct arm_smccc_res res;
+ unsigned long function_id, arg0, arg1;
+ int value;
+ ssize_t rc;
+ char buf[20];
+
+ if (count > sizeof(buf) - 1)
+ goto ret_err;
+
+ if (copy_from_user(buf, user_buf, count))
+ goto ret_err;
+
+ buf[count] = 0;
+
+	rc = kstrtoint(buf, 10, &value);
+ if (rc)
+ return rc;
+
+	/* Size must be 0-1024 MB and a multiple of 16 MB */
+	if ((value < MIN_USERDEF_PRESERVE_MEMSZ) || (value > MAX_USERDEF_PRESERVE_MEMSZ) ||
+	    (value % PRESERVE_MEMSZ_ALIGN))
+		goto ret_err;
+
+ nextboot_rpram_size = value;
+
+ function_id = PLAT_OCTEONTX_PERSIST_DATA_COMMAND;
+ arg0 = UPDATE_USERDEF_PRESERVE_MEMSZ;
+ arg1 = nextboot_rpram_size;
+
+ /* Secure firmware call to update the size of user defined memory */
+ arm_smccc_smc(function_id, arg0, arg1, 0, 0, 0, 0, 0, &res);
+ return count;
+
+ret_err:
+	pr_err("Invalid size: must be 0-1024 MB in multiples of 16 MB\n");
+ return -EINVAL;
+}
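+
+/* Illustrative usage (assuming debugfs is mounted at /sys/kernel/debug):
+ *
+ *   # reserve 64 MB of RPRAM for the next boot
+ *   echo 64 > /sys/kernel/debug/rpram/rpram_config_szMB
+ *
+ *   # show the size configured for the next boot
+ *   cat /sys/kernel/debug/rpram/rpram_config_szMB
+ *
+ *   # show the currently active RPRAM region
+ *   cat /sys/kernel/debug/rpram/rpram_info
+ */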
+
+static const struct file_operations rpram_config_fops = {
+ .read = cn10k_rpram_config_read,
+ .write = cn10k_rpram_config_write,
+};
+
+static const struct file_operations rpram_currentsz_ops = {
+ .read = cn10k_rpram_info_read,
+ .write = cn10k_rpram_info_write,
+};
+
+/* module init */
+static int __init cn10k_rpram_init(void)
+{
+ struct dentry *root, *entry;
+ struct device_node *parent, *node;
+ int ret;
+
+ ret = octeontx_soc_check_smc();
+
+ if (ret != 2) {
+ pr_debug("%s: Not supported\n", __func__);
+ return -EPERM;
+ }
+
+ parent = of_find_node_by_path("/reserved-memory");
+ if (!parent) {
+ current_rpram_size = 0;
+ } else {
+ for_each_child_of_node(parent, node) {
+ const __be32 *prop;
+ u64 size;
+
+ if (of_node_name_prefix(node, "user-def")) {
+ prop = of_get_property(node, "reg", NULL);
+ if (!prop)
+ break;
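+				/* Assumes two address cells and two size
+				 * cells in "reg": prop[0]/prop[1] form the
+				 * base, prop[2]/prop[3] the size in bytes.
+				 */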
+ current_rpram_base = be32_to_cpu(prop[1]) |
+ (unsigned long long)be32_to_cpu(prop[0]) << 32;
+ size = be32_to_cpu(prop[3]) |
+ (unsigned long long)be32_to_cpu(prop[2]) << 32;
+ current_rpram_size = size / 1024 / 1024;
+ }
+ }
+ }
+
+ /* root directory : rpram */
+ root = debugfs_create_dir("rpram", NULL);
+ if (!root) {
+ pr_err("rpram debugfs creation failed\n");
+ return -ENOMEM;
+ }
+
+ preserve_mem_root = root;
+ nextboot_rpram_size = current_rpram_size;
+
+ /* root/preserve_memsz_inMB creation */
+ entry = debugfs_create_file("rpram_config_szMB", 0600, root,
+ &nextboot_rpram_size, &rpram_config_fops);
+
+ if (!entry) {
+ pr_err("rpram->rpram_config_szMB debugfs file creation failed\n");
+ debugfs_remove_recursive(preserve_mem_root);
+ preserve_mem_root = NULL;
+ return -ENOMEM;
+ }
+
+ entry = debugfs_create_file("rpram_info", 0444, root,
+ &current_rpram_size, &rpram_currentsz_ops);
+
+ if (!entry) {
+ pr_err("rpram->rpram_info debugfs file creation failed\n");
+ debugfs_remove_recursive(preserve_mem_root);
+ preserve_mem_root = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/* module exit */
+static void __exit cn10k_rpram_exit(void)
+{
+ if (preserve_mem_root != NULL)
+ debugfs_remove_recursive(preserve_mem_root);
+}
+
+module_init(cn10k_rpram_init);
+module_exit(cn10k_rpram_exit);
+
+MODULE_DESCRIPTION("Marvell driver for managing rpram");
+MODULE_AUTHOR("Jayanthi Annadurai <jannadurai@marvell.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/marvell/cn10k_serdes_diag.c b/drivers/soc/marvell/cn10k_serdes_diag.c
new file mode 100644
index 000000000000..76a71b571e5c
--- /dev/null
+++ b/drivers/soc/marvell/cn10k_serdes_diag.c
@@ -0,0 +1,1490 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Marvell
+ *
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+#include <linux/time.h>
+#include <linux/uaccess.h>
+#include <linux/ioctl.h>
+#include <linux/of.h>
+#include <linux/delay.h>
+#include <soc/marvell/octeontx/octeontx_smc.h>
+
+#define DRV_NAME "cn10k_serdes_diag"
+#define OCTEONTX_SERDES_DBG_GET_MEM 0xc2000d04
+#define PLAT_OCTEONTX_SERDES_DBG_RX_TUNING 0xc2000d05
+#define PLAT_OCTEONTX_SERDES_DBG_TX_TUNING 0xc2000d06
+#define PLAT_OCTEONTX_SERDES_DBG_LOOPBACK 0xc2000d07
+#define PLAT_OCTEONTX_SERDES_DBG_PRBS 0xc2000d08
+#define PLAT_OCTEONTX_SERDES_DBG_RX_TRAINING 0xc2000d09
+#define PLAT_OCTEONTX_SERDES_DBG_NOTIFY_ECP 0xc2000d0a
+
+#define PORT_LANES_MAX 4
+#define PRBS_SHOW_HEADER \
+ "port#:\tlane#:\tgserm#:\tg-lane#:\tlocked:\ttotal_bits:\terror_bits:\n"
+
+
+#define DEFINE_ATTRIBUTE(__name) \
+static int __name ## _open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, __name ## _read, inode->i_private); \
+} \
+ \
+static const struct file_operations __name ## _fops = { \
+ .owner = THIS_MODULE, \
+ .open = __name ## _open, \
+ .read = seq_read, \
+ .write = __name ## _write, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+}
+
+#define DEFINE_STR_2_ENUM_FUNC(_conv_arr) \
+static inline int _conv_arr ## _str2enum(const char *str) \
+{ \
+ size_t idx; \
+ size_t len = ARRAY_SIZE(_conv_arr); \
+ \
+ if (!str) \
+ return -1; \
+ \
+ for (idx = 0; idx < len; idx++) { \
+ if (!strcmp(_conv_arr[idx].s, str)) \
+ return _conv_arr[idx].e; \
+ } \
+ \
+ return -1; \
+}
+
+#define DEFINE_ENUM_2_STR_FUNC(_conv_arr) \
+static inline const char *_conv_arr ## _enum2str(int val) \
+{ \
+ size_t idx; \
+ size_t len = ARRAY_SIZE(_conv_arr); \
+ \
+ for (idx = 0; idx < len; idx++) { \
+ if (_conv_arr[idx].e == val) \
+ return _conv_arr[idx].s; \
+ } \
+ \
+ return NULL; \
+}
+
+#define BUF_SZ 64
+static struct dentry *serdes_dbgfs_root;
+static char *serdes_tuning_shmem;
+static char *prbs_shmem;
+
+#define PRBS_PRMS_MAX 6
+enum prbs_subcmd {
+ PRBS_START,
+ PRBS_SHOW,
+ PRBS_CLEAR,
+ PRBS_STOP,
+ PRBS_INJECT
+};
+
+static struct {
+ enum prbs_subcmd e;
+ const char *s;
+} prbs_subcmd[] = {
+ {PRBS_START, "start"},
+ {PRBS_SHOW, "show"},
+ {PRBS_CLEAR, "clear"},
+ {PRBS_STOP, "stop"},
+ {PRBS_INJECT, "inject"}
+};
+
+DEFINE_STR_2_ENUM_FUNC(prbs_subcmd)
+
+enum prbs_optcmd {
+ PRBS_CHECKER,
+ PRBS_GENERATOR,
+ PRBS_BOTH
+};
+
+static struct {
+ enum prbs_optcmd e;
+ const char *s;
+} prbs_optcmd[] = {
+ {PRBS_CHECKER, "check"},
+ {PRBS_GENERATOR, "gen"},
+ {PRBS_BOTH, "both"},
+};
+
+DEFINE_STR_2_ENUM_FUNC(prbs_optcmd)
+
+#define PAM4_PATTERN(_p) (_p << 8)
+
+enum prbs_pattern {
+ PRBS_1T = 1,
+ PRBS_2T = 2,
+ PRBS_4T = 4,
+ PRBS_5T = 5,
+
+ PRBS_7 = 7,
+ PRBS_9 = 9,
+ PRBS_10T = 10,
+ PRBS_11 = 11,
+ PRBS_15 = 15,
+ PRBS_16 = 16,
+ PRBS_23 = 23,
+ PRBS_31 = 31,
+ PRBS_32 = 32,
+ PRBS_SSPRQ,
+ PRBS_K28_5,
+ PRBS_31Q,
+
+ PRBS_11_0 = PAM4_PATTERN(11),
+ PRBS_11_1,
+ PRBS_11_2,
+ PRBS_11_3,
+
+ PRBS_13_0 = PAM4_PATTERN(13),
+ PRBS_13_1,
+ PRBS_13_2,
+ PRBS_13_3,
+};
+
+#define PRBS(_p) {PRBS_ ## _p, #_p}
+
+static struct {
+ enum prbs_pattern e;
+ const char *s;
+} prbs_pattern[] = {
+ PRBS(1T),
+ PRBS(2T),
+ PRBS(4T),
+ PRBS(5T),
+
+ PRBS(7),
+ PRBS(9),
+ PRBS(10T),
+ PRBS(11),
+ PRBS(15),
+ PRBS(16),
+ PRBS(23),
+ PRBS(31),
+ PRBS(32),
+ PRBS(SSPRQ),
+ PRBS(K28_5),
+ PRBS(31Q),
+
+ PRBS(11_0),
+ PRBS(11_1),
+ PRBS(11_2),
+ PRBS(11_3),
+ PRBS(13_0),
+ PRBS(13_1),
+ PRBS(13_2),
+ PRBS(13_3),
+};
+DEFINE_STR_2_ENUM_FUNC(prbs_pattern)
+DEFINE_ENUM_2_STR_FUNC(prbs_pattern)
+
+struct prbs_error_stats {
+ u64 total_bits;
+ u64 error_bits;
+ int locked;
+};
+
+struct prbs_stats {
+ struct prbs_error_stats error_stats[PORT_LANES_MAX];
+ int gen_pattern;
+ int check_pattern;
+};
+
+struct prbs_cmd_params {
+ int port;
+ int lane_idx;
+ int subcmd;
+ int gen_pattern;
+ int check_pattern;
+ int inject_cnt;
+};
+
+#define LPBK_PRMS_MAX 2
+
+enum lpbk_type {
+ LPBK_TYPE_NONE = 0,
+ LPBK_TYPE_NEA,
+ LPBK_TYPE_NED,
+ LPBK_TYPE_FED
+};
+
+static const char *const lpbk_type[] = {
+ "No Loopback",
+ "NEA",
+ "NED",
+ "FED"
+};
+
+struct lpbk_cmd_params {
+ int port;
+ int type;
+};
+
+#define TX_EQ_PRMS_MAX 16
+
+enum tx_param {
+ TX_PARAM_PRE2,
+ TX_PARAM_PRE1,
+ TX_PARAM_POST,
+ TX_PARAM_MAIN,
+ TX_POLARITY,
+ TX_GRAY_CODE,
+ TX_PRE_CODE,
+};
+
+struct tx_eq_params {
+ u16 pre2;
+ u16 pre1;
+ u16 post;
+ u16 main;
+ int polarity;
+ int gray_code;
+ int pre_code;
+ int tx_idle;
+};
+
+static struct {
+ enum tx_param e;
+ const char *s;
+} tx_param[] = {
+ {TX_PARAM_PRE2, "pre2"},
+ {TX_PARAM_PRE1, "pre1"},
+ {TX_PARAM_POST, "post"},
+ {TX_PARAM_MAIN, "main"},
+ {TX_POLARITY, "polarity"},
+ {TX_GRAY_CODE, "graycode"},
+ {TX_PRE_CODE, "precode"},
+};
+
+DEFINE_STR_2_ENUM_FUNC(tx_param)
+
+static struct tx_eq_cmd_params {
+ int port;
+ int lane_idx;
+ int update;
+ u32 pre2_pre1;
+ u32 post_main;
+ u32 flags;
+} tx_eq_cmd;
+
+#define RX_EQ_PRMS_MAX 8
+
+static struct rx_eq_cmd_params {
+ int port;
+ int lane_idx;
+ int update;
+ u32 flags;
+} rx_eq_cmd;
+
+enum rx_param {
+ RX_POLARITY,
+ RX_GRAY_CODE,
+ RX_PRE_CODE,
+ RX_INIT,
+};
+
+static struct {
+ enum rx_param e;
+ const char *s;
+} rx_param[] = {
+ {RX_POLARITY, "polarity"},
+ {RX_GRAY_CODE, "graycode"},
+ {RX_PRE_CODE, "precode"},
+ {RX_INIT, "init"},
+};
+
+DEFINE_STR_2_ENUM_FUNC(rx_param)
+
+#define DFE_TAPS_NUM 24
+#define CTLE_PARAMS_NUM 13
+
+const char *dfe_taps_names[] = {
+ "dfe_dc:\t\t",
+ "dfe_vre:\t",
+ "dfe_f0:\t\t",
+ "dfe_f1:\t\t",
+ "dfe_f2:\t\t",
+ "dfe_f3:\t\t",
+ "dfe_f4:\t\t",
+ "dfe_f5:\t\t",
+ "dfe_f6:\t\t",
+ "dfe_f7:\t\t",
+ "dfe_f8:\t\t",
+ "dfe_f9:\t\t",
+ "dfe_f10:\t",
+ "dfe_f11:\t",
+ "dfe_f12:\t",
+ "dfe_f13:\t",
+ "dfe_f14:\t",
+ "dfe_f15:\t",
+ "dfe_ff0:\t",
+ "dfe_ff1:\t",
+ "dfe_ff2:\t",
+ "dfe_ff3:\t",
+ "dfe_ff4:\t",
+ "dfe_ff5:\t",
+};
+
+const char *ctle_params_names[] = {
+ "ctle_current1_sel:\t",
+ "ctle_rl1_sel:\t\t",
+ "ctle_rl1_extra:\t\t",
+ "ctle_res1_sel:\t\t",
+ "ctle_cap1_sel:\t\t",
+ "ctle_en_mid_freq:\t",
+ "ctle_cs1_mid:\t\t",
+ "ctle_rs1_mid:\t\t",
+ "ctle_current2_sel:\t",
+ "ctle_rl2_sel:\t\t",
+ "ctle_rl2_tune_g:\t",
+ "ctle_res2_sel:\t\t",
+ "ctle_cap2_sel:\t\t",
+};
+
+struct rx_eq_params {
+ s32 dfe_taps[DFE_TAPS_NUM];
+ u32 ctle_params[CTLE_PARAMS_NUM];
+ int polarity;
+ int gray_code;
+ int pre_code;
+ int squelch_detected;
+};
+
+#define RX_TR_PRMS_MAX 2
+
+struct rx_tr_cmd_params {
+ int port;
+ int lane_idx;
+};
+
+enum rx_tr_subcmd {
+ RX_TR_START,
+ RX_TR_CHECK,
+ RX_TR_STOP
+};
+
+enum ecp_notify_prbs_loopback_mode {
+ ECP_NOTIFY_LOOPBACK_NO_LOOPBACK,
+ ECP_NOTIFY_LOOPBACK_NEA,
+ ECP_NOTIFY_LOOPBACK_NED,
+ ECP_NOTIFY_LOOPBACK_FED,
+ ECP_NOTIFY_PRBS_MODE_GEN_ENA,
+ ECP_NOTIFY_PRBS_MODE_CHECK_ENA,
+ ECP_NOTIFY_PRBS_MODE_GEN_CHECK_ENA,
+ ECP_NOTIFY_PRBS_MODE_GEN_DIS,
+ ECP_NOTIFY_PRBS_MODE_CHECK_DIS,
+ ECP_NOTIFY_PRBS_MODE_GEN_CHECK_DIS
+};
+
+#define IOCTL_SEND_ECP_NOTIFICATION _IOWR('a', 'a', int)
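+/* The ioctl argument encodes the target in a single int (see notify_ioctl()):
+ * bits [15:8] carry the portm index and bits [7:0] the
+ * ecp_notify_prbs_loopback_mode event to report.
+ */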
+
+static int copy_input_str(const char __user *buffer, size_t count,
+ char *cmd_buf, size_t buf_sz)
+{
+ char *s;
+ size_t cnt;
+
+ cnt = (count >= buf_sz - 1) ? buf_sz - 1 : count;
+
+ if (copy_from_user(cmd_buf, buffer, cnt))
+ return -EFAULT;
+
+ cmd_buf[cnt] = '\0';
+
+ s = strchr(cmd_buf, '\n');
+ if (s)
+ *s = '\0';
+
+ return 0;
+}
+
+static int tokenize_input(char *cmd_str, size_t *argc,
+ const char *argv[], size_t tokens_max)
+{
+ char *token, *endp;
+ int idx = 0;
+
+ endp = strim(cmd_str);
+
+ while (endp && idx < tokens_max) {
+ endp = skip_spaces(endp);
+ token = strsep(&endp, " \t");
+
+ if (token && *token)
+ argv[idx++] = token;
+ }
+
+ *argc = idx;
+ return (idx == tokens_max && endp) ? -1 : 0;
+}
+
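+/* Unpack the GSERM descriptor word returned by firmware:
+ * bits [31:24] GSERM index, bits [23:8] per-lane mapping
+ * (4 bits per lane), bits [7:0] number of lanes.
+ */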
+static inline void get_gserm_data(int res, int *gserm_idx,
+ int *mapping, int *lanes_num)
+{
+ *gserm_idx = (res >> 24) & 0xff;
+ *mapping = (res >> 8) & 0xffff;
+ *lanes_num = res & 0xff;
+}
+
+static int serdes_dbg_rx_eq_read(struct seq_file *s, void *unused)
+{
+ struct arm_smccc_res res;
+ int x1, port, lane_idx, max_idx;
+ struct rx_eq_params *rx_eq_params;
+ int lanes_num, gserm_idx, mapping;
+
+ seq_puts(s, "SerDes Rx Tuning Parameters:\n");
+ seq_puts(s, "port#:\tlane#:\tgserm#:\tg-lane#:\n");
+
+ port = rx_eq_cmd.port;
+ lane_idx = rx_eq_cmd.lane_idx;
+
+ x1 = (lane_idx << 8) | port;
+
+ arm_smccc_smc(PLAT_OCTEONTX_SERDES_DBG_RX_TUNING, x1, 0,
+ 0, 0, 0, 0, 0, &res);
+ if (res.a0) {
+ pr_warn("Reading Rx Tuning parameters failed\n");
+ return 0;
+ }
+
+ get_gserm_data(res.a2, &gserm_idx, &mapping, &lanes_num);
+ if (lanes_num > PORT_LANES_MAX) {
+ pr_warn("lanes num exceeds PORT_LANES_MAX (4)\n");
+ return 0;
+ }
+
+ rx_eq_params = (struct rx_eq_params *)serdes_tuning_shmem;
+
+ if (lane_idx == 0xff) {
+ lane_idx = 0;
+ max_idx = lanes_num;
+ } else {
+ max_idx = lane_idx + 1;
+ }
+
+ for (; lane_idx < max_idx; lane_idx++) {
+ int idx;
+ int glane = (mapping >> 4 * lane_idx) & 0xf;
+
+ seq_printf(s, "%d\t%d\t%d\t%d\n", port, lane_idx,
+ gserm_idx, glane);
+
+ for (idx = 0; idx < DFE_TAPS_NUM; idx++)
+ seq_printf(s, "\t\t%s%d\n", dfe_taps_names[idx],
+ rx_eq_params[lane_idx].dfe_taps[idx]);
+
+ seq_puts(s, "\n");
+ for (idx = 0; idx < CTLE_PARAMS_NUM; idx++)
+ seq_printf(s, "\t\t%s%d\n", ctle_params_names[idx],
+ rx_eq_params[lane_idx].ctle_params[idx]);
+
+ seq_printf(s, "\n\n\t\trx polarity:\t%d\n",
+ rx_eq_params[lane_idx].polarity);
+
+ seq_printf(s, "\t\trx gray code:\t%d\n",
+ rx_eq_params[lane_idx].gray_code);
+
+ seq_printf(s, "\t\trx pre code:\t%d\n",
+ rx_eq_params[lane_idx].pre_code);
+
+ seq_printf(s, "\n\t\t%s detected\n",
+ rx_eq_params[lane_idx].squelch_detected ?
+ "Squelch" : "Signal");
+ }
+
+ return 0;
+}
+
+static int parse_rx_eq_params(const char __user *buffer, size_t count,
+ struct rx_eq_cmd_params *params)
+{
+ const char *argv[RX_EQ_PRMS_MAX] = {0};
+ char cmd_buf[BUF_SZ];
+ size_t argc;
+ int port, lane_idx, arg_idx;
+
+ if (copy_input_str(buffer, count, cmd_buf, BUF_SZ))
+ return -EINVAL;
+
+ if (tokenize_input(cmd_buf, &argc, argv, RX_EQ_PRMS_MAX))
+ return -EINVAL;
+
+ if (!argc)
+ return -EINVAL;
+
+ if (kstrtoint(argv[0], 10, &port))
+ return -EINVAL;
+
+ port &= 0xff;
+
+ if (argc > 1 && !kstrtoint(argv[1], 10, &lane_idx)) {
+ if (lane_idx >= PORT_LANES_MAX)
+ return -EINVAL;
+
+ arg_idx = 2;
+ } else {
+ lane_idx = 0xff;
+ arg_idx = 1;
+ }
+
+ params->port = port;
+ params->lane_idx = lane_idx;
+ params->flags = 0;
+
+ if (arg_idx == argc)
+ return 0;
+
+ params->update = 1;
+
+	/* The remaining parameters are optional and come in pairs of
+	 * [name <value>], e.g. [precode <prec>].
+	 * The loop below parses each such pair.
+	 */
+ while (arg_idx < argc) {
+ int param;
+ int value;
+
+ param = rx_param_str2enum(argv[arg_idx]);
+ if (param == -1)
+ return -EINVAL;
+
+ arg_idx++;
+ if (arg_idx == argc || kstrtoint(argv[arg_idx], 0, &value))
+ return -EINVAL;
+
+ value &= 0xffff;
+ arg_idx++;
+
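+		/* Flags word handed to firmware: for precode, graycode
+		 * and polarity the even bit (0, 2, 4) carries the value
+		 * and the odd bit above it (1, 3, 5) marks it as
+		 * supplied; BIT(6) requests Rx init.
+		 */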
+ switch (param) {
+ case RX_PRE_CODE:
+ params->flags |= BIT(1) | (value & 1);
+ break;
+
+ case RX_GRAY_CODE:
+ params->flags |= BIT(3) | (value & 1) << 2;
+ break;
+
+ case RX_POLARITY:
+ params->flags |= BIT(5) | (value & 1) << 4;
+ break;
+
+ case RX_INIT:
+ params->flags |= BIT(6);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
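+
+/* Illustrative usage via debugfs (typically /sys/kernel/debug):
+ *
+ *   # select port 2, lane 0 and read its Rx tuning parameters
+ *   echo "2 0" > /sys/kernel/debug/serdes_diagnostics/rx_params
+ *   cat /sys/kernel/debug/serdes_diagnostics/rx_params
+ *
+ *   # enable the pre-coder on port 2, lane 0
+ *   echo "2 0 precode 1" > /sys/kernel/debug/serdes_diagnostics/rx_params
+ */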
+
+static ssize_t serdes_dbg_rx_eq_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct arm_smccc_res res;
+ int port, lane_idx, max_idx;
+ int lanes_num, gserm_idx, mapping;
+ int x1, x2;
+
+ if (parse_rx_eq_params(buffer, count, &rx_eq_cmd))
+ return -EINVAL;
+
+ port = rx_eq_cmd.port;
+ lane_idx = rx_eq_cmd.lane_idx;
+
+ if (!rx_eq_cmd.update) {
+ if (lane_idx == 0xff)
+ pr_info("Rx Tuning: requested port=%d\n", port);
+ else
+ pr_info("Rx Tuning: requested port=%d, lane_idx=%d\n",
+ port, lane_idx);
+
+ return count;
+ }
+
+ pr_info("SerDes Rx Tuning Parameters:\n");
+ pr_info("port#:\tlane#:\tgserm#:\tg-lane#:\tstatus:\n");
+
+ x1 = (lane_idx << 8) | port;
+ x2 = rx_eq_cmd.flags;
+
+ arm_smccc_smc(PLAT_OCTEONTX_SERDES_DBG_RX_TUNING, x1, x2,
+ 0, 0, 0, 0, 0, &res);
+ if (res.a0) {
+ pr_warn("Writing Rx Tuning parameters failed\n");
+ return count;
+ }
+
+ get_gserm_data(res.a2, &gserm_idx, &mapping, &lanes_num);
+ if (lanes_num > PORT_LANES_MAX) {
+ pr_warn("lanes num exceeds PORT_LANES_MAX (4)\n");
+ return count;
+ }
+
+ rx_eq_cmd.update = 0;
+
+ if (lane_idx == 0xff) {
+ lane_idx = 0;
+ max_idx = lanes_num;
+ } else {
+ max_idx = lane_idx + 1;
+ }
+
+ for (; lane_idx < max_idx; lane_idx++) {
+ int glane = (mapping >> 4 * lane_idx) & 0xf;
+
+ pr_info("%d\t%d\t%d\t%d\t\tUpdated\n",
+ port, lane_idx,
+ gserm_idx, glane);
+ }
+
+ return count;
+}
+DEFINE_ATTRIBUTE(serdes_dbg_rx_eq);
+
+
+static int serdes_dbg_rx_tr_read(struct seq_file *s, void *unused)
+{
+ return 0;
+}
+
+static int parse_rx_tr_params(const char __user *buffer, size_t count,
+ struct rx_tr_cmd_params *params)
+{
+ const char *argv[RX_TR_PRMS_MAX] = {0};
+ char cmd_buf[BUF_SZ];
+ size_t argc;
+ int port, lane_idx;
+
+ if (copy_input_str(buffer, count, cmd_buf, BUF_SZ))
+ return -EINVAL;
+
+ if (tokenize_input(cmd_buf, &argc, argv, RX_TR_PRMS_MAX))
+ return -EINVAL;
+
+ if (argc < 1)
+ return -EINVAL;
+
+ if (kstrtoint(argv[0], 10, &port))
+ return -EINVAL;
+
+ port &= 0xff;
+
+ if (argc > 1) {
+ if (kstrtoint(argv[1], 10, &lane_idx))
+ return -EINVAL;
+
+ if (lane_idx >= PORT_LANES_MAX)
+ return -EINVAL;
+ } else {
+ lane_idx = 0xff;
+ }
+
+ lane_idx &= 0xff;
+
+ params->port = port;
+ params->lane_idx = lane_idx;
+
+ return 0;
+}
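+
+/* Illustrative usage: trigger Rx training on port 2, lane 0 (omit the
+ * lane to train all lanes of the port); results are printed to the
+ * kernel log.
+ *
+ *   echo "2 0" > /sys/kernel/debug/serdes_diagnostics/rx_training
+ */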
+
+static ssize_t serdes_dbg_rx_tr_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ int port, idx, lane_idx, max_idx;
+ struct rx_tr_cmd_params input;
+ int ongoing, failed = 0, tries = 30;
+ int lanes_num, gserm_idx, mapping, glane;
+ struct arm_smccc_res res;
+ s32 x1;
+
+ if (parse_rx_tr_params(buffer, count, &input))
+ return -EINVAL;
+
+ port = input.port;
+ lane_idx = input.lane_idx;
+ x1 = (lane_idx << 8) | port;
+
+ pr_info("SerDes Rx Training:\n");
+ pr_info("port#:\tlane#:\tgserm#:\tg-lane#:\tstatus:\n");
+
+ arm_smccc_smc(PLAT_OCTEONTX_SERDES_DBG_RX_TRAINING,
+ x1, RX_TR_START, 0, 0, 0, 0, 0, &res);
+ if (res.a0) {
+ pr_warn("Triggering Rx Training failed\n");
+ return count;
+ }
+
+ get_gserm_data(res.a1, &gserm_idx, &mapping, &lanes_num);
+ if (lanes_num > PORT_LANES_MAX) {
+ pr_warn("lanes num exceeds PORT_LANES_MAX (4)\n");
+ return count;
+ }
+
+ if (lane_idx == 0xff) {
+ lane_idx = 0;
+ max_idx = lanes_num;
+ ongoing = (1 << lanes_num) - 1;
+ } else {
+ max_idx = lane_idx + 1;
+ ongoing = (1 << lane_idx);
+ }
+
+ while (ongoing && tries--) {
+ msleep(100);
+ for (idx = lane_idx; idx < max_idx; idx++) {
+ int completed, result;
+
+ if (!((ongoing >> idx) & 1))
+ continue;
+
+ x1 = (idx << 8) | port;
+ arm_smccc_smc(PLAT_OCTEONTX_SERDES_DBG_RX_TRAINING,
+ x1, RX_TR_CHECK, 0, 0, 0, 0, 0, &res);
+
+ completed = res.a2 & 1;
+ result = (res.a2 >> 1) & 1;
+
+ if (completed) {
+ ongoing &= ~(1 << idx);
+ if (result)
+ failed |= (1 << idx);
+ }
+ }
+ }
+
+ /* All the lanes that did not complete are
+ * marked as failed.
+ */
+ failed |= ongoing;
+
+ /* For all the lanes that failed to complete
+ * need to call the stop_rx_training explicitly.
+ */
+ for (idx = lane_idx; idx < max_idx; idx++) {
+ if ((ongoing >> idx) & 1) {
+ x1 = (idx << 8) | port;
+ arm_smccc_smc(PLAT_OCTEONTX_SERDES_DBG_RX_TRAINING,
+ x1, RX_TR_STOP, 0, 0, 0, 0, 0, &res);
+ }
+ }
+
+ for (idx = lane_idx; idx < max_idx; idx++) {
+ int res = (failed >> idx) & 1;
+
+ glane = (mapping >> 4 * idx) & 0xf;
+ pr_info("%d\t%d\t%d\t%d\t\t%s\n",
+ port, idx,
+ gserm_idx, glane,
+ res ? "FAILED" : "OK");
+ }
+
+ return count;
+}
+DEFINE_ATTRIBUTE(serdes_dbg_rx_tr);
+
+static int serdes_dbg_tx_eq_read(struct seq_file *s, void *unused)
+{
+ struct arm_smccc_res res;
+ int x1, port, lane_idx, max_idx;
+ struct tx_eq_params *tx_eq_params;
+ int lanes_num, gserm_idx, mapping;
+
+ port = tx_eq_cmd.port;
+ lane_idx = tx_eq_cmd.lane_idx;
+
+ x1 = (lane_idx << 8) | port;
+
+ seq_puts(s, "SerDes Tx Tuning Parameters:\n");
+ seq_puts(s, "port#:\tlane#:\tgserm#:\tg-lane#:\tpre2:\tpre1:\tmain:\tpost:\tpolarity:\tgray code:\tpre code:\ttx_idle:\n");
+
+ arm_smccc_smc(PLAT_OCTEONTX_SERDES_DBG_TX_TUNING, x1, 0,
+ 0, 0, 0, 0, 0, &res);
+ if (res.a0) {
+ pr_warn("Reading Tx Tuning parameters failed\n");
+ return 0;
+ }
+
+ get_gserm_data(res.a2, &gserm_idx, &mapping, &lanes_num);
+ if (lanes_num > PORT_LANES_MAX) {
+ pr_warn("lanes num exceeds PORT_LANES_MAX (4)\n");
+ return 0;
+ }
+
+ tx_eq_params = (struct tx_eq_params *)serdes_tuning_shmem;
+
+ if (lane_idx == 0xff) {
+ lane_idx = 0;
+ max_idx = lanes_num;
+ } else {
+ max_idx = lane_idx + 1;
+ }
+
+ for (; lane_idx < max_idx; lane_idx++) {
+ int glane = (mapping >> 4 * lane_idx) & 0xf;
+
+ seq_printf(s, "%d\t%d\t%d\t%d\t\t%hd\t%hd\t%hd\t%hd\t%d\t\t%d\t\t%d\t\t%d\n",
+ port, lane_idx,
+ gserm_idx, glane,
+ tx_eq_params[lane_idx].pre2,
+ tx_eq_params[lane_idx].pre1,
+ tx_eq_params[lane_idx].main,
+ tx_eq_params[lane_idx].post,
+ tx_eq_params[lane_idx].polarity,
+ tx_eq_params[lane_idx].gray_code,
+ tx_eq_params[lane_idx].pre_code,
+ tx_eq_params[lane_idx].tx_idle);
+ }
+
+ return 0;
+}
+
+static int parse_tx_eq_params(const char __user *buffer, size_t count,
+ struct tx_eq_cmd_params *params)
+{
+ const char *argv[TX_EQ_PRMS_MAX] = {0};
+ char cmd_buf[BUF_SZ];
+ size_t argc;
+ int port, lane_idx, arg_idx;
+
+ if (copy_input_str(buffer, count, cmd_buf, BUF_SZ))
+ return -EINVAL;
+
+ if (tokenize_input(cmd_buf, &argc, argv, TX_EQ_PRMS_MAX))
+ return -EINVAL;
+
+ if (!argc)
+ return -EINVAL;
+
+ if (kstrtoint(argv[0], 10, &port))
+ return -EINVAL;
+
+ port &= 0xff;
+
+ if (argc > 1 && !kstrtoint(argv[1], 10, &lane_idx)) {
+ if (lane_idx >= PORT_LANES_MAX)
+ return -EINVAL;
+
+ arg_idx = 2;
+ } else {
+ lane_idx = 0xff;
+ arg_idx = 1;
+ }
+
+ params->port = port;
+ params->lane_idx = lane_idx;
+ params->flags = 0;
+
+ if (arg_idx == argc)
+ return 0;
+
+ params->update = 1;
+ params->pre2_pre1 = 0;
+ params->post_main = 0;
+
+	/* The remaining parameters are optional and come in pairs of
+	 * [name <value>], e.g. [pre1 <pre1>].
+	 * The loop below parses each such pair.
+	 */
+ while (arg_idx < argc) {
+ int param;
+ int value;
+
+ param = tx_param_str2enum(argv[arg_idx]);
+ if (param == -1)
+ return -EINVAL;
+
+ arg_idx++;
+ if (arg_idx == argc || kstrtoint(argv[arg_idx], 0, &value))
+ return -EINVAL;
+
+ value &= 0xffff;
+ arg_idx++;
+
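+		/* Flags word handed to firmware: BIT(0)..BIT(3) mark
+		 * pre2, pre1, post and main as supplied (their values
+		 * are packed into pre2_pre1/post_main); for precode,
+		 * graycode and polarity the even bit (4, 6, 8) carries
+		 * the value and the odd bit above it (5, 7, 9) marks it
+		 * as supplied.
+		 */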
+ switch (param) {
+ case TX_PARAM_PRE2:
+ params->pre2_pre1 |= value << 16;
+ params->flags |= BIT(0);
+ break;
+
+ case TX_PARAM_PRE1:
+ params->pre2_pre1 |= value;
+ params->flags |= BIT(1);
+ break;
+
+ case TX_PARAM_POST:
+ params->post_main |= value << 16;
+ params->flags |= BIT(2);
+ break;
+
+ case TX_PARAM_MAIN:
+ params->post_main |= value;
+ params->flags |= BIT(3);
+ break;
+
+ case TX_PRE_CODE:
+ params->flags |= BIT(5) | (value & 1) << 4;
+ break;
+
+ case TX_GRAY_CODE:
+ params->flags |= BIT(7) | (value & 1) << 6;
+ break;
+
+ case TX_POLARITY:
+ params->flags |= BIT(9) | (value & 1) << 8;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
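+
+/* Illustrative usage via debugfs:
+ *
+ *   # read Tx tuning parameters of port 2, lane 0
+ *   echo "2 0" > /sys/kernel/debug/serdes_diagnostics/tx_params
+ *   cat /sys/kernel/debug/serdes_diagnostics/tx_params
+ *
+ *   # update pre1/main on port 2, lane 0
+ *   echo "2 0 pre1 4 main 30" > /sys/kernel/debug/serdes_diagnostics/tx_params
+ */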
+
+static ssize_t serdes_dbg_tx_eq_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct arm_smccc_res res;
+ int port, lane_idx, max_idx;
+ int lanes_num, gserm_idx, mapping;
+ int x1, x2, x3, x4;
+
+ if (parse_tx_eq_params(buffer, count, &tx_eq_cmd))
+ return -EINVAL;
+
+ port = tx_eq_cmd.port;
+ lane_idx = tx_eq_cmd.lane_idx;
+
+ if (!tx_eq_cmd.update) {
+ if (lane_idx == 0xff)
+ pr_info("Tx Tuning: requested port=%d\n", port);
+ else
+ pr_info("Tx Tuning: requested port=%d, lane_idx=%d\n",
+ port, lane_idx);
+
+ return count;
+ }
+
+ pr_info("SerDes Tx Tuning Parameters:\n");
+ pr_info("port#:\tlane#:\tgserm#:\tg-lane#:\tstatus:\n");
+
+ x1 = (lane_idx << 8) | port;
+ x2 = tx_eq_cmd.pre2_pre1;
+ x3 = tx_eq_cmd.post_main;
+ x4 = tx_eq_cmd.flags;
+
+ arm_smccc_smc(PLAT_OCTEONTX_SERDES_DBG_TX_TUNING, x1, x2,
+ x3, x4, 0, 0, 0, &res);
+ if (res.a0) {
+ pr_warn("Writing Tx Tuning parameters failed\n");
+ return count;
+ }
+
+ get_gserm_data(res.a2, &gserm_idx, &mapping, &lanes_num);
+ if (lanes_num > PORT_LANES_MAX) {
+ pr_warn("lanes num exceeds PORT_LANES_MAX (4)\n");
+ return count;
+ }
+
+ tx_eq_cmd.update = 0;
+
+ if (lane_idx == 0xff) {
+ lane_idx = 0;
+ max_idx = lanes_num;
+ } else {
+ max_idx = lane_idx + 1;
+ }
+
+ for (; lane_idx < max_idx; lane_idx++) {
+ int glane = (mapping >> 4 * lane_idx) & 0xf;
+
+ pr_info("%d\t%d\t%d\t%d\t\tUpdated\n",
+ port, lane_idx,
+ gserm_idx, glane);
+ }
+
+ return count;
+}
+DEFINE_ATTRIBUTE(serdes_dbg_tx_eq);
+
+static int serdes_dbg_lpbk_read(struct seq_file *s, void *unused)
+{
+ return 0;
+}
+
+static int parse_lpbk_params(const char __user *buffer, size_t count,
+ struct lpbk_cmd_params *params)
+{
+ const char *argv[LPBK_PRMS_MAX] = {0};
+ char cmd_buf[BUF_SZ];
+ size_t argc;
+ int port, type;
+
+ if (copy_input_str(buffer, count, cmd_buf, BUF_SZ))
+ return -EINVAL;
+
+ if (tokenize_input(cmd_buf, &argc, argv, LPBK_PRMS_MAX))
+ return -EINVAL;
+
+ if (argc < 2)
+ return -EINVAL;
+
+ if (kstrtoint(argv[0], 10, &port))
+ return -EINVAL;
+
+ port &= 0xff;
+
+ if (kstrtoint(argv[1], 10, &type))
+ return -EINVAL;
+
+	/* Validate the loopback type against the supported types below */
+ switch (type) {
+ case LPBK_TYPE_NONE:
+ case LPBK_TYPE_NEA:
+ case LPBK_TYPE_NED:
+ case LPBK_TYPE_FED:
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ params->port = port;
+ params->type = type;
+ return 0;
+}
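+
+/* Illustrative usage: the loopback type is numeric
+ * (0 = none, 1 = NEA, 2 = NED, 3 = FED), e.g. to set NEA loopback on
+ * port 2:
+ *
+ *   echo "2 1" > /sys/kernel/debug/serdes_diagnostics/loopback
+ */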
+
+static ssize_t serdes_dbg_lpbk_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ int lane_idx;
+ int lanes_num, gserm_idx, mapping;
+ struct lpbk_cmd_params input;
+ struct arm_smccc_res res;
+ int x1, x2;
+
+ if (parse_lpbk_params(buffer, count, &input))
+ return -EINVAL;
+
+ pr_info("Set SerDes Loopback:\n");
+ pr_info("port#:\tlane#:\tgserm#:\tg-lane#:\ttype:\n");
+
+ x1 = (0xff << 8) | input.port;
+ x2 = input.type;
+
+ arm_smccc_smc(PLAT_OCTEONTX_SERDES_DBG_LOOPBACK, x1, x2,
+ 0, 0, 0, 0, 0, &res);
+ if (res.a0) {
+ pr_warn("Setting SerDes Loopback failed\n");
+ return count;
+ }
+
+ get_gserm_data(res.a1, &gserm_idx, &mapping, &lanes_num);
+ if (lanes_num > PORT_LANES_MAX) {
+ pr_warn("lanes num exceeds PORT_LANES_MAX (4)\n");
+ return count;
+ }
+
+ for (lane_idx = 0; lane_idx < lanes_num; lane_idx++) {
+ int glane = (mapping >> 4 * lane_idx) & 0xf;
+
+ pr_info("%d\t%d\t%d\t%d\t\t%s\n",
+ input.port, lane_idx,
+ gserm_idx, glane,
+ lpbk_type[input.type]);
+ }
+
+ return count;
+}
+DEFINE_ATTRIBUTE(serdes_dbg_lpbk);
+
+
+
+static int serdes_dbg_prbs_read(struct seq_file *s, void *unused)
+{
+ return 0;
+}
+
+static inline int _get_pattern(int argc, const char *argv[], int *arg_idx)
+{
+	int pattern;
+
+	if (*arg_idx >= argc)
+		return -1;
+
+	pattern = prbs_pattern_str2enum(argv[*arg_idx]);
+	(*arg_idx)++;
+
+	return pattern;
+}
+
+static int parse_prbs_params(const char __user *buffer, size_t count,
+ struct prbs_cmd_params *params)
+{
+ const char *argv[PRBS_PRMS_MAX] = {0};
+ char cmd_buf[BUF_SZ];
+ size_t argc;
+ int optcmd, arg_idx;
+
+ if (copy_input_str(buffer, count, cmd_buf, BUF_SZ))
+ return -EINVAL;
+
+ if (tokenize_input(cmd_buf, &argc, argv, PRBS_PRMS_MAX))
+ return -EINVAL;
+
+ if (argc < 2)
+ return -EINVAL;
+
+ params->subcmd = prbs_subcmd_str2enum(argv[0]);
+ if (params->subcmd == -1)
+ return -EINVAL;
+
+ if (kstrtoint(argv[1], 10, &params->port))
+ return -EINVAL;
+
+ params->port &= 0xff;
+
+ if (params->subcmd != PRBS_START && params->subcmd != PRBS_STOP) {
+ if (params->subcmd == PRBS_INJECT) {
+ if (argc == 2 || kstrtoint(argv[2], 10, &params->inject_cnt))
+ return -1;
+ }
+ return 0;
+ }
+
+ arg_idx = 2;
+ while (arg_idx < argc) {
+ optcmd = prbs_optcmd_str2enum(argv[arg_idx]);
+ arg_idx++;
+
+ switch (optcmd) {
+ case PRBS_GENERATOR:
+ params->gen_pattern = params->subcmd == PRBS_START ?
+ _get_pattern(argc, argv, &arg_idx) : 1;
+ break;
+
+ case PRBS_CHECKER:
+ params->check_pattern = params->subcmd == PRBS_START ?
+ _get_pattern(argc, argv, &arg_idx) : 1;
+ break;
+
+ case PRBS_BOTH:
+ params->gen_pattern = params->subcmd == PRBS_START ?
+ _get_pattern(argc, argv, &arg_idx) : 1;
+ params->check_pattern = params->gen_pattern;
+ break;
+
+ default:
+ return -1;
+ }
+ }
+
+ if (params->gen_pattern == -1 || params->check_pattern == -1)
+ return -1;
+
+ if (params->subcmd == PRBS_STOP &&
+ !params->gen_pattern &&
+ !params->check_pattern) {
+
+		/*
+		 * For the STOP command, if neither gen nor check
+		 * is specified, stop both.
+		 */
+ params->gen_pattern = 1;
+ params->check_pattern = 1;
+ }
+
+ return 0;
+}
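+
+/* Illustrative usage (patterns follow the prbs_pattern[] names above;
+ * "show" output goes to the kernel log):
+ *
+ *   echo "start 2 both 31" > /sys/kernel/debug/serdes_diagnostics/prbs
+ *   echo "show 2"          > /sys/kernel/debug/serdes_diagnostics/prbs
+ *   echo "inject 2 5"      > /sys/kernel/debug/serdes_diagnostics/prbs
+ *   echo "stop 2"          > /sys/kernel/debug/serdes_diagnostics/prbs
+ */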
+
+static ssize_t serdes_dbg_prbs_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+#define STRBUF_SZ 64
+ int lane_idx;
+ int lanes_num, gserm_idx, mapping;
+ struct prbs_cmd_params input = {0};
+ struct arm_smccc_res res;
+ s32 x1, x2, x3, x4;
+ char strbuf[STRBUF_SZ] = {0};
+
+ if (parse_prbs_params(buffer, count, &input))
+ return -EINVAL;
+
+ x1 = (input.subcmd << 16) | (0xff << 8) | input.port;
+
+ x2 = input.gen_pattern;
+ x3 = input.check_pattern;
+ x4 = input.inject_cnt;
+
+ arm_smccc_smc(PLAT_OCTEONTX_SERDES_DBG_PRBS, x1, x2,
+ x3, x4, 0, 0, 0, &res);
+ if (res.a0) {
+ pr_warn("SerDes PRBS failed\n");
+ return count;
+ }
+
+ get_gserm_data(res.a2, &gserm_idx, &mapping, &lanes_num);
+ if (lanes_num > PORT_LANES_MAX) {
+ pr_warn("lanes num exceeds PORT_LANES_MAX (4)\n");
+ return count;
+ }
+
+ pr_info("SerDes PRBS:\n");
+ if (input.subcmd == PRBS_SHOW) {
+ struct prbs_stats *stats = (struct prbs_stats *)prbs_shmem;
+
+ if (stats->gen_pattern || stats->check_pattern) {
+ char cbuf[16] = {0};
+ char gbuf[16] = {0};
+
+ if (stats->gen_pattern) {
+ const char *ptrn =
+ prbs_pattern_enum2str(stats->gen_pattern);
+
+ snprintf(gbuf, 16, " gen=%s",
+ ptrn ? ptrn : "");
+ }
+
+ if (stats->check_pattern) {
+ const char *ptrn =
+ prbs_pattern_enum2str(stats->check_pattern);
+
+ snprintf(cbuf, 16, " check=%s",
+ ptrn ? ptrn : "");
+ }
+
+ pr_info("PRBS enabled (patterns:%s%s)\n", gbuf, cbuf);
+ } else {
+ pr_info("PRBS disabled\n");
+ }
+
+ pr_info(PRBS_SHOW_HEADER);
+
+ for (lane_idx = 0; lane_idx < lanes_num; lane_idx++) {
+ int glane = (mapping >> 4 * lane_idx) & 0xf;
+
+ pr_info("%d\t%d\t%d\t%d\t\t%d\t%llu\t\t%llu\n",
+ input.port,
+ lane_idx,
+ gserm_idx,
+ glane,
+ stats->error_stats[lane_idx].locked,
+ stats->error_stats[lane_idx].total_bits,
+ stats->error_stats[lane_idx].error_bits);
+ }
+ return count;
+ }
+
+ pr_info("port#:\tlane#:\tgserm#:\tg-lane#:\tcmd:\n");
+
+ switch (input.subcmd) {
+ case PRBS_START:
+ if (input.gen_pattern || input.check_pattern) {
+ char cbuf[16] = {0};
+ char gbuf[16] = {0};
+
+ if (input.gen_pattern) {
+ const char *ptrn =
+ prbs_pattern_enum2str(input.gen_pattern);
+
+ snprintf(gbuf, 16, " gen=%s",
+ ptrn ? ptrn : "");
+ }
+
+ if (input.check_pattern) {
+ const char *ptrn =
+ prbs_pattern_enum2str(input.check_pattern);
+
+ snprintf(cbuf, 16, " check=%s",
+ ptrn ? ptrn : "");
+ }
+
+ snprintf(strbuf, STRBUF_SZ, "(patterns:%s%s)", gbuf, cbuf);
+ }
+ break;
+ case PRBS_CLEAR:
+ snprintf(strbuf, STRBUF_SZ, "counters");
+ break;
+ case PRBS_STOP:
+ snprintf(strbuf, STRBUF_SZ, "%s%s%s",
+ input.gen_pattern ? " generator" : "",
+ input.gen_pattern && input.check_pattern ? "," : "",
+ input.check_pattern ? " checker" : "");
+ break;
+ case PRBS_INJECT:
+ snprintf(strbuf, STRBUF_SZ, "%d errors", input.inject_cnt);
+ break;
+ default:
+ break;
+ }
+
+ for (lane_idx = 0; lane_idx < lanes_num; lane_idx++) {
+ int glane = (mapping >> 4 * lane_idx) & 0xf;
+
+ pr_info("%d\t%d\t%d\t%d\t\t%s %s\n",
+ input.port,
+ lane_idx,
+ gserm_idx,
+ glane,
+ prbs_subcmd[input.subcmd].s,
+ strbuf);
+ }
+
+ return count;
+}
+DEFINE_ATTRIBUTE(serdes_dbg_prbs);
+
+static long notify_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct arm_smccc_res res;
+ s32 portm, evt;
+
+ switch (cmd) {
+ case IOCTL_SEND_ECP_NOTIFICATION:
+ portm = (arg >> 8) & 0xff;
+ evt = arg & 0xff;
+
+ /* Validate event */
+ if (evt > ECP_NOTIFY_PRBS_MODE_GEN_CHECK_DIS)
+ return -EINVAL;
+
+ arm_smccc_smc(PLAT_OCTEONTX_SERDES_DBG_NOTIFY_ECP, portm, evt,
+ 0, 0, 0, 0, 0, &res);
+ if (res.a0) {
+ pr_warn("Sending ECP notification failed\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ pr_err("Unsupported IOCTL\n");
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static const struct file_operations notify_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = notify_ioctl,
+ .llseek = no_llseek,
+};
+
+static int serdes_dbg_setup_debugfs(void)
+{
+ struct dentry *dbg_file;
+
+ serdes_dbgfs_root = debugfs_create_dir("serdes_diagnostics", NULL);
+
+ dbg_file = debugfs_create_file("prbs", 0644, serdes_dbgfs_root, NULL,
+ &serdes_dbg_prbs_fops);
+ if (!dbg_file)
+ goto create_failed;
+
+ dbg_file = debugfs_create_file("loopback", 0644, serdes_dbgfs_root, NULL,
+ &serdes_dbg_lpbk_fops);
+ if (!dbg_file)
+ goto create_failed;
+
+ dbg_file = debugfs_create_file("rx_params", 0644, serdes_dbgfs_root, NULL,
+ &serdes_dbg_rx_eq_fops);
+ if (!dbg_file)
+ goto create_failed;
+
+ dbg_file = debugfs_create_file("rx_training", 0644, serdes_dbgfs_root, NULL,
+ &serdes_dbg_rx_tr_fops);
+ if (!dbg_file)
+ goto create_failed;
+
+ dbg_file = debugfs_create_file("tx_params", 0644, serdes_dbgfs_root, NULL,
+ &serdes_dbg_tx_eq_fops);
+ if (!dbg_file)
+ goto create_failed;
+
+ dbg_file = debugfs_create_file("notify", 0644, serdes_dbgfs_root, NULL,
+ &notify_fops);
+ if (!dbg_file)
+ goto create_failed;
+
+ return 0;
+
+create_failed:
+ pr_err("Failed to create debugfs dir/file for serdes_diagnostics\n");
+ debugfs_remove_recursive(serdes_dbgfs_root);
+ return -1;
+}
+
+static int __init serdes_dbg_init(void)
+{
+ struct arm_smccc_res res;
+
+ if (octeontx_soc_check_smc() != 2) {
+ pr_debug(DRV_NAME": Not supported\n");
+ return -EPERM;
+ }
+
+ arm_smccc_smc(OCTEONTX_SERDES_DBG_GET_MEM, 0, 0, 0, 0, 0, 0, 0, &res);
+ if (res.a0 == SMCCC_RET_NOT_SUPPORTED) {
+ pr_info(DRV_NAME
+ ": Firmware doesn't support serdes diagnostic cmds.\n");
+ return -EPERM;
+ }
+
+ if (res.a0 != SMCCC_RET_SUCCESS)
+ return -ENOMEM;
+
+ serdes_tuning_shmem = ioremap_wc(res.a2,
+ PORT_LANES_MAX * sizeof(struct rx_eq_params));
+
+ if (!serdes_tuning_shmem)
+ goto tuning_shmem_failed;
+
+ prbs_shmem = ioremap_wc(res.a3,
+ sizeof(struct prbs_stats));
+
+ if (!prbs_shmem)
+ goto prbs_shmem_failed;
+
+ return serdes_dbg_setup_debugfs();
+
+prbs_shmem_failed:
+ iounmap(serdes_tuning_shmem);
+tuning_shmem_failed:
+ return -ENOMEM;
+}
+
+static void __exit serdes_dbg_exit(void)
+{
+ debugfs_remove_recursive(serdes_dbgfs_root);
+
+ if (serdes_tuning_shmem)
+ iounmap(serdes_tuning_shmem);
+
+ if (prbs_shmem)
+ iounmap(prbs_shmem);
+}
+
+module_init(serdes_dbg_init);
+module_exit(serdes_dbg_exit);
+
+MODULE_AUTHOR("Damian Eppel <deppel@marvell.com>");
+MODULE_DESCRIPTION("SerDes diagnostic commands for CN10K");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/marvell/cn10ka-swup/Makefile b/drivers/soc/marvell/cn10ka-swup/Makefile
new file mode 100644
index 000000000000..d35e2317ae31
--- /dev/null
+++ b/drivers/soc/marvell/cn10ka-swup/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for Marvell's CN10K firmware secure update driver
+#
+
+obj-$(CONFIG_MARVELL_CN10K_SWUP) += mrvl_swup.o
+
diff --git a/drivers/soc/marvell/cn10ka-swup/mrvl_swup.c b/drivers/soc/marvell/cn10ka-swup/mrvl_swup.c
new file mode 100644
index 000000000000..87f8329649ce
--- /dev/null
+++ b/drivers/soc/marvell/cn10ka-swup/mrvl_swup.c
@@ -0,0 +1,512 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Marvell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/arm-smccc.h>
+#include <linux/uaccess.h>
+#include <linux/ioctl.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <linux/smp.h>
+#include <linux/delay.h>
+
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/device.h>
+#include <linux/gfp.h>
+
+#include <soc/marvell/octeontx/octeontx_smc.h>
+#include "mrvl_swup.h"
+
+#define TO_VERSION_DESC(x) ((struct mrvl_get_versions *)(x))
+#define TO_CLONE_DESC(x) ((struct mrvl_clone_fw *)(x))
+#define TO_UPDATE_DESC(x) ((struct mrvl_update *)(x))
+#define TO_PHYS_BUFFER(x) ((struct mrvl_phys_buffer *)(x))
+
+static int alloc_buffers(struct memory_desc *memdesc, uint32_t required_buf);
+static void free_buffers(void);
+
+/* Debugfs interface root */
+static struct dentry *mrvl_swup_root;
+
+/* Buffers shared with secure firmware over SMC:
+ * BUF_CPIO      -> 32 MB for the SW update CPIO blob
+ * BUF_DATA      -> 1 MB for passing data structures
+ * BUF_SIGNATURE -> 1 MB for signature/user data
+ */
+#define BUF_CPIO 0
+#define BUF_DATA 1
+#define BUF_SIGNATURE 2
+#define BUF_COUNT 3
+static struct memory_desc memdesc[BUF_COUNT] = {
+ {0, 0, 32*1024*1024, "cpio buffer"},
+ {0, 0, 1*1024*1024, "data buffer"},
+ {0, 0, 1*1024*1024, "signature buffer"},
+};
+
+static struct allocated_pages {
+ struct page *p;
+ int order;
+} page_handler = {0};
+/* IOCTL mapping to fw name */
+static const struct {
+ const char *str;
+ uint8_t bit;
+} name_to_sel_obj[] = {
+ {"tim0", 0},
+ {"rom-script0.fw", 1},
+ {"scp_bl1.bin", 2},
+ {"mcp_bl1.bin", 3},
+ {"ecp_bl1.bin", 4},
+ {"init.bin", 5},
+ {"gserm-cn10xx.fw", 6},
+ {"bl2.bin", 7},
+ {"bl31.bin", 8},
+ {"u-boot-nodtb.bin", 9},
+ {"npc_mkex-cn10xx.fw", 10},
+ {"efi_app1.efi", 11},
+ {"switch_fw_ap.fw", 12},
+ {"switch_fw_super.fw", 13},
+ {"gserp-cn10xx.fw", 14},
+};
+
+static const char *obj_bit_to_str(uint32_t bit)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(name_to_sel_obj); i++) {
+ if (name_to_sel_obj[i].bit == bit)
+ return name_to_sel_obj[i].str;
+ }
+ return NULL;
+}
+
+/* Prepare objects for limited read */
+static void prepare_names(struct smc_version_info *info, uint32_t objects)
+{
+ int i;
+ int obj_count = 0;
+ const char *tmp = NULL;
+
+ for (i = 0; i < SMC_MAX_VERSION_ENTRIES; i++) {
+ if (objects & (1<<i)) {
+			tmp = obj_bit_to_str(i);
+			if (tmp == NULL) {
+				pr_warn("Incorrect object selected!\n");
+			} else {
+				strscpy(info->objects[obj_count].name, tmp, VER_MAX_NAME_LENGTH);
+ obj_count++;
+ }
+ }
+ }
+}
+
+static struct arm_smccc_res mrvl_exec_smc(uint64_t smc, uint64_t buf, uint64_t size)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(smc, buf, size, 0, 0, 0, 0, 0, &res);
+ return res;
+}
+
+static enum smc_version_entry_retcode mrvl_get_version(unsigned long arg, uint8_t calculate_hash)
+{
+ int i, ret = 0;
+ struct mrvl_get_versions *user_desc;
+ struct arm_smccc_res res;
+ struct smc_version_info *swup_info = (struct smc_version_info *)memdesc[BUF_DATA].virt;
+
+ user_desc = kzalloc(sizeof(*user_desc), GFP_KERNEL);
+ if (!user_desc)
+ return -ENOMEM;
+
+ if (copy_from_user(user_desc,
+ TO_VERSION_DESC(arg),
+ sizeof(*user_desc))) {
+ pr_err("Data Read Error\n");
+ ret = -EFAULT;
+ goto mem_error;
+ }
+
+ /* We have to perform conversion from IOCTL interface to smc */
+ memset(swup_info, 0x00, sizeof(*swup_info));
+
+ swup_info->magic_number = VERSION_MAGIC;
+ swup_info->version = VERSION_INFO_VERSION;
+ swup_info->bus = user_desc->bus;
+ swup_info->cs = user_desc->cs;
+
+ if (calculate_hash)
+ swup_info->version_flags |= SMC_VERSION_CHECK_VALIDATE_HASH;
+
+ if (user_desc->version_flags & MARLIN_CHECK_PREDEFINED_OBJ) {
+ swup_info->version_flags |= SMC_VERSION_CHECK_SPECIFIC_OBJECTS;
+ prepare_names(swup_info, user_desc->selected_objects);
+ swup_info->num_objects = hweight_long(user_desc->selected_objects);
+ } else {
+ swup_info->num_objects = SMC_MAX_OBJECTS;
+ }
+
+ res = mrvl_exec_smc(PLAT_CN10K_VERIFY_FIRMWARE,
+ memdesc[BUF_DATA].phys,
+ sizeof(struct smc_version_info));
+
+ if (res.a0) {
+ pr_err("Error during SMC processing\n");
+ ret = res.a0;
+ goto mem_error;
+ }
+
+ user_desc->retcode = swup_info->retcode;
+ for (i = 0; i < SMC_MAX_VERSION_ENTRIES; i++)
+ memcpy(&user_desc->desc[i],
+ &swup_info->objects[i],
+ sizeof(struct smc_version_info_entry));
+
+ if (copy_to_user(TO_VERSION_DESC(arg),
+ user_desc,
+ sizeof(*user_desc))) {
+ pr_err("Data Write Error\n");
+ ret = -EFAULT;
+ }
+
+mem_error:
+ kfree(user_desc);
+ return ret;
+}
+
+static int mrvl_clone_fw(unsigned long arg)
+{
+ int i, ret = 0;
+ struct mrvl_clone_fw *user_desc;
+ struct arm_smccc_res res;
+ struct smc_version_info *swup_info = (struct smc_version_info *)memdesc[BUF_DATA].virt;
+
+ user_desc = kzalloc(sizeof(*user_desc), GFP_KERNEL);
+ if (!user_desc)
+ return -ENOMEM;
+
+ if (copy_from_user(user_desc,
+ TO_CLONE_DESC(arg),
+ sizeof(*user_desc))) {
+ pr_err("Data Read Error\n");
+ ret = -EFAULT;
+ goto mem_error;
+ }
+
+ memset(swup_info, 0x00, sizeof(*swup_info));
+
+ swup_info->magic_number = VERSION_MAGIC;
+ swup_info->version = VERSION_INFO_VERSION;
+ swup_info->bus = user_desc->bus;
+ swup_info->cs = user_desc->cs;
+ swup_info->version_flags |= SMC_VERSION_CHECK_VALIDATE_HASH;
+
+ if (user_desc->version_flags & MARLIN_CHECK_PREDEFINED_OBJ) {
+ swup_info->version_flags |= SMC_VERSION_CHECK_SPECIFIC_OBJECTS;
+ prepare_names(swup_info, user_desc->selected_objects);
+ swup_info->num_objects = hweight_long(user_desc->selected_objects);
+ } else {
+ swup_info->num_objects = SMC_MAX_OBJECTS;
+ }
+
+
+ switch (user_desc->clone_op) {
+ case CLONE_SPI:
+ swup_info->target_bus = user_desc->target_bus;
+ swup_info->target_cs = user_desc->target_cs;
+ swup_info->version_flags |= SMC_VERSION_COPY_TO_BACKUP_FLASH;
+ break;
+ case CLONE_MMC:
+ swup_info->version_flags |= SMC_VERSION_COPY_TO_BACKUP_EMMC;
+ break;
+ case CLONE_OFFSET:
+ swup_info->version_flags |= SMC_VERSION_COPY_TO_BACKUP_OFFSET;
+ break;
+ default:
+		pr_err("Incorrect clone parameter.\n");
+		ret = -EINVAL;
+		goto mem_error;
+ }
+
+ res = mrvl_exec_smc(PLAT_CN10K_VERIFY_FIRMWARE,
+ memdesc[BUF_DATA].phys,
+ sizeof(struct smc_version_info));
+
+ if (res.a0) {
+ pr_err("Error during SMC processing\n");
+ ret = res.a0;
+ goto mem_error;
+ }
+
+ user_desc->retcode = swup_info->retcode;
+ for (i = 0; i < SMC_MAX_VERSION_ENTRIES; i++)
+ memcpy(&user_desc->desc[i],
+ &swup_info->objects[i],
+ sizeof(struct smc_version_info_entry));
+
+ if (copy_to_user(TO_CLONE_DESC(arg),
+ user_desc,
+ sizeof(*user_desc))) {
+ pr_err("Data Write Error\n");
+ ret = -EFAULT;
+ }
+
+mem_error:
+ kfree(user_desc);
+ return ret;
+}
+
+static int mrvl_get_membuf(unsigned long arg)
+{
+ struct mrvl_phys_buffer buf;
+
+ buf.cpio_buf = memdesc[BUF_CPIO].phys;
+ buf.cpio_buf_size = memdesc[BUF_CPIO].size;
+ buf.sign_buf = memdesc[BUF_SIGNATURE].phys;
+ buf.sign_buf_size = memdesc[BUF_SIGNATURE].size;
+ buf.reserved_buf = 0;
+ buf.reserved_buf_size = 0;
+
+
+ if (copy_to_user(TO_PHYS_BUFFER(arg),
+ &buf,
+ sizeof(buf))) {
+ pr_err("Data Write Error\n");
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static int mrvl_run_fw_update(unsigned long arg)
+{
+ struct mrvl_update ioctl_desc = {0};
+ struct smc_update_descriptor *smc_desc;
+ struct arm_smccc_res res;
+ int spi_in_progress = 0;
+
+ smc_desc = (struct smc_update_descriptor *)memdesc[BUF_DATA].virt;
+ memset(smc_desc, 0x00, sizeof(*smc_desc));
+
+ if (copy_from_user(&ioctl_desc,
+ TO_UPDATE_DESC(arg),
+ sizeof(ioctl_desc))) {
+ pr_err("Data Read Error\n");
+ return -EFAULT;
+ }
+
+	pr_info("Update request: SPI: %d, CS: %d, image size: %llu\n",
+ ioctl_desc.bus,
+ ioctl_desc.cs,
+ ioctl_desc.image_size);
+
+	/* Verify the CPIO image size */
+ if (ioctl_desc.image_size > memdesc[BUF_CPIO].size) {
+ pr_err("Incorrect CPIO data size\n");
+ return -EFAULT;
+ }
+
+ /* Verify userdata */
+ if (ioctl_desc.user_size > memdesc[BUF_SIGNATURE].size) {
+ pr_err("Incorrect user data size\n");
+ return -EFAULT;
+ }
+
+ smc_desc->magic = UPDATE_MAGIC;
+ smc_desc->version = UPDATE_VERSION;
+
+	/* Set addresses and flags */
+ smc_desc->image_addr = memdesc[BUF_CPIO].phys;
+ smc_desc->image_size = ioctl_desc.image_size;
+ if (ioctl_desc.user_size != 0) {
+ smc_desc->user_addr = memdesc[BUF_SIGNATURE].phys;
+ smc_desc->user_size = ioctl_desc.user_size;
+ }
+ smc_desc->user_flags = ioctl_desc.user_flags;
+ smc_desc->update_flags = ioctl_desc.flags;
+
+	/* In Linux, use asynchronous SPI operation */
+ smc_desc->async_spi = 1;
+
+ /* SPI config */
+ smc_desc->bus = ioctl_desc.bus;
+ smc_desc->cs = ioctl_desc.cs;
+
+ res = mrvl_exec_smc(PLAT_OCTEONTX_SPI_SECURE_UPDATE,
+ memdesc[BUF_DATA].phys,
+ sizeof(struct smc_update_descriptor));
+
+ ioctl_desc.ret = res.a0;
+ if (copy_to_user(TO_UPDATE_DESC(arg),
+ &ioctl_desc,
+ sizeof(ioctl_desc))) {
+ pr_err("Data Write Error\n");
+ return -EFAULT;
+ }
+
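+	/* The update itself runs asynchronously in firmware
+	 * (async_spi = 1 above); poll the firmware status SMC until the
+	 * SPI write has completed.
+	 */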
+ do {
+ msleep(500);
+ res = mrvl_exec_smc(0xc2000b0e, 0, 0);
+ spi_in_progress = res.a0;
+ } while (spi_in_progress);
+
+ return 0;
+}
+
+static long mrvl_swup_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int ret;
+
+ switch (cmd) {
+ case GET_VERSION:
+ case VERIFY_HASH:
+ case CLONE_FW:
+ ret = alloc_buffers(memdesc, 1<<BUF_DATA | 1<<BUF_SIGNATURE);
+ break;
+ case GET_MEMBUF:
+ ret = alloc_buffers(memdesc, 1<<BUF_DATA | 1<<BUF_SIGNATURE | 1<<BUF_CPIO);
+ break;
+ default:
+ ret = -ENXIO; /* Illegal cmd */
+ break;
+ }
+
+ if (ret)
+ return ret;
+
+ switch (cmd) {
+ case GET_VERSION:
+ ret = mrvl_get_version(arg, 0);
+ free_buffers();
+ break;
+ case VERIFY_HASH:
+ ret = mrvl_get_version(arg, 1);
+ free_buffers();
+ break;
+ case GET_MEMBUF:
+ ret = mrvl_get_membuf(arg);
+ break;
+ case RUN_UPDATE:
+ ret = mrvl_run_fw_update(arg);
+ free_buffers();
+ break;
+ case CLONE_FW:
+ ret = mrvl_clone_fw(arg);
+ free_buffers();
+ break;
+ default:
+ pr_err("Not supported IOCTL\n");
+ return -ENXIO;
+ }
+ return ret;
+}
+
+static const struct file_operations mrvl_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = mrvl_swup_ioctl,
+ .llseek = no_llseek,
+};
+
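+/* Back the requested buffers with one contiguous page allocation and
+ * carve them out sequentially; the physical addresses are later handed
+ * to secure firmware over SMC.
+ */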
+static int alloc_buffers(struct memory_desc *memdesc, uint32_t required_buf)
+{
+ int i, required_mem = 0, page_order;
+ void *page_addr;
+
+ for (i = 0; i < BUF_COUNT; i++) {
+ if (required_buf & 1<<i)
+ required_mem += memdesc[i].size;
+ }
+
+ if (!required_mem)
+ return 0;
+
+ page_order = get_order(required_mem);
+ page_handler.p = alloc_pages(GFP_KERNEL, page_order);
+ if (!page_handler.p)
+ return -ENOMEM;
+
+ page_handler.order = page_order;
+ page_addr = page_address(page_handler.p);
+	memset(page_addr, 0x00, PAGE_SIZE << page_order);
+
+ for (i = 0; i < BUF_COUNT; i++) {
+ if (required_buf & 1<<i) {
+ memdesc[i].virt = page_addr;
+ memdesc[i].phys = virt_to_phys(page_addr);
+ page_addr += memdesc[i].size;
+ }
+ }
+ return 0;
+}
+
+static void free_buffers(void)
+{
+ int i;
+
+ for (i = 0; i < BUF_COUNT; i++) {
+ memdesc[i].phys = 0;
+ memdesc[i].virt = 0;
+ }
+
+ if (page_handler.p) {
+ __free_pages(page_handler.p, page_handler.order);
+ page_handler.p = NULL;
+ page_handler.order = 0;
+ }
+}
+
+static int mrvl_swup_setup_debugfs(void)
+{
+ struct dentry *pfile;
+
+ mrvl_swup_root = debugfs_create_dir("cn10k_swup", NULL);
+
+ pfile = debugfs_create_file("verification", 0644, mrvl_swup_root, NULL,
+ &mrvl_fops);
+ if (!pfile)
+ goto create_failed;
+
+ return 0;
+
+create_failed:
+ pr_err("Failed to create debugfs dir/file for firmware update\n");
+ debugfs_remove_recursive(mrvl_swup_root);
+	return -ENOMEM;
+}
+
+static int __init mrvl_swup_init(void)
+{
+ int ret;
+
+ ret = octeontx_soc_check_smc();
+ if (ret != 2) {
+ pr_debug("%s: Not supported\n", __func__);
+ return -EPERM;
+ }
+
+ return mrvl_swup_setup_debugfs();
+}
+
+static void __exit mrvl_swup_exit(void)
+{
+ debugfs_remove_recursive(mrvl_swup_root);
+}
+
+module_init(mrvl_swup_init);
+module_exit(mrvl_swup_exit);
+
+MODULE_DESCRIPTION("Marvell firmware update");
+MODULE_AUTHOR("Witold Sadowski <wsadowski@marvell.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/marvell/cn10ka-swup/mrvl_swup.h b/drivers/soc/marvell/cn10ka-swup/mrvl_swup.h
new file mode 100644
index 000000000000..9897037037ea
--- /dev/null
+++ b/drivers/soc/marvell/cn10ka-swup/mrvl_swup.h
@@ -0,0 +1,307 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Marvell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MRVL_SWUP_H__
+#define __MRVL_SWUP_H__
+
+#define PLAT_OCTEONTX_SPI_SECURE_UPDATE 0xc2000b05
+#define PLAT_CN10K_VERIFY_FIRMWARE 0xc2000b0c
+
+#define VER_MAX_NAME_LENGTH 32
+#define SMC_MAX_OBJECTS 32
+#define SMC_MAX_VERSION_ENTRIES 32
+#define VERSION_STRING_LENGTH 32
+#define HASH_SIZE 64
+#define VERIFY_LOG_SIZE 1024
+
+#define MARLIN_CHECK_PREDEFINED_OBJ (1<<0)
+#define MARLIN_PRINT_CONSOLE_LOGS (1<<15)
+
+#define VERSION_FLAG_BACKUP BIT(0)
+#define VERSION_FLAG_EMMC BIT(1)
+#define SMC_VERSION_CHECK_SPECIFIC_OBJECTS BIT(2)
+#define SMC_VERSION_CHECK_VALIDATE_HASH BIT(3)
+
+/**
+ * Set this to copy objects to the backup flash after verification.
+ * Do not set this and SMC_VERSION_COPY_TO_BACKUP_EMMC.
+ */
+#define SMC_VERSION_COPY_TO_BACKUP_FLASH BIT(4)
+
+/**
+ * Set this to copy objects to the backup eMMC after verification.
+ * Do not set this and SMC_VERSION_COPY_TO_BACKUP_FLASH.
+ */
+#define SMC_VERSION_COPY_TO_BACKUP_EMMC BIT(5)
+
+/**
+ * Set this to copy objects to the backup flash offset after verification.
+ */
+#define SMC_VERSION_COPY_TO_BACKUP_OFFSET BIT(6)
+
+#define VERSION_MAGIC 0x4e535256 /** VRSN */
+#define VERSION_INFO_VERSION	0x0101		/** Version 1.1 */
+
+struct memory_desc {
+ void *virt;
+ dma_addr_t phys;
+ uint64_t size;
+ char pool_name[32];
+};
+
+struct tim_opaque_data_version_info {
+ uint8_t major_version; /** Major version number */
+ uint8_t minor_version; /** Minor version number */
+ uint8_t revision_number;/** Revision number */
+ uint8_t revision_type; /** Revision type (TBD) */
+ uint16_t year; /** GIT Year */
+ uint8_t month; /** GIT Month */
+ uint8_t day; /** GIT Day */
+ uint8_t hour; /** GIT Hour */
+ uint8_t minute; /** GIT Minute */
+ uint16_t flags; /** Flags (TBD) */
+ uint32_t customer_version;/** Customer defined version number */
+ uint8_t version_string[VERSION_STRING_LENGTH];
+} __packed;
+
+/* VERSION INFO
+ * HASH VERIFICATION
+ */
+
+/** Return code for version info */
+enum smc_version_ret {
+ VERSION_OK,
+ FIRMWARE_LAYOUT_CHANGED,
+ TOO_MANY_OBJECTS,
+ INVALID_DEVICE_TREE,
+ VERSION_NOT_SUPPORTED,
+ /** SMC_VERSION_CHECK_VALIDATE_HASH must be set */
+ BACKUP_SRC_NOT_VALIDATED,
+ /** An object failed the verification stage */
+ BACKUP_SRC_FAILED_VALIDATION,
+ /** Both the source and destination are the same */
+ BACKUP_SRC_AND_DEST_ARE_SAME,
+ /** An I/O error with the source occurred copying an object */
+ BACKUP_IO_SRC_ERROR,
+ /** An I/O error with the destination occurred writing an object */
+ BACKUP_IO_DST_ERROR,
+ /** An I/O error with the destination occurred erasing the media */
+ BACKUP_IO_ERASE_ERROR,
+};
+
+/** This is used for each object (version entry) */
+enum smc_version_entry_retcode {
+ RET_OK = 0,
+ RET_NOT_FOUND = 1,
+ RET_TIM_INVALID = 2,
+ RET_BAD_HASH = 3,
+ RET_NOT_ENOUGH_MEMORY = 4,
+ RET_NAME_MISMATCH = 5,
+ RET_TIM_NO_VERSION = 6,
+ RET_TIM_NO_HASH = 7,
+ RET_HASH_ENGINE_ERROR = 8,
+ RET_HASH_NO_MATCH = 9,
+ RET_IMAGE_TOO_BIG = 10,
+ RET_DEVICE_TREE_ENTRY_ERROR = 11,
+};
+
+struct smc_version_info_entry {
+ char name[VER_MAX_NAME_LENGTH];
+ struct tim_opaque_data_version_info version;
+ uint8_t tim_hash[HASH_SIZE]; /** Hash value stored in the TIM */
+ uint8_t obj_hash[HASH_SIZE]; /** Calculated hash value */
+ uint64_t tim_address; /** Address of TIM in flash */
+ uint64_t tim_size; /** Size of TIM in bytes */
+ uint64_t max_size; /** Maximum space for object and TIM */
+ uint64_t object_size; /** Size of flash object in bytes */
+ uint64_t object_address; /** Address of object in flash */
+ uint16_t hash_size; /** Size of hash in bytes */
+ uint16_t flags; /** Flags for this object */
+ enum smc_version_entry_retcode retcode; /** Return code if error */
+ uint64_t reserved[7]; /** Reserved for future growth */
+ uint8_t log[VERIFY_LOG_SIZE]; /** Log for object */
+};
+
+struct smc_version_info {
+ uint32_t magic_number; /** VRSN */
+ uint16_t version; /** Version of descriptor */
+ uint16_t version_flags; /** Flags passed to version process */
+ uint32_t bus; /** SPI BUS number */
+ uint32_t cs; /** SPI chip select number */
+ uint32_t target_bus; /** Target bus used for copying */
+ uint32_t target_cs; /** Target CS used for copying */
+ uintptr_t work_buffer_addr;/** Used to decompress objects */
+ uint64_t work_buffer_size;/** Size of decompression buffer */
+ enum smc_version_ret retcode;
+ uint32_t num_objects;
+ uint32_t timeout; /** Timeout in ms */
+ uint32_t reserved32; /** Pad to 64 bits */
+ uint64_t reserved[4]; /** Reserved for future growth */
+ struct smc_version_info_entry objects[SMC_MAX_VERSION_ENTRIES];
+};
+
+/* UPDATE
+ */
+
+enum update_ret {
+ /** No errors */
+ UPDATE_OK = 0,
+ /** Error with the CPIO image */
+ UPDATE_CPIO_ERROR = -1,
+ /** Invalid TIM found in update */
+ UPDATE_TIM_ERROR = -2,
+ /** One or more files failed hash check */
+ UPDATE_HASH_ERROR = -3,
+ /** Update authentication error */
+ UPDATE_AUTH_ERROR = -4,
+ /** I/O error reading or writing to the flash */
+ UPDATE_IO_ERROR = -5,
+ /**
+ * Error found that requires all objects to be updated,
+ * i.e. a corrupt object found in the existing flash
+ */
+ UPDATE_REQUIRE_FULL = -6,
+ /** Out of resources, too many files, etc. */
+ UPDATE_NO_MEM = -7,
+ /** Problem found with device tree firmware-update section */
+ UPDATE_DT_ERROR = -8,
+ /** Incomplete file grouping found */
+ UPDATE_GROUP_ERROR = -9,
+ /** Location or size of an object invalid */
+ UPDATE_LOCATION_ERROR = -10,
+ /** Unsupported media */
+ UPDATE_INVALID_MEDIA = -11,
+ /** Invalid alignment of update file */
+ UPDATE_BAD_ALIGNMENT = -12,
+ /** TIM is missing in an object */
+ UPDATE_MISSING_TIM = -13,
+ /** File is missing in an object */
+ UPDATE_MISSING_FILE = -14,
+ /** TIM is missing in flash */
+ UPDATE_TIM_MISSING = -15,
+ /** I/O issue with eHSM component */
+ UPDATE_EHSM_ERROR = -16,
+ /** Update rejected due to version check */
+ UPDATE_VERSION_CHECK_FAIL = -17,
+ /** Bad magic number in update descriptor */
+ UPDATE_BAD_DESC_MAGIC = -18,
+ /** Unsupported version in update descriptor */
+ UPDATE_BAD_DESC_VERSION = -19,
+ /** Error mapping update to secure memory */
+ UPDATE_MMAP_ERROR = -20,
+
+ UPDATE_WORK_BUFFER_TOO_SMALL = -21,
+ /** Unknown error */
+ UPDATE_UNKNOWN_ERROR = -1000,
+};
+
+struct smc_update_obj_info {
+
+};
+
+#define UPDATE_MAGIC 0x55504454 /* UPDT */
+/** Current smc_update_descriptor version */
+#define UPDATE_VERSION 0x0001
+
+#define UPDATE_FLAG_BACKUP 0x0001 /** Set to update secondary location */
+#define UPDATE_FLAG_EMMC 0x0002 /** Set to update eMMC instead of SPI */
+#define UPDATE_FLAG_ERASE_PART 0x0004 /** Erase eMMC partition data */
+#define UPDATE_FLAG_IGNORE_VERSION 0x0008 /** Don't perform version check */
+/** Set when user parameters are passed */
+#define UPDATE_FLAG_USER_PARMS 0x8000
+
+/** Offset from the beginning of the flash where the backup image is located */
+#define BACKUP_IMAGE_OFFSET 0x2000000
+/**
+ * This descriptor is passed by U-Boot or other software performing an update
+ */
+struct smc_update_descriptor {
+ uint32_t magic; /** UPDATE_MAGIC */
+ uint16_t version; /** Version of descriptor */
+ uint16_t update_flags; /** Flags passed to update process */
+ uint64_t image_addr; /** Address of image (CPIO file) */
+ uint64_t image_size; /** Size of image (CPIO file) */
+ uint32_t bus; /** SPI BUS number */
+ uint32_t cs; /** SPI chip select number */
+ uint32_t async_spi; /** Async SPI operations */
+	uint32_t reserved;	/** Reserved for future use */
+ uint64_t user_addr; /** Passed to customer function */
+ uint64_t user_size; /** Passed to customer function */
+ uint64_t user_flags; /** Passed to customer function */
+ uintptr_t work_buffer; /** Used for compressed objects */
+ uint64_t work_buffer_size;/** Size of work buffer */
+ struct smc_update_obj_info object_retinfo[SMC_MAX_OBJECTS];
+};
+
+
+enum marlin_bootflash_clone_op {
+ CLONE_SPI = 0,
+ CLONE_MMC = 1,
+ CLONE_OFFSET = 2,
+};
+
+
+/* IOCTL interface
+ * Use same data structure for:
+ * get_version
+ * verify_hash
+ */
+struct mrvl_get_versions {
+ uint32_t bus; /** SPI BUS number */
+ uint32_t cs; /** SPI chip select number */
+	uintptr_t log_addr;	/** Pointer to the buffer used to store the log */
+ size_t log_size; /** Size of the log buffer */
+ uint16_t version_flags; /** Flags to specify options */
+ uint32_t selected_objects; /** Mask of a selection of TIMs (32 max) */
+ uint64_t reserved[5]; /** Reserved for future growth */
+ enum smc_version_ret retcode;
+ struct smc_version_info_entry desc[SMC_MAX_VERSION_ENTRIES];
+} __packed;
+
+struct mrvl_clone_fw {
+ uint32_t bus; /** SPI BUS number */
+ uint32_t cs; /** SPI chip select number */
+ uint32_t target_bus; /** Target SPI BUS number */
+ uint32_t target_cs; /** Target SPI chip select number */
+ enum marlin_bootflash_clone_op clone_op; /** Clone configuration */
+ uint16_t version_flags; /** Flags to specify options */
+ uint32_t selected_objects; /** Mask of a selection of TIMs (32 max) */
+ uint64_t reserved[5]; /** Reserved for future growth */
+ enum smc_version_ret retcode;
+ struct smc_version_info_entry desc[SMC_MAX_VERSION_ENTRIES];
+} __packed;
+
+struct mrvl_phys_buffer {
+ uint64_t cpio_buf;
+ uint64_t cpio_buf_size;
+ uint64_t sign_buf;
+ uint64_t sign_buf_size;
+ uint64_t reserved_buf;
+ uint64_t reserved_buf_size;
+} __packed;
+
+struct mrvl_update {
+ uint32_t bus;
+ uint32_t cs;
+ uint64_t image_size;
+ uint64_t flags;
+ uint64_t user_flags;
+ uint64_t user_size;
+ uint16_t timeout;
+ enum update_ret ret;
+} __packed;
+
+
+#define GET_VERSION _IOWR('a', 'a', struct mrvl_get_versions*)
+#define VERIFY_HASH _IOWR('a', 'b', struct mrvl_get_versions*)
+#define GET_MEMBUF _IOWR('a', 'c', struct mrvl_phys_buffer*)
+#define RUN_UPDATE _IOWR('a', 'd', struct mrvl_update*)
+#define CLONE_FW _IOWR('a', 'e', struct mrvl_clone_fw*)
+
+#endif /* __TIM_UPDATE_H__ */
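A minimal, hypothetical user-space sketch of the ioctl interface defined above: it queries firmware
versions through GET_VERSION. The device node name "/dev/tim_update" and the header name
"tim_update.h" are assumptions here; the character device backing these ioctls is registered
elsewhere in this series, and the example values for bus/cs are only placeholders.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "tim_update.h"		/* assumed header providing the definitions above */

static int query_versions(void)
{
	struct mrvl_get_versions *req;
	int fd, ret = -1;

	fd = open("/dev/tim_update", O_RDWR);	/* assumed node name */
	if (fd < 0)
		return -1;

	/* The descriptor table can be large, so allocate it on the heap */
	req = calloc(1, sizeof(*req));
	if (!req)
		goto out_close;

	req->bus = 0;			/* SPI bus holding the boot flash */
	req->cs = 0;			/* SPI chip select */
	req->selected_objects = 0;	/* no specific TIM selection */

	ret = ioctl(fd, GET_VERSION, req);
	if (!ret)
		printf("first object '%.*s', TIM size %llu bytes, retcode %d\n",
		       (int)sizeof(req->desc[0].name), req->desc[0].name,
		       (unsigned long long)req->desc[0].tim_size,
		       req->desc[0].retcode);

	free(req);
out_close:
	close(fd);
	return ret;
}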
diff --git a/drivers/soc/marvell/gti/Makefile b/drivers/soc/marvell/gti/Makefile
new file mode 100644
index 000000000000..51ee43c541a3
--- /dev/null
+++ b/drivers/soc/marvell/gti/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for Marvell's GTI Watchdog driver
+#
+
+obj-$(CONFIG_GTI_WATCHDOG) += gti_wdog.o
+
+gti_wdog-y := gti_watchdog.o gti.o
diff --git a/drivers/soc/marvell/gti/gti.c b/drivers/soc/marvell/gti/gti.c
new file mode 100644
index 000000000000..c571b613af39
--- /dev/null
+++ b/drivers/soc/marvell/gti/gti.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell GTI Watchdog driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/cpu.h>
+#include <linux/nmi.h>
+#include <linux/module.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/sched/debug.h>
+
+#include "gti.h"
+
+/* Runs on the other CPUs via IPI to dump their register state when the watchdog NMI fires */
+void nmi_kernel_callback_other_cpus(void *unused)
+{
+ struct pt_regs *regs = get_irq_regs();
+
+ pr_emerg("Watchdog CPU:%d\n", raw_smp_processor_id());
+
+ if (regs)
+ show_regs(regs);
+ else
+ dump_stack();
+}
+
+void nmi_kernel_callback(struct pt_regs *regs)
+{
+ int c;
+
+ pr_emerg("Watchdog CPU:%d Hard LOCKUP\n", raw_smp_processor_id());
+
+ if (regs)
+ show_regs(regs);
+ else
+ dump_stack();
+
+ for_each_online_cpu(c) {
+ if (c == raw_smp_processor_id())
+ continue;
+		/*
+		 * We make a synchronous call to the other cores and wait
+		 * for them to dump their state/context. If one of the
+		 * cores is hung or unable to respond to interrupts, we
+		 * can wait here forever; for now we rely on the NMI timer
+		 * to trigger a system-wide warm reset and break out of
+		 * such deadlocks.
+		 */
+ smp_call_function_single(c,
+ nmi_kernel_callback_other_cpus, NULL, 1);
+ }
+
+ /*
+ * Return to the interrupted state via el3 and attempt
+ * application recovery.
+ */
+}
diff --git a/drivers/soc/marvell/gti/gti.h b/drivers/soc/marvell/gti/gti.h
new file mode 100644
index 000000000000..88fec90995c4
--- /dev/null
+++ b/drivers/soc/marvell/gti/gti.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell GTI Watchdog driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __GTI_WATCHDOG_H__
+#define __GTI_WATCHDOG_H__
+
+#define OCTEONTX_INSTALL_WDOG 0xc2000c01
+#define OCTEONTX_REMOVE_WDOG 0xc2000c02
+#define OCTEONTX_START_WDOG 0xc2000c03
+
+/* Keep this consistent with arch/arm64/kernel/entry.S */
+#define OCTEONTX_RESTORE_WDOG_CTXT 0xc2000c04
+
+DECLARE_PER_CPU(uint64_t, gti_elr);
+DECLARE_PER_CPU(uint64_t, gti_spsr);
+
+/* Kernel exception simulation wrapper for the NMI callback */
+extern void el0_nmi_callback(void);
+extern void el1_nmi_callback(void);
+void nmi_kernel_callback(struct pt_regs *regs);
+
+#endif /* __GTI_WATCHDOG_H__ */
diff --git a/drivers/soc/marvell/gti/gti_watchdog.c b/drivers/soc/marvell/gti/gti_watchdog.c
new file mode 100644
index 000000000000..9dafa6a582b6
--- /dev/null
+++ b/drivers/soc/marvell/gti/gti_watchdog.c
@@ -0,0 +1,268 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell GTI Watchdog driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/arm-smccc.h>
+#include <linux/cpu.h>
+#include <linux/io.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/ioctl.h>
+#include <linux/uaccess.h>
+#include <linux/pci.h>
+
+#include "gti.h"
+
+#define PCI_DEVID_OCTEONTX2_GTI 0xA017
+
+/* PCI BAR nos */
+#define GTI_PF_BAR0 0
+
+#define DRV_NAME "gti-watchdog"
+#define DRV_VERSION "1.0"
+
+/* Supported devices */
+static const struct pci_device_id gti_wdog_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_GTI) },
+ { 0, } /* end of table */
+};
+MODULE_DEVICE_TABLE(pci, gti_wdog_id_table);
+MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_DESCRIPTION("Marvell GTI Watchdog Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRV_VERSION);
+
+#define GTI_WDOG_MAGIC 'G'
+#define SET_WATCHDOG 0x01
+#define CLEAR_WATCHDOG 0x02
+#define GTI_SET_WATCHDOG _IOW(GTI_WDOG_MAGIC, \
+ SET_WATCHDOG, void *)
+#define GTI_CLEAR_WATCHDOG _IOW(GTI_WDOG_MAGIC, \
+ CLEAR_WATCHDOG, void *)
+
+struct set_watchdog_args {
+ uint64_t watchdog_timeout_ms;
+ uint64_t core_mask;
+};
+
+static unsigned long g_mmio_base;
+DEFINE_PER_CPU(uint64_t, gti_elr);
+DEFINE_PER_CPU(uint64_t, gti_spsr);
+
+static void cleanup_gti_watchdog(void)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(OCTEONTX_REMOVE_WDOG, 0, 0, 0, 0, 0, 0, 0, &res);
+
+ if (!res.a0)
+ pr_warn("Failed to remove/clear watchdog handler: %ld\n",
+ res.a0);
+}
+
+static int gti_wdog_open(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+static int gti_wdog_close(struct inode *inode, struct file *file)
+{
+ cleanup_gti_watchdog();
+ return 0;
+}
+
+void install_gti_cwd_wdog(void *arg)
+{
+ struct arm_smccc_res res;
+ uint64_t kernel_in_hyp_mode;
+ int cpu;
+
+ cpu = smp_processor_id();
+
+ pr_info("Installing GTI CWD on CPU %d\n", raw_smp_processor_id());
+
+ kernel_in_hyp_mode = is_kernel_in_hyp_mode();
+
+ arm_smccc_smc(OCTEONTX_INSTALL_WDOG, smp_processor_id(),
+ virt_to_phys(&per_cpu(gti_elr, cpu)),
+ virt_to_phys(&per_cpu(gti_spsr, cpu)), kernel_in_hyp_mode,
+ 0, 0, 0, &res);
+ if (!res.a0)
+ pr_warn("Failed to install watchdog handler on core %d : %ld\n",
+ raw_smp_processor_id(), res.a0);
+}
+
+void install_gti_cwd_wdog_all_cores(struct set_watchdog_args *watchdog_args)
+{
+ struct arm_smccc_res res;
+ uint64_t cpumask = 0;
+ int cpu;
+
+ for_each_online_cpu(cpu) {
+		if (!(watchdog_args->core_mask & (1ULL << cpu)))
+			continue;
+
+		cpumask |= (1ULL << cpu);
+ smp_call_function_single(cpu, install_gti_cwd_wdog,
+ (void *)watchdog_args, 1);
+ }
+
+ /*
+ * The last call actually sets up the wdog timers and
+ * enables the interrupts.
+ */
+
+ arm_smccc_smc(OCTEONTX_START_WDOG, (uintptr_t)&el0_nmi_callback,
+ (uintptr_t)&el1_nmi_callback,
+ watchdog_args->watchdog_timeout_ms, cpumask,
+ 0, 0, 0, &res);
+
+ if (!res.a0)
+ pr_warn("Failed to install watchdog handler on core %llx : %ld\n",
+ cpumask, res.a0);
+
+ if (cpumask != watchdog_args->core_mask)
+ pr_warn("Wdog on coremask %llx requested coremask %llx\n",
+ cpumask, watchdog_args->core_mask);
+}
+
+static long gti_wdog_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct set_watchdog_args watchdog_args;
+
+ if (cmd == GTI_SET_WATCHDOG) {
+ pr_debug("OCTEONTX_INSTALL_WDOG\n");
+
+ if (copy_from_user(&watchdog_args, (char *)arg,
+ sizeof(struct set_watchdog_args)))
+ return -EFAULT;
+
+ pr_debug("timeout = %lld, core_mask = 0x%llx\n",
+ watchdog_args.watchdog_timeout_ms,
+ watchdog_args.core_mask);
+
+ install_gti_cwd_wdog_all_cores(&watchdog_args);
+
+ } else if (cmd == GTI_CLEAR_WATCHDOG) {
+ pr_debug("OCTEONTX_CLEAR_WDOG\n");
+
+ cleanup_gti_watchdog();
+ } else {
+ return -ENOTTY;
+ }
+ return 0;
+}
+
+static int gti_wdog_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ unsigned long size = vma->vm_end - vma->vm_start;
+ int ret;
+
+ pr_debug("%s invoked, size = %ld\n", __func__, size);
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ ret = io_remap_pfn_range(vma, vma->vm_start,
+ g_mmio_base >> PAGE_SHIFT,
+ size, vma->vm_page_prot);
+ if (ret) {
+ pr_warn("%s failed, ret = %d\n", __func__, ret);
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+static const struct file_operations gti_wdog_fops = {
+ .owner = THIS_MODULE,
+ .open = gti_wdog_open,
+ .release = gti_wdog_close,
+ .unlocked_ioctl = gti_wdog_ioctl,
+ .mmap = gti_wdog_mmap,
+};
+
+static struct miscdevice gti_wdog_miscdevice = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "gti_watchdog",
+ .fops = &gti_wdog_fops,
+};
+
+static int gti_wdog_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ unsigned long start, end;
+ u16 ctrl;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to enable PCI device\n");
+ goto enable_failed;
+ }
+
+ pci_set_master(pdev);
+
+	/*
+	 * MSIXEN is disabled during Linux PCIe bus probe/enumeration, so
+	 * simply enable it here. We don't need to set up any interrupts in
+	 * Linux, as the secure GTI MSI-X interrupts are delivered to ATF.
+	 */
+
+ pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
+ ctrl &= ~PCI_MSIX_FLAGS_MASKALL;
+ ctrl |= PCI_MSIX_FLAGS_ENABLE;
+ pci_write_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, ctrl);
+
+ start = pci_resource_start(pdev, GTI_PF_BAR0);
+ end = pci_resource_end(pdev, GTI_PF_BAR0);
+ g_mmio_base = start;
+
+ err = misc_register(&gti_wdog_miscdevice);
+ if (err != 0) {
+ dev_err(&pdev->dev, "Failed to register misc device\n");
+ goto misc_register_fail;
+ }
+ return 0;
+
+misc_register_fail:
+ pci_disable_device(pdev);
+enable_failed:
+
+ return err;
+}
+
+static void gti_wdog_remove(struct pci_dev *pdev)
+{
+	misc_deregister(&gti_wdog_miscdevice);
+	pci_disable_device(pdev);
+}
+
+static struct pci_driver gti_wdog_driver = {
+ .name = DRV_NAME,
+ .id_table = gti_wdog_id_table,
+ .probe = gti_wdog_probe,
+ .remove = gti_wdog_remove,
+};
+
+static int __init gti_wdog_init_module(void)
+{
+ pr_info("%s\n", DRV_NAME);
+
+ return pci_register_driver(&gti_wdog_driver);
+}
+
+static void __exit gti_wdog_cleanup_module(void)
+{
+ pci_unregister_driver(&gti_wdog_driver);
+}
+
+module_init(gti_wdog_init_module);
+module_exit(gti_wdog_cleanup_module);
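For reference, a hypothetical user-space sketch of arming the watchdog through the misc device
registered above (/dev/gti_watchdog). There is no uapi header in this patch, so the ioctl numbers
and the argument layout are copied from gti_watchdog.c; the timeout and core mask values below are
only examples.

#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>

#define GTI_WDOG_MAGIC		'G'
#define GTI_SET_WATCHDOG	_IOW(GTI_WDOG_MAGIC, 0x01, void *)
#define GTI_CLEAR_WATCHDOG	_IOW(GTI_WDOG_MAGIC, 0x02, void *)

struct set_watchdog_args {
	uint64_t watchdog_timeout_ms;
	uint64_t core_mask;
};

/* Returns the open fd on success; the caller must keep it open, since
 * closing it clears the watchdog (see gti_wdog_close()).
 */
static int arm_gti_watchdog(void)
{
	struct set_watchdog_args args = {
		.watchdog_timeout_ms = 5000,	/* 5 second timeout */
		.core_mask = 0xf,		/* monitor cores 0-3 */
	};
	int fd, ret;

	fd = open("/dev/gti_watchdog", O_RDWR);
	if (fd < 0)
		return -1;

	ret = ioctl(fd, GTI_SET_WATCHDOG, &args);
	if (ret) {
		close(fd);
		return ret;
	}

	return fd;
}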
diff --git a/drivers/soc/marvell/hw-access/Makefile b/drivers/soc/marvell/hw-access/Makefile
new file mode 100644
index 000000000000..de7855a7f6a8
--- /dev/null
+++ b/drivers/soc/marvell/hw-access/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for Hardware device access driver
+#
+
+ccflags-y += -I$(src)
+ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
+obj-$(CONFIG_HW_CSR_ACCESS) := hw_access.o
+
+hw_access-y := hw_rw_access.o
diff --git a/drivers/soc/marvell/hw-access/hw_rw_access.c b/drivers/soc/marvell/hw-access/hw_rw_access.c
new file mode 100644
index 000000000000..b0a1d656b01f
--- /dev/null
+++ b/drivers/soc/marvell/hw-access/hw_rw_access.c
@@ -0,0 +1,391 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Hardware device CSR Access driver
+ * Copyright (C) 2021 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* This driver supports read/write access only to OcteonTx2/OcteonTx3 HW
+ * device config registers. Read/write access to system registers is not
+ * supported.
+ */
+
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/pci.h>
+#include <linux/stddef.h>
+
+#include "rvu_struct.h"
+#include "rvu.h"
+#include "mbox.h"
+
+#define DEVICE_NAME "hw_access"
+#define CLASS_NAME "hw_access_class"
+
+/* PCI device IDs */
+#define PCI_DEVID_OCTEONTX2_RVU_AF 0xA065
+
+/* First physical address is the smallest start physical address of all HW
+ * devices.
+ * The smallest expected start physical address of all HW devices is based on
+ * datasheet 'Figure 4-1 Physical Address Regions' with the lowest I/O start
+ * address being 0x800000000000, as:
+ * - bits <51:47> = 0x1 define the I/O range,
+ * - bits <43:36> = 0x0 constitute the NCB DID,
+ * - bits <35:0> = 0x0 assume a zero offset.
+ * In practice the lowest observed register address is the GIC at
+ * 0x801000000000, which is used as the base for access.
+ */
+#define REG_PHYS_BASEADDR 0x801000000000
+
+/* The calculation does not take into consideration Armv8.2's 52-bit extended
+ * addressing used for PEM, which has bits<51:49> set to {0x1, 0x2, 0x3}.
+ *
+ * Maximum I/O address bits<43:36> are assumed to be 0xFF with no limits on NCB
+ * offset addresses forwarded to the NCB device. This assumption leads to the
+ * maximum addressable HW address being 0x8FFFFFFFFFFF.
+ * In practice the highest observed address is for PEM(5)_MSIX_MBA(0) as below:
+ */
+#define REG_PHYS_ENDADDR 0x8E5F000F0000
+
+#define REG_SPACE_MAPSIZE (REG_PHYS_ENDADDR - REG_PHYS_BASEADDR + 1)
+
+struct hw_reg_cfg {
+ u64 regaddr; /* Register physical address within a hw device */
+ u64 regval; /* Register value to be read or to write */
+};
+
+struct hw_ctx_cfg {
+ u16 blkaddr;
+ u16 pcifunc;
+ union {
+ u16 qidx;
+ u16 aura;
+ };
+ u8 ctype;
+ u8 op;
+};
+
+struct hw_cgx_info {
+ u8 pf;
+ u8 cgx_id;
+ u8 lmac_id;
+ u8 nix_idx;
+};
+
+#define HW_ACCESS_TYPE 120
+
+#define HW_ACCESS_CSR_READ_IOCTL _IO(HW_ACCESS_TYPE, 1)
+#define HW_ACCESS_CSR_WRITE_IOCTL _IO(HW_ACCESS_TYPE, 2)
+#define HW_ACCESS_CTX_READ_IOCTL _IO(HW_ACCESS_TYPE, 3)
+#define HW_ACCESS_CGX_INFO_IOCTL _IO(HW_ACCESS_TYPE, 4)
+
+struct hw_priv_data {
+ void __iomem *reg_base;
+ struct rvu *rvu;
+};
+
+static struct class *hw_reg_class;
+static int major_no;
+
+static int hw_access_open(struct inode *inode, struct file *filp)
+{
+ struct hw_priv_data *priv_data = NULL;
+ struct pci_dev *pdev;
+ int err;
+
+ priv_data = kzalloc(sizeof(*priv_data), GFP_KERNEL);
+ if (!priv_data)
+ return -ENOMEM;
+
+ priv_data->reg_base = ioremap(REG_PHYS_BASEADDR, REG_SPACE_MAPSIZE);
+ if (!priv_data->reg_base) {
+ pr_err("Unable to map Physical Base Address\n");
+		err = -ENOMEM;
+		kfree(priv_data);
+		return err;
+ }
+
+ pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AF,
+ NULL);
+ priv_data->rvu = pci_get_drvdata(pdev);
+
+ filp->private_data = priv_data;
+
+ return 0;
+}
+
+static int
+hw_access_csr_read(void __iomem *regbase, unsigned long arg)
+{
+ struct hw_reg_cfg reg_cfg;
+ u64 regoff;
+
+ if (copy_from_user(&reg_cfg, (void __user *)arg,
+ sizeof(struct hw_reg_cfg))) {
+		pr_err("Read Fault in copy from user\n");
+
+ return -EFAULT;
+ }
+
+ if (reg_cfg.regaddr < REG_PHYS_BASEADDR ||
+ reg_cfg.regaddr >= REG_PHYS_BASEADDR + REG_SPACE_MAPSIZE) {
+ pr_err("Address [0x%llx] out of range [0x%lx - 0x%lx]\n",
+ reg_cfg.regaddr, REG_PHYS_BASEADDR,
+ REG_PHYS_BASEADDR + REG_SPACE_MAPSIZE);
+
+ return -EFAULT;
+ }
+
+ /* Only 64 bit reads/writes are allowed */
+ reg_cfg.regaddr &= ~0x07ULL;
+ regoff = reg_cfg.regaddr - REG_PHYS_BASEADDR;
+ reg_cfg.regval = readq(regbase + regoff);
+
+ if (copy_to_user((void __user *)(unsigned long)arg,
+ &reg_cfg,
+ sizeof(struct hw_reg_cfg))) {
+ pr_err("Fault in copy to user\n");
+
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static int
+hw_access_csr_write(void __iomem *regbase, unsigned long arg)
+{
+ struct hw_reg_cfg reg_cfg;
+ u64 regoff;
+
+ if (copy_from_user(&reg_cfg, (void __user *)arg,
+ sizeof(struct hw_reg_cfg))) {
+ pr_err("Write Fault in copy from user\n");
+
+ return -EFAULT;
+ }
+
+ if (reg_cfg.regaddr < REG_PHYS_BASEADDR ||
+ reg_cfg.regaddr >= REG_PHYS_BASEADDR + REG_SPACE_MAPSIZE) {
+ pr_err("Address [0x%llx] out of range [0x%lx - 0x%lx]\n",
+ reg_cfg.regaddr, REG_PHYS_BASEADDR,
+ REG_PHYS_BASEADDR + REG_SPACE_MAPSIZE);
+
+ return -EFAULT;
+ }
+
+ /* Only 64 bit reads/writes are allowed */
+ reg_cfg.regaddr &= ~0x07ULL;
+ regoff = reg_cfg.regaddr - REG_PHYS_BASEADDR;
+ writeq(reg_cfg.regval, regbase + regoff);
+
+ return 0;
+}
+
+static int
+hw_access_nix_ctx_read(struct rvu *rvu, struct hw_ctx_cfg *ctx_cfg,
+ unsigned long arg)
+{
+ struct nix_aq_enq_req aq_req;
+ struct nix_aq_enq_rsp rsp;
+
+ memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
+ aq_req.hdr.pcifunc = ctx_cfg->pcifunc;
+ aq_req.ctype = ctx_cfg->ctype;
+ aq_req.op = ctx_cfg->op;
+ aq_req.qidx = ctx_cfg->qidx;
+
+ if (rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp)) {
+ pr_err("Failed to read the context\n");
+ return -EINVAL;
+ }
+
+ if (copy_to_user((struct nix_aq_enq_rsp *)arg,
+ &rsp, sizeof(struct nix_aq_enq_rsp))) {
+ pr_err("Fault in copy to user\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int
+hw_access_npa_ctx_read(struct rvu *rvu, struct hw_ctx_cfg *ctx_cfg,
+ unsigned long arg)
+{
+ struct npa_aq_enq_req aq_req;
+ struct npa_aq_enq_rsp rsp;
+
+ memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
+ aq_req.hdr.pcifunc = ctx_cfg->pcifunc;
+ aq_req.ctype = ctx_cfg->ctype;
+ aq_req.op = ctx_cfg->op;
+ aq_req.aura_id = ctx_cfg->aura;
+
+ if (rvu_mbox_handler_npa_aq_enq(rvu, &aq_req, &rsp)) {
+ pr_err("Failed to read the npa context\n");
+ return -EINVAL;
+ }
+
+ if (copy_to_user((struct npa_aq_enq_rsp *)arg,
+ &rsp, sizeof(struct npa_aq_enq_rsp))) {
+ pr_err("Fault in copy to user\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int
+hw_access_ctx_read(struct rvu *rvu, unsigned long arg)
+{
+ struct hw_ctx_cfg ctx_cfg;
+ int rc;
+
+ if (copy_from_user(&ctx_cfg, (struct hw_ctx_cfg *)arg,
+ sizeof(struct hw_ctx_cfg))) {
+		pr_err("Fault in copy from user\n");
+ return -EFAULT;
+ }
+
+ switch (ctx_cfg.blkaddr) {
+ case BLKADDR_NIX0:
+ case BLKADDR_NIX1:
+ rc = hw_access_nix_ctx_read(rvu, &ctx_cfg, arg);
+ break;
+ case BLKADDR_NPA:
+ rc = hw_access_npa_ctx_read(rvu, &ctx_cfg, arg);
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+ return rc;
+}
+
+static int
+hw_access_cgx_info(struct rvu *rvu, unsigned long arg)
+{
+ struct hw_cgx_info cgx_info;
+ struct rvu_pfvf *pfvf;
+ u8 cgx_id, lmac_id, pf;
+ u16 pcifunc;
+
+ if (copy_from_user(&cgx_info, (void __user *)arg, sizeof(struct hw_cgx_info))) {
+ pr_err("Reading PF value failed: copy from user\n");
+ return -EFAULT;
+ }
+
+ pf = cgx_info.pf;
+ if (!(pf >= PF_CGXMAP_BASE && pf <= rvu->cgx_mapped_pfs)) {
+ pr_err("Invalid PF value %d\n", pf);
+ return -EFAULT;
+ }
+
+ pcifunc = pf << 10;
+ pfvf = &rvu->pf[pf];
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id,
+ &lmac_id);
+ cgx_info.cgx_id = cgx_id;
+ cgx_info.lmac_id = lmac_id;
+ cgx_info.nix_idx = (pfvf->nix_blkaddr == BLKADDR_NIX0) ? 0 : 1;
+
+ if (copy_to_user((void __user *)(unsigned long)arg,
+ &cgx_info,
+ sizeof(struct hw_cgx_info))) {
+ pr_err("Fault in copy to user\n");
+
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static long hw_access_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct hw_priv_data *priv_data = filp->private_data;
+ void __iomem *regbase = priv_data->reg_base;
+ struct rvu *rvu = priv_data->rvu;
+
+ switch (cmd) {
+ case HW_ACCESS_CSR_READ_IOCTL:
+ return hw_access_csr_read(regbase, arg);
+
+ case HW_ACCESS_CSR_WRITE_IOCTL:
+ return hw_access_csr_write(regbase, arg);
+
+ case HW_ACCESS_CTX_READ_IOCTL:
+ return hw_access_ctx_read(rvu, arg);
+
+ case HW_ACCESS_CGX_INFO_IOCTL:
+ return hw_access_cgx_info(rvu, arg);
+
+ default:
+ pr_info("Invalid IOCTL: %d\n", cmd);
+
+ return -EINVAL;
+ }
+}
+
+static int hw_access_release(struct inode *inode, struct file *filp)
+{
+ struct hw_priv_data *priv_data = filp->private_data;
+ void __iomem *regbase = priv_data->reg_base;
+
+ iounmap(regbase);
+ filp->private_data = NULL;
+ kfree(priv_data);
+ priv_data = NULL;
+
+ return 0;
+}
+
+static const struct file_operations mmap_fops = {
+ .open = hw_access_open,
+ .unlocked_ioctl = hw_access_ioctl,
+ .release = hw_access_release,
+};
+
+static int __init hw_access_module_init(void)
+{
+ static struct device *hw_reg_device;
+
+ major_no = register_chrdev(0, DEVICE_NAME, &mmap_fops);
+ if (major_no < 0) {
+ pr_err("failed to register a major number for %s\n",
+ DEVICE_NAME);
+ return major_no;
+ }
+
+ hw_reg_class = class_create(THIS_MODULE, CLASS_NAME);
+ if (IS_ERR(hw_reg_class)) {
+ unregister_chrdev(major_no, DEVICE_NAME);
+ return PTR_ERR(hw_reg_class);
+ }
+
+ hw_reg_device = device_create(hw_reg_class, NULL,
+ MKDEV(major_no, 0), NULL,
+ DEVICE_NAME);
+ if (IS_ERR(hw_reg_device)) {
+ class_destroy(hw_reg_class);
+ unregister_chrdev(major_no, DEVICE_NAME);
+ return PTR_ERR(hw_reg_device);
+ }
+
+ return 0;
+}
+
+static void __exit hw_access_module_exit(void)
+{
+ device_destroy(hw_reg_class, MKDEV(major_no, 0));
+ class_destroy(hw_reg_class);
+ unregister_chrdev(major_no, DEVICE_NAME);
+}
+
+module_init(hw_access_module_init);
+module_exit(hw_access_module_exit);
+MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/marvell/marvell_mac_mgmt.c b/drivers/soc/marvell/marvell_mac_mgmt.c
new file mode 100644
index 000000000000..5d38f3dd4ad3
--- /dev/null
+++ b/drivers/soc/marvell/marvell_mac_mgmt.c
@@ -0,0 +1,251 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Marvell
+ *
+ */
+
+#include <linux/arm-smccc.h>
+#include <soc/marvell/octeontx/octeontx_smc.h>
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+
+/* Maximum number of MAC addresses to pass */
+#define MAC_MGMT_MAX_MACS_NUM 32
+
+/* Maximum mac data size */
+#define MAC_MGMT_MAX_MAC_TEXT_SIZE 2048
+
+/* Single entry description */
+struct mac_info {
+ u32 index;
+ u32 reserved; /* Must be zero */
+ union {
+ u64 mac_addr;
+ u8 bytes[8];
+ } s;
+};
+
+/* SMC call number used to set MAC address */
+#define PLAT_OCTEONTX_MAC_MGMT_SET_ADDR 0xc2000e10
+
+/** Set the MAC address given by the user
+ *
+ * The call passes MAC address information to ATF for further processing.
+ * The information contains the index and the MAC address itself. Data should
+ * be validated before this call.
+ *
+ * @param minfo - MAC address information (index, value)
+ *
+ * @return 0 for success, error code otherwise
+ *
+ */
+static int mac_mgmt_set_addr(struct mac_info *minfo)
+{
+ struct arm_smccc_res res;
+
+ /* Pass validated data to ATF */
+ arm_smccc_smc(PLAT_OCTEONTX_MAC_MGMT_SET_ADDR,
+ minfo->index, minfo->s.mac_addr, 0, 0, 0, 0, 0,
+ &res);
+ if (res.a0)
+ return -EINVAL;
+
+ return 0;
+}
+
+/** Parse the user's text input into a MAC information structure
+ *
+ * @param buffer - ASCII string containing the user's input
+ * @param n - size of the user's input
+ * @param minfo - input/output value containing MAC information; updated only
+ *                when the call succeeds
+ *
+ * @return bytes parsed on success, error code otherwise
+ *
+ */
+static ssize_t mac_mgmt_parse_buffer(const char *buffer, size_t n,
+ struct mac_info *minfo)
+{
+ u32 index;
+ u64 mac_addr;
+ int processed, ret;
+
+ /* Data are in buffer, parse it */
+ ret = sscanf(buffer, "%u %llx %n", &index, &mac_addr, &processed);
+ if (ret <= 0)
+ return -EINVAL;
+
+ if (processed < 2) /* Expect at least two characters in input */
+ return -EINVAL;
+
+ if (index > MAC_MGMT_MAX_MACS_NUM)
+ return -EINVAL;
+
+ if (!mac_addr)
+ return -EINVAL;
+
+ /* Store validated data */
+ minfo->index = index;
+ minfo->s.mac_addr = mac_addr & 0xffffffffffff;
+
+ pr_debug("%s: Idx: %u, addr: %llx\n", __func__,
+ minfo->index, minfo->s.mac_addr);
+
+ return n;
+}
+
+/** Process write operations to debugfs.
+ *
+ * The call is backed by the kernel's seq_file API.
+ *
+ * @param filp - file pointer
+ * @param buffer - user's input buffer
+ * @param count - user's input buffer size
+ * @param ppos - position in file
+ *
+ * @return bytes written on success, error code otherwise
+ *
+ */
+static ssize_t mac_mgmt_write(struct file *filp, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct mac_info minfo = { 0 };
+ char *mac_text_data = NULL;
+ size_t cnt;
+ int ret, bytes;
+
+	/* User input should fit into MAC_MGMT_MAX_MAC_TEXT_SIZE - 1 bytes;
+	 * otherwise truncate.
+	 */
+ cnt = (count >= MAC_MGMT_MAX_MAC_TEXT_SIZE - 1) ?
+ (MAC_MGMT_MAX_MAC_TEXT_SIZE - 1) : count;
+
+ /* Leave one byte for NULL termination */
+ mac_text_data = kzalloc(cnt + 1, GFP_KERNEL);
+ if (!mac_text_data)
+ return -ENOMEM;
+
+ if (copy_from_user(mac_text_data, buffer, cnt)) {
+ ret = -EFAULT;
+ goto done;
+ }
+
+ bytes = mac_mgmt_parse_buffer(mac_text_data, cnt, &minfo);
+ if (bytes < 0) {
+ pr_warn("%s: Invalid text format!\n", __func__);
+ ret = bytes;
+ goto done;
+ }
+
+ ret = mac_mgmt_set_addr(&minfo);
+ if (!ret)
+		pr_info("%s: MAC address has been updated, the change takes effect after reboot\n",
+ __func__);
+done:
+ kfree(mac_text_data);
+ return ret ? ret : bytes;
+}
+
+/** Process read operations from debugfs.
+ *
+ * The call is backed by the kernel's seq_file API.
+ * It provides usage information to the user.
+ *
+ * @param s - seq_file file handle
+ * @param unused - unused parameter
+ *
+ * @return 0 on success, error code otherwise
+ *
+ */
+static int mac_mgmt_read(struct seq_file *s, void *unused)
+{
+ seq_printf(s, "Sets MAC address for available interface.\nFormat:\n"
+ "ID BOARD-MAC-ADDRESS\n\n");
+ return 0;
+}
+
+/** Process the open call on debugfs.
+ *
+ * The call is backed by the kernel's seq_file API.
+ *
+ * @param inode - inode representing the debugfs entry
+ * @param file - file structure related to the debugfs entry
+ *
+ * @return 0 on success, error code otherwise
+ *
+ */
+static int mac_mgmt_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mac_mgmt_read, inode->i_private);
+}
+
+static const struct file_operations mac_mgmt_fops = {
+ .owner = THIS_MODULE,
+ .open = mac_mgmt_open,
+ .read = seq_read,
+ .write = mac_mgmt_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/* Handle to debugfs root directory created by the driver */
+static struct dentry *mac_dbgfs_root;
+
+/** Initialize debugfs entries for the driver
+ *
+ * @return 0 for success, error code otherwise
+ *
+ */
+static int mac_mgmt_setup_debugfs(void)
+{
+ struct dentry *dbg_file;
+
+ mac_dbgfs_root = debugfs_create_dir("mac_mgmt", NULL);
+ if (IS_ERR(mac_dbgfs_root))
+ return PTR_ERR(mac_dbgfs_root);
+
+ dbg_file = debugfs_create_file("set_mac_addr", 0600, mac_dbgfs_root,
+ NULL, &mac_mgmt_fops);
+ if (IS_ERR(dbg_file)) {
+ debugfs_remove(mac_dbgfs_root);
+ mac_dbgfs_root = NULL;
+ return PTR_ERR(dbg_file);
+ }
+
+ return 0;
+}
+
+static int __init cn10k_mac_mgmt_init(void)
+{
+ int ret;
+
+ ret = octeontx_soc_check_smc();
+ if (ret != 2) {
+ pr_debug("%s: Not supported\n", __func__);
+ return -EPERM;
+ }
+
+ ret = mac_mgmt_setup_debugfs();
+ if (ret) {
+ pr_err("%s: Can't create debugfs entries! (%d)\n",
+ __func__, ret);
+ return ret;
+ }
+
+ pr_info("Marvell CN10K MAC management\n");
+
+ return 0;
+}
+
+static void __exit cn10k_mac_mgmt_exit(void)
+{
+ debugfs_remove_recursive(mac_dbgfs_root);
+}
+
+module_init(cn10k_mac_mgmt_init);
+module_exit(cn10k_mac_mgmt_exit);
+
+MODULE_AUTHOR("Wojciech Bartczak <wbartczak@marvell.com>");
+MODULE_DESCRIPTION("MAC address management for Marvell CN10K");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/marvell/mvmdio_uio.c b/drivers/soc/marvell/mvmdio_uio.c
new file mode 100644
index 000000000000..ea479047070b
--- /dev/null
+++ b/drivers/soc/marvell/mvmdio_uio.c
@@ -0,0 +1,314 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell's MDIO bus uio driver
+ *
+ * Copyright (C) 2021 Marvell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) "mvmdio-uio: " fmt
+
+#include <linux/of_mdio.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+
+#define MVMDIO_DEV_NAME "mvmdio-uio"
+#define MVMDIO_CLASS_NAME "mvmdio-uio-class"
+#define MAX_MDIO_BUS 8
+
+static inline int mvmdio_read(struct mii_bus *bus, int bus_id,
+ int phy_addr, u32 reg)
+{
+ int ret;
+
+ ret = __mdiobus_read(bus, phy_addr, reg);
+ if (ret < 0) {
+ pr_err("smi read failed: bus:%d, phy:0x%x, reg:0x%x\n",
+ bus_id, phy_addr, reg);
+ }
+ return ret;
+}
+
+static inline int mvmdio_write(struct mii_bus *bus, int bus_id,
+ int phy_addr, u32 reg, u16 data)
+{
+ int ret;
+
+ ret = __mdiobus_write(bus, phy_addr, reg, data);
+ if (ret < 0) {
+ pr_err("smi write failed: bus:%d, phy:0x%x, reg:0x%x, data=0x%hx\n",
+ bus_id, phy_addr, reg, data);
+ }
+ return ret;
+}
+
+static struct mii_bus *mv_mii_buses[MAX_MDIO_BUS];
+static struct class *mv_cl;
+static int major;
+static int paged_access;
+
+struct mii_data {
+ int bus_id;
+ int phy_id;
+ int reg;
+ u16 data;
+};
+
+struct mii_data_pgd_access {
+ struct mii_data md;
+ int page_reg;
+ int page_num;
+};
+
+static int mv_mdio_device_open(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+static ssize_t mv_mdio_device_read(struct file *file,
+ char *buf, size_t count, loff_t *f_pos)
+{
+ int ret;
+ int page, prev_page;
+ struct mii_data_pgd_access mii;
+ struct mii_bus *bus;
+ size_t data_sz;
+
+ data_sz = paged_access ?
+ sizeof(struct mii_data_pgd_access) :
+ sizeof(struct mii_data);
+
+ if (copy_from_user(&mii, (struct mii_data_pgd_access *)buf, data_sz)) {
+ pr_err("copy_from_user failed\n");
+ return -EFAULT;
+ }
+
+ page = paged_access ? mii.page_num : -1;
+
+ if (mii.md.bus_id < 0 || mii.md.bus_id >= MAX_MDIO_BUS)
+ return -EINVAL;
+
+ bus = mv_mii_buses[mii.md.bus_id];
+ if (!bus) {
+ pr_err("invalid bus_id\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&bus->mdio_lock);
+
+ if (page != -1) {
+ /* Save the current page number */
+ ret = mvmdio_read(bus, mii.md.bus_id,
+ mii.md.phy_id, mii.page_reg);
+ if (ret < 0)
+ goto mdio_failed;
+
+ prev_page = ret;
+
+ /* Set a new page number */
+ ret = mvmdio_write(bus, mii.md.bus_id, mii.md.phy_id,
+ mii.page_reg, page);
+ if (ret < 0)
+ goto mdio_failed;
+ }
+
+ /* Read the target register */
+ ret = mvmdio_read(bus, mii.md.bus_id,
+ mii.md.phy_id, mii.md.reg);
+ if (ret < 0)
+ goto mdio_failed;
+
+ mii.md.data = (u16)ret;
+
+ if (page != -1) {
+ /* Restore the previous page number */
+ ret = mvmdio_write(bus, mii.md.bus_id, mii.md.phy_id,
+ mii.page_reg, prev_page);
+ if (ret < 0)
+ goto mdio_failed;
+ }
+
+ mutex_unlock(&bus->mdio_lock);
+
+ if (copy_to_user((struct mii_data_pgd_access *)buf, &mii, data_sz)) {
+ pr_err("copy_to_user failed\n");
+ return -EFAULT;
+ }
+
+ return 0;
+
+mdio_failed:
+ mutex_unlock(&bus->mdio_lock);
+ return ret;
+}
+
+static ssize_t mv_mdio_device_write(struct file *file,
+ const char *buf, size_t count, loff_t *f_pos)
+{
+ int ret;
+ int page, prev_page;
+ struct mii_data_pgd_access mii;
+ struct mii_bus *bus;
+ size_t data_sz;
+
+ data_sz = paged_access ?
+ sizeof(struct mii_data_pgd_access) :
+ sizeof(struct mii_data);
+
+ if (copy_from_user(&mii, (struct mii_data_pgd_access *)buf, data_sz)) {
+ pr_err("copy_from_user failed\n");
+ return -EFAULT;
+ }
+
+ page = paged_access ? mii.page_num : -1;
+
+ if (mii.md.bus_id < 0 || mii.md.bus_id >= MAX_MDIO_BUS)
+ return -EINVAL;
+
+ bus = mv_mii_buses[mii.md.bus_id];
+ if (!bus) {
+ pr_err("invalid bus_id\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&bus->mdio_lock);
+
+ if (page != -1) {
+ /* Save the current page number */
+ ret = mvmdio_read(bus, mii.md.bus_id,
+ mii.md.phy_id, mii.page_reg);
+ if (ret < 0)
+ goto mdio_failed;
+
+ prev_page = ret;
+
+ /* Set a new page number */
+ ret = mvmdio_write(bus, mii.md.bus_id, mii.md.phy_id,
+ mii.page_reg, page);
+ if (ret < 0)
+ goto mdio_failed;
+ }
+
+ /* Write the target register */
+ ret = mvmdio_write(bus, mii.md.bus_id,
+ mii.md.phy_id, mii.md.reg, mii.md.data);
+ if (ret < 0)
+ goto mdio_failed;
+
+ if (page != -1) {
+ /* Restore the previous page number */
+ ret = mvmdio_write(bus, mii.md.bus_id, mii.md.phy_id,
+ mii.page_reg, prev_page);
+ if (ret < 0)
+ goto mdio_failed;
+ }
+
+ mutex_unlock(&bus->mdio_lock);
+
+ return 0;
+
+mdio_failed:
+ mutex_unlock(&bus->mdio_lock);
+ return ret;
+}
+
+static int mv_mdio_device_close(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+static const struct file_operations mvmdio_fops = {
+ .owner = THIS_MODULE,
+ .open = mv_mdio_device_open,
+ .read = mv_mdio_device_read,
+ .write = mv_mdio_device_write,
+ .release = mv_mdio_device_close,
+};
+
+static int __init mv_mdio_device_init(void)
+{
+ struct device_node *np;
+ struct device_node *mdio;
+ struct mii_bus *mv_mii_bus;
+ static struct device *mvmdio_dev;
+ int bus_count = 0;
+ int ret;
+
+ memset(mv_mii_buses, 0, sizeof(mv_mii_buses));
+ for_each_compatible_node(np, NULL, "marvell,mvmdio-uio") {
+ if (bus_count == MAX_MDIO_BUS)
+ break;
+
+ mdio = of_parse_phandle(np, "mii-bus", 0);
+ if (mdio == NULL) {
+ pr_err("parse handle failed\n");
+ continue;
+ }
+ mv_mii_bus = of_mdio_find_bus(mdio);
+ if (mv_mii_bus == NULL) {
+ pr_err("mdio find bus failed\n");
+ continue;
+ }
+ pr_info("bus %d added at %s\n",
+ bus_count, mdio->name);
+ mv_mii_buses[bus_count++] = mv_mii_bus;
+ }
+
+ if (bus_count == 0) {
+ pr_err("no useful mdio bus found\n");
+ return -ENODEV;
+ }
+
+
+ ret = register_chrdev(0, MVMDIO_DEV_NAME, &mvmdio_fops);
+ if (ret < 0) {
+ pr_err("failed to register a char device\n");
+ return ret;
+ }
+
+ major = ret;
+
+ mv_cl = class_create(THIS_MODULE, MVMDIO_CLASS_NAME);
+ if (IS_ERR(mv_cl)) {
+ ret = PTR_ERR(mv_cl);
+ goto error_class;
+ }
+
+ mvmdio_dev = device_create(mv_cl, NULL,
+ MKDEV(major, 0), NULL, MVMDIO_DEV_NAME);
+
+ if (IS_ERR(mvmdio_dev)) {
+ ret = PTR_ERR(mvmdio_dev);
+ goto error_device;
+ }
+
+ return 0;
+
+error_device:
+ class_destroy(mv_cl);
+error_class:
+ unregister_chrdev(major, MVMDIO_DEV_NAME);
+
+ pr_err("driver registration failed\n");
+ return ret;
+}
+
+static void __exit mv_mdio_device_exit(void)
+{
+ device_destroy(mv_cl, MKDEV(major, 0));
+ class_destroy(mv_cl);
+ unregister_chrdev(major, MVMDIO_DEV_NAME);
+}
+
+late_initcall(mv_mdio_device_init);
+module_exit(mv_mdio_device_exit);
+
+module_param(paged_access, int, 0644);
+MODULE_PARM_DESC(paged_access, "Enable paged access support");
+MODULE_DESCRIPTION("Marvell MDIO uio driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/marvell/octeontx2-ccu/Makefile b/drivers/soc/marvell/octeontx2-ccu/Makefile
new file mode 100644
index 000000000000..ee0aa0480268
--- /dev/null
+++ b/drivers/soc/marvell/octeontx2-ccu/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for Marvell's OcteonTX2 CCU driver
+#
+
+obj-$(CONFIG_OCTEONTX2_CCU) += octeontx2_ccu.o
+
+octeontx2_ccu-y := ccu.o
diff --git a/drivers/soc/marvell/octeontx2-ccu/README b/drivers/soc/marvell/octeontx2-ccu/README
new file mode 100644
index 000000000000..015d94fe06b0
--- /dev/null
+++ b/drivers/soc/marvell/octeontx2-ccu/README
@@ -0,0 +1,97 @@
+
+OCTEONTX2 Cache Controller Unit (CCU)
+-------------------------------------
+
+The Cache Controller Unit allows partitioning the LLC ways into
+different partitions (mparid) and associating them with a CPU or a
+set of CPUs.
+
+This driver creates a debugfs directory (ccu) in /sys/kernel/debug
+with the following files
+ $ ls /sys/kernel/debug/ccu
+ config counter cpumask mparid waymask
+
+mparid  - Create a new partition ID. Valid values are 0 - 255.
+waymask - Set the LLC ways that will be masked for the associated mparid.
+ CCU has 14 LTG ways and 20 DTG ways. The mask value is as follows
+ [13:0] - LTG Ways
+ [33:14] - DTG Ways
+cpumask - Set the CPU mask to which the LLC way partitioning will be applied.
+config - Write 1 to enable the CCU partitioning with values set in the files
+ mparid, waymask and cpumask.
+ Read prints the cache configuration.
+counter - Prints the Allocate and Hit counter values for the MPARIDs that
+ are configured.
+
+Usage Steps
+-----------
+ 1. Create a new partition
+ $ echo <mparid> > /sys/kernel/debug/ccu/mparid
+
+ 2. Set the LLC ways that need to be masked.
+ $ echo <waymask> > /sys/kernel/debug/ccu/waymask
+
+ 3. Set the cpumask to which the way masking will be applied.
+ $ echo <cpu-mask> > /sys/kernel/debug/ccu/cpumask
+
+ 4. Enable this configuration
+ $ echo 1 > /sys/kernel/debug/ccu/config
+
+The above 4 steps can be repeated with different mparid, waymask and
+cpumask values. Finally, the hardware configuration can be read by
+ $ cat /sys/kernel/debug/ccu/config
+
+To check the Allocate and Hit counters for the configured MPARIDs
+ $ cat /sys/kernel/debug/ccu/counter
+
+Example
+-------
+Let's create mparid 10 with all LTG ways masked and associate it with CPUs 12-23
+ $ echo 10 > /sys/kernel/debug/ccu/mparid
+ $ echo 0x3fff > /sys/kernel/debug/ccu/waymask
+ $ echo 0xfff000 > /sys/kernel/debug/ccu/cpumask
+ $ echo 1 > /sys/kernel/debug/ccu/config
+
+ $ cat /sys/kernel/debug/ccu/config
+ core:0 mparid:0 waymask:0x0
+ core:1 mparid:0 waymask:0x0
+ core:2 mparid:0 waymask:0x0
+ core:3 mparid:0 waymask:0x0
+ core:4 mparid:0 waymask:0x0
+ core:5 mparid:0 waymask:0x0
+ core:6 mparid:0 waymask:0x0
+ core:7 mparid:0 waymask:0x0
+ core:8 mparid:0 waymask:0x0
+ core:9 mparid:0 waymask:0x0
+ core:10 mparid:0 waymask:0x0
+ core:11 mparid:0 waymask:0x0
+ core:12 mparid:10 waymask:0x3fff
+ core:13 mparid:10 waymask:0x3fff
+ core:14 mparid:10 waymask:0x3fff
+ core:15 mparid:10 waymask:0x3fff
+ core:16 mparid:10 waymask:0x3fff
+ core:17 mparid:10 waymask:0x3fff
+ core:18 mparid:10 waymask:0x3fff
+ core:19 mparid:10 waymask:0x3fff
+ core:20 mparid:10 waymask:0x3fff
+ core:21 mparid:10 waymask:0x3fff
+ core:22 mparid:10 waymask:0x3fff
+ core:23 mparid:10 waymask:0x3fff
+
+ $ cat /sys/kernel/debug/ccu/counter
+ CCU:0 TAD:0 MPARID:0 ALLOC:0x20a469e HIT:0x1ca8fa09
+ CCU:0 TAD:1 MPARID:0 ALLOC:0x20a48ea HIT:0x32dc7712
+ CCU:1 TAD:0 MPARID:0 ALLOC:0x20a45ee HIT:0x326f8834
+ CCU:1 TAD:1 MPARID:0 ALLOC:0x20a470e HIT:0x19aa9ce8
+ CCU:2 TAD:0 MPARID:0 ALLOC:0x20a4719 HIT:0x3204242a
+ CCU:2 TAD:1 MPARID:0 ALLOC:0x20a474f HIT:0x32234456
+ CCU:3 TAD:0 MPARID:0 ALLOC:0x20a45a2 HIT:0x19e723c9
+ CCU:3 TAD:1 MPARID:0 ALLOC:0x20a46c2 HIT:0x1aae638a
+ CCU:0 TAD:0 MPARID:10 ALLOC:0x8 HIT:0x2a1
+ CCU:0 TAD:1 MPARID:10 ALLOC:0xb HIT:0x273
+ CCU:1 TAD:0 MPARID:10 ALLOC:0x13 HIT:0x22d
+ CCU:1 TAD:1 MPARID:10 ALLOC:0xb HIT:0x26d
+ CCU:2 TAD:0 MPARID:10 ALLOC:0x11 HIT:0x206
+ CCU:2 TAD:1 MPARID:10 ALLOC:0x10 HIT:0x1f2
+ CCU:3 TAD:0 MPARID:10 ALLOC:0x10 HIT:0x2b2
+ CCU:3 TAD:1 MPARID:10 ALLOC:0x14 HIT:0x26f
diff --git a/drivers/soc/marvell/octeontx2-ccu/ccu.c b/drivers/soc/marvell/octeontx2-ccu/ccu.c
new file mode 100644
index 000000000000..60bfa1438791
--- /dev/null
+++ b/drivers/soc/marvell/octeontx2-ccu/ccu.c
@@ -0,0 +1,257 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 CCU controller driver
+ *
+ * Copyright (C) 2021 Marvell.
+ */
+
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+#include <linux/debugfs.h>
+#include <linux/io.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#define CCU_BASE 0x87E050000000
+#define CCS_BASE 0x87E087100000
+
+#define CCS_MPARX_MASK(pid) (0x1000 | ((pid) & 0xff) << 3)
+
+#define CCUX_TADX_MPARX_ACNT(ccu, tad, pid) \
+ (0x401000 | \
+ ((ccu) & 0x3) << 24 | \
+ ((tad) & 0x1) << 21 | \
+ ((pid) & 0xff) << 4)
+
+#define CCUX_TADX_MPARX_HCNT(ccu, tad, pid) \
+ (0x401008 | \
+ (((ccu) & 0x3) << 24) | \
+ (((tad) & 0x1) << 21) | \
+ (((pid) & 0xff) << 4))
+
+#define MPARID_MAX 256
+
+/* Global variables */
+void __iomem *ccu_base;
+void __iomem *ccs_base;
+static u8 mparid;
+static u64 waymask;
+static u32 cpid_mask;
+struct dentry *ccu_dent;
+struct cpumask cpid_cpumask;
+static u8 mparid_configured[MPARID_MAX];
+
+#define COUNTER_BUF_SIZE 65536
+#define CONFIG_BUF_SIZE 4096
+char *counter_buf;
+char *config_buf;
+
+/* Low level accessor functions */
+static inline void apsys_cpidel2_read_remote(void *data)
+{
+ u64 val;
+
+ asm volatile ("mrs %0, s3_4_c11_c6_4" : "=r" (val) : );
+ *(u64 *)data = val;
+}
+
+static inline void apsys_cpidel2_write_remote(void *data)
+{
+ u64 val;
+
+ val = *(u64 *)data;
+ asm volatile ("msr s3_4_c11_c6_4, %0" : : "r" (val));
+}
+
+static inline u64 ccsreg_read(u64 offset)
+{
+ return readq(ccs_base + offset);
+}
+
+static inline void ccsreg_write(u64 offset, u64 val)
+{
+ writeq(val, ccs_base + offset);
+}
+
+static inline u64 ccureg_read(u64 offset)
+{
+ return readq(ccu_base + offset);
+}
+
+static inline void ccureg_write(u64 offset, u64 val)
+{
+ writeq(val, ccu_base + offset);
+}
+
+/* Mask LLC ways for a partition id */
+static inline void ccsreg_mparmask_set(int mparid, u64 waymask)
+{
+ /* Bits [13:0] mask LTG ways, bits [33:14] mask DTG ways */
+ ccsreg_write(CCS_MPARX_MASK(mparid), waymask);
+}
+
+static ssize_t otx2_ccu_config_write(struct file *file, const char *buf,
+ size_t count, loff_t *position)
+{
+ int cpu;
+
+ pr_info("ccu: configuring mparid:%d waymask:0x%llx cpumask:0x%x\n",
+ mparid, waymask, cpid_mask);
+
+ /* Configure the LLC ways */
+ ccsreg_mparmask_set(mparid, waymask);
+
+ /* Create a bitmap */
+ cpumask_clear(&cpid_cpumask);
+ for_each_set_bit(cpu, (unsigned long *)&cpid_mask, num_present_cpus())
+ cpumask_set_cpu(cpu, &cpid_cpumask);
+
+ /* Configure mparid for all cpus in the bitmap */
+ for_each_cpu(cpu, &cpid_cpumask) {
+ smp_call_function_single(cpu, apsys_cpidel2_write_remote,
+ &mparid, true);
+ }
+
+	/* Record that this mparid has been configured */
+ mparid_configured[mparid] = 1;
+
+ return count;
+}
+
+static ssize_t otx2_ccu_config_read(struct file *file, char __user *buf,
+ size_t count, loff_t *position)
+{
+ u64 val, waymask;
+ u32 cpu, sz = 0;
+ u8 mparid;
+
+ memset(config_buf, 0, CONFIG_BUF_SIZE);
+
+ /* Read the mparid configured for each cpu and then read
+ * the associated waymask for that mparid.
+ */
+ for_each_cpu(cpu, cpu_present_mask) {
+ smp_call_function_single(cpu, apsys_cpidel2_read_remote,
+ &val, true);
+ mparid = (u8)val;
+ waymask = ccsreg_read(CCS_MPARX_MASK(mparid));
+ sz += snprintf(config_buf + sz, CONFIG_BUF_SIZE - sz,
+ "core:%d mparid:%d waymask:0x%llx\n",
+ cpu, mparid, waymask);
+ }
+
+ /* Copy to the user buffer */
+ return simple_read_from_buffer(buf, count, position, config_buf, sz);
+}
+
+static const struct file_operations otx2_ccu_config_fops = {
+ .read = otx2_ccu_config_read,
+ .write = otx2_ccu_config_write,
+};
+
+static ssize_t otx2_ccu_counter_read(struct file *file, char __user *buf,
+ size_t count, loff_t *position)
+{
+ int ccu, tad, pid;
+ u64 acnt, hcnt;
+ u32 sz = 0;
+
+ memset(counter_buf, 0, COUNTER_BUF_SIZE);
+
+ /* Read the Allocate and Hit counter values only for MPARIDs
+ * that were configured.
+ */
+ for (pid = 0; pid < MPARID_MAX; pid++) {
+ if (!mparid_configured[pid])
+ continue;
+
+ for (ccu = 0; ccu < 4; ccu++) {
+ for (tad = 0; tad < 2; tad++) {
+ acnt = ccureg_read(CCUX_TADX_MPARX_ACNT(ccu, tad, pid));
+ hcnt = ccureg_read(CCUX_TADX_MPARX_HCNT(ccu, tad, pid));
+ sz += snprintf(counter_buf + sz, COUNTER_BUF_SIZE - sz,
+ "CCU:%d TAD:%d MPARID:%d ALLOC:0x%llx HIT:0x%llx\n",
+ ccu, tad, pid, acnt, hcnt);
+ }
+ }
+ }
+
+ /* Copy to the user buffer */
+ return simple_read_from_buffer(buf, count, position, counter_buf, sz);
+}
+
+static const struct file_operations otx2_ccu_counter_fops = {
+ .read = otx2_ccu_counter_read,
+};
+
+static int __init otx2_ccu_init(void)
+{
+ u32 cpuid = read_cpuid_id();
+
+ cpuid &= (MIDR_IMPLEMENTOR_MASK | (0xff0 << MIDR_PARTNUM_SHIFT));
+
+ /* Valid only for OcteonTX2 Family */
+ if (((ARM_CPU_IMP_CAVIUM << MIDR_IMPLEMENTOR_SHIFT) |
+ (0xB0 << MIDR_PARTNUM_SHIFT)) != cpuid)
+ return -ENODEV;
+
+ /* CCU Base address */
+ ccu_base = ioremap(CCU_BASE, 0x4000000);
+	if (!ccu_base) {
+		pr_err("%s: CCU ioremap failed\n", __func__);
+		return -ENOMEM;
+	}
+
+ /* CCS Base address */
+ ccs_base = ioremap(CCS_BASE, 0x1000);
+	if (!ccs_base) {
+		pr_err("%s: CCS ioremap failed\n", __func__);
+		iounmap(ccu_base);
+		return -ENOMEM;
+	}
+
+	/* Add debugfs hooks */
+ ccu_dent = debugfs_create_dir("ccu", NULL);
+
+ debugfs_create_u8("mparid", 0644, ccu_dent, &mparid);
+
+ debugfs_create_u64("waymask", 0644, ccu_dent, &waymask);
+
+ debugfs_create_u32("cpumask", 0644, ccu_dent, &cpid_mask);
+
+ debugfs_create_file("config", 0644, ccu_dent, NULL, &otx2_ccu_config_fops);
+
+ debugfs_create_file("counter", 0644, ccu_dent, NULL, &otx2_ccu_counter_fops);
+
+	counter_buf = kzalloc(COUNTER_BUF_SIZE, GFP_KERNEL);
+	if (!counter_buf) {
+		pr_err("Failed to allocate memory for counter buffer\n");
+		return -ENOMEM;
+	}
+
+	config_buf = kzalloc(CONFIG_BUF_SIZE, GFP_KERNEL);
+	if (!config_buf) {
+		pr_err("Failed to allocate memory for config buffer\n");
+		kfree(counter_buf);
+		return -ENOMEM;
+	}
+
+ /* Zero MPARID is the default configuration for all CPUs at bootup */
+ mparid_configured[0] = 1;
+ return 0;
+}
+
+static void __exit otx2_ccu_exit(void)
+{
+	kfree(config_buf);
+	kfree(counter_buf);
+	debugfs_remove_recursive(ccu_dent);
+	iounmap(ccs_base);
+	iounmap(ccu_base);
+}
+
+module_init(otx2_ccu_init);
+module_exit(otx2_ccu_exit);
+
+MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_DESCRIPTION("Marvell OcteonTX2 CCU controller Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/marvell/octeontx2-dpi/Makefile b/drivers/soc/marvell/octeontx2-dpi/Makefile
new file mode 100644
index 000000000000..73640517593c
--- /dev/null
+++ b/drivers/soc/marvell/octeontx2-dpi/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for Marvell's OcteonTX2 DPI PF driver
+#
+
+obj-$(CONFIG_OCTEONTX2_DPI_PF) += octeontx2_dpi.o
+
+octeontx2_dpi-y := dpi.o
diff --git a/drivers/soc/marvell/octeontx2-dpi/dpi.c b/drivers/soc/marvell/octeontx2-dpi/dpi.c
new file mode 100644
index 000000000000..f0faccccd8ae
--- /dev/null
+++ b/drivers/soc/marvell/octeontx2-dpi/dpi.c
@@ -0,0 +1,621 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 DPI PF driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/sysfs.h>
+
+#include "dpi.h"
+
+#define DPI_DRV_NAME "octeontx2-dpi"
+#define DPI_DRV_STRING "Marvell OcteonTX2 DPI-DMA Driver"
+#define DPI_DRV_VERSION "1.0"
+
+/* Supported devices */
+static const struct pci_device_id dpi_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_DPI_PF) },
+ { 0, } /* end of table */
+};
+
+static int mps = 128;
+module_param(mps, int, 0644);
+MODULE_PARM_DESC(mps, "Maximum payload size. Supported sizes are 128, 256, 512 and 1024 bytes");
+
+static int mrrs = 128;
+module_param(mrrs, int, 0644);
+MODULE_PARM_DESC(mrrs, "Maximum read request size. Supported sizes are 128, 256, 512 and 1024 bytes");
+
+MODULE_DEVICE_TABLE(pci, dpi_id_table);
+MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_DESCRIPTION(DPI_DRV_STRING);
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DPI_DRV_VERSION);
+
+static inline bool is_otx3_dpi(struct dpipf *dpi)
+{
+ if (dpi->pdev->subsystem_device >= PCI_SUBDEVID_OCTEONTX3_DPI_PF)
+ return 1;
+
+ return 0;
+}
+
+static void dpi_reg_write(struct dpipf *dpi, u64 offset, u64 val)
+{
+ writeq(val, dpi->reg_base + offset);
+}
+
+static u64 dpi_reg_read(struct dpipf *dpi, u64 offset)
+{
+ return readq(dpi->reg_base + offset);
+}
+
+static int dpi_dma_engine_get_num(void)
+{
+ return DPI_MAX_ENGINES;
+}
+
+static int dpi_queue_init(struct dpipf *dpi, struct dpipf_vf *dpivf, u8 vf)
+{
+ int engine = 0;
+ int queue = vf;
+ u64 reg = 0ULL;
+ u32 aura = dpivf->vf_config.aura;
+ u16 buf_size = dpivf->vf_config.csize;
+ u16 sso_pf_func = dpivf->vf_config.sso_pf_func;
+ u16 npa_pf_func = dpivf->vf_config.npa_pf_func;
+
+ reg = DPI_DMA_IBUFF_CSIZE_CSIZE((u64)(buf_size / 8));
+ if (is_otx3_dpi(dpi))
+ reg |= DPI_DMA_IBUFF_CSIZE_NPA_FREE;
+ dpi_reg_write(dpi, DPI_DMAX_IBUFF_CSIZE(queue), reg);
+
+ if (!is_otx3_dpi(dpi)) {
+ /* IDs are already configured while creating the domains.
+ * No need to configure here.
+ */
+ for (engine = 0; engine < dpi_dma_engine_get_num(); engine++) {
+			/* Don't configure the queues for PKT engines */
+ if (engine >= 4)
+ break;
+
+ reg = 0;
+ reg = dpi_reg_read(dpi, DPI_DMA_ENGX_EN(engine));
+ reg |= DPI_DMA_ENG_EN_QEN(0x1 << queue);
+ dpi_reg_write(dpi, DPI_DMA_ENGX_EN(engine), reg);
+ }
+ }
+
+ reg = dpi_reg_read(dpi, DPI_DMAX_IDS2(queue));
+ reg |= DPI_DMA_IDS2_INST_AURA(aura);
+ dpi_reg_write(dpi, DPI_DMAX_IDS2(queue), reg);
+
+ reg = dpi_reg_read(dpi, DPI_DMAX_IDS(queue));
+ reg |= DPI_DMA_IDS_DMA_NPA_PF_FUNC(npa_pf_func);
+ reg |= DPI_DMA_IDS_DMA_SSO_PF_FUNC(sso_pf_func);
+ reg |= DPI_DMA_IDS_DMA_STRM(vf + 1);
+ reg |= DPI_DMA_IDS_INST_STRM(vf + 1);
+ dpi_reg_write(dpi, DPI_DMAX_IDS(queue), reg);
+
+ return 0;
+}
+
+static int dpi_queue_fini(struct dpipf *dpi, struct dpipf_vf *dpivf, u8 vf)
+{
+ u64 reg = 0ULL;
+ int engine = 0;
+ int queue = vf;
+ u16 buf_size = dpivf->vf_config.csize;
+
+ for (engine = 0; engine < dpi_dma_engine_get_num(); engine++) {
+		/* Don't configure the queues for PKT engines */
+ if (engine >= 4)
+ break;
+
+ reg = 0;
+ reg = dpi_reg_read(dpi, DPI_DMA_ENGX_EN(engine));
+ reg &= DPI_DMA_ENG_EN_QEN((~(1 << queue)));
+ dpi_reg_write(dpi, DPI_DMA_ENGX_EN(engine), reg);
+ }
+
+ dpi_reg_write(dpi, DPI_DMAX_QRST(queue), 0x1ULL);
+ /* TBD: below code required ? */
+ dpi_reg_write(dpi, DPI_DMAX_IBUFF_CSIZE(queue),
+ DPI_DMA_IBUFF_CSIZE_CSIZE((u64)(buf_size)));
+
+ /* Reset IDS and IDS2 registers */
+ dpi_reg_write(dpi, DPI_DMAX_IDS2(queue), 0ULL);
+ dpi_reg_write(dpi, DPI_DMAX_IDS(queue), 0ULL);
+
+ return 0;
+}
+
+/**
+ * dpi_init() - Global initialization of DPI
+ * @dpi: DPI device context structure
+ *
+ * Return: zero on success, negative on failure
+ */
+static int dpi_init(struct dpipf *dpi)
+{
+ int engine = 0, port = 0;
+ u8 mrrs_val, mps_val;
+ u64 reg = 0ULL;
+
+ for (engine = 0; engine < dpi_dma_engine_get_num(); engine++) {
+ if (engine == 4 || engine == 5)
+ reg = DPI_ENG_BUF_BLKS(16);
+ else
+ reg = DPI_ENG_BUF_BLKS(8);
+
+ dpi_reg_write(dpi, DPI_ENGX_BUF(engine), reg);
+
+		/* Here the qmap for the engines is set to 0, so no DPI
+		 * queues are mapped to any engine. When a VF is
+		 * initialized, the corresponding bit in the qmap will be
+		 * set for all engines.
+		 */
+ if (!is_otx3_dpi(dpi))
+ dpi_reg_write(dpi, DPI_DMA_ENGX_EN(engine), 0x0ULL);
+ }
+
+ reg = 0ULL;
+ reg = (DPI_DMA_CONTROL_ZBWCSEN | DPI_DMA_CONTROL_PKT_EN |
+ DPI_DMA_CONTROL_LDWB | DPI_DMA_CONTROL_O_MODE);
+
+ if (is_otx3_dpi(dpi))
+ reg |= DPI_DMA_CONTROL_DMA_ENB(0x3fULL);
+ else
+ reg |= DPI_DMA_CONTROL_DMA_ENB(0xfULL);
+
+ dpi_reg_write(dpi, DPI_DMA_CONTROL, reg);
+ dpi_reg_write(dpi, DPI_CTL, DPI_CTL_EN);
+
+ /* Configure MPS and MRRS for DPI */
+ if (mrrs < DPI_EBUS_MRRS_MIN || mrrs > DPI_EBUS_MRRS_MAX ||
+ !is_power_of_2(mrrs)) {
+ dev_info(&dpi->pdev->dev,
+ "Invalid MRRS size:%d, Using default size(128 bytes)\n"
+ , mrrs);
+ mrrs = 128;
+ }
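+	/* fls() maps the power-of-two size to the standard PCIe encoding:
+	 * 128 -> 0, 256 -> 1, 512 -> 2, 1024 -> 3.
+	 */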
+ mrrs_val = fls(mrrs) - 8;
+
+ if (mps < DPI_EBUS_MPS_MIN || mps > DPI_EBUS_MPS_MAX
+ || !is_power_of_2(mps)) {
+ dev_info(&dpi->pdev->dev,
+ "Invalid MPS size:%d, Using default size(128 bytes)\n"
+ , mps);
+ mps = 128;
+ }
+ mps_val = fls(mps) - 8;
+
+ for (port = 0; port < DPI_EBUS_MAX_PORTS; port++) {
+ reg = dpi_reg_read(dpi, DPI_EBUS_PORTX_CFG(port));
+ reg &= ~(DPI_EBUS_PORTX_CFG_MRRS(0x7) |
+ DPI_EBUS_PORTX_CFG_MPS(0x7));
+ reg |= (DPI_EBUS_PORTX_CFG_MPS(mps_val) |
+ DPI_EBUS_PORTX_CFG_MRRS(mrrs_val));
+ dpi_reg_write(dpi, DPI_EBUS_PORTX_CFG(port), reg);
+ }
+
+ /* Set the write control FIFO threshold as per HW recommendation */
+ if (is_otx3_dpi(dpi))
+ dpi_reg_write(dpi, DPI_WCTL_FIF_THR, 0x30);
+
+ return 0;
+}
+
+static int dpi_fini(struct dpipf *dpi)
+{
+ int engine = 0, port;
+ u64 reg = 0ULL;
+
+ for (engine = 0; engine < dpi_dma_engine_get_num(); engine++) {
+
+ dpi_reg_write(dpi, DPI_ENGX_BUF(engine), reg);
+ if (!is_otx3_dpi(dpi))
+ dpi_reg_write(dpi, DPI_DMA_ENGX_EN(engine), 0x0ULL);
+ }
+
+ reg = 0ULL;
+ dpi_reg_write(dpi, DPI_DMA_CONTROL, reg);
+ dpi_reg_write(dpi, DPI_CTL, ~DPI_CTL_EN);
+
+ for (port = 0; port < DPI_EBUS_MAX_PORTS; port++) {
+ reg = dpi_reg_read(dpi, DPI_EBUS_PORTX_CFG(port));
+ reg &= ~DPI_EBUS_PORTX_CFG_MRRS(0x7);
+ reg &= ~DPI_EBUS_PORTX_CFG_MPS(0x7);
+ dpi_reg_write(dpi, DPI_EBUS_PORTX_CFG(port), reg);
+ }
+ return 0;
+}
+
+static int dpi_queue_reset(struct dpipf *dpi, u16 queue)
+{
+ /* TODO: add support */
+ return 0;
+}
+
+static irqreturn_t dpi_pf_intr_handler (int irq, void *dpi_irq)
+{
+ u64 reg_val = 0;
+ int i = 0;
+ struct dpipf *dpi = (struct dpipf *)dpi_irq;
+
+ dev_err(&dpi->pdev->dev, "intr received: %d\n", irq);
+
+ /* extract MSIX vector number from irq number. */
+ while (irq != pci_irq_vector(dpi->pdev, i)) {
+ i++;
+ if (i > dpi->num_vec)
+ break;
+ }
+ if (i < DPI_REQQX_INT_IDX) {
+ reg_val = dpi_reg_read(dpi, DPI_DMA_CCX_INT(i));
+ dev_err(&dpi->pdev->dev, "DPI_CC%d_INT raised: 0x%016llx\n",
+ i, reg_val);
+ dpi_reg_write(dpi, DPI_DMA_CCX_INT(i), 0x1ULL);
+ } else if (i < DPI_SDP_FLR_RING_LINTX_IDX) {
+ reg_val = dpi_reg_read(
+ dpi, DPI_REQQX_INT(i - DPI_REQQX_INT_IDX));
+ dev_err(&dpi->pdev->dev,
+ "DPI_REQQ_INT raised for q:%d: 0x%016llx\n",
+			(i - DPI_REQQX_INT_IDX), reg_val);
+
+ dpi_reg_write(
+ dpi, DPI_REQQX_INT(i - DPI_REQQX_INT_IDX), reg_val);
+
+ if (reg_val & (0x71ULL))
+ dpi_queue_reset(dpi, (i - DPI_REQQX_INT_IDX));
+ } else if (i < DPI_SDP_IRE_LINTX_IDX) {
+ /* TODO: handle interrupt */
+ dev_err(&dpi->pdev->dev, "DPI_SDP_FLR_RING_LINTX raised\n");
+
+ } else if (i < DPI_SDP_ORE_LINTX_IDX) {
+ /* TODO: handle interrupt */
+ dev_err(&dpi->pdev->dev, "DPI_SDP_IRE_LINTX raised\n");
+
+ } else if (i < DPI_SDP_ORD_LINTX_IDX) {
+ /* TODO: handle interrupt */
+ dev_err(&dpi->pdev->dev, "DPI_SDP_ORE_LINTX raised\n");
+
+ } else if (i < DPI_EPFX_PP_VF_LINTX_IDX) {
+ /* TODO: handle interrupt */
+ dev_err(&dpi->pdev->dev, "DPI_SDP_ORD_LINTX raised\n");
+
+ } else if (i < DPI_EPFX_DMA_VF_LINTX_IDX) {
+ /* TODO: handle interrupt */
+ dev_err(&dpi->pdev->dev, "DPI_EPFX_PP_VF_LINTX raised\n");
+
+ } else if (i < DPI_EPFX_MISC_LINTX_IDX) {
+ /* TODO: handle interrupt */
+ dev_err(&dpi->pdev->dev, "DPI_EPFX_DMA_VF_LINTX raised\n");
+
+ } else if (i < DPI_PF_RAS_IDX) {
+ /* TODO: handle interrupt */
+ dev_err(&dpi->pdev->dev, "DPI_EPFX_MISC_LINTX raised\n");
+
+ } else if (i == DPI_PF_RAS_IDX) {
+ reg_val = dpi_reg_read(dpi, DPI_PF_RAS);
+ dev_err(&dpi->pdev->dev, "DPI_PF_RAS raised: 0x%016llx\n",
+ reg_val);
+ dpi_reg_write(dpi, DPI_PF_RAS, reg_val);
+ }
+ return IRQ_HANDLED;
+}
+
+static int dpi_irq_init(struct dpipf *dpi)
+{
+ int i, irq = 0;
+ int ret = 0;
+
+ /* Clear All Interrupts */
+ dpi_reg_write(dpi, DPI_PF_RAS, DPI_PF_RAS_INT);
+
+ /* Clear All Enables */
+ dpi_reg_write(dpi, DPI_PF_RAS_ENA_W1C, DPI_PF_RAS_INT);
+
+ for (i = 0; i < DPI_MAX_REQQ_INT; i++) {
+ dpi_reg_write(dpi, DPI_REQQX_INT(i), DPI_REQQ_INT);
+ dpi_reg_write(dpi, DPI_REQQX_INT_ENA_W1C(i), DPI_REQQ_INT);
+ }
+
+ for (i = 0; i < DPI_MAX_CC_INT; i++) {
+ dpi_reg_write(dpi, DPI_DMA_CCX_INT(i), DPI_DMA_CC_INT);
+ dpi_reg_write(dpi, DPI_DMA_CCX_INT_ENA_W1C(i), DPI_DMA_CC_INT);
+ }
+
+ dpi->num_vec = pci_msix_vec_count(dpi->pdev);
+ /* Enable MSI-X */
+ ret = pci_alloc_irq_vectors(dpi->pdev, dpi->num_vec,
+ dpi->num_vec, PCI_IRQ_MSIX);
+ if (ret < 0) {
+ dev_err(&dpi->pdev->dev,
+ "DPIPF: Request for %d msix vectors failed, ret %d\n",
+ dpi->num_vec, ret);
+ goto alloc_fail;
+ }
+
+ for (irq = 0; irq < dpi->num_vec; irq++) {
+ ret = request_irq(pci_irq_vector(dpi->pdev, irq),
+ dpi_pf_intr_handler, 0, "DPIPF", dpi);
+ if (ret) {
+ dev_err(&dpi->pdev->dev,
+ "DPIPF: IRQ(%d) registration failed for DPIPF\n",
+ irq);
+ goto fail;
+ }
+ }
+
+#define ENABLE_DPI_INTERRUPTS 0
+#if ENABLE_DPI_INTERRUPTS
+ /*Enable All Interrupts */
+ for (i = 0; i < DPI_MAX_REQQ_INT; i++)
+ dpi_reg_write(dpi, DPI_REQQX_INT_ENA_W1S(i), DPI_REQQ_INT);
+
+ dpi_reg_write(dpi, DPI_PF_RAS_ENA_W1S, DPI_PF_RAS_INT);
+#endif
+ return 0;
+fail:
+ for (i = 0; i < irq; i++)
+ free_irq(pci_irq_vector(dpi->pdev, i), dpi);
+ pci_free_irq_vectors(dpi->pdev);
+alloc_fail:
+ dpi->num_vec = 0;
+ return ret;
+}
+
+static void dpi_irq_free(struct dpipf *dpi)
+{
+ int i = 0;
+
+ /* Clear All Enables */
+ dpi_reg_write(dpi, DPI_PF_RAS_ENA_W1C, DPI_PF_RAS_INT);
+
+ for (i = 0; i < DPI_MAX_REQQ_INT; i++) {
+ dpi_reg_write(dpi, DPI_REQQX_INT(i), DPI_REQQ_INT);
+ dpi_reg_write(dpi, DPI_REQQX_INT_ENA_W1C(i), DPI_REQQ_INT);
+ }
+
+ for (i = 0; i < DPI_MAX_CC_INT; i++) {
+ dpi_reg_write(dpi, DPI_DMA_CCX_INT(i), DPI_DMA_CC_INT);
+ dpi_reg_write(dpi, DPI_DMA_CCX_INT_ENA_W1C(i), DPI_DMA_CC_INT);
+ }
+
+ for (i = 0; i < dpi->num_vec; i++)
+ free_irq(pci_irq_vector(dpi->pdev, i), dpi);
+
+ pci_free_irq_vectors(dpi->pdev);
+ dpi->num_vec = 0;
+}
+
+static int dpi_sriov_configure(struct pci_dev *pdev, int numvfs)
+{
+ struct dpipf *dpi = pci_get_drvdata(pdev);
+ int ret = 0;
+
+ if (numvfs == 0) {
+ pci_disable_sriov(pdev);
+ dpi->total_vfs = 0;
+ } else {
+ ret = pci_enable_sriov(pdev, numvfs);
+ if (ret == 0) {
+ dpi->total_vfs = numvfs;
+ ret = numvfs;
+ }
+ }
+
+ return ret;
+}
+
+static ssize_t dpi_device_config_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+ struct dpipf *dpi = pci_get_drvdata(pdev);
+ int vf_idx;
+
+ for (vf_idx = 0; vf_idx < dpi->total_vfs; vf_idx++) {
+ struct dpipf_vf *dpivf = &dpi->vf[vf_idx];
+
+ if (!dpivf->setup_done)
+ continue;
+ sprintf(buf + strlen(buf),
+ "VF:%d command buffer size:%d aura:%d ",
+ vf_idx, dpivf->vf_config.csize, dpivf->vf_config.aura);
+ sprintf(buf + strlen(buf),
+ "sso_pf_func:%x npa_pf_func:%x\n",
+ dpivf->vf_config.sso_pf_func,
+ dpivf->vf_config.npa_pf_func);
+ }
+ return strlen(buf);
+}
+
+static int queue_config(struct dpipf *dpi, struct dpipf_vf *dpivf,
+ union dpi_mbox_message_t *msg)
+{
+ switch (msg->s.cmd) {
+ case DPI_QUEUE_OPEN:
+ dpivf->vf_config.aura = msg->s.aura;
+ dpivf->vf_config.csize = msg->s.csize;
+ dpivf->vf_config.sso_pf_func = msg->s.sso_pf_func;
+ dpivf->vf_config.npa_pf_func = msg->s.npa_pf_func;
+ dpi_queue_init(dpi, dpivf, msg->s.vfid);
+ dpivf->setup_done = true;
+ break;
+ case DPI_QUEUE_CLOSE:
+ dpivf->vf_config.aura = 0;
+ dpivf->vf_config.csize = 0;
+ dpivf->vf_config.sso_pf_func = 0;
+ dpivf->vf_config.npa_pf_func = 0;
+ dpi_queue_fini(dpi, dpivf, msg->s.vfid);
+ dpivf->setup_done = false;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int dpi_queue_config(struct pci_dev *pfdev,
+ union dpi_mbox_message_t *msg)
+{
+ struct device *dev = &pfdev->dev;
+ struct dpipf *dpi = pci_get_drvdata(pfdev);
+ struct dpipf_vf *dpivf;
+
+ if (msg->s.vfid > dpi->total_vfs) {
+ dev_err(dev, "Invalid vfid:%d\n", msg->s.vfid);
+ return -EINVAL;
+ }
+ dpivf = &dpi->vf[msg->s.vfid];
+
+ return queue_config(dpi, dpivf, msg);
+}
+
+struct otx2_dpipf_com_s otx2_dpipf_com = {
+ .queue_config = dpi_queue_config
+};
+EXPORT_SYMBOL(otx2_dpipf_com);
+
+static ssize_t dpi_device_config_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+ union dpi_mbox_message_t mbox_msg = {.u[0] = 0ULL, .u[1] = 0ULL};
+ struct dpipf *dpi = pci_get_drvdata(pdev);
+ struct dpipf_vf *dpivf;
+
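+ /*
+ * Userspace writes a raw dpi_mbox_message_t (two 64-bit words) to
+ * this sysfs file to open or close a VF's DPI queue.
+ */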
+ if (count > sizeof(mbox_msg))
+ return -EINVAL;
+ memcpy(&mbox_msg, buf, count);
+ if (mbox_msg.s.vfid > dpi->total_vfs) {
+ dev_err(dev, "Invalid vfid:%d\n", mbox_msg.s.vfid);
+ return -EINVAL;
+ }
+ dpivf = &dpi->vf[mbox_msg.s.vfid];
+
+ if (queue_config(dpi, dpivf, &mbox_msg) < 0)
+ return -EINVAL;
+
+ return sizeof(mbox_msg);
+}
+
+static DEVICE_ATTR_RW(dpi_device_config);
+
+static int dpi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ struct dpipf *dpi;
+ int err;
+
+ dpi = devm_kzalloc(dev, sizeof(*dpi), GFP_KERNEL);
+ if (!dpi)
+ return -ENOMEM;
+ dpi->pdev = pdev;
+
+ pci_set_drvdata(pdev, dpi);
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ pci_set_drvdata(pdev, NULL);
+ return err;
+ }
+
+ err = pci_request_regions(pdev, DPI_DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ goto err_disable_device;
+ }
+
+ /* MAP configuration registers */
+ dpi->reg_base = pcim_iomap(pdev, PCI_DPI_PF_CFG_BAR, 0);
+ if (!dpi->reg_base) {
+ dev_err(dev, "DPI: Cannot map CSR memory space, aborting\n");
+ err = -ENOMEM;
+ goto err_release_regions;
+ }
+
+ /* Initialize global PF registers */
+ err = dpi_init(dpi);
+ if (err) {
+ dev_err(dev, "DPI: Failed to initialize dpi\n");
+ goto err_release_regions;
+ }
+
+ /* Register interrupts */
+ err = dpi_irq_init(dpi);
+ if (err) {
+ dev_err(dev, "DPI: Failed to initialize irq vectors\n");
+ goto err_dpi_fini;
+ }
+
+ err = device_create_file(dev, &dev_attr_dpi_device_config);
+ if (err) {
+ dev_err(dev, "DPI: Failed to create sysfs entry for driver\n");
+ goto err_free_irq;
+ }
+
+ return 0;
+
+err_free_irq:
+ dpi_irq_free(dpi);
+err_dpi_fini:
+ dpi_fini(dpi);
+err_release_regions:
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ devm_kfree(dev, dpi);
+ return err;
+}
+
+static void dpi_remove(struct pci_dev *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dpipf *dpi = pci_get_drvdata(pdev);
+
+ device_remove_file(dev, &dev_attr_dpi_device_config);
+ dpi_irq_free(dpi);
+ dpi_fini(dpi);
+ dpi_sriov_configure(pdev, 0);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ devm_kfree(dev, dpi);
+}
+
+static struct pci_driver dpi_driver = {
+ .name = DPI_DRV_NAME,
+ .id_table = dpi_id_table,
+ .probe = dpi_probe,
+ .remove = dpi_remove,
+ .sriov_configure = dpi_sriov_configure,
+};
+
+static int __init dpi_init_module(void)
+{
+ pr_info("%s: %s\n", DPI_DRV_NAME, DPI_DRV_STRING);
+
+ return pci_register_driver(&dpi_driver);
+}
+
+static void __exit dpi_cleanup_module(void)
+{
+ pci_unregister_driver(&dpi_driver);
+}
+
+module_init(dpi_init_module);
+module_exit(dpi_cleanup_module);
diff --git a/drivers/soc/marvell/octeontx2-dpi/dpi.h b/drivers/soc/marvell/octeontx2-dpi/dpi.h
new file mode 100644
index 000000000000..89aeba02ecaa
--- /dev/null
+++ b/drivers/soc/marvell/octeontx2-dpi/dpi.h
@@ -0,0 +1,335 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 DPI PF driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __DPI_H__
+#define __DPI_H__
+
+ /* PCI device IDs */
+#define PCI_DEVID_OCTEONTX2_DPI_PF 0xA080
+#define PCI_DEVID_OCTEONTX2_DPI_VF 0xA081
+#define PCI_SUBDEVID_OCTEONTX3_DPI_PF 0xB900
+
+/* PCI BAR nos */
+#define PCI_DPI_PF_CFG_BAR 0
+#define PCI_DPI_PF_MSIX_BAR 4
+#define PCI_DPI_VF_CFG_BAR 0
+#define PCI_DPI_VF_MSIX_BAR 4
+#define DPI_VF_CFG_SIZE 0x100000
+#define DPI_VF_OFFSET(x) (0x20000000 | 0x100000 * (x))
+
+/* MSI-X interrupts */
+#define DPI_MAX_REQQ_INT ({ \
+ u32 val; \
+ val = 8; \
+ if (is_otx3_dpi(dpi)) \
+ val = 32; \
+ val; }) \
+
+#define DPI_MAX_CC_INT 64
+
+/* MSI-X interrupt vectors indexes */
+#define DPI_CCX_INT_IDX 0x0
+#define DPI_REQQX_INT_IDX 0x40
+#define DPI_SDP_FLR_RING_LINTX_IDX 0x48
+#define DPI_SDP_IRE_LINTX_IDX 0x4C
+#define DPI_SDP_ORE_LINTX_IDX 0x50
+#define DPI_SDP_ORD_LINTX_IDX 0x54
+#define DPI_EPFX_PP_VF_LINTX_IDX 0x58
+#define DPI_EPFX_DMA_VF_LINTX_IDX 0x78
+#define DPI_EPFX_MISC_LINTX_IDX 0x98
+#define DPI_PF_RAS_IDX 0xA8
+
+#define DPI_MAX_ENGINES 6
+#define DPI_MAX_VFS 32
+
+/**************** Macros for register modification ************/
+#define DPI_DMA_IBUFF_CSIZE_CSIZE(x) ((x) & 0x1fff)
+#define DPI_DMA_IBUFF_CSIZE_GET_CSIZE(x) ((x) & 0x1fff)
+
+#define DPI_DMA_IBUFF_CSIZE_NPA_FREE (1 << 16)
+
+#define DPI_DMA_IDS_INST_STRM(x) ((uint64_t)((x) & 0xff) << 40)
+#define DPI_DMA_IDS_GET_INST_STRM(x) (((x) >> 40) & 0xff)
+
+#define DPI_DMA_IDS_DMA_STRM(x) ((uint64_t)((x) & 0xff) << 32)
+#define DPI_DMA_IDS_GET_DMA_STRM(x) (((x) >> 32) & 0xff)
+
+#define DPI_DMA_IDS_DMA_NPA_PF_FUNC(x) ((uint64_t)((x) & 0xffff) << 16)
+#define DPI_DMA_IDS_GET_DMA_NPA_PF_FUNC(x) (((x) >> 16) & 0xffff)
+
+#define DPI_DMA_IDS_DMA_SSO_PF_FUNC(x) ((uint64_t)((x) & 0xffff))
+#define DPI_DMA_IDS_GET_DMA_SSO_PF_FUNC(x) ((x) & 0xffff)
+
+#define DPI_DMA_IDS2_INST_AURA(x) ((uint64_t)((x) & 0xfffff))
+#define DPI_DMA_IDS2_GET_INST_AURA(x) ((x) & 0xfffff)
+
+#define DPI_ENG_BUF_BLKS(x) ((x) & 0x1fULL)
+#define DPI_ENG_BUF_GET_BLKS(x) ((x) & 0x1fULL)
+
+#define DPI_ENG_BUF_BASE(x) (((x) & 0x3fULL) << 16)
+#define DPI_ENG_BUF_GET_BASE(x) (((x) >> 16) & 0x3fULL)
+
+#define DPI_DMA_ENG_EN_QEN(x) ((x) & 0xffULL)
+#define DPI_DMA_ENG_EN_GET_QEN(x) ((x) & 0xffULL)
+
+#define DPI_DMA_ENG_EN_MOLR(x) (((x) & 0x3ffULL) << 32)
+#define DPI_DMA_ENG_EN_GET_MOLR(x) (((x) >> 32) & 0x3ffULL)
+
+#define DPI_DMA_CONTROL_DMA_ENB(x) (((x) & 0x3fULL) << 48)
+#define DPI_DMA_CONTROL_GET_DMA_ENB(x) (((x) >> 48) & 0x3fULL)
+
+#define DPI_DMA_CONTROL_O_ES(x) (((x) & 0x3ULL) << 15)
+#define DPI_DMA_CONTROL_GET_O_ES(x) (((x) >> 15) & 0x3ULL)
+
+#define DPI_DMA_CONTROL_O_MODE (0x1ULL << 14)
+#define DPI_DMA_CONTROL_O_NS (0x1ULL << 17)
+#define DPI_DMA_CONTROL_O_RO (0x1ULL << 18)
+#define DPI_DMA_CONTROL_O_ADD1 (0x1ULL << 19)
+#define DPI_DMA_CONTROL_LDWB (0x1ULL << 32)
+#define DPI_DMA_CONTROL_NCB_TAG_DIS (0x1ULL << 34)
+#define DPI_DMA_CONTROL_ZBWCSEN (0x1ULL << 39)
+#define DPI_DMA_CONTROL_WQECSDIS (0x1ULL << 47)
+#define DPI_DMA_CONTROL_UIO_DIS (0x1ULL << 55)
+#define DPI_DMA_CONTROL_PKT_EN (0x1ULL << 56)
+#define DPI_DMA_CONTROL_FFP_DIS (0x1ULL << 59)
+
+#define DPI_CTL_EN (0x1ULL)
+
+/******************** macros for Interrupts ************************/
+#define DPI_DMA_CC_INT (0x1ULL)
+
+#define DPI_REQQ_INT_INSTRFLT (0x1ULL)
+#define DPI_REQQ_INT_RDFLT (0x1ULL << 1)
+#define DPI_REQQ_INT_WRFLT (0x1ULL << 2)
+#define DPI_REQQ_INT_CSFLT (0x1ULL << 3)
+#define DPI_REQQ_INT_INST_DBO (0x1ULL << 4)
+#define DPI_REQQ_INT_INST_ADDR_NULL (0x1ULL << 5)
+#define DPI_REQQ_INT_INST_FILL_INVAL (0x1ULL << 6)
+#define DPI_REQQ_INT_INSTR_PSN (0x1ULL << 7)
+
+#define DPI_REQQ_INT \
+ (DPI_REQQ_INT_INSTRFLT | \
+ DPI_REQQ_INT_RDFLT | \
+ DPI_REQQ_INT_WRFLT | \
+ DPI_REQQ_INT_CSFLT | \
+ DPI_REQQ_INT_INST_DBO | \
+ DPI_REQQ_INT_INST_ADDR_NULL | \
+ DPI_REQQ_INT_INST_FILL_INVAL | \
+ DPI_REQQ_INT_INSTR_PSN)
+
+#define DPI_PF_RAS_EBI_DAT_PSN (0x1ULL)
+#define DPI_PF_RAS_NCB_DAT_PSN (0x1ULL << 1)
+#define DPI_PF_RAS_NCB_CMD_PSN (0x1ULL << 2)
+#define DPI_PF_RAS_INT \
+ (DPI_PF_RAS_EBI_DAT_PSN | \
+ DPI_PF_RAS_NCB_DAT_PSN | \
+ DPI_PF_RAS_NCB_CMD_PSN)
+
+
+/***************** Registers ******************/
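+/*
+ * The register offset macros below (like DPI_MAX_REQQ_INT above) are GCC
+ * statement expressions that select the CN9xxx or CN10K (OTX3) offset at
+ * run time. They reference a local variable named 'dpi', so they may only
+ * be used in functions where a 'struct dpipf *dpi' is in scope.
+ */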
+#define DPI_DMAX_IBUFF_CSIZE(x) (0x0ULL | ((x) << 11))
+#define DPI_DMAX_REQBANK0(x) (0x8ULL | ((x) << 11))
+#define DPI_DMAX_REQBANK1(x) (0x10ULL | ((x) << 11))
+#define DPI_DMAX_IDS(x) (0x18ULL | ((x) << 11))
+#define DPI_DMAX_IDS2(x) (0x20ULL | ((x) << 11))
+#define DPI_DMAX_IFLIGHT(x) (0x28ULL | ((x) << 11))
+#define DPI_DMAX_QRST(x) (0x30ULL | ((x) << 11))
+#define DPI_DMAX_ERR_RSP_STATUS(x) (0x38ULL | ((x) << 11))
+
+#define DPI_CSCLK_ACTIVE_PC ({ \
+ u64 offset; \
+ \
+ offset = (0x4000ULL); \
+ if (is_otx3_dpi(dpi)) \
+ offset = (0x10000ULL); \
+ offset; }) \
+
+#define DPI_CTL ({ \
+ u64 offset; \
+ \
+ offset = (0x4010ULL); \
+ if (is_otx3_dpi(dpi)) \
+ offset = (0x10010ULL); \
+ offset; }) \
+
+#define DPI_DMA_CONTROL ({ \
+ u64 offset; \
+ \
+ offset = (0x4018ULL); \
+ if (is_otx3_dpi(dpi)) \
+ offset = (0x10018ULL); \
+ offset; }) \
+
+#define DPI_DMA_ENGX_EN(x) ({ \
+ u64 offset; \
+ \
+ offset = (0x4040ULL | (x) << 3); \
+ if (is_otx3_dpi(dpi)) \
+ offset = (0x10040ULL | ((x) << 3)); \
+ offset; }) \
+
+#define DPI_ENGX_BUF(x) ({ \
+ u64 offset; \
+ \
+ offset = (0x40C0ULL | (x) << 3); \
+ if (is_otx3_dpi(dpi)) \
+ offset = (0x100C0ULL | ((x) << 3)); \
+ offset; }) \
+
+#define DPI_EBUS_PORTX_CFG(x) ({ \
+ u64 offset; \
+ \
+ offset = (0x4100ULL | (x) << 3); \
+ if (is_otx3_dpi(dpi)) \
+ offset = (0x10100ULL | ((x) << 3)); \
+ offset; }) \
+
+#define DPI_PF_RAS ({ \
+ u64 offset; \
+ \
+ offset = (0x4308ULL); \
+ if (is_otx3_dpi(dpi)) \
+ offset = (0x10308ULL); \
+ offset; }) \
+
+#define DPI_PF_RAS_ENA_W1C ({ \
+ u64 offset; \
+ \
+ offset = (0x4318ULL); \
+ if (is_otx3_dpi(dpi)) \
+ offset = (0x10318ULL); \
+ offset; }) \
+
+#define DPI_PF_RAS_ENA_W1S ({ \
+ u64 offset; \
+ \
+ offset = (0x4320ULL); \
+ if (is_otx3_dpi(dpi)) \
+ offset = (0x10320ULL); \
+ offset; }) \
+
+#define DPI_DMA_CCX_INT(x) ({ \
+ u64 offset; \
+ \
+ offset = (0x5000ULL | (x) << 3); \
+ if (is_otx3_dpi(dpi)) \
+ offset = (0x11000ULL | ((x) << 3)); \
+ offset; }) \
+
+#define DPI_DMA_CCX_INT_ENA_W1C(x) ({ \
+ u64 offset; \
+ \
+ offset = (0x5800ULL | (x) << 3); \
+ if (is_otx3_dpi(dpi)) \
+ offset = (0x11800ULL | ((x) << 3)); \
+ offset; }) \
+
+#define DPI_REQQX_INT(x) ({ \
+ u64 offset; \
+ \
+ offset = (0x6600ULL | (x) << 3); \
+ if (is_otx3_dpi(dpi)) \
+ offset = (0x12C00ULL | ((x) << 5)); \
+ offset; }) \
+
+#define DPI_REQQX_INT_ENA_W1C(x) ({ \
+ u64 offset; \
+ \
+ offset = (0x6680ULL | (x) << 3); \
+ if (is_otx3_dpi(dpi)) \
+ offset = (0x13800ULL | ((x) << 5)); \
+ offset; }) \
+
+#define DPI_REQQX_INT_ENA_W1S(x) ({ \
+ u64 offset; \
+ \
+ offset = (0x66C0ULL | (x) << 3); \
+ if (is_otx3_dpi(dpi)) \
+ offset = (0x13C00ULL | ((x) << 5)); \
+ offset; }) \
+
+#define DPI_WCTL_FIF_THR (0x17008ULL)
+
+#define DPI_EBUS_MRRS_MIN 128
+#define DPI_EBUS_MRRS_MAX 1024
+#define DPI_EBUS_MPS_MIN 128
+#define DPI_EBUS_MPS_MAX 1024
+#define DPI_EBUS_MAX_PORTS 2
+#define DPI_EBUS_PORTX_CFG_MRRS(x) (((x) & 0x7) << 0)
+#define DPI_EBUS_PORTX_CFG_MPS(x) (((x) & 0x7) << 4)
+
+/* VF Registers: */
+#define DPI_VDMA_EN (0x0ULL)
+#define DPI_VDMA_REQQ_CTL (0x8ULL)
+#define DPI_VDMA_DBELL (0x10ULL)
+#define DPI_VDMA_SADDR (0x18ULL)
+#define DPI_VDMA_COUNTS (0x20ULL)
+#define DPI_VDMA_NADDR (0x28ULL)
+#define DPI_VDMA_IWBUSY (0x30ULL)
+#define DPI_VDMA_CNT (0x38ULL)
+#define DPI_VF_INT (0x100ULL)
+#define DPI_VF_INT_W1S (0x108ULL)
+#define DPI_VF_INT_ENA_W1C (0x110ULL)
+#define DPI_VF_INT_ENA_W1S (0x118ULL)
+
+struct dpivf_config {
+ uint16_t csize;
+ uint32_t aura;
+ uint16_t sso_pf_func;
+ uint16_t npa_pf_func;
+};
+
+struct dpipf_vf {
+ uint8_t this_vfid;
+ bool setup_done;
+ struct dpivf_config vf_config;
+};
+
+struct dpipf {
+ void __iomem *reg_base;
+ struct pci_dev *pdev;
+ int num_vec;
+ struct msix_entry *msix_entries;
+ int total_vfs;
+ int vfs_in_use;
+ struct dpipf_vf vf[DPI_MAX_VFS];
+};
+
+#define DPI_QUEUE_OPEN 0x1
+#define DPI_QUEUE_CLOSE 0x2
+#define DPI_REG_DUMP 0x3
+#define DPI_GET_REG_CFG 0x4
+
+union dpi_mbox_message_t {
+ uint64_t u[2];
+ struct dpi_mbox_message_s {
+ /* VF ID to configure */
+ uint64_t vfid :8;
+ /* Command code */
+ uint64_t cmd :4;
+ /* Command buffer size in 8-byte words */
+ uint64_t csize :14;
+ /* aura of the command buffer */
+ uint64_t aura :20;
+ /* SSO PF function */
+ uint64_t sso_pf_func :16;
+ /* NPA PF function */
+ uint64_t npa_pf_func :16;
+ } s;
+};
+
+struct otx2_dpipf_com_s {
+ int (*queue_config)(struct pci_dev *pfdev,
+ union dpi_mbox_message_t *req);
+};
+
+extern struct otx2_dpipf_com_s otx2_dpipf_com;
+
+#endif
diff --git a/drivers/soc/marvell/octeontx2-ghes/Makefile b/drivers/soc/marvell/octeontx2-ghes/Makefile
new file mode 100644
index 000000000000..ac7bd905e4b9
--- /dev/null
+++ b/drivers/soc/marvell/octeontx2-ghes/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for Marvell's OcteonTX2 SDEI/GHES device driver
+#
+
+obj-$(CONFIG_OCTEONTX2_SDEI_GHES) += otx2_sdei_ghes.o
+otx2_sdei_ghes-$(CONFIG_OCTEONTX2_GHES_BERT) += otx2-ghes-bert.o
+otx2_sdei_ghes-y += otx2-sdei-ghes.o
+
+obj-$(CONFIG_OCTEONTX2_EINJ) += otx2-einj.o
diff --git a/drivers/soc/marvell/octeontx2-ghes/otx2-einj.c b/drivers/soc/marvell/octeontx2-ghes/otx2-einj.c
new file mode 100644
index 000000000000..812abc15e90f
--- /dev/null
+++ b/drivers/soc/marvell/octeontx2-ghes/otx2-einj.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * OcteonTX2 memory controller ECC injection
+ * Copyright Marvell Technologies. (C) 2019-2020. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+
+/*
+ * All DRAM/cache controller hardware is handled by ATF on these platforms
+ * and not visible to Non-Secure OS kernel.
+ * The EDAC functions are passed to ATF by OCTEONTX_EDAC SMC, which performs
+ * injection and reporting, and copies log stream back to kernel for reporting
+ * detail in syslog.
+ *
+ * This is minimal SMC stub approach, minimally providing hooks for usermode
+ * error-injection tools, to exercise either the legacy EDAC code of pre-4.18,
+ * or the standard SDEI/GHES RAS handling possible in newer kernels.
+ * It knows nothing of either, just asks ATF to corrupt memory.
+ * This allows LMC/etc details to be hidden from EL2, all RAS/EDAC
+ * work going to ATF/EL3 for security.
+ *
+ * For further details, see:
+ * ATF's docs/plat/marvell/marvell_ras.txt
+ * include/plat/marvell/octeontx/otx2/plat_ras.h
+ */
+
+#define OCTEONTX_EDAC 0xc2000c0b
+/* x1 is one of the following ... */
+#define OCTEONTX_EDAC_VER 0 /* report version */
+#define OCTEONTX_EDAC_INJECT 3 /* x2=addr x3=flags _F_xxx below */
+#define OCTEONTX_EDAC_MDC_CONST 4 /* read CAVM_MDC_CONST */
+#define OCTEONTX_EDAC_MDC_RW 5 /* read/write MDC */
+#define OCTEONTX_EDAC_MDC_ROM 6 /* read MDC_RAS_ROM x2=addr */
+
+#define OCTEONTX_EDAC_F_BITMASK 0x007 /* single bit to corrupt */
+#define OCTEONTX_EDAC_F_MULTI 0x008 /* corrupt multiple bits */
+#define OCTEONTX_EDAC_F_CLEVEL 0x070 /* cache level to corrupt (L0 == DRAM) */
+#define OCTEONTX_EDAC_F_ICACHE 0x080 /* Icache, not Dcache */
+#define OCTEONTX_EDAC_F_REREAD 0x100 /* read-back in EL3 */
+#define OCTEONTX_EDAC_F_PHYS 0x200 /* target is EL3-physical, not EL012 */
+
+#include <linux/arm-smccc.h>
+
+/*
+ * Module parameters are used here instead of debugfs because debugfs requires
+ * a kernel configuration option to be enabled, which potentially requires
+ * a configuration change and kernel rebuild.
+ * The use of error injection via this module is meant to be available at all
+ * times (when the module is loaded) and should not require a special kernel.
+ */
+static u64 smc_params[7];
+static u64 smc_result;
+static int smc_argc;
+
+/* an easily recognized value for logs */
+static const u64 test_val = 0x5555555555555555;
+
+/* target address for please-corrupt-EL1/EL2 I-cache/DRAM */
+static u64 ecc_test_target_fn(void)
+{
+ return test_val;
+}
+
+static int otx2_edac_smc(void)
+{
+ /* target address for please-corrupt-EL1/EL2 D-cache/DRAM: */
+ u64 ecc_test_target_data = test_val;
+ struct arm_smccc_res res;
+ bool test_read = false;
+ bool test_call = false;
+ u64 *a = smc_params;
+
+ /*
+ * Replace magic ECC-injection addresses:
+ * special ECC-injection addresses 0-3/4-7 are substituted by
+ * EL0-3 code as instr/data targets at that execution level.
+ * Any 0/4 addresses will have already been substituted by the
+ * EL0 test harness; here we substitute the EL1/EL2 targets.
+ * Addresses 3/7 are replaced by ATF with its own test objects,
+ * and we remind it to re-read them in its own context.
+ */
+ if (a[0] == OCTEONTX_EDAC_INJECT) {
+ a[2] &= ~OCTEONTX_EDAC_F_REREAD;
+ switch (a[1]) {
+ case 1 ... 2: /* EL1/EL2 D-space target */
+ a[1] = (u64)&ecc_test_target_data;
+ test_read = true;
+ break;
+ case 5 ... 6: /* EL1/EL2 I-space target */
+ a[1] = (u64)ecc_test_target_fn;
+ test_call = true;
+ break;
+ case 3: /* EL3 targets */
+ case 7:
+ a[2] |= OCTEONTX_EDAC_F_REREAD;
+ break;
+ }
+ }
+
+ arm_smccc_smc(OCTEONTX_EDAC, a[0], a[1], a[2], /* x1-x3 */
+ a[3], a[4], a[5], a[6], &res); /* x4-x7, result */
+ trace_printk("%s: OCTEONTX_EDAC(%llx, %llx, %llx, %llx) -> e?%ld\n",
+ __func__, a[0], a[1], a[2], a[3], res.a0);
+
+ if (test_read && ecc_test_target_data != test_val)
+ trace_printk("%s test_read mismatch\n", __func__);
+ if (test_call && ecc_test_target_fn() != test_val)
+ trace_printk("%s test_call mismatch\n", __func__);
+
+ return res.a0;
+}
+
+static int smc_params_set(const char *_str, const struct kernel_param *kp)
+{
+ /* as with param_array_set(), temporarily overwrites string */
+ char *str = (char *)_str;
+ int rc;
+
+ trace_printk("%s: (%s)\n", __func__, str);
+
+ if (!str)
+ return -EINVAL;
+
+ smc_result = -EBUSY;
+
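+ /* Parse up to seven comma-separated u64 values (SMC arguments x1..x7). */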
+ for (smc_argc = 0; smc_argc < 7 && *str; smc_argc++) {
+ int len = strcspn(str, ",");
+ char *nxt = len ? str + len + 1 : "";
+
+ if (len)
+ str[len] = '\0';
+ rc = kstrtoull(str, 0, &smc_params[smc_argc]);
+
+ trace_printk("%s: (%s/%s) smc_params[%d]=%llx e?%d\n",
+ __func__, str, nxt, smc_argc,
+ smc_params[smc_argc], rc);
+ if (len)
+ str[len] = ',';
+ str = nxt;
+ trace_printk("%s: smc_params[%d]=%llx\n",
+ __func__, smc_argc, smc_params[smc_argc]);
+ }
+
+ smc_result = otx2_edac_smc();
+ trace_printk("%s: result: %llx\n", __func__, smc_result);
+ return 0;
+}
+
+static int smc_params_get(char *buffer, const struct kernel_param *kp)
+{
+ return sprintf(buffer, "%lld\n", smc_result);
+}
+
+static const struct kernel_param_ops smc_params_ops = {
+ .set = smc_params_set,
+ .get = smc_params_get,
+};
+
+module_param_cb(smc_params, &smc_params_ops, smc_params, 0644);
+MODULE_PARM_DESC(smc_params, "call/return values for OCTEONTX_EDAC SMC");
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Marvell Semiconductor");
+MODULE_DESCRIPTION("OcteonTX2 ECC injector stub");
diff --git a/drivers/soc/marvell/octeontx2-ghes/otx2-ghes-bert.c b/drivers/soc/marvell/octeontx2-ghes/otx2-ghes-bert.c
new file mode 100644
index 000000000000..4d0e84205e2e
--- /dev/null
+++ b/drivers/soc/marvell/octeontx2-ghes/otx2-ghes-bert.c
@@ -0,0 +1,406 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Supports OcteonTX2 Generic Hardware Error Source (BED)
+ * Boot Error Data (BED) from BERT table DT and ACPI
+ *
+ * Copyright (C) 2021 Marvell.
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/arm_sdei.h>
+#include <linux/uuid.h>
+#include <linux/acpi.h>
+#include <acpi/apei.h>
+#include <linux/pci.h>
+#include <linux/crash_dump.h>
+#include "otx2-ghes-bert.h"
+#include "otx2-sdei-ghes.h"
+
+#define DRV_NAME "bed-bert"
+
+#define initerrmsg(fmt, ...) pr_err(DRV_NAME ":" fmt, __VA_ARGS__)
+#ifdef CONFIG_OCTEONTX2_SDEI_GHES_DEBUG
+# define initdbgmsg(fmt, ...) pr_info(DRV_NAME ":" fmt, __VA_ARGS__)
+# define dbgmsg(dev, ...) dev_info((dev), __VA_ARGS__)
+#else
+# define initdbgmsg(fmt, ...) (void)(fmt)
+# define dbgmsg(dev, ...) (void)(dev)
+#endif // CONFIG_OCTEONTX2_SDEI_GHES_DEBUG
+
+#define BERT_TBL_OEM_ID "OTX2 "
+#define BERT_OEM_ID "MRVL "
+
+#ifdef CONFIG_OF
+static const struct of_device_id bed_bert_of_match[] = {
+ { .compatible = "marvell,bed-bert", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, bed_bert_of_match);
+#endif
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id bed_bert_acpi_match[] = {
+ { "BERT0001", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, bed_bert_acpi_match);
+#endif
+
+static int __init ghes_bed_acpi_match_resource(struct platform_device *pdev,
+ struct mrvl_bed_source *bsrc)
+{
+ struct resource *res;
+
+ // BERT
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ initerrmsg("%s ACPI unable get bert block\n", __func__);
+ return -ENOENT;
+ }
+ initdbgmsg("%s Status BERT %s [%llx - %llx, %lx, %lx]\n", __func__,
+ res->name, res->start, res->end, res->flags, res->desc);
+ bsrc->bert_pa = res->start;
+ bsrc->bert_sz = resource_size(res);
+ initdbgmsg("BERT RING: 0x%llx/0x%llx\n", bsrc->bert_pa, bsrc->bert_sz);
+
+ // Error Block Ring
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ if (!res) {
+ initerrmsg("%s ACPI unable get ring block\n", __func__);
+ return -ENOENT;
+ }
+ initdbgmsg("%s Status Ring %s [%llx - %llx, %lx, %lx]\n", __func__,
+ res->name, res->start, res->end, res->flags, res->desc);
+ bsrc->block_pa = res->start;
+ bsrc->block_sz = resource_size(res);
+ initdbgmsg("BERT RING: 0x%llx/0x%llx\n", bsrc->block_pa, bsrc->block_sz);
+
+ return 0;
+}
+
+static int __init ghes_bed_of_match_resource(struct mrvl_bed_source *bsrc)
+{
+ struct device_node *of_node;
+ struct device_node *child_node;
+ const __be32 *res;
+ u64 size;
+ u64 base;
+
+ of_node = of_find_matching_node_and_match(NULL, bed_bert_of_match, NULL);
+ if (!of_node) {
+ initerrmsg("BERT initialization no dev node %p\n", of_node);
+ return -ENODEV;
+ }
+
+ child_node = of_get_next_available_child(of_node, NULL);
+ if (!child_node) {
+ initerrmsg("BERT initialization no child node %p\n", child_node);
+ return -ENODEV;
+ }
+
+ res = of_get_address(child_node, 0, &size, NULL);
+ if (!res)
+ goto err;
+
+ base = of_translate_address(child_node, res);
+ if (base == OF_BAD_ADDR)
+ goto err;
+
+ bsrc->bert_pa = (phys_addr_t)base;
+ bsrc->bert_sz = (phys_addr_t)size;
+
+ res = of_get_address(child_node, 2, &size, NULL);
+ if (!res)
+ goto err;
+
+ base = of_translate_address(child_node, res);
+ if (base == OF_BAD_ADDR)
+ goto err;
+
+ bsrc->block_pa = (phys_addr_t)base;
+ bsrc->block_sz = (phys_addr_t)size;
+
+ initdbgmsg("BERT: 0x%llx/0x%llx 0x%llx/0x%llx\n",
+ bsrc->bert_pa, bsrc->bert_sz, bsrc->block_pa, bsrc->block_sz);
+
+ return 0;
+
+err:
+ initerrmsg("%s BERT unable get/translate address block\n", __func__);
+ return -ENODEV;
+}
+
+static int __init ghes_bed_map_resource(struct device *dev, struct mrvl_bed_source *bsrc)
+{
+ if (pfn_valid(PHYS_PFN(bsrc->block_pa))) {
+ bsrc->block_va = phys_to_virt(bsrc->block_pa);
+ } else {
+ if (!devm_request_mem_region(dev, bsrc->block_pa, bsrc->block_sz, "BERT")) {
+ initerrmsg("Failure BERT request 0x%llx\n", bsrc->block_pa);
+ return -ENODEV;
+ }
+ bsrc->block_va = devm_ioremap(dev, bsrc->block_pa, bsrc->block_sz);
+ if (!bsrc->block_va) {
+ initerrmsg("%s Unable to map Boot Error Data\n", __func__);
+ return -ENODEV;
+ }
+ }
+ initdbgmsg("%s BERT Ring block VA=0x%llx\n", __func__, (long long)bsrc->block_va);
+
+ if (pfn_valid(PHYS_PFN(bsrc->bert_pa))) {
+ bsrc->bert_va = phys_to_virt(bsrc->bert_pa);
+ } else {
+ if (!devm_request_mem_region(dev, bsrc->bert_pa, bsrc->bert_sz, "BERT")) {
+ initerrmsg("Failure BERT request 0x%llx\n", bsrc->bert_pa);
+ return -ENODEV;
+ }
+ bsrc->bert_va = devm_ioremap(dev, bsrc->bert_pa, bsrc->bert_sz);
+ if (!bsrc->bert_va) {
+ initerrmsg("%s Unable to map Boot Error Data\n", __func__);
+ return -ENODEV;
+ }
+ }
+ initdbgmsg("%s BERT Ring block VA=0x%llx\n", __func__, (long long)bsrc->bert_va);
+
+
+ return 0;
+}
+
+static int __init ghes_bed_count_error(struct mrvl_bed_source *bsrc)
+{
+ struct otx2_ghes_err_ring *ring;
+ size_t error_cnt = 0;
+
+ ring = bsrc->block_va;
+ ring->reg = OTX2_GHES_ERR_RING_SIG;
+
+ if (!ring->size) {
+ initerrmsg("%s BERT support disabled by firmware\n", __func__);
+ return 0;
+ }
+
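+ /* Ring occupancy: 'head' is the producer index, 'tail' the consumer index. */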
+ if (ring->head >= ring->tail)
+ error_cnt = ring->head - ring->tail;
+ else
+ error_cnt = ring->size - (ring->tail - ring->head);
+
+ bsrc->error_cnt = error_cnt;
+
+ initdbgmsg("BED mem @ %llx (%llx PA), %llu B, error entries %ld\n",
+ (long long)bsrc->block_va, bsrc->block_pa,
+ (long long)bsrc->block_sz, error_cnt);
+
+ return error_cnt;
+}
+
+static int __init ghes_bed_fetch_errors(struct mrvl_bed_source *bsrc)
+{
+ struct acpi_table_bert *bert_tbl;
+ struct acpi_bert_region *bert_esb;
+ struct bed_bert_mem_entry *bert_entries;
+ struct otx2_ghes_err_ring *ring;
+ struct acpi_hest_generic_data *hest_gen_data;
+ struct bed_bert_mem_entry *bert_mem_entry;
+ struct acpi_hest_generic_status *estatus;
+ struct cper_sec_mem_err_old *mem_err;
+ struct otx2_ghes_err_record *err_rec;
+ u8 *p;
+ u8 sum = 0;
+ u32 idx = 0;
+
+ ring = bsrc->block_va;
+
+ bert_tbl = kzalloc(bsrc->bert_sz, GFP_KERNEL);
+
+ if (!bert_tbl) {
+ initerrmsg("Unable to allocate BERT data (0x%llx B)\n", bsrc->bert_sz);
+ return -ENOMEM;
+ }
+
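+ /*
+ * Build a BERT table in a scratch buffer: the ACPI table header is
+ * followed by one bed_bert_mem_entry per ring record; the finished
+ * table is copied into the firmware-reserved BERT region below.
+ */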
+ bert_esb = (struct acpi_bert_region *)(bert_tbl + 1);
+
+ bert_entries = (struct bed_bert_mem_entry *)bert_esb;
+
+ strcpy(bert_tbl->header.signature, ACPI_SIG_BERT);
+ bert_tbl->header.length = sizeof(*bert_tbl);
+ bert_tbl->header.revision = 1;
+ bert_tbl->header.oem_revision = 1;
+ strcpy(bert_tbl->header.oem_id, BERT_OEM_ID);
+ strcpy(bert_tbl->header.oem_table_id, BERT_TBL_OEM_ID);
+ strcpy(bert_tbl->header.asl_compiler_id, BERT_OEM_ID);
+ bert_tbl->header.asl_compiler_revision = 1;
+
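+ /* ACPI requires the byte sum of the table header (including checksum) to be zero. */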
+ p = (u8 *)&bert_tbl->header;
+ while (p < (u8 *)(&bert_tbl->header + 1))
+ sum += *p++;
+ bert_tbl->header.checksum -= sum;
+
+ bert_tbl->region_length = (bsrc->error_cnt * sizeof(*bert_entries));
+ bert_tbl->address = bsrc->bert_pa + ((void *)bert_esb - (void *)bert_tbl);
+
+ initdbgmsg("BERT: 0x%llx -> 0x%llx\n", bsrc->bert_pa, (long long)bsrc->bert_va);
+
+ for (idx = 0; idx < bsrc->error_cnt; idx++) {
+ err_rec = &ring->records[ring->tail];
+
+ bert_mem_entry = &bert_entries[idx];
+
+ estatus = &bert_mem_entry->estatus.hest;
+
+ estatus->raw_data_length = 0;
+ estatus->raw_data_offset = 0;
+ estatus->data_length = sizeof(bert_mem_entry->gen_data);
+ estatus->error_severity = err_rec->severity;
+
+ hest_gen_data = &bert_mem_entry->gen_data;
+
+ hest_gen_data->revision = 0x201; /* ACPI 4.x */
+ if (err_rec->fru_text[0]) {
+ hest_gen_data->validation_bits = ACPI_HEST_GEN_VALID_FRU_STRING;
+ strncpy(hest_gen_data->fru_text, err_rec->fru_text,
+ sizeof(hest_gen_data->fru_text));
+ }
+
+ hest_gen_data->error_severity = estatus->error_severity;
+ memcpy((guid_t *)hest_gen_data->section_type,
+ &CPER_SEC_PLATFORM_MEM, sizeof(guid_t));
+
+ hest_gen_data->error_data_length = sizeof(*mem_err);
+ estatus->data_length += hest_gen_data->error_data_length;
+
+ mem_err = &bert_mem_entry->mem_err;
+
+ if (pfn_valid(PHYS_PFN(bsrc->block_pa)))
+ memcpy(mem_err, &err_rec->u.mcc, sizeof(*mem_err));
+ else
+ memcpy_fromio(mem_err, &err_rec->u.mcc, sizeof(*mem_err));
+
+ /*
+ * This simply needs the entry count to be non-zero.
+ * Set entry count to one (see ACPI_HEST_ERROR_ENTRY_COUNT).
+ */
+ estatus->block_status = (1 << 4); /* i.e. one entry */
+
+ if (++ring->tail >= ring->size)
+ ring->tail = 0;
+ }
+
+ memcpy(bsrc->bert_va, bert_tbl, bsrc->bert_sz);
+ kfree(bert_tbl);
+
+ return 0;
+}
+
+/*
+ * Boot Error Data probe BERT ring
+ */
+static int __init ghes_bert_probe(struct platform_device *pdev)
+{
+ struct mrvl_bed_source bed_src;
+ struct device *dev = &pdev->dev;
+ int ret = -ENODEV;
+
+#ifdef CONFIG_CRASH_DUMP
+ if (is_kdump_kernel())
+#else
+ #pragma message "CONFIG_CRASH_DUMP setting is required for this module"
+ if (true)
+#endif
+ return ret;
+
+ if (has_acpi_companion(dev)) {
+ initdbgmsg("%s ACPI\n", __func__);
+ ret = ghes_bed_acpi_match_resource(pdev, &bed_src);
+ } else {
+ initdbgmsg("%s Device Tree\n", __func__);
+ ret = ghes_bed_of_match_resource(&bed_src);
+ }
+ if (ret)
+ goto exit0;
+
+ ret = ghes_bed_map_resource(dev, &bed_src);
+ if (ret) {
+ initerrmsg("%s Unable map BERT resource\n", __func__);
+ goto exit0;
+ }
+
+ ret = ghes_bed_count_error(&bed_src);
+ if (ret <= 0) {
+ initdbgmsg("%s No BERT errors\n", __func__);
+ goto exit1;
+ }
+
+ ret = ghes_bed_fetch_errors(&bed_src);
+ if (ret) {
+ initerrmsg("%s Unable setup BERT\n", __func__);
+ goto exit1;
+ }
+
+ if (!has_acpi_companion(dev))
+ bert_table_set(bed_src.bert_va);
+
+ if (has_acpi_companion(dev) && !pfn_valid(PHYS_PFN(bed_src.block_pa))) {
+ devm_iounmap(dev, bed_src.block_va);
+ devm_release_mem_region(dev, bed_src.block_pa, bed_src.block_sz);
+ }
+
+exit1:
+ initdbgmsg("%s BERT setup done.\n", __func__);
+ return ret;
+
+exit0:
+ initerrmsg("%s BERT setup failure %d\n", __func__, ret);
+ return ret;
+}
+
+static void ghes_bert_shutdown(struct platform_device *pdev)
+{
+}
+
+static int ghes_bert_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+
+static const struct platform_device_id ghes_bert_pdev_match[] = {
+ { .name = DRV_NAME, },
+ {},
+};
+MODULE_DEVICE_TABLE(platform, ghes_bert_pdev_match);
+
+static struct platform_driver ghes_bert_drv_ops __refdata = {
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = bed_bert_of_match,
+ .acpi_match_table = ACPI_PTR(bed_bert_acpi_match),
+ },
+ .probe = ghes_bert_probe,
+ .remove = ghes_bert_remove,
+ .shutdown = ghes_bert_shutdown,
+ .id_table = ghes_bert_pdev_match,
+};
+
+static int __init ghes_bert_init(void)
+{
+ return platform_driver_probe(&ghes_bert_drv_ops, ghes_bert_probe);
+}
+
+static void __exit ghes_bert_exit(void)
+{
+ platform_driver_unregister(&ghes_bert_drv_ops);
+}
+
+module_init(ghes_bert_init);
+module_exit(ghes_bert_exit);
+
+MODULE_DESCRIPTION("OcteonTX2 GHES BERT Module");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/soc/marvell/octeontx2-ghes/otx2-ghes-bert.h b/drivers/soc/marvell/octeontx2-ghes/otx2-ghes-bert.h
new file mode 100644
index 000000000000..5c16c52a7bb8
--- /dev/null
+++ b/drivers/soc/marvell/octeontx2-ghes/otx2-ghes-bert.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Supports OcteonTX2 Generic Hardware Error Source (BED)
+ * Boot Error Data (BED) from BERT table DT and ACPI
+ *
+ * Copyright (C) 2021 Marvell.
+ */
+
+#ifndef __OTX2_GHES_BERT_H__
+#define __OTX2_GHES_BERT_H__
+
+/*
+ * Boot Error Data Source
+ */
+struct mrvl_bed_source {
+ phys_addr_t block_pa;
+ u64 block_sz;
+ void __iomem *block_va;
+ phys_addr_t bert_pa;
+ u64 bert_sz;
+ void __iomem *bert_va;
+ u32 error_cnt;
+};
+
+struct bed_bert_mem_entry {
+ union {
+ /* These are identical; both are listed here for clarity */
+ struct acpi_hest_generic_status hest;
+ struct acpi_bert_region bert;
+ } estatus;
+ struct acpi_hest_generic_data gen_data;
+ struct cper_sec_mem_err_old mem_err;
+} __packed;
+
+#endif //__OTX2_GHES_BERT_H__
diff --git a/drivers/soc/marvell/octeontx2-ghes/otx2-sdei-ghes.c b/drivers/soc/marvell/octeontx2-ghes/otx2-sdei-ghes.c
new file mode 100644
index 000000000000..eeb271ea3d2e
--- /dev/null
+++ b/drivers/soc/marvell/octeontx2-ghes/otx2-sdei-ghes.c
@@ -0,0 +1,900 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Supports OcteonTX2 Generic Hardware Error Source[s] (GHES).
+ * GHES ACPI HEST & DT
+ *
+ * Copyright (C) 2021 Marvell.
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/arm_sdei.h>
+#include <linux/uuid.h>
+#include <linux/acpi.h>
+#include <acpi/apei.h>
+#include <linux/pci.h>
+#include <linux/crash_dump.h>
+#include <soc/marvell/octeontx/octeontx_smc.h>
+#include <asm/cputype.h>
+#include "otx2-sdei-ghes.h"
+
+#define DRV_NAME "sdei-ghes"
+
+#define initerrmsg(fmt, ...) pr_err(DRV_NAME ":" fmt, __VA_ARGS__)
+#ifdef CONFIG_OCTEONTX2_SDEI_GHES_DEBUG
+# define initdbgmsg(fmt, ...) pr_info(DRV_NAME ":" fmt, __VA_ARGS__)
+# define dbgmsg(dev, ...) dev_info((dev), __VA_ARGS__)
+#else
+# define initdbgmsg(fmt, ...) (void)(fmt)
+# define dbgmsg(dev, ...) (void)(dev)
+#endif // CONFIG_OCTEONTX2_SDEI_GHES_DEBUG
+
+#define OTX2_HEST_OEM_ID "MRVL "
+#define HEST_TBL_OEM_ID "OTX2 "
+
+
+static const struct of_device_id sdei_ghes_of_match[] = {
+ { .compatible = "marvell,sdei-ghes", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sdei_ghes_of_match);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id sdei_ghes_acpi_match[] = {
+ { "GHES0001", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, sdei_ghes_acpi_match);
+#endif
+
+#define PCI_VENDOR_ID_CAVIUM 0x177d
+#define PCI_DEVICE_ID_OCTEONTX2_LMC 0xa022
+#define PCI_DEVICE_ID_OCTEONTX2_MCC 0xa070
+#define PCI_DEVICE_ID_OCTEONTX2_MDC 0xa073
+
+static const struct pci_device_id sdei_ghes_mrvl_pci_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_OCTEONTX2_LMC) },
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_OCTEONTX2_MCC) },
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_OCTEONTX2_MDC) },
+ { 0, },
+};
+
+static bool cn10kx_model;
+static u8 tmp[256];
+
+static int sdei_ghes_callback(u32 event_id, struct pt_regs *regs, void *arg)
+{
+ struct acpi_hest_generic_status *estatus;
+ struct acpi_hest_generic_data *gdata;
+ void *esb_err;
+ struct otx2_ghes_err_record *ring_rec;
+ struct mrvl_ghes_source *gsrc;
+ u32 head, tail;
+
+ pr_notice("%s event id 0x%x\n", __func__, event_id);
+
+ if (!arg) {
+ initerrmsg("%s Failed callback\n", __func__);
+ return -1;
+ }
+
+ gsrc = arg;
+
+ head = gsrc->ring->head;
+ tail = gsrc->ring->tail;
+
+ initerrmsg("%s to %llx, head=%d (%llx), tail=%d (%llx), size=%d, sign=%x\n", __func__,
+ (long long)gsrc->esb_va, head,
+ (long long)&gsrc->ring->head, tail, (long long)&gsrc->ring->tail,
+ gsrc->ring->size, *(int *)((&gsrc->ring->size) + 1));
+
+ /*Ensure that head updated*/
+ rmb();
+
+ if (head == tail) {
+ initerrmsg("event 0x%x ring is empty, head=%d, size=%d\n",
+ event_id, head, gsrc->ring->size);
+ return -1;
+ }
+
+ ring_rec = &gsrc->ring->records[tail];
+
+ memset(tmp, 0, sizeof(tmp));
+ estatus = (struct acpi_hest_generic_status *)tmp;
+ gdata = (struct acpi_hest_generic_data *)(estatus + 1);
+ esb_err = (gdata + 1);
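+
+ /*
+ * Assemble the error record in the scratch buffer: generic error
+ * status header, one generic data entry and the raw memory error
+ * section, then copy the whole block to the polled error status
+ * block (esb_va) below.
+ */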
+
+ //This simply needs the entry count to be non-zero.
+ //Set entry count to one (see ACPI_HEST_ERROR_ENTRY_COUNT).
+ estatus->block_status = (1 << 4); // i.e. one entry
+ estatus->raw_data_offset = sizeof(*estatus) + sizeof(*gdata);
+ estatus->raw_data_length = 0;
+ estatus->data_length = gsrc->esb_sz - sizeof(*estatus);
+ estatus->error_severity = ring_rec->severity;
+ gdata->revision = 0x201; // ACPI 4.x
+ if (ring_rec->fru_text[0]) {
+ gdata->validation_bits = ACPI_HEST_GEN_VALID_FRU_STRING;
+ memcpy_fromio(gdata->fru_text, ring_rec->fru_text, sizeof(gdata->fru_text));
+ }
+ gdata->error_severity = estatus->error_severity;
+
+ guid_copy((guid_t *)gdata->section_type, &CPER_SEC_PLATFORM_MEM);
+ initdbgmsg("%s CPER_SEC_PLATFORM_MEM\n", __func__);
+
+ gdata->error_data_length = gsrc->esb_sz -
+ (sizeof(*estatus) + sizeof(*gdata));
+
+ memcpy_fromio(esb_err, &ring_rec->u.mcc, gdata->error_data_length);
+ initdbgmsg("%s err_sev=%x,\n", __func__, ring_rec->severity);
+ memcpy_toio(gsrc->esb_va, tmp, gsrc->esb_sz);
+
+ /*Ensure that error status is committed to memory prior to set block_status*/
+ wmb();
+
+ if (++tail >= gsrc->ring->size)
+ tail = 0;
+ gsrc->ring->tail = tail;
+
+ return 0;
+}
+
+static int sdei_ras_core_callback(uint32_t event_id, struct pt_regs *regs, void *arg)
+{
+ struct mrvl_ghes_source *core = NULL;
+ struct mrvl_core_error_raport *raport = NULL;
+ struct acpi_hest_generic_status *estatus = NULL;
+ struct acpi_hest_generic_data *gdata = NULL;
+ struct otx2_ghes_err_record *rec = NULL;
+ uint32_t head = 0;
+ uint32_t tail = 0;
+
+ if (!arg) {
+ initdbgmsg("%s %s failed argument\n", DRV_NAME, __func__);
+ return -EINVAL;
+ }
+
+ core = arg;
+
+ head = core->ring->head;
+ tail = core->ring->tail;
+ pr_notice("%s event id 0x%x\n", __func__, event_id);
+
+ /*Ensure that head updated*/
+ rmb();
+
+ if (head == tail) {
+ initdbgmsg("%s event 0x%x ring is empty, head=%d, size=%d\n", DRV_NAME,
+ event_id, head, core->ring->size);
+ return -EINVAL;
+ }
+
+ memset(tmp, 0, sizeof(tmp));
+ rec = &core->ring->records[tail];
+
+ raport = (struct mrvl_core_error_raport *)tmp;
+ estatus = &raport->estatus;
+ gdata = &raport->gdata;
+
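+ /*
+ * Core errors are reported as a CPER ARM processor error section
+ * (CPER_SEC_PROC_ARM): the error descriptor and info from the ring
+ * record are copied behind the generic data entry.
+ */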
+ estatus->block_status = (1 << 4);
+ estatus->raw_data_offset = sizeof(struct acpi_hest_generic_status) +
+ sizeof(struct acpi_hest_generic_data);
+ estatus->raw_data_length = 0;
+ estatus->data_length = core->esb_sz - sizeof(struct acpi_hest_generic_status);
+ estatus->error_severity = rec->severity;
+
+ gdata->revision = 0x201; // ACPI 4.x
+ if (rec->fru_text[0]) {
+ gdata->validation_bits = ACPI_HEST_GEN_VALID_FRU_STRING;
+ memcpy(gdata->fru_text, rec->fru_text, sizeof(gdata->fru_text));
+ }
+
+ gdata->error_severity = estatus->error_severity;
+ guid_copy((guid_t *)gdata->section_type, &CPER_SEC_PROC_ARM);
+ gdata->error_data_length = core->esb_sz -
+ (sizeof(struct acpi_hest_generic_status) +
+ sizeof(struct acpi_hest_generic_data));
+
+ initdbgmsg("%s event 0x%x error severity=%x,\n", DRV_NAME, core->id,
+ rec->severity);
+
+ memcpy_fromio(&raport->desc, &rec->u.core.desc, gdata->error_data_length);
+ memcpy_fromio(&raport->info, &rec->u.core.info, sizeof(rec->u.core.info));
+
+ memcpy_toio(core->esb_core_va, tmp, core->esb_sz);
+
+ /*Ensure that error status is committed to memory prior to set status*/
+ wmb();
+
+ if (++tail >= core->ring->size)
+ tail = 0;
+ core->ring->tail = tail;
+
+ return 0;
+}
+
+/*
+ * Enable MSIX at the device level (MSIX_CAPABILITIES Header).
+ *
+ * NOTE: We SHOULD be able to use PCCPVF_XXX_VSEC_SCTL[MSIX_SEC_EN]
+ * to enable our SECURE IRQs, but for errata PCC-34263...
+ */
+static void dev_enable_msix_t9x(struct pci_dev *pdev)
+{
+ u16 ctrl;
+
+ initdbgmsg("%s: entry\n", __func__);
+
+ if ((pdev->msi_enabled) || (pdev->msix_enabled)) {
+ initerrmsg("MSI(%d) or MSIX(%d) already enabled\n",
+ pdev->msi_enabled, pdev->msix_enabled);
+ return;
+ }
+
+ /* enable MSIX delivery for this device; we handle [secure] MSIX ints */
+ pdev->msix_cap = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
+ if (pdev->msix_cap) {
+ pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
+ ctrl |= PCI_MSIX_FLAGS_ENABLE;
+ pci_write_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, ctrl);
+
+ initdbgmsg("Set MSI-X Enable for PCI dev %04d:%02d.%d\n",
+ pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
+ } else {
+ initerrmsg("PCI dev %04d:%02d.%d missing MSIX capabilities\n",
+ pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
+ }
+}
+
+/* Enable MSIX for devices whose [secure] IRQ's we control.
+ * These IRQs have been initialized by ATF.
+ * This is required due to an errata against
+ * PCCPVF_XXX_VSEC_SCTL[MSIX_SEC_EN].
+ */
+static void sdei_ghes_msix_init_t9x(void)
+{
+ const struct pci_device_id *pdevid;
+ struct pci_dev *pdev;
+ size_t i;
+
+ initdbgmsg("%s: entry\n", __func__);
+
+ for (i = 0; i < ARRAY_SIZE(sdei_ghes_mrvl_pci_tbl); i++) {
+ pdevid = &sdei_ghes_mrvl_pci_tbl[i];
+ pdev = NULL;
+
+ while ((pdev = pci_get_device(pdevid->vendor, pdevid->device, pdev)))
+ dev_enable_msix_t9x(pdev);
+ }
+}
+
+/* Main initialization function for ghes_drv device instance. */
+static int sdei_ghes_driver_init(struct platform_device *pdev)
+{
+ struct mrvl_sdei_ghes_drv *ghes_drv;
+ struct device *dev = &pdev->dev;
+ struct mrvl_ghes_source *gsrc;
+ size_t i;
+ int ret = 0;
+
+ initdbgmsg("%s: entry\n", __func__);
+
+ ghes_drv = platform_get_drvdata(pdev);
+
+ for (i = 0; i < ghes_drv->source_count; i++) {
+ gsrc = &ghes_drv->source_list[i];
+
+ if (gsrc->id < OCTEONTX_SDEI_RAS_AP0_EVENT)
+ ret = sdei_event_register(gsrc->id, sdei_ghes_callback, gsrc);
+ else
+ ret = sdei_event_register(gsrc->id, sdei_ras_core_callback, gsrc);
+
+ if (ret < 0) {
+ dev_err(dev, "Error %d registering ghes 0x%x (%s)\n",
+ ret, gsrc->id, gsrc->name);
+ continue;
+ }
+
+ ret = sdei_event_enable(gsrc->id);
+ if (ret < 0) {
+ dev_err(dev, "Error %d enabling ghes 0x%x (%s)\n",
+ ret, gsrc->id, gsrc->name);
+ continue;
+ }
+ gsrc->ring->reg = OTX2_GHES_ERR_RING_SIG;
+
+ initdbgmsg("Register GHES 0x%x (%s) %s [%llx, %llx, %llx]\n",
+ gsrc->id, gsrc->name, "reg",
+ (long long)gsrc->esa_va,
+ (long long)gsrc->esb_va, (long long)gsrc->ring);
+ }
+
+ if (i != ghes_drv->source_count)
+ dev_err(dev, "Error cannot register all ghes\n");
+ else
+ dev_info(dev, "Registered & enabled %ld GHES\n", ghes_drv->source_count);
+
+ return 0;
+}
+
+/* Main de-initialization function for ghes_drv device instance. */
+static int sdei_ghes_driver_deinit(struct platform_device *pdev)
+{
+ struct mrvl_sdei_ghes_drv *ghes_drv;
+ struct device *dev = &pdev->dev;
+ struct mrvl_ghes_source *gsrc;
+ int ret, i;
+
+ initdbgmsg("%s: entry\n", __func__);
+
+ ghes_drv = platform_get_drvdata(pdev);
+
+ for (i = 0; i < ghes_drv->source_count; i++) {
+ gsrc = &ghes_drv->source_list[i];
+
+ gsrc->ring->reg = 0;
+
+ ret = sdei_event_disable(gsrc->id);
+ if (ret < 0)
+ dev_err(dev, "Error %d disabling SDEI gsrc 0x%x (%s)\n",
+ ret, gsrc->id, gsrc->name);
+
+ ret = sdei_event_unregister(gsrc->id);
+ if (ret < 0)
+ dev_err(dev, "Error %d unregistering SDEI gsrc 0x%x (%s)\n",
+ ret, gsrc->id, gsrc->name);
+ }
+
+ return 0;
+}
+
+static int __init sdei_ghes_of_match_resource(struct platform_device *pdev)
+{
+ struct device_node *of_node;
+ struct device_node *child_node;
+ struct mrvl_sdei_ghes_drv *ghes_drv;
+ struct mrvl_ghes_source *gsrc;
+ struct device *dev;
+ const __be32 *res;
+ u64 size;
+ u64 base;
+ const u32 *id;
+ size_t i = 0;
+
+ initdbgmsg("%s: entry\n", __func__);
+
+ dev = &pdev->dev;
+ ghes_drv = platform_get_drvdata(pdev);
+ of_node = of_find_matching_node(NULL, sdei_ghes_of_match);
+
+ if (!of_node) {
+ dev_err(dev, "ghes no matching node.\n");
+ return -ENODEV;
+ }
+
+ for_each_available_child_of_node(of_node, child_node) {
+ if (i >= ghes_drv->source_count) {
+ dev_err(dev, "ghes resource allocation overflow %ld.\n", i);
+ return -EFAULT;
+ }
+
+ gsrc = &ghes_drv->source_list[i];
+
+ strncpy(gsrc->name, child_node->name, sizeof(gsrc->name) - 1);
+
+ // Error Status Address
+ res = of_get_address(child_node, 0, NULL, NULL);
+ if (!res) {
+ dev_err(dev, "ghes cannot get esa addr %ld.\n", i);
+ return -EINVAL;
+ }
+ base = of_translate_address(child_node, res);
+ if (base == OF_BAD_ADDR) {
+ dev_err(dev, "ghes cannot map esa addr %ld.\n", i);
+ return -EINVAL;
+ }
+ gsrc->esa_pa = (phys_addr_t)base;
+
+ // Error Status Block
+ res = of_get_address(child_node, 1, &size, NULL);
+ if (!res) {
+ dev_err(dev, "ghes cannot get esb addr %ld.\n", i);
+ return -EINVAL;
+ }
+ base = of_translate_address(child_node, res);
+ if (base == OF_BAD_ADDR) {
+ dev_err(dev, "ghes cannot map esb addr %ld.\n", i);
+ return -EINVAL;
+ }
+ gsrc->esb_pa = (phys_addr_t)base;
+ gsrc->esb_sz = (size_t)size;
+
+ // Error Ring
+ res = of_get_address(child_node, 2, &size, NULL);
+ if (!res) {
+ dev_err(dev, "ghes cannot get ring addr %ld.\n", i);
+ return -EINVAL;
+ }
+ base = of_translate_address(child_node, res);
+ if (base == OF_BAD_ADDR) {
+ dev_err(dev, "ghes cannot map ring addr %ld.", i);
+ return -EINVAL;
+ }
+ gsrc->ring_pa = (phys_addr_t)base;
+ gsrc->ring_sz = (size_t)size;
+
+ // Event ID
+ id = of_get_property(child_node, "event-id", NULL);
+ if (!id) {
+ dev_err(dev, "ghes cannot map event id %ld.", i);
+ return -EINVAL;
+ }
+ gsrc->id = be32_to_cpu(*id);
+
+ initdbgmsg("GHES: %s 0x%llx/0x%llx/0x%llx, ID:0x%x)\n", gsrc->name,
+ gsrc->esa_pa, gsrc->esb_pa, gsrc->ring_pa, gsrc->id);
+
+ i++;
+ }
+
+ return 0;
+}
+
+static int __init sdei_ghes_get_esa(struct acpi_hest_header *hest_hdr, void *data)
+{
+ struct acpi_hest_generic *generic = (struct acpi_hest_generic *)hest_hdr;
+ u64 *esrc = data;
+ static int i;
+
+ initdbgmsg("%s 0x%llx: 0x%llx\n", __func__,
+ (long long)&generic->error_status_address.address,
+ (long long)generic->error_status_address.address);
+ esrc[i] = generic->error_status_address.address;
+ i++;
+
+ return 0;
+}
+
+static phys_addr_t __init sdei_ghes_get_error_source_address(struct mrvl_sdei_ghes_drv *ghes_drv)
+{
+ int i = 0;
+ u64 *esrc = NULL;
+ phys_addr_t ret = ~0ULL;
+
+ esrc = kcalloc(ghes_drv->source_count, sizeof(*esrc), GFP_KERNEL);
+ if (!esrc) {
+ initdbgmsg("%s Failed to allocate esrc\n", __func__);
+ return 0;
+ }
+
+ apei_hest_parse(sdei_ghes_get_esa, esrc);
+
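+ /* Use the lowest error status address found in the HEST as the base. */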
+ for (i = 0; i < ghes_drv->source_count; i++)
+ ret = ret > esrc[i] ? esrc[i] : ret;
+
+ kfree(esrc);
+ return ret;
+}
+
+static int __init sdei_ghes_acpi_match_resource(struct platform_device *pdev)
+{
+ struct mrvl_sdei_ghes_drv *ghes_drv;
+ struct mrvl_ghes_source *gsrc;
+ struct resource *res;
+ struct device *dev;
+ size_t i = 0;
+ size_t idx = 0;
+ phys_addr_t base = 0;
+ u32 core = 0;
+
+ dev = &pdev->dev;
+ ghes_drv = platform_get_drvdata(pdev);
+
+ base = sdei_ghes_get_error_source_address(ghes_drv);
+
+ for (i = 0; i < ghes_drv->source_count; i++) {
+ gsrc = &ghes_drv->source_list[i];
+
+ // Error Status Address
+ res = platform_get_resource(pdev, IORESOURCE_MEM, idx);
+ if (!res) {
+ dev_err(dev, "%s ACPI warn get gsrc=%ld idx=%ld\n", __func__, i, idx);
+ return -ENOENT;
+ }
+ initdbgmsg("%s Status Address %s [%llx - %llx, %lx, %lx]\n", __func__,
+ res->name, res->start, res->end, res->flags, res->desc);
+ /*
+ * The HEST defines the BASE 'error status address' block,
+ * while the DSDT provides offsets from that base for the
+ * error status address, error status block and error ring.
+ * The driver resolves base + offset here and later patches
+ * the HEST with the validated addresses.
+ */
+ gsrc->esa_pa = res->start + base;
+ idx++;
+
+ // Error Status Block Buffer
+ res = platform_get_resource(pdev, IORESOURCE_MEM, idx);
+ if (!res) {
+ dev_err(dev, "%s ACPI warn get gsrc=%ld idx=%ld\n", __func__, i, idx);
+ return -ENOENT;
+ }
+ initdbgmsg("%s Status Block %s [%llx - %llx / %llx, %lx, %lx]\n", __func__,
+ res->name, res->start, res->end, resource_size(res),
+ res->flags, res->desc);
+ gsrc->esb_pa = res->start + base;
+ gsrc->esb_sz = resource_size(res);
+ idx++;
+
+ // Error Blocks Ring
+ res = platform_get_resource(pdev, IORESOURCE_MEM, idx);
+ if (!res) {
+ dev_err(dev, "%s ACPI warn get gsrc=%ld idx=%ld\n", __func__, i, idx);
+ return -ENOENT;
+ }
+ initdbgmsg("%s Status Ring %s [%llx - %llx, %lx, %lx]\n", __func__,
+ res->name, res->start, res->end, res->flags, res->desc);
+ gsrc->ring_pa = res->start + base;
+ gsrc->ring_sz = resource_size(res);
+ idx++;
+
+ // Event ID
+ res = platform_get_resource(pdev, IORESOURCE_MEM, idx);
+ if (!res) {
+ dev_err(dev, "%s ACPI warn get gsrc=%ld idx=%ld\n", __func__, i, idx);
+ return -ENOENT;
+ }
+ initdbgmsg("%s Event ID %s [%llx - %llx, %lx, %lx]\n", __func__,
+ res->name, res->start, res->end, res->flags, res->desc);
+ gsrc->id = res->start;
+ idx++;
+
+ initdbgmsg("GHES: 0x%llx / 0x%llx / 0x%llx, ID:0x%x)\n",
+ gsrc->esa_pa, gsrc->esb_pa, gsrc->ring_pa, gsrc->id);
+ }
+
+ for (i = 0; i < ghes_drv->source_count; i++) {
+ gsrc = &ghes_drv->source_list[i];
+ if (gsrc->id >= OCTEONTX_SDEI_RAS_AP0_EVENT)
+ sprintf(gsrc->name, "core%d", core++);
+ else if (gsrc->id == OCTEONTX_SDEI_RAS_MDC_EVENT)
+ sprintf(gsrc->name, "MDC");
+ else if (gsrc->id == OCTEONTX_SDEI_RAS_MCC_EVENT)
+ sprintf(gsrc->name, cn10kx_model ? "DSS" : "MCC");
+ else if (gsrc->id == OCTEONTX_SDEI_RAS_LMC_EVENT)
+ sprintf(gsrc->name, cn10kx_model ? "TAD" : "LMC");
+ initdbgmsg("%s %s\n", __func__, gsrc->name);
+ }
+
+ return 0;
+}
+
+static int sdei_ghes_setup_resource(struct mrvl_sdei_ghes_drv *ghes_drv)
+{
+ struct mrvl_ghes_source *gsrc;
+ size_t i = 0;
+ struct device *dev = ghes_drv->dev;
+
+ initdbgmsg("%s: entry\n", __func__);
+
+ for (i = 0; i < ghes_drv->source_count; i++) {
+ gsrc = &ghes_drv->source_list[i];
+
+ if (pfn_valid(PHYS_PFN(gsrc->esa_pa)))
+ gsrc->esa_va = phys_to_virt(gsrc->esa_pa);
+ else {
+ if (!devm_request_mem_region(dev, gsrc->esa_pa,
+ sizeof(gsrc->esa_va), gsrc->name))
+ return -EFAULT;
+ gsrc->esa_va = devm_ioremap(dev, gsrc->esa_pa, sizeof(gsrc->esa_va));
+ if (!gsrc->esa_va) {
+ dev_err(dev, "estatus unable map phys addr");
+ return -EFAULT;
+ }
+ }
+
+ if (pfn_valid(PHYS_PFN(gsrc->esb_pa)))
+ gsrc->esb_va = phys_to_virt(gsrc->esb_pa);
+ else {
+ if (!devm_request_mem_region(dev, gsrc->esb_pa, gsrc->esb_sz, gsrc->name))
+ return -EFAULT;
+ gsrc->esb_va = devm_ioremap(dev, gsrc->esb_pa, gsrc->esb_sz);
+ if (!gsrc->esb_va) {
+ dev_err(dev, "gdata unable map phys addr");
+ return -EFAULT;
+ }
+ }
+
+ if (pfn_valid(PHYS_PFN(gsrc->ring_pa))) {
+ gsrc->ring = phys_to_virt(gsrc->ring_pa);
+ initdbgmsg("%s ring buffer direct map\n", __func__);
+ } else {
+ if (!devm_request_mem_region(dev, gsrc->ring_pa, gsrc->ring_sz, gsrc->name))
+ return -EFAULT;
+ gsrc->ring = devm_ioremap(dev, gsrc->ring_pa, gsrc->ring_sz);
+ if (!gsrc->ring) {
+ dev_err(dev, "ring unable map phys addr");
+ return -EFAULT;
+ }
+ }
+
+ initdbgmsg("%s %s 0x%llx/0x%llx/0x%llx\n", __func__, gsrc->name,
+ (unsigned long long)gsrc->esa_va,
+ (unsigned long long)gsrc->esb_va,
+ (unsigned long long)gsrc->ring);
+ }
+
+ return 0;
+}
+
+static void sdei_ghes_init_source(struct mrvl_sdei_ghes_drv *ghes_drv)
+{
+ struct mrvl_ghes_source *gsrc;
+ size_t i;
+
+ for (i = 0; i < ghes_drv->source_count; i++) {
+ gsrc = &ghes_drv->source_list[i];
+
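+ /*
+ * Clear any stale status and point the polled error status
+ * address at this source's error status block so APEI/GHES
+ * polling can find it.
+ */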
+ gsrc->esb_va->block_status = 0;
+
+ *gsrc->esa_va = gsrc->esb_pa;
+
+ initdbgmsg("%s poll address 0x%llx: 0x%llx\n", __func__,
+ gsrc->esa_pa, gsrc->esb_pa);
+
+ if (!pfn_valid(PHYS_PFN(gsrc->esa_pa))) {
+ devm_iounmap(ghes_drv->dev, gsrc->esa_va);
+ devm_release_mem_region(ghes_drv->dev, gsrc->esa_pa, sizeof(gsrc->esa_va));
+ }
+ acpi_os_map_iomem(gsrc->esa_pa, 8);
+ }
+}
+
+static int sdei_ghes_count(struct acpi_hest_header *hest_hdr, void *data)
+{
+ int *count = data;
+
+ if (hest_hdr->type == ACPI_HEST_TYPE_GENERIC_ERROR ||
+ hest_hdr->type == ACPI_HEST_TYPE_GENERIC_ERROR_V2)
+ (*count)++;
+
+ return 0;
+}
+
+static size_t sdei_ghes_count_source(struct mrvl_sdei_ghes_drv *ghes_drv)
+{
+ size_t count;
+ struct device_node *of_node;
+ struct device_node *child_node;
+
+ count = 0;
+ of_node = of_find_matching_node(NULL, sdei_ghes_of_match);
+
+ if (of_node) {
+ for_each_available_child_of_node(of_node, child_node) {
+ initdbgmsg("%s %s\n", __func__, child_node->name);
+ count++;
+ }
+ } else {
+ count = 0;
+ apei_hest_parse(sdei_ghes_count, &count);
+ }
+ initdbgmsg("%s %zu\n", __func__, count);
+
+ return count;
+}
+
+static int sdei_ghes_alloc_source(struct device *dev,
+ struct mrvl_sdei_ghes_drv *ghes_drv)
+{
+ size_t size = 0;
+
+ initdbgmsg("%s\n", __func__);
+
+ size = ghes_drv->source_count * sizeof(struct mrvl_ghes_source);
+
+ ghes_drv->source_list = devm_kzalloc(dev, size, GFP_KERNEL);
+ if (!ghes_drv->source_list)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int __init sdei_ghes_of_alloc_hest(struct mrvl_sdei_ghes_drv *ghes_drv)
+{
+ struct mrvl_ghes_source *gsrc;
+ unsigned int size;
+ struct acpi_table_hest *hest;
+ struct acpi_table_header *hdr;
+ struct acpi_hest_generic *generic;
+ size_t i;
+ u8 *p;
+ u8 sum = 0;
+ struct device *dev = ghes_drv->dev;
+
+ initdbgmsg("%s: entry\n", __func__);
+
+ size = sizeof(struct acpi_table_hest) +
+ ghes_drv->source_count * sizeof(struct acpi_hest_generic);
+
+ hest = devm_kzalloc(dev, size, GFP_KERNEL);
+ if (!hest)
+ return -ENOMEM;
+
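+ /* Generic error source entries immediately follow the HEST header */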
+ generic = (struct acpi_hest_generic *)(hest + 1);
+
+ hdr = &hest->header;
+
+ strcpy(hdr->signature, ACPI_SIG_HEST);
+ hdr->length = size;
+ hdr->revision = 1;
+ strcpy(hdr->oem_id, OTX2_HEST_OEM_ID);
+ strcpy(hdr->oem_table_id, HEST_TBL_OEM_ID);
+ hdr->oem_revision = 1;
+ strcpy(hdr->asl_compiler_id, OTX2_HEST_OEM_ID);
+ hdr->asl_compiler_revision = 1;
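+ /* 8-bit checksum: make the header bytes sum to zero */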
+ p = (u8 *)hdr;
+ while (p < (u8 *)(hdr + 1))
+ sum += *p, p++;
+ hdr->checksum -= sum;
+ hest->error_source_count = ghes_drv->source_count;
+
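+ /*
+ * Describe each SDEI source as a polled Generic Hardware Error Source
+ * entry pointing at the Error Status Address mapped earlier.
+ */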
+ for (i = 0; i < hest->error_source_count; i++, generic++) {
+ gsrc = &ghes_drv->source_list[i];
+
+ generic->header.type = ACPI_HEST_TYPE_GENERIC_ERROR;
+ generic->header.source_id = i;
+ generic->related_source_id = i;
+ generic->reserved = 0;
+ generic->enabled = 1;
+ generic->records_to_preallocate = 1;
+ generic->max_sections_per_record = 1;
+ generic->max_raw_data_length = 0;
+
+ generic->error_status_address.space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;
+ generic->error_status_address.bit_width = 64;
+ generic->error_status_address.bit_offset = 0;
+ generic->error_status_address.access_width = 4;
+ generic->error_status_address.address = gsrc->esa_pa;
+
+ generic->notify.type = ACPI_HEST_NOTIFY_POLLED;
+ generic->notify.length = sizeof(struct acpi_hest_notify);
+ generic->notify.config_write_enable = 0;
+ generic->notify.poll_interval = 1000; /* i.e. 1 sec */
+ generic->notify.vector = gsrc->id;
+ generic->notify.error_threshold_value = 1;
+ generic->notify.error_threshold_window = 1;
+
+ generic->error_block_length = gsrc->esb_sz;
+
+ initdbgmsg("%s %s [%x] estatus=%llx, poll=%d, block_sz=%x\n", __func__,
+ gsrc->name, gsrc->id,
+ (unsigned long long)generic->error_status_address.address,
+ generic->notify.poll_interval,
+ generic->error_block_length);
+ }
+
+ hest_table_set(hest);
+
+ acpi_hest_init();
+ initdbgmsg("%s registering HEST\n", __func__);
+
+ return 0;
+}
+
+static int __init sdei_ghes_probe(struct platform_device *pdev)
+{
+ struct mrvl_sdei_ghes_drv *ghes_drv = NULL;
+ struct device *dev = &pdev->dev;
+ int ret = -ENODEV;
+ cn10kx_model = is_soc_cn10kx();
+
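+ /* Bail out in a kdump kernel (or when CONFIG_CRASH_DUMP is not set) */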
+#ifdef CONFIG_CRASH_DUMP
+ if (is_kdump_kernel())
+#else
+ #pragma message "CONFIG_CRASH_DUMP setting is required for this module"
+ if (true)
+#endif
+ return ret;
+
+ initdbgmsg("%s\n", __func__);
+
+ ghes_drv = devm_kzalloc(dev, sizeof(struct mrvl_sdei_ghes_drv), GFP_KERNEL);
+ if (!ghes_drv)
+ return -ENOMEM;
+
+ ghes_drv->dev = dev;
+
+ ghes_drv->source_count = sdei_ghes_count_source(ghes_drv);
+ if (!ghes_drv->source_count) {
+ dev_err(dev, "Not available resource.\n");
+ return -EINVAL;
+ }
+ initdbgmsg("%s source count %ld\n", __func__, ghes_drv->source_count);
+
+ ret = sdei_ghes_alloc_source(dev, ghes_drv);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, ghes_drv);
+
+ if (has_acpi_companion(dev)) {
+ initdbgmsg("%s ACPI\n", __func__);
+ acpi_match_device(dev->driver->acpi_match_table, dev);
+ ret = sdei_ghes_acpi_match_resource(pdev);
+ } else {
+ initdbgmsg("%s DeviceTree\n", __func__);
+ acpi_permanent_mmap = true;
+ set_bit(EFI_MEMMAP, &efi.flags);
+ ret = sdei_ghes_of_match_resource(pdev);
+ }
+ if (ret < 0) {
+ dev_err(dev, "Failed parse match resources\n");
+ return ret;
+ }
+
+ ret = sdei_ghes_setup_resource(ghes_drv);
+ if (ret)
+ goto exit0;
+
+ sdei_ghes_init_source(ghes_drv);
+
+ if (!has_acpi_companion(dev)) {
+ ret = sdei_ghes_of_alloc_hest(ghes_drv);
+ if (ret) {
+ dev_err(dev, "Unable allocate HEST.\n");
+ goto exit0;
+ }
+ }
+
+ if (!cn10kx_model)
+ sdei_ghes_msix_init_t9x();
+
+ ret = sdei_ghes_driver_init(pdev);
+ if (ret) {
+ dev_err(dev, "Error initializing SDEI GHES support.\n");
+ sdei_ghes_driver_deinit(pdev);
+ goto exit0;
+ }
+
+ return 0;
+
+exit0:
+ dev_err(dev, "Error probe GHES.\n");
+ return ret;
+}
+
+static int sdei_ghes_remove(struct platform_device *pdev)
+{
+ initdbgmsg("%s: entry\n", __func__);
+
+ sdei_ghes_driver_deinit(pdev);
+
+ return 0;
+}
+
+static const struct platform_device_id sdei_ghes_pdev_match[] = {
+ { .name = DRV_NAME, },
+ {},
+};
+MODULE_DEVICE_TABLE(platform, sdei_ghes_pdev_match);
+
+static struct platform_driver sdei_ghes_drv_probe = {
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = sdei_ghes_of_match,
+ .acpi_match_table = ACPI_PTR(sdei_ghes_acpi_match),
+ },
+ .probe = sdei_ghes_probe,
+ .remove = sdei_ghes_remove,
+ .id_table = sdei_ghes_pdev_match,
+};
+module_platform_driver(sdei_ghes_drv_probe);
+
+MODULE_DESCRIPTION("OcteonTX2 SDEI GHES Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/soc/marvell/octeontx2-ghes/otx2-sdei-ghes.h b/drivers/soc/marvell/octeontx2-ghes/otx2-sdei-ghes.h
new file mode 100644
index 000000000000..36f144495681
--- /dev/null
+++ b/drivers/soc/marvell/octeontx2-ghes/otx2-sdei-ghes.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Supports OcteonTX2 Generic Hardware Error Source[s] (GHES).
+ * GHES ACPI HEST & DT
+ *
+ * Copyright (C) 2021 Marvell.
+ */
+
+#ifndef __OTX2_SDEI_GHES_H__
+#define __OTX2_SDEI_GHES_H__
+
+#define OCTEONTX_SDEI_RAS_MDC_EVENT 0x40000000
+#define OCTEONTX_SDEI_RAS_MCC_EVENT 0x40000001
+#define OCTEONTX_SDEI_RAS_LMC_EVENT 0x40000002
+#define OCTEONTX_SDEI_RAS_AP0_EVENT 0x40000003
+#define OCTEONTX_SDEI_RAS_DSS_EVENT OCTEONTX_SDEI_RAS_MCC_EVENT
+#define OCTEONTX_SDEI_RAS_TAD_EVENT OCTEONTX_SDEI_RAS_LMC_EVENT
+
+#define SDEI_GHES_EVENT_NAME_MAX_CHARS 16
+
+struct mrvl_core_error_raport {
+ struct acpi_hest_generic_status estatus;
+ struct acpi_hest_generic_data gdata;
+ struct cper_sec_proc_arm desc;
+ struct cper_arm_err_info info;
+// struct cper_arm_ctx_info ctx;
+// uint64_t reg[0];
+};
+
+/*
+ * Describes an error source per ACPI 18.3.2.6 (Generic Hardware Error Source).
+ * This produces GHES-compliant error records from data forwarded by the [ATF]
+ * firmware.
+ * There exists one of these for each error source.
+ *
+ * @name: event source name (mdc/mcc/lmc)
+ * @id: event ID
+ * @esa_pa: physical address of the Error Status Address register/iomem
+ * @esa_va: mapped pointer to the Error Status Address, which points at the Error Status Block
+ * @esb_pa: physical address of the Error Status Block, followed by the Error Status Data
+ * @esb_va: mapped pointer to the Error Status Block
+ * @ring_pa: physical address of the ring of Error Status Blocks
+ * @ring: mapped pointer to the ring of Error Status Blocks
+ * @ring_sz: ring buffer size
+ * @esb_sz: size of the Error Status Block
+ */
+struct mrvl_ghes_source {
+ char name[SDEI_GHES_EVENT_NAME_MAX_CHARS];
+ phys_addr_t esa_pa;
+ phys_addr_t esb_pa;
+ phys_addr_t ring_pa;
+ phys_addr_t *esa_va;
+ union {
+ struct acpi_hest_generic_status *esb_va;
+ struct mrvl_core_error_raport *esb_core_va;
+ };
+ struct otx2_ghes_err_ring *ring;
+ size_t ring_sz;
+ size_t esb_sz;
+ u32 id;
+};
+
+/**
+ * struct mrvl_sdei_ghes_drv: driver state
+ *
+ * @dev: associated platform device
+ * @source_list: list of [SDEI] producers (one for each error source)
+ * @source_count: count of [SDEI] producers (size of @source_list)
+ */
+struct mrvl_sdei_ghes_drv {
+ struct device *dev;
+ struct mrvl_ghes_source *source_list;
+ size_t source_count;
+};
+
+#define OTX2_GHES_ERR_RING_SIG ((int)'M' << 24 | 'R' << 16 | 'V' << 8 | 'L')
+
+#define OTX2_GHES_ERR_REC_FRU_TEXT_LEN 32
+
+struct processor_error {
+ struct cper_sec_proc_arm desc;
+ struct cper_arm_err_info info;
+};
+
+struct otx2_ghes_err_record {
+ union {
+ struct processor_error core;
+ struct cper_sec_mem_err_old mcc;
+ struct cper_sec_mem_err_old mdc;
+ struct cper_sec_mem_err_old lmc;
+ } u;
+ uint32_t severity; /* CPER_SEV_xxx */
+ char fru_text[OTX2_GHES_ERR_REC_FRU_TEXT_LEN];
+};
+
+/* This is shared with Linux sdei-ghes driver */
+struct otx2_ghes_err_ring {
+ uint32_t volatile head;
+ uint32_t volatile tail;
+ uint32_t size; /* ring size */
+ uint32_t sig; /* set to OTX2_GHES_ERR_RING_SIG if initialized */
+ uint32_t reg;
+ /* ring of records */
+ struct otx2_ghes_err_record records[1] __aligned(8);
+};
+
+#endif // __OTX2_SDEI_GHES_H__
diff --git a/drivers/soc/marvell/octeontx2-llc/Makefile b/drivers/soc/marvell/octeontx2-llc/Makefile
new file mode 100644
index 000000000000..85eb9d6f89dc
--- /dev/null
+++ b/drivers/soc/marvell/octeontx2-llc/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for Marvell's OcteonTX2 LLC driver
+#
+
+obj-$(CONFIG_OCTEONTX2_LLC) += octeontx2_llc.o
+
+octeontx2_llc-y := llc.o
diff --git a/drivers/soc/marvell/octeontx2-llc/llc.c b/drivers/soc/marvell/octeontx2-llc/llc.c
new file mode 100644
index 000000000000..3d624542f861
--- /dev/null
+++ b/drivers/soc/marvell/octeontx2-llc/llc.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/soc/marvell/llc.h>
+
+MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_DESCRIPTION("Last Level Cache Manager for OcteonTX2");
+MODULE_LICENSE("GPL v2");
+
+/* SYS instruction opcode for LLC Hit Writeback Invalidate */
+#define CVMCACHEWBIL3 "#0,c11,c1,#2"
+/* SYS instruction opcode for LLC Fetch and Lock */
+#define CVMCACHELCKL3 "#0,c11,c1,#4"
+/* LLC cache line granule size on OcteonTx2 */
+#define CVMCACHELCKL3_LINESIZE BIT_ULL(7)
+
+static spinlock_t llc_lock;
+
+static bool is_octeontx2(void)
+{
+ u32 model;
+
+ model = read_cpuid_id();
+ model &= MIDR_IMPLEMENTOR_MASK | MIDR_ARCHITECTURE_MASK |
+ MIDR_PARTNUM_MASK;
+
+ return (model == MIDR_MRVL_OCTEONTX2_98XX ||
+ model == MIDR_MRVL_OCTEONTX2_96XX ||
+ model == MIDR_MRVL_OCTEONTX2_95XX ||
+ model == MIDR_MRVL_OCTEONTX2_LOKI ||
+ model == MIDR_MRVL_OCTEONTX2_95MM);
+}
+
+int octeontx2_llc_unlock(phys_addr_t addr, int size)
+{
+ bool cacheline_unaligned = false;
+
+ /* Unlock not supported on other silicon */
+ if (!is_octeontx2())
+ return 0;
+
+ if (!addr || size < 0)
+ return -EINVAL;
+
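+ /*
+ * If the start address is not cache-line aligned, round it down and
+ * issue one extra operation after the loop to cover the trailing line.
+ */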
+ if ((addr & (CVMCACHELCKL3_LINESIZE-1)) && size) {
+ cacheline_unaligned = true;
+ addr -= (addr & (CVMCACHELCKL3_LINESIZE-1));
+ }
+
+ spin_lock(&llc_lock);
+
+ while (size > 0) {
+ /* write cache line back to memory, invalidate and unlock it in LLC */
+ asm volatile ("sys " CVMCACHEWBIL3 ", %0" : : "r" (addr));
+ addr += CVMCACHELCKL3_LINESIZE;
+ size -= CVMCACHELCKL3_LINESIZE;
+ }
+
+ if (cacheline_unaligned)
+ asm volatile ("sys " CVMCACHEWBIL3 ", %0" : : "r" (addr));
+
+ isb();
+ spin_unlock(&llc_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(octeontx2_llc_unlock);
+
+int octeontx2_llc_lock(phys_addr_t addr, int size)
+{
+ bool cacheline_unaligned = false;
+
+ /* Lock not supported on other silicon */
+ if (!is_octeontx2())
+ return 0;
+
+ if (!addr || size < 0)
+ return -EINVAL;
+
+ if ((addr & (CVMCACHELCKL3_LINESIZE-1)) && size) {
+ cacheline_unaligned = true;
+ addr -= (addr & (CVMCACHELCKL3_LINESIZE-1));
+ }
+
+ spin_lock(&llc_lock);
+
+ while (size > 0) {
+ /* Fill a block of memory into LLC and lock the cache line */
+ asm volatile ("sys " CVMCACHELCKL3 ", %0" : : "r" (addr));
+ addr += CVMCACHELCKL3_LINESIZE;
+ size -= CVMCACHELCKL3_LINESIZE;
+ }
+
+ if (cacheline_unaligned)
+ asm volatile ("sys " CVMCACHELCKL3 ", %0" : : "r" (addr));
+
+ isb();
+ spin_unlock(&llc_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(octeontx2_llc_lock);
+
+static int __init octx2_llc_init(void)
+{
+ spin_lock_init(&llc_lock);
+ return 0;
+}
+arch_initcall(octx2_llc_init);
diff --git a/drivers/soc/marvell/octeontx2-npa/Makefile b/drivers/soc/marvell/octeontx2-npa/Makefile
new file mode 100644
index 000000000000..50bfab3d5509
--- /dev/null
+++ b/drivers/soc/marvell/octeontx2-npa/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for Marvell's OcteonTX2 NPA PF driver
+#
+
+obj-$(CONFIG_OCTEONTX2_NPA_PF) += octeontx2_npa.o
+ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
+octeontx2_npa-y := npa.o
diff --git a/drivers/soc/marvell/octeontx2-npa/npa.c b/drivers/soc/marvell/octeontx2-npa/npa.c
new file mode 100644
index 000000000000..67971bd1ea69
--- /dev/null
+++ b/drivers/soc/marvell/octeontx2-npa/npa.c
@@ -0,0 +1,1774 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 NPA driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+#include <linux/of.h>
+#include <linux/if_vlan.h>
+#include <linux/mutex.h>
+#include <net/ip.h>
+#include <linux/iommu.h>
+
+#include "rvu.h"
+#include "rvu_reg.h"
+#include "mbox.h"
+#include "npa.h"
+#include "npa_api.h"
+
+#define DRV_NAME "octeontx2-npapf"
+#define DRV_VERSION "1.0"
+#define DRV_STRING "Marvell OcteonTX2 NPA Physical Function Driver"
+#define PCI_DEVID_OCTEONTX2_RVU_NPA_PF 0xA0FB
+
+/* Supported devices */
+static const struct pci_device_id otx2_npa_pf_id_table[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_NPA_PF)},
+ {0,} /* end of table */
+};
+
+MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_DESCRIPTION(DRV_STRING);
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, otx2_npa_pf_id_table);
+
+/* All PF devices found are stored here */
+static spinlock_t npa_lst_lock;
+LIST_HEAD(npa_dev_lst_head);
+static DECLARE_BITMAP(pf_bmp, NPA_MAX_PFS);
+static struct npa_dev_t *gnpa_pf_dev[NPA_MAX_PFS] = { NULL };
+
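+/*
+ * RVU register accessors: the offset encodes the block address and LF slot
+ * as ((block << 20) | (slot << 12) | offset).
+ */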
+static void npa_write64(struct npa_dev_t *rvu, u64 b, u64 s, u64 o, u64 v)
+{
+ writeq_relaxed(v,
+ rvu->mmio[NPA_REG_BASE].hw_addr +
+ ((b << 20) | (s << 12) | o));
+}
+
+static u64 npa_read64(struct npa_dev_t *rvu, u64 b, u64 s, u64 o)
+{
+ return readq_relaxed(rvu->mmio[NPA_REG_BASE].hw_addr +
+ ((b << 20) | (s << 12) | o));
+}
+
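+/*
+ * Copy a mailbox message into the given mailbox/device slot, send it and
+ * wait for the response.
+ */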
+static int
+forward_to_mbox(struct npa_dev_t *npa, struct otx2_mbox *mbox, int devid,
+ struct mbox_msghdr *req, int size, const char *mstr)
+{
+ struct mbox_msghdr *msg;
+ int res = 0;
+
+ msg = otx2_mbox_alloc_msg(mbox, devid, size);
+ if (msg == NULL)
+ return -ENOMEM;
+
+ memcpy((uint8_t *) msg + sizeof(struct mbox_msghdr),
+ (uint8_t *) req + sizeof(struct mbox_msghdr), size);
+ msg->id = req->id;
+ msg->pcifunc = req->pcifunc;
+ msg->sig = req->sig;
+ msg->ver = req->ver;
+ msg->rc = req->rc;
+
+ otx2_mbox_msg_send(mbox, devid);
+ res = otx2_mbox_wait_for_rsp(mbox, devid);
+ if (res == -EIO) {
+ dev_err(&npa->pdev->dev, "RVU %s MBOX timeout.\n", mstr);
+ goto err;
+ } else if (res) {
+ dev_err(&npa->pdev->dev, "RVU %s MBOX error: %d.\n", mstr, res);
+ res = -EFAULT;
+ goto err;
+ }
+
+ return 0;
+err:
+ return res;
+}
+
+static int
+handle_af_req(struct npa_dev_t *npa, struct rvu_vf *vf, struct mbox_msghdr *req,
+ int size)
+{
+ /* We expect a request here */
+ if (req->sig != OTX2_MBOX_REQ_SIG) {
+ dev_err(&npa->pdev->dev,
+ "UP MBOX msg with wrong signature %x, ID 0x%x\n",
+ req->sig, req->id);
+ return -EINVAL;
+ }
+
+ /* If handling notifications in the PF is required, add a switch-case here. */
+ return forward_to_mbox(npa, &npa->pfvf_mbox_up, vf->vf_id, req, size,
+ "VF");
+}
+
+static void npa_afpf_mbox_up_handler(struct work_struct *work)
+{
+ /* TODO: List MBOX uphandler operations */
+ struct npa_dev_t *npa_pf_dev;
+ struct otx2_mbox_dev *mdev;
+ struct mbox_hdr *req_hdr;
+ struct mbox_msghdr *msg;
+ struct otx2_mbox *mbox;
+ int offset, id, err;
+ struct rvu_vf *vf;
+ u16 vf_id;
+
+ npa_pf_dev = container_of(work, struct npa_dev_t, mbox_wrk_up);
+ mbox = &npa_pf_dev->afpf_mbox_up;
+ mdev = &mbox->dev[0];
+ /* sync with mbox memory region */
+ smp_rmb();
+
+ /* Process received mbox messages */
+ req_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ offset = ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
+
+ for (id = 0; id < req_hdr->num_msgs; id++) {
+ msg = (struct mbox_msghdr *)(mdev->mbase + mbox->rx_start +
+ offset);
+
+ if ((msg->pcifunc >> RVU_PFVF_PF_SHIFT) !=
+ (npa_pf_dev->pcifunc >> RVU_PFVF_PF_SHIFT)
+ || (msg->pcifunc & RVU_PFVF_FUNC_MASK) <=
+ npa_pf_dev->num_vfs)
+ err = -EINVAL;
+ else {
+ vf_id = msg->pcifunc & RVU_PFVF_FUNC_MASK;
+ vf = &npa_pf_dev->vf_info[vf_id];
+ err =
+ handle_af_req(npa_pf_dev, vf, msg,
+ msg->next_msgoff - offset);
+ }
+
+ if (err)
+ otx2_reply_invalid_msg(mbox, 0, msg->pcifunc, msg->id);
+ offset = msg->next_msgoff;
+ }
+
+ otx2_mbox_msg_send(mbox, 0);
+}
+
+static void npa_mbox_handler_msix_offset(struct npa_dev_t *pfvf,
+ struct msix_offset_rsp *rsp)
+{
+ pfvf->npa_msixoff = rsp->npa_msixoff;
+}
+
+static void npa_mbox_handler_lf_alloc(struct npa_dev_t *pfvf,
+ struct npa_lf_alloc_rsp *rsp)
+{
+ pfvf->stack_pg_ptrs = rsp->stack_pg_ptrs;
+ pfvf->stack_pg_bytes = rsp->stack_pg_bytes;
+}
+
+static irqreturn_t otx2_afpf_mbox_intr_handler(int irq, void *pf_irq)
+{
+ struct npa_dev_t *npa_pf_dev = (struct npa_dev_t *)pf_irq;
+ struct otx2_mbox_dev *mdev;
+ struct otx2_mbox *mbox;
+ struct mbox_hdr *hdr;
+
+ /* Read latest mbox data */
+ smp_rmb();
+
+ mbox = &npa_pf_dev->afpf_mbox;
+ mdev = &mbox->dev[0];
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ /* Handle PF => AF channel response */
+ if (hdr->num_msgs)
+ queue_work(npa_pf_dev->afpf_mbox_wq, &npa_pf_dev->mbox_wrk);
+
+ mbox = &npa_pf_dev->afpf_mbox_up;
+ mdev = &mbox->dev[0];
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ /* Handle AF => PF request */
+ if (hdr->num_msgs)
+ queue_work(npa_pf_dev->afpf_mbox_wq, &npa_pf_dev->mbox_wrk_up);
+
+ /* Clear the IRQ */
+ npa_write64(npa_pf_dev, BLKADDR_RVUM, 0, RVU_PF_INT, 0x1ULL);
+
+ return IRQ_HANDLED;
+}
+
+static inline void otx2_enable_afpf_mbox_intr(struct npa_dev_t *npa)
+{
+ /* Enable mailbox interrupt for msgs coming from AF.
+ * First clear to avoid spurious interrupts, if any.
+ */
+ npa_write64(npa, BLKADDR_RVUM, 0, RVU_PF_INT, BIT_ULL(0));
+ npa_write64(npa, BLKADDR_RVUM, 0, RVU_PF_INT_ENA_W1S, BIT_ULL(0));
+}
+
+static inline void otx2_disable_afpf_mbox_intr(struct npa_dev_t *npa)
+{
+ /* Clear interrupt if any */
+ npa_write64(npa, BLKADDR_RVUM, 0, RVU_PF_INT, BIT_ULL(0));
+ /* Disable AF => PF mailbox IRQ */
+ npa_write64(npa, BLKADDR_RVUM, 0, RVU_PF_INT_ENA_W1C, BIT_ULL(0));
+}
+
+static int otx2_alloc_afpf_mbox_intr(struct npa_dev_t *npa)
+{
+ struct pci_dev *pdev;
+ int err;
+
+ pdev = npa->pdev;
+ /* Register PF-AF interrupt handler */
+ sprintf(&npa->irq_names[RVU_PF_INT_VEC_AFPF_MBOX * NAME_SIZE],
+ "PF%02d_AF_MBOX_IRQ", pdev->devfn);
+ err =
+ request_irq(pci_irq_vector
+ (npa->pdev, RVU_PF_INT_VEC_AFPF_MBOX),
+ otx2_afpf_mbox_intr_handler, 0,
+ &npa->irq_names[RVU_PF_INT_VEC_AFPF_MBOX * NAME_SIZE],
+ npa);
+ if (err) {
+ dev_err(&npa->pdev->dev,
+ "RVUPF: IRQ registration failed for PFAF mbox irq\n");
+ return err;
+ }
+
+ otx2_enable_afpf_mbox_intr(npa);
+
+ return 0;
+}
+
+static void otx2_free_afpf_mbox_intr(struct npa_dev_t *npa)
+{
+ int vector = pci_irq_vector(npa->pdev, RVU_PF_INT_VEC_AFPF_MBOX);
+
+ otx2_disable_afpf_mbox_intr(npa);
+ free_irq(vector, npa);
+}
+
+static void otx2_process_afpf_mbox_msg(struct npa_dev_t *npa_pf_dev,
+ struct mbox_msghdr *msg, int size)
+{
+ struct otx2_mbox *vf_mbx;
+ struct mbox_msghdr *fwd;
+ struct device *dev;
+ struct rvu_vf *vf;
+ int vf_id;
+
+ dev = &npa_pf_dev->pdev->dev;
+ if (msg->id >= MBOX_MSG_MAX) {
+ dev_err(dev, "Mbox msg with unknown ID 0x%x\n", msg->id);
+ return;
+ }
+
+ if (msg->sig != OTX2_MBOX_RSP_SIG) {
+ dev_err(dev,
+ "Mbox msg with wrong signature %x, ID 0x%x\n",
+ msg->sig, msg->id);
+ return;
+ }
+
+ /* Message response is headed to a VF */
+ vf_id = msg->pcifunc & RVU_PFVF_FUNC_MASK;
+ vf_mbx = &npa_pf_dev->pfvf_mbox;
+
+ if (vf_id > 0) {
+ if (vf_id > npa_pf_dev->num_vfs) {
+ dev_err(&npa_pf_dev->pdev->dev,
+ "MBOX msg to unknown VF: %d >= %d\n",
+ vf_id, npa_pf_dev->num_vfs);
+ return;
+ }
+ vf = &npa_pf_dev->vf_info[vf_id - 1];
+ /* Ignore stale responses and VFs in FLR. */
+ if (!vf->in_use || vf->got_flr)
+ return;
+ fwd = otx2_mbox_alloc_msg(vf_mbx, vf_id - 1, size);
+ if (!fwd) {
+ dev_err(&npa_pf_dev->pdev->dev,
+ "Forwarding to VF%d failed.\n", vf_id);
+ return;
+ }
+ memcpy((uint8_t *) fwd + sizeof(struct mbox_msghdr),
+ (uint8_t *) msg + sizeof(struct mbox_msghdr), size);
+ fwd->id = msg->id;
+ fwd->pcifunc = msg->pcifunc;
+ fwd->sig = msg->sig;
+ fwd->ver = msg->ver;
+ fwd->rc = msg->rc;
+ } else {
+ switch (msg->id) {
+ case MBOX_MSG_READY:
+ npa_pf_dev->pcifunc = msg->pcifunc;
+ break;
+ case MBOX_MSG_MSIX_OFFSET:
+ npa_mbox_handler_msix_offset(npa_pf_dev,
+ (struct msix_offset_rsp *)
+ msg);
+ break;
+ case MBOX_MSG_NPA_LF_ALLOC:
+ npa_mbox_handler_lf_alloc(npa_pf_dev,
+ (struct npa_lf_alloc_rsp *)
+ msg);
+ break;
+ default:
+ if (msg->rc)
+ dev_err(&npa_pf_dev->pdev->dev,
+ "Mbox msg response has err %d, ID 0x%x\n",
+ msg->rc, msg->id);
+ break;
+ }
+ }
+}
+
+static void npa_afpf_mbox_handler(struct work_struct *work)
+{
+ struct npa_dev_t *npa_pf_dev;
+ struct otx2_mbox_dev *mdev;
+ struct mbox_hdr *rsp_hdr;
+ struct mbox_msghdr *msg;
+ struct otx2_mbox *mbox;
+ int offset, id, size;
+
+ npa_pf_dev = container_of(work, struct npa_dev_t, mbox_wrk);
+ mbox = &npa_pf_dev->afpf_mbox;
+ mdev = &mbox->dev[0];
+ rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ if (rsp_hdr->num_msgs == 0)
+ return;
+
+ offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+ for (id = 0; id < rsp_hdr->num_msgs; id++) {
+ msg = (struct mbox_msghdr *)(mdev->mbase + mbox->rx_start + offset);
+ size = msg->next_msgoff - offset;
+ otx2_process_afpf_mbox_msg(npa_pf_dev, msg, size);
+ offset = msg->next_msgoff;
+ mdev->msgs_acked++;
+ }
+
+ otx2_mbox_reset(mbox, 0);
+}
+
+static int
+reply_free_rsrc_cnt(struct npa_dev_t *npa, struct rvu_vf *vf,
+ struct mbox_msghdr *req, int size)
+{
+ struct free_rsrcs_rsp *rsp;
+
+ rsp = (struct free_rsrcs_rsp *)otx2_mbox_alloc_msg(&npa->pfvf_mbox,
+ vf->vf_id,
+ sizeof(*rsp));
+ if (rsp == NULL)
+ return -ENOMEM;
+
+ rsp->hdr.id = MBOX_MSG_FREE_RSRC_CNT;
+ rsp->hdr.pcifunc = req->pcifunc;
+ rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
+ return 0;
+}
+
+static int
+handle_vf_req(struct npa_dev_t *npa, struct rvu_vf *vf, struct mbox_msghdr *req,
+ int size)
+{
+ int err = 0;
+
+ /* Check if valid; if not, reply with an invalid msg */
+ if (req->sig != OTX2_MBOX_REQ_SIG) {
+ dev_err(&npa->pdev->dev,
+ "VF MBOX msg with wrong signature %x, ID 0x%x\n",
+ req->sig, req->id);
+ return -EINVAL;
+ }
+
+ if (req->ver < OTX2_MBOX_VERSION) {
+ dev_err(&npa->pdev->dev,
+ "VF MBOX msg with version %04x != %04x\n",
+ req->ver, OTX2_MBOX_VERSION);
+ return -EINVAL;
+ }
+ switch (req->id) {
+ case MBOX_MSG_READY:
+ vf->in_use = true;
+ err = forward_to_mbox(npa, &npa->afpf_mbox, 0, req, size, "AF");
+ break;
+ case MBOX_MSG_FREE_RSRC_CNT:
+ err = reply_free_rsrc_cnt(npa, vf, req, size);
+ break;
+ default:
+ err = forward_to_mbox(npa, &npa->afpf_mbox, 0, req, size, "AF");
+ break;
+ }
+
+ return err;
+}
+
+static int send_flr_msg(struct otx2_mbox *mbox, int dev_id, int pcifunc)
+{
+ struct msg_req *req;
+
+ req = (struct msg_req *)
+ otx2_mbox_alloc_msg(mbox, dev_id, sizeof(*req));
+ if (req == NULL)
+ return -ENOMEM;
+
+ req->hdr.pcifunc = pcifunc;
+ req->hdr.id = MBOX_MSG_VF_FLR;
+ req->hdr.sig = OTX2_MBOX_REQ_SIG;
+
+ otx2_mbox_msg_send(mbox, 0);
+
+ return 0;
+}
+
+static void npa_send_flr_msg(struct npa_dev_t *npa, struct rvu_vf *vf)
+{
+ int res, pcifunc;
+
+ pcifunc = vf->npa->pcifunc | ((vf->vf_id + 1) & RVU_PFVF_FUNC_MASK);
+
+ if (send_flr_msg(&npa->afpf_mbox, 0, pcifunc) != 0) {
+ dev_err(&npa->pdev->dev, "Sending FLR to AF failed\n");
+ return;
+ }
+
+ res = otx2_mbox_wait_for_rsp(&npa->afpf_mbox, 0);
+ if (res == -EIO)
+ dev_err(&npa->pdev->dev, "RVU AF MBOX timeout.\n");
+ else if (res)
+ dev_err(&npa->pdev->dev, "RVU MBOX error: %d.\n", res);
+}
+
+static void npa_pfvf_flr_handler(struct work_struct *work)
+{
+ struct rvu_vf *vf = container_of(work, struct rvu_vf, pfvf_flr_work);
+ struct npa_dev_t *npa = vf->npa;
+ struct otx2_mbox *mbox;
+
+ mbox = &npa->pfvf_mbox;
+
+ npa_send_flr_msg(npa, vf);
+
+ /* Disable interrupts from AF and wait for any pending
+ * responses to be handled for this VF and then reset the
+ * mailbox
+ */
+ otx2_disable_afpf_mbox_intr(npa);
+ flush_workqueue(npa->afpf_mbox_wq);
+ otx2_mbox_reset(mbox, vf->vf_id);
+ vf->in_use = false;
+ vf->got_flr = false;
+ otx2_enable_afpf_mbox_intr(npa);
+ npa_write64(npa, BLKADDR_RVUM, 0, RVU_PF_VFTRPENDX(vf->vf_id / 64),
+ BIT_ULL(vf->intr_idx));
+}
+
+static void npa_pfvf_mbox_handler_up(struct work_struct *work)
+{
+ struct otx2_mbox *af_mbx, *vf_mbx;
+ struct mbox_msghdr *msg, *fwd;
+ struct mbox_hdr *rsp_hdr;
+ struct npa_dev_t *npa;
+ int offset, i, size;
+ struct rvu_vf *vf;
+
+ /* Read latest mbox data */
+ smp_rmb();
+
+ vf = container_of(work, struct rvu_vf, mbox_wrk_up);
+ npa = vf->npa;
+ af_mbx = &npa->afpf_mbox;
+ vf_mbx = &npa->pfvf_mbox;
+ rsp_hdr = (struct mbox_hdr *)(vf_mbx->dev[vf->vf_id].mbase +
+ vf_mbx->rx_start);
+ if (rsp_hdr->num_msgs == 0)
+ return;
+ offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+
+ for (i = 0; i < rsp_hdr->num_msgs; i++) {
+ msg = (struct mbox_msghdr *)(vf_mbx->dev->mbase +
+ vf_mbx->rx_start + offset);
+ size = msg->next_msgoff - offset;
+
+ if (msg->sig != OTX2_MBOX_RSP_SIG) {
+ dev_err(&npa->pdev->dev,
+ "UP MBOX msg with wrong signature %x, ID 0x%x\n",
+ msg->sig, msg->id);
+ goto end;
+ }
+
+ /* override message value with actual values */
+ msg->pcifunc = npa->pcifunc | vf->vf_id;
+
+ fwd = otx2_mbox_alloc_msg(af_mbx, 0, size);
+ if (!fwd) {
+ dev_err(&npa->pdev->dev,
+ "UP Forwarding from VF%d to AF failed.\n",
+ vf->vf_id);
+ goto end;
+ }
+ memcpy((uint8_t *) fwd + sizeof(struct mbox_msghdr),
+ (uint8_t *) msg + sizeof(struct mbox_msghdr), size);
+ fwd->id = msg->id;
+ fwd->pcifunc = msg->pcifunc;
+ fwd->sig = msg->sig;
+ fwd->ver = msg->ver;
+ fwd->rc = msg->rc;
+end:
+ offset = msg->next_msgoff;
+ vf_mbx->dev->msgs_acked++;
+ }
+ otx2_mbox_reset(vf_mbx, vf->vf_id);
+}
+
+static void npa_pfvf_mbox_handler(struct work_struct *work)
+{
+ struct rvu_vf *vf = container_of(work, struct rvu_vf, mbox_wrk);
+ struct npa_dev_t *npa = vf->npa;
+ struct otx2_mbox_dev *mdev;
+ struct mbox_hdr *req_hdr;
+ struct mbox_msghdr *msg;
+ struct otx2_mbox *mbox;
+ int offset, id, err;
+
+ mbox = &npa->pfvf_mbox;
+ mdev = &mbox->dev[vf->vf_id];
+
+ /* sync with mbox memory region */
+ smp_rmb();
+
+ /* Process received mbox messages */
+ req_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ offset = ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
+ for (id = 0; id < req_hdr->num_msgs; id++) {
+ msg = (struct mbox_msghdr *)(mdev->mbase + mbox->rx_start +
+ offset);
+
+ /* Set which VF sent this message based on mbox IRQ */
+ msg->pcifunc =
+ npa->pcifunc | ((vf->vf_id + 1) & RVU_PFVF_FUNC_MASK);
+ err = handle_vf_req(npa, vf, msg, msg->next_msgoff - offset);
+ if (err)
+ otx2_reply_invalid_msg(mbox, vf->vf_id, msg->pcifunc,
+ msg->id);
+ offset = msg->next_msgoff;
+ }
+ /* Send mbox responses to VF */
+ if (mdev->num_msgs)
+ otx2_mbox_msg_send(mbox, vf->vf_id);
+}
+
+static int npa_afpf_mbox_init(struct npa_dev_t *npa_pf_dev)
+{
+ struct pci_dev *pdev;
+ int err;
+
+ pdev = npa_pf_dev->pdev;
+ npa_pf_dev->afpf_mbox_wq = alloc_workqueue("otx2_npa_pfaf_mailbox",
+ WQ_UNBOUND | WQ_HIGHPRI |
+ WQ_MEM_RECLAIM, 1);
+ if (!npa_pf_dev->afpf_mbox_wq)
+ return -ENOMEM;
+
+ err =
+ otx2_mbox_init(&npa_pf_dev->afpf_mbox,
+ npa_pf_dev->mmio[AFPF_MBOX_BASE].hw_addr, pdev,
+ npa_pf_dev->mmio[NPA_REG_BASE].hw_addr,
+ MBOX_DIR_PFAF, 1);
+ if (err) {
+ dev_err(&pdev->dev, "mbox init for pfaf failed\n");
+ goto destroy_mbox_wq;
+ }
+
+ err =
+ otx2_mbox_init(&npa_pf_dev->afpf_mbox_up,
+ npa_pf_dev->mmio[AFPF_MBOX_BASE].hw_addr, pdev,
+ npa_pf_dev->mmio[NPA_REG_BASE].hw_addr,
+ MBOX_DIR_PFAF_UP, 1);
+ if (err) {
+ dev_err(&pdev->dev, "mbox init for pfaf up failed\n");
+ goto destroy_mbox_afpf;
+ }
+
+ INIT_WORK(&npa_pf_dev->mbox_wrk, npa_afpf_mbox_handler);
+ INIT_WORK(&npa_pf_dev->mbox_wrk_up, npa_afpf_mbox_up_handler);
+ mutex_init(&npa_pf_dev->lock);
+ return 0;
+
+destroy_mbox_afpf:
+ otx2_mbox_destroy(&npa_pf_dev->afpf_mbox);
+destroy_mbox_wq:
+ destroy_workqueue(npa_pf_dev->afpf_mbox_wq);
+
+ return err;
+}
+
+static void __handle_vf_flr(struct npa_dev_t *npa, struct rvu_vf *vf_ptr)
+{
+ if (vf_ptr->in_use) {
+ /* Using the same MBOX workqueue here, so that we can
+ * synchronize with other VF->PF messages being forwarded to
+ * AF
+ */
+ vf_ptr->got_flr = true;
+ queue_work(npa->pfvf_mbox_wq, &vf_ptr->pfvf_flr_work);
+ } else {
+ npa_write64(npa, BLKADDR_RVUM, 0,
+ RVU_PF_VFTRPENDX(vf_ptr->vf_id / 64),
+ BIT_ULL(vf_ptr->intr_idx));
+ }
+}
+
+static irqreturn_t npa_pf_vf_flr_intr(int irq, void *pf_irq)
+{
+ struct npa_dev_t *npa = (struct npa_dev_t *)pf_irq;
+ struct rvu_vf *vf_ptr;
+ int vec, vf, i;
+ u64 intr;
+
+ /* Check which VF FLR has been raised and process accordingly */
+ for (vec = RVU_PF_INT_VEC_VFFLR0, i = 0;
+ vec + i <= RVU_PF_INT_VEC_VFFLR1; i++) {
+ /* Read the interrupt bits */
+ intr = npa_read64(npa, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(i));
+
+ for (vf = i * 64; vf < npa->num_vfs; vf++) {
+ vf_ptr = &npa->vf_info[vf];
+ if (intr & (1ULL << vf_ptr->intr_idx)) {
+ /* Clear the interrupts */
+ npa_write64(npa, BLKADDR_RVUM, 0,
+ RVU_PF_VFFLR_INTX(i),
+ BIT_ULL(vf_ptr->intr_idx));
+ __handle_vf_flr(npa, vf_ptr);
+ }
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
+{
+ struct npa_dev_t *npa = (struct npa_dev_t *)pf_irq;
+ struct otx2_mbox_dev *mdev;
+ struct otx2_mbox *mbox;
+ struct mbox_hdr *hdr;
+ struct rvu_vf *vf;
+ int i, vfi, vec;
+ u64 intr;
+
+ /* Check which VF has raised an interrupt and schedule corresponding
+ * workq to process the MBOX
+ */
+ for (vec = RVU_PF_INT_VEC_VFPF_MBOX0, i = 0;
+ vec + i <= RVU_PF_INT_VEC_VFPF_MBOX1; i++) {
+ /* Read the interrupt bits */
+ intr = npa_read64(npa, BLKADDR_RVUM, 0,
+ RVU_PF_VFPF_MBOX_INTX(i));
+
+ for (vfi = i * 64; vfi < npa->num_vfs; vfi++) {
+ vf = &npa->vf_info[vfi];
+ if ((intr & (1ULL << vf->intr_idx)) == 0)
+ continue;
+ mbox = &npa->pfvf_mbox;
+ mdev = &mbox->dev[vf->vf_id];
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ /* Handle VF => PF channel request */
+ if (hdr->num_msgs)
+ queue_work(npa->pfvf_mbox_wq, &vf->mbox_wrk);
+
+ mbox = &npa->pfvf_mbox_up;
+ mdev = &mbox->dev[vf->vf_id];
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ /* Handle PF => VF channel response */
+ if (hdr->num_msgs)
+ queue_work(npa->pfvf_mbox_wq, &vf->mbox_wrk_up);
+ /* Clear the interrupt */
+ npa_write64(npa, BLKADDR_RVUM, 0,
+ RVU_PF_VFPF_MBOX_INTX(i),
+ BIT_ULL(vf->intr_idx));
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+static void free_ptrs(struct otx2_npa_pool *pool, struct device *owner)
+{
+ struct iommu_domain *iommu_domain;
+ struct page *list_page;
+ struct ptr_pair *list;
+ u64 *next_ptr;
+ int i, cnt;
+
+ iommu_domain = iommu_get_domain_for_dev(owner);
+ list_page = pool->ptr_list_start;
+ while (pool->ptr_pair_cnt) {
+ list = page_to_virt(list_page);
+ if (pool->ptr_pair_cnt > pool->ptr_pairs_per_page)
+ cnt = pool->ptr_pairs_per_page;
+ else
+ cnt = pool->ptr_pair_cnt;
+ for (i = 0; i < cnt; i++) {
+ if (iommu_domain
+ && (iommu_domain->type != IOMMU_DOMAIN_IDENTITY))
+ dma_unmap_page_attrs(owner, list->iova,
+ pool->rbsize,
+ DMA_TO_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ __free_pages(list->page, pool->rbpage_order);
+ list++;
+ }
+ next_ptr = (u64 *) list;
+ __free_page(list_page);
+ list_page = (struct page *)*next_ptr;
+ pool->ptr_pair_cnt -= cnt;
+ }
+}
+
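+/*
+ * Remember each (page, iova) pair handed to the NPA so the buffers can be
+ * unmapped and freed later. Pairs are stored in a chain of pages; the u64
+ * slot after the last pair of a full page points to the next page.
+ */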
+static int record_ptrs(struct otx2_npa_pool *pool, dma_addr_t iova)
+{
+ struct page *ptr_list_page;
+ struct ptr_pair *pair;
+ u64 *next_ptr;
+
+ if (pool->ptr_list
+ && (pool->ptr_pairs_in_page < pool->ptr_pairs_per_page))
+ goto store_ptrs;
+
+ ptr_list_page = alloc_page(GFP_KERNEL | __GFP_COMP | __GFP_NOWARN);
+ if (unlikely(!ptr_list_page))
+ return -ENOMEM;
+
+ if (!pool->ptr_list_start)
+ pool->ptr_list_start = ptr_list_page;
+
+ if (pool->ptr_list) {
+ next_ptr = (u64 *) pool->ptr_list;
+ *next_ptr = (u64) ptr_list_page;
+ }
+ pool->ptr_list = page_to_virt(ptr_list_page);
+ pool->ptr_pairs_in_page = 0;
+
+store_ptrs:
+ pair = (struct ptr_pair *)pool->ptr_list;
+ pair->page = pool->page;
+ pair->iova = iova;
+ pool->ptr_list += sizeof(struct ptr_pair);
+ pool->ptr_pairs_in_page += 1;
+ pool->ptr_pair_cnt += 1;
+
+ return 0;
+}
+
+static dma_addr_t otx2_alloc_npa_buf(struct npa_dev_t *pfvf,
+ struct otx2_npa_pool *pool, gfp_t gfp,
+ struct device *owner)
+{
+ dma_addr_t iova;
+ struct iommu_domain *iommu_domain;
+
+ /* Check if the request can be accommodated in the previously allocated page */
+ if (pool->page && ((pool->page_offset + pool->rbsize) <= PAGE_SIZE)) {
+ page_ref_inc(pool->page);
+ goto ret;
+ }
+
+ /* Allocate a new page */
+ pool->page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
+ pool->rbpage_order);
+ if (unlikely(!pool->page))
+ return -ENOMEM;
+ pool->page_offset = 0;
+ret:
+ iommu_domain = iommu_get_domain_for_dev(owner);
+ if (iommu_domain && (iommu_domain->type == IOMMU_DOMAIN_IDENTITY)) {
+ iova = page_to_phys(pool->page) + pool->page_offset;
+ } else {
+ iova = dma_map_page_attrs(owner, pool->page, pool->page_offset,
+ pool->rbsize, DMA_TO_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ if (unlikely(dma_mapping_error(owner, iova)))
+ iova = (dma_addr_t) NULL;
+ }
+
+ if (!iova) {
+ if (!pool->page_offset)
+ __free_pages(pool->page, pool->rbpage_order);
+ pool->page = NULL;
+ return -ENOMEM;
+ }
+
+ record_ptrs(pool, iova);
+ pool->page_offset += pool->rbsize;
+ return iova;
+}
+
+static inline int npa_sync_mbox_msg(struct otx2_mbox *mbox)
+{
+ int err;
+
+ if (!otx2_mbox_nonempty(mbox, 0))
+ return 0;
+ otx2_mbox_msg_send(mbox, 0);
+ err = otx2_mbox_wait_for_rsp(mbox, 0);
+ if (err)
+ return err;
+
+ return otx2_mbox_check_rsp_msgs(mbox, 0);
+}
+
+static int otx2_npa_aura_init(struct npa_dev_t *npa, int aura_id,
+ int pool_id, int numptrs)
+{
+ struct npa_aq_enq_req *aq;
+ struct otx2_npa_pool *pool;
+ struct device *dev;
+ int err;
+
+ pool = npa->pools[aura_id];
+ dev = &npa->pdev->dev;
+
+ /* Allocate memory for HW to update Aura count.
+ * Alloc one cache line, so that it fits all FC_STYPE modes.
+ */
+ if (!pool->fc_addr) {
+ err = qmem_alloc(dev, &pool->fc_addr, 1, OTX2_ALIGN);
+ if (err)
+ return err;
+ }
+
+ /* Initialize this aura's context via AF */
+ aq = otx2_af_mbox_alloc_msg_npa_aq_enq(&npa->afpf_mbox);
+ if (!aq) {
+ /* Shared mbox memory buffer is full, flush it and retry */
+ err = npa_sync_mbox_msg(&npa->afpf_mbox);
+ if (err)
+ return err;
+ aq = otx2_af_mbox_alloc_msg_npa_aq_enq(&npa->afpf_mbox);
+ if (!aq)
+ return -ENOMEM;
+ }
+
+ aq->aura_id = aura_id;
+ /* Will be filled by AF with correct pool context address */
+ aq->aura.pool_addr = pool_id;
+ aq->aura.pool_caching = 1;
+ aq->aura.shift = ilog2(numptrs) - 8;
+ aq->aura.count = numptrs;
+ aq->aura.limit = numptrs;
+ aq->aura.avg_level = NPA_AURA_AVG_LVL;
+ aq->aura.ena = 1;
+ aq->aura.fc_ena = 1;
+ aq->aura.fc_addr = pool->fc_addr->iova;
+ aq->aura.fc_hyst_bits = 0; /* Store count on all updates */
+
+ /* Fill AQ info */
+ aq->ctype = NPA_AQ_CTYPE_AURA;
+ aq->op = NPA_AQ_INSTOP_INIT;
+
+ return 0;
+}
+
+static int otx2_npa_pool_init(struct npa_dev_t *pfvf, u16 pool_id,
+ int stack_pages, int numptrs, int buf_size)
+{
+ struct npa_aq_enq_req *aq;
+ struct otx2_npa_pool *pool;
+ struct device *dev;
+ int err;
+
+ dev = &pfvf->pdev->dev;
+ pool = pfvf->pools[pool_id];
+
+ /* Alloc memory for stack which is used to store buffer pointers */
+ err = qmem_alloc(dev, &pool->stack, stack_pages, pfvf->stack_pg_bytes);
+ if (err)
+ return err;
+
+ pool->rbsize = buf_size;
+ pool->rbpage_order = get_order(buf_size);
+
+ /* Initialize this pool's context via AF */
+ aq = otx2_af_mbox_alloc_msg_npa_aq_enq(&pfvf->afpf_mbox);
+ if (!aq) {
+ /* Shared mbox memory buffer is full, flush it and retry */
+ err = npa_sync_mbox_msg(&pfvf->afpf_mbox);
+ if (err) {
+ qmem_free(dev, pool->stack);
+ return err;
+ }
+ aq = otx2_af_mbox_alloc_msg_npa_aq_enq(&pfvf->afpf_mbox);
+ if (!aq) {
+ qmem_free(dev, pool->stack);
+ return -ENOMEM;
+ }
+ }
+
+ aq->aura_id = pool_id;
+ aq->pool.stack_base = pool->stack->iova;
+ aq->pool.stack_caching = 1;
+ aq->pool.ena = 1;
+ aq->pool.buf_size = buf_size / 128;
+ aq->pool.stack_max_pages = stack_pages;
+ aq->pool.shift = ilog2(numptrs) - 8;
+ aq->pool.ptr_start = 0;
+ aq->pool.ptr_end = ~0ULL;
+
+ /* Fill AQ info */
+ aq->ctype = NPA_AQ_CTYPE_POOL;
+ aq->op = NPA_AQ_INSTOP_INIT;
+
+ return 0;
+}
+
+u64 npa_alloc_buf(u32 aura)
+{
+ union aura_handle ah;
+ struct npa_dev_t *npa_pf_dev;
+
+ ah.handle = aura;
+ npa_pf_dev = gnpa_pf_dev[ah.s.pf_id];
+ return otx2_atomic64_add((u64) ah.s.aura | BIT_ULL(63),
+ npa_pf_dev->alloc_reg_ptr);
+}
+EXPORT_SYMBOL(npa_alloc_buf);
+
+u16 npa_pf_func(u32 aura)
+{
+ union aura_handle ah;
+ struct npa_dev_t *npa_pf_dev;
+
+ ah.handle = aura;
+ npa_pf_dev = gnpa_pf_dev[ah.s.pf_id];
+ return npa_pf_dev->pcifunc;
+}
+EXPORT_SYMBOL(npa_pf_func);
+
+void npa_free_buf(u32 aura, u64 buf)
+{
+ union aura_handle ah;
+ struct npa_dev_t *npa_pf_dev;
+
+ ah.handle = aura;
+ npa_pf_dev = gnpa_pf_dev[ah.s.pf_id];
+ otx2_write128((u64) buf, (u64) ah.s.aura | BIT_ULL(63),
+ npa_pf_dev->free_reg_addr);
+}
+EXPORT_SYMBOL(npa_free_buf);
+
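+/*
+ * Cache pointers to the NPA LF AURA_OP_ALLOC/AURA_OP_FREE registers so that
+ * npa_alloc_buf()/npa_free_buf() can access them directly.
+ */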
+static void npa_set_reg_ptrs(struct npa_dev_t *npa_pf_dev)
+{
+ void __iomem *reg_addr = npa_pf_dev->mmio[NPA_REG_BASE].hw_addr;
+ u64 offset = NPA_LF_AURA_OP_ALLOCX(0);
+
+ offset &= ~(RVU_FUNC_BLKADDR_MASK << RVU_FUNC_BLKADDR_SHIFT);
+ offset |= (BLKADDR_NPA << RVU_FUNC_BLKADDR_SHIFT);
+ npa_pf_dev->alloc_reg_ptr = (u64 *) (reg_addr + offset);
+
+ offset = NPA_LF_AURA_OP_FREE0;
+ offset &= ~(RVU_FUNC_BLKADDR_MASK << RVU_FUNC_BLKADDR_SHIFT);
+ offset |= (BLKADDR_NPA << RVU_FUNC_BLKADDR_SHIFT);
+ npa_pf_dev->free_reg_addr = (reg_addr + offset);
+}
+
+static int npa_lf_alloc(struct npa_dev_t *pfvf)
+{
+ struct npa_lf_alloc_req *npalf;
+ int err, aura_cnt;
+
+ npalf = otx2_af_mbox_alloc_msg_npa_lf_alloc(&pfvf->afpf_mbox);
+ if (!npalf)
+ return -ENOMEM;
+
+ /* Set aura and pool counts */
+ npalf->nr_pools = NPA_MAX_AURAS;
+ aura_cnt = ilog2(roundup_pow_of_two(npalf->nr_pools));
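+ /* aura_sz encoding: value n selects 2^(n+6) auras, 128 at minimum */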
+ npalf->aura_sz = (aura_cnt >= ilog2(128)) ? (aura_cnt - 6) : 1;
+
+ err = npa_sync_mbox_msg(&pfvf->afpf_mbox);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int otx2_npa_aura_pool_init(struct npa_dev_t *npa, int num_ptrs,
+ int buf_size, int aura_id, u32 *handle,
+ struct device *owner)
+{
+ struct otx2_npa_pool *pool;
+ union aura_handle ah;
+ struct device *dev;
+ int stack_pages;
+ int err, ptr;
+ s64 bufptr;
+
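+ /* Lazily attach the NPA LF and cache its op register pointers on first use */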
+ mutex_lock(&npa->lock);
+ if (!npa->alloc_reg_ptr) {
+ npa_lf_alloc(npa);
+ npa_set_reg_ptrs(npa);
+ }
+ mutex_unlock(&npa->lock);
+
+ dev = &npa->pdev->dev;
+ pool = devm_kzalloc(dev, sizeof(struct otx2_npa_pool), GFP_KERNEL);
+ if (!pool)
+ return -ENOMEM;
+
+ pool->ptr_list = NULL;
+ pool->ptr_pairs_in_page = 0;
+ pool->ptr_pairs_per_page =
+ (PAGE_SIZE - sizeof(u64)) / sizeof(struct ptr_pair);
+ pool->ptr_list_start = NULL;
+ pool->ptr_pair_cnt = 0;
+
+ npa->pools[aura_id] = pool;
+ /* Initialize aura context */
+ err = otx2_npa_aura_init(npa, aura_id, aura_id, num_ptrs);
+ if (err)
+ goto pool_init_fail;
+
+ stack_pages =
+ (num_ptrs + npa->stack_pg_ptrs - 1) / npa->stack_pg_ptrs;
+ err =
+ otx2_npa_pool_init(npa, aura_id, stack_pages, num_ptrs,
+ buf_size);
+ if (err)
+ goto pool_init_fail;
+
+ /* Flush accumulated messages */
+ err = npa_sync_mbox_msg(&npa->afpf_mbox);
+ if (err)
+ goto pool_init_fail;
+
+ /* Allocate pointers and free them to aura/pool */
+ for (ptr = 0; ptr < num_ptrs; ptr++) {
+ bufptr = otx2_alloc_npa_buf(npa, pool, GFP_KERNEL, owner);
+ if (bufptr <= 0)
+ return bufptr;
+ /* Free buffer to Aura */
+ otx2_write128((u64) bufptr, (u64) aura_id | BIT_ULL(63),
+ npa->free_reg_addr);
+ }
+ ah.s.aura = aura_id;
+ ah.s.pf_id = npa->pf_id;
+ /* Report the handle to caller */
+ *handle = ah.handle;
+ return 0;
+
+pool_init_fail:
+ otx2_mbox_reset(&npa->afpf_mbox, 0);
+ qmem_free(dev, pool->stack);
+ qmem_free(dev, pool->fc_addr);
+ devm_kfree(dev, pool);
+ return err;
+}
+
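+/*
+ * Exported entry point: pick the first NPA PF with a free aura, reserve an
+ * aura id on it and set up the aura/pool pair for the caller.
+ */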
+int npa_aura_pool_init(int pool_size, int buf_size, u32 *aura_handle,
+ struct device *owner)
+{
+ struct npa_dev_t *npa_pf;
+ int aura_id;
+ bool set;
+ int i;
+
+ for_each_set_bit(i, pf_bmp, NPA_MAX_PFS) {
+ npa_pf = gnpa_pf_dev[i];
+ set = true;
+ while (set) {
+ aura_id =
+ find_first_zero_bit(npa_pf->aura_bmp,
+ NPA_MAX_AURAS);
+ if (aura_id < NPA_MAX_AURAS)
+ set =
+ test_and_set_bit(aura_id, npa_pf->aura_bmp);
+ else
+ break;
+ }
+ if (!set)
+ break;
+ }
+
+ if (set) {
+ dev_err(owner, "Max aura limit reached\n");
+ return -ENOMEM;
+ }
+
+ return otx2_npa_aura_pool_init(npa_pf, pool_size, buf_size, aura_id,
+ aura_handle, owner);
+}
+EXPORT_SYMBOL(npa_aura_pool_init);
+
+static int npa_lf_aura_pool_fini(struct npa_dev_t *npa, u16 aura_id)
+{
+ struct npa_aq_enq_req *aura_req, *pool_req;
+ struct ndc_sync_op *ndc_req;
+ struct otx2_mbox *mbox;
+ int rc;
+
+ mbox = &npa->afpf_mbox;
+ /* Procedure for disabling an aura/pool */
+ usleep_range(10, 11);
+
+ /* TODO: Need to know why? */
+ otx2_atomic64_add((u64) aura_id | BIT_ULL(63), npa->alloc_reg_ptr);
+
+ pool_req = otx2_af_mbox_alloc_msg_npa_aq_enq(mbox);
+ pool_req->aura_id = aura_id;
+ pool_req->ctype = NPA_AQ_CTYPE_POOL;
+ pool_req->op = NPA_AQ_INSTOP_WRITE;
+ pool_req->pool.ena = 0;
+ pool_req->pool_mask.ena = ~pool_req->pool_mask.ena;
+
+ aura_req = otx2_af_mbox_alloc_msg_npa_aq_enq(mbox);
+ aura_req->aura_id = aura_id;
+ aura_req->ctype = NPA_AQ_CTYPE_AURA;
+ aura_req->op = NPA_AQ_INSTOP_WRITE;
+ aura_req->aura.ena = 0;
+ aura_req->aura_mask.ena = ~aura_req->aura_mask.ena;
+
+ rc = npa_sync_mbox_msg(&npa->afpf_mbox);
+ if (rc) {
+ dev_err(&npa->pdev->dev, "Aura pool finish failed\n");
+ return rc;
+ }
+
+ /* Sync NDC-NPA for LF */
+ ndc_req = otx2_af_mbox_alloc_msg_ndc_sync_op(mbox);
+ ndc_req->npa_lf_sync = 1;
+
+ rc = npa_sync_mbox_msg(&npa->afpf_mbox);
+ if (rc) {
+ dev_err(&npa->pdev->dev, "Error on NDC-NPA LF sync.\n");
+ return rc;
+ }
+ return 0;
+}
+
+int npa_aura_pool_fini(const u32 aura_handle, struct device *owner)
+{
+ struct npa_dev_t *npa;
+ union aura_handle ah;
+ u16 aura_id, pf_id;
+
+ ah.handle = aura_handle;
+ aura_id = ah.s.aura;
+ pf_id = ah.s.pf_id;
+ npa = gnpa_pf_dev[pf_id];
+ if (!test_bit(aura_id, npa->aura_bmp)) {
+ dev_info(&npa->pdev->dev, "Pool not active\n");
+ return 0;
+ }
+
+ npa_lf_aura_pool_fini(npa, aura_id);
+ qmem_free(&npa->pdev->dev, npa->pools[aura_id]->stack);
+ qmem_free(&npa->pdev->dev, npa->pools[aura_id]->fc_addr);
+ free_ptrs(npa->pools[aura_id], owner);
+ devm_kfree(&npa->pdev->dev, npa->pools[aura_id]);
+
+ clear_bit(aura_id, npa->aura_bmp);
+
+ return 0;
+}
+EXPORT_SYMBOL(npa_aura_pool_fini);
+
+static int npa_check_pf_usable(struct npa_dev_t *npa)
+{
+ u64 rev;
+
+ rev = npa_read64(npa, BLKADDR_RVUM, 0,
+ RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_RVUM));
+ rev = (rev >> 12) & 0xFF;
+ /* Check if AF has setup revision for RVUM block,
+ * otherwise this driver probe should be deferred
+ * until AF driver comes up.
+ */
+ if (!rev) {
+ dev_warn(&npa->pdev->dev,
+ "AF is not initialized, deferring probe\n");
+ return -EPROBE_DEFER;
+ }
+ return 0;
+}
+
+static int npa_register_mbox_intr(struct npa_dev_t *npa_pf_dev, bool probe_af)
+{
+ struct msg_req *req;
+ struct rsrc_attach *attach;
+ struct msg_req *msix;
+ int err;
+
+ /* Request and enable AF=>PF mailbox interrupt handler */
+ otx2_alloc_afpf_mbox_intr(npa_pf_dev);
+
+ if (!probe_af)
+ return 0;
+
+ /* Check mailbox communication with AF */
+ req = otx2_af_mbox_alloc_msg_ready(&npa_pf_dev->afpf_mbox);
+ if (!req) {
+ otx2_disable_afpf_mbox_intr(npa_pf_dev);
+ err = -ENOMEM;
+ goto err_free_intr;
+ }
+
+ err = npa_sync_mbox_msg(&npa_pf_dev->afpf_mbox);
+ if (err) {
+ dev_warn(&npa_pf_dev->pdev->dev,
+ "AF not responding to mailbox, deferring probe\n");
+ err = -EPROBE_DEFER;
+ goto err_free_intr;
+ }
+
+ mutex_lock(&npa_pf_dev->lock);
+ /* Get memory to put this msg */
+ attach =
+ otx2_af_mbox_alloc_msg_attach_resources(&npa_pf_dev->afpf_mbox);
+ if (!attach) {
+ mutex_unlock(&npa_pf_dev->lock);
+ err = -ENOMEM;
+ goto err_free_intr;
+ }
+
+ attach->npalf = true;
+ /* Send attach request to AF */
+ err = npa_sync_mbox_msg(&npa_pf_dev->afpf_mbox);
+ if (err) {
+ mutex_unlock(&npa_pf_dev->lock);
+ goto err_free_intr;
+ }
+
+ /* Get NPA MSIX vector offsets */
+ msix = otx2_af_mbox_alloc_msg_msix_offset(&npa_pf_dev->afpf_mbox);
+ if (!msix) {
+ mutex_unlock(&npa_pf_dev->lock);
+ err = -ENOMEM;
+ goto err_free_intr;
+ }
+
+ err = npa_sync_mbox_msg(&npa_pf_dev->afpf_mbox);
+ if (err) {
+ mutex_unlock(&npa_pf_dev->lock);
+ goto err_free_intr;
+ }
+
+ mutex_unlock(&npa_pf_dev->lock);
+
+ return 0;
+
+err_free_intr:
+ otx2_free_afpf_mbox_intr(npa_pf_dev);
+ return err;
+}
+
+static void npa_afpf_mbox_destroy(struct npa_dev_t *npa_pf_dev)
+{
+
+ if (npa_pf_dev->afpf_mbox_wq) {
+ flush_workqueue(npa_pf_dev->afpf_mbox_wq);
+ destroy_workqueue(npa_pf_dev->afpf_mbox_wq);
+ npa_pf_dev->afpf_mbox_wq = NULL;
+ }
+
+ otx2_mbox_destroy(&npa_pf_dev->afpf_mbox);
+ otx2_mbox_destroy(&npa_pf_dev->afpf_mbox_up);
+}
+
+static int otx2_npa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct npa_dev_t *npa;
+ int err, pos;
+
+ err = pcim_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to enable PCI device\n");
+ return err;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(&pdev->dev, "pci_request_regions failed\n");
+ goto err_disable_device;
+ }
+
+ pci_set_master(pdev);
+
+ npa = vzalloc(sizeof(struct npa_dev_t));
+ if (npa == NULL) {
+ err = -ENOMEM;
+ goto err_release_regions;
+ }
+
+ pci_set_drvdata(pdev, npa);
+ npa->pdev = pdev;
+
+ npa->mmio[NPA_REG_BASE].hw_addr = pcim_iomap(pdev, REG_BAR_NUM, 0);
+ dev_info(&pdev->dev, "REG BAR %p\n", npa->mmio[NPA_REG_BASE].hw_addr);
+
+ npa->mmio[AFPF_MBOX_BASE].hw_addr = ioremap_wc(pci_resource_start(pdev, MBOX_BAR_NUM),
+ pci_resource_len(pdev, MBOX_BAR_NUM));
+ dev_info(&pdev->dev, "MBOX BAR %p\n",
+ npa->mmio[AFPF_MBOX_BASE].hw_addr);
+
+ err = npa_check_pf_usable(npa);
+ if (err)
+ goto err_free_privdev;
+
+ npa->num_vec = pci_msix_vec_count(pdev);
+ err = pci_alloc_irq_vectors(pdev, npa->num_vec,
+ npa->num_vec, PCI_IRQ_MSIX);
+ if (err < 0) {
+ dev_err(&pdev->dev, "%s: Failed to alloc %d IRQ vectors\n",
+ __func__, npa->num_vec);
+ goto err_free_privdev;
+ }
+
+ npa->irq_names = kmalloc_array(npa->num_vec, NAME_SIZE, GFP_KERNEL);
+ if (!npa->irq_names) {
+ err = -ENOMEM;
+ goto err_free_irq_vectors;
+ }
+
+ err = npa_afpf_mbox_init(npa);
+ if (err) {
+ dev_err(&pdev->dev, "Mbox init failed\n");
+ goto err_free_irq_names;
+ }
+
+ /* Register mailbox interrupt */
+ err = npa_register_mbox_intr(npa, true);
+ if (err) {
+ dev_err(&pdev->dev, "Registering MBOX interrupt failed\n");
+ goto err_mbox_destroy;
+ }
+
+ spin_lock(&npa_lst_lock);
+ pos = find_first_zero_bit(pf_bmp, NPA_MAX_PFS);
+ if (pos < NPA_MAX_PFS) {
+ set_bit(pos, pf_bmp);
+ npa->pf_id = pos;
+ gnpa_pf_dev[pos] = npa;
+ }
+ spin_unlock(&npa_lst_lock);
+
+ return 0;
+err_mbox_destroy:
+ npa_afpf_mbox_destroy(npa);
+err_free_irq_names:
+ kfree(npa->irq_names);
+err_free_irq_vectors:
+ pci_free_irq_vectors(npa->pdev);
+err_free_privdev:
+ iounmap(npa->mmio[AFPF_MBOX_BASE].hw_addr);
+ pci_set_drvdata(pdev, NULL);
+ vfree(npa);
+err_release_regions:
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+ return err;
+}
+
+static void npa_disable_vf_flr_int(struct npa_dev_t *npa)
+{
+ struct pci_dev *pdev;
+ int ena_bits, vec, i;
+ u64 intr;
+
+ pdev = npa->pdev;
+ /* clear any pending interrupt */
+
+ intr = npa_read64(npa, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(0));
+ npa_write64(npa, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(0), intr);
+ intr = npa_read64(npa, BLKADDR_RVUM, 0, RVU_PF_VFTRPENDX(0));
+ npa_write64(npa, BLKADDR_RVUM, 0, RVU_PF_VFTRPENDX(0), intr);
+
+ if (npa->num_vfs > 64) { /* For VF 64 to 127(MAX) */
+ intr = npa_read64(npa, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(1));
+ npa_write64(npa, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(1), intr);
+ intr = npa_read64(npa, BLKADDR_RVUM, 0, RVU_PF_VFTRPENDX(1));
+ npa_write64(npa, BLKADDR_RVUM, 0, RVU_PF_VFTRPENDX(1), intr);
+ }
+
+ /* Disable for first 64 VFs here - up to the number of VFs enabled */
+ ena_bits = ((npa->num_vfs - 1) % 64);
+
+ npa_write64(npa, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INT_ENA_W1CX(0),
+ GENMASK_ULL(ena_bits, 0));
+
+ if (npa->num_vfs > 64) { /* For VF 64 to 127(MAX) */
+ /* Disable VF FLR interrupts for VFs 64 to 127 */
+ ena_bits = npa->num_vfs - 64 - 1;
+ npa_write64(npa, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INT_ENA_W1CX(1),
+ GENMASK_ULL(ena_bits, 0));
+ }
+
+ for (vec = RVU_PF_INT_VEC_VFFLR0, i = 0;
+ vec + i <= RVU_PF_INT_VEC_VFFLR1; i++)
+ free_irq(pci_irq_vector(pdev, vec + i), npa);
+}
+
+static int npa_enable_vf_flr_int(struct npa_dev_t *npa)
+{
+ struct pci_dev *pdev;
+ int err, vec, i;
+ int ena_bits;
+
+ pdev = npa->pdev;
+
+ /* Register for VF FLR interrupts
+ * There are 2 vectors starting at index 0x0
+ */
+ for (vec = RVU_PF_INT_VEC_VFFLR0, i = 0;
+ vec + i <= RVU_PF_INT_VEC_VFFLR1; i++) {
+ sprintf(&npa->irq_names[(vec + i) * NAME_SIZE],
+ "PF%02d_VF_FLR_IRQ%d", pdev->devfn, i);
+ err = request_irq(pci_irq_vector(pdev, vec + i),
+ npa_pf_vf_flr_intr, 0,
+ &npa->irq_names[(vec + i) * NAME_SIZE], npa);
+ if (err) {
+ dev_err(&pdev->dev,
+ "request_irq() failed for PFVF FLR intr %d\n",
+ vec);
+ return err;
+ }
+ }
+
+ /* Clear any pending interrupts */
+ npa_write64(npa, BLKADDR_RVUM, 0, RVU_PF_VFTRPENDX(0), ~0x0ULL);
+ npa_write64(npa, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(0), ~0x0ULL);
+
+ if (npa->num_vfs > 64) { /* For VF 64 to 127(MAX) */
+ npa_write64(npa, BLKADDR_RVUM, 0, RVU_PF_VFTRPENDX(1), ~0x0ULL);
+ npa_write64(npa, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(1),
+ ~0x0ULL);
+ }
+
+ /* Enable for first 64 VFs here - up to the number of VFs enabled */
+ ena_bits = ((npa->num_vfs - 1) % 64);
+ npa_write64(npa, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INT_ENA_W1SX(0),
+ GENMASK_ULL(ena_bits, 0));
+
+ if (npa->num_vfs > 64) { /* For VF 64 to 127(MAX) */
+ /* Enable VF FLR interrupts for VFs 64 to 127 */
+ ena_bits = npa->num_vfs - 64 - 1;
+ npa_write64(npa, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INT_ENA_W1SX(1),
+ GENMASK_ULL(ena_bits, 0));
+ }
+ return 0;
+}
+
+static int npa_enable_pfvf_mbox_intr(struct npa_dev_t *npa)
+{
+ int ena_bits, vec, err, i;
+ struct pci_dev *pdev;
+
+ /* Register for PF-VF mailbox interrupts
+ * There are 2 vectors starting at index 0x4
+ */
+ pdev = npa->pdev;
+ for (vec = RVU_PF_INT_VEC_VFPF_MBOX0, i = 0;
+ vec + i <= RVU_PF_INT_VEC_VFPF_MBOX1; i++) {
+ sprintf(&npa->irq_names[(vec + i) * NAME_SIZE],
+ "PF%02d_VF_MBOX_IRQ%d", pdev->devfn, i);
+ err = request_irq(pci_irq_vector(pdev, vec + i),
+ otx2_pfvf_mbox_intr_handler, 0,
+ &npa->irq_names[(vec + i) * NAME_SIZE], npa);
+ if (err) {
+ dev_err(&pdev->dev,
+ "request_irq() failed for PFVF Mbox intr %d\n",
+ vec + i);
+ return err;
+ }
+ }
+
+ /* Clear any pending interrupts */
+ npa_write64(npa, BLKADDR_RVUM, 0, RVU_PF_VFPF_MBOX_INTX(0), ~0x0ULL);
+
+ if (npa->num_vfs > 64) { /* For VF 64 to 127(MAX) */
+ npa_write64(npa, BLKADDR_RVUM, 0, RVU_PF_VFPF_MBOX_INTX(1),
+ ~0x0ULL);
+ }
+
+ /* Enable for first 64 VFs here - up to the number of VFs enabled */
+ ena_bits = ((npa->num_vfs - 1) % 64);
+ npa_write64(npa, BLKADDR_RVUM, 0, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0),
+ GENMASK_ULL(ena_bits, 0));
+
+ if (npa->num_vfs > 64) { /* For VF 64 to 127(MAX) */
+ /* Enable VF mailbox interrupts for VFs 64 to 127 */
+ ena_bits = npa->num_vfs - 64 - 1;
+ npa_write64(npa, BLKADDR_RVUM, 0,
+ RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
+ GENMASK_ULL(ena_bits, 0));
+ }
+
+ return 0;
+}
+
+static void npa_disable_pfvf_mbox_intr(struct npa_dev_t *npa)
+{
+ struct pci_dev *pdev;
+ int ena_bits, vec, i;
+ u64 intr;
+
+ intr = npa_read64(npa, BLKADDR_RVUM, 0, RVU_PF_VFPF_MBOX_INTX(0));
+ npa_write64(npa, BLKADDR_RVUM, 0, RVU_PF_VFPF_MBOX_INTX(0), intr);
+
+ if (npa->num_vfs > 64) { /* For VF 64 to 127(MAX) */
+ intr = npa_read64(npa, BLKADDR_RVUM, 0,
+ RVU_PF_VFPF_MBOX_INTX(1));
+ npa_write64(npa, BLKADDR_RVUM, 0,
+ RVU_PF_VFPF_MBOX_INTX(1), intr);
+ }
+
+ /* Disable for first 64 VFs here - up to the number of VFs enabled */
+ ena_bits = ((npa->num_vfs - 1) % 64);
+ npa_write64(npa, BLKADDR_RVUM, 0, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0),
+ GENMASK_ULL(ena_bits, 0));
+
+ if (npa->num_vfs > 64) { /* For VF 64 to 127(MAX) */
+ /* Disable VF mailbox interrupts for VFs 64 to 127 */
+ ena_bits = npa->num_vfs - 64 - 1;
+ npa_write64(npa, BLKADDR_RVUM, 0,
+ RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1),
+ GENMASK_ULL(ena_bits, 0));
+ }
+
+ pdev = npa->pdev;
+ for (vec = RVU_PF_INT_VEC_VFPF_MBOX0, i = 0;
+ vec + i <= RVU_PF_INT_VEC_VFPF_MBOX1; i++)
+ free_irq(pci_irq_vector(pdev, vec + i), npa);
+}
+
+static int otx2_npa_sriov_enable(struct pci_dev *pdev, int num_vfs)
+{
+ struct rvu_vf *vf_ptr;
+ struct npa_dev_t *npa;
+ u64 pf_vf_mbox_base;
+ int err, vf;
+
+ npa = pci_get_drvdata(pdev);
+
+ npa->vf_info = kcalloc(num_vfs, sizeof(struct rvu_vf), GFP_KERNEL);
+ if (npa->vf_info == NULL)
+ return -ENOMEM;
+
+ err = pci_enable_sriov(pdev, num_vfs);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to enable to SRIOV VFs: %d\n", err);
+ goto err_enable_sriov;
+ }
+
+ npa->num_vfs = num_vfs;
+
+ /* Map PF-VF mailbox memory.
+ * On CN10K platform, PF <-> VF mailbox region follows after
+ * PF <-> AF mailbox region.
+ */
+ if (pdev->subsystem_device == 0xB900)
+ pf_vf_mbox_base = pci_resource_start(pdev, PCI_MBOX_BAR_NUM) + MBOX_SIZE;
+ else
+ pf_vf_mbox_base = readq((void __iomem *)((u64)npa->mmio[NPA_REG_BASE].hw_addr +
+ RVU_PF_VF_BAR4_ADDR));
+
+ if (!pf_vf_mbox_base) {
+ dev_err(&pdev->dev, "PF-VF Mailbox address not configured\n");
+ err = -ENOMEM;
+ goto err_mbox_mem_map;
+ }
+ npa->mmio[PFVF_MBOX_BASE].hw_addr =
+ ioremap_wc(pf_vf_mbox_base, MBOX_SIZE * num_vfs);
+ if (!npa->mmio[PFVF_MBOX_BASE].hw_addr) {
+ dev_err(&pdev->dev,
+ "Mapping of PF-VF mailbox address failed\n");
+ err = -ENOMEM;
+ goto err_mbox_mem_map;
+ }
+ err =
+ otx2_mbox_init(&npa->pfvf_mbox, npa->mmio[PFVF_MBOX_BASE].hw_addr,
+ pdev, npa->mmio[NPA_REG_BASE].hw_addr, MBOX_DIR_PFVF,
+ num_vfs);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Failed to initialize PF/VF MBOX for %d VFs\n",
+ num_vfs);
+ goto err_mbox_init;
+ }
+ err =
+ otx2_mbox_init(&npa->pfvf_mbox_up,
+ npa->mmio[PFVF_MBOX_BASE].hw_addr, pdev,
+ npa->mmio[NPA_REG_BASE].hw_addr, MBOX_DIR_PFVF_UP,
+ num_vfs);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Failed to initialize PF/VF MBOX UP for %d VFs\n",
+ num_vfs);
+ goto err_mbox_up_init;
+ }
+
+ /* Allocate a single workqueue for VF/PF mailbox because access to
+ * AF/PF mailbox has to be synchronized.
+ */
+ npa->pfvf_mbox_wq =
+ alloc_workqueue("npa_pfvf_mailbox",
+ WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM, 1);
+ if (npa->pfvf_mbox_wq == NULL) {
+ dev_err(&pdev->dev,
+ "Workqueue allocation failed for PF-VF MBOX\n");
+ err = -ENOMEM;
+ goto err_workqueue_alloc;
+ }
+
+ for (vf = 0; vf < num_vfs; vf++) {
+ vf_ptr = &npa->vf_info[vf];
+ vf_ptr->vf_id = vf;
+ vf_ptr->npa = (void *)npa;
+ vf_ptr->intr_idx = vf % 64;
+ INIT_WORK(&vf_ptr->mbox_wrk, npa_pfvf_mbox_handler);
+ INIT_WORK(&vf_ptr->mbox_wrk_up, npa_pfvf_mbox_handler_up);
+ INIT_WORK(&vf_ptr->pfvf_flr_work, npa_pfvf_flr_handler);
+ }
+
+ err = npa_enable_pfvf_mbox_intr(npa);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Failed to initialize PF/VF MBOX intr for %d VFs\n",
+ num_vfs);
+ goto err_pfvf_mbox_intr;
+ }
+ err = npa_enable_vf_flr_int(npa);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Failed to initialize PF/VF MBOX intr for %d VFs\n",
+ num_vfs);
+ goto err_vf_flr_intr;
+ }
+ return num_vfs;
+
+err_vf_flr_intr:
+ npa_disable_pfvf_mbox_intr(npa);
+err_pfvf_mbox_intr:
+ destroy_workqueue(npa->pfvf_mbox_wq);
+err_workqueue_alloc:
+ if (npa->pfvf_mbox_up.dev != NULL)
+ otx2_mbox_destroy(&npa->pfvf_mbox_up);
+err_mbox_up_init:
+ if (npa->pfvf_mbox.dev != NULL)
+ otx2_mbox_destroy(&npa->pfvf_mbox);
+err_mbox_init:
+ iounmap(npa->mmio[PFVF_MBOX_BASE].hw_addr);
+err_mbox_mem_map:
+ pci_disable_sriov(pdev);
+err_enable_sriov:
+ kfree(npa->vf_info);
+
+ return err;
+}
+
+static int otx2_npa_sriov_disable(struct pci_dev *pdev)
+{
+ struct npa_dev_t *npa;
+
+ npa = pci_get_drvdata(pdev);
+ npa_disable_vf_flr_int(npa);
+ npa_disable_pfvf_mbox_intr(npa);
+
+ if (npa->pfvf_mbox_wq) {
+ flush_workqueue(npa->pfvf_mbox_wq);
+ destroy_workqueue(npa->pfvf_mbox_wq);
+ npa->pfvf_mbox_wq = NULL;
+ }
+
+ if (npa->mmio[PFVF_MBOX_BASE].hw_addr)
+ iounmap(npa->mmio[PFVF_MBOX_BASE].hw_addr);
+
+ otx2_mbox_destroy(&npa->pfvf_mbox);
+ otx2_mbox_destroy(&npa->pfvf_mbox_up);
+
+ pci_disable_sriov(pdev);
+
+ kfree(npa->vf_info);
+ npa->vf_info = NULL;
+
+ return 0;
+}
+
+static int otx2_npa_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ if (num_vfs == 0)
+ return otx2_npa_sriov_disable(pdev);
+ else
+ return otx2_npa_sriov_enable(pdev, num_vfs);
+}
+
+static void otx2_npa_remove(struct pci_dev *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct npa_dev_t *npa;
+ struct msg_req *req;
+ int err;
+
+ npa = pci_get_drvdata(pdev);
+
+ if (npa->num_vfs)
+ otx2_npa_sriov_disable(pdev);
+
+ req = otx2_af_mbox_alloc_msg_npa_lf_free(&npa->afpf_mbox);
+ if (!req) {
+ dev_err(dev, "Failed to allocate npa lf free req\n");
+ } else {
+ err = npa_sync_mbox_msg(&npa->afpf_mbox);
+ if (err)
+ dev_err(dev, "Failed to free lf\n");
+ }
+
+ otx2_af_mbox_alloc_msg_detach_resources(&npa->afpf_mbox);
+ err = npa_sync_mbox_msg(&npa->afpf_mbox);
+ if (err)
+ dev_err(dev, "Failed to detach resources\n");
+
+ otx2_free_afpf_mbox_intr(npa);
+ npa_afpf_mbox_destroy(npa);
+
+ kfree(npa->irq_names);
+
+ spin_lock(&npa_lst_lock);
+ gnpa_pf_dev[npa->pf_id] = NULL;
+ clear_bit(npa->pf_id, pf_bmp);
+ spin_unlock(&npa_lst_lock);
+
+ pci_free_irq_vectors(pdev);
+ /* Unmap regions */
+ iounmap(npa->mmio[AFPF_MBOX_BASE].hw_addr);
+ pci_set_drvdata(pdev, NULL);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ vfree(npa);
+}
+
+static struct pci_driver otx2_pf_driver = {
+ .name = DRV_NAME,
+ .id_table = otx2_npa_pf_id_table,
+ .probe = otx2_npa_probe,
+ .shutdown = otx2_npa_remove,
+ .remove = otx2_npa_remove,
+ .sriov_configure = otx2_npa_sriov_configure
+};
+
+static int __init otx2_npa_rvupf_init_module(void)
+{
+ pr_info("%s: %s\n", DRV_NAME, DRV_STRING);
+
+ spin_lock_init(&npa_lst_lock);
+ return pci_register_driver(&otx2_pf_driver);
+}
+
+static void __exit otx2_npa_rvupf_cleanup_module(void)
+{
+ pci_unregister_driver(&otx2_pf_driver);
+}
+
+module_init(otx2_npa_rvupf_init_module);
+module_exit(otx2_npa_rvupf_cleanup_module);
diff --git a/drivers/soc/marvell/octeontx2-npa/npa.h b/drivers/soc/marvell/octeontx2-npa/npa.h
new file mode 100644
index 000000000000..ed8b6d45769a
--- /dev/null
+++ b/drivers/soc/marvell/octeontx2-npa/npa.h
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTx2 NPA driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* PCI device BARs */
+#define REG_BAR_NUM 2
+#define MBOX_BAR_NUM 4
+
+#define NPA_MAX_PFS 16
+#define NPA_MAX_AURAS 128
+#define NPA_AURA_AVG_LVL 255
+#define NAME_SIZE 32
+
+#define RVU_PFVF_PF_SHIFT 10
+#define RVU_PFVF_PF_MASK 0x3F
+#define RVU_PFVF_FUNC_SHIFT 0
+#define RVU_PFVF_FUNC_MASK 0x3FF
+
+#define RVU_FUNC_BLKADDR_SHIFT 20
+#define RVU_FUNC_BLKADDR_MASK 0x1FULL
+
+/* NPA LF registers */
+#define NPA_LFBASE (BLKTYPE_NPA << RVU_FUNC_BLKADDR_SHIFT)
+#define NPA_LF_AURA_OP_ALLOCX(a) (NPA_LFBASE | 0x10 | (a) << 3)
+#define NPA_LF_AURA_OP_FREE0 (NPA_LFBASE | 0x20)
+#define NPA_LF_AURA_OP_FREE1 (NPA_LFBASE | 0x28)
+
+#if defined(CONFIG_ARM64)
+static inline void otx2_write128(u64 lo, u64 hi, void __iomem *addr)
+{
+ __asm__ volatile("stp %x[x0], %x[x1], [%x[p1],#0]!"
+ ::[x0]"r"(lo), [x1]"r"(hi), [p1]"r"(addr));
+}
+
+static inline u64 otx2_atomic64_add(u64 incr, u64 *ptr)
+{
+ u64 result;
+
+ __asm__ volatile(".cpu generic+lse\n"
+ "ldadd %x[i], %x[r], [%[b]]"
+ : [r]"=r"(result), "+m"(*ptr)
+ : [i]"r"(incr), [b]"r"(ptr)
+ : "memory");
+ return result;
+}
+#else
+#define otx2_write128(lo, hi, addr)
+#define otx2_atomic64_add(incr, ptr) ({ *(ptr) += incr; })
+#endif
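+
+/*
+ * Illustrative note (not part of this header): the helpers above back the
+ * NPA buffer fast path. An allocation is an atomic load-and-add on the
+ * aura's NPA_LF_AURA_OP_ALLOCX register and a free is a single 128-bit
+ * store of {buffer address, aura} to NPA_LF_AURA_OP_FREE0/1. Assuming the
+ * alloc_reg_ptr/free_reg_addr fields declared further below point at those
+ * mapped registers, the .c side is expected to do roughly:
+ *
+ *   u64 buf = otx2_atomic64_add((u64)aura | BIT_ULL(63), npa->alloc_reg_ptr);
+ *   otx2_write128(buf, (u64)aura | BIT_ULL(63), npa->free_reg_addr);
+ *
+ * The exact operand encoding is hardware-defined; consult the NPA HRM.
+ */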
+
+enum {
+ NPA_REG_BASE,
+ AFPF_MBOX_BASE,
+ PFVF_MBOX_BASE,
+ NPA_MEM_REGIONS,
+};
+
+struct ptr_pair {
+ struct page *page;
+ dma_addr_t iova;
+};
+
+struct otx2_npa_pool {
+ struct qmem *stack;
+ struct qmem *fc_addr;
+ u8 rbpage_order;
+ u16 rbsize;
+ u32 page_offset;
+ u16 pageref;
+ struct page *page;
+
+ /* Metadata of pointers */
+ u16 ptr_pairs_in_page;
+ u16 ptr_pairs_per_page;
+ u16 ptr_pair_cnt;
+ u8 *ptr_list;
+ struct page *ptr_list_start;
+};
+
+struct otx2_mmio {
+ /** PCI address to which the BAR is mapped. */
+ unsigned long start;
+ /** Length of this PCI address space. */
+ unsigned long len;
+ /** Length that has been mapped to phys. address space. */
+ unsigned long mapped_len;
+ /** The physical address to which the PCI address space is mapped. */
+ void __iomem *hw_addr;
+ /** Flag indicating the mapping was successful. */
+ int done;
+};
+
+struct npa_dev_t;
+struct rvu_vf {
+ struct work_struct mbox_wrk;
+ struct work_struct mbox_wrk_up;
+ struct work_struct pfvf_flr_work;
+ struct device_attribute in_use_attr;
+ struct pci_dev *pdev;
+ struct kobject *limits_kobj;
+ /* pointer to PF struct this PF belongs to */
+ struct npa_dev_t *npa;
+ int vf_id;
+ int intr_idx; /* vf_id%64 actually */
+ bool in_use;
+ bool got_flr;
+};
+
+struct npa_dev_t {
+ struct mutex lock;
+ struct pci_dev *pdev;
+ u64 *alloc_reg_ptr;
+ void __iomem *free_reg_addr;
+ u16 pcifunc;
+ u16 npa_msixoff;
+ u16 pf_id;
+ u16 num_vfs;
+ u16 num_vec;
+ u32 stack_pg_ptrs; /* No of ptrs per stack page */
+ u32 stack_pg_bytes; /* Size of stack page */
+ DECLARE_BITMAP(aura_bmp, NPA_MAX_AURAS);
+ char *irq_names;
+ struct workqueue_struct *afpf_mbox_wq;
+ struct workqueue_struct *pfvf_mbox_wq;
+ struct otx2_mbox pfvf_mbox; /* MBOXes for VF => PF channel */
+ struct otx2_mbox pfvf_mbox_up; /* MBOXes for PF => VF channel */
+ struct otx2_mbox afpf_mbox; /* MBOX for PF => AF channel */
+ struct otx2_mbox afpf_mbox_up; /* MBOX for AF => PF channel */
+ struct work_struct mbox_wrk;
+ struct work_struct mbox_wrk_up;
+ struct rvu_vf *vf_info;
+ struct otx2_npa_pool *pools[NPA_MAX_AURAS];
+ struct otx2_mmio mmio[NPA_MEM_REGIONS];
+};
+
+union aura_handle {
+ struct {
+ u32 aura:16;
+ u32 pf_id:16;
+ } s;
+ u32 handle;
+};
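+
+/* Illustrative example: on a little-endian build, pf_id 2 and aura 5 pack
+ * into handle 0x00020005, i.e. (pf_id << 16) | aura; the s.aura and s.pf_id
+ * bitfields recover the two halves.
+ */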
+
+#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
+static struct _req_type __maybe_unused \
+*otx2_af_mbox_alloc_msg_ ## _fn_name(struct otx2_mbox *mbox) \
+{ \
+ struct _req_type *req; \
+ \
+ req = (struct _req_type *)otx2_mbox_alloc_msg_rsp( \
+ mbox, 0, sizeof(struct _req_type), \
+ sizeof(struct _rsp_type)); \
+ if (!req) \
+ return NULL; \
+ req->hdr.sig = OTX2_MBOX_REQ_SIG; \
+ req->hdr.id = _id; \
+ return req; \
+}
+
+MBOX_MESSAGES
+#undef M
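+
+/* For reference: each MBOX_MESSAGES entry M(_name, _id, _fn_name, _req, _rsp)
+ * expands into an otx2_af_mbox_alloc_msg_<fn_name>() helper. A hypothetical
+ * entry such as M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, npa_lf_alloc_req,
+ * npa_lf_alloc_rsp) would therefore generate
+ * otx2_af_mbox_alloc_msg_npa_lf_alloc(), which reserves room for the
+ * request/response pair in the AF mailbox and fills in hdr.sig and hdr.id.
+ */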
diff --git a/drivers/soc/marvell/octeontx2-npa/npa_api.h b/drivers/soc/marvell/octeontx2-npa/npa_api.h
new file mode 100644
index 000000000000..728cbfdeabc1
--- /dev/null
+++ b/drivers/soc/marvell/octeontx2-npa/npa_api.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTx2 NPA driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Initialize aura pool pair */
+int npa_aura_pool_init(int pool_size, int buf_size, u32 *aura_handle,
+ struct device *owner);
+/* Teardown aura pool pair */
+int npa_aura_pool_fini(const u32 aura_handle, struct device *owner);
+u64 npa_alloc_buf(u32 aura);
+void npa_free_buf(u32 aura, u64 buf);
+/* Get PF function used for aura */
+u16 npa_pf_func(u32 aura);
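+
+/* Minimal usage sketch for in-kernel consumers (illustrative only; it assumes
+ * npa_aura_pool_init() returns 0 on success and that the pool/buffer sizes
+ * shown are acceptable to the hardware):
+ *
+ *   u32 aura;
+ *   u64 buf;
+ *
+ *   if (npa_aura_pool_init(1024, 2048, &aura, &pdev->dev))
+ *           return -ENOMEM;
+ *   buf = npa_alloc_buf(aura);
+ *   if (buf)
+ *           npa_free_buf(aura, buf);
+ *   npa_aura_pool_fini(aura, &pdev->dev);
+ */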
diff --git a/drivers/soc/marvell/octeontx2-pcicons/Makefile b/drivers/soc/marvell/octeontx2-pcicons/Makefile
new file mode 100644
index 000000000000..67167c38e148
--- /dev/null
+++ b/drivers/soc/marvell/octeontx2-pcicons/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for Marvell's OcteonTX2 PCI console driver
+#
+
+obj-$(CONFIG_OCTEONTX2_PCI_CONSOLE) += octeontx2_pcicons.o
+ccflags-y += -mno-outline-atomics
+octeontx2_pcicons-y := otx2-pci-console.o
diff --git a/drivers/soc/marvell/octeontx2-pcicons/otx2-pci-console.c b/drivers/soc/marvell/octeontx2-pcicons/otx2-pci-console.c
new file mode 100644
index 000000000000..a13ffd32c73a
--- /dev/null
+++ b/drivers/soc/marvell/octeontx2-pcicons/otx2-pci-console.c
@@ -0,0 +1,1350 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Supports the PCI Console when OcteonTX2 is running as an Endpoint.
+ *
+ */
+
+/* Implementation notes:
+ *
+ * There are four types of devices for which a driver is provided by this
+ * module:
+ *
+ * - PCI console nexus device
+ * - PCI console device
+ * - Linux console
+ * - Linux TTY
+ *
+ * The primary device to which the driver initially attaches is the
+ * 'PCI console nexus', represented in the Device Tree as
+ * 'pci-console-nexus@0x7f000000'.
+ *
+ * The driver entry points are declared in 'pci_console_nexus_driver'.
+ *
+ * During its initialization, the pci_console_nexus_driver locates its device
+ * memory and verifies that it has been configured appropriately
+ * (i.e. by U-Boot).
+ *
+ * Next, it registers a platform driver for the actual console devices;
+ * this driver's entry points are declared in 'pci_console_driver'.
+ *
+ * Finally, it populates the Device Tree with the console devices, which are
+ * represented in the Device Tree as 'pci-console@{0-7}'; these are children
+ * of the 'PCI console nexus' device.
+ *
+ * At this point, Linux will probe each new console device, which in turn will
+ * register a Linux console. The entry points for the Linux console are
+ * declared as part of the private state structure of the Linux console device
+ * (see the invocation of 'register_console()' in the function
+ * 'pci_console_init()').
+ *
+ * The Linux console device will, in turn, register a Linux TTY device. These
+ * device entry points are declared in 'pci_console_dev_tty_ops'.
+ *
+ * It is the Linux console & TTY devices which actually transfer data between
+ * Linux and the OcteonTX device memory; the OcteonTX device memory is accessed
+ * by the host remote console application. This data transfer uses low-level
+ * OcteonTX functions.
+ *
+ * Naming conventions:
+ *
+ * PCI console nexus device functions are named 'pci_console_nexus_xxx'.
+ * PCI console device functions are named 'pci_console_xxx'.
+ * Linux console device functions are named 'pci_console_dev_xxx'.
+ * Linux TTY device functions are named 'pci_console_dev_tty_xxx'.
+ * Low-level OcteonTX routines are named 'octeontx_console_xxx'.
+ *
+ */
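+
+/* Illustrative device-tree layout assumed by the drivers below (hedged
+ * sketch: the real node names, 'reg' encoding and the 0x7f000000 base are
+ * platform-specific and are normally set up by U-Boot):
+ *
+ *   pci-console-nexus@7f000000 {
+ *           compatible = "marvell,pci-console-nexus";
+ *           reg = <...>;   // nexus descriptor address/size
+ *
+ *           pci-console@0 {
+ *                   compatible = "marvell,pci-console";
+ *                   reg = <...>;   // console index/descriptor address
+ *           };
+ *   };
+ */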
+
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/uio_driver.h>
+#include <linux/irqchip/arm-gic-v3.h>
+#include <linux/dma-mapping.h>
+#include <linux/device.h>
+#include <linux/iommu.h>
+#include <linux/of_address.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/tty_driver.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include "otx2-pci-console.h"
+
+#define DRV_NAME "pci-console"
+#define NEXUS_DRV_NAME DRV_NAME "-nexus"
+
+/* copied from Octeon pci console driver */
+#define TTY_DRV_MAJOR_VER 4
+#define TTY_DRV_MINOR_VER_START 96
+
+#ifdef CONFIG_OCTEONTX2_PCI_CONSOLE_DEBUG
+# define dbgmsg(dev, ...) dev_info((dev), __VA_ARGS__)
+#else
+# define dbgmsg(dev, ...) (void)(dev)
+#endif // CONFIG_OCTEONTX2_PCI_CONSOLE_DEBUG
+
+static u32 max_consoles = 1;
+module_param(max_consoles, uint, 0644);
+MODULE_PARM_DESC(max_consoles, "Maximum console count to support");
+
+/* pci console driver prototypes */
+static void pci_console_dev_write(struct console *cons, const char *buf,
+ unsigned int len);
+static struct tty_driver *pci_console_dev_device(struct console *cons,
+ int *index);
+static int pci_console_dev_setup(struct console *cons, char *arg);
+static struct platform_driver pci_console_driver;
+
+/* pci console TTY driver prototypes */
+static int pci_console_dev_tty_open(struct tty_struct *tty, struct file *filp);
+static void pci_console_dev_tty_close(struct tty_struct *tty,
+ struct file *filp);
+static int pci_console_dev_tty_write(struct tty_struct *tty,
+ const unsigned char *buf, int count);
+static int pci_console_dev_tty_write_room(struct tty_struct *tty);
+static int pci_console_dev_tty_chars_in_buffer(struct tty_struct *tty);
+static void pci_console_dev_tty_send_xchar(struct tty_struct *tty, char ch);
+
+/* TTY driver operations table */
+static const struct tty_operations pci_console_dev_tty_ops = {
+ .open = pci_console_dev_tty_open,
+ .close = pci_console_dev_tty_close,
+ .write = pci_console_dev_tty_write,
+ .write_room = pci_console_dev_tty_write_room,
+ .chars_in_buffer = pci_console_dev_tty_chars_in_buffer,
+ .send_xchar = pci_console_dev_tty_send_xchar,
+};
+
+static u32 max_cons_mask;
+
+/*
+ * Utility function; returns the number of free bytes in the buffer.
+ *
+ * @param buffer_size size of buffer
+ * @param wr_idx write index
+ * @param rd_idx read index
+ *
+ * @return number of bytes free
+ */
+static int buffer_free_bytes(size_t buffer_size, u32 wr_idx, u32 rd_idx)
+{
+ if (rd_idx >= buffer_size || wr_idx >= buffer_size)
+ return -1;
+ return ((buffer_size - 1) - (wr_idx - rd_idx)) % buffer_size;
+}
+
+/*
+ * Utility function; returns the number of pending bytes (i.e. data) in the
+ * buffer.
+ *
+ * @param buffer_size size of buffer
+ * @param wr_idx write index
+ * @param rd_idx read index
+ *
+ * @return number of pending data bytes
+ */
+static int buffer_pending_bytes(size_t buffer_size, u32 wr_idx, u32 rd_idx)
+{
+ if (rd_idx >= buffer_size || wr_idx >= buffer_size)
+ return -1;
+ return buffer_size - 1 -
+ buffer_free_bytes(buffer_size, wr_idx, rd_idx);
+}
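+
+/*
+ * Worked example for the two helpers above: with buffer_size = 16,
+ * wr_idx = 3 and rd_idx = 12, the ring holds (3 - 12) mod 16 = 7 pending
+ * bytes and 15 - 7 = 8 free bytes; one slot is always kept unused so that
+ * wr_idx == rd_idx unambiguously means "empty".
+ */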
+
+/* ======================== pci console nexus driver ======================== */
+
+/*
+ * Check that the console version is acceptable.
+ */
+static bool pci_console_nexus_check_ver(u8 major, u8 minor)
+{
+ if (major > OCTEONTX_PCIE_CONSOLE_MAJOR)
+ return true;
+ if (major == OCTEONTX_PCIE_CONSOLE_MAJOR &&
+ minor >= OCTEONTX_PCIE_CONSOLE_MINOR)
+ return true;
+ return false;
+}
+
+/*
+ * Used to initialize access to nexus memory.
+ */
+static int pci_console_nexus_init_resources(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct pci_console_nexus *pci_cons_nexus = platform_get_drvdata(pdev);
+ struct device_node *of_node;
+ const __be32 *of_base;
+ u64 of_xbase, of_size;
+ int ret;
+
+ dbgmsg(dev, "%s: entry\n", __func__);
+
+ WARN_ON(!pci_cons_nexus);
+
+ ret = -ENODEV;
+
+ pci_cons_nexus->of_node = of_node = pdev->dev.of_node;
+ if (!of_node) {
+ dev_err(dev, "Missing devicetree configuration\n");
+ goto exit;
+ }
+
+ of_base = of_get_address(of_node, 0, &of_size, 0);
+ if (!of_base) {
+ dev_err(dev, "Missing configuration base address\n");
+ goto exit;
+ }
+
+ of_xbase = of_translate_address(of_node, of_base);
+ /* TODO: verify we can use WC */
+ if (of_xbase != OF_BAD_ADDR)
+ pci_cons_nexus->desc =
+ ioremap_wc(of_xbase, of_size);
+
+ if (!pci_cons_nexus->desc) {
+ dev_err(dev, "Invalid configuration base address\n");
+ goto exit;
+ }
+
+ dbgmsg(dev, "of_base: %p (%llx), of_size: %llx, nexus:%p\n",
+ of_base, of_xbase, of_size, pci_cons_nexus->desc);
+
+ ret = 0;
+
+exit:
+ return ret ? -ENODEV : 0;
+}
+
+static int pci_console_nexus_de_init_resources(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct pci_console_nexus *pci_cons_nexus = platform_get_drvdata(pdev);
+
+ dbgmsg(dev, "%s: entry\n", __func__);
+
+ if (pci_cons_nexus && pci_cons_nexus->desc) {
+ iounmap(pci_cons_nexus->desc);
+ pci_cons_nexus->desc = NULL;
+ }
+
+ return 0;
+}
+
+/*
+ * This is used to initialize the console nexus state.
+ */
+static int pci_console_nexus_init(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct pci_console_nexus *pci_cons_nexus = platform_get_drvdata(pdev);
+ struct octeontx_pcie_console_nexus __iomem *nexus;
+ struct device_node *child_node;
+ uint num_consoles;
+ int ret;
+
+ dbgmsg(dev, "%s: entry\n", __func__);
+
+ WARN_ON(!pci_cons_nexus);
+
+ ret = -ENODEV;
+
+ nexus = pci_cons_nexus->desc;
+ if (!nexus)
+ goto exit;
+
+ /* Verify/use existing configuration (i.e. from U-Boot) */
+ if (readq(&nexus->magic) !=
+ cpu_to_le64(OCTEONTX_PCIE_CONSOLE_NEXUS_MAGIC)) {
+ dev_err(dev, "Invalid nexus signature (0x%llx).\n",
+ (long long)readq(&nexus->magic));
+ goto exit;
+ }
+
+ if (!pci_console_nexus_check_ver(readb(&nexus->major_version),
+ readb(&nexus->minor_version))) {
+ dev_err(dev,
+ "Unsupported nexus version %u.%u (%u.%u)\n)",
+ readb(&nexus->major_version),
+ readb(&nexus->minor_version),
+ OCTEONTX_PCIE_CONSOLE_MAJOR,
+ OCTEONTX_PCIE_CONSOLE_MINOR);
+ goto exit;
+ }
+
+ if (!readb(&nexus->num_consoles)) {
+ dev_err(dev, "No consoles present");
+ goto exit;
+ }
+
+ /* enumerate 'available' consoles present in device tree */
+ num_consoles = 0;
+ for_each_available_child_of_node(pci_cons_nexus->of_node,
+ child_node)
+ if (of_device_is_compatible(child_node,
+ "marvell,pci-console"))
+ num_consoles++;
+
+ if (num_consoles < readb(&nexus->num_consoles)) {
+ dev_err(dev,
+ "Console count mismatch: DT %d, nexus: %d\n",
+ num_consoles, readb(&nexus->num_consoles));
+ goto exit;
+ }
+
+ dbgmsg(dev,
+ "Console nexus initialized: ver %u.%u, %u consoles available\n",
+ readb(&nexus->major_version), readb(&nexus->minor_version),
+ readb(&nexus->num_consoles));
+
+ ret = 0;
+
+exit:
+ return ret ? -ENODEV : 0;
+}
+
+/*
+ * This is the main probe routine for the console nexus driver.
+ */
+static int pci_console_nexus_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct pci_console_nexus *pci_cons_nexus;
+ bool registered;
+ int ret;
+
+ BUILD_BUG_ON(offsetof(struct octeontx_pcie_console_nexus, console_addr)
+ != 128);
+
+ dbgmsg(dev, "%s: entry, max_consoles %d\n", __func__, max_consoles);
+
+ max_cons_mask = BIT(max_consoles) - 1;
+
+ pci_cons_nexus = NULL;
+ registered = false;
+
+ ret = -ENODEV;
+
+ /* allocate device structure */
+ pci_cons_nexus = devm_kzalloc(dev, sizeof(*pci_cons_nexus),
+ GFP_KERNEL);
+
+ if (pci_cons_nexus == NULL) {
+ ret = -ENOMEM;
+ dev_err(dev, "Unable to allocate drv context.\n");
+ goto exit;
+ }
+
+ platform_set_drvdata(pdev, pci_cons_nexus);
+
+ ret = pci_console_nexus_init_resources(pdev);
+ if (ret)
+ goto exit;
+
+ ret = pci_console_nexus_init(pdev);
+ if (ret)
+ goto exit;
+
+ dev_info(dev, "Registering child console driver...\n");
+
+ ret = platform_driver_register(&pci_console_driver);
+
+ if (ret) {
+ dev_err(dev,
+ "Error %d registering child console driver\n",
+ ret);
+ goto exit;
+ } else
+ registered = true;
+
+ ret = of_platform_populate(pci_cons_nexus->of_node, NULL, NULL,
+ dev);
+
+ if (ret) {
+ dev_err(dev, "Error %d populating children of %s\n",
+ ret,
+ of_node_full_name(pci_cons_nexus->of_node));
+ goto exit;
+ }
+
+ ret = 0;
+
+exit:
+ if (ret) {
+ if (registered)
+ platform_driver_unregister(&pci_console_driver);
+
+ pci_console_nexus_de_init_resources(pdev);
+
+ if (pci_cons_nexus != NULL)
+ devm_kfree(dev, pci_cons_nexus);
+ }
+
+ return ret;
+}
+
+static void pci_console_nexus_shutdown(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ dbgmsg(dev, "%s: entry\n", __func__);
+}
+
+/*
+ * Linux driver callback.
+ */
+static int pci_console_nexus_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct pci_console_nexus *pci_cons_nexus = platform_get_drvdata(pdev);
+
+ dbgmsg(dev, "%s: entry\n", __func__);
+
+ WARN_ON(!pci_cons_nexus);
+
+ of_platform_depopulate(dev);
+
+ platform_driver_unregister(&pci_console_driver);
+
+ pci_console_nexus_de_init_resources(pdev);
+
+ devm_kfree(dev, pci_cons_nexus);
+
+ return 0;
+}
+
+static const struct of_device_id pci_console_nexus_of_match[] = {
+ { .compatible = "marvell,pci-console-nexus", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, pci_console_nexus_of_match);
+
+static const struct platform_device_id pci_console_nexus_pdev_match[] = {
+ { .name = NEXUS_DRV_NAME, },
+ {},
+};
+MODULE_DEVICE_TABLE(platform, pci_console_nexus_pdev_match);
+
+static struct platform_driver pci_console_nexus_driver = {
+ .driver = {
+ .name = NEXUS_DRV_NAME,
+ .of_match_table = pci_console_nexus_of_match,
+ },
+ .probe = pci_console_nexus_probe,
+ .remove = pci_console_nexus_remove,
+ .shutdown = pci_console_nexus_shutdown,
+ .id_table = pci_console_nexus_pdev_match,
+};
+
+module_platform_driver(pci_console_nexus_driver);
+
+MODULE_DESCRIPTION("OcteonTX PCI Console Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" NEXUS_DRV_NAME);
+
+/* =========================== pci console driver =========================== */
+
+/*
+ * Low-level initialization function for octeontx console state.
+ */
+static int octeontx_console_init(struct device *dev,
+ struct pci_console *pci_cons, int index,
+ u64 cons_addr, u64 cons_size)
+{
+ int ret;
+ u32 cons_num;
+ struct octeontx_pcie_console __iomem *ring_descr;
+
+ /* see notes in structure declaration regarding these elements */
+ BUILD_BUG_ON(offsetof(struct octeontx_pcie_console,
+ host_console_connected) & 0x7);
+ BUILD_BUG_ON((offsetof(struct octeontx_pcie_console,
+ host_console_connected) + sizeof(u32)) !=
+ offsetof(struct octeontx_pcie_console, output_read_index));
+
+ dbgmsg(dev, "%s: entry\n", __func__);
+
+ ret = -ENODEV;
+ cons_num = index;
+
+ if (!cons_addr) {
+ dev_err(dev, "Missing console base address\n");
+ goto exit;
+ }
+
+ /* map the ring descriptor from the nexus */
+ /* TODO: verify we can use WC */
+ pci_cons->ring_descr = ioremap_wc(cons_addr, cons_size);
+ if (!pci_cons->ring_descr) {
+ dev_err(dev,
+ "Unable to remap console %d base address\n",
+ cons_num);
+ goto exit;
+ }
+
+ ring_descr = pci_cons->ring_descr;
+
+ /* Here, we verify/use existing configuration
+ * (i.e. from U-Boot).
+ *
+ * If this changes and the console is initialized here,
+ * then the pcie_lock must be taken/released around
+ * the init code.
+ */
+
+ if (readq(&ring_descr->magic) !=
+ cpu_to_le64(OCTEONTX_PCIE_CONSOLE_MAGIC)) {
+ dev_err(dev, "Invalid console %d signature\n",
+ cons_num);
+ goto exit;
+ }
+
+ /* Implementation note: using 'u32' will catch negative vals */
+ if (((u32)le32_to_cpu(readl(&ring_descr->input_read_index)) >=
+ le32_to_cpu(readl(&ring_descr->input_buf_size))) ||
+ ((u32)le32_to_cpu(readl(&ring_descr->input_write_index)) >=
+ le32_to_cpu(readl(&ring_descr->input_buf_size))) ||
+ ((u32)le32_to_cpu(readl(&ring_descr->output_read_index)) >=
+ le32_to_cpu(readl(&ring_descr->output_buf_size))) ||
+ ((u32)le32_to_cpu(readl(&ring_descr->output_write_index)) >=
+ le32_to_cpu(readl(&ring_descr->output_buf_size))) ||
+ !readl(&ring_descr->input_buf_size) ||
+ !readl(&ring_descr->output_buf_size) ||
+ !readq(&ring_descr->input_base_addr) ||
+ !readq(&ring_descr->output_base_addr)) {
+ dev_err(dev, "Invalid console %d ring configuration\n",
+ cons_num);
+ goto exit;
+ }
+
+ /* map the input buffer */
+ pci_cons->input_ring = ioremap_wc(readq(&ring_descr->input_base_addr),
+ readl(&ring_descr->input_buf_size));
+
+ /* map the output buffer */
+ pci_cons->output_ring = ioremap_wc(readq(&ring_descr->output_base_addr),
+ readl(&ring_descr->output_buf_size));
+
+ if (!pci_cons->input_ring || !pci_cons->output_ring) {
+ dev_err(dev,
+ "Unable to remap console %d memory ring[s]\n",
+ cons_num);
+ goto exit;
+ }
+
+ writel(cpu_to_le32(cons_num), &ring_descr->host.cons_idx);
+ spin_lock_init(&pci_cons->excl_lock[cons_num]);
+
+ ret = 0;
+
+exit:
+ return ret ? -ENODEV : 0;
+}
+
+/*
+ * Low-level de-initialization function for octeontx console state.
+ */
+static int octeontx_console_de_init(struct device *dev,
+ struct pci_console *pci_cons, int index,
+ u64 cons_addr, u64 cons_size)
+{
+ dbgmsg(dev, "%s: entry\n", __func__);
+
+ if (pci_cons->input_ring) {
+ iounmap(pci_cons->input_ring);
+ pci_cons->input_ring = NULL;
+ }
+
+ if (pci_cons->output_ring) {
+ iounmap(pci_cons->output_ring);
+ pci_cons->output_ring = NULL;
+ }
+
+ if (pci_cons->ring_descr) {
+ iounmap(pci_cons->ring_descr);
+ pci_cons->ring_descr = NULL;
+ }
+
+ return 0;
+}
+
+/*
+ * Used to acquire or release a low-level octeontx console.
+ */
+static bool
+octeontx_console_acquire(struct octeontx_pcie_console_nexus __iomem *nexus_desc,
+ int index, bool acquire, u64 *old, u64 *new)
+{
+ bool b_ok;
+ u32 wait_usecs;
+ u64 old_use_mask, new_use_mask;
+ int cons_num;
+
+ b_ok = false;
+
+ if (!nexus_desc)
+ return b_ok;
+
+ cons_num = index;
+ wait_usecs = 0;
+
+#define CONSOLE_NEXUS_IN_USE_WAIT_USECS 1
+#define CONSOLE_NEXUS_IN_USE_TIMEOUT_USECS 100
+
+ do {
+ old_use_mask = le32_to_cpu(readl(&nexus_desc->in_use)) |
+ (u64)le32_to_cpu(readl(
+ &nexus_desc->exclusive)) << 32;
+
+ /* set (or clear) both 'in-use' and 'exclusive' bits */
+ new_use_mask = ((1ULL << cons_num) |
+ ((1ULL << cons_num) << 32));
+
+ if (acquire) {
+ /* Check if console has already been acquired */
+ if (old_use_mask & (1ULL << cons_num))
+ break;
+ new_use_mask = old_use_mask | new_use_mask;
+ } else {
+ new_use_mask = old_use_mask & ~new_use_mask;
+ }
+
+ b_ok = (__atomic_compare_exchange_n((u64 *)&nexus_desc->in_use,
+ &old_use_mask, new_use_mask,
+ false, __ATOMIC_SEQ_CST,
+ __ATOMIC_SEQ_CST));
+ if (b_ok)
+ break;
+
+ udelay(CONSOLE_NEXUS_IN_USE_WAIT_USECS);
+ wait_usecs += CONSOLE_NEXUS_IN_USE_WAIT_USECS;
+
+ } while (wait_usecs < CONSOLE_NEXUS_IN_USE_TIMEOUT_USECS);
+
+ if (old)
+ *old = old_use_mask;
+
+ if (new)
+ *new = new_use_mask;
+
+ return b_ok;
+}
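+
+/*
+ * Note on the compare-and-swap above: 'in_use' and 'exclusive' are adjacent
+ * 32-bit words in the nexus descriptor, so on a little-endian target
+ * acquiring console 2 means atomically turning a combined value of, say,
+ * 0x0000000000000001 (console 0 claimed) into 0x0000000400000005, i.e.
+ * bit 2 set in both the low (in_use) and high (exclusive) halves.
+ */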
+
+/*
+ * Clears pending data [bytes] from the low-level octeontx console output
+ * buffer if the host console is not connected.
+ * If the host console IS connected, an error is returned.
+ *
+ * @param console console to clear output from
+ * @param bytes_to_clear Number of bytes to free up
+ *
+ * @return 0 on success, -1 on error, or a positive value: if fewer than
+ * 'bytes_to_clear' bytes are pending, the count of pending bytes is
+ * returned instead.
+ */
+static int octeontx_console_output_truncate(struct octeontx_pcie_console *console,
+ size_t bytes_to_clear)
+{
+ u64 old_val;
+ u64 new_val;
+ int bytes_avail;
+ const u32 out_buf_size = le32_to_cpu(readl(&console->output_buf_size));
+ u32 out_wr_idx, out_rd_idx;
+ int ret;
+
+ if (le32_to_cpu(readl(&console->host_console_connected)))
+ return -1;
+
+ out_wr_idx = le32_to_cpu(readl(&console->output_write_index));
+ out_rd_idx = le32_to_cpu(readl(&console->output_read_index));
+
+ old_val = cpu_to_le64((u64)out_rd_idx << 32);
+ bytes_avail = buffer_pending_bytes(out_buf_size, out_wr_idx,
+ out_rd_idx);
+ if (bytes_avail < 0)
+ return bytes_avail;
+ /* Not enough space */
+ if (bytes_to_clear > bytes_avail)
+ return bytes_avail;
+
+ out_rd_idx = (out_rd_idx + bytes_to_clear) % out_buf_size;
+ new_val = cpu_to_le64((u64)out_rd_idx << 32);
+
+ /*
+ * We need to use an atomic operation here in case the host
+ * console should connect. This guarantees that if the host
+ * connects that it will always see a consistent state. Normally
+ * only the host can modify the read pointer. This assures us
+ * that the read pointer will only be modified if the host
+ * is disconnected.
+ */
+ ret = __atomic_compare_exchange_n
+ ((u64 *)(&console->host_console_connected),
+ &old_val, new_val, 0,
+ __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+
+ return ret ? 0 : -1;
+}
+
+/*
+ * Low-level octeontx console write function.
+ *
+ * NOTE: this may NOT sleep, as it is called by the TTY 'write()' API.
+ *
+ */
+static unsigned int
+octeontx_console_write(struct device *dev, const char *buf, unsigned int len,
+ struct octeontx_pcie_console __iomem *ring_descr,
+ u8 __iomem *output_ring, spinlock_t *excl_lock)
+{
+ const u8 *src;
+ int srclen, avail, wr_len, written;
+ unsigned int wait_usecs;
+ u32 sz, rd_idx, wr_idx;
+
+ spin_lock(excl_lock);
+
+ sz = le32_to_cpu(readl(&ring_descr->output_buf_size));
+ src = buf;
+ srclen = len;
+ written = 0;
+ wait_usecs = 0;
+
+ wr_idx = le32_to_cpu(readl(&ring_descr->output_write_index));
+
+ while (srclen > 0) {
+ rd_idx = le32_to_cpu(readl(&ring_descr->output_read_index));
+ avail = buffer_free_bytes(sz, wr_idx, rd_idx);
+
+ if (avail > 0) {
+ /* reset host wait time */
+ wait_usecs = 0;
+
+ wr_len = min(avail, srclen);
+ srclen -= wr_len;
+ if (wr_idx + wr_len > sz) {
+ memcpy_toio(output_ring + wr_idx, src,
+ (sz - wr_idx));
+ wr_len -= (sz - wr_idx);
+ src += (sz - wr_idx);
+ wr_idx = 0;
+ }
+ if (wr_len)
+ memcpy_toio(output_ring + wr_idx, src, wr_len);
+ src += wr_len;
+ written += wr_len;
+ wr_idx = (wr_idx + wr_len) % sz;
+
+ /* The write index is used by another process
+ * (remote PCI) to indicate the presence of [new] data
+ * in the ring buffer.
+ * Use a barrier here to ensure that all such data
+ * has been committed to memory prior to updating
+ * the write index in the descriptor.
+ */
+ wmb();
+ writel(cpu_to_le32(wr_idx),
+ &ring_descr->output_write_index);
+ } else if (!avail) {
+ /* Try to free space in output buffer (i.e. truncate) */
+ wr_len = octeontx_console_output_truncate(ring_descr,
+ srclen);
+
+ if (wr_len < 0) {
+ if (wait_usecs >=
+ PCI_CONS_HOST_WAIT_TIMEOUT_USECS) {
+ dev_err_once(dev,
+ "Timeout awaiting host\n");
+ break;
+ }
+ /* We cannot sleep, we have acquired the lock */
+ udelay(PCI_CONS_HOST_WAIT_LOOP_USECS);
+ wait_usecs += PCI_CONS_HOST_WAIT_LOOP_USECS;
+ } else if (wr_len > 0) {
+ /* Truncate what we can */
+ wr_len = octeontx_console_output_truncate(
+ ring_descr, wr_len);
+ if (wr_len != 0) {
+ dev_err(dev,
+ "output buffer truncate error\n");
+ break;
+ }
+ }
+ } else {
+ dev_err_once(dev, "output buffer error\n");
+ break;
+ }
+ }
+
+ spin_unlock(excl_lock);
+
+ return written;
+}
+
+/*
+ * Linux console callback.
+ */
+static void pci_console_dev_write(struct console *cons, const char *buf,
+ unsigned int len)
+{
+ struct pci_console *pci_cons = cons->data;
+ struct device *dev = pci_cons->device;
+ struct octeontx_pcie_console __iomem *ring_descr;
+ u8 __iomem *output_ring;
+ u32 cons_idx;
+
+ ring_descr = pci_cons->ring_descr;
+ output_ring = pci_cons->output_ring;
+
+ cons_idx = le32_to_cpu(readl(&ring_descr->host.cons_idx));
+
+ octeontx_console_write(dev, buf, len, ring_descr, output_ring,
+ &pci_cons->excl_lock[cons_idx]);
+}
+
+/*
+ * Linux console callback.
+ */
+static struct tty_driver *pci_console_dev_device(struct console *cons,
+ int *index)
+{
+ struct pci_console *pci_cons = cons->data;
+ struct device *dev = pci_cons->device;
+
+ dbgmsg(dev, "%s: entry\n", __func__);
+
+ *index = pci_cons->cons.index;
+
+ dbgmsg(dev, "return index: %d, tty driver: %p\n", *index,
+ pci_cons->tty.drv);
+
+ return pci_cons->tty.drv;
+}
+
+/*
+ * Linux console initialization callback.
+ *
+ * Create and register a TTY driver to be used with this console.
+ *
+ */
+static int pci_console_dev_setup(struct console *cons, char *arg)
+{
+ struct pci_console *pci_cons = cons->data;
+ struct device *dev = pci_cons->device;
+ struct tty_driver *ttydrv;
+ int ret;
+
+ dbgmsg(dev, "%s: entry, args '%s'\n", __func__, arg);
+
+ ret = 0;
+ ttydrv = NULL;
+
+ /* Create/register our TTY driver */
+ if (!pci_cons->tty.drv) {
+ ret = -ENODEV;
+
+ ttydrv = tty_alloc_driver(1 /*i.e. a single line */,
+ TTY_DRIVER_REAL_RAW);
+ if (!ttydrv) {
+ dev_err(dev, "Cannot allocate tty driver\n");
+ goto exit;
+ }
+
+ ttydrv->driver_name = DRV_NAME;
+ ttydrv->name = "ttyPCI";
+ ttydrv->type = TTY_DRIVER_TYPE_SERIAL;
+ ttydrv->subtype = SERIAL_TYPE_NORMAL;
+ ttydrv->major = TTY_DRV_MAJOR_VER;
+ ttydrv->minor_start = TTY_DRV_MINOR_VER_START;
+ ttydrv->init_termios = tty_std_termios;
+ ttydrv->init_termios.c_cflag =
+ B9600 | CS8 | CREAD | HUPCL | CLOCAL;
+ ttydrv->driver_state = pci_cons;
+ tty_set_operations(ttydrv, &pci_console_dev_tty_ops);
+ tty_port_init(&pci_cons->tty.port);
+ tty_port_link_device(&pci_cons->tty.port, ttydrv,
+ 0 /* i.e. the first, and only, port */);
+ ret = tty_register_driver(ttydrv);
+ if (ret) {
+ dev_err(dev, "Error registering TTY %s\n",
+ ttydrv->name);
+ goto exit;
+ }
+
+ pci_cons->tty.drv = ttydrv;
+
+ ret = 0;
+ }
+
+exit:
+ /* If error initializing tty driver, release it */
+ if (ret && ttydrv)
+ put_tty_driver(ttydrv);
+
+ return ret ? -ENODEV : 0;
+}
+
+/*
+ * Main initialization function for pci_console device instance.
+ *
+ * returns:
+ * 0 if no error
+ * -ENODEV if error occurred initializing device
+ * ENODEV if device should not be used (not an error per se)
+ */
+static int pci_console_init(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct pci_console *pci_cons = platform_get_drvdata(pdev);
+ struct device_node *of_node, *of_parent;
+ int ret, cons_num, len;
+ u64 cons_addr, cons_size, new_use_mask, old_use_mask;
+ u64 of_parent_sz, of_parent_xbase;
+ u32 cons_index;
+ const __be32 *of_base, *of_parent_base;
+ struct octeontx_pcie_console_nexus __iomem *nexus_desc;
+
+ dbgmsg(dev, "%s: entry\n", __func__);
+
+ ret = -ENODEV;
+
+ nexus_desc = NULL;
+
+ of_node = pdev->dev.of_node;
+ if (!of_node) {
+ dev_err(dev, "Missing devicetree configuration\n");
+ goto exit;
+ }
+
+ /* retrieve our console index */
+ cons_num = -1;
+ if (!of_property_read_u32(of_node, "reg", &cons_index))
+ cons_num = cons_index;
+ if ((cons_num < 0) ||
+ (cons_num >= OCTEONTX_PCIE_MAX_CONSOLES)) {
+ dev_err(dev, "Invalid configuration console index\n");
+ goto exit;
+ }
+
+ if (!(max_cons_mask & BIT(cons_num))) {
+ dev_info(dev, "Ignoring excluded console %d\n",
+ cons_num);
+ ret = ENODEV;
+ goto exit;
+ }
+
+ /* Retrieve console base address and length from device tree */
+ cons_addr = OF_BAD_ADDR;
+ of_base = of_get_address(of_node, 0, &cons_size, 0);
+ if (of_base)
+ cons_addr = of_translate_address(of_node, of_base);
+ if (cons_addr == OF_BAD_ADDR) {
+ dev_err(dev, "Invalid configuration base address\n");
+ goto exit;
+ }
+
+ dbgmsg(dev, "Located console %d, address %#llx, size: %#llx\n",
+ cons_num, cons_addr, cons_size);
+
+ /* ======================================================= */
+ /* Note: we must [eventually] call 'of_node_put' on parent */
+ of_parent = of_get_parent(of_node);
+ if (!of_parent) {
+ dev_err(dev,
+ "Missing devicetree parent configuration\n");
+ goto exit;
+ }
+
+ /* retrieve (and map) nexus pointer from parent node */
+ of_parent_base = of_get_address(of_parent, 0, &of_parent_sz, 0);
+ if (of_parent_base) {
+ of_parent_xbase = of_translate_address(of_parent,
+ of_parent_base);
+ /* TODO: verify we can use WC */
+ if (of_parent_xbase != OF_BAD_ADDR) {
+ dbgmsg(dev, "of_parent_xbase: %#llx\n",
+ of_parent_xbase);
+ pci_cons->nexus_desc = nexus_desc =
+ ioremap_wc(of_parent_xbase,
+ of_parent_sz);
+ }
+ }
+
+ /* Release reference on parent */
+ of_node_put(of_parent);
+ /* ======================================================= */
+
+ if (!nexus_desc) {
+ dev_err(dev,
+ "Invalid parent configuration base address\n");
+ goto exit;
+ }
+
+ /* Verify/use existing configuration (i.e. from U-Boot) */
+
+ if (readq(&nexus_desc->magic) !=
+ cpu_to_le64(OCTEONTX_PCIE_CONSOLE_NEXUS_MAGIC)) {
+ dev_err(dev, "Invalid nexus signature\n");
+ goto exit;
+ }
+
+ if (cons_addr !=
+ le64_to_cpu(readq(&nexus_desc->console_addr[cons_num]))) {
+ dev_err(dev,
+ "Console %d base address mismatch %#llx/%#llx\n"
+ , cons_num, cons_addr,
+ le64_to_cpu(readq(&nexus_desc->console_addr[cons_num]))
+ );
+ goto exit;
+ }
+
+ if (le32_to_cpu(readl(&nexus_desc->in_use)) & (1 << cons_num)) {
+ dev_err(dev, "Console %d already in-use\n", cons_num);
+ goto exit;
+ }
+
+ if (octeontx_console_init(dev, pci_cons, cons_num, cons_addr,
+ cons_size)) {
+ dev_err(dev,
+ "Error initializing octeontx pci console\n");
+ goto exit;
+ }
+
+ dev_info(dev,
+ "Initialized console %d, address %#llx, size: %#llx\n",
+ cons_num, cons_addr, cons_size);
+
+ old_use_mask = new_use_mask = 0;
+
+ if (!octeontx_console_acquire(pci_cons->nexus_desc, cons_num,
+ true, &old_use_mask,
+ &new_use_mask)) {
+ dev_err(dev,
+ "Console acquisition failed, old: %#llx, new: %#llx\n",
+ old_use_mask, new_use_mask);
+ goto exit;
+ }
+
+ pci_cons->octeontx_console_acquired = true;
+
+ dbgmsg(dev, "Console acquisition - old: %#llx, new: %#llx\n",
+ old_use_mask, new_use_mask);
+
+ /* initialize linux console state */
+ len = sizeof(pci_cons->cons.name);
+ strncpy(pci_cons->cons.name, "pci", len - 1);
+ pci_cons->cons.name[len - 1] = 0;
+ pci_cons->device = dev;
+ pci_cons->cons.write = pci_console_dev_write;
+ pci_cons->cons.device = pci_console_dev_device;
+ pci_cons->cons.setup = pci_console_dev_setup;
+ pci_cons->cons.data = pci_cons;
+ pci_cons->cons.index = cons_num;
+ pci_cons->cons.flags = CON_PRINTBUFFER;
+
+ register_console(&pci_cons->cons);
+
+ ret = 0;
+
+exit:
+ return ret;
+}
+
+/*
+ * Main de-initialization function for pci_console device instance.
+ */
+static int pci_console_de_init(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct pci_console *pci_cons = platform_get_drvdata(pdev);
+ u64 new_use_mask, old_use_mask;
+ int cons_num;
+
+ dbgmsg(dev, "%s: entry\n", __func__);
+
+ cons_num = pci_cons->cons.index;
+
+ if (pci_cons->tty.drv) {
+ tty_unregister_driver(pci_cons->tty.drv);
+ put_tty_driver(pci_cons->tty.drv);
+ }
+
+ if (pci_cons->cons.flags & CON_ENABLED) {
+ if (unregister_console(&pci_cons->cons))
+ dev_err(dev,
+ "Error unregistering pci console %d\n",
+ cons_num);
+ }
+
+ octeontx_console_de_init(dev, pci_cons, cons_num, 0, 0);
+
+ if (pci_cons->octeontx_console_acquired) {
+ old_use_mask = new_use_mask = 0;
+ if (!octeontx_console_acquire(pci_cons->nexus_desc,
+ cons_num, false, &old_use_mask,
+ &new_use_mask))
+ dev_err(dev,
+ "Console release failed, old: %#llx, new: %#llx\n",
+ old_use_mask, new_use_mask);
+ else
+ dbgmsg(dev,
+ "Console release - old: %#llx, new: %#llx\n",
+ old_use_mask, new_use_mask);
+
+ iounmap(pci_cons->nexus_desc);
+ pci_cons->nexus_desc = NULL;
+ }
+
+ return 0;
+}
+
+static int pci_console_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct pci_console *pci_cons;
+ int ret;
+
+ dbgmsg(dev, "%s: entry\n", __func__);
+
+ pci_cons = NULL;
+
+ ret = -ENODEV;
+
+ /* allocate device structure */
+ pci_cons = devm_kzalloc(dev, sizeof(*pci_cons), GFP_KERNEL);
+
+ if (pci_cons == NULL) {
+ ret = -ENOMEM;
+ dev_err(dev, "Unable to allocate drv context.\n");
+ goto exit;
+ }
+
+ platform_set_drvdata(pdev, pci_cons);
+
+ ret = pci_console_init(pdev);
+
+ /* a negative value indicates an error */
+ if (ret < 0)
+ dev_err(dev, "Error initializing pci console\n");
+
+exit:
+ if (ret) {
+ pci_console_de_init(pdev);
+
+ if (pci_cons != NULL)
+ devm_kfree(dev, pci_cons);
+ }
+
+ return ret ? -ENODEV : 0;
+}
+
+static void pci_console_shutdown(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ dbgmsg(dev, "%s: entry\n", __func__);
+}
+
+static int pci_console_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct pci_console *pci_cons = platform_get_drvdata(pdev);
+
+ dbgmsg(dev, "%s: entry\n", __func__);
+
+ pci_console_de_init(pdev);
+
+ devm_kfree(dev, pci_cons);
+
+ return 0;
+}
+
+static const struct of_device_id pci_console_of_match[] = {
+ { .compatible = "marvell,pci-console", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, pci_console_of_match);
+
+static const struct platform_device_id pci_console_pdev_match[] = {
+ { .name = DRV_NAME, },
+ {},
+};
+MODULE_DEVICE_TABLE(platform, pci_console_pdev_match);
+
+static struct platform_driver pci_console_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = pci_console_of_match,
+ },
+ .probe = pci_console_probe,
+ .remove = pci_console_remove,
+ .shutdown = pci_console_shutdown,
+ .id_table = pci_console_pdev_match,
+};
+
+/* ========================= pci console TTY driver ========================= */
+
+/*
+ * Linux TTY driver timer callback (used to poll for data).
+ */
+static void pci_console_dev_tty_poll(struct timer_list *timer)
+{
+#define MAX_BUFFERED_INP_DATA 0x100
+ struct pci_console *pci_cons = from_timer(pci_cons, timer, tty.poll_timer);
+ struct octeontx_pcie_console __iomem *ring_descr;
+ u8 __iomem *input_ring;
+ u8 buf[MAX_BUFFERED_INP_DATA];
+ int cnt, pushed;
+ u32 sz, rd_idx, wr_idx, avail;
+
+ BUILD_BUG_ON(PCI_CONS_TTY_POLL_INTERVAL_JIFFIES > HZ);
+ if (!(pci_cons->tty.stats.poll_count++ %
+ (HZ / PCI_CONS_TTY_POLL_INTERVAL_JIFFIES))) {
+ dbgmsg(pci_cons->device,
+ "timer poll count: %u, dropped: %u, pushed: %u\n",
+ pci_cons->tty.stats.poll_count,
+ pci_cons->tty.stats.dropped_count,
+ pci_cons->tty.stats.pushed_count);
+ }
+
+ ring_descr = pci_cons->ring_descr;
+ input_ring = pci_cons->input_ring;
+ sz = le32_to_cpu(readl(&ring_descr->input_buf_size));
+ rd_idx = le32_to_cpu(readl(&ring_descr->input_read_index));
+ wr_idx = le32_to_cpu(readl(&ring_descr->input_write_index));
+ avail = buffer_pending_bytes(sz, wr_idx, rd_idx);
+
+ while ((s32)avail > 0) {
+
+ if (rd_idx > wr_idx)
+ cnt = min(avail, sz - rd_idx);
+ else
+ cnt = min(avail, wr_idx - rd_idx);
+
+ cnt = min(cnt, MAX_BUFFERED_INP_DATA);
+ memcpy_fromio(buf, &input_ring[rd_idx], cnt);
+ pushed = tty_insert_flip_string(&pci_cons->tty.port, buf, cnt);
+ if (!pushed) {
+ /* nothing was accepted; account the copied bytes as dropped */
+ pci_cons->tty.stats.dropped_count += cnt;
+ break;
+ }
+ cnt = pushed;
+
+ rd_idx = (rd_idx + cnt) % sz;
+ avail -= cnt;
+
+ pci_cons->tty.stats.pushed_count += cnt;
+
+ tty_flip_buffer_push(&pci_cons->tty.port);
+ }
+ /* The read index is used by another process (remote PCI) to
+ * indicate which data have been consumed from the ring buffer.
+ * Use a barrier here to ensure that all such data
+ * has been copied from the ring buffer prior to updating the
+ * read index in the descriptor.
+ */
+ mb();
+ writel(cpu_to_le32(rd_idx), &ring_descr->input_read_index);
+
+ mod_timer(&pci_cons->tty.poll_timer,
+ jiffies + PCI_CONS_TTY_POLL_INTERVAL_JIFFIES);
+}
+
+/*
+ * Linux TTY driver callback.
+ */
+static int pci_console_dev_tty_open(struct tty_struct *tty, struct file *filp)
+{
+ struct pci_console *pci_cons = tty->driver->driver_state;
+ struct device *dev = pci_cons->device;
+
+ dbgmsg(dev, "%s: entry\n", __func__);
+
+ if (!pci_cons->tty.open_count++) {
+ dbgmsg(dev, "Scheduling timer...\n");
+ timer_setup(&pci_cons->tty.poll_timer,
+ pci_console_dev_tty_poll, 0);
+ mod_timer(&pci_cons->tty.poll_timer,
+ jiffies + PCI_CONS_TTY_POLL_INTERVAL_JIFFIES);
+ }
+
+ return 0;
+}
+
+/*
+ * Linux TTY driver callback.
+ */
+static void pci_console_dev_tty_close(struct tty_struct *tty,
+ struct file *filp)
+{
+ struct pci_console *pci_cons = tty->driver->driver_state;
+ struct device *dev = pci_cons->device;
+
+ dbgmsg(dev, "%s: entry\n", __func__);
+
+ if (--pci_cons->tty.open_count == 0) {
+ dbgmsg(dev, "Deleting timer...\n");
+ del_timer(&pci_cons->tty.poll_timer);
+ }
+}
+
+/*
+ * Linux TTY driver callback.
+ */
+static int pci_console_dev_tty_write(struct tty_struct *tty,
+ const unsigned char *buf, int count)
+{
+ struct pci_console *pci_cons = tty->driver->driver_state;
+ struct device *dev = pci_cons->device;
+ struct octeontx_pcie_console __iomem *ring_descr;
+ u8 __iomem *output_ring;
+ u32 cons_idx;
+
+ ring_descr = pci_cons->ring_descr;
+ output_ring = pci_cons->output_ring;
+
+ cons_idx = le32_to_cpu(readl(&ring_descr->host.cons_idx));
+
+ return octeontx_console_write(dev, buf, count, ring_descr,
+ output_ring,
+ &pci_cons->excl_lock[cons_idx]);
+}
+
+static int pci_console_dev_tty_write_room(struct tty_struct *tty)
+{
+ struct pci_console *pci_cons = tty->driver->driver_state;
+
+ /* Assume maximum space is available; write function will wait for
+ * available room, if necessary.
+ */
+ return le32_to_cpu(readl(&pci_cons->ring_descr->output_buf_size)) - 1;
+}
+
+static int pci_console_dev_tty_chars_in_buffer(struct tty_struct *tty)
+{
+ struct pci_console *pci_cons = tty->driver->driver_state;
+
+ (void)pci_cons;
+
+ /* We do not buffer any data - zero chars in buffer */
+ return 0;
+}
+
+static void pci_console_dev_tty_send_xchar(struct tty_struct *tty, char ch)
+{
+ pci_console_dev_tty_write(tty, (const u8 *)&ch, sizeof(ch));
+}
+
diff --git a/drivers/soc/marvell/octeontx2-pcicons/otx2-pci-console.h b/drivers/soc/marvell/octeontx2-pcicons/otx2-pci-console.h
new file mode 100644
index 000000000000..76c33bec3dd3
--- /dev/null
+++ b/drivers/soc/marvell/octeontx2-pcicons/otx2-pci-console.h
@@ -0,0 +1,194 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2019 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Supports the PCI Console when OcteonTX2 is running as an Endpoint.
+ *
+ */
+
+#ifndef __OTX2_PCI_CONSOLE_H__
+#define __OTX2_PCI_CONSOLE_H__
+
+/** CONSOLE! converted to little-endian */
+#define OCTEONTX_PCIE_CONSOLE_MAGIC 0x21454C4F534E4F43
+
+/** CONDSCR! converted to little-endian */
+#define OCTEONTX_PCIE_CONSOLE_NEXUS_MAGIC 0x21524353444E4F43
+
+#define OCTEONTX_PCIE_CONSOLE_MAJOR 1
+#define OCTEONTX_PCIE_CONSOLE_MINOR 0
+
+#define OCTEONTX_PCIE_MAX_CONSOLES 8
+
+#define OCTEONTX_PCIE_CONSOLE_NAME_LEN 16
+
+#define PCI_CONS_HOST_WAIT_TIMEOUT_USECS 1000
+#define PCI_CONS_HOST_WAIT_LOOP_USECS 10
+
+#define PCI_CONS_TTY_POLL_INTERVAL_JIFFIES 1
+/**
+ * NOTE: this must match structure in U-Boot console driver.
+ *
+ * This is the main container structure holding the information about all
+ * PCI consoles. Its address is passed to the various routines that operate
+ * on PCI consoles.
+ *
+ * @param magic console descriptor magic number
+ * @param major_version major version of console data structure
+ * @param minor_version minor version of console data structure
+ * @param flags flags applied to all consoles
+ * @param num_consoles number of console data structures available
+ * @param excl_lock lock between cores for this data structure
+ * @param in_use bitmap of consoles claimed by anyone (shared or not)
+ * @param exclusive bitmap of consoles exclusively used
+ * @param pad padding for header for future versions
+ * @param console_addr array of addresses for each console, 0 if unavailable.
+ */
+struct octeontx_pcie_console_nexus {
+ __le64 magic;
+ u8 major_version;
+ u8 minor_version;
+ u8 flags;
+ u8 num_consoles;
+ arch_spinlock_t excl_lock;
+ __le32 /* volatile */ in_use;
+ __le32 /* volatile */ exclusive;
+ u64 pad[13];
+ /* Starts at offset 128 */
+ __le64 console_addr[OCTEONTX_PCIE_MAX_CONSOLES];
+} __packed;
+
+/**
+ * NOTE: this must match structure in U-Boot console driver.
+ *
+ * Structure that defines a single console.
+ *
+ * Note: when read_index == write_index, the buffer is empty.
+ * The actual usable size of each console buffer is console_buf_size - 1.
+ *
+ * There are two different types of locks. pcie_lock is for locking
+ * between the host and target. excl_lock should always be acquired
+ * before pcie_lock is acquired and released after pcie_lock is released.
+ *
+ * excl_lock is a spinlock held between different tasks, such as U-Boot
+ * and ATF, or ATF and the Linux kernel. It should be held whenever
+ * any of the indices are changed or when the pcie_lock is held.
+ *
+ * @param magic console magic number OCTEONTX_PCIE_CONSOLE_MAGIC
+ * @param name name assigned to the console ("ATF", "U-Boot")
+ * @param flags flags associated with console, see
+ * OCTEONTX_PCIE_CONSOLE_FLAG_...
+ * @param owner_id owning task id of last user, 0 if unused.
+ * @param input_buf_size Input buffer size in bytes
+ * @param output_buf_size Output buffer size in bytes
+ * @param input_base_addr Base address of input buffer
+ * @param input_read_index index target begins reading data from
+ * @param input_write_index index host starts writing from
+ * @param output_base_addr Base address of output buffer
+ * @param host_console_connected non-zero if host console is connected
+ * @param output_read_index index host reads from
+ * @param output_write_index index target writes to
+ * @param pcie_lock lock held whenever the indices are updated
+ * using Peterson's algorithm. Use
+ * octeontx_pcie_target_lock() and
+ * octeontx_pcie_target_unlock() to lock and
+ * unlock this data structure.
+ * @param user User-defined pointer
+ * (octeontx_pcie_console_priv *) for U-Boot
+ * @param excl_lock cpu core lock. This lock should be held
+ * whenever this data structure is updated by
+ * the target since it can be shared by multiple
+ * targets.
+ * @param pad pads header to 128 bytes
+ *
+ * Typically the input and output buffers immediately follow this data
+ * structure, however, this is not a requirement.
+ *
+ * Note that the host_console_connected and output_read_index MUST be
+ * next to each other and should be 64-bit aligned. This is due to the
+ * fact that if the output buffer fills up and no host is connected that
+ * the read pointer must be modified atomically in case the host should
+ * connect within that window.
+ */
+struct octeontx_pcie_console {
+ __le64 magic;
+ char name[OCTEONTX_PCIE_CONSOLE_NAME_LEN];
+ __le32 /* volatile */ flags;
+ __le32 /* volatile */ owner_id;
+ __le32 input_buf_size;
+ __le32 output_buf_size;
+ __le64 input_base_addr;
+ __le32 input_read_index;
+ __le32 /* volatile */ input_write_index;
+ __le64 output_base_addr;
+ __le32 /* volatile */ host_console_connected;
+ __le32 /* volatile */ output_read_index;
+ __le32 output_write_index;
+ union {
+ __le32 unused_excl_lock;
+ __le32 cons_idx; /* host console index */
+ } host;
+ void *user;
+ u32 /* i.e. struct octeontx_pcie_lock */ pcie_lock;
+ u32 pad[8];
+} __packed;
+
+/**
+ * struct pci_console_nexus: Console nexus driver state
+ *
+ * @of_node: associated device tree node
+ * @desc: [mapped] pointer to the nexus descriptor in device memory
+ */
+struct pci_console_nexus {
+ struct device_node *of_node;
+ struct octeontx_pcie_console_nexus __iomem *desc;
+};
+
+/**
+ * struct pci_console: Console driver state
+ *
+ * @device: Linux device
+ * @cons: Linux console
+ * @of_node: associated device tree node
+ * @ring_descr: [mapped] pointer to console memory ring descriptor
+ * @input_ring: [mapped] pointer to console input ring
+ * @output_ring: [mapped] pointer to console output ring
+ * @nexus_desc: [mapped] nexus descriptor
+ * @excl_lock: per-console spinlocks serializing writes to the output ring
+ * @octeontx_console_acquired: indicates whether the console was acquired (and
+ * therefore needs to be released during de-init)
+ * @tty.drv: Linux TTY driver
+ * @tty.port: Linux TTY port
+ * @tty.poll_timer: Linux timer used to poll for available data
+ * @tty.open_count: reference count for TTY driver
+ */
+struct pci_console {
+ struct device *device;
+ struct console cons;
+ struct device_node *of_node;
+ struct octeontx_pcie_console __iomem *ring_descr;
+ u8 __iomem *input_ring;
+ u8 __iomem *output_ring;
+ struct octeontx_pcie_console_nexus __iomem *nexus_desc;
+ spinlock_t excl_lock[OCTEONTX_PCIE_MAX_CONSOLES];
+ bool octeontx_console_acquired;
+ struct {
+ struct tty_driver *drv;
+ struct tty_port port;
+ struct timer_list poll_timer;
+ u32 open_count;
+ struct {
+ u32 poll_count;
+ u32 dropped_count;
+ u32 pushed_count;
+ } stats;
+ } tty;
+};
+
+#endif // __OTX2_PCI_CONSOLE_H__
+
diff --git a/drivers/soc/marvell/octeontx2-rm/Makefile b/drivers/soc/marvell/octeontx2-rm/Makefile
new file mode 100644
index 000000000000..bab787b56b43
--- /dev/null
+++ b/drivers/soc/marvell/octeontx2-rm/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for Marvell's OcteonTX2 SSO/TIM RVU device driver
+#
+
+obj-$(CONFIG_OCTEONTX2_RM) += octeontx2_rm.o
+
+octeontx2_rm-y := otx2_rm.o quota.o
+octeontx2_rm-$(CONFIG_OCTEONTX2_RM_DOM_SYSFS) += domain_sysfs.o
+ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
+ccflags-y += -I$(srctree)/drivers/soc/marvell/octeontx2-dpi/
diff --git a/drivers/soc/marvell/octeontx2-rm/domain_sysfs.c b/drivers/soc/marvell/octeontx2-rm/domain_sysfs.c
new file mode 100644
index 000000000000..6083db40d0ed
--- /dev/null
+++ b/drivers/soc/marvell/octeontx2-rm/domain_sysfs.c
@@ -0,0 +1,830 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OcteonTX2 RVU Resource Manager driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/sysfs.h>
+#include "domain_sysfs.h"
+#include "otx2_rm.h"
+#include "dpi.h"
+
+#define DOMAIN_NAME_LEN 32
+#define PCI_SCAN_FMT "%04x:%02x:%02x.%02x"
+
+/* The format of DP is: DP(_name, _param_type, _scanf_fmt) */
+#define DOM_PARAM_SPEC \
+DP(ssow, int, "%d") \
+DP(sso, int, "%d") \
+DP(npa, int, "%d") \
+DP(cpt, int, "%d") \
+DP(tim, int, "%d") \
+DP(dpi, int, "%d")
+
+struct domain_params {
+ const char *name;
+#define DP(_name, _type, _1) \
+ _type _name;
+DOM_PARAM_SPEC
+#undef DP
+ const char *ports[RM_MAX_PORTS];
+ u16 port_cnt;
+};
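+
+/* For reference: the DP() expansion above turns each DOM_PARAM_SPEC entry
+ * into a struct field, so struct domain_params effectively carries
+ * 'int ssow; int sso; int npa; int cpt; int tim; int dpi;' alongside the
+ * name and port list. The _scanf_fmt argument suggests the same list is
+ * reused elsewhere with a DP() definition that parses the sysfs
+ * 'create_domain' input.
+ */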
+
+struct domain {
+ char name[DOMAIN_NAME_LEN];
+ struct kobj_attribute domain_id;
+ struct kobj_attribute domain_in_use;
+ /* List of all ports attached to the domain */
+ struct rvu_port *ports;
+ struct kobject *kobj;
+ struct rvu_vf *rvf;
+ int port_count;
+ bool in_use;
+};
+
+struct rvu_port {
+ /* handle in global list of ports associated to all domains */
+ struct list_head list;
+ struct pci_dev *pdev;
+ struct domain *domain;
+};
+
+struct dpi_vf {
+ struct pci_dev *pdev;
+ /* pointer to the kobject which owns this vf */
+ struct kobject *domain_kobj;
+ int vf_id;
+ bool in_use;
+};
+
+struct dpi_info {
+ /* Total number of vfs available */
+ uint8_t num_vfs;
+ /* Free vfs */
+ uint8_t vfs_free;
+ /* Pointer to the vfs available */
+ struct dpi_vf *dpi_vf;
+};
+
+struct domain_sysfs {
+ struct list_head list;
+ struct kobj_attribute create_domain;
+ struct kobj_attribute destroy_domain;
+ struct kobj_attribute pmccntr_el0;
+ /* List of all ports added to all domains. Used to verify that a newly
+ * created domain does not claim a port that is already assigned.
+ */
+ struct list_head ports;
+ struct rm_dev *rdev;
+ struct kobject *parent;
+ struct domain *domains;
+ size_t domains_len;
+ struct dpi_info dpi_info;
+};
+
+static DEFINE_MUTEX(domain_sysfs_lock);
+static LIST_HEAD(domain_sysfs_list);
+
+static ssize_t
+domain_id_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct domain *dom = container_of(attr, struct domain, domain_id);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", dom->name);
+}
+
+static ssize_t
+domain_in_use_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct domain *dom = container_of(attr, struct domain, domain_in_use);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", dom->rvf->in_use);
+}
+
+static int do_destroy_domain(struct domain_sysfs *lsfs, struct domain *domain)
+{
+ struct device *dev = &lsfs->rdev->pdev->dev;
+ int i;
+
+ if (domain->rvf->in_use) {
+ dev_err(dev, "Domain %s is in use.\n", domain->name);
+ return -EBUSY;
+ }
+
+ sysfs_remove_file(domain->kobj, &domain->domain_id.attr);
+ domain->domain_id.attr.mode = 0;
+ sysfs_remove_file(domain->kobj, &domain->domain_in_use.attr);
+ domain->domain_in_use.attr.mode = 0;
+ for (i = 0; i < domain->port_count; i++) {
+ sysfs_remove_link(domain->kobj,
+ pci_name(domain->ports[i].pdev));
+ }
+
+ for (i = 0; i < lsfs->dpi_info.num_vfs; i++) {
+ struct dpi_vf *dpivf_ptr = NULL;
+
+ dpivf_ptr = &lsfs->dpi_info.dpi_vf[i];
+ /* Identify the devices that belong to this domain */
+ if (dpivf_ptr->in_use &&
+ dpivf_ptr->domain_kobj == domain->kobj) {
+ sysfs_remove_link(domain->kobj,
+ pci_name(dpivf_ptr->pdev));
+ dpivf_ptr->in_use = false;
+ dpivf_ptr->domain_kobj = NULL;
+ lsfs->dpi_info.vfs_free++;
+ }
+ }
+
+ sysfs_remove_link(domain->kobj, pci_name(domain->rvf->pdev));
+ kobject_del(domain->kobj);
+ mutex_lock(&lsfs->rdev->lock);
+ // restore limits
+ lsfs->rdev->vf_limits.sso->a[domain->rvf->vf_id].val = 0;
+ lsfs->rdev->vf_limits.ssow->a[domain->rvf->vf_id].val = 0;
+ lsfs->rdev->vf_limits.npa->a[domain->rvf->vf_id].val = 0;
+ lsfs->rdev->vf_limits.cpt->a[domain->rvf->vf_id].val = 0;
+ lsfs->rdev->vf_limits.tim->a[domain->rvf->vf_id].val = 0;
+ mutex_unlock(&lsfs->rdev->lock);
+
+ mutex_lock(&domain_sysfs_lock);
+ // FREE ALL allocated ports
+ for (i = 0; i < domain->port_count; i++) {
+ list_del(&domain->ports[i].list);
+ pci_dev_put(domain->ports[i].pdev);
+ }
+ kfree(domain->ports);
+ domain->ports = NULL;
+ domain->port_count = 0;
+ domain->in_use = false;
+ domain->name[0] = '\0';
+ mutex_unlock(&domain_sysfs_lock);
+
+ return 0;
+}
+
+static int
+do_create_domain(struct domain_sysfs *lsfs, struct domain_params *dparams)
+{
+ struct device *dev = &lsfs->rdev->pdev->dev;
+ struct domain *domain = NULL;
+ struct rvu_port *ports = NULL, *cur;
+ u32 dom, bus, slot, fn;
+ int old_sso, old_ssow, old_npa, old_cpt, old_tim, device;
+ int res = 0, i;
+
+ /* Validate parameters */
+ if (dparams == NULL)
+ return -EINVAL;
+ if (strnlen(dparams->name, DOMAIN_NAME_LEN) >= DOMAIN_NAME_LEN) {
+ dev_err(dev, "Domain name too long, max %d characters.\n",
+ DOMAIN_NAME_LEN);
+ return -EINVAL;
+ }
+ if (dparams->npa != 1) {
+ dev_err(dev, "Exactly 1 NPA resource required.\n");
+ return -EINVAL;
+ }
+ if (dparams->ssow < 1) {
+ dev_err(dev, "At least 1 SSOW resource required.\n");
+ return -EINVAL;
+ }
+ mutex_lock(&domain_sysfs_lock);
+ /* Check for a duplicate name and find a free domain slot */
+ for (i = 0; i < lsfs->domains_len; i++) {
+ if (!strncmp(lsfs->domains[i].name, dparams->name,
+ DOMAIN_NAME_LEN)) {
+ dev_err(dev, "Domain %s exists already.\n",
+ dparams->name);
+ res = -EINVAL;
+ goto err_dom;
+ }
+ if (lsfs->domains[i].in_use == false &&
+ lsfs->domains[i].rvf->in_use == false) {
+ if (domain == NULL)
+ domain = &lsfs->domains[i];
+ }
+ }
+ if (domain == NULL) {
+ dev_err(dev, "No free device to create new domain.\n");
+ res = -ENODEV;
+ goto err_dom;
+ }
+ strncpy(domain->name, dparams->name, DOMAIN_NAME_LEN - 1);
+ domain->in_use = true;
+ /* Verify ports are valid and supported. */
+ if (dparams->port_cnt == 0)
+ goto skip_ports;
+ ports = kcalloc(dparams->port_cnt, sizeof(struct rvu_port), GFP_KERNEL);
+ if (ports == NULL) {
+ dev_err(dev, "Not enough memory.\n");
+ res = -ENOMEM;
+ goto err_ports;
+ }
+ for (i = 0; i < dparams->port_cnt; i++) {
+ if (sscanf(dparams->ports[i], PCI_SCAN_FMT, &dom, &bus, &slot,
+ &fn) != 4) {
+ dev_err(dev, "Invalid port: %s.\n", dparams->ports[i]);
+ res = -EINVAL;
+ goto err_ports;
+ }
+ ports[i].pdev =
+ pci_get_domain_bus_and_slot(dom, bus,
+ PCI_DEVFN(slot, fn));
+ if (ports[i].pdev == NULL) {
+ dev_err(dev, "Unknown port: %s.\n", dparams->ports[i]);
+ res = -ENODEV;
+ goto err_ports;
+ }
+ device = ports[i].pdev->device;
+ if (ports[i].pdev->vendor != PCI_VENDOR_ID_CAVIUM ||
+ (device != PCI_DEVID_OCTEONTX2_RVU_PF &&
+ device != PCI_DEVID_OCTEONTX2_PASS1_RVU_PF &&
+ device != PCI_DEVID_OCTEONTX2_RVU_AFVF &&
+ device != PCI_DEVID_OCTEONTX2_PASS1_RVU_AFVF &&
+ device != PCI_DEVID_OCTEONTX2_RVU_VF &&
+ device != PCI_DEVID_OCTEONTX2_PASS1_RVU_VF)) {
+ dev_err(dev, "Unsupported port: %s.\n",
+ dparams->ports[i]);
+ res = -EINVAL;
+ goto err_ports;
+ }
+ list_for_each_entry(cur, &lsfs->ports, list) {
+ if (cur->pdev != ports[i].pdev)
+ continue;
+ dev_err(dev,
+ "Port %s already assigned to domain %s.\n",
+ dparams->ports[i], cur->domain->name);
+ res = -EBUSY;
+ goto err_ports;
+ }
+ }
+ for (i = 0; i < dparams->port_cnt; i++) {
+ ports[i].domain = domain;
+ list_add(&ports[i].list, &lsfs->ports);
+ }
+ domain->ports = ports;
+ domain->port_count = dparams->port_cnt;
+skip_ports:
+ mutex_unlock(&domain_sysfs_lock);
+ /* Check domain spec against limits for the parent RVU. */
+ mutex_lock(&lsfs->rdev->lock);
+ old_sso = lsfs->rdev->vf_limits.sso->a[domain->rvf->vf_id].val;
+ old_ssow = lsfs->rdev->vf_limits.ssow->a[domain->rvf->vf_id].val;
+ old_npa = lsfs->rdev->vf_limits.npa->a[domain->rvf->vf_id].val;
+ old_cpt = lsfs->rdev->vf_limits.cpt->a[domain->rvf->vf_id].val;
+ old_tim = lsfs->rdev->vf_limits.tim->a[domain->rvf->vf_id].val;
+#define CHECK_LIMITS(_ls, _val, _n, _idx) do { \
+ if (quotas_get_sum(_ls) + _val - _ls->a[_idx].val > _ls->max_sum) { \
+ dev_err(dev, \
+ "Not enough "_n" LFs, currently used: %lld/%lld\n", \
+ quotas_get_sum(_ls), _ls->max_sum); \
+ res = -ENODEV; \
+ goto err_limits; \
+ } \
+} while (0)
+ CHECK_LIMITS(lsfs->rdev->vf_limits.sso, dparams->sso, "SSO",
+ domain->rvf->vf_id);
+ CHECK_LIMITS(lsfs->rdev->vf_limits.ssow, dparams->ssow, "SSOW",
+ domain->rvf->vf_id);
+ CHECK_LIMITS(lsfs->rdev->vf_limits.npa, dparams->npa, "NPA",
+ domain->rvf->vf_id);
+ CHECK_LIMITS(lsfs->rdev->vf_limits.cpt, dparams->cpt, "CPT",
+ domain->rvf->vf_id);
+ CHECK_LIMITS(lsfs->rdev->vf_limits.tim, dparams->tim, "TIM",
+ domain->rvf->vf_id);
+ if (dparams->dpi > lsfs->dpi_info.vfs_free) {
+ dev_err(dev,
+ "Not enough DPI VFS, currently used:%d/%d\n",
+ lsfs->dpi_info.num_vfs -
+ lsfs->dpi_info.vfs_free,
+ lsfs->dpi_info.num_vfs);
+ res = -ENODEV;
+ goto err_limits;
+ }
+
+ /* Now that checks are done, update the limits */
+ lsfs->rdev->vf_limits.sso->a[domain->rvf->vf_id].val = dparams->sso;
+ lsfs->rdev->vf_limits.ssow->a[domain->rvf->vf_id].val = dparams->ssow;
+ lsfs->rdev->vf_limits.npa->a[domain->rvf->vf_id].val = dparams->npa;
+ lsfs->rdev->vf_limits.cpt->a[domain->rvf->vf_id].val = dparams->cpt;
+ lsfs->rdev->vf_limits.tim->a[domain->rvf->vf_id].val = dparams->tim;
+ lsfs->dpi_info.vfs_free -= dparams->dpi;
+ mutex_unlock(&lsfs->rdev->lock);
+
+ /* Set it up according to user spec */
+ domain->kobj = kobject_create_and_add(dparams->name, lsfs->parent);
+ if (domain->kobj == NULL) {
+ dev_err(dev, "Failed to create domain directory.\n");
+ res = -ENOMEM;
+ goto err_kobject_create;
+ }
+ res = sysfs_create_link(domain->kobj, &domain->rvf->pdev->dev.kobj,
+ pci_name(domain->rvf->pdev));
+ if (res < 0) {
+ dev_err(dev, "Failed to create dev links for domain %s.\n",
+ domain->name);
+ res = -ENOMEM;
+ goto err_dom_dev_symlink;
+ }
+ for (i = 0; i < dparams->port_cnt; i++) {
+ res = sysfs_create_link(domain->kobj, &ports[i].pdev->dev.kobj,
+ pci_name(ports[i].pdev));
+ if (res < 0) {
+ dev_err(dev,
+ "Failed to create dev links for domain %s.\n",
+ domain->name);
+ res = -ENOMEM;
+ goto err_dom_port_symlink;
+ }
+ }
+ /* Create symlinks for dpi vfs in domain */
+ for (i = 0; i < dparams->dpi; i++) {
+ struct dpi_vf *dpivf_ptr = NULL;
+ int vf_idx;
+
+ for (vf_idx = 0; vf_idx < lsfs->dpi_info.num_vfs;
+ vf_idx++) {
+ /* Find available dpi vfs and create symlinks */
+ dpivf_ptr = &lsfs->dpi_info.dpi_vf[vf_idx];
+ if (dpivf_ptr->in_use)
+ continue;
+ else
+ break;
+ }
+ res = sysfs_create_link(domain->kobj,
+ &dpivf_ptr->pdev->dev.kobj,
+ pci_name(dpivf_ptr->pdev));
+ if (res < 0) {
+ dev_err(dev,
+ "Failed to create DPI dev links for domain %s\n",
+ domain->name);
+ res = -ENOMEM;
+ goto err_dpi_symlink;
+ }
+ dpivf_ptr->domain_kobj = domain->kobj;
+ dpivf_ptr->in_use = true;
+ }
+
+ domain->domain_in_use.attr.mode = 0444;
+ domain->domain_in_use.attr.name = "domain_in_use";
+ domain->domain_in_use.show = domain_in_use_show;
+ res = sysfs_create_file(domain->kobj, &domain->domain_in_use.attr);
+ if (res < 0) {
+ dev_err(dev,
+ "Failed to create domain_in_use file for domain %s.\n",
+ domain->name);
+ res = -ENOMEM;
+ goto err_dom_in_use;
+ }
+
+ domain->domain_id.attr.mode = 0444;
+ domain->domain_id.attr.name = "domain_id";
+ domain->domain_id.show = domain_id_show;
+ res = sysfs_create_file(domain->kobj, &domain->domain_id.attr);
+ if (res < 0) {
+ dev_err(dev, "Failed to create domain_id file for domain %s.\n",
+ domain->name);
+ res = -ENOMEM;
+ goto err_dom_id;
+ }
+
+ return res;
+
+err_dom_id:
+ domain->domain_id.attr.mode = 0;
+ sysfs_remove_file(domain->kobj, &domain->domain_in_use.attr);
+err_dom_in_use:
+ domain->domain_in_use.attr.mode = 0;
+err_dpi_symlink:
+ for (i = 0; i < lsfs->dpi_info.num_vfs; i++) {
+ struct dpi_vf *dpivf_ptr = NULL;
+
+ dpivf_ptr = &lsfs->dpi_info.dpi_vf[i];
+ /* Identify the devices that belong to this domain */
+ if (dpivf_ptr->in_use &&
+ dpivf_ptr->domain_kobj == domain->kobj) {
+ sysfs_remove_link(domain->kobj,
+ pci_name(dpivf_ptr->pdev));
+ dpivf_ptr->in_use = false;
+ dpivf_ptr->domain_kobj = NULL;
+ }
+ }
+err_dom_port_symlink:
+ for (i = 0; i < dparams->port_cnt; i++)
+ sysfs_remove_link(domain->kobj, pci_name(ports[i].pdev));
+ sysfs_remove_link(domain->kobj, pci_name(domain->rvf->pdev));
+err_dom_dev_symlink:
+ kobject_del(domain->kobj);
+err_kobject_create:
+ mutex_lock(&lsfs->rdev->lock);
+err_limits:
+ /* Restore the previous limits */
+ lsfs->rdev->vf_limits.sso->a[domain->rvf->vf_id].val = old_sso;
+ lsfs->rdev->vf_limits.ssow->a[domain->rvf->vf_id].val = old_ssow;
+ lsfs->rdev->vf_limits.npa->a[domain->rvf->vf_id].val = old_npa;
+ lsfs->rdev->vf_limits.cpt->a[domain->rvf->vf_id].val = old_cpt;
+ lsfs->rdev->vf_limits.tim->a[domain->rvf->vf_id].val = old_tim;
+ lsfs->dpi_info.vfs_free += dparams->dpi;
+ mutex_unlock(&lsfs->rdev->lock);
+ mutex_lock(&domain_sysfs_lock);
+err_ports:
+ /* Free all allocated ports; ports may be NULL if its allocation failed */
+ for (i = 0; ports != NULL && i < dparams->port_cnt; i++) {
+ if (ports[i].pdev == NULL)
+ break;
+ if (ports[i].domain != NULL)
+ list_del(&ports[i].list);
+ pci_dev_put(ports[i].pdev);
+ }
+ kfree(ports);
+ domain->ports = NULL;
+ domain->port_count = 0;
+ domain->in_use = false;
+ domain->name[0] = '\0';
+err_dom:
+ mutex_unlock(&domain_sysfs_lock);
+ return res;
+}
+
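+/*
+ * sysfs store handler for "destroy_domain". The written string is the name
+ * of the domain to tear down, for example (path abbreviated, illustrative
+ * only):
+ *
+ *   echo dom0 > /sys/bus/pci/devices/<PF>/destroy_domain
+ */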
+static ssize_t
+destroy_domain_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct domain_sysfs *lsfs =
+ container_of(attr, struct domain_sysfs, destroy_domain);
+ struct device *dev = &lsfs->rdev->pdev->dev;
+ struct domain *domain = NULL;
+ char name[DOMAIN_NAME_LEN], *name_ptr;
+ int i, res;
+
+ strlcpy(name, buf, DOMAIN_NAME_LEN);
+ name_ptr = strim(name);
+ if (strlen(name_ptr) == 0) {
+ dev_err(dev, "Empty domain name.\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&domain_sysfs_lock);
+ /* Find the domain by name */
+ for (i = 0; i < lsfs->domains_len; i++) {
+ if (!strncmp(lsfs->domains[i].name, name_ptr,
+ DOMAIN_NAME_LEN)) {
+ domain = &lsfs->domains[i];
+ break;
+ }
+ }
+ if (domain == NULL) {
+ dev_err(dev, "Domain '%s' doesn't exist.\n", name);
+ res = -EINVAL;
+ goto err_dom;
+ }
+ mutex_unlock(&domain_sysfs_lock);
+
+ res = do_destroy_domain(lsfs, domain);
+ if (res == 0)
+ res = count;
+ return res;
+err_dom:
+ mutex_unlock(&domain_sysfs_lock);
+ return res;
+}
+
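+/*
+ * sysfs store handler for "create_domain". The spec is a ';' separated list
+ * starting with the domain name, followed by "key:value" tokens; "port"
+ * entries take a PCI device address and may repeat (up to RM_MAX_PORTS),
+ * while the numeric keys are those listed in DOM_PARAM_SPEC. A sketch of the
+ * expected input (keys shown are illustrative):
+ *
+ *   echo "dom0;sso:2;ssow:1;npa:1;port:0002:02:00.1" > .../create_domain
+ */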
+static ssize_t
+create_domain_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct domain_params *dparams = NULL;
+ struct domain_sysfs *lsfs =
+ container_of(attr, struct domain_sysfs, create_domain);
+ struct device *dev = &lsfs->rdev->pdev->dev;
+ int res = 0;
+ char *start;
+ char *end;
+ char *ptr = NULL;
+ const char *name;
+ char *errmsg = "Invalid domain specification format.";
+
+ if (strlen(buf) == 0) {
+ dev_err(dev, "Empty domain spec.\n");
+ return -EINVAL;
+ }
+
+ dparams = kzalloc(sizeof(*dparams), GFP_KERNEL);
+ if (dparams == NULL) {
+ errmsg = "Not enough memory";
+ res = -ENOMEM;
+ goto error;
+ }
+
+ end = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (end == NULL) {
+ errmsg = "Not enough memory";
+ res = -ENOMEM;
+ goto error;
+ }
+
+ ptr = end;
+ memcpy(end, buf, count);
+
+ name = strsep(&end, ";");
+ if (end == NULL) {
+ res = -EINVAL;
+ goto error;
+ }
+
+ dparams->name = name;
+
+ for (;;) {
+ start = strsep(&end, ";");
+ if (start == NULL)
+ break;
+ start = strim(start);
+ if (!*start)
+ continue;
+
+ if (!strncmp(strim(start), "port", sizeof("port") - 1)) {
+ strsep(&start, ":");
+ if (dparams->port_cnt >= RM_MAX_PORTS)
+ goto error;
+ dparams->ports[dparams->port_cnt++] = strim(start);
+ }
+ #define DP(_name, _1, _fmt) \
+ else if (!strncmp(strim(start), #_name, \
+ sizeof(#_name) - 1)) { \
+ strsep(&start, ":"); \
+ if (start == NULL || \
+ sscanf(strim(start), _fmt, &dparams->_name) != 1) { \
+ res = -EINVAL; \
+ goto error; \
+ } \
+ continue; \
+ }
+ DOM_PARAM_SPEC
+ #undef DP
+ else {
+ res = -EINVAL;
+ goto error;
+ }
+ }
+ res = do_create_domain(lsfs, dparams);
+ if (res < 0) {
+ errmsg = "Failed to create application domain.";
+ goto error;
+ } else
+ res = count;
+error:
+ if (res < 0)
+ dev_err(dev, "%s\n", errmsg);
+ kfree(ptr);
+ kfree(dparams);
+ return res;
+}
+
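+/*
+ * Enumerate the DPI VF PCI devices present in the system and record them in
+ * dpi_info, so that create_domain can hand them out and symlink them into
+ * domain directories later on.
+ */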
+static int dpivf_sysfs_create(struct domain_sysfs *lsfs)
+{
+ struct dpi_info *dpi_info = &lsfs->dpi_info;
+ struct dpi_vf *dpivf_ptr = NULL;
+ struct pci_dev *pdev = lsfs->rdev->pdev;
+ struct pci_dev *vdev = NULL;
+ uint8_t vf_idx = 0;
+
+ dpi_info->dpi_vf = kcalloc(DPI_MAX_VFS,
+ sizeof(struct dpi_vf), GFP_KERNEL);
+ if (dpi_info->dpi_vf == NULL)
+ return -ENOMEM;
+
+ /* Get available DPI vfs */
+ while ((vdev = pci_get_device(pdev->vendor,
+ PCI_DEVID_OCTEONTX2_DPI_VF, vdev))) {
+ if (!vdev->is_virtfn)
+ continue;
+ if (vf_idx >= DPI_MAX_VFS) {
+ pci_dev_put(vdev);
+ break;
+ }
+ dpivf_ptr = &dpi_info->dpi_vf[vf_idx];
+ /* Hold a reference; it is dropped in dpivf_sysfs_destroy() */
+ dpivf_ptr->pdev = pci_dev_get(vdev);
+ dpivf_ptr->vf_id = vf_idx;
+ dpivf_ptr->in_use = false;
+ vf_idx++;
+ }
+ dpi_info->num_vfs = vf_idx;
+ dpi_info->vfs_free = vf_idx;
+ return 0;
+}
+
+static void dpivf_sysfs_destroy(struct domain_sysfs *lsfs)
+{
+ struct dpi_info *dpi_info = &lsfs->dpi_info;
+ struct dpi_vf *dpivf_ptr = NULL;
+ uint8_t vf_idx = 0;
+
+ for (vf_idx = 0; vf_idx < dpi_info->num_vfs; vf_idx++) {
+ dpivf_ptr = &dpi_info->dpi_vf[vf_idx];
+ pci_dev_put(dpivf_ptr->pdev);
+ dpivf_ptr->pdev = NULL;
+ }
+ dpi_info->num_vfs = 0;
+
+ kfree(dpi_info->dpi_vf);
+ dpi_info->dpi_vf = NULL;
+}
+
+
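+/*
+ * The helpers below run on every CPU (via on_each_cpu()) to grant or revoke
+ * EL0 access to the PMU cycle counter. They assume the ARMv8 PMU set/clear
+ * register pairs, where writing 1 to a bit sets or clears the corresponding
+ * enable and writing 0 has no effect.
+ */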
+static void enable_pmccntr_el0(void *data)
+{
+ u64 val;
+ /* Disable the cycle counter overflow interrupt (write-1-to-clear) */
+ asm volatile("msr pmintenclr_el1, %0" : : "r" (BIT_ULL(31)));
+ /* Enable cycle counter */
+ asm volatile("mrs %0, pmcntenset_el0" : "=r" (val));
+ val |= BIT_ULL(31);
+ asm volatile("msr pmcntenset_el0, %0" :: "r" (val));
+ /* Enable user-mode access to cycle counters. */
+ asm volatile("mrs %0, pmuserenr_el0" : "=r" (val));
+ val |= BIT(2) | BIT(0);
+ asm volatile("msr pmuserenr_el0, %0" : : "r"(val));
+ /* Start cycle counter */
+ asm volatile("mrs %0, pmcr_el0" : "=r" (val));
+ val |= BIT(0);
+ isb();
+ asm volatile("msr pmcr_el0, %0" : : "r" (val));
+ asm volatile("mrs %0, pmccfiltr_el0" : "=r" (val));
+ val |= BIT(27);
+ asm volatile("msr pmccfiltr_el0, %0" : : "r" (val));
+}
+
+static void disable_pmccntr_el0(void *data)
+{
+ u64 val;
+ /* Disable the cycle counter (write-1-to-clear) */
+ asm volatile("msr pmcntenclr_el0, %0" : : "r" (BIT_ULL(31)));
+ /* Disable user-mode access to counters. */
+ asm volatile("mrs %0, pmuserenr_el0" : "=r" (val));
+ val &= ~(BIT(2) | BIT(0));
+ asm volatile("msr pmuserenr_el0, %0" : : "r"(val));
+}
+
+static ssize_t
+enadis_pmccntr_el0_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct domain_sysfs *lsfs = container_of(attr, struct domain_sysfs,
+ pmccntr_el0);
+ struct device *dev = &lsfs->rdev->pdev->dev;
+ char tmp_buf[64];
+ long enable = 0;
+ char *tmp_ptr;
+
+ strlcpy(tmp_buf, buf, 64);
+ tmp_ptr = strim(tmp_buf);
+ if (kstrtol(tmp_ptr, 0, &enable)) {
+ dev_err(dev, "Invalid value, expected 1/0\n");
+ return -EIO;
+ }
+
+ if (enable)
+ on_each_cpu(enable_pmccntr_el0, NULL, 1);
+ else
+ on_each_cpu(disable_pmccntr_el0, NULL, 1);
+
+ return count;
+}
+
+static void check_pmccntr_el0(void *data)
+{
+ int *out = data;
+ u64 val;
+
+ asm volatile("mrs %0, pmuserenr_el0" : "=r" (val));
+ *out = *out & !!(val & (BIT(2) | BIT(0)));
+}
+
+static ssize_t
+enadis_pmccntr_el0_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ int out = 1;
+
+ on_each_cpu(check_pmccntr_el0, &out, 1);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", out);
+}
+
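+/*
+ * Create the domain management sysfs interface under the PF's PCI device
+ * directory: a write-only create_domain/destroy_domain pair plus the
+ * pmccntr_el0 toggle. One domain slot is allocated per RVU VF.
+ */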
+int domain_sysfs_create(struct rm_dev *rm)
+{
+ struct domain_sysfs *lsfs;
+ int res = 0, i;
+
+ if (rm == NULL || rm->num_vfs == 0)
+ return -EINVAL;
+
+ lsfs = kzalloc(sizeof(*lsfs), GFP_KERNEL);
+ if (lsfs == NULL) {
+ res = -ENOMEM;
+ goto err_lsfs_alloc;
+ }
+
+ INIT_LIST_HEAD(&lsfs->ports);
+ lsfs->rdev = rm;
+ lsfs->domains_len = rm->num_vfs;
+ lsfs->domains =
+ kcalloc(lsfs->domains_len, sizeof(struct domain), GFP_KERNEL);
+ if (lsfs->domains == NULL) {
+ res = -ENOMEM;
+ goto err_domains_alloc;
+ }
+ for (i = 0; i < lsfs->domains_len; i++)
+ lsfs->domains[i].rvf = &rm->vf_info[i];
+
+ lsfs->create_domain.attr.name = "create_domain";
+ lsfs->create_domain.attr.mode = 0200;
+ lsfs->create_domain.store = create_domain_store;
+ res = sysfs_create_file(&rm->pdev->dev.kobj, &lsfs->create_domain.attr);
+ if (res)
+ goto err_create_domain;
+
+ lsfs->destroy_domain.attr.name = "destroy_domain";
+ lsfs->destroy_domain.attr.mode = 0200;
+ lsfs->destroy_domain.store = destroy_domain_store;
+ res = sysfs_create_file(&rm->pdev->dev.kobj,
+ &lsfs->destroy_domain.attr);
+ if (res)
+ goto err_destroy_domain;
+
+ lsfs->pmccntr_el0.attr.name = "pmccntr_el0";
+ lsfs->pmccntr_el0.attr.mode = 0644;
+ lsfs->pmccntr_el0.show = enadis_pmccntr_el0_show;
+ lsfs->pmccntr_el0.store = enadis_pmccntr_el0_store;
+ res = sysfs_create_file(&rm->pdev->dev.kobj, &lsfs->pmccntr_el0.attr);
+ if (res)
+ goto err_pmccntr_el0;
+
+ lsfs->parent = &rm->pdev->dev.kobj;
+
+ res = dpivf_sysfs_create(lsfs);
+ if (res)
+ goto err_dpivf_sysfs_create;
+
+ mutex_lock(&domain_sysfs_lock);
+ list_add_tail(&lsfs->list, &domain_sysfs_list);
+ mutex_unlock(&domain_sysfs_lock);
+
+ return 0;
+
+err_dpivf_sysfs_create:
+ sysfs_remove_file(&rm->pdev->dev.kobj, &lsfs->pmccntr_el0.attr);
+err_pmccntr_el0:
+ sysfs_remove_file(&rm->pdev->dev.kobj, &lsfs->destroy_domain.attr);
+err_destroy_domain:
+ sysfs_remove_file(&rm->pdev->dev.kobj, &lsfs->create_domain.attr);
+err_create_domain:
+ kfree(lsfs->domains);
+err_domains_alloc:
+ kfree(lsfs);
+err_lsfs_alloc:
+ return res;
+}
+
+void domain_sysfs_destroy(struct rm_dev *rm)
+{
+ struct list_head *pos, *n;
+ struct domain_sysfs *lsfs = NULL;
+
+ if (rm == NULL)
+ return;
+
+ mutex_lock(&domain_sysfs_lock);
+ list_for_each_safe(pos, n, &domain_sysfs_list) {
+ lsfs = container_of(pos, struct domain_sysfs, list);
+ if (lsfs->rdev == rm) {
+ list_del(pos);
+ break;
+ }
+ lsfs = NULL;
+ }
+ mutex_unlock(&domain_sysfs_lock);
+
+ if (lsfs == NULL)
+ return;
+
+ dpivf_sysfs_destroy(lsfs);
+
+ if (lsfs->pmccntr_el0.attr.mode != 0)
+ sysfs_remove_file(lsfs->parent, &lsfs->pmccntr_el0.attr);
+ if (lsfs->destroy_domain.attr.mode != 0)
+ sysfs_remove_file(lsfs->parent, &lsfs->destroy_domain.attr);
+ if (lsfs->create_domain.attr.mode != 0)
+ sysfs_remove_file(lsfs->parent, &lsfs->create_domain.attr);
+
+ kfree(lsfs->domains);
+ kfree(lsfs);
+}
diff --git a/drivers/soc/marvell/octeontx2-rm/domain_sysfs.h b/drivers/soc/marvell/octeontx2-rm/domain_sysfs.h
new file mode 100644
index 000000000000..d6d5dfbb97f1
--- /dev/null
+++ b/drivers/soc/marvell/octeontx2-rm/domain_sysfs.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* OcteonTX2 RVU Resource Manager driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef DOMAIN_SYSFS_H_
+#define DOMAIN_SYSFS_H_
+
+#include "otx2_rm.h"
+
+int domain_sysfs_create(struct rm_dev *rm);
+void domain_sysfs_destroy(struct rm_dev *rm);
+
+#endif /* DOMAIN_SYSFS_H_ */
diff --git a/drivers/soc/marvell/octeontx2-rm/otx2_rm.c b/drivers/soc/marvell/octeontx2-rm/otx2_rm.c
new file mode 100644
index 000000000000..beb7a07e5a71
--- /dev/null
+++ b/drivers/soc/marvell/octeontx2-rm/otx2_rm.c
@@ -0,0 +1,1841 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OcteonTX2 RVU Resource Manager driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/cdev.h>
+#include <linux/pci.h>
+#include <linux/sysfs.h>
+#include <asm/cputype.h>
+
+#include "otxrmcmd.h"
+#include "rvu_reg.h"
+#include "rvu_struct.h"
+#include "otx2_rm.h"
+
+#ifdef CONFIG_OCTEONTX2_RM_DOM_SYSFS
+#include "domain_sysfs.h"
+#endif
+
+#define DRV_NAME "octeontx2-rm"
+#define DRV_VERSION "1.1"
+#define CLS_NAME "otxrm"
+#define DEV_NAME "otxrm"
+#define DEV_MINOR 102
+
+#define PCI_DEVID_OCTEONTX2_SSO_PF 0xA0F9
+#define PCI_DEVID_OCTEONTX2_SSO_VF 0xA0FA
+
+/* OCTEONTX2 models */
+#define CPU_MODEL_98XX_PART 0xB1
+#define CPU_MODEL_96XX_PART 0xB2
+#define CPU_MODEL_95XX_PART 0xB3
+#define CPU_MODEL_95XXN_PART 0xB4
+#define CPU_MODEL_95XXMM_PART 0xB5
+
+/* PCI BAR info */
+#define PCI_AF_REG_BAR_NUM 0
+#define PCI_CFG_REG_BAR_NUM 2
+#define PCI_MBOX_BAR_NUM 4
+
+/* Misc */
+#define RVU_PF_INT_VEC_VFME_MAX 2
+
+/* Supported devices */
+static const struct pci_device_id rvu_rm_id_table[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_SSO_PF)},
+ {0} /* end of table */
+};
+
+MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_DESCRIPTION("Marvell OcteonTX2 SSO/SSOW/TIM/NPA PF Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, rvu_rm_id_table);
+
+static struct class *cls; /* Device class */
+static struct cdev cdev; /* Char device control */
+static dev_t devno; /* Char device major:minor */
+
+/* All PF devices found are stored here */
+static spinlock_t rm_lst_lock;
+LIST_HEAD(rm_dev_lst_head);
+
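+/*
+ * RVU CSR accessors: the BAR2 offset is composed as
+ * (block address << 20) | (slot << 12) | register offset.
+ */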
+static void rm_write64(struct rm_dev *rvu, u64 b, u64 s, u64 o, u64 v)
+{
+ writeq_relaxed(v, rvu->bar2 + ((b << 20) | (s << 12) | o));
+}
+
+static u64 rm_read64(struct rm_dev *rvu, u64 b, u64 s, u64 o)
+{
+ return readq_relaxed(rvu->bar2 + ((b << 20) | (s << 12) | o));
+}
+
+static void enable_af_mbox_int(struct pci_dev *pdev)
+{
+ struct rm_dev *rm;
+
+ rm = pci_get_drvdata(pdev);
+ /* Clear interrupt if any */
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_INT, 0x1ULL);
+
+ /* Now Enable AF-PF interrupt */
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_INT_ENA_W1S, 0x1ULL);
+}
+
+static void disable_af_mbox_int(struct pci_dev *pdev)
+{
+ struct rm_dev *rm;
+
+ rm = pci_get_drvdata(pdev);
+ /* Clear interrupt if any */
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_INT, 0x1ULL);
+
+ /* Now Disable AF-PF interrupt */
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_INT_ENA_W1C, 0x1ULL);
+}
+
+static int
+forward_to_mbox(struct rm_dev *rm, struct otx2_mbox *mbox, int devid,
+ struct mbox_msghdr *req, int size, const char *mstr)
+{
+ struct mbox_msghdr *msg;
+ int res = 0;
+
+ msg = otx2_mbox_alloc_msg(mbox, devid, size);
+ if (msg == NULL)
+ return -ENOMEM;
+
+ memcpy((uint8_t *)msg + sizeof(struct mbox_msghdr),
+ (uint8_t *)req + sizeof(struct mbox_msghdr), size);
+ msg->id = req->id;
+ msg->pcifunc = req->pcifunc;
+ msg->sig = req->sig;
+ msg->ver = req->ver;
+
+ otx2_mbox_msg_send(mbox, devid);
+ res = otx2_mbox_wait_for_rsp(mbox, devid);
+ if (res == -EIO) {
+ dev_err(&rm->pdev->dev, "RVU %s MBOX timeout.\n", mstr);
+ goto err;
+ } else if (res) {
+ dev_err(&rm->pdev->dev,
+ "RVU %s MBOX error: %d.\n", mstr, res);
+ res = -EFAULT;
+ goto err;
+ }
+
+ return 0;
+err:
+ return res;
+}
+
+static int
+handle_af_req(struct rm_dev *rm, struct rvu_vf *vf, struct mbox_msghdr *req,
+ int size)
+{
+ /* We expect a request here */
+ if (req->sig != OTX2_MBOX_REQ_SIG) {
+ dev_err(&rm->pdev->dev,
+ "UP MBOX msg with wrong signature %x, ID 0x%x\n",
+ req->sig, req->id);
+ return -EINVAL;
+ }
+
+ /* If handling notifications in the PF is required, add a switch-case here. */
+ return forward_to_mbox(rm, &rm->pfvf_mbox_up, vf->vf_id, req, size,
+ "VF");
+}
+
+static void rm_afpf_mbox_handler_up(struct work_struct *work)
+{
+ struct rm_dev *rm = container_of(work, struct rm_dev, mbox_wrk_up);
+ struct otx2_mbox *mbox = &rm->afpf_mbox_up;
+ struct otx2_mbox_dev *mdev = mbox->dev;
+ struct rvu_vf *vf;
+ struct mbox_hdr *req_hdr;
+ struct mbox_msghdr *msg;
+ int offset, id, err;
+
+ /* sync with mbox memory region */
+ smp_rmb();
+
+ /* Process received mbox messages */
+ req_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ offset = ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
+ for (id = 0; id < req_hdr->num_msgs; id++) {
+ msg = (struct mbox_msghdr *)(mdev->mbase + mbox->rx_start +
+ offset);
+
+ /* FUNC is 1-based for VFs; reject the PF itself and out-of-range
+ * functions before indexing vf_info.
+ */
+ if ((msg->pcifunc >> RVU_PFVF_PF_SHIFT) != rm->pf ||
+ (msg->pcifunc & RVU_PFVF_FUNC_MASK) == 0 ||
+ (msg->pcifunc & RVU_PFVF_FUNC_MASK) > rm->num_vfs)
+ err = -EINVAL;
+ else {
+ vf = &rm->vf_info[(msg->pcifunc &
+ RVU_PFVF_FUNC_MASK) - 1];
+ err = handle_af_req(rm, vf, msg,
+ msg->next_msgoff - offset);
+ }
+ if (err)
+ otx2_reply_invalid_msg(mbox, 0, msg->pcifunc, msg->id);
+ offset = msg->next_msgoff;
+ }
+
+ otx2_mbox_msg_send(mbox, 0);
+}
+
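+/*
+ * Bottom half for AF->PF mailbox responses. Replies destined for a VF are
+ * copied into that VF's PF->VF mailbox; replies for the PF itself update the
+ * cached PF id (READY) or the free resource counts (FREE_RSRC_CNT).
+ */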
+static void rm_afpf_mbox_handler(struct work_struct *work)
+{
+ struct rm_dev *rm;
+ struct mbox_hdr *rsp_hdr;
+ struct mbox_msghdr *msg, *fwd;
+ struct otx2_mbox *af_mbx, *vf_mbx;
+ struct free_rsrcs_rsp *rsp;
+ int offset, i, vf_id, size;
+ struct rvu_vf *vf;
+
+ /* Read latest mbox data */
+ smp_rmb();
+
+ rm = container_of(work, struct rm_dev, mbox_wrk);
+ af_mbx = &rm->afpf_mbox;
+ vf_mbx = &rm->pfvf_mbox;
+ rsp_hdr = (struct mbox_hdr *)(af_mbx->dev->mbase + af_mbx->rx_start);
+ if (rsp_hdr->num_msgs == 0)
+ return;
+ offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+
+ for (i = 0; i < rsp_hdr->num_msgs; i++) {
+ msg = (struct mbox_msghdr *)(af_mbx->dev->mbase +
+ af_mbx->rx_start + offset);
+ size = msg->next_msgoff - offset;
+
+ if (msg->id >= MBOX_MSG_MAX) {
+ dev_err(&rm->pdev->dev,
+ "MBOX msg with unknown ID 0x%x\n", msg->id);
+ goto end;
+ }
+
+ if (msg->sig != OTX2_MBOX_RSP_SIG) {
+ dev_err(&rm->pdev->dev,
+ "MBOX msg with wrong signature %x, ID 0x%x\n",
+ msg->sig, msg->id);
+ goto end;
+ }
+
+ vf_id = (msg->pcifunc & RVU_PFVF_FUNC_MASK);
+ if (vf_id > 0) {
+ if (vf_id > rm->num_vfs) {
+ dev_err(&rm->pdev->dev,
+ "MBOX msg to unknown VF: %d >= %d\n",
+ vf_id, rm->num_vfs);
+ goto end;
+ }
+ vf = &rm->vf_info[vf_id - 1];
+ /* Ignore stale responses and VFs in FLR. */
+ if (!vf->in_use || vf->got_flr)
+ goto end;
+ fwd = otx2_mbox_alloc_msg(vf_mbx, vf_id - 1, size);
+ if (!fwd) {
+ dev_err(&rm->pdev->dev,
+ "Forwarding to VF%d failed.\n", vf_id);
+ goto end;
+ }
+ memcpy((uint8_t *)fwd + sizeof(struct mbox_msghdr),
+ (uint8_t *)msg + sizeof(struct mbox_msghdr),
+ size);
+ fwd->id = msg->id;
+ fwd->pcifunc = msg->pcifunc;
+ fwd->sig = msg->sig;
+ fwd->ver = msg->ver;
+ fwd->rc = msg->rc;
+ } else {
+ if (msg->ver < OTX2_MBOX_VERSION) {
+ dev_err(&rm->pdev->dev,
+ "MBOX msg with version %04x != %04x\n",
+ msg->ver, OTX2_MBOX_VERSION);
+ goto end;
+ }
+
+ switch (msg->id) {
+ case MBOX_MSG_READY:
+ rm->pf = (msg->pcifunc >> RVU_PFVF_PF_SHIFT) &
+ RVU_PFVF_PF_MASK;
+ break;
+ case MBOX_MSG_FREE_RSRC_CNT:
+ rsp = (struct free_rsrcs_rsp *)msg;
+ memcpy(&rm->limits, msg, sizeof(*rsp));
+ break;
+ default:
+ dev_err(&rm->pdev->dev,
+ "Unsupported msg %d received.\n",
+ msg->id);
+ break;
+ }
+ }
+end:
+ offset = msg->next_msgoff;
+ af_mbx->dev->msgs_acked++;
+ }
+ otx2_mbox_reset(af_mbx, 0);
+}
+
+static int
+reply_free_rsrc_cnt(struct rm_dev *rm, struct rvu_vf *vf,
+ struct mbox_msghdr *req, int size)
+{
+ struct free_rsrcs_rsp *rsp;
+
+ rsp = (struct free_rsrcs_rsp *)otx2_mbox_alloc_msg(&rm->pfvf_mbox,
+ vf->vf_id,
+ sizeof(*rsp));
+ if (rsp == NULL)
+ return -ENOMEM;
+
+ rsp->hdr.id = MBOX_MSG_FREE_RSRC_CNT;
+ rsp->hdr.pcifunc = req->pcifunc;
+ rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
+ mutex_lock(&rm->lock);
+ rsp->sso = rm->vf_limits.sso->a[vf->vf_id].val;
+ rsp->ssow = rm->vf_limits.ssow->a[vf->vf_id].val;
+ rsp->npa = rm->vf_limits.npa->a[vf->vf_id].val;
+ rsp->cpt = rm->vf_limits.cpt->a[vf->vf_id].val;
+ rsp->tim = rm->vf_limits.tim->a[vf->vf_id].val;
+ rsp->nix = 0;
+ mutex_unlock(&rm->lock);
+ return 0;
+}
+
+static int
+check_attach_rsrcs_req(struct rm_dev *rm, struct rvu_vf *vf,
+ struct mbox_msghdr *req, int size)
+{
+ struct rsrc_attach *rsrc_req;
+
+ rsrc_req = (struct rsrc_attach *)req;
+ mutex_lock(&rm->lock);
+ if (rsrc_req->sso > rm->vf_limits.sso->a[vf->vf_id].val ||
+ rsrc_req->ssow > rm->vf_limits.ssow->a[vf->vf_id].val ||
+ rsrc_req->npalf > rm->vf_limits.npa->a[vf->vf_id].val ||
+ rsrc_req->timlfs > rm->vf_limits.tim->a[vf->vf_id].val ||
+ rsrc_req->cptlfs > rm->vf_limits.cpt->a[vf->vf_id].val ||
+ rsrc_req->nixlf > 0) {
+ dev_err(&rm->pdev->dev,
+ "Invalid ATTACH_RESOURCES request from %s\n",
+ dev_name(&vf->pdev->dev));
+ mutex_unlock(&rm->lock);
+ return -EINVAL;
+ }
+ mutex_unlock(&rm->lock);
+ return forward_to_mbox(rm, &rm->afpf_mbox, 0, req, size, "AF");
+}
+
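+/*
+ * Dispatch a single VF->PF mailbox request: READY marks the VF as in use
+ * before being forwarded to the AF, FREE_RSRC_CNT is answered locally from
+ * the per-VF limits, ATTACH_RESOURCES is validated against those limits
+ * before forwarding, and everything else is forwarded to the AF unchanged.
+ */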
+static int
+handle_vf_req(struct rm_dev *rm, struct rvu_vf *vf, struct mbox_msghdr *req,
+ int size)
+{
+ int err = 0;
+
+ /* Check if valid, if not reply with a invalid msg */
+ if (req->sig != OTX2_MBOX_REQ_SIG) {
+ dev_err(&rm->pdev->dev,
+ "VF MBOX msg with wrong signature %x, ID 0x%x\n",
+ req->sig, req->id);
+ return -EINVAL;
+ }
+
+ switch (req->id) {
+ case MBOX_MSG_READY:
+ if (req->ver < OTX2_MBOX_VERSION) {
+ dev_err(&rm->pdev->dev,
+ "VF MBOX msg with version %04x != %04x\n",
+ req->ver, OTX2_MBOX_VERSION);
+ return -EINVAL;
+ }
+ vf->in_use = true;
+ err = forward_to_mbox(rm, &rm->afpf_mbox, 0, req, size, "AF");
+ break;
+ case MBOX_MSG_FREE_RSRC_CNT:
+ if (req->ver < OTX2_MBOX_VERSION) {
+ dev_err(&rm->pdev->dev,
+ "VF MBOX msg with version %04x != %04x\n",
+ req->ver, OTX2_MBOX_VERSION);
+ return -EINVAL;
+ }
+ err = reply_free_rsrc_cnt(rm, vf, req, size);
+ break;
+ case MBOX_MSG_ATTACH_RESOURCES:
+ if (req->ver < OTX2_MBOX_VERSION) {
+ dev_err(&rm->pdev->dev,
+ "VF MBOX msg with version %04x != %04x\n",
+ req->ver, OTX2_MBOX_VERSION);
+ return -EINVAL;
+ }
+ err = check_attach_rsrcs_req(rm, vf, req, size);
+ break;
+ default:
+ err = forward_to_mbox(rm, &rm->afpf_mbox, 0, req, size, "AF");
+ break;
+ }
+
+ return err;
+}
+
+static int send_flr_msg(struct otx2_mbox *mbox, int dev_id, int pcifunc)
+{
+ struct msg_req *req;
+
+ req = (struct msg_req *)
+ otx2_mbox_alloc_msg(mbox, dev_id, sizeof(*req));
+ if (req == NULL)
+ return -ENOMEM;
+
+ req->hdr.pcifunc = pcifunc;
+ req->hdr.id = MBOX_MSG_VF_FLR;
+ req->hdr.sig = OTX2_MBOX_REQ_SIG;
+
+ otx2_mbox_msg_send(mbox, 0);
+
+ return 0;
+}
+
+static void rm_send_flr_msg(struct rm_dev *rm, struct rvu_vf *vf)
+{
+ int res, pcifunc;
+
+ pcifunc = (vf->rm->pf << RVU_PFVF_PF_SHIFT) |
+ ((vf->vf_id + 1) & RVU_PFVF_FUNC_MASK);
+
+ if (send_flr_msg(&rm->afpf_mbox, 0, pcifunc) != 0) {
+ dev_err(&rm->pdev->dev, "Sending FLR to AF failed\n");
+ return;
+ }
+
+ res = otx2_mbox_wait_for_rsp(&rm->afpf_mbox, 0);
+ if (res == -EIO) {
+ dev_err(&rm->pdev->dev, "RVU AF MBOX timeout.\n");
+ } else if (res) {
+ dev_err(&rm->pdev->dev,
+ "RVU MBOX error: %d.\n", res);
+ }
+}
+
+static void rm_send_flr_to_dpi(struct rm_dev *rm)
+{
+ /* TODO: DPI VFs need to be handled */
+}
+
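+/*
+ * VF FLR work: notify the AF, then quiesce the AF mailbox so that no stale
+ * responses are delivered, reset the VF's mailbox state and finally clear
+ * the FLR trigger-pending bit for that VF.
+ */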
+static void rm_pfvf_flr_handler(struct work_struct *work)
+{
+ struct rvu_vf *vf = container_of(work, struct rvu_vf, pfvf_flr_work);
+ struct rm_dev *rm = vf->rm;
+ struct otx2_mbox *mbox = &rm->pfvf_mbox;
+ int idx;
+
+ rm_send_flr_to_dpi(rm);
+ rm_send_flr_msg(rm, vf);
+
+ /* Disable interrupts from AF and wait for any pending
+ * responses to be handled for this VF and then reset the
+ * mailbox
+ */
+ disable_af_mbox_int(rm->pdev);
+ flush_workqueue(rm->afpf_mbox_wq);
+ otx2_mbox_reset(mbox, vf->vf_id);
+ vf->in_use = false;
+ vf->got_flr = false;
+ enable_af_mbox_int(rm->pdev);
+ idx = vf->vf_id / 64;
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFTRPENDX(idx),
+ BIT_ULL(vf->intr_idx));
+}
+
+static void rm_pfvf_mbox_handler_up(struct work_struct *work)
+{
+ struct rm_dev *rm;
+ struct mbox_hdr *rsp_hdr;
+ struct mbox_msghdr *msg, *fwd;
+ struct otx2_mbox *af_mbx, *vf_mbx;
+ int offset, i, size;
+ struct rvu_vf *vf;
+
+ /* Read latest mbox data */
+ smp_rmb();
+
+ vf = container_of(work, struct rvu_vf, mbox_wrk_up);
+ rm = vf->rm;
+ af_mbx = &rm->afpf_mbox;
+ vf_mbx = &rm->pfvf_mbox;
+ rsp_hdr = (struct mbox_hdr *)(vf_mbx->dev[vf->vf_id].mbase +
+ vf_mbx->rx_start);
+ if (rsp_hdr->num_msgs == 0)
+ return;
+ offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+
+ for (i = 0; i < rsp_hdr->num_msgs; i++) {
+ msg = (struct mbox_msghdr *)(vf_mbx->dev[vf->vf_id].mbase +
+ vf_mbx->rx_start + offset);
+ size = msg->next_msgoff - offset;
+
+ if (msg->sig != OTX2_MBOX_RSP_SIG) {
+ dev_err(&rm->pdev->dev,
+ "UP MBOX msg with wrong signature %x, ID 0x%x\n",
+ msg->sig, msg->id);
+ goto end;
+ }
+
+ /* Override the pcifunc with this PF/VF's identity */
+ msg->pcifunc = (rm->pf << RVU_PFVF_PF_SHIFT) | vf->vf_id;
+
+ fwd = otx2_mbox_alloc_msg(af_mbx, 0, size);
+ if (!fwd) {
+ dev_err(&rm->pdev->dev,
+ "UP Forwarding from VF%d to AF failed.\n",
+ vf->vf_id);
+ goto end;
+ }
+ memcpy((uint8_t *)fwd + sizeof(struct mbox_msghdr),
+ (uint8_t *)msg + sizeof(struct mbox_msghdr),
+ size);
+ fwd->id = msg->id;
+ fwd->pcifunc = msg->pcifunc;
+ fwd->sig = msg->sig;
+ fwd->ver = msg->ver;
+ fwd->rc = msg->rc;
+end:
+ offset = msg->next_msgoff;
+ vf_mbx->dev[vf->vf_id].msgs_acked++;
+ }
+ otx2_mbox_reset(vf_mbx, vf->vf_id);
+}
+
+static void rm_pfvf_mbox_handler(struct work_struct *work)
+{
+ struct rvu_vf *vf = container_of(work, struct rvu_vf, mbox_wrk);
+ struct rm_dev *rm = vf->rm;
+ struct otx2_mbox *mbox = &rm->pfvf_mbox;
+ struct otx2_mbox_dev *mdev = &mbox->dev[vf->vf_id];
+ struct mbox_hdr *req_hdr;
+ struct mbox_msghdr *msg;
+ int offset, id, err;
+
+ /* sync with mbox memory region */
+ smp_rmb();
+
+ /* Process received mbox messages */
+ req_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ offset = ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
+ for (id = 0; id < req_hdr->num_msgs; id++) {
+ msg = (struct mbox_msghdr *)(mdev->mbase + mbox->rx_start +
+ offset);
+
+ /* Set which VF sent this message based on mbox IRQ */
+ msg->pcifunc = ((u16)rm->pf << RVU_PFVF_PF_SHIFT) |
+ ((vf->vf_id + 1) & RVU_PFVF_FUNC_MASK);
+ err = handle_vf_req(rm, vf, msg, msg->next_msgoff - offset);
+ if (err)
+ otx2_reply_invalid_msg(mbox, vf->vf_id, msg->pcifunc,
+ msg->id);
+ offset = msg->next_msgoff;
+ }
+ /* Send mbox responses to VF */
+ if (mdev->num_msgs)
+ otx2_mbox_msg_send(mbox, vf->vf_id);
+}
+
+static irqreturn_t rm_af_pf_mbox_intr(int irq, void *arg)
+{
+ struct rm_dev *rm = (struct rm_dev *)arg;
+ struct mbox_hdr *hdr;
+ struct otx2_mbox *mbox;
+ struct otx2_mbox_dev *mdev;
+
+ /* Read latest mbox data */
+ smp_rmb();
+
+ mbox = &rm->afpf_mbox;
+ mdev = &mbox->dev[0];
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ /* Handle PF => AF channel response */
+ if (hdr->num_msgs)
+ queue_work(rm->afpf_mbox_wq, &rm->mbox_wrk);
+
+ mbox = &rm->afpf_mbox_up;
+ mdev = &mbox->dev[0];
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ /* Handle AF => PF request */
+ if (hdr->num_msgs)
+ queue_work(rm->afpf_mbox_wq, &rm->mbox_wrk_up);
+
+ /* Clear and ack the interrupt */
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_INT, 0x1ULL);
+
+ return IRQ_HANDLED;
+}
+
+static void __handle_vf_flr(struct rm_dev *rm, struct rvu_vf *vf_ptr)
+{
+ if (vf_ptr->in_use) {
+ /* Using the same MBOX workqueue here, so that we can
+ * synchronize with other VF->PF messages being forwarded to AF.
+ */
+ vf_ptr->got_flr = true;
+ queue_work(rm->pfvf_mbox_wq, &vf_ptr->pfvf_flr_work);
+ } else
+ rm_write64(rm, BLKADDR_RVUM, 0,
+ RVU_PF_VFTRPENDX(vf_ptr->vf_id / 64),
+ BIT_ULL(vf_ptr->intr_idx));
+}
+
+static irqreturn_t rm_pf_vf_flr_intr(int irq, void *arg)
+{
+ struct rm_dev *rm = (struct rm_dev *)arg;
+ u64 intr;
+ struct rvu_vf *vf_ptr;
+ int vf, i;
+
+ /* Check which VF FLR has been raised and process accordingly */
+ for (i = 0; i < 2; i++) {
+ /* Read the interrupt bits */
+ intr = rm_read64(rm, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(i));
+
+ for (vf = i * 64; vf < rm->num_vfs; vf++) {
+ vf_ptr = &rm->vf_info[vf];
+ if (intr & (1ULL << vf_ptr->intr_idx)) {
+ /* Clear the interrupts */
+ rm_write64(rm, BLKADDR_RVUM, 0,
+ RVU_PF_VFFLR_INTX(i),
+ BIT_ULL(vf_ptr->intr_idx));
+ __handle_vf_flr(rm, vf_ptr);
+ }
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+static void vfme_intr_clear(struct rm_dev *rm, int idx, uint64_t mask)
+{
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFME_INTX(idx), mask);
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFTRPENDX(idx), mask);
+}
+
+static irqreturn_t rm_pf_vf_me_intr(int irq, void *arg)
+{
+ struct rm_dev *rm = (struct rm_dev *)arg;
+ struct rvu_vf *vf;
+ u64 intr;
+ int i, vfi;
+
+ for (i = 0; i < RVU_PF_INT_VEC_VFME_MAX; i++) {
+ intr = rm_read64(rm, BLKADDR_RVUM, 0, RVU_PF_VFME_INTX(i));
+ for (vfi = i * 64; vfi < rm->num_vfs; vfi++) {
+ vf = &rm->vf_info[vfi];
+ if ((intr & (1ULL << vf->intr_idx)) == 0)
+ continue;
+ vfme_intr_clear(rm, i, BIT_ULL(vf->intr_idx));
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rm_pf_vf_mbox_intr(int irq, void *arg)
+{
+ struct rm_dev *rm = (struct rm_dev *)arg;
+ struct mbox_hdr *hdr;
+ struct otx2_mbox *mbox;
+ struct otx2_mbox_dev *mdev;
+ u64 intr;
+ struct rvu_vf *vf;
+ int i, vfi;
+
+ /* Check which VF has raised an interrupt and schedule corresponding
+ * workq to process the MBOX
+ */
+ for (i = 0; i < 2; i++) {
+ /* Read the interrupt bits */
+ intr = rm_read64(rm, BLKADDR_RVUM, 0, RVU_PF_VFPF_MBOX_INTX(i));
+
+ for (vfi = i * 64; vfi < rm->num_vfs; vfi++) {
+ vf = &rm->vf_info[vfi];
+ if ((intr & (1ULL << vf->intr_idx)) == 0)
+ continue;
+ mbox = &rm->pfvf_mbox;
+ mdev = &mbox->dev[vf->vf_id];
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ /* Handle VF => PF channel request */
+ if (hdr->num_msgs)
+ queue_work(rm->pfvf_mbox_wq, &vf->mbox_wrk);
+
+ mbox = &rm->pfvf_mbox_up;
+ mdev = &mbox->dev[vf->vf_id];
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ /* Handle PF => VF channel response */
+ if (hdr->num_msgs)
+ queue_work(rm->pfvf_mbox_wq, &vf->mbox_wrk_up);
+ /* Clear the interrupt */
+ rm_write64(rm, BLKADDR_RVUM, 0,
+ RVU_PF_VFPF_MBOX_INTX(i),
+ BIT_ULL(vf->intr_idx));
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+static int rm_register_flr_irq(struct pci_dev *pdev)
+{
+ struct rm_dev *rm;
+ int err, vec, i;
+
+ rm = pci_get_drvdata(pdev);
+
+ /* Register for VF FLR interrupts
+ * There are 2 vectors starting at index 0x0
+ */
+ for (vec = RVU_PF_INT_VEC_VFFLR0, i = 0;
+ vec + i <= RVU_PF_INT_VEC_VFFLR1; i++) {
+ sprintf(&rm->irq_names[(vec + i) * NAME_SIZE],
+ "PF%02d_VF_FLR_IRQ%d", pdev->devfn, i);
+ err = request_irq(pci_irq_vector(pdev, vec + i),
+ rm_pf_vf_flr_intr, 0,
+ &rm->irq_names[(vec + i) * NAME_SIZE], rm);
+ if (err) {
+ dev_err(&pdev->dev,
+ "request_irq() failed for PFVF FLR intr %d\n",
+ vec);
+ goto reg_fail;
+ }
+ rm->irq_allocated[vec + i] = true;
+ }
+ return 0;
+
+reg_fail:
+ return err;
+}
+
+static void rm_free_flr_irq(struct pci_dev *pdev)
+{
+ (void) pdev;
+ /* Nothing to do here; the IRQ itself is released in rm_free_irqs() */
+}
+
+static int rm_register_me_irq(struct pci_dev *pdev)
+{
+ struct rm_dev *rm;
+ int err, vec;
+
+ rm = pci_get_drvdata(pdev);
+ for (vec = RVU_PF_INT_VEC_VFME0; vec <= RVU_PF_INT_VEC_VFME1; vec++) {
+ sprintf(&rm->irq_names[vec * NAME_SIZE],
+ "PF%02d_VF_ME_IRQ%d", pdev->devfn, vec - 2);
+ err = request_irq(pci_irq_vector(pdev, vec),
+ rm_pf_vf_me_intr, 0,
+ &rm->irq_names[vec * NAME_SIZE], rm);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to install PFVF ME intr\n");
+ goto reg_fail;
+ }
+ rm->irq_allocated[vec] = true;
+ }
+ return 0;
+
+reg_fail:
+ return err;
+}
+
+static void rm_free_me_irq(struct pci_dev *pdev)
+{
+ (void) pdev;
+ /* Nothing to do here.
+ * The function is used for maintaining consistent driver design.
+ */
+}
+
+static int rm_alloc_irqs(struct pci_dev *pdev)
+{
+ struct rm_dev *rm;
+ int err;
+
+ rm = pci_get_drvdata(pdev);
+
+ /* Get number of MSIX vector count and allocate vectors first */
+ rm->msix_count = pci_msix_vec_count(pdev);
+
+ err = pci_alloc_irq_vectors(pdev, rm->msix_count, rm->msix_count,
+ PCI_IRQ_MSIX);
+
+ if (err < 0) {
+ dev_err(&pdev->dev, "pci_alloc_irq_vectors() failed %d\n", err);
+ return err;
+ }
+
+ rm->irq_names = kmalloc_array(rm->msix_count, NAME_SIZE, GFP_KERNEL);
+ if (!rm->irq_names) {
+ err = -ENOMEM;
+ goto err_irq_names;
+ }
+
+ rm->irq_allocated = kcalloc(rm->msix_count, sizeof(bool), GFP_KERNEL);
+ if (!rm->irq_allocated) {
+ err = -ENOMEM;
+ goto err_irq_allocated;
+ }
+
+ return 0;
+
+err_irq_allocated:
+ kfree(rm->irq_names);
+ rm->irq_names = NULL;
+err_irq_names:
+ pci_free_irq_vectors(pdev);
+ rm->msix_count = 0;
+
+ return err;
+}
+
+static void rm_free_irqs(struct pci_dev *pdev)
+{
+ struct rm_dev *rm;
+ int irq;
+
+ rm = pci_get_drvdata(pdev);
+ for (irq = 0; irq < rm->msix_count; irq++) {
+ if (rm->irq_allocated[irq])
+ free_irq(pci_irq_vector(rm->pdev, irq), rm);
+ }
+
+ pci_free_irq_vectors(pdev);
+
+ kfree(rm->irq_names);
+ kfree(rm->irq_allocated);
+}
+
+static int rm_register_mbox_irq(struct pci_dev *pdev)
+{
+ struct rm_dev *rm;
+ int err, vec = RVU_PF_INT_VEC_VFPF_MBOX0, i;
+
+ rm = pci_get_drvdata(pdev);
+
+ /* Register PF-AF interrupt handler */
+ sprintf(&rm->irq_names[RVU_PF_INT_VEC_AFPF_MBOX * NAME_SIZE],
+ "PF%02d_AF_MBOX_IRQ", pdev->devfn);
+ err = request_irq(pci_irq_vector(pdev, RVU_PF_INT_VEC_AFPF_MBOX),
+ rm_af_pf_mbox_intr, 0,
+ &rm->irq_names[RVU_PF_INT_VEC_AFPF_MBOX * NAME_SIZE],
+ rm);
+ if (err) {
+ dev_err(&pdev->dev,
+ "request_irq() failed for AF_PF MSIX vector\n");
+ return err;
+ }
+ rm->irq_allocated[RVU_PF_INT_VEC_AFPF_MBOX] = true;
+
+ err = otx2_mbox_init(&rm->afpf_mbox, rm->af_mbx_base, pdev, rm->bar2,
+ MBOX_DIR_PFAF, 1);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize PF/AF MBOX\n");
+ goto error;
+ }
+ err = otx2_mbox_init(&rm->afpf_mbox_up, rm->af_mbx_base, pdev, rm->bar2,
+ MBOX_DIR_PFAF_UP, 1);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize PF/AF UP MBOX\n");
+ goto error;
+ }
+
+ /* Register for PF-VF mailbox interrupts
+ * There are 2 vectors starting at index 0x4
+ */
+ for (vec = RVU_PF_INT_VEC_VFPF_MBOX0, i = 0;
+ vec + i <= RVU_PF_INT_VEC_VFPF_MBOX1; i++) {
+ sprintf(&rm->irq_names[(vec + i) * NAME_SIZE],
+ "PF%02d_VF_MBOX_IRQ%d", pdev->devfn, i);
+ err = request_irq(pci_irq_vector(pdev, vec + i),
+ rm_pf_vf_mbox_intr, 0,
+ &rm->irq_names[(vec + i) * NAME_SIZE], rm);
+ if (err) {
+ dev_err(&pdev->dev,
+ "request_irq() failed for PFVF Mbox intr %d\n",
+ vec + i);
+ goto error;
+ }
+ rm->irq_allocated[vec + i] = true;
+ }
+
+ rm->afpf_mbox_wq = alloc_workqueue(
+ "rm_pfaf_mailbox", WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM, 1);
+ if (!rm->afpf_mbox_wq) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ INIT_WORK(&rm->mbox_wrk, rm_afpf_mbox_handler);
+ INIT_WORK(&rm->mbox_wrk_up, rm_afpf_mbox_handler_up);
+
+ return err;
+
+error:
+ if (rm->afpf_mbox_up.dev != NULL)
+ otx2_mbox_destroy(&rm->afpf_mbox_up);
+ if (rm->afpf_mbox.dev != NULL)
+ otx2_mbox_destroy(&rm->afpf_mbox);
+
+ return err;
+}
+
+static int rm_get_pcifunc(struct rm_dev *rm)
+{
+ struct msg_req *ready_req;
+ int res = 0;
+
+ ready_req = (struct msg_req *)
+ otx2_mbox_alloc_msg_rsp(&rm->afpf_mbox, 0, sizeof(*ready_req),
+ sizeof(struct ready_msg_rsp));
+ if (ready_req == NULL) {
+ dev_err(&rm->pdev->dev, "RVU MBOX failed to get message.\n");
+ return -EFAULT;
+ }
+
+ ready_req->hdr.id = MBOX_MSG_READY;
+ ready_req->hdr.sig = OTX2_MBOX_REQ_SIG;
+ otx2_mbox_msg_send(&rm->afpf_mbox, 0);
+ res = otx2_mbox_wait_for_rsp(&rm->afpf_mbox, 0);
+ if (res == -EIO) {
+ dev_err(&rm->pdev->dev, "RVU AF MBOX timeout.\n");
+ } else if (res) {
+ dev_err(&rm->pdev->dev, "RVU MBOX error: %d.\n", res);
+ res = -EFAULT;
+ }
+ return res;
+}
+
+static int rm_get_available_rsrcs(struct rm_dev *rm)
+{
+ struct mbox_msghdr *rsrc_req;
+ int res = 0;
+
+ rsrc_req = otx2_mbox_alloc_msg(&rm->afpf_mbox, 0, sizeof(*rsrc_req));
+ if (rsrc_req == NULL) {
+ dev_err(&rm->pdev->dev, "RVU MBOX failed to get message.\n");
+ return -EFAULT;
+ }
+ rsrc_req->id = MBOX_MSG_FREE_RSRC_CNT;
+ rsrc_req->sig = OTX2_MBOX_REQ_SIG;
+ rsrc_req->pcifunc = RVU_PFFUNC(rm->pf, 0);
+ otx2_mbox_msg_send(&rm->afpf_mbox, 0);
+ res = otx2_mbox_wait_for_rsp(&rm->afpf_mbox, 0);
+ if (res == -EIO) {
+ dev_err(&rm->pdev->dev, "RVU AF MBOX timeout.\n");
+ } else if (res) {
+ dev_err(&rm->pdev->dev,
+ "RVU MBOX error: %d.\n", res);
+ res = -EFAULT;
+ }
+ return res;
+}
+
+static void rm_afpf_mbox_term(struct pci_dev *pdev)
+{
+ struct rm_dev *rm = pci_get_drvdata(pdev);
+
+ flush_workqueue(rm->afpf_mbox_wq);
+ destroy_workqueue(rm->afpf_mbox_wq);
+ otx2_mbox_destroy(&rm->afpf_mbox);
+ otx2_mbox_destroy(&rm->afpf_mbox_up);
+}
+
+static ssize_t vf_in_use_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct rvu_vf *vf = container_of(attr, struct rvu_vf, in_use_attr);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", vf->in_use);
+}
+
+static void vf_sysfs_destroy(struct pci_dev *pdev)
+{
+ struct rm_dev *rm;
+ struct rvu_vf *vf;
+ int i;
+
+ rm = pci_get_drvdata(pdev);
+
+ quotas_free(rm->vf_limits.sso);
+ quotas_free(rm->vf_limits.ssow);
+ quotas_free(rm->vf_limits.npa);
+ quotas_free(rm->vf_limits.cpt);
+ quotas_free(rm->vf_limits.tim);
+ rm->vf_limits.sso = NULL;
+ rm->vf_limits.ssow = NULL;
+ rm->vf_limits.npa = NULL;
+ rm->vf_limits.cpt = NULL;
+ rm->vf_limits.tim = NULL;
+
+ for (i = 0; i < rm->num_vfs; i++) {
+ vf = &rm->vf_info[i];
+ if (vf->limits_kobj == NULL)
+ continue;
+ if (vf->in_use_attr.attr.mode != 0) {
+ sysfs_remove_file(&vf->pdev->dev.kobj,
+ &vf->in_use_attr.attr);
+ vf->in_use_attr.attr.mode = 0;
+ }
+ kobject_del(vf->limits_kobj);
+ vf->limits_kobj = NULL;
+ pci_dev_put(vf->pdev);
+ vf->pdev = NULL;
+ }
+}
+
+static int check_vf_in_use(void *arg, struct quota *quota, int new_val)
+{
+ struct rvu_vf *vf = arg;
+
+ if (vf->in_use) {
+ dev_err(quota->dev, "Can't modify limits, device is in use.\n");
+ return 1;
+ }
+ return 0;
+}
+
+static struct quota_ops vf_limit_ops = {
+ .pre_store = check_vf_in_use,
+};
+
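+/*
+ * For every SSO VF belonging to this PF, create a "limits" kobject with one
+ * quota file per resource type (sso, ssow, npa, cpt, tim) and an "in_use"
+ * attribute. The quota files let the administrator cap what each VF may
+ * attach; changes are rejected while the VF is in use (see check_vf_in_use).
+ */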
+static int vf_sysfs_create(struct pci_dev *pdev)
+{
+ struct rm_dev *rm;
+ struct pci_dev *vdev;
+ struct rvu_vf *vf;
+ int err, i;
+
+ vdev = NULL;
+ vf = NULL;
+ rm = pci_get_drvdata(pdev);
+ err = 0;
+ i = 0;
+
+ /* Create limit structures for all resource types */
+ rm->vf_limits.sso = quotas_alloc(rm->num_vfs, rm->limits.sso,
+ rm->limits.sso, 0, &rm->lock,
+ &vf_limit_ops);
+ if (rm->vf_limits.sso == NULL) {
+ dev_err(&pdev->dev,
+ "Failed to allocate sso limits structures.\n");
+ err = -EFAULT;
+ goto error;
+ }
+ rm->vf_limits.ssow = quotas_alloc(rm->num_vfs, rm->limits.ssow,
+ rm->limits.ssow, 0, &rm->lock,
+ &vf_limit_ops);
+ if (rm->vf_limits.ssow == NULL) {
+ dev_err(&pdev->dev,
+ "Failed to allocate ssow limits structures.\n");
+ err = -EFAULT;
+ goto error;
+ }
+ /* AF currently reports only 0-1 for the PF but there are more free LFs.
+ * Until we implement proper limits in AF, use max num_vfs in total.
+ */
+ rm->vf_limits.npa = quotas_alloc(rm->num_vfs, 1, rm->num_vfs, 0,
+ &rm->lock, &vf_limit_ops);
+ if (rm->vf_limits.npa == NULL) {
+ dev_err(&pdev->dev,
+ "Failed to allocate npa limits structures.\n");
+ err = -EFAULT;
+ goto error;
+ }
+ rm->vf_limits.cpt = quotas_alloc(rm->num_vfs, rm->limits.cpt,
+ rm->limits.cpt, 0, &rm->lock,
+ &vf_limit_ops);
+ if (rm->vf_limits.cpt == NULL) {
+ dev_err(&pdev->dev,
+ "Failed to allocate cpt limits structures.\n");
+ err = -EFAULT;
+ goto error;
+ }
+ rm->vf_limits.tim = quotas_alloc(rm->num_vfs, rm->limits.tim,
+ rm->limits.tim, 0, &rm->lock,
+ &vf_limit_ops);
+ if (rm->vf_limits.tim == NULL) {
+ dev_err(&pdev->dev,
+ "Failed to allocate tim limits structures.\n");
+ err = -EFAULT;
+ goto error;
+ }
+
+ /* loop through all the VFs and create sysfs entries for them */
+ while ((vdev = pci_get_device(pdev->vendor, PCI_DEVID_OCTEONTX2_SSO_VF,
+ vdev))) {
+ if (!vdev->is_virtfn || (vdev->physfn != pdev))
+ continue;
+ vf = &rm->vf_info[i];
+ vf->pdev = pci_dev_get(vdev);
+ vf->limits_kobj = kobject_create_and_add("limits",
+ &vdev->dev.kobj);
+ if (vf->limits_kobj == NULL) {
+ err = -ENOMEM;
+ goto error;
+ }
+ if (quota_sysfs_create("sso", vf->limits_kobj, &vdev->dev,
+ &rm->vf_limits.sso->a[i], vf) != 0) {
+ dev_err(&pdev->dev,
+ "Failed to create sso limits sysfs for %s\n",
+ pci_name(vdev));
+ err = -EFAULT;
+ goto error;
+ }
+ if (quota_sysfs_create("ssow", vf->limits_kobj, &vdev->dev,
+ &rm->vf_limits.ssow->a[i], vf) != 0) {
+ dev_err(&pdev->dev,
+ "Failed to create ssow limits sysfs for %s\n",
+ pci_name(vdev));
+ err = -EFAULT;
+ goto error;
+ }
+ if (quota_sysfs_create("npa", vf->limits_kobj, &vdev->dev,
+ &rm->vf_limits.npa->a[i], vf) != 0) {
+ dev_err(&pdev->dev,
+ "Failed to create npa limits sysfs for %s\n",
+ pci_name(vdev));
+ err = -EFAULT;
+ goto error;
+ }
+ if (quota_sysfs_create("cpt", vf->limits_kobj, &vdev->dev,
+ &rm->vf_limits.cpt->a[i], vf) != 0) {
+ dev_err(&pdev->dev,
+ "Failed to create cpt limits sysfs for %s\n",
+ pci_name(vdev));
+ err = -EFAULT;
+ goto error;
+ }
+ if (quota_sysfs_create("tim", vf->limits_kobj, &vdev->dev,
+ &rm->vf_limits.tim->a[i], vf) != 0) {
+ dev_err(&pdev->dev,
+ "Failed to create tim limits sysfs for %s\n",
+ pci_name(vdev));
+ err = -EFAULT;
+ goto error;
+ }
+
+ vf->in_use_attr.show = vf_in_use_show;
+ vf->in_use_attr.attr.name = "in_use";
+ vf->in_use_attr.attr.mode = 0444;
+ sysfs_attr_init(&vf->in_use_attr.attr);
+ if (sysfs_create_file(&vdev->dev.kobj, &vf->in_use_attr.attr)) {
+ dev_err(&pdev->dev,
+ "Failed to create in_use sysfs entry for %s\n",
+ pci_name(vdev));
+ err = -EFAULT;
+ goto error;
+ }
+ i++;
+ }
+
+ return 0;
+error:
+ vf_sysfs_destroy(pdev);
+ return err;
+}
+
+static int rm_check_pf_usable(struct rm_dev *rm)
+{
+ u64 rev;
+
+ rev = rm_read64(rm, BLKADDR_RVUM, 0,
+ RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_RVUM));
+ rev = (rev >> 12) & 0xFF;
+ /* Check if AF has setup revision for RVUM block,
+ * otherwise this driver probe should be deferred
+ * until AF driver comes up.
+ */
+ if (!rev) {
+ dev_warn(&rm->pdev->dev,
+ "AF is not initialized, deferring probe\n");
+ return -EPROBE_DEFER;
+ }
+ return 0;
+}
+
+static int rm_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ struct rm_dev *rm;
+ int err;
+
+ rm = devm_kzalloc(dev, sizeof(struct rm_dev), GFP_KERNEL);
+ if (rm == NULL)
+ return -ENOMEM;
+
+ rm->pdev = pdev;
+ pci_set_drvdata(pdev, rm);
+
+ mutex_init(&rm->lock);
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ goto enable_failed;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ goto map_failed;
+ }
+
+ if (pci_sriov_get_totalvfs(pdev) <= 0) {
+ err = -ENODEV;
+ goto set_mask_failed;
+ }
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "Unable to set DMA mask\n");
+ goto set_mask_failed;
+ }
+
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "Unable to set DMA mask\n");
+ goto set_mask_failed;
+ }
+
+ pci_set_master(pdev);
+
+ /* CSR Space mapping */
+ rm->bar2 = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM,
+ pci_resource_len(pdev, PCI_CFG_REG_BAR_NUM));
+ if (!rm->bar2) {
+ dev_err(&pdev->dev, "Unable to map BAR2\n");
+ err = -ENODEV;
+ goto set_mask_failed;
+ }
+
+ err = rm_check_pf_usable(rm);
+ if (err)
+ goto pf_unusable;
+
+ /* Map PF-AF mailbox memory */
+ rm->af_mbx_base = ioremap_wc(pci_resource_start(pdev, PCI_MBOX_BAR_NUM),
+ pci_resource_len(pdev, PCI_MBOX_BAR_NUM));
+ if (!rm->af_mbx_base) {
+ dev_err(&pdev->dev, "Unable to map BAR4\n");
+ err = -ENODEV;
+ goto pf_unusable;
+ }
+
+ /* Request IRQ for PF-VF mailbox here - TBD: check if this can be moved
+ * to sriov enable function
+ */
+ if (rm_alloc_irqs(pdev)) {
+ dev_err(&pdev->dev,
+ "Unable to allocate MSIX Interrupt vectors\n");
+ err = -ENODEV;
+ goto alloc_irqs_failed;
+ }
+
+ if (rm_register_mbox_irq(pdev) != 0) {
+ dev_err(&pdev->dev,
+ "Unable to allocate MBOX Interrupt vectors\n");
+ err = -ENODEV;
+ goto reg_mbox_irq_failed;
+ }
+
+ if (rm_register_flr_irq(pdev) != 0) {
+ dev_err(&pdev->dev,
+ "Unable to allocate FLR Interrupt vectors\n");
+ err = -ENODEV;
+ goto reg_flr_irq_failed;
+ }
+
+ if (rm_register_me_irq(pdev) != 0) {
+ dev_err(&pdev->dev,
+ "Unable to allocate ME Interrupt vectors\n");
+ err = -ENODEV;
+ goto reg_me_irq_failed;
+ }
+
+ enable_af_mbox_int(pdev);
+
+ if (rm_get_pcifunc(rm)) {
+ dev_err(&pdev->dev,
+ "Failed to retrieve pcifunc from AF\n");
+ err = -ENODEV;
+ goto get_pcifunc_failed;
+ }
+
+ /* Add to global list of PFs found */
+ spin_lock(&rm_lst_lock);
+ list_add(&rm->list, &rm_dev_lst_head);
+ spin_unlock(&rm_lst_lock);
+
+ return 0;
+
+get_pcifunc_failed:
+ disable_af_mbox_int(pdev);
+ /* The AF mailbox is torn down once, further down this unwind path */
+ rm_free_me_irq(pdev);
+reg_me_irq_failed:
+ rm_free_flr_irq(pdev);
+reg_flr_irq_failed:
+ rm_afpf_mbox_term(pdev);
+reg_mbox_irq_failed:
+ rm_free_irqs(pdev);
+alloc_irqs_failed:
+ iounmap(rm->af_mbx_base);
+pf_unusable:
+ pcim_iounmap(pdev, rm->bar2);
+set_mask_failed:
+ pci_release_regions(pdev);
+map_failed:
+ pci_disable_device(pdev);
+enable_failed:
+ pci_set_drvdata(pdev, NULL);
+ devm_kfree(dev, rm);
+ return err;
+}
+
+static void enable_vf_flr_int(struct pci_dev *pdev)
+{
+ struct rm_dev *rm;
+ int ena_bits;
+
+ rm = pci_get_drvdata(pdev);
+ /* Clear any pending interrupts */
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFTRPENDX(0), ~0x0ULL);
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(0), ~0x0ULL);
+
+ if (rm->num_vfs > 64) { /* For VF 64 to 127(MAX) */
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFTRPENDX(1), ~0x0ULL);
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(1), ~0x0ULL);
+ }
+
+ /* Enable for the first 64 VFs - up to the number of VFs enabled */
+ ena_bits = ((rm->num_vfs - 1) % 64);
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INT_ENA_W1SX(0),
+ GENMASK_ULL(ena_bits, 0));
+
+ if (rm->num_vfs > 64) { /* For VF 64 to 127(MAX) */
+ /* Enable interrupts for VFs 64 to 127 */
+ ena_bits = rm->num_vfs - 64 - 1;
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INT_ENA_W1SX(1),
+ GENMASK_ULL(ena_bits, 0));
+ }
+}
+
+static void disable_vf_flr_int(struct pci_dev *pdev)
+{
+ struct rm_dev *rm;
+ int ena_bits;
+ u64 intr;
+
+ rm = pci_get_drvdata(pdev);
+ /* clear any pending interrupt */
+
+ intr = rm_read64(rm, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(0));
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(0), intr);
+ intr = rm_read64(rm, BLKADDR_RVUM, 0, RVU_PF_VFTRPENDX(0));
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFTRPENDX(0), intr);
+
+ if (rm->num_vfs > 64) { /* For VF 64 to 127(MAX) */
+ intr = rm_read64(rm, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(1));
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(1), intr);
+ intr = rm_read64(rm, BLKADDR_RVUM, 0, RVU_PF_VFTRPENDX(1));
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFTRPENDX(1), intr);
+ }
+
+ /* Disable for the first 64 VFs - up to the number of VFs enabled */
+ ena_bits = ((rm->num_vfs - 1) % 64);
+
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INT_ENA_W1CX(0),
+ GENMASK_ULL(ena_bits, 0));
+
+ if (rm->num_vfs > 64) { /* For VF 64 to 127(MAX) */
+ /* Disable interrupts for VFs 64 to 127 */
+ ena_bits = rm->num_vfs - 64 - 1;
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INT_ENA_W1CX(1),
+ GENMASK_ULL(ena_bits, 0));
+ }
+}
+
+static void enable_vf_me_int(struct pci_dev *pdev)
+{
+ struct rm_dev *rm;
+ int i, ena_bits;
+
+ rm = pci_get_drvdata(pdev);
+ /* Clear any pending interrupts */
+ for (i = 0; i < RVU_PF_INT_VEC_VFME_MAX; i++) {
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFTRPENDX(i), ~0x0ULL);
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFME_INTX(i), ~0x0ULL);
+ }
+ /* Enable interrupts */
+ ena_bits = GENMASK_ULL((rm->num_vfs - 1) % 64, 0);
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFME_INT_ENA_W1SX(0), ena_bits);
+
+ if (rm->num_vfs > 64) {
+ ena_bits = GENMASK_ULL(rm->num_vfs - 64 - 1, 0);
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFME_INT_ENA_W1SX(1),
+ ena_bits);
+ }
+}
+
+static void disable_vf_me_int(struct pci_dev *pdev)
+{
+ struct rm_dev *rm;
+ int ena_bits;
+ u64 intr;
+ int i;
+
+ rm = pci_get_drvdata(pdev);
+
+ /* Clear pending interrupt */
+ for (i = 0; i < RVU_PF_INT_VEC_VFME_MAX; i++) {
+ intr = rm_read64(rm, BLKADDR_RVUM, 0, RVU_PF_VFME_INTX(i));
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFME_INTX(i), intr);
+ intr = rm_read64(rm, BLKADDR_RVUM, 0, RVU_PF_VFTRPENDX(i));
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFTRPENDX(i), intr);
+ }
+ /* Disable interrupts */
+ ena_bits = GENMASK_ULL((rm->num_vfs - 1) % 64, 0);
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFME_INT_ENA_W1CX(0), ena_bits);
+
+ if (rm->num_vfs > 64) {
+ ena_bits = GENMASK_ULL(rm->num_vfs - 64 - 1, 0);
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFME_INT_ENA_W1CX(1),
+ ena_bits);
+ }
+}
+
+static void enable_vf_mbox_int(struct pci_dev *pdev)
+{
+ struct rm_dev *rm;
+ int ena_bits;
+
+ rm = pci_get_drvdata(pdev);
+ /* Clear any pending interrupts */
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFPF_MBOX_INTX(0), ~0x0ULL);
+
+ if (rm->num_vfs > 64) { /* For VF 64 to 127(MAX) */
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFPF_MBOX_INTX(1),
+ ~0x0ULL);
+ }
+
+ /* Enable for the first 64 VFs - up to the number of VFs enabled */
+ ena_bits = ((rm->num_vfs - 1) % 64);
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0),
+ GENMASK_ULL(ena_bits, 0));
+
+ if (rm->num_vfs > 64) { /* For VF 64 to 127(MAX) */
+ /* Enable interrupts for VFs 64 to 127 */
+ ena_bits = rm->num_vfs - 64 - 1;
+ rm_write64(rm, BLKADDR_RVUM, 0,
+ RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
+ GENMASK_ULL(ena_bits, 0));
+ }
+}
+
+static void disable_vf_mbox_int(struct pci_dev *pdev)
+{
+ struct rm_dev *rm;
+ int ena_bits;
+ u64 intr;
+
+ rm = pci_get_drvdata(pdev);
+ /* clear any pending interrupt */
+
+ intr = rm_read64(rm, BLKADDR_RVUM, 0, RVU_PF_VFPF_MBOX_INTX(0));
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFPF_MBOX_INTX(0), intr);
+
+	if (rm->num_vfs > 64) { /* For VFs 64 to 127 (max) */
+ intr = rm_read64(rm, BLKADDR_RVUM, 0, RVU_PF_VFPF_MBOX_INTX(1));
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFPF_MBOX_INTX(1), intr);
+ }
+
+	/* Disable mailbox interrupts for the first 64 VFs, up to the number enabled */
+	ena_bits = ((rm->num_vfs - 1) % 64);
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0),
+ GENMASK_ULL(ena_bits, 0));
+
+	if (rm->num_vfs > 64) { /* For VFs 64 to 127 (max) */
+		/* Disable mailbox interrupts for VFs 64 to 127 */
+		ena_bits = rm->num_vfs - 64 - 1;
+ rm_write64(rm, BLKADDR_RVUM, 0,
+ RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1),
+ GENMASK_ULL(ena_bits, 0));
+ }
+}
+
+static int __sriov_disable(struct pci_dev *pdev)
+{
+ struct rm_dev *rm;
+
+ rm = pci_get_drvdata(pdev);
+ if (pci_vfs_assigned(pdev)) {
+		dev_err(&pdev->dev, "Disabling VFs while VFs are assigned\n");
+ dev_err(&pdev->dev, "VFs will not be freed\n");
+ return -EPERM;
+ }
+
+ disable_vf_me_int(pdev);
+ disable_vf_flr_int(pdev);
+ disable_vf_mbox_int(pdev);
+
+#ifdef CONFIG_OCTEONTX2_RM_DOM_SYSFS
+ domain_sysfs_destroy(rm);
+#endif
+ vf_sysfs_destroy(pdev);
+
+ if (rm->pfvf_mbox_wq) {
+ flush_workqueue(rm->pfvf_mbox_wq);
+ destroy_workqueue(rm->pfvf_mbox_wq);
+ rm->pfvf_mbox_wq = NULL;
+ }
+ if (rm->pfvf_mbx_base) {
+ iounmap(rm->pfvf_mbx_base);
+ rm->pfvf_mbx_base = NULL;
+ }
+
+ otx2_mbox_destroy(&rm->pfvf_mbox);
+ otx2_mbox_destroy(&rm->pfvf_mbox_up);
+
+ pci_disable_sriov(pdev);
+
+ kfree(rm->vf_info);
+ rm->vf_info = NULL;
+
+ return 0;
+}
+
+static int __sriov_enable(struct pci_dev *pdev, int num_vfs)
+{
+ int curr_vfs, vf = 0;
+ int err;
+ struct rm_dev *rm;
+ struct rvu_vf *vf_ptr;
+ u64 pf_vf_mbox_base;
+
+ curr_vfs = pci_num_vf(pdev);
+ if (!curr_vfs && !num_vfs)
+ return -EINVAL;
+
+ if (curr_vfs) {
+		dev_err(&pdev->dev,
+			"Virtual Functions are already enabled on this device\n");
+ return -EINVAL;
+ }
+ if (num_vfs > RM_MAX_VFS)
+ num_vfs = RM_MAX_VFS;
+
+ rm = pci_get_drvdata(pdev);
+
+ if (rm_get_available_rsrcs(rm)) {
+ dev_err(&pdev->dev, "Failed to get resource limits.\n");
+ return -EFAULT;
+ }
+
+ rm->vf_info = kcalloc(num_vfs, sizeof(struct rvu_vf), GFP_KERNEL);
+ if (rm->vf_info == NULL)
+ return -ENOMEM;
+
+ err = pci_enable_sriov(pdev, num_vfs);
+ if (err) {
+		dev_err(&pdev->dev, "Failed to enable SRIOV VFs: %d\n", err);
+ goto err_enable_sriov;
+ }
+
+ rm->num_vfs = num_vfs;
+
+ /* Map PF-VF mailbox memory */
+ pf_vf_mbox_base = (u64)rm->bar2 + RVU_PF_VF_BAR4_ADDR;
+ pf_vf_mbox_base = readq((void __iomem *)(unsigned long)pf_vf_mbox_base);
+ if (!pf_vf_mbox_base) {
+ dev_err(&pdev->dev, "PF-VF Mailbox address not configured\n");
+ err = -ENOMEM;
+ goto err_mbox_mem_map;
+ }
+ rm->pfvf_mbx_base = ioremap_wc(pf_vf_mbox_base, MBOX_SIZE * num_vfs);
+ if (!rm->pfvf_mbx_base) {
+ dev_err(&pdev->dev,
+ "Mapping of PF-VF mailbox address failed\n");
+ err = -ENOMEM;
+ goto err_mbox_mem_map;
+ }
+ err = otx2_mbox_init(&rm->pfvf_mbox, rm->pfvf_mbx_base, pdev, rm->bar2,
+ MBOX_DIR_PFVF, num_vfs);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Failed to initialize PF/VF MBOX for %d VFs\n",
+ num_vfs);
+ goto err_mbox_init;
+ }
+
+ err = otx2_mbox_init(&rm->pfvf_mbox_up, rm->pfvf_mbx_base, pdev,
+ rm->bar2, MBOX_DIR_PFVF_UP, num_vfs);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Failed to initialize PF/VF MBOX UP for %d VFs\n",
+ num_vfs);
+ goto err_mbox_up_init;
+ }
+
+ /* Allocate a single workqueue for VF/PF mailbox because access to
+ * AF/PF mailbox has to be synchronized.
+ */
+ rm->pfvf_mbox_wq =
+ alloc_workqueue("rm_pfvf_mailbox",
+ WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM, 1);
+ if (rm->pfvf_mbox_wq == NULL) {
+ dev_err(&pdev->dev,
+ "Workqueue allocation failed for PF-VF MBOX\n");
+ err = -ENOMEM;
+ goto err_workqueue_alloc;
+ }
+
+ for (vf = 0; vf < num_vfs; vf++) {
+ vf_ptr = &rm->vf_info[vf];
+ vf_ptr->vf_id = vf;
+ vf_ptr->rm = (void *)rm;
+ vf_ptr->intr_idx = vf % 64;
+ INIT_WORK(&vf_ptr->mbox_wrk, rm_pfvf_mbox_handler);
+ INIT_WORK(&vf_ptr->mbox_wrk_up, rm_pfvf_mbox_handler_up);
+ INIT_WORK(&vf_ptr->pfvf_flr_work, rm_pfvf_flr_handler);
+ }
+
+ err = vf_sysfs_create(pdev);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Failed to initialize VF sysfs entries. Err=%d\n",
+ err);
+ err = -EFAULT;
+ goto err_vf_sysfs_create;
+ }
+
+#ifdef CONFIG_OCTEONTX2_RM_DOM_SYSFS
+ err = domain_sysfs_create(rm);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to create RM domain sysfs\n");
+ err = -EFAULT;
+ goto err_domain_sysfs_create;
+ }
+#endif
+
+ enable_vf_mbox_int(pdev);
+ enable_vf_flr_int(pdev);
+ enable_vf_me_int(pdev);
+ return num_vfs;
+
+#ifdef CONFIG_OCTEONTX2_RM_DOM_SYSFS
+err_domain_sysfs_create:
+ vf_sysfs_destroy(pdev);
+#endif
+err_vf_sysfs_create:
+	destroy_workqueue(rm->pfvf_mbox_wq);
+err_workqueue_alloc:
+ if (rm->pfvf_mbox_up.dev != NULL)
+ otx2_mbox_destroy(&rm->pfvf_mbox_up);
+err_mbox_up_init:
+ if (rm->pfvf_mbox.dev != NULL)
+ otx2_mbox_destroy(&rm->pfvf_mbox);
+err_mbox_init:
+ iounmap(rm->pfvf_mbx_base);
+err_mbox_mem_map:
+ pci_disable_sriov(pdev);
+err_enable_sriov:
+ kfree(rm->vf_info);
+
+ return err;
+}
+
+static int rm_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ if (num_vfs == 0)
+ return __sriov_disable(pdev);
+ else
+ return __sriov_enable(pdev, num_vfs);
+}
+
+static void rm_remove(struct pci_dev *pdev)
+{
+ struct rm_dev *rm = pci_get_drvdata(pdev);
+
+ spin_lock(&rm_lst_lock);
+ list_del(&rm->list);
+ spin_unlock(&rm_lst_lock);
+
+ if (rm->num_vfs)
+ __sriov_disable(pdev);
+
+ disable_af_mbox_int(pdev);
+ rm_free_flr_irq(pdev);
+ rm_afpf_mbox_term(pdev);
+ rm_free_irqs(pdev);
+
+ if (rm->af_mbx_base)
+ iounmap(rm->af_mbx_base);
+ if (rm->bar2)
+ pcim_iounmap(pdev, rm->bar2);
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ devm_kfree(&pdev->dev, rm);
+}
+
+static struct pci_driver rm_driver = {
+ .name = DRV_NAME,
+ .id_table = rvu_rm_id_table,
+ .probe = rm_probe,
+ .remove = rm_remove,
+ .sriov_configure = rm_sriov_configure,
+};
+
+static int rm_open(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+static int rm_close(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
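+/*
+ * IOC_MEMREAD handler: copy the otx_mem descriptor from user space, map the
+ * requested physical address and copy 'nbytes' back into the user buffer.
+ */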
+static int mem_read(struct otx_mem *umem)
+{
+ struct otx_mem mem;
+ uint8_t *base;
+
+ if (copy_from_user(&mem, umem, sizeof(struct otx_mem)))
+ return -EIO;
+
+ base = phys_to_virt(mem.pa);
+ if (base == NULL)
+ return -ENOMEM;
+
+	if (copy_to_user(mem.buf, base, mem.nbytes))
+		return -EIO;
+
+	return 0;
+}
+
+static int mem_readv(struct otx_memv *umemv)
+{
+ struct otx_memv memv;
+ struct otx_mem *mm;
+ uint8_t *base, *addr;
+ int i, rc = 0;
+
+ if (copy_from_user(&memv, umemv, sizeof(struct otx_memv)))
+ return -EIO;
+
+ mm = kcalloc(memv.msize, sizeof(struct otx_mem), GFP_KERNEL);
+ if (mm == NULL)
+ return -ENOMEM;
+
+ base = phys_to_virt(memv.pbase);
+ if (base == NULL) {
+ rc = -ENOMEM;
+ goto eexit;
+ }
+ for (i = 0; i < memv.msize; i++) {
+ if (copy_from_user(&mm[i], &memv.mm[i],
+ sizeof(struct otx_mem))) {
+ rc = -EIO;
+ goto eexit;
+ }
+ addr = base + mm[i].pa;
+ if (copy_to_user(mm[i].buf, addr, mm[i].nbytes)) {
+ rc = -EIO;
+ goto eexit;
+ }
+ }
+eexit:
+ kfree(mm);
+ return rc;
+}
+
+static ssize_t rm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ int rc = 0;
+
+ switch (cmd) {
+ case IOC_MEMREAD:
+ rc = mem_read((void *)arg);
+ break;
+ case IOC_MEMREADV:
+ rc = mem_readv((void *)arg);
+ break;
+ default:
+ return -EINVAL;
+	}
+ return rc;
+}
+
+static const struct file_operations rm_fops = {
+ .owner = THIS_MODULE,
+ .open = rm_open,
+ .release = rm_close,
+ .unlocked_ioctl = rm_ioctl,
+};
+
+static int __init otx2_rm_init_module(void)
+{
+ dev_t dev;
+
+ pr_info("%s\n", DRV_NAME);
+ switch (MIDR_PARTNUM(read_cpuid_id())) {
+ case CPU_MODEL_98XX_PART:
+ case CPU_MODEL_96XX_PART:
+ case CPU_MODEL_95XX_PART:
+ case CPU_MODEL_95XXN_PART:
+ case CPU_MODEL_95XXMM_PART:
+ break;
+ default:
+ return 0;
+ }
+	cls = class_create(THIS_MODULE, CLS_NAME);
+	if (IS_ERR(cls))
+ goto eexit1;
+
+ if (alloc_chrdev_region(&devno, 0, 1, DEV_NAME) < 0)
+ goto eexit2;
+
+ dev = MKDEV(MAJOR(devno), DEV_MINOR);
+	if (IS_ERR(device_create(cls, NULL, dev, NULL, DEV_NAME)))
+ goto eexit3;
+
+ cdev_init(&cdev, &rm_fops);
+ if (cdev_add(&cdev, dev, 1) != 0)
+ goto eexit4;
+
+ spin_lock_init(&rm_lst_lock);
+ return pci_register_driver(&rm_driver);
+
+eexit4:
+ device_destroy(cls, dev);
+eexit3:
+ unregister_chrdev_region(devno, 1);
+eexit2:
+ class_destroy(cls);
+eexit1:
+ pr_info("%s: failed to install\n", DEV_NAME);
+ return -EIO;
+}
+
+static void __exit otx2_rm_exit_module(void)
+{
+ dev_t dev = MKDEV(MAJOR(devno), DEV_MINOR);
+
+ pci_unregister_driver(&rm_driver);
+ cdev_del(&cdev);
+ device_destroy(cls, dev);
+ unregister_chrdev_region(devno, 1);
+ class_destroy(cls);
+}
+
+module_init(otx2_rm_init_module);
+module_exit(otx2_rm_exit_module);
diff --git a/drivers/soc/marvell/octeontx2-rm/otx2_rm.h b/drivers/soc/marvell/octeontx2-rm/otx2_rm.h
new file mode 100644
index 000000000000..73bdde1487a8
--- /dev/null
+++ b/drivers/soc/marvell/octeontx2-rm/otx2_rm.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* OcteonTX2 RVU Resource Manager driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef RM_H_
+#define RM_H_
+
+#include <linux/device.h>
+#include <linux/workqueue.h>
+#include <linux/pci.h>
+#include "mbox.h"
+#include "quota.h"
+
+#define MAX_DOM_VFS 8
+#define RM_MAX_VFS 128
+/* 12 CGX PFs + max HWVFs - VFs used for domains */
+#define RM_MAX_PORTS (12 + 256 - MAX_DOM_VFS)
+#define NAME_SIZE 32
+
+#define RVU_PFVF_PF_SHIFT 10
+#define RVU_PFVF_PF_MASK 0x3F
+#define RVU_PFVF_FUNC_SHIFT 0
+#define RVU_PFVF_FUNC_MASK 0x3FF
+
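+/* Compose an RVU pcifunc value from a PF number and a function (VF) index */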
+#define RVU_PFFUNC(pf, func) \
+ ((((pf) & RVU_PFVF_PF_MASK) << RVU_PFVF_PF_SHIFT) | \
+ (((func) & RVU_PFVF_FUNC_MASK) << RVU_PFVF_FUNC_SHIFT))
+
+/* PCI device IDs */
+#define PCI_DEVID_OCTEONTX2_RVU_PF 0xA063
+#define PCI_DEVID_OCTEONTX2_PASS1_RVU_PF 0x0063 /* Errata */
+#define PCI_DEVID_OCTEONTX2_RVU_AFVF 0xA0F8
+#define PCI_DEVID_OCTEONTX2_PASS1_RVU_AFVF 0x00F8
+#define PCI_DEVID_OCTEONTX2_RVU_VF 0xA064
+#define PCI_DEVID_OCTEONTX2_PASS1_RVU_VF 0xA064
+
+struct rm_dev;
+
+struct rvu_vf {
+ struct work_struct mbox_wrk;
+ struct work_struct mbox_wrk_up;
+ struct work_struct pfvf_flr_work;
+ struct device_attribute in_use_attr;
+ struct pci_dev *pdev;
+ struct kobject *limits_kobj;
+	/* pointer to the PF device this VF belongs to */
+ struct rm_dev *rm;
+ int vf_id;
+	int intr_idx; /* vf_id % 64 */
+ bool in_use;
+ bool got_flr;
+};
+
+struct rvu_limits {
+ struct quotas *sso;
+ struct quotas *ssow;
+ struct quotas *npa;
+ struct quotas *tim;
+ struct quotas *cpt;
+};
+
+struct rm_dev {
+ struct list_head list;
+ struct mutex lock;
+ struct pci_dev *pdev;
+ void __iomem *bar2;
+ void __iomem *af_mbx_base;
+ void __iomem *pfvf_mbx_base;
+#define RM_VF_ENABLED 0x1
+ u32 flags;
+ u32 num_vfs;
+ bool *irq_allocated;
+ char *irq_names;
+ int msix_count;
+ int pf;
+
+ struct otx2_mbox pfvf_mbox; /* MBOXes for VF => PF channel */
+ struct otx2_mbox pfvf_mbox_up; /* MBOXes for PF => VF channel */
+ struct otx2_mbox afpf_mbox; /* MBOX for PF => AF channel */
+ struct otx2_mbox afpf_mbox_up; /* MBOX for AF => PF channel */
+ struct work_struct mbox_wrk;
+ struct work_struct mbox_wrk_up;
+ struct workqueue_struct *afpf_mbox_wq; /* MBOX handler */
+ struct workqueue_struct *pfvf_mbox_wq; /* VF MBOX handler */
+ struct rvu_vf *vf_info;
+ struct free_rsrcs_rsp limits; /* Maximum limits for all VFs */
+ struct rvu_limits vf_limits; /* Limits for each VF */
+};
+
+#endif /* RM_H_ */
diff --git a/drivers/soc/marvell/octeontx2-rm/otxrmcmd.h b/drivers/soc/marvell/octeontx2-rm/otxrmcmd.h
new file mode 100644
index 000000000000..0ae5b9cac613
--- /dev/null
+++ b/drivers/soc/marvell/octeontx2-rm/otxrmcmd.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 RVU Resource Manager driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __OTXRMCMD_H__
+#define __OTXRMCMD_H__
+
+#include <linux/ioctl.h>
+
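+/* Pack/unpack the driver version as major.minor.release */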
+#define VERPACK(_mj, _mn, _rl) ((_mj) << 16 | (_mn) << 8 | (_rl))
+#define VERMAJ(_v) ((_v) >> 16)
+#define VERMIN(_v) (((_v) >> 8) & 0xFF)
+
+#define OTXRM_VERSION VERPACK(1, 0, 0)
+#define OTXRM_DRVNAME "/dev/otxrm"
+
+/* MEM */
+struct otx_mem {
+	uint64_t pa;     /* Physical address, or offset from the base address */
+ uint16_t nbytes; /* Number of bytes to read */
+ uint8_t *buf; /* Buffer address for return memory values */
+} __packed;
+
+/* MEMV */
+struct otx_memv {
+ uint64_t pbase; /* Base physical address */
+	uint16_t msize;  /* Number of entries in the mm array */
+ struct otx_mem *mm; /* Array of mem */
+} __packed;
+
+/* OTXRM IOCTL commands/messages */
+#define IOC_TYPE 110
+
+#define IOC_MEMREAD _IOWR(IOC_TYPE, 1, struct otx_mem *)
+#define IOC_MEMREADV _IOWR(IOC_TYPE, 2, struct otx_memv *)
+
+#endif /* __OTXRMCMD_H__ */
diff --git a/drivers/soc/marvell/octeontx2-rm/quota.c b/drivers/soc/marvell/octeontx2-rm/quota.c
new file mode 100644
index 000000000000..158d577ef098
--- /dev/null
+++ b/drivers/soc/marvell/octeontx2-rm/quota.c
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OcteonTX2 RVU Resource Manager driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/pci.h>
+#include <linux/sysfs.h>
+
+#include "quota.h"
+
+static ssize_t quota_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ struct quota *quota;
+ int val;
+
+ quota = container_of(attr, struct quota, sysfs);
+
+ if (quota->base->lock)
+ mutex_lock(quota->base->lock);
+ val = quota->val;
+ if (quota->base->lock)
+ mutex_unlock(quota->base->lock);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+
+static ssize_t quota_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct quota *quota;
+ struct quotas *base;
+ struct device *dev;
+ int old_val, new_val, res = 0;
+ u64 lf_sum;
+
+ quota = container_of(attr, struct quota, sysfs);
+ dev = quota->dev;
+ base = quota->base;
+
+ if (kstrtoint(buf, 0, &new_val)) {
+ dev_err(dev, "Invalid %s quota: %s\n", attr->attr.name, buf);
+ return -EIO;
+ }
+ if (new_val < 0) {
+ dev_err(dev, "Invalid %s quota: %d < 0\n", attr->attr.name,
+ new_val);
+ return -EIO;
+ }
+
+ if (new_val > base->max) {
+ dev_err(dev, "Invalid %s quota: %d > %d\n", attr->attr.name,
+ new_val, base->max);
+ return -EIO;
+ }
+
+ if (base->lock)
+ mutex_lock(base->lock);
+ old_val = quota->val;
+
+ if (base->ops.pre_store)
+ res = base->ops.pre_store(quota->ops_arg, quota, new_val);
+
+ if (res != 0) {
+ res = -EIO;
+ goto unlock;
+ }
+
+ lf_sum = quotas_get_sum(quota->base);
+
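+	/* Reject the update if the new total exceeds the configured max sum */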
+ if (lf_sum + new_val - quota->val > base->max_sum) {
+ dev_err(dev,
+ "Not enough resources for %s quota. Used: %lld, Max: %lld\n",
+ attr->attr.name, lf_sum, base->max_sum);
+ res = -EIO;
+ goto unlock;
+ }
+ quota->val = new_val;
+
+ if (base->ops.post_store)
+ base->ops.post_store(quota->ops_arg, quota, old_val);
+
+ res = count;
+
+unlock:
+ if (base->lock)
+ mutex_unlock(base->lock);
+ return res;
+}
+
+struct quotas *quotas_alloc(u32 cnt, u32 max, u64 max_sum,
+ int init_val, struct mutex *lock,
+ struct quota_ops *ops)
+{
+ struct quotas *quotas;
+ u64 i;
+
+ if (cnt == 0)
+ return NULL;
+
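+	/* Single allocation covering the header and 'cnt' quota entries */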
+ quotas = kzalloc(sizeof(struct quotas) + cnt * sizeof(struct quota),
+ GFP_KERNEL);
+ if (quotas == NULL)
+ return NULL;
+
+ for (i = 0; i < cnt; i++) {
+ quotas->a[i].base = quotas;
+ quotas->a[i].val = init_val;
+ }
+
+ quotas->cnt = cnt;
+ quotas->max = max;
+ quotas->max_sum = max_sum;
+ if (ops) {
+ quotas->ops.pre_store = ops->pre_store;
+ quotas->ops.post_store = ops->post_store;
+ }
+ quotas->lock = lock;
+
+ return quotas;
+}
+
+void quotas_free(struct quotas *quotas)
+{
+ u64 i;
+
+ if (quotas == NULL)
+ return;
+ WARN_ON(quotas->cnt == 0);
+
+ for (i = 0; i < quotas->cnt; i++)
+ quota_sysfs_destroy(&quotas->a[i]);
+
+ kfree(quotas);
+}
+
+int quota_sysfs_create(const char *name, struct kobject *parent,
+ struct device *log_dev, struct quota *quota,
+ void *ops_arg)
+{
+ int err;
+
+ if (name == NULL || quota == NULL || log_dev == NULL)
+ return -EINVAL;
+
+ quota->sysfs.show = quota_show;
+ quota->sysfs.store = quota_store;
+ quota->sysfs.attr.name = name;
+ quota->sysfs.attr.mode = 0644;
+ quota->parent = parent;
+ quota->dev = log_dev;
+ quota->ops_arg = ops_arg;
+
+ sysfs_attr_init(&quota->sysfs.attr);
+ err = sysfs_create_file(quota->parent, &quota->sysfs.attr);
+ if (err) {
+ dev_err(quota->dev,
+ "Failed to create '%s' quota sysfs for '%s'\n",
+ name, kobject_name(quota->parent));
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int quota_sysfs_destroy(struct quota *quota)
+{
+ if (quota == NULL)
+ return -EINVAL;
+ if (quota->sysfs.attr.mode != 0) {
+ sysfs_remove_file(quota->parent, &quota->sysfs.attr);
+ quota->sysfs.attr.mode = 0;
+ }
+ return 0;
+}
+
+u64 quotas_get_sum(struct quotas *quotas)
+{
+ u64 lf_sum = 0;
+ int i;
+
+ for (i = 0; i < quotas->cnt; i++)
+ lf_sum += quotas->a[i].val;
+
+ return lf_sum;
+}
diff --git a/drivers/soc/marvell/octeontx2-rm/quota.h b/drivers/soc/marvell/octeontx2-rm/quota.h
new file mode 100644
index 000000000000..37c3ceaacadf
--- /dev/null
+++ b/drivers/soc/marvell/octeontx2-rm/quota.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* OcteonTX2 RVU Resource Manager driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef QUOTA_H_
+#define QUOTA_H_
+
+#include <linux/kobject.h>
+#include <linux/mutex.h>
+
+struct quotas;
+
+struct quota {
+ struct kobj_attribute sysfs;
+ /* Device to scope logs to */
+ struct device *dev;
+ /* Kobject of the sysfs file */
+ struct kobject *parent;
+ /* Pointer to base structure */
+ struct quotas *base;
+ /* Argument passed to the quota_ops when this quota is modified */
+ void *ops_arg;
+ /* Value of the quota */
+ int val;
+};
+
+struct quota_ops {
+ /**
+	 * Called before sysfs store(); store() will proceed only if this returns 0.
+ * It is called with struct quotas::lock taken.
+ */
+ int (*pre_store)(void *arg, struct quota *quota, int new_val);
+ /** called after sysfs store(). */
+ void (*post_store)(void *arg, struct quota *quota, int old_val);
+};
+
+struct quotas {
+ struct quota_ops ops;
+ struct mutex *lock; /* lock taken for each sysfs operation */
+	u32 cnt; /* number of elements in the a[] array */
+ u32 max; /* maximum value for a single quota */
+ u64 max_sum; /* maximum sum of all quotas */
+ struct quota a[0]; /* array of quota assignments */
+};
+
+/**
 * Allocate and set up a quotas structure.
 *
 * @p cnt number of quotas to allocate
 * @p max maximum value of a single quota
 * @p max_sum maximum sum of all quotas
 * @p init_val initial value assigned to all quotas
 * @p lock mutex taken for each sysfs operation on the quotas
 * @p ops callbacks invoked around sysfs store operations
+ */
+struct quotas *quotas_alloc(u32 cnt, u32 max, u64 max_sum,
+ int init_val, struct mutex *lock,
+ struct quota_ops *ops);
+/**
+ * Frees quota array and any sysfs entries associated with it.
+ */
+void quotas_free(struct quotas *quotas);
+
+/**
 * Create a sysfs entry controlling the given quota entry.
+ *
 * Reading the file created under parent returns the current value of the
 * quota; writing takes the quotas lock and checks that the new value does
 * not exceed the configured maximum values.
+ *
+ * @return 0 if succeeded, negative error code otherwise.
+ */
+int quota_sysfs_create(const char *name, struct kobject *parent,
+ struct device *log_dev, struct quota *quota,
+ void *ops_arg);
+/**
+ * Remove sysfs entry for a given quota if it was created.
+ */
+int quota_sysfs_destroy(struct quota *quota);
+
+/**
 * Return the current sum of all quota values.
+ */
+u64 quotas_get_sum(struct quotas *quotas);
+
+#endif /* QUOTA_H_ */
diff --git a/drivers/soc/marvell/octeontx2-sdp/Makefile b/drivers/soc/marvell/octeontx2-sdp/Makefile
new file mode 100644
index 000000000000..16a5d353ea44
--- /dev/null
+++ b/drivers/soc/marvell/octeontx2-sdp/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for Marvell's OcteonTX2 SDP PF driver
+#
+
+obj-$(CONFIG_OCTEONTX2_SDP_PF) += octeontx2_sdp.o
+
+octeontx2_sdp-y := sdp.o
+ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
diff --git a/drivers/soc/marvell/octeontx2-sdp/sdp.c b/drivers/soc/marvell/octeontx2-sdp/sdp.c
new file mode 100644
index 000000000000..de77942d4a25
--- /dev/null
+++ b/drivers/soc/marvell/octeontx2-sdp/sdp.c
@@ -0,0 +1,1789 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OcteonTX2 SDP driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/sysfs.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include "rvu.h"
+#include "rvu_reg.h"
+#include "rvu_struct.h"
+#include "sdp.h"
+
+#define DRV_NAME "octeontx2-sdp"
+#define DRV_VERSION "1.1"
+
+#define PCI_DEVID_OCTEONTX2_SDP_PF 0xA0F6
+#define PCI_DEVID_OCTEONTX2_SDP_VF 0xA0F7
+
+/* PCI BAR nos */
+#define PCI_AF_REG_BAR_NUM 0
+#define PCI_CFG_REG_BAR_NUM 2
+#define MBOX_BAR_NUM 4
+
+#define SDP_PPAIR_THOLD 0x400
+
+/* Supported devices */
+static const struct pci_device_id rvu_sdp_id_table[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_SDP_PF)},
+ {0} /* end of table */
+};
+
+MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_DESCRIPTION("Marvell OcteonTX2 SDP PF Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, rvu_sdp_id_table);
+
+/* All PF devices found are stored here */
+static spinlock_t sdp_lst_lock;
+LIST_HEAD(sdp_dev_lst_head);
+static int sdp_sriov_configure(struct pci_dev *pdev, int num_vfs);
+
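+/* cn10k and newer SDP devices are identified by the PCI subsystem device ID */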
+static inline bool is_otx3_sdp(struct sdp_dev *sdp)
+{
+	return sdp->pdev->subsystem_device >= PCI_SUBSYS_DEVID_CN10K_A;
+}
+
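+/*
+ * BAR2 register accessors: the target address is built from the RVU block
+ * address (b), a slot index (s) and the register offset (o).
+ */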
+static void
+sdp_write64(struct sdp_dev *rvu, u64 b, u64 s, u64 o, u64 v)
+{
+ writeq(v, rvu->bar2 + ((b << 20) | (s << 12) | o));
+}
+
+static u64 sdp_read64(struct sdp_dev *rvu, u64 b, u64 s, u64 o)
+{
+ return readq(rvu->bar2 + ((b << 20) | (s << 12) | o));
+}
+
+static void enable_af_mbox_int(struct pci_dev *pdev)
+{
+ struct sdp_dev *sdp;
+
+ sdp = pci_get_drvdata(pdev);
+ /* Clear interrupt if any */
+ sdp_write64(sdp, BLKADDR_RVUM, 0, RVU_PF_INT, 0x1ULL);
+
+ /* Now Enable AF-PF interrupt */
+ sdp_write64(sdp, BLKADDR_RVUM, 0, RVU_PF_INT_ENA_W1S, 0x1ULL);
+}
+
+static void disable_af_mbox_int(struct pci_dev *pdev)
+{
+ struct sdp_dev *sdp;
+
+ sdp = pci_get_drvdata(pdev);
+ /* Clear interrupt if any */
+ sdp_write64(sdp, BLKADDR_RVUM, 0, RVU_PF_INT, 0x1ULL);
+
+ /* Now Disable AF-PF interrupt */
+ sdp_write64(sdp, BLKADDR_RVUM, 0, RVU_PF_INT_ENA_W1C, 0x1ULL);
+}
+
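+/*
+ * Copy a mailbox request into the destination mailbox identified by devid,
+ * send it and wait for the response.
+ */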
+static int
+forward_to_mbox(struct sdp_dev *sdp, struct otx2_mbox *mbox, int devid,
+ struct mbox_msghdr *req, int size, const char *mstr)
+{
+ struct mbox_msghdr *msg;
+ int res = 0;
+
+ msg = otx2_mbox_alloc_msg(mbox, devid, size);
+ if (msg == NULL)
+ return -ENOMEM;
+
+ memcpy((uint8_t *)msg + sizeof(struct mbox_msghdr),
+ (uint8_t *)req + sizeof(struct mbox_msghdr), size);
+ msg->id = req->id;
+ msg->pcifunc = req->pcifunc;
+ msg->sig = req->sig;
+ msg->ver = req->ver;
+
+ otx2_mbox_msg_send(mbox, devid);
+ res = otx2_mbox_wait_for_rsp(mbox, devid);
+ if (res == -EIO) {
+ dev_err(&sdp->pdev->dev, "RVU %s MBOX timeout.\n", mstr);
+ goto err;
+ } else if (res) {
+ dev_err(&sdp->pdev->dev,
+ "RVU %s MBOX error: %d.\n", mstr, res);
+ res = -EFAULT;
+ goto err;
+ }
+
+ return 0;
+err:
+ return res;
+}
+
+static int
+handle_af_req(struct sdp_dev *sdp, struct rvu_vf *vf, struct mbox_msghdr *req,
+ int size)
+{
+ /* We expect a request here */
+ if (req->sig != OTX2_MBOX_REQ_SIG) {
+ dev_err(&sdp->pdev->dev,
+ "UP MBOX msg with wrong signature %x, ID 0x%x\n",
+ req->sig, req->id);
+ return -EINVAL;
+ }
+
+	/* If the PF must handle notifications, add a switch-case here. */
+ return forward_to_mbox(sdp, &sdp->pfvf_mbox_up, vf->vf_id, req, size,
+ "VF");
+}
+
+
+static void sdp_afpf_mbox_handler_up(struct work_struct *work)
+{
+ struct sdp_dev *sdp = container_of(work, struct sdp_dev, mbox_wrk_up);
+ struct otx2_mbox *mbox = &sdp->afpf_mbox_up;
+ struct otx2_mbox_dev *mdev = mbox->dev;
+ struct mbox_hdr *req_hdr;
+ struct mbox_msghdr *msg;
+ int offset, id, err;
+ struct rvu_vf *vf;
+
+ /* sync with mbox memory region */
+ smp_rmb();
+
+ /* Process received mbox messages */
+ req_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ offset = ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
+ for (id = 0; id < req_hdr->num_msgs; id++) {
+ msg = (struct mbox_msghdr *)(mdev->mbase + mbox->rx_start +
+ offset);
+
+ if ((msg->pcifunc >> RVU_PFVF_PF_SHIFT) != sdp->pf ||
+ (msg->pcifunc & RVU_PFVF_FUNC_MASK) <= sdp->num_vfs)
+ err = -EINVAL;
+ else {
+ vf = &sdp->vf_info[msg->pcifunc & RVU_PFVF_FUNC_MASK];
+ err = handle_af_req(sdp, vf, msg,
+ msg->next_msgoff - offset);
+ }
+ if (err)
+ otx2_reply_invalid_msg(mbox, 0, msg->pcifunc, msg->id);
+ offset = msg->next_msgoff;
+ }
+
+ otx2_mbox_msg_send(mbox, 0);
+}
+
+static void sdp_afpf_mbox_handler(struct work_struct *work)
+{
+ struct nix_lf_alloc_rsp *alloc_rsp;
+ struct otx2_mbox *af_mbx, *vf_mbx;
+ struct mbox_msghdr *msg, *fwd;
+ struct free_rsrcs_rsp *rsp;
+ int offset, i, vf_id, size;
+ struct mbox_hdr *rsp_hdr;
+ struct sdp_dev *sdp;
+ struct rvu_vf *vf;
+
+ /* Read latest mbox data */
+ smp_rmb();
+
+ sdp = container_of(work, struct sdp_dev, mbox_wrk);
+ af_mbx = &sdp->afpf_mbox;
+ vf_mbx = &sdp->pfvf_mbox;
+ rsp_hdr = (struct mbox_hdr *)(af_mbx->dev->mbase + af_mbx->rx_start);
+ if (rsp_hdr->num_msgs == 0)
+ return;
+ offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+
+ for (i = 0; i < rsp_hdr->num_msgs; i++) {
+ msg = (struct mbox_msghdr *)(af_mbx->dev->mbase +
+ af_mbx->rx_start + offset);
+ size = msg->next_msgoff - offset;
+
+ if (msg->id >= MBOX_MSG_MAX) {
+ dev_err(&sdp->pdev->dev,
+ "MBOX msg with unknown ID 0x%x\n", msg->id);
+ goto end;
+ }
+
+ if (msg->sig != OTX2_MBOX_RSP_SIG) {
+ dev_err(&sdp->pdev->dev,
+ "MBOX msg with wrong signature %x, ID 0x%x\n",
+ msg->sig, msg->id);
+ goto end;
+ }
+
+ vf_id = (msg->pcifunc & RVU_PFVF_FUNC_MASK);
+
+ if (msg->id == MBOX_MSG_NIX_LF_ALLOC) {
+ alloc_rsp = (struct nix_lf_alloc_rsp *)msg;
+ if (vf_id == 1)
+ alloc_rsp->rx_chan_cnt = sdp->info.vf_rings[0];
+ else
+ alloc_rsp->rx_chan_cnt = sdp->info.vf_rings[1];
+ alloc_rsp->tx_chan_cnt = alloc_rsp->rx_chan_cnt;
+ }
+
+ if (vf_id > 0) {
+ if (vf_id > sdp->num_vfs) {
+ dev_err(&sdp->pdev->dev,
+					"MBOX msg to unknown VF: %d > %d\n",
+ vf_id, sdp->num_vfs);
+ goto end;
+ }
+ vf = &sdp->vf_info[vf_id - 1];
+ /* Ignore stale responses and VFs in FLR. */
+ if (!vf->in_use || vf->got_flr)
+ goto end;
+ fwd = otx2_mbox_alloc_msg(vf_mbx, vf_id - 1, size);
+ if (!fwd) {
+ dev_err(&sdp->pdev->dev,
+ "Forwarding to VF%d failed.\n", vf_id);
+ goto end;
+ }
+ memcpy((uint8_t *)fwd + sizeof(struct mbox_msghdr),
+ (uint8_t *)msg + sizeof(struct mbox_msghdr),
+ size);
+ fwd->id = msg->id;
+ fwd->pcifunc = msg->pcifunc;
+ fwd->sig = msg->sig;
+ fwd->ver = msg->ver;
+ fwd->rc = msg->rc;
+ } else {
+ if (msg->ver < OTX2_MBOX_VERSION) {
+ dev_err(&sdp->pdev->dev,
+ "MBOX msg with version %04x != %04x\n",
+ msg->ver, OTX2_MBOX_VERSION);
+ goto end;
+ }
+
+ switch (msg->id) {
+ case MBOX_MSG_READY:
+ sdp->pf = (msg->pcifunc >> RVU_PFVF_PF_SHIFT) &
+ RVU_PFVF_PF_MASK;
+ break;
+ case MBOX_MSG_FREE_RSRC_CNT:
+ rsp = (struct free_rsrcs_rsp *)msg;
+ memcpy(&sdp->limits, msg, sizeof(*rsp));
+ break;
+ case MBOX_MSG_SET_SDP_CHAN_INFO:
+ /* Nothing to do */
+ break;
+ case MBOX_MSG_GET_SDP_CHAN_INFO:
+ /* Nothing to do */
+ break;
+ default:
+ dev_err(&sdp->pdev->dev,
+ "Unsupported msg %d received.\n",
+ msg->id);
+ break;
+ }
+ }
+end:
+ offset = msg->next_msgoff;
+ af_mbx->dev->msgs_acked++;
+ }
+ otx2_mbox_reset(af_mbx, 0);
+}
+
+static int
+reply_free_rsrc_cnt(struct sdp_dev *sdp, struct rvu_vf *vf,
+ struct mbox_msghdr *req, int size)
+{
+ struct free_rsrcs_rsp *rsp;
+
+ rsp = (struct free_rsrcs_rsp *)otx2_mbox_alloc_msg(&sdp->pfvf_mbox,
+ vf->vf_id,
+ sizeof(*rsp));
+ if (rsp == NULL)
+ return -ENOMEM;
+
+ rsp->hdr.id = MBOX_MSG_FREE_RSRC_CNT;
+ rsp->hdr.pcifunc = req->pcifunc;
+ rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
+ return 0;
+}
+
+static int
+handle_vf_req(struct sdp_dev *sdp, struct rvu_vf *vf, struct mbox_msghdr *req,
+ int size)
+{
+ int err = 0, chan_idx, chan_diff, reg_off = 0, vf_id;
+ uint64_t en_bp;
+ u16 chan_base;
+ u8 chan_cnt;
+
+	/* Check if the request is valid; if not, reply with an invalid msg */
+ if (req->sig != OTX2_MBOX_REQ_SIG) {
+ dev_err(&sdp->pdev->dev,
+ "VF MBOX msg with wrong signature %x, ID 0x%x\n",
+ req->sig, req->id);
+ return -EINVAL;
+ }
+
+ switch (req->id) {
+ case MBOX_MSG_READY:
+ if (req->ver < OTX2_MBOX_VERSION) {
+ dev_err(&sdp->pdev->dev,
+ "VF MBOX msg with version %04x != %04x\n",
+ req->ver, OTX2_MBOX_VERSION);
+ return -EINVAL;
+ }
+ vf->in_use = true;
+ err = forward_to_mbox(sdp, &sdp->afpf_mbox, 0, req, size, "AF");
+ break;
+ case MBOX_MSG_FREE_RSRC_CNT:
+ if (req->ver < OTX2_MBOX_VERSION) {
+ dev_err(&sdp->pdev->dev,
+ "VF MBOX msg with version %04x != %04x\n",
+ req->ver, OTX2_MBOX_VERSION);
+ return -EINVAL;
+ }
+ err = reply_free_rsrc_cnt(sdp, vf, req, size);
+ break;
+ case MBOX_MSG_ATTACH_RESOURCES:
+ if (req->ver < OTX2_MBOX_VERSION) {
+ dev_err(&sdp->pdev->dev,
+ "VF MBOX msg with version %04x != %04x\n",
+ req->ver, OTX2_MBOX_VERSION);
+ return -EINVAL;
+ }
+ err = forward_to_mbox(sdp, &sdp->afpf_mbox, 0, req, size, "AF");
+ break;
+ case MBOX_MSG_NIX_LF_ALLOC:
+ chan_base = sdp->chan_base + sdp->info.num_pf_rings;
+ for (vf_id = 0; vf_id < vf->vf_id; vf_id++)
+ chan_base += sdp->info.vf_rings[vf_id];
+ chan_cnt = sdp->info.vf_rings[vf->vf_id];
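+		/*
+		 * Each SDPX_OUT_BP_ENX register holds 64 channel enable bits,
+		 * so split the channel offset into a register index and a bit
+		 * position before setting the backpressure enable bit.
+		 */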
+ for (chan_idx = 0; chan_idx < chan_cnt; chan_idx++) {
+ chan_diff = chan_base + chan_idx - sdp->chan_base;
+ reg_off = 0;
+ while (chan_diff > 63) {
+ reg_off += 1;
+ chan_diff -= 64;
+ }
+
+ en_bp = readq(sdp->sdp_base +
+ SDPX_OUT_BP_ENX_W1S(reg_off));
+ en_bp |= (1ULL << chan_diff);
+ writeq(en_bp, sdp->sdp_base +
+ SDPX_OUT_BP_ENX_W1S(reg_off));
+ }
+ /* Fall through */
+ default:
+ err = forward_to_mbox(sdp, &sdp->afpf_mbox, 0, req, size, "AF");
+ break;
+ }
+
+ return err;
+}
+
+static int send_flr_msg(struct otx2_mbox *mbox, int dev_id, int pcifunc)
+{
+ struct msg_req *req;
+
+ req = (struct msg_req *)
+ otx2_mbox_alloc_msg(mbox, dev_id, sizeof(*req));
+ if (req == NULL)
+ return -ENOMEM;
+
+ req->hdr.pcifunc = pcifunc;
+ req->hdr.id = MBOX_MSG_VF_FLR;
+ req->hdr.sig = OTX2_MBOX_REQ_SIG;
+
+ otx2_mbox_msg_send(mbox, 0);
+
+ return 0;
+}
+
+static void sdp_send_flr_msg(struct sdp_dev *sdp, struct rvu_vf *vf)
+{
+ int res, pcifunc;
+
+ pcifunc = (vf->sdp->pf << RVU_PFVF_PF_SHIFT) |
+ ((vf->vf_id + 1) & RVU_PFVF_FUNC_MASK);
+
+ if (send_flr_msg(&sdp->afpf_mbox, 0, pcifunc) != 0) {
+ dev_err(&sdp->pdev->dev, "Sending FLR to AF failed\n");
+ return;
+ }
+
+ res = otx2_mbox_wait_for_rsp(&sdp->afpf_mbox, 0);
+ if (res == -EIO) {
+ dev_err(&sdp->pdev->dev, "RVU AF MBOX timeout.\n");
+ } else if (res) {
+ dev_err(&sdp->pdev->dev,
+ "RVU MBOX error: %d.\n", res);
+ }
+}
+
+static void sdp_send_flr_to_dpi(struct sdp_dev *sdp)
+{
+	/* TODO: DPI VFs need to be handled */
+}
+
+static void sdp_pfvf_flr_handler(struct work_struct *work)
+{
+ struct rvu_vf *vf = container_of(work, struct rvu_vf, pfvf_flr_work);
+ struct sdp_dev *sdp = vf->sdp;
+ struct otx2_mbox *mbox;
+
+ mbox = &sdp->pfvf_mbox;
+
+ sdp_send_flr_to_dpi(sdp);
+ sdp_send_flr_msg(sdp, vf);
+
+ /* Disable interrupts from AF and wait for any pending
+ * responses to be handled for this VF and then reset the
+ * mailbox
+ */
+ disable_af_mbox_int(sdp->pdev);
+ flush_workqueue(sdp->afpf_mbox_wq);
+ otx2_mbox_reset(mbox, vf->vf_id);
+ vf->in_use = false;
+ vf->got_flr = false;
+ enable_af_mbox_int(sdp->pdev);
+ sdp_write64(sdp, BLKADDR_RVUM, 0, RVU_PF_VFTRPENDX(vf->vf_id / 64),
+ BIT_ULL(vf->intr_idx));
+}
+
+static void sdp_pfvf_mbox_handler_up(struct work_struct *work)
+{
+ struct otx2_mbox *af_mbx, *vf_mbx;
+ struct mbox_msghdr *msg, *fwd;
+ struct mbox_hdr *rsp_hdr;
+ struct sdp_dev *sdp;
+ int offset, i, size;
+ struct rvu_vf *vf;
+
+ /* Read latest mbox data */
+ smp_rmb();
+
+ vf = container_of(work, struct rvu_vf, mbox_wrk_up);
+ sdp = vf->sdp;
+ af_mbx = &sdp->afpf_mbox;
+ vf_mbx = &sdp->pfvf_mbox;
+ rsp_hdr = (struct mbox_hdr *)(vf_mbx->dev[vf->vf_id].mbase +
+ vf_mbx->rx_start);
+ if (rsp_hdr->num_msgs == 0)
+ return;
+ offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+
+ for (i = 0; i < rsp_hdr->num_msgs; i++) {
+ msg = (struct mbox_msghdr *)(vf_mbx->dev->mbase +
+ vf_mbx->rx_start + offset);
+ size = msg->next_msgoff - offset;
+
+ if (msg->sig != OTX2_MBOX_RSP_SIG) {
+ dev_err(&sdp->pdev->dev,
+ "UP MBOX msg with wrong signature %x, ID 0x%x\n",
+ msg->sig, msg->id);
+ goto end;
+ }
+
+		/* Override the pcifunc with this PF/VF's actual identity */
+ msg->pcifunc = (sdp->pf << RVU_PFVF_PF_SHIFT) | vf->vf_id;
+
+ fwd = otx2_mbox_alloc_msg(af_mbx, 0, size);
+ if (!fwd) {
+ dev_err(&sdp->pdev->dev,
+ "UP Forwarding from VF%d to AF failed.\n",
+ vf->vf_id);
+ goto end;
+ }
+ memcpy((uint8_t *)fwd + sizeof(struct mbox_msghdr),
+ (uint8_t *)msg + sizeof(struct mbox_msghdr),
+ size);
+ fwd->id = msg->id;
+ fwd->pcifunc = msg->pcifunc;
+ fwd->sig = msg->sig;
+ fwd->ver = msg->ver;
+ fwd->rc = msg->rc;
+end:
+ offset = msg->next_msgoff;
+ vf_mbx->dev->msgs_acked++;
+ }
+ otx2_mbox_reset(vf_mbx, vf->vf_id);
+}
+
+static void sdp_pfvf_mbox_handler(struct work_struct *work)
+{
+ struct rvu_vf *vf = container_of(work, struct rvu_vf, mbox_wrk);
+ struct sdp_dev *sdp = vf->sdp;
+ struct otx2_mbox_dev *mdev;
+ struct mbox_hdr *req_hdr;
+ struct mbox_msghdr *msg;
+ struct otx2_mbox *mbox;
+ int offset, id, err;
+
+ mbox = &sdp->pfvf_mbox;
+ mdev = &mbox->dev[vf->vf_id];
+
+ /* sync with mbox memory region */
+ smp_rmb();
+
+ /* Process received mbox messages */
+ req_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ offset = ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
+ for (id = 0; id < req_hdr->num_msgs; id++) {
+ msg = (struct mbox_msghdr *)(mdev->mbase + mbox->rx_start +
+ offset);
+
+ /* Set which VF sent this message based on mbox IRQ */
+ msg->pcifunc = ((u16)sdp->pf << RVU_PFVF_PF_SHIFT) |
+ ((vf->vf_id + 1) & RVU_PFVF_FUNC_MASK);
+ err = handle_vf_req(sdp, vf, msg, msg->next_msgoff - offset);
+ if (err)
+ otx2_reply_invalid_msg(mbox, vf->vf_id, msg->pcifunc,
+ msg->id);
+ offset = msg->next_msgoff;
+ }
+ /* Send mbox responses to VF */
+ if (mdev->num_msgs)
+ otx2_mbox_msg_send(mbox, vf->vf_id);
+}
+
+static irqreturn_t sdp_af_pf_mbox_intr(int irq, void *arg)
+{
+ struct sdp_dev *sdp = (struct sdp_dev *)arg;
+ struct otx2_mbox_dev *mdev;
+ struct otx2_mbox *mbox;
+ struct mbox_hdr *hdr;
+
+ /* Read latest mbox data */
+ smp_rmb();
+
+ mbox = &sdp->afpf_mbox;
+ mdev = &mbox->dev[0];
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ /* Handle PF => AF channel response */
+ if (hdr->num_msgs)
+ queue_work(sdp->afpf_mbox_wq, &sdp->mbox_wrk);
+
+ mbox = &sdp->afpf_mbox_up;
+ mdev = &mbox->dev[0];
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ /* Handle AF => PF request */
+ if (hdr->num_msgs)
+ queue_work(sdp->afpf_mbox_wq, &sdp->mbox_wrk_up);
+
+ /* Clear and ack the interrupt */
+ sdp_write64(sdp, BLKADDR_RVUM, 0, RVU_PF_INT, 0x1ULL);
+
+ return IRQ_HANDLED;
+}
+
+static void __handle_vf_flr(struct sdp_dev *sdp, struct rvu_vf *vf_ptr)
+{
+ if (vf_ptr->in_use) {
+ /* Using the same MBOX workqueue here, so that we can
+ * synchronize with other VF->PF messages being forwarded to
+ * AF
+ */
+ vf_ptr->got_flr = true;
+ queue_work(sdp->pfvf_mbox_wq, &vf_ptr->pfvf_flr_work);
+ } else
+ sdp_write64(sdp, BLKADDR_RVUM, 0,
+ RVU_PF_VFTRPENDX(vf_ptr->vf_id / 64),
+ BIT_ULL(vf_ptr->intr_idx));
+}
+
+static irqreturn_t sdp_pf_vf_flr_intr(int irq, void *arg)
+{
+ struct sdp_dev *sdp = (struct sdp_dev *)arg;
+ struct rvu_vf *vf_ptr;
+ int vf, i;
+ u64 intr;
+
+ /* Check which VF FLR has been raised and process accordingly */
+ for (i = 0; i < 2; i++) {
+ /* Read the interrupt bits */
+ intr = sdp_read64(sdp, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(i));
+
+ for (vf = i * 64; vf < sdp->num_vfs; vf++) {
+ vf_ptr = &sdp->vf_info[vf];
+ if (intr & (1ULL << vf_ptr->intr_idx)) {
+ /* Clear the interrupts */
+ sdp_write64(sdp, BLKADDR_RVUM, 0,
+ RVU_PF_VFFLR_INTX(i),
+ BIT_ULL(vf_ptr->intr_idx));
+ __handle_vf_flr(sdp, vf_ptr);
+ }
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t sdp_pf_vf_mbox_intr(int irq, void *arg)
+{
+ struct sdp_dev *sdp = (struct sdp_dev *)arg;
+ struct otx2_mbox_dev *mdev;
+ struct otx2_mbox *mbox;
+ struct mbox_hdr *hdr;
+ struct rvu_vf *vf;
+ int i, vfi;
+ u64 intr;
+
+ /* Check which VF has raised an interrupt and schedule corresponding
+ * workq to process the MBOX
+ */
+ for (i = 0; i < 2; i++) {
+ /* Read the interrupt bits */
+ intr = sdp_read64(sdp, BLKADDR_RVUM, 0,
+ RVU_PF_VFPF_MBOX_INTX(i));
+
+ for (vfi = i * 64; vfi < sdp->num_vfs; vfi++) {
+ vf = &sdp->vf_info[vfi];
+ if ((intr & (1ULL << vf->intr_idx)) == 0)
+ continue;
+ mbox = &sdp->pfvf_mbox;
+ mdev = &mbox->dev[vf->vf_id];
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ /* Handle VF => PF channel request */
+ if (hdr->num_msgs)
+ queue_work(sdp->pfvf_mbox_wq, &vf->mbox_wrk);
+
+ mbox = &sdp->pfvf_mbox_up;
+ mdev = &mbox->dev[vf->vf_id];
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ /* Handle PF => VF channel response */
+ if (hdr->num_msgs)
+ queue_work(sdp->pfvf_mbox_wq, &vf->mbox_wrk_up);
+ /* Clear the interrupt */
+ sdp_write64(sdp, BLKADDR_RVUM, 0,
+ RVU_PF_VFPF_MBOX_INTX(i),
+ BIT_ULL(vf->intr_idx));
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int sdp_register_flr_irq(struct pci_dev *pdev)
+{
+ struct sdp_dev *sdp;
+ int err, vec, i;
+
+ sdp = pci_get_drvdata(pdev);
+
+ /* Register for VF FLR interrupts
+ * There are 2 vectors starting at index 0x0
+ */
+ for (vec = RVU_PF_INT_VEC_VFFLR0, i = 0;
+ vec + i <= RVU_PF_INT_VEC_VFFLR1; i++) {
+ sprintf(&sdp->irq_names[(vec + i) * NAME_SIZE],
+ "SDP_PF%02d_VF_FLR%d", pdev->bus->number, i);
+ err = request_irq(pci_irq_vector(pdev, vec + i),
+ sdp_pf_vf_flr_intr, 0,
+ &sdp->irq_names[(vec + i) * NAME_SIZE], sdp);
+ if (err) {
+ dev_err(&pdev->dev,
+ "request_irq() failed for PFVF FLR intr %d\n",
+ vec);
+ goto reg_fail;
+ }
+ sdp->irq_allocated[vec + i] = true;
+ }
+
+ return 0;
+
+reg_fail:
+
+ return err;
+}
+
+static void sdp_free_flr_irq(struct pci_dev *pdev)
+{
+	(void)pdev;
+	/* Nothing to free here for now */
+}
+
+static int sdp_alloc_irqs(struct pci_dev *pdev)
+{
+ struct sdp_dev *sdp;
+ int err;
+
+ sdp = pci_get_drvdata(pdev);
+
+	/* Get the MSIX vector count and allocate vectors first */
+ sdp->msix_count = pci_msix_vec_count(pdev);
+
+ err = pci_alloc_irq_vectors(pdev, sdp->msix_count, sdp->msix_count,
+ PCI_IRQ_MSIX);
+
+ if (err < 0) {
+ dev_err(&pdev->dev, "pci_alloc_irq_vectors() failed %d\n", err);
+ return err;
+ }
+
+ sdp->irq_names = kmalloc_array(sdp->msix_count, NAME_SIZE, GFP_KERNEL);
+ if (!sdp->irq_names) {
+ err = -ENOMEM;
+ goto err_irq_names;
+ }
+
+ sdp->irq_allocated = kcalloc(sdp->msix_count, sizeof(bool), GFP_KERNEL);
+ if (!sdp->irq_allocated) {
+ err = -ENOMEM;
+ goto err_irq_allocated;
+ }
+
+ return 0;
+
+err_irq_allocated:
+ kfree(sdp->irq_names);
+ sdp->irq_names = NULL;
+err_irq_names:
+ pci_free_irq_vectors(pdev);
+ sdp->msix_count = 0;
+
+ return err;
+}
+
+static void sdp_free_irqs(struct pci_dev *pdev)
+{
+ struct sdp_dev *sdp;
+ int irq;
+
+ sdp = pci_get_drvdata(pdev);
+ for (irq = 0; irq < sdp->msix_count; irq++) {
+ if (sdp->irq_allocated[irq])
+ free_irq(pci_irq_vector(sdp->pdev, irq), sdp);
+ }
+
+ pci_free_irq_vectors(pdev);
+
+ kfree(sdp->irq_names);
+ kfree(sdp->irq_allocated);
+}
+
+static int sdp_register_mbox_irq(struct pci_dev *pdev)
+{
+ int err, vec = RVU_PF_INT_VEC_VFPF_MBOX0, i;
+ struct sdp_dev *sdp;
+
+ sdp = pci_get_drvdata(pdev);
+
+ /* Register PF-AF interrupt handler */
+ sprintf(&sdp->irq_names[RVU_PF_INT_VEC_AFPF_MBOX * NAME_SIZE],
+ "PF%02d_AF_MBOX_IRQ", pdev->devfn);
+ err = request_irq(pci_irq_vector(pdev, RVU_PF_INT_VEC_AFPF_MBOX),
+ sdp_af_pf_mbox_intr, 0,
+ &sdp->irq_names[RVU_PF_INT_VEC_AFPF_MBOX * NAME_SIZE],
+ sdp);
+ if (err) {
+ dev_err(&pdev->dev,
+ "request_irq() failed for AF_PF MSIX vector\n");
+ return err;
+ }
+ sdp->irq_allocated[RVU_PF_INT_VEC_AFPF_MBOX] = true;
+
+ err = otx2_mbox_init(&sdp->afpf_mbox, sdp->af_mbx_base, pdev, sdp->bar2,
+ MBOX_DIR_PFAF, 1);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize PF/AF MBOX\n");
+ goto error;
+ }
+ err = otx2_mbox_init(&sdp->afpf_mbox_up, sdp->af_mbx_base, pdev,
+ sdp->bar2, MBOX_DIR_PFAF_UP, 1);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize PF/AF UP MBOX\n");
+ goto error;
+ }
+
+ /* Register for PF-VF mailbox interrupts
+ * There are 2 vectors starting at index 0x4
+ */
+ for (vec = RVU_PF_INT_VEC_VFPF_MBOX0, i = 0;
+ vec + i <= RVU_PF_INT_VEC_VFPF_MBOX1; i++) {
+ sprintf(&sdp->irq_names[(vec + i) * NAME_SIZE],
+ "PF%02d_VF_MBOX_IRQ%d", pdev->devfn, i);
+ err = request_irq(pci_irq_vector(pdev, vec + i),
+ sdp_pf_vf_mbox_intr, 0,
+ &sdp->irq_names[(vec + i) * NAME_SIZE], sdp);
+ if (err) {
+ dev_err(&pdev->dev,
+ "request_irq() failed for PFVF Mbox intr %d\n",
+ vec + i);
+ goto error;
+ }
+ sdp->irq_allocated[vec + i] = true;
+ }
+
+ sdp->afpf_mbox_wq = alloc_workqueue(
+ "sdp_pfaf_mailbox", WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM, 1);
+ if (!sdp->afpf_mbox_wq)
+ goto error;
+
+ INIT_WORK(&sdp->mbox_wrk, sdp_afpf_mbox_handler);
+ INIT_WORK(&sdp->mbox_wrk_up, sdp_afpf_mbox_handler_up);
+
+ return err;
+
+error:
+ if (sdp->afpf_mbox_up.dev != NULL)
+ otx2_mbox_destroy(&sdp->afpf_mbox_up);
+ if (sdp->afpf_mbox.dev != NULL)
+ otx2_mbox_destroy(&sdp->afpf_mbox);
+
+ return err;
+}
+
+static int sdp_get_pcifunc(struct sdp_dev *sdp)
+{
+ struct msg_req *ready_req;
+ int res = 0;
+
+ ready_req = (struct msg_req *)
+		otx2_mbox_alloc_msg_rsp(&sdp->afpf_mbox, 0, sizeof(*ready_req),
+ sizeof(struct ready_msg_rsp));
+ if (ready_req == NULL) {
+ dev_err(&sdp->pdev->dev, "RVU MBOX failed to get message.\n");
+ return -EFAULT;
+ }
+
+ ready_req->hdr.id = MBOX_MSG_READY;
+ ready_req->hdr.sig = OTX2_MBOX_REQ_SIG;
+ otx2_mbox_msg_send(&sdp->afpf_mbox, 0);
+ res = otx2_mbox_wait_for_rsp(&sdp->afpf_mbox, 0);
+ if (res == -EIO) {
+ dev_err(&sdp->pdev->dev, "RVU AF MBOX timeout.\n");
+ } else if (res) {
+ dev_err(&sdp->pdev->dev, "RVU MBOX error: %d.\n", res);
+ res = -EFAULT;
+ }
+ return res;
+}
+
+static int sdp_get_available_rsrcs(struct sdp_dev *sdp)
+{
+ struct mbox_msghdr *rsrc_req;
+ int res = 0;
+
+ rsrc_req = otx2_mbox_alloc_msg(&sdp->afpf_mbox, 0, sizeof(*rsrc_req));
+ if (rsrc_req == NULL) {
+ dev_err(&sdp->pdev->dev, "RVU MBOX failed to get message.\n");
+ return -EFAULT;
+ }
+ rsrc_req->id = MBOX_MSG_FREE_RSRC_CNT;
+ rsrc_req->sig = OTX2_MBOX_REQ_SIG;
+ rsrc_req->pcifunc = RVU_PFFUNC(sdp->pf, 0);
+ otx2_mbox_msg_send(&sdp->afpf_mbox, 0);
+ res = otx2_mbox_wait_for_rsp(&sdp->afpf_mbox, 0);
+ if (res == -EIO) {
+ dev_err(&sdp->pdev->dev, "RVU AF MBOX timeout.\n");
+ } else if (res) {
+ dev_err(&sdp->pdev->dev,
+ "RVU MBOX error: %d.\n", res);
+ res = -EFAULT;
+ }
+ return res;
+}
+
+static void sdp_afpf_mbox_term(struct pci_dev *pdev)
+{
+ struct sdp_dev *sdp = pci_get_drvdata(pdev);
+
+ destroy_workqueue(sdp->afpf_mbox_wq);
+ otx2_mbox_destroy(&sdp->afpf_mbox);
+ otx2_mbox_destroy(&sdp->afpf_mbox_up);
+}
+
+static int sdp_check_pf_usable(struct sdp_dev *sdp)
+{
+ u64 rev;
+
+ rev = sdp_read64(sdp, BLKADDR_RVUM, 0,
+ RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_RVUM));
+ rev = (rev >> 12) & 0xFF;
+	/* Check if the AF has set up the revision for the RVUM block;
+	 * otherwise this driver probe should be deferred
+	 * until the AF driver comes up.
+ */
+ if (!rev) {
+ dev_warn(&sdp->pdev->dev,
+ "AF is not initialized, deferring probe\n");
+ return -EPROBE_DEFER;
+ }
+ return 0;
+}
+
+static int sdp_parse_rinfo(struct pci_dev *pdev,
+ struct sdp_node_info *info)
+{
+ u32 vf_ring_cnts, vf_rings;
+ struct device_node *dev;
+ struct device *sdev;
+ const void *ptr;
+ int len, vfid;
+
+ sdev = &pdev->dev;
+ dev = of_find_node_by_name(NULL, "rvu-sdp");
+ if (dev == NULL) {
+ dev_err(sdev, "can't find FDT dev %s\n", "rvu-sdp");
+ return -EINVAL;
+ }
+
+ ptr = of_get_property(dev, "num-rvu-vfs", &len);
+ if (ptr == NULL) {
+ dev_err(sdev, "SDP DTS: Failed to get num-rvu-vfs\n");
+ return -EINVAL;
+ }
+
+ if (len != sizeof(u32)) {
+ dev_err(sdev, "SDP DTS: Wrong field length: num-rvu-vfs\n");
+ return -EINVAL;
+ }
+ info->max_vfs = be32_to_cpup((u32 *)ptr);
+
+ if (info->max_vfs > pci_sriov_get_totalvfs(pdev)) {
+ dev_err(sdev, "SDP DTS: Invalid field value: num-rvu-vfs\n");
+ return -EINVAL;
+ }
+
+ ptr = of_get_property(dev, "num-pf-rings", &len);
+ if (ptr == NULL) {
+ dev_err(sdev, "SDP DTS: Failed to get num-pf-rings\n");
+ return -EINVAL;
+ }
+ if (len != sizeof(u32)) {
+ dev_err(sdev, "SDP DTS: Wrong field length: num-pf-rings\n");
+ return -EINVAL;
+ }
+ info->num_pf_rings = be32_to_cpup((u32 *)ptr);
+
+ ptr = of_get_property(dev, "pf-srn", &len);
+ if (ptr == NULL) {
+ dev_err(sdev, "SDP DTS: Failed to get pf-srn\n");
+ return -EINVAL;
+ }
+ if (len != sizeof(u32)) {
+ dev_err(sdev, "SDP DTS: Wrong field length: pf-srn\n");
+ return -EINVAL;
+ }
+ info->pf_srn = be32_to_cpup((u32 *)ptr);
+
+ ptr = of_get_property(dev, "num-vf-rings", &len);
+ if (ptr == NULL) {
+ dev_err(sdev, "SDP DTS: Failed to get num-vf-rings\n");
+ return -EINVAL;
+ }
+
+ vf_ring_cnts = len / sizeof(u32);
+ if (vf_ring_cnts > info->max_vfs) {
+ dev_err(sdev, "SDP DTS: Wrong field length: num-vf-rings\n");
+ return -EINVAL;
+ }
+
+ for (vfid = 0; vfid < info->max_vfs; vfid++) {
+ if (vfid < vf_ring_cnts) {
+ if (of_property_read_u32_index(dev, "num-vf-rings",
+ vfid, &vf_rings)) {
+ dev_err(sdev, "SDP DTS: Failed to get vf ring count\n");
+ return -EINVAL;
+ }
+ info->vf_rings[vfid] = vf_rings;
+ } else {
+ /*
+ * Rest of the VFs use the same last ring count
+ * specified
+ */
+ info->vf_rings[vfid] = info->vf_rings[vf_ring_cnts - 1];
+ }
+ }
+ dev_info(sdev, "pf start ring number:%d num_pf_rings:%d max_vfs:%d vf_ring_cnts:%d\n",
+ info->pf_srn, info->num_pf_rings, info->max_vfs, vf_ring_cnts);
+
+ return 0;
+}
+
+static ssize_t sdp_vf0_rings_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pdev;
+ struct sdp_dev *sdp;
+
+ pdev = to_pci_dev(dev);
+ sdp = pci_get_drvdata(pdev);
+ return sprintf(buf, "%d", sdp->info.vf_rings[0]);
+}
+static DEVICE_ATTR_RO(sdp_vf0_rings);
+
+static ssize_t sdp_vfx_rings_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pdev;
+ struct sdp_dev *sdp;
+
+ pdev = to_pci_dev(dev);
+ sdp = pci_get_drvdata(pdev);
+ return sprintf(buf, "%d", sdp->info.vf_rings[1]);
+}
+static DEVICE_ATTR_RO(sdp_vfx_rings);
+
+static struct attribute *sdp_ring_attrs[] = {
+ &dev_attr_sdp_vf0_rings.attr,
+ &dev_attr_sdp_vfx_rings.attr,
+ NULL
+};
+
+static struct attribute_group sdp_ring_attr_group = {
+ .name = "sdp_ring_attr",
+ .attrs = sdp_ring_attrs,
+};
+
+static int sdp_sysfs_init(struct device *dev)
+{
+ int ret;
+
+ ret = sysfs_create_group(&dev->kobj, &sdp_ring_attr_group);
+ if (ret < 0) {
+		dev_err(dev, "Failed to create SDP ring sysfs group\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void sdp_sysfs_remove(struct device *dev)
+{
+ sysfs_remove_group(&dev->kobj, &sdp_ring_attr_group);
+}
+
+static int get_chan_info(struct sdp_dev *sdp)
+{
+ struct sdp_get_chan_info_msg *rsp;
+ struct msg_req *req;
+ int res = 0;
+
+ req = (struct msg_req *) otx2_mbox_alloc_msg(&sdp->afpf_mbox, 0, sizeof(*req));
+ if (req == NULL) {
+ dev_err(&sdp->pdev->dev, "RVU Mbox failed to alloc\n");
+ return -EFAULT;
+ }
+ req->hdr.id = MBOX_MSG_GET_SDP_CHAN_INFO;
+ req->hdr.sig = OTX2_MBOX_REQ_SIG;
+ req->hdr.pcifunc = RVU_PFFUNC(sdp->pf, 0);
+
+ otx2_mbox_msg_send(&sdp->afpf_mbox, 0);
+ res = otx2_mbox_wait_for_rsp(&sdp->afpf_mbox, 0);
+ if (res == -EIO)
+ dev_err(&sdp->pdev->dev, "RVU AF Mbox timeout\n");
+ else if (res) {
+ dev_err(&sdp->pdev->dev, "RVU Mbox error: %d\n", res);
+ res = -EFAULT;
+ }
+ rsp = (struct sdp_get_chan_info_msg *)otx2_mbox_get_rsp(&sdp->afpf_mbox, 0,
+ &req->hdr);
+ sdp->chan_base = rsp->chan_base;
+ sdp->num_chan = rsp->num_chan;
+
+ return res;
+}
+static int send_chan_info(struct sdp_dev *sdp, struct sdp_node_info *info)
+{
+ struct sdp_chan_info_msg *cinfo;
+ int res = 0;
+
+ cinfo = (struct sdp_chan_info_msg *)
+ otx2_mbox_alloc_msg(&sdp->afpf_mbox, 0, sizeof(*cinfo));
+ if (cinfo == NULL) {
+ dev_err(&sdp->pdev->dev, "RVU MBOX failed to get message.\n");
+ return -EFAULT;
+ }
+ cinfo->hdr.id = MBOX_MSG_SET_SDP_CHAN_INFO;
+ cinfo->hdr.sig = OTX2_MBOX_REQ_SIG;
+ cinfo->hdr.pcifunc = RVU_PFFUNC(sdp->pf, 0);
+
+ memcpy(&cinfo->info, info, sizeof(struct sdp_node_info));
+ otx2_mbox_msg_send(&sdp->afpf_mbox, 0);
+ res = otx2_mbox_wait_for_rsp(&sdp->afpf_mbox, 0);
+ if (res == -EIO) {
+ dev_err(&sdp->pdev->dev, "RVU AF MBOX timeout.\n");
+ } else if (res) {
+ dev_err(&sdp->pdev->dev, "RVU MBOX error: %d.\n", res);
+ res = -EFAULT;
+ }
+
+ return res;
+}
+
+static void program_sdp_rinfo(struct sdp_dev *sdp)
+{
+ u32 rppf, rpvf, numvf, pf_srn, npfs, npfs_per_pem;
+ void __iomem *addr;
+ u32 mac, mac_mask;
+ u64 cfg, val, pkg_ver;
+ u64 ep_pem, valid_ep_pem_mask, npem, epf_base;
+
+ /* PF doesn't have any rings */
+ rppf = sdp->info.vf_rings[0];
+ rpvf = sdp->info.vf_rings[1];
+ numvf = sdp->info.max_vfs - 1;
+ pf_srn = sdp->info.pf_srn;
+
+ dev_info(&sdp->pdev->dev, "rppf:%u rpvf:%u numvf:%u pf_srn:%u\n", rppf,
+ rpvf, numvf, pf_srn);
+
+ /* TODO: add support for 10K */
+ mac_mask = MAC_MASK_96XX;
+ switch (sdp->pdev->subsystem_device) {
+ case PCI_SUBSYS_DEVID_96XX:
+ valid_ep_pem_mask = VALID_EP_PEMS_MASK_96XX;
+ addr = ioremap(GPIO_PKG_VER, 8);
+ pkg_ver = readq(addr);
+ iounmap(addr);
+ if (pkg_ver == CN93XXN_PKG)
+ valid_ep_pem_mask = VALID_EP_PEMS_MASK_93XX;
+ break;
+ case PCI_SUBSYS_DEVID_95XXO:
+ case PCI_SUBSYS_DEVID_95XXN:
+ valid_ep_pem_mask = VALID_EP_PEMS_MASK_95XX;
+ break;
+ case PCI_SUBSYS_DEVID_98XX:
+ if (sdp->info.node_id == 0)
+ valid_ep_pem_mask = VALID_EP_PEMS_MASK_98XX_SDP0;
+ else
+ valid_ep_pem_mask = VALID_EP_PEMS_MASK_98XX_SDP1;
+ mac_mask = MAC_MASK_98XX;
+ break;
+ default:
+ dev_err(&sdp->pdev->dev,
+ "Failed to set SDP ring info: unsupported platform\n");
+ break;
+ }
+ sdp->valid_ep_pem_mask = valid_ep_pem_mask;
+ sdp->mac_mask = mac_mask;
+ /* TODO npfs should be obtained from dts */
+ npfs_per_pem = NUM_PFS_PER_PEM;
+ npem = 0;
+ for (ep_pem = 0; ep_pem < MAX_PEMS; ep_pem++) {
+ if (!(valid_ep_pem_mask & (1ul << ep_pem)))
+ continue;
+ addr = ioremap(PEMX_CFG(ep_pem), 8);
+ cfg = readq(addr);
+ iounmap(addr);
+ if ((!((cfg >> PEMX_CFG_LANES_BIT_POS) &
+ PEMX_CFG_LANES_BIT_MASK)) ||
+ ((cfg >> PEMX_CFG_HOSTMD_BIT_POS) &
+ PEMX_CFG_HOSTMD_BIT_MASK))
+ continue;
+ /* found the PEM in endpoint mode */
+ epf_base = 0;
+ for (npfs = 0; npfs < npfs_per_pem; npfs++) {
+ val = (((u64)numvf << RINFO_NUMVF_BIT) |
+ ((u64)rpvf << RINFO_RPVF_BIT) |
+ ((u64)(pf_srn + rppf) << RINFO_SRN_BIT));
+ writeq(val,
+ sdp->sdp_base +
+ SDPX_EPFX_RINFO((epf_base +
+ (npem * MAX_PFS_PER_PEM))));
+
+ if (sdp->pdev->subsystem_device !=
+ PCI_SUBSYS_DEVID_98XX)
+ val = (((u64)rppf << RPPF_BIT_96XX) |
+ ((u64)pf_srn << PF_SRN_BIT_96XX) |
+ ((u64)npfs_per_pem << NPFS_BIT_96XX));
+ else
+ val = (((u64)rppf << RPPF_BIT_98XX) |
+ ((u64)pf_srn << PF_SRN_BIT_98XX) |
+ ((u64)npfs_per_pem << NPFS_BIT_98XX));
+ mac = ep_pem & mac_mask;
+ writeq(val, sdp->sdp_base + SDPX_MACX_PF_RING_CTL(mac));
+ pf_srn += rppf + (rpvf * numvf);
+ epf_base++;
+ }
+ npem++;
+ }
+}
+
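+/*
+ * Signal readiness to the host by writing FW_STATUS_READY into the
+ * PCIEEP_VSECST_CTL register of each PF on every endpoint-mode PEM.
+ */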
+static void set_firmware_ready(struct sdp_dev *sdp)
+{
+ u32 npfs, npfs_per_pem;
+ void __iomem *addr;
+ u64 ep_pem, val;
+ u64 cfg;
+
+ /* TODO: add support for 10K model */
+ /* TODO npfs should be obtained from dts */
+ npfs_per_pem = NUM_PFS_PER_PEM;
+ for (ep_pem = 0; ep_pem < MAX_PEMS; ep_pem++) {
+ if (!(sdp->valid_ep_pem_mask & (1ul << ep_pem)))
+ continue;
+ addr = ioremap(PEMX_CFG(ep_pem), 8);
+ cfg = readq(addr);
+ iounmap(addr);
+ if ((!((cfg >> PEMX_CFG_LANES_BIT_POS) &
+ PEMX_CFG_LANES_BIT_MASK)) ||
+ ((cfg >> PEMX_CFG_HOSTMD_BIT_POS) &
+ PEMX_CFG_HOSTMD_BIT_MASK))
+ continue;
+ /* found the PEM in endpoint mode */
+ for (npfs = 0; npfs < npfs_per_pem; npfs++) {
+ addr = ioremap(PEMX_CFG_WR(ep_pem), 8);
+ val = ((FW_STATUS_READY << PEMX_CFG_WR_DATA) |
+ (npfs << PEMX_CFG_WR_PF) |
+ (1 << 15) |
+ (PCIEEP_VSECST_CTL << PEMX_CFG_WR_REG));
+ writeq(val, addr);
+ }
+ }
+}
+
+static int sdp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ uint64_t inst, sdp_gbl_ctl;
+ struct sdp_dev *sdp;
+ uint64_t regval;
+ int err;
+
+ sdp = devm_kzalloc(dev, sizeof(struct sdp_dev), GFP_KERNEL);
+ if (sdp == NULL)
+ return -ENOMEM;
+
+ sdp->pdev = pdev;
+ pci_set_drvdata(pdev, sdp);
+
+ mutex_init(&sdp->lock);
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ goto enable_failed;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ goto map_failed;
+ }
+
+ if (pci_sriov_get_totalvfs(pdev) <= 0) {
+ err = -ENODEV;
+ goto set_mask_failed;
+ }
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "Unable to set DMA mask\n");
+ goto set_mask_failed;
+ }
+
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "Unable to set DMA mask\n");
+ goto set_mask_failed;
+ }
+
+ pci_set_master(pdev);
+
+ /* CSR Space mapping */
+ sdp->bar2 = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM,
+ pci_resource_len(pdev, PCI_CFG_REG_BAR_NUM));
+ if (!sdp->bar2) {
+ dev_err(&pdev->dev, "Unable to map BAR2\n");
+ err = -ENODEV;
+ goto set_mask_failed;
+ }
+
+ err = sdp_check_pf_usable(sdp);
+ if (err)
+ goto pf_unusable;
+
+ /* Map SDP register area */
+ /* right now only 2 SDP blocks are supported */
+ inst = list_empty(&sdp_dev_lst_head) ? 0 : 1;
+ sdp->sdp_base = ioremap(SDP_BASE(inst), SDP_REG_SIZE);
+ if (!sdp->sdp_base) {
+ dev_err(&pdev->dev, "Unable to map SDP CSR space\n");
+ err = -ENODEV;
+ goto pf_unusable;
+ }
+ /* Map PF-AF mailbox memory */
+ sdp->af_mbx_base = ioremap_wc(pci_resource_start(pdev, MBOX_BAR_NUM),
+ pci_resource_len(pdev, MBOX_BAR_NUM));
+ if (!sdp->af_mbx_base) {
+ dev_err(&pdev->dev, "Unable to map BAR4\n");
+ err = -ENODEV;
+ goto pf_unusable;
+ }
+
+ /* Request IRQ for PF-VF mailbox here - TBD: check if this can be moved
+ * to sriov enable function
+ */
+ if (sdp_alloc_irqs(pdev)) {
+ dev_err(&pdev->dev,
+ "Unable to allocate MSIX Interrupt vectors\n");
+ err = -ENODEV;
+ goto alloc_irqs_failed;
+ }
+
+ if (sdp_register_mbox_irq(pdev) != 0) {
+ dev_err(&pdev->dev,
+ "Unable to allocate MBOX Interrupt vectors\n");
+ err = -ENODEV;
+ goto reg_mbox_irq_failed;
+ }
+
+ if (sdp_register_flr_irq(pdev) != 0) {
+ dev_err(&pdev->dev,
+ "Unable to allocate FLR Interrupt vectors\n");
+ err = -ENODEV;
+ goto reg_flr_irq_failed;
+ }
+
+ enable_af_mbox_int(pdev);
+
+ if (sdp_get_pcifunc(sdp)) {
+ dev_err(&pdev->dev,
+ "Failed to retrieve pcifunc from AF\n");
+ err = -ENODEV;
+ goto get_pcifunc_failed;
+ }
+
+ err = get_chan_info(sdp);
+ if (err) {
+ dev_err(&pdev->dev, "SDP get channel info failed\n");
+ goto get_chan_info_failed;
+ }
+
+ dev_info(&sdp->pdev->dev, "SDP chan base: 0x%x, num chan: 0x%x\n",
+ sdp->chan_base, sdp->num_chan);
+
+ /* From cn10k onwards the SDP channel configuration is programmable */
+ if (pdev->subsystem_device >= PCI_SUBSYS_DEVID_CN10K_A) {
+ regval = sdp->chan_base;
+ regval |= ilog2(sdp->num_chan) << 16;
+ writeq(regval, sdp->sdp_base + SDPX_LINK_CFG);
+ }
+
+ err = sdp_parse_rinfo(pdev, &sdp->info);
+ if (err) {
+ err = -EINVAL;
+ goto get_rinfo_failed;
+ }
+
+ /* To differentiate a PF between SDP0 and SDP1 we use the revision ID
+ * field in the config space, which is filled in by the firmware.
+ * Only the lower 4 bits are used here:
+ * 0 means SDP0
+ * 1 means SDP1
+ */
+ if (pdev->revision & 0x0F)
+ sdp->info.node_id = 1;
+ else
+ sdp->info.node_id = 0;
+
+
+ /*
+ * For 98xx there are two SDPs, so the PF ring for SDP1 starts at 128.
+ * SDP0 has PCI revision ID 0 and SDP1 has PCI revision ID 1.
+ */
+ sdp->info.pf_srn = (pdev->revision & 0x0F) ? 128 : sdp->info.pf_srn;
+
+ err = send_chan_info(sdp, &sdp->info);
+ if (err) {
+ err = -EINVAL;
+ goto get_rinfo_failed;
+ }
+
+ program_sdp_rinfo(sdp);
+
+ /* Watermark for backpressuring NIX Tx when enabled */
+ if (pdev->subsystem_device >= PCI_SUBSYS_DEVID_CN10K_A)
+ writeq(SDP_PPAIR_THOLD, sdp->sdp_base + SDPX_OUT_WMARK);
+ sdp_gbl_ctl = readq(sdp->sdp_base + SDPX_GBL_CONTROL);
+ sdp_gbl_ctl |= (1 << 2); /* BPFLR_D disable clearing BP in FLR */
+ writeq(sdp_gbl_ctl, sdp->sdp_base + SDPX_GBL_CONTROL);
+
+ err = sdp_sysfs_init(&sdp->pdev->dev);
+ if (err != 0)
+ dev_info(&sdp->pdev->dev, "Sysfs init failed\n");
+ sdp_sriov_configure(sdp->pdev, sdp->info.max_vfs);
+ set_firmware_ready(sdp);
+
+ /* Add to the global list of SDP PFs found */
+ spin_lock(&sdp_lst_lock);
+ list_add(&sdp->list, &sdp_dev_lst_head);
+ spin_unlock(&sdp_lst_lock);
+
+ return 0;
+
+get_chan_info_failed:
+get_rinfo_failed:
+get_pcifunc_failed:
+ disable_af_mbox_int(pdev);
+ sdp_free_flr_irq(pdev);
+reg_flr_irq_failed:
+ sdp_afpf_mbox_term(pdev);
+reg_mbox_irq_failed:
+ sdp_free_irqs(pdev);
+alloc_irqs_failed:
+ iounmap(sdp->af_mbx_base);
+pf_unusable:
+ pcim_iounmap(pdev, sdp->bar2);
+set_mask_failed:
+ pci_release_regions(pdev);
+map_failed:
+ pci_disable_device(pdev);
+enable_failed:
+ pci_set_drvdata(pdev, NULL);
+ devm_kfree(dev, sdp);
+ return err;
+}
+
+static void enable_vf_flr_int(struct pci_dev *pdev)
+{
+ struct sdp_dev *sdp;
+ int ena_bits, idx;
+
+ sdp = pci_get_drvdata(pdev);
+
+ /* Clear any pending interrupts */
+ for (idx = 0; idx < 2; idx++) {
+ sdp_write64(sdp, BLKADDR_RVUM, 0, RVU_PF_VFTRPENDX(idx),
+ ~0x0ULL);
+ sdp_write64(sdp, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(idx),
+ ~0x0ULL);
+ }
+
+ /* Enable FLR interrupts for VFs */
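+ /*
+ * Two 64-bit W1S registers cover up to 128 VFs: the first enables
+ * VFs 0-63, the second the remainder.
+ */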
+ if (sdp->num_vfs > 64) {
+ sdp_write64(sdp, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INT_ENA_W1SX(0),
+ GENMASK_ULL(63, 0));
+ ena_bits = (sdp->num_vfs - 64) - 1;
+ sdp_write64(sdp, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INT_ENA_W1SX(1),
+ GENMASK_ULL(ena_bits, 0));
+ } else {
+ ena_bits = sdp->num_vfs - 1;
+ sdp_write64(sdp, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INT_ENA_W1SX(0),
+ GENMASK_ULL(ena_bits, 0));
+ }
+}
+
+static void disable_vf_flr_int(struct pci_dev *pdev)
+{
+ struct sdp_dev *sdp;
+ int ena_bits, idx;
+
+ sdp = pci_get_drvdata(pdev);
+
+ /* Clear any pending interrupts */
+ for (idx = 0; idx < 2; idx++) {
+ sdp_write64(sdp, BLKADDR_RVUM, 0, RVU_PF_VFTRPENDX(idx),
+ ~0x0ULL);
+ sdp_write64(sdp, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(idx),
+ ~0x0ULL);
+ }
+
+ /* Disable the FLR interrupts for VFs */
+ if (sdp->num_vfs > 64) {
+ sdp_write64(sdp, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INT_ENA_W1CX(0),
+ GENMASK_ULL(63, 0));
+ ena_bits = (sdp->num_vfs - 64) - 1;
+ sdp_write64(sdp, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INT_ENA_W1CX(1),
+ GENMASK_ULL(ena_bits, 0));
+ } else {
+ ena_bits = sdp->num_vfs - 1;
+ sdp_write64(sdp, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INT_ENA_W1CX(0),
+ GENMASK_ULL(ena_bits, 0));
+ }
+}
+
+static void enable_vf_mbox_int(struct pci_dev *pdev)
+{
+ struct sdp_dev *sdp;
+ int ena_bits, idx;
+
+ sdp = pci_get_drvdata(pdev);
+
+ /* Clear any pending interrupts */
+ for (idx = 0; idx < 2; idx++) {
+ sdp_write64(sdp, BLKADDR_RVUM, 0, RVU_PF_VFPF_MBOX_INTX(idx),
+ ~0x0ULL);
+ }
+
+ /* Enable VF MBOX interrupts */
+ if (sdp->num_vfs > 64) {
+ sdp_write64(sdp, BLKADDR_RVUM, 0,
+ RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0),
+ GENMASK_ULL(63, 0));
+ ena_bits = (sdp->num_vfs - 64) - 1;
+ sdp_write64(sdp, BLKADDR_RVUM, 0,
+ RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
+ GENMASK_ULL(ena_bits, 0));
+ } else {
+ ena_bits = sdp->num_vfs - 1;
+ sdp_write64(sdp, BLKADDR_RVUM, 0,
+ RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0),
+ GENMASK_ULL(ena_bits, 0));
+ }
+}
+
+static void disable_vf_mbox_int(struct pci_dev *pdev)
+{
+ struct sdp_dev *sdp;
+ int ena_bits, idx;
+
+ sdp = pci_get_drvdata(pdev);
+
+ /* Clear any pending interrupts */
+ for (idx = 0; idx < 2; idx++) {
+ sdp_write64(sdp, BLKADDR_RVUM, 0, RVU_PF_VFPF_MBOX_INTX(idx),
+ ~0x0ULL);
+ }
+
+ /* Disable the MBOX interrupts for VFs */
+ if (sdp->num_vfs > 64) {
+ sdp_write64(sdp, BLKADDR_RVUM, 0,
+ RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0),
+ GENMASK_ULL(63, 0));
+ ena_bits = (sdp->num_vfs - 64) - 1;
+ sdp_write64(sdp, BLKADDR_RVUM, 0,
+ RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1),
+ GENMASK_ULL(ena_bits, 0));
+ } else {
+ ena_bits = sdp->num_vfs - 1;
+ sdp_write64(sdp, BLKADDR_RVUM, 0,
+ RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0),
+ GENMASK_ULL(ena_bits, 0));
+ }
+}
+
+static int __sriov_disable(struct pci_dev *pdev)
+{
+ struct sdp_dev *sdp;
+
+ sdp = pci_get_drvdata(pdev);
+ if (pci_vfs_assigned(pdev)) {
+ dev_err(&pdev->dev, "Disabing VFs while VFs are assigned\n");
+ dev_err(&pdev->dev, "VFs will not be freed\n");
+ return -EPERM;
+ }
+
+ disable_vf_flr_int(pdev);
+ disable_vf_mbox_int(pdev);
+
+ if (sdp->pfvf_mbox_wq) {
+ destroy_workqueue(sdp->pfvf_mbox_wq);
+ sdp->pfvf_mbox_wq = NULL;
+ }
+ if (sdp->pfvf_mbx_base) {
+ iounmap(sdp->pfvf_mbx_base);
+ sdp->pfvf_mbx_base = NULL;
+ }
+
+ otx2_mbox_destroy(&sdp->pfvf_mbox);
+ otx2_mbox_destroy(&sdp->pfvf_mbox_up);
+
+ pci_disable_sriov(pdev);
+
+ kfree(sdp->vf_info);
+ sdp->vf_info = NULL;
+
+ return 0;
+}
+
+static int __sriov_enable(struct pci_dev *pdev, int num_vfs)
+{
+ struct rvu_vf *vf_ptr;
+ int curr_vfs, vf = 0;
+ struct sdp_dev *sdp;
+ u64 pf_vf_mbox_base;
+ int err;
+
+ curr_vfs = pci_num_vf(pdev);
+ if (!curr_vfs && !num_vfs)
+ return -EINVAL;
+
+ if (curr_vfs) {
+ dev_err(&pdev->dev,
+ "Virtual Functions are already enabled on this device\n");
+ return -EINVAL;
+ }
+ if (num_vfs > SDP_MAX_VFS)
+ num_vfs = SDP_MAX_VFS;
+
+ sdp = pci_get_drvdata(pdev);
+
+ if (sdp_get_available_rsrcs(sdp)) {
+ dev_err(&pdev->dev, "Failed to get resource limits.\n");
+ return -EFAULT;
+ }
+
+ sdp->vf_info = kcalloc(num_vfs, sizeof(struct rvu_vf), GFP_KERNEL);
+ if (sdp->vf_info == NULL)
+ return -ENOMEM;
+
+ sdp->num_vfs = num_vfs;
+
+ /* Map PF-VF mailbox memory.
+ * On CN10K platform, PF <-> VF mailbox region follows after
+ * PF <-> AF mailbox region.
+ */
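+ /* Otherwise the VF mailbox base is read from RVU_PF_VF_BAR4_ADDR. */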
+ if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A)
+ pf_vf_mbox_base = pci_resource_start(pdev, PCI_MBOX_BAR_NUM) + MBOX_SIZE;
+ else
+ pf_vf_mbox_base = readq((void __iomem *)((u64)sdp->bar2 +
+ RVU_PF_VF_BAR4_ADDR));
+
+ if (!pf_vf_mbox_base) {
+ dev_err(&pdev->dev, "PF-VF Mailbox address not configured\n");
+ err = -ENOMEM;
+ goto err_mbox_mem_map;
+ }
+ sdp->pfvf_mbx_base = ioremap_wc(pf_vf_mbox_base, MBOX_SIZE * num_vfs);
+ if (!sdp->pfvf_mbx_base) {
+ dev_err(&pdev->dev,
+ "Mapping of PF-VF mailbox address failed\n");
+ err = -ENOMEM;
+ goto err_mbox_mem_map;
+ }
+ err = otx2_mbox_init(&sdp->pfvf_mbox, sdp->pfvf_mbx_base, pdev,
+ sdp->bar2, MBOX_DIR_PFVF, num_vfs);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Failed to initialize PF/VF MBOX for %d VFs\n",
+ num_vfs);
+ goto err_mbox_init;
+ }
+
+ err = otx2_mbox_init(&sdp->pfvf_mbox_up, sdp->pfvf_mbx_base, pdev,
+ sdp->bar2, MBOX_DIR_PFVF_UP, num_vfs);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Failed to initialize PF/VF MBOX UP for %d VFs\n",
+ num_vfs);
+ goto err_mbox_up_init;
+ }
+
+ /* Allocate a single workqueue for VF/PF mailbox because access to
+ * AF/PF mailbox has to be synchronized.
+ */
+ sdp->pfvf_mbox_wq =
+ alloc_workqueue("sdp_pfvf_mailbox",
+ WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM, 1);
+ if (sdp->pfvf_mbox_wq == NULL) {
+ dev_err(&pdev->dev,
+ "Workqueue allocation failed for PF-VF MBOX\n");
+ err = -ENOMEM;
+ goto err_workqueue_alloc;
+ }
+
+ for (vf = 0; vf < num_vfs; vf++) {
+ vf_ptr = &sdp->vf_info[vf];
+ vf_ptr->vf_id = vf;
+ vf_ptr->sdp = (void *)sdp;
+ vf_ptr->intr_idx = vf % 64;
+ INIT_WORK(&vf_ptr->mbox_wrk, sdp_pfvf_mbox_handler);
+ INIT_WORK(&vf_ptr->mbox_wrk_up, sdp_pfvf_mbox_handler_up);
+ INIT_WORK(&vf_ptr->pfvf_flr_work, sdp_pfvf_flr_handler);
+ }
+
+ enable_vf_mbox_int(pdev);
+ enable_vf_flr_int(pdev);
+
+ err = pci_enable_sriov(pdev, num_vfs);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to enable to SRIOV VFs: %d\n", err);
+ goto err_enable_sriov;
+ }
+
+ return num_vfs;
+
+err_enable_sriov:
+ disable_vf_flr_int(pdev);
+ disable_vf_mbox_int(pdev);
+err_workqueue_alloc:
+ if (sdp->pfvf_mbox_wq)
+ destroy_workqueue(sdp->pfvf_mbox_wq);
+ if (sdp->pfvf_mbox_up.dev != NULL)
+ otx2_mbox_destroy(&sdp->pfvf_mbox_up);
+err_mbox_up_init:
+ if (sdp->pfvf_mbox.dev != NULL)
+ otx2_mbox_destroy(&sdp->pfvf_mbox);
+err_mbox_init:
+ iounmap(sdp->pfvf_mbx_base);
+err_mbox_mem_map:
+ kfree(sdp->vf_info);
+
+ return err;
+}
+
+static int sdp_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ if (num_vfs == 0)
+ return __sriov_disable(pdev);
+ else
+ return __sriov_enable(pdev, num_vfs);
+}
+
+static void sdp_remove(struct pci_dev *pdev)
+{
+ struct sdp_dev *sdp = pci_get_drvdata(pdev);
+
+
+ spin_lock(&sdp_lst_lock);
+ list_del(&sdp->list);
+ spin_unlock(&sdp_lst_lock);
+
+ sdp_sysfs_remove(&pdev->dev);
+
+ if (sdp->num_vfs)
+ __sriov_disable(pdev);
+
+ disable_af_mbox_int(pdev);
+ sdp_free_flr_irq(pdev);
+ sdp_afpf_mbox_term(pdev);
+ sdp_free_irqs(pdev);
+
+ if (sdp->af_mbx_base)
+ iounmap(sdp->af_mbx_base);
+ if (sdp->bar2)
+ pcim_iounmap(pdev, sdp->bar2);
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ devm_kfree(&pdev->dev, sdp);
+}
+
+static struct pci_driver sdp_driver = {
+ .name = DRV_NAME,
+ .id_table = rvu_sdp_id_table,
+ .probe = sdp_probe,
+ .remove = sdp_remove,
+ .sriov_configure = sdp_sriov_configure,
+};
+
+static int __init otx2_sdp_init_module(void)
+{
+ pr_info("%s\n", DRV_NAME);
+
+ spin_lock_init(&sdp_lst_lock);
+ return pci_register_driver(&sdp_driver);
+}
+
+static void __exit otx2_sdp_exit_module(void)
+{
+ pci_unregister_driver(&sdp_driver);
+}
+
+module_init(otx2_sdp_init_module);
+module_exit(otx2_sdp_exit_module);
diff --git a/drivers/soc/marvell/octeontx2-sdp/sdp.h b/drivers/soc/marvell/octeontx2-sdp/sdp.h
new file mode 100644
index 000000000000..6fd66e40d9b0
--- /dev/null
+++ b/drivers/soc/marvell/octeontx2-sdp/sdp.h
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * OcteonTX2 SDP driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef SDP_H_
+#define SDP_H_
+
+#include <linux/device.h>
+#include <linux/workqueue.h>
+#include <linux/pci.h>
+#include "mbox.h"
+
+#define MAX_DOM_VFS 8
+#define SDP_MAX_VFS 128
+/* 12 CGX PFs + max HWVFs - VFs used for domains */
+#define SDP_MAX_PORTS (12 + 256 - MAX_DOM_VFS)
+#define NAME_SIZE 32
+
+#define RVU_PFVF_PF_SHIFT 10
+#define RVU_PFVF_PF_MASK 0x3F
+#define RVU_PFVF_FUNC_SHIFT 0
+#define RVU_PFVF_FUNC_MASK 0x3FF
+
+#define RVU_PFFUNC(pf, func) \
+ ((((pf) & RVU_PFVF_PF_MASK) << RVU_PFVF_PF_SHIFT) | \
+ (((func) & RVU_PFVF_FUNC_MASK) << RVU_PFVF_FUNC_SHIFT))
+
+#define SDP_BASE(a) (0x86E080000000ull | a << 36)
+#define SDP_REG_SIZE 0x42000000
+
+#define SDPX_RINGX_IN_PKT_CNT(a) (0x10080ull | a << 17)
+
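+/*
+ * Output back-pressure enable (W1S) register; OcteonTX3 parts use a
+ * different offset. The macro relies on an 'sdp' pointer being in the
+ * caller's scope for the is_otx3_sdp() check.
+ */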
+#define SDPX_OUT_BP_ENX_W1S(a) ({ \
+ u64 offset; \
+ \
+ offset = (0x80280ull | a << 4); \
+ if (is_otx3_sdp(sdp)) \
+ offset = (0x40080280ull | a << 4); \
+ offset; })
+
+#define SDPX_OUT_WMARK (0x40060000ull)
+#define SDPX_LINK_CFG (0x40080180ull)
+#define SDPX_GBL_CONTROL (0x40080200ull)
+#define SDPX_EPFX_RINFO(a) (0x205f0ull | a << 25)
+#define RINFO_NUMVF_BIT 48
+#define RINFO_RPVF_BIT 32
+#define RINFO_SRN_BIT 0
+
+#define SDPX_MACX_PF_RING_CTL(a) (0x2c000ull | a << 4)
+#define RPPF_BIT_96XX 16
+#define RPPF_BIT_98XX 32
+#define PF_SRN_BIT_96XX 8
+#define PF_SRN_BIT_98XX 0
+#define NPFS_BIT_96XX 0
+#define NPFS_BIT_98XX 48
+
+#define MAX_PEMS 4
+#define MAC_MASK_96XX 0x3
+#define MAC_MASK_98XX 0x1
+#define MAX_PFS_PER_PEM 8
+#define NUM_PFS_PER_PEM 1
+
+/* 96xx only PEM0 and PEM2 have SDP */
+#define VALID_EP_PEMS_MASK_96XX 0x5
+/* 95xx only PEM0 has SDP */
+#define VALID_EP_PEMS_MASK_95XX 0x1
+/* 93xx only PEM0 has SDP */
+#define VALID_EP_PEMS_MASK_93XX 0x1
+
+/* 98xx only PEM0 and PEM1 for SDP0 */
+#define VALID_EP_PEMS_MASK_98XX_SDP0 0x3
+/* 98xx only PEM2 and PEM3 for SDP1 */
+#define VALID_EP_PEMS_MASK_98XX_SDP1 0xc
+
+#define PEMX_CFG(a) (0x8E00000000D8ull | a << 36)
+#define PEMX_CFG_HOSTMD_BIT_MASK 0x1
+#define PEMX_CFG_HOSTMD_BIT_POS 0
+#define PEMX_CFG_LANES_BIT_MASK 0x3
+#define PEMX_CFG_LANES_BIT_POS 1
+
+#define PEMX_CFG_WR(a) (0x8E0000000018ull | a << 36)
+#define PEMX_CFG_WR_DATA 32
+#define PEMX_CFG_WR_PF 18
+#define PEMX_CFG_WR_REG 0
+
+#define PCIEEP_VSECST_CTL 0x4d0
+#define FW_STATUS_READY 0x1ul
+
+#define GPIO_PKG_VER (0x803000001610ull)
+#define CN93XXN_PKG 5
+
+#define PCI_SUBSYS_DEVID_95XXN 0xB400
+#define PCI_SUBSYS_DEVID_95XXO 0xB600
+
+struct sdp_dev;
+
+struct rvu_vf {
+ struct work_struct mbox_wrk;
+ struct work_struct mbox_wrk_up;
+ struct work_struct pfvf_flr_work;
+ struct device_attribute in_use_attr;
+ struct pci_dev *pdev;
+ struct kobject *limits_kobj;
+ /* pointer to the PF struct this VF belongs to */
+ struct sdp_dev *sdp;
+ int vf_id;
+ int intr_idx; /* vf_id%64 actually */
+ bool in_use;
+ bool got_flr;
+};
+
+struct sdp_dev {
+ struct list_head list;
+ struct mutex lock;
+ struct pci_dev *pdev;
+ void __iomem *sdp_base;
+ void __iomem *bar2;
+ void __iomem *af_mbx_base;
+ void __iomem *pfvf_mbx_base;
+#define SDP_VF_ENABLED 0x1
+ u32 flags;
+ u32 num_vfs;
+ u16 chan_base;
+ u16 num_chan;
+ bool *irq_allocated;
+ char *irq_names;
+ int msix_count;
+ int pf;
+ u8 valid_ep_pem_mask;
+ u8 mac_mask;
+
+ struct sdp_node_info info;
+ struct otx2_mbox pfvf_mbox; /* MBOXes for VF => PF channel */
+ struct otx2_mbox pfvf_mbox_up; /* MBOXes for PF => VF channel */
+ struct otx2_mbox afpf_mbox; /* MBOX for PF => AF channel */
+ struct otx2_mbox afpf_mbox_up; /* MBOX for AF => PF channel */
+ struct work_struct mbox_wrk;
+ struct work_struct mbox_wrk_up;
+ struct workqueue_struct *afpf_mbox_wq; /* MBOX handler */
+ struct workqueue_struct *pfvf_mbox_wq; /* VF MBOX handler */
+ struct rvu_vf *vf_info;
+ struct free_rsrcs_rsp limits; /* Maximum limits for all VFs */
+};
+
+#endif /* SDP_H_ */
diff --git a/drivers/soc/marvell/octeontx2-serdes/Makefile b/drivers/soc/marvell/octeontx2-serdes/Makefile
new file mode 100644
index 000000000000..0693683b0cee
--- /dev/null
+++ b/drivers/soc/marvell/octeontx2-serdes/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for Marvell's OcteonTX2 serdes diagnostic commands
+#
+
+obj-$(CONFIG_OCTEONTX_SERDES) += serdes_debugfs.o
diff --git a/drivers/soc/marvell/octeontx2-serdes/serdes_debugfs.c b/drivers/soc/marvell/octeontx2-serdes/serdes_debugfs.c
new file mode 100644
index 000000000000..74745636af92
--- /dev/null
+++ b/drivers/soc/marvell/octeontx2-serdes/serdes_debugfs.c
@@ -0,0 +1,1055 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) "octeontx2-serdes: " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/string.h>
+#include <linux/time.h>
+#include <linux/pci.h>
+#include <linux/uaccess.h>
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/arm-smccc.h>
+#include <soc/marvell/octeontx/octeontx_smc.h>
+
+#define OCTEONTX_SERDES_DBG_GET_MEM 0xc2000d04
+#define OCTEONTX_SERDES_DBG_GET_EYE 0xc2000d05
+#define OCTEONTX_SERDES_DBG_GET_CONF 0xc2000d06
+#define OCTEONTX_SERDES_DBG_PRBS 0xc2000d07
+#define OCTEONTX_SERDES_DBG_SET_TUNE 0xc2000d08
+#define OCTEONTX_SERDES_DBG_SET_LOOP 0xc2000d09
+
+#define MAX_LMAC_PER_CGX 4
+
+#define OCTEONTX_SMC_PENDING 0x1
+
+#define SERDES_SETTINGS_SIZE 0x1000
+
+
+enum qlm_type {
+ QLM_GSERC_TYPE,
+ QLM_GSERR_TYPE,
+ QLM_GSERN_TYPE,
+};
+
+
+enum cgx_prbs_cmd {
+ CGX_PRBS_START_CMD = 1,
+ CGX_PRBS_STOP_CMD,
+ CGX_PRBS_GET_DATA_CMD,
+ CGX_PRBS_CLEAR_CMD
+};
+
+struct cgx_prbs_errors {
+ u64 err;
+ u64 phy_host;
+ u64 phy_line;
+};
+
+struct cgx_prbs_data {
+ u64 num_lanes;
+ struct cgx_prbs_errors errors[MAX_LMAC_PER_CGX];
+};
+
+struct prbs_status {
+ struct list_head list;
+ int qlm;
+ int qlm_lane;
+ long start_time;
+ struct prbs_status *next;
+};
+
+struct eye_data {
+ int width;
+ int height;
+ u32 data[64][128];
+ enum qlm_type type;
+};
+
+static struct {
+ int qlm;
+ int lane;
+ struct eye_data *res;
+} eye_cmd_data;
+
+static struct {
+ int qlm;
+ int lane;
+ char *res;
+} serdes_cmd_data;
+
+static struct {
+ int qlm;
+ int lane;
+ int swing;
+ int pre;
+ int post;
+ char *res;
+} tune_serdes_cmd;
+
+static struct {
+ int qlm;
+ int lane;
+ int type;
+ char *res;
+} loop_serdes_cmd;
+
+static struct {
+ int qlm;
+ int qlm_lane;
+ struct prbs_status status_list;
+ struct cgx_prbs_data *res;
+} prbs_cmd_data;
+
+
+/* Debugfs root directory for serdes */
+static struct dentry *pserdes_root;
+
+
+static int serdes_dbg_lane_parse(const char __user *buffer,
+ size_t count, int *qlm, int *lane)
+{
+ char *cmd_buf, *cmd_buf_tmp, *subtoken;
+ int ec;
+
+ cmd_buf = memdup_user_nul(buffer, count);
+ if (IS_ERR(cmd_buf))
+ return PTR_ERR(cmd_buf);
+
+ cmd_buf_tmp = strchr(cmd_buf, '\n');
+ if (cmd_buf_tmp) {
+ *cmd_buf_tmp = '\0';
+ count = cmd_buf_tmp - cmd_buf + 1;
+ }
+
+ cmd_buf_tmp = cmd_buf;
+ subtoken = strsep(&cmd_buf, " ");
+ ec = subtoken ? kstrtoint(subtoken, 10, qlm) : -EINVAL;
+
+ if (ec < 0) {
+ kfree(cmd_buf_tmp);
+ return ec;
+ }
+
+ subtoken = strsep(&cmd_buf, " ");
+ ec = subtoken ? kstrtoint(subtoken, 10, lane) : -EINVAL;
+
+ kfree(cmd_buf_tmp);
+ return ec;
+}
+
+static ssize_t serdes_dbg_eye_write_op(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct arm_smccc_res res;
+ int ec;
+
+ ec = serdes_dbg_lane_parse(buffer, count, &eye_cmd_data.qlm,
+ &eye_cmd_data.lane);
+ if (ec < 0) {
+ pr_info("Usage: echo <qlm> <lane> > eye\n");
+ return ec;
+ }
+
+ do {
+ arm_smccc_smc(OCTEONTX_SERDES_DBG_GET_EYE, eye_cmd_data.qlm,
+ eye_cmd_data.lane, 0, 0, 0, 0, 0, &res);
+ } while (res.a0 == OCTEONTX_SMC_PENDING);
+ if (res.a0 != SMCCC_RET_SUCCESS) {
+ pr_info("CGX eye capture failed.\n");
+ return -EIO;
+ }
+
+ return count;
+}
+
+static int serdes_dbg_eye_print_gsern(struct seq_file *s)
+{
+ struct eye_data *eye;
+ int v, t, v_height;
+ int errors_tr_ones, errors_nt_ones, errors_tr_zeros, errors_nt_zeros;
+
+ eye = eye_cmd_data.res;
+
+ seq_printf(s, "V T %-20s %-20s %-20s %-20s\n", "TRANS_ONE_ECNT",
+ "NON_TRANS_ONE_ECNT", "TRANS_ZEROS_ECNT",
+ "NON_TRANS_ZEROS_ECNT");
+
+ v_height = (eye->height + 1) / 2;
+
+ for (t = 0; t < eye->width; t++) {
+ for (v = 0; v < v_height; v++) {
+ errors_nt_ones = eye->data[v_height-v-1][t];
+ errors_tr_ones = eye->data[v_height-v-1][t+64];
+ errors_nt_zeros = eye->data[v_height+v-1][t];
+ errors_tr_zeros = eye->data[v_height+v-1][t+64];
+
+ seq_printf(s, "%02x %02x %020x %020x %020x %020x\n",
+ v, t, errors_tr_ones, errors_nt_ones,
+ errors_tr_zeros, errors_nt_zeros);
+ }
+ }
+
+ return 0;
+}
+
+static int serdes_dbg_eye_print_gserx(struct seq_file *s)
+{
+ struct eye_data *eye;
+ int x_min = 0;
+ int x_step = 1;
+ int y_min = -255;
+ int y_step = 8;
+ int x;
+ int y;
+
+ eye = eye_cmd_data.res;
+
+ seq_printf(s, "%5s %5s %s\n", "V", "T", "Errors");
+
+ for (x = 0; x < eye->width; x++) {
+ for (y = 0; y < eye->height; y++) {
+ seq_printf(s, "%5d %5d %u\n", y * y_step + y_min,
+ x * x_step + x_min, eye->data[y][x]);
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Square root by abacus algorithm, Martin Guy @ UKC, June 1985.
+ * From a book on programming abaci by Mr C. Woo.
+ */
+static u64 isqrt(u64 num)
+{
+ u64 result = 0;
+ /* The second-to-top bit is set: 1 << 62 for 64 bits */
+ u64 bit = 1ull << 62;
+
+ /* "bit" starts at the highest power of four <= the argument. */
+ while (bit > num)
+ bit >>= 2;
+
+ while (bit != 0) {
+ if (num >= result + bit) {
+ num -= result + bit;
+ result = (result >> 1) + bit;
+ } else {
+ result >>= 1;
+ }
+ bit >>= 2;
+ }
+
+ return result;
+}
+
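+/* Approximate integer log10(), used to bucket error counts for display */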
+static u64 log_10(u64 num)
+{
+ u64 result = 0;
+
+ while (num > 10) {
+ num /= 10;
+ result++;
+ }
+ if (num >= 5)
+ result++;
+
+ return result;
+}
+
+static int serdes_dbg_eye_read_op(struct seq_file *s, void *unused)
+{
+ struct eye_data *eye;
+ u64 data;
+ int ec, x, y, width, height, last_color, level, deltay, deltax, dy, dx;
+ int dist, color;
+ int eye_area = 0;
+ int eye_width = 0;
+ int eye_height = 0;
+ char color_str[] = "\33[40m"; /* Note: This is modified, not constant */
+
+ eye = eye_cmd_data.res;
+
+ /* GSERN eye needs to be handled differently */
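+ /*
+ * For GSERN, fold the transition and non-transition error counts
+ * (stored 64 columns apart) into a single per-point count, capped
+ * at U32_MAX, so the rendering below works for all QLM types.
+ */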
+ if (eye->type == QLM_GSERN_TYPE) {
+ ec = serdes_dbg_eye_print_gsern(s);
+ if (ec)
+ return ec;
+ for (y = 0; y < eye->height; y++) {
+ for (x = 0; x < eye->width; x++) {
+ data = eye->data[y][x] + eye->data[y][x + 64];
+ if (data > U32_MAX)
+ data = U32_MAX;
+ eye->data[y][x] = data;
+ }
+ }
+ } else {
+ ec = serdes_dbg_eye_print_gserx(s);
+ if (ec)
+ return ec;
+ }
+
+ /* Calculate the max eye width */
+ for (y = 0; y < eye->height; y++) {
+ width = 0;
+ for (x = 0; x < eye->width; x++) {
+ if (eye->data[y][x] == 0) {
+ width++;
+ eye_area++;
+ }
+ }
+ if (width > eye_width)
+ eye_width = width;
+ }
+
+ /* Calculate the max eye height */
+ for (x = 0; x < eye->width; x++) {
+ height = 0;
+ for (y = 0; y < eye->height; y++) {
+ if (eye->data[y][x] == 0) {
+ height++;
+ eye_area++;
+ }
+ }
+ if (height > eye_height)
+ eye_height = height;
+ }
+
+ seq_printf(s, "\nEye Diagram for QLM %d, Lane %d\n", eye_cmd_data.qlm,
+ eye_cmd_data.lane);
+
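+ /*
+ * Render an ASCII eye diagram: each cell prints log10(error count)
+ * as a single digit, with an ANSI background color derived from the
+ * local error gradient so the eye opening and its edges stand out.
+ */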
+ last_color = -1;
+ for (y = 0; y < eye->height; y++) {
+ for (x = 0; x < eye->width; x++) {
+ level = log_10(eye->data[y][x] + 1);
+ if (level > 9)
+ level = 9;
+ #define DIFF(a, b) (((a) < (b)) ? (b)-(a) : (a)-(b))
+ deltay = (y == (eye->height - 1)) ? -1 : 1;
+ deltax = (x == (eye->width - 1)) ? -1 : 1;
+ dy = DIFF(eye->data[y][x], eye->data[y + deltay][x]);
+ dx = DIFF(eye->data[y][x], eye->data[y][x + deltax]);
+ #undef DIFF
+ dist = dx * dx + dy * dy;
+ color = log_10(isqrt(dist) + 1);
+ if (color > 6)
+ color = 6;
+ if (level == 0)
+ color = 0;
+ if (color != last_color) {
+ color_str[3] = '0' + color;
+ seq_printf(s, "%s", color_str);
+ last_color = color;
+ }
+ seq_printf(s, "%c", '0' + level);
+ }
+ seq_puts(s, "\33[0m\n");
+ last_color = -1;
+ }
+ seq_printf(s, "\nEye Width %d, Height %d, Area %d\n",
+ eye_width, eye_height, eye_area);
+
+ return 0;
+}
+
+static int serdes_dbg_open_eye(struct inode *inode, struct file *file)
+{
+ return single_open(file, serdes_dbg_eye_read_op, inode->i_private);
+}
+
+static const struct file_operations serdes_dbg_eye_fops = {
+ .owner = THIS_MODULE,
+ .open = serdes_dbg_open_eye,
+ .read = seq_read,
+ .write = serdes_dbg_eye_write_op,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static ssize_t serdes_dbg_settings_write_op(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct arm_smccc_res res;
+ int ec;
+
+ ec = serdes_dbg_lane_parse(buffer, count, &serdes_cmd_data.qlm,
+ &serdes_cmd_data.lane);
+ if (ec < 0) {
+ pr_info("Usage: echo <qlm> <lane> > serdes\n");
+ return ec;
+ }
+
+ arm_smccc_smc(OCTEONTX_SERDES_DBG_GET_CONF, serdes_cmd_data.qlm,
+ serdes_cmd_data.lane, 0, 0, 0, 0, 0, &res);
+ if (res.a0 != SMCCC_RET_SUCCESS) {
+ pr_info("CGX serdes display command failed.\n");
+ return -EIO;
+ }
+
+ return count;
+}
+
+static int serdes_dbg_settings_read_op(struct seq_file *s, void *unused)
+{
+ serdes_cmd_data.res[SERDES_SETTINGS_SIZE - 1] = '\0';
+
+ seq_printf(s, "%s", serdes_cmd_data.res);
+
+ return 0;
+}
+
+static int serdes_dbg_open_settings(struct inode *inode, struct file *file)
+{
+ return single_open(file, serdes_dbg_settings_read_op, inode->i_private);
+}
+
+static const struct file_operations serdes_dbg_settings_fops = {
+ .owner = THIS_MODULE,
+ .open = serdes_dbg_open_settings,
+ .read = seq_read,
+ .write = serdes_dbg_settings_write_op,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int tune_serdes_dbg_lane_parse(const char __user *buffer,
+ size_t count, int *qlm, int *lane,
+ int *swing, int *pre, int *post)
+{
+ char *cmd_buf, *cmd_buf_tmp, *subtoken;
+ int ec;
+
+ cmd_buf = memdup_user_nul(buffer, count);
+ if (IS_ERR(cmd_buf))
+ return PTR_ERR(cmd_buf);
+
+ cmd_buf_tmp = strchr(cmd_buf, '\n');
+ if (cmd_buf_tmp) {
+ *cmd_buf_tmp = '\0';
+ count = cmd_buf_tmp - cmd_buf + 1;
+ }
+
+ cmd_buf_tmp = cmd_buf;
+ subtoken = strsep(&cmd_buf, " ");
+ ec = subtoken ? kstrtoint(subtoken, 10, qlm) : -EINVAL;
+
+ if (ec < 0) {
+ kfree(cmd_buf_tmp);
+ return ec;
+ }
+
+ subtoken = strsep(&cmd_buf, " ");
+ ec = subtoken ? kstrtoint(subtoken, 10, lane) : -EINVAL;
+
+ if (ec == -EINVAL) {
+ kfree(cmd_buf_tmp);
+ return ec;
+ }
+
+ subtoken = strsep(&cmd_buf, " ");
+ ec = subtoken ? kstrtoint(subtoken, 10, swing) : -EINVAL;
+
+ if (ec == -EINVAL) {
+ kfree(cmd_buf_tmp);
+ return ec;
+ }
+
+ subtoken = strsep(&cmd_buf, " ");
+ ec = subtoken ? kstrtoint(subtoken, 10, pre) : -EINVAL;
+
+ if (ec == -EINVAL) {
+ kfree(cmd_buf_tmp);
+ return ec;
+ }
+
+ subtoken = strsep(&cmd_buf, " ");
+ ec = subtoken ? kstrtoint(subtoken, 10, post) : -EINVAL;
+
+ kfree(cmd_buf_tmp);
+ return ec;
+}
+
+static int tune_serdes_dbg_settings_read_op(struct seq_file *s, void *unused)
+{
+ tune_serdes_cmd.res = NULL;
+
+ seq_printf(s, "%s", tune_serdes_cmd.res);
+
+ return 0;
+}
+
+static ssize_t tune_serdes_dbg_settings_write_op(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct arm_smccc_res res;
+ int ec;
+
+ ec = tune_serdes_dbg_lane_parse(buffer, count, &tune_serdes_cmd.qlm,
+ &tune_serdes_cmd.lane, &tune_serdes_cmd.swing,
+ &tune_serdes_cmd.pre, &tune_serdes_cmd.post);
+ if (ec < 0) {
+ pr_info("Usage: echo <qlm> <lane> <swing> <pre> <post> > tunetx\n");
+ return ec;
+ }
+
+ arm_smccc_smc(OCTEONTX_SERDES_DBG_SET_TUNE, tune_serdes_cmd.qlm,
+ tune_serdes_cmd.lane, tune_serdes_cmd.swing,
+ (tune_serdes_cmd.pre << 8) | (tune_serdes_cmd.post & 0xff),
+ 0, 0, 0, &res);
+
+ if (res.a0 != SMCCC_RET_SUCCESS) {
+ pr_info("QLM serdes TX settings command failed.\n");
+ return -EIO;
+ }
+
+ return count;
+}
+
+static int tune_serdes_dbg_open_settings(struct inode *inode, struct file *file)
+{
+ return single_open(file, tune_serdes_dbg_settings_read_op,
+ inode->i_private);
+}
+
+static const struct file_operations tune_serdes_dbg_settings_fops = {
+ .owner = THIS_MODULE,
+ .open = tune_serdes_dbg_open_settings,
+ .read = seq_read,
+ .write = tune_serdes_dbg_settings_write_op,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int loop_serdes_dbg_lane_parse(const char __user *buffer,
+ size_t count, int *qlm, int *lane,
+ int *type)
+{
+ char *cmd_buf, *cmd_buf_tmp, *subtoken;
+ int ec;
+
+ cmd_buf = memdup_user_nul(buffer, count);
+ if (IS_ERR(cmd_buf))
+ return PTR_ERR(cmd_buf);
+
+ cmd_buf_tmp = strchr(cmd_buf, '\n');
+ if (cmd_buf_tmp) {
+ *cmd_buf_tmp = '\0';
+ count = cmd_buf_tmp - cmd_buf + 1;
+ }
+
+ cmd_buf_tmp = cmd_buf;
+ subtoken = strsep(&cmd_buf, " ");
+ ec = subtoken ? kstrtoint(subtoken, 10, qlm) : -EINVAL;
+
+ if (ec < 0) {
+ kfree(cmd_buf_tmp);
+ return ec;
+ }
+
+ subtoken = strsep(&cmd_buf, " ");
+ ec = subtoken ? kstrtoint(subtoken, 10, lane) : -EINVAL;
+
+ if (ec == -EINVAL) {
+ kfree(cmd_buf_tmp);
+ return ec;
+ }
+
+ subtoken = strsep(&cmd_buf, " ");
+ ec = subtoken ? kstrtoint(subtoken, 10, type) : -EINVAL;
+
+ kfree(cmd_buf_tmp);
+ return ec;
+}
+
+static ssize_t loop_serdes_dbg_settings_write_op(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct arm_smccc_res res;
+ int ec;
+
+ ec = loop_serdes_dbg_lane_parse(buffer, count, &loop_serdes_cmd.qlm,
+ &loop_serdes_cmd.lane, &loop_serdes_cmd.type);
+ if (ec < 0) {
+ pr_info("Usage: echo <qlm> <lane> <type> > loop\n");
+ return ec;
+ }
+
+ arm_smccc_smc(OCTEONTX_SERDES_DBG_SET_LOOP, loop_serdes_cmd.qlm,
+ loop_serdes_cmd.lane, loop_serdes_cmd.type,
+ 0, 0, 0, 0, &res);
+
+ if (res.a0 != SMCCC_RET_SUCCESS) {
+ pr_info("QLM serdes loop command failed.\n");
+ return -EIO;
+ }
+
+ return count;
+}
+
+static int loop_serdes_dbg_settings_read_op(struct seq_file *s, void *unused)
+{
+ loop_serdes_cmd.res = NULL;
+
+ seq_printf(s, "%s", loop_serdes_cmd.res);
+
+ return 0;
+}
+
+static int loop_serdes_dbg_open_settings(struct inode *inode, struct file *file)
+{
+ return single_open(file, loop_serdes_dbg_settings_read_op,
+ inode->i_private);
+}
+
+static const struct file_operations loop_serdes_dbg_settings_fops = {
+ .owner = THIS_MODULE,
+ .open = loop_serdes_dbg_open_settings,
+ .read = seq_read,
+ .write = loop_serdes_dbg_settings_write_op,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
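+/*
+ * Parse "<qlm> <lane> [start <mode> [inject] | stop | clear]"; with no
+ * subcommand the QLM/lane pair is only selected for subsequent reads.
+ */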
+static int serdes_dbg_prbs_lane_parse(const char __user *buffer,
+ size_t count, int *qlm,
+ enum cgx_prbs_cmd *cmd, int *mode,
+ int *qlm_lane, int *inject)
+{
+ char *cmd_buf, *cmd_buf_tmp, *subtoken;
+ int ec;
+
+ cmd_buf = memdup_user_nul(buffer, count);
+ if (IS_ERR(cmd_buf))
+ return PTR_ERR(cmd_buf);
+
+ cmd_buf_tmp = strchr(cmd_buf, '\n');
+ if (cmd_buf_tmp) {
+ *cmd_buf_tmp = '\0';
+ count = cmd_buf_tmp - cmd_buf + 1;
+ }
+
+ cmd_buf_tmp = cmd_buf;
+ subtoken = strsep(&cmd_buf, " ");
+ ec = subtoken ? kstrtoint(subtoken, 10, qlm) : -EINVAL;
+
+ if (ec < 0) {
+ kfree(cmd_buf_tmp);
+ return ec;
+ }
+
+ subtoken = strsep(&cmd_buf, " ");
+ ec = subtoken ? kstrtoint(subtoken, 10, qlm_lane) : -EINVAL;
+
+ if (ec < 0 || *qlm_lane < 0) {
+ kfree(cmd_buf_tmp);
+ return ec ? ec : -EINVAL;
+ }
+
+ subtoken = strsep(&cmd_buf, " ");
+ if (subtoken == NULL) {
+ *cmd = CGX_PRBS_GET_DATA_CMD;
+ } else {
+ if (!strcmp(subtoken, "start")) {
+ *cmd = CGX_PRBS_START_CMD;
+ subtoken = strsep(&cmd_buf, " ");
+ ec = subtoken ? kstrtoint(subtoken, 10, mode) :
+ -EINVAL;
+ if (ec == -EINVAL)
+ goto out;
+ subtoken = strsep(&cmd_buf, " ");
+ if (subtoken) {
+ ec = kstrtoint(subtoken, 10, inject);
+ if (ec)
+ goto out;
+ } else {
+ *inject = 0;
+ }
+ } else if (!strcmp(subtoken, "stop")) {
+ *cmd = CGX_PRBS_STOP_CMD;
+ } else if (!strcmp(subtoken, "clear")) {
+ *cmd = CGX_PRBS_CLEAR_CMD;
+ } else {
+ ec = -EINVAL;
+ }
+ }
+
+out:
+ kfree(cmd_buf_tmp);
+ return ec;
+}
+
+static ssize_t serdes_dbg_prbs_write_op(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct prbs_status *status = NULL;
+ struct arm_smccc_res res;
+ enum cgx_prbs_cmd cmd;
+ int mode;
+ int qlm;
+ int ec;
+ int qlm_lane;
+ int inject;
+
+ ec = serdes_dbg_prbs_lane_parse(buffer, count, &prbs_cmd_data.qlm,
+ &cmd, &mode, &prbs_cmd_data.qlm_lane,
+ &inject);
+ if (ec < 0) {
+ pr_info("Usage: echo <qlm> <lane> [{start <mode> [inject]|stop|clear}] > prbs\n");
+ return ec;
+ }
+
+ qlm = prbs_cmd_data.qlm;
+ qlm_lane = prbs_cmd_data.qlm_lane;
+
+ switch (cmd) {
+ case CGX_PRBS_START_CMD:
+ arm_smccc_smc(OCTEONTX_SERDES_DBG_PRBS, cmd, qlm,
+ mode | (inject << 8),
+ qlm_lane, 0, 0, 0, &res);
+
+ list_for_each_entry(status,
+ &prbs_cmd_data.status_list.list,
+ list) {
+ if ((status->qlm == qlm) &&
+ (status->qlm_lane == qlm_lane))
+ break;
+ }
+
+ /*
+ * If the iterator reached the list head, no status entry
+ * exists yet for this QLM/lane.
+ */
+ if (&status->list == &prbs_cmd_data.status_list.list)
+ status = NULL;
+
+ if (res.a0 != SMCCC_RET_SUCCESS) {
+ if (status != NULL) {
+ list_del(&status->list);
+ kfree(status);
+ }
+ pr_info("GSER prbs start command failed.\n");
+ return -EIO;
+ }
+
+ if (status == NULL) {
+ status = kmalloc(sizeof(struct prbs_status),
+ GFP_KERNEL);
+ if (status == NULL)
+ return -ENOMEM;
+ status->qlm = qlm;
+ status->qlm_lane = qlm_lane;
+ list_add(&status->list,
+ &prbs_cmd_data.status_list.list);
+ }
+ status->start_time = get_seconds();
+ pr_info("GSER PRBS-%d start on QLM %d on lane %d.\n", mode,
+ qlm, qlm_lane);
+ break;
+
+ case CGX_PRBS_STOP_CMD:
+ arm_smccc_smc(OCTEONTX_SERDES_DBG_PRBS, cmd,
+ qlm, 0, qlm_lane, 0, 0, 0, &res);
+ if (res.a0 != SMCCC_RET_SUCCESS) {
+ pr_info("GSER prbs stop command failed.\n");
+ return -EIO;
+ }
+ list_for_each_entry(status,
+ &prbs_cmd_data.status_list.list,
+ list) {
+ if ((status->qlm == qlm) &&
+ (status->qlm_lane == qlm_lane)) {
+ list_del(&status->list);
+ kfree(status);
+ break;
+ }
+ }
+ pr_info("GSER PRBS stop on QLM %d on Lane %d.\n", qlm,
+ qlm_lane);
+ break;
+
+ case CGX_PRBS_CLEAR_CMD:
+ arm_smccc_smc(OCTEONTX_SERDES_DBG_PRBS, cmd,
+ qlm, 0, qlm_lane, 0, 0, 0, &res);
+ if (res.a0 != SMCCC_RET_SUCCESS) {
+ pr_info("GSER prbs clear command failed.\n");
+ return -EIO;
+ }
+ pr_info("GSER PRBS errors cleared on QLM%d Lane%d\n", qlm,
+ qlm_lane);
+ break;
+
+ default:
+ pr_info("GSER PRBS set QLM %d Lane %d to read.\n", qlm,
+ qlm_lane);
+ break;
+ }
+
+ return count;
+}
+
+static int serdes_dbg_prbs_read_op(struct seq_file *s, void *unused)
+{
+ struct prbs_status *status = NULL;
+ struct cgx_prbs_errors *errors;
+ struct arm_smccc_res res;
+ long time = -1;
+ int lane;
+ int qlm;
+
+ qlm = prbs_cmd_data.qlm;
+ lane = prbs_cmd_data.qlm_lane;
+
+ list_for_each_entry(status,
+ &prbs_cmd_data.status_list.list,
+ list) {
+ if (status->qlm == qlm) {
+ time = status->start_time;
+ break;
+ }
+ }
+
+ if (time == -1) {
+ seq_printf(s, "GSER PRBS not started for QLM%d.Lane%d.\n", qlm,
+ lane);
+ return 0;
+ }
+
+ time = get_seconds() - time;
+
+ arm_smccc_smc(OCTEONTX_SERDES_DBG_PRBS, CGX_PRBS_GET_DATA_CMD,
+ qlm, 0, lane, 0, 0, 0, &res);
+
+ if (res.a0 != SMCCC_RET_SUCCESS) {
+ seq_printf(s, "GSER prbs get command failed for QLM%d.Lane%d.\n",
+ qlm, lane);
+ return 0;
+ }
+
+ errors = prbs_cmd_data.res->errors;
+
+ seq_printf(s, "Time: %ld seconds QLM%d.Lane%d: errors: ", time, qlm,
+ lane);
+ if (errors[lane].err != -1)
+ seq_printf(s, "%lld", errors[lane].err);
+ else
+ seq_puts(s, "No lock");
+
+ if (errors[lane].phy_host != -2) {
+ seq_puts(s, ", PHY Host errors: ");
+ if (errors[lane].phy_host != -1)
+ seq_printf(s, "%lld", errors[lane].phy_host);
+ else
+ seq_puts(s, "No lock");
+ }
+
+ if (errors[lane].phy_line != -2) {
+ seq_puts(s, ", PHY Line errors: ");
+ if (errors[lane].phy_line != -1)
+ seq_printf(s, "%lld", errors[lane].phy_line);
+ else
+ seq_puts(s, "No lock");
+ }
+ seq_puts(s, "\n");
+
+ return 0;
+}
+
+static int serdes_dbg_open_prbs(struct inode *inode, struct file *file)
+{
+ return single_open(file, serdes_dbg_prbs_read_op, inode->i_private);
+}
+
+static const struct file_operations serdes_dbg_prbs_fops = {
+ .owner = THIS_MODULE,
+ .open = serdes_dbg_open_prbs,
+ .read = seq_read,
+ .write = serdes_dbg_prbs_write_op,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int serdes_dbg_setup_debugfs(struct dentry *root)
+{
+ struct dentry *pfile;
+
+ pfile = debugfs_create_file("eye", 0644, root, NULL,
+ &serdes_dbg_eye_fops);
+ if (IS_ERR_OR_NULL(pfile))
+ goto create_failed;
+
+ pfile = debugfs_create_file("settings", 0644, root, NULL,
+ &serdes_dbg_settings_fops);
+ if (IS_ERR_OR_NULL(pfile))
+ goto create_failed;
+
+ pfile = debugfs_create_file("prbs", 0644, root, NULL,
+ &serdes_dbg_prbs_fops);
+ if (IS_ERR_OR_NULL(pfile))
+ goto create_failed;
+
+ pfile = debugfs_create_file("tunetx", 0644, root, NULL,
+ &tune_serdes_dbg_settings_fops);
+ if (IS_ERR_OR_NULL(pfile))
+ goto create_failed;
+
+ pfile = debugfs_create_file("loop", 0644, root, NULL,
+ &loop_serdes_dbg_settings_fops);
+ if (IS_ERR_OR_NULL(pfile))
+ goto create_failed;
+
+ return 0;
+
+create_failed:
+ pr_err("Failed to create debugfs dir/file for serdes\n");
+ return IS_ERR(pfile) ? PTR_ERR(pfile) : -ENODEV;
+}
+
+static int __init serdes_dbg_init(void)
+{
+ struct arm_smccc_res res;
+ int ret;
+
+ /* Check the debugfs presence */
+ pserdes_root = debugfs_create_dir("octeontx2_serdes", NULL);
+ if (IS_ERR_OR_NULL(pserdes_root)) {
+ if (IS_ERR(pserdes_root)) {
+ int ret = PTR_ERR(pserdes_root);
+
+ pr_err("Can't access debugfs, error (%d)\n", ret);
+ return ret;
+ }
+ /* Not an ERR pointer but still NULL: the debugfs dir was not created */
+ pr_info("Can't create debugfs entry\n");
+ return -ENODEV;
+ }
+
+ /*
+ * Compare the response to the standard SVC_UID command with the OcteonTX UUID.
+ * Continue only if it is OcteonTX.
+ */
+ if (octeontx_soc_check_smc() != 0) {
+ pr_info("OcteonTX2 serdes diagnostics not support\n");
+ ret = -EPERM;
+ goto smc_access_failed;
+ }
+
+ arm_smccc_smc(OCTEONTX_SERDES_DBG_GET_MEM, 0, 0, 0, 0, 0, 0, 0, &res);
+ if (res.a0 == SMCCC_RET_NOT_SUPPORTED) {
+ pr_info("Firmware doesn't support serdes diagnostic cmds.\n");
+ ret = -EPERM;
+ goto smc_access_failed;
+ }
+
+ if (res.a0 != SMCCC_RET_SUCCESS)
+ goto serdes_mem_init_failed;
+
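+ /*
+ * On success the GET_MEM call returns the physical addresses of the
+ * shared result buffers in a1..a3; map them for the debugfs readers.
+ */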
+ eye_cmd_data.res = ioremap_wc(res.a1, sizeof(struct eye_data));
+ if (!eye_cmd_data.res)
+ goto serdes_mem_init_failed;
+
+ serdes_cmd_data.res = ioremap_wc(res.a2, SERDES_SETTINGS_SIZE);
+ if (!serdes_cmd_data.res)
+ goto serdes_mem_init_failed;
+
+ prbs_cmd_data.res = ioremap_wc(res.a3, sizeof(struct cgx_prbs_data));
+ if (!prbs_cmd_data.res)
+ goto serdes_mem_init_failed;
+
+ tune_serdes_cmd.res = ioremap_wc(res.a0, sizeof(tune_serdes_cmd));
+ if (!tune_serdes_cmd.res)
+ goto serdes_mem_init_failed;
+
+ loop_serdes_cmd.res = ioremap_wc(res.a0, sizeof(loop_serdes_cmd));
+ if (!loop_serdes_cmd.res)
+ goto serdes_mem_init_failed;
+
+ ret = serdes_dbg_setup_debugfs(pserdes_root);
+ if (ret)
+ goto serdes_debugfs_failed;
+
+ INIT_LIST_HEAD(&prbs_cmd_data.status_list.list);
+
+ return 0;
+
+serdes_mem_init_failed:
+ pr_err("Failed to obtain shared memory for serdes debug commands\n");
+ ret = -EACCES;
+
+serdes_debugfs_failed:
+ if (eye_cmd_data.res)
+ iounmap(eye_cmd_data.res);
+
+ if (serdes_cmd_data.res)
+ iounmap(serdes_cmd_data.res);
+
+ if (prbs_cmd_data.res)
+ iounmap(prbs_cmd_data.res);
+
+ if (tune_serdes_cmd.res)
+ iounmap(tune_serdes_cmd.res);
+
+ if (loop_serdes_cmd.res)
+ iounmap(loop_serdes_cmd.res);
+
+smc_access_failed:
+ debugfs_remove_recursive(pserdes_root);
+
+ return ret;
+}
+
+static void __exit serdes_dbg_exit(void)
+{
+ struct prbs_status *status, *n;
+
+ debugfs_remove_recursive(pserdes_root);
+
+ if (eye_cmd_data.res)
+ iounmap(eye_cmd_data.res);
+
+ if (serdes_cmd_data.res)
+ iounmap(serdes_cmd_data.res);
+
+ if (prbs_cmd_data.res)
+ iounmap(prbs_cmd_data.res);
+
+ if (tune_serdes_cmd.res)
+ iounmap(tune_serdes_cmd.res);
+
+ if (loop_serdes_cmd.res)
+ iounmap(loop_serdes_cmd.res);
+
+ list_for_each_entry_safe(status, n,
+ &prbs_cmd_data.status_list.list,
+ list) {
+ kfree(status);
+ }
+}
+
+module_init(serdes_dbg_init);
+module_exit(serdes_dbg_exit);
+
+MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_DESCRIPTION("Serdes diagnostic commands for OcteonTX2");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/marvell/octeontx_info.c b/drivers/soc/marvell/octeontx_info.c
new file mode 100644
index 000000000000..9b01697aa68a
--- /dev/null
+++ b/drivers/soc/marvell/octeontx_info.c
@@ -0,0 +1,513 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Proc entry for board information
+ *
+ * Copyright (C) 2019 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) "octtx_info: " fmt
+
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/of.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <soc/marvell/octeontx/octeontx_smc.h>
+
+#define OCTTX_NODE "octeontx_brd"
+#define FW_LAYOUT_NODE "firmware-layout"
+#define SOC_NODE "soc"
+#define MAX_MACS 32 /* Please keep this in sync with EBF */
+
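+/*
+ * MAC address as parsed from the device tree: the 64-bit big-endian value
+ * carries the six address bytes in s.bytes[], after two bytes of padding.
+ */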
+struct octeontx_info_mac_addr {
+ union {
+ u64 num;
+ struct {
+ u8 pad[2];
+ u8 bytes[6];
+ } s;
+ };
+};
+
+struct octtx_fw_info {
+ const char *name;
+ const char *version_string;
+ u32 address;
+ u32 max_size;
+ u8 major_version;
+ u8 minor_version;
+ u8 revision_number;
+ u8 revision_type;
+ u16 year;
+ u8 month;
+ u8 day;
+ u8 hour;
+ u8 minute;
+ u16 flags;
+ u32 customer_version;
+ struct octtx_fw_info *next;
+};
+
+struct octtx_brd_info {
+ const char *board_revision;
+ const char *board_serial;
+ const char *board_model;
+ const char *board_num_of_mac;
+ const char *board_num_of_mac_id;
+ const char *reset_count_cold;
+ const char *reset_count_warm;
+ const char *reset_count_core_wdog;
+ const char *reset_count_scp_wdog;
+ const char *reset_count_mcp_wdog;
+ const char *reset_count_ecp_wdog;
+ int dev_tree_parsed;
+ int use_mac_id;
+ struct octeontx_info_mac_addr mac_addrs[MAX_MACS];
+ struct octtx_fw_info *fw_info;
+ const char *sdk_version;
+};
+
+static struct proc_dir_entry *ent;
+static struct octtx_brd_info brd;
+static const char null_string[5] = "NULL";
+
+static int oct_brd_proc_show(struct seq_file *seq, void *v)
+{
+ struct octtx_fw_info *fw_info = brd.fw_info;
+ struct octeontx_info_mac_addr *mac_addr;
+
+ if (!brd.dev_tree_parsed) {
+ seq_puts(seq, "No board info available!\n");
+ return -EPERM;
+ }
+
+ seq_printf(seq, "board_model: %s\n", brd.board_model);
+ seq_printf(seq, "board_revision: %s\n", brd.board_revision);
+ seq_printf(seq, "board_serial_number: %s\n", brd.board_serial);
+ seq_printf(seq, "SDK Version: %s\n", brd.sdk_version);
+ if (!brd.use_mac_id) {
+ mac_addr = &brd.mac_addrs[0];
+
+ seq_printf(seq, "mac_addr_count: %s\n", brd.board_num_of_mac);
+ seq_printf(seq, "mac_addr_base: %pM\n", mac_addr->s.bytes);
+ } else {
+ u32 u, num;
+
+ if (brd.board_num_of_mac_id == null_string)
+ seq_printf(seq, "mac_addr_count: %s\n",
+ brd.board_num_of_mac_id);
+
+ if (!kstrtou32(brd.board_num_of_mac_id, 16, &num)) {
+ seq_printf(seq, "mac_addr_count: %s\n",
+ brd.board_num_of_mac_id);
+
+ for (u = 0; u < num; u++) {
+ mac_addr = &brd.mac_addrs[u];
+
+ seq_printf(seq, "board-mac-addr-id%d: %pM\n",
+ u, mac_addr->s.bytes);
+ }
+ }
+ }
+
+ if (is_soc_cn10kx()) {
+ seq_printf(seq, "cold_reset_count: %s\n", brd.reset_count_cold);
+ seq_printf(seq, "warm_reset_count: %s\n", brd.reset_count_warm);
+ seq_printf(seq, "core_wdog_reset_count: %s\n",
+ brd.reset_count_core_wdog);
+ seq_printf(seq, "scp_wdog_reset_count: %s\n",
+ brd.reset_count_scp_wdog);
+ seq_printf(seq, "mcp_wdog_reset_count: %s\n",
+ brd.reset_count_mcp_wdog);
+ seq_printf(seq, "ecp_wdog_reset_count: %s\n",
+ brd.reset_count_ecp_wdog);
+ }
+
+ while (fw_info) {
+ seq_printf(seq, "firmware-file: %s\n", fw_info->name);
+ seq_printf(seq, " firmware-address: 0x%08x\n",
+ fw_info->address);
+ seq_printf(seq, " firmware-max-size: 0x%08x\n",
+ fw_info->max_size);
+ seq_printf(seq, " version-string: %s\n",
+ fw_info->version_string);
+ seq_printf(seq, " version: %02u.%02u.%02u\n",
+ fw_info->major_version, fw_info->minor_version,
+ fw_info->revision_number);
+ seq_printf(seq, " revision-type: 0x%x\n",
+ fw_info->revision_type);
+ seq_printf(seq, " date: %04u-%02u-%02u\n",
+ fw_info->year, fw_info->month, fw_info->day);
+ seq_printf(seq, " time: %02u:%02u\n",
+ fw_info->hour, fw_info->minute);
+ seq_printf(seq, " flags: 0x%04x\n",
+ fw_info->flags);
+ seq_printf(seq, " customer-version: 0x%08x\n",
+ fw_info->customer_version);
+ fw_info = fw_info->next;
+ }
+ return 0;
+}
+
+static int oct_brd_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, oct_brd_proc_show, NULL);
+}
+
+static const struct proc_ops oct_brd_fops = {
+ .proc_open = oct_brd_proc_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
+};
+
+static int octtx_parse_mac_info(struct device_node *node)
+{
+ const char *board_mac;
+ struct octeontx_info_mac_addr mac_addr;
+ int ret;
+ u32 num, id_num, u;
+
+ if (!node)
+ return -EINVAL;
+
+ /* Initialize variables */
+ memset(brd.mac_addrs, 0, sizeof(brd.mac_addrs));
+ brd.use_mac_id = 0;
+
+ ret = of_property_read_string(node, "BOARD-MAC-ADDRESS-NUM",
+ &brd.board_num_of_mac);
+ if (ret) {
+ pr_warn("Board MAC address number not available\n");
+ brd.board_num_of_mac = null_string;
+ num = -1;
+ } else {
+ if (kstrtou32(brd.board_num_of_mac, 16, &num))
+ pr_warn("Board MAC address number is not available\n");
+ }
+
+ ret = of_property_read_string(node, "BOARD-MAC-ADDRESS", &board_mac);
+ if (ret) {
+ pr_warn("Board MAC address not available\n");
+ brd.mac_addrs[0].num = 0;
+ } else {
+ if (!kstrtoull(board_mac, 16, &mac_addr.num))
+ brd.mac_addrs[0].num = be64_to_cpu(mac_addr.num);
+ }
+
+ /* This part is not mandatory */
+ ret = of_property_read_string(node, "BOARD-MAC-ADDRESS-ID-NUM",
+ &brd.board_num_of_mac_id);
+ if (ret) {
+ brd.board_num_of_mac_id = null_string;
+ id_num = -1;
+ } else {
+ if (kstrtou32(brd.board_num_of_mac_id, 16, &id_num))
+ pr_warn("Board MAC addressess IDs number is not available\n");
+ }
+
+ if ((brd.board_num_of_mac_id != null_string) && (id_num > 0)) {
+ for (u = 0; u < id_num; u++) {
+ char prop_name[32] = { 0 };
+
+ snprintf(prop_name, sizeof(prop_name),
+ "BOARD-MAC-ADDRESS-ID%u",
+ u);
+ ret = of_property_read_string(node, prop_name,
+ &board_mac);
+ if (ret) {
+ brd.mac_addrs[u].num = 0;
+ } else {
+ if (!kstrtou64(board_mac, 16, &mac_addr.num))
+ brd.mac_addrs[u].num = be64_to_cpu(mac_addr.num);
+ }
+ }
+
+ brd.use_mac_id = 1;
+ }
+
+ return 0;
+}
+
+/** Reads reset counter information and stores it in the global board structure
+ *
+ * @param np device tree node to parse
+ *
+ */
+static void octtx_parse_reset_counters(struct device_node *np)
+{
+ int ret;
+
+ ret = of_property_read_string(np, "RESET-COUNT-COLD",
+ &brd.reset_count_cold);
+ if (ret) {
+ pr_warn("Cold reset count not available\n");
+ /* Default name is "NULL" */
+ brd.reset_count_cold = null_string;
+ }
+
+ ret = of_property_read_string(np, "RESET-COUNT-WARM",
+ &brd.reset_count_warm);
+ if (ret) {
+ pr_warn("Warm reset count not available\n");
+ /* Default name is "NULL" */
+ brd.reset_count_warm = null_string;
+ }
+
+ ret = of_property_read_string(np, "RESET-COUNT-CORE-WDOG",
+ &brd.reset_count_core_wdog);
+ if (ret) {
+ pr_warn("Core Watchdog reset count not available\n");
+ /* Default name is "NULL" */
+ brd.reset_count_core_wdog = null_string;
+ }
+
+ ret = of_property_read_string(np, "RESET-COUNT-SCP-WDOG",
+ &brd.reset_count_scp_wdog);
+ if (ret) {
+ pr_warn("SCP Watchdog reset count not available\n");
+ /* Default name is "NULL" */
+ brd.reset_count_scp_wdog = null_string;
+ }
+
+ ret = of_property_read_string(np, "RESET-COUNT-MCP-WDOG",
+ &brd.reset_count_mcp_wdog);
+ if (ret) {
+ pr_warn("MCP Watchdog reset count not available\n");
+ /* Default name is "NULL" */
+ brd.reset_count_mcp_wdog = null_string;
+ }
+
+ ret = of_property_read_string(np, "RESET-COUNT-ECP-WDOG",
+ &brd.reset_count_ecp_wdog);
+ if (ret) {
+ pr_warn("ECP Watchdog reset count not available\n");
+ /* Default name is "NULL" */
+ brd.reset_count_ecp_wdog = null_string;
+ }
+}
+
+static int octtx_parse_firmware_layout(struct device_node *parent)
+{
+ struct device_node *np = NULL;
+ struct octtx_fw_info *fw_info = NULL, *last_fw_info = NULL;
+ const char *version_string;
+ const char *name;
+ int ret;
+ u32 ver_num;
+ u32 date;
+ u32 time;
+ u32 flags;
+
+ for_each_child_of_node(parent, np) {
+ pr_debug("Getting firmware layout from node %s\n",
+ of_node_full_name(np));
+ ret = of_property_read_string(np, "description", &name);
+ if (ret) {
+ pr_warn("Could not obtain firmware file name\n");
+ break;
+ }
+ pr_debug("Firmware file name: %s\n", name);
+
+ /* We only care about entries with version info */
+ ret = of_property_read_string(np, "version", &version_string);
+ if (ret) {
+ pr_debug("No version information found for %s\n", name);
+ continue;
+ }
+
+ fw_info = kzalloc(sizeof(*fw_info), GFP_KERNEL);
+ if (!fw_info) {
+ pr_err("Out of memory for firmware info\n");
+ ret = -ENOMEM;
+ goto bailout;
+ }
+
+ fw_info->name = kstrdup(name, GFP_KERNEL);
+ if (!fw_info->name) {
+ pr_err("Out of memory\n");
+ ret = -ENOMEM;
+ goto bailout;
+ }
+ fw_info->version_string = kstrdup(version_string, GFP_KERNEL);
+ if (!fw_info->version_string) {
+ pr_err("Out of memory\n");
+ ret = -ENOMEM;
+ goto bailout;
+ }
+
+ ret = of_property_read_u32_index(np, "reg", 0,
+ &fw_info->address);
+ if (ret) {
+ pr_warn("Could not obtain firmware address for %s\n",
+ fw_info->name);
+ fw_info->address = (u32)-1;
+ ret = -EINVAL;
+ goto bailout;
+ }
+
+ ret = of_property_read_u32_index(np, "reg", 1,
+ &fw_info->max_size);
+ if (ret) {
+ pr_warn("Could not obtain firmware maximum size for %s\n",
+ fw_info->name);
+ fw_info->max_size = (u32)-1;
+ ret = -EINVAL;
+ goto bailout;
+ }
+
+ ret = of_property_read_u32(np, "revision", &ver_num);
+ if (ret) {
+ pr_warn("Could not obtain revision number for %s\n",
+ fw_info->name);
+ } else {
+ fw_info->major_version = (ver_num >> 24) & 0xff;
+ fw_info->minor_version = (ver_num >> 16) & 0xff;
+ fw_info->revision_number = (ver_num >> 8) & 0xff;
+ fw_info->revision_type = ver_num & 0xff;
+ }
+
+ ret = of_property_read_u32(np, "date", &date);
+ if (ret) {
+ pr_warn("Could not obtain date for %s\n",
+ fw_info->name);
+ } else {
+ fw_info->year = (date >> 16) & 0xffff;
+ fw_info->month = (date >> 8) & 0xff;
+ fw_info->day = date & 0xff;
+ }
+ ret = of_property_read_u32(np, "time", &time);
+ if (ret) {
+ pr_warn("Could not obtain time for %s\n",
+ fw_info->name);
+ } else {
+ fw_info->hour = (time >> 24) & 0xff;
+ fw_info->minute = (time >> 16) & 0xff;
+ }
+ ret = of_property_read_u32(np, "flags", &flags);
+ if (ret) {
+ pr_warn("Could not obtain flags for %s\n",
+ fw_info->name);
+ fw_info->flags = 0;
+ } else {
+ fw_info->flags = flags & 0xFFFF;
+ }
+ ret = of_property_read_u32(np, "customer-version",
+ &fw_info->customer_version);
+ if (ret) {
+ pr_warn("Could not obtain customer version for %s\n",
+ fw_info->name);
+ }
+
+ if (!brd.fw_info)
+ brd.fw_info = fw_info;
+ if (last_fw_info)
+ last_fw_info->next = fw_info;
+ last_fw_info = fw_info;
+ }
+ pr_debug("octtx_info parsing firmware done\n");
+ return 0;
+
+bailout:
+ if (fw_info) {
+ kfree(fw_info->name);
+ kfree(fw_info->version_string);
+ }
+ kfree(fw_info);
+
+ return ret;
+}
+
+static int __init octtx_info_init(void)
+{
+ int ret;
+ struct device_node *np = NULL;
+
+ if (!brd.dev_tree_parsed) {
+ np = of_find_node_by_name(NULL, OCTTX_NODE);
+ if (!np) {
+ pr_err("No board info available!\n");
+ return -ENODEV;
+ }
+ ret = of_property_read_string(np, "BOARD-MODEL",
+ &brd.board_model);
+ if (ret) {
+ pr_warn("Board model not available\n");
+ /* Default name is "NULL" */
+ brd.board_model = null_string;
+ }
+ ret = of_property_read_string(np, "BOARD-REVISION",
+ &brd.board_revision);
+ if (ret) {
+ pr_warn("Board revision not available\n");
+ /* Default name is "NULL" */
+ brd.board_revision = null_string;
+
+ }
+ ret = of_property_read_string(np, "BOARD-SERIAL",
+ &brd.board_serial);
+ if (ret) {
+ pr_warn("Board serial not available\n");
+ /* Default name is "NULL" */
+ brd.board_serial = null_string;
+ }
+
+ ret = octtx_parse_mac_info(np);
+ if (ret)
+ pr_warn("Board MAC address not available\n");
+
+ /* Parse elements related to CN10KX */
+ if (is_soc_cn10kx()) {
+ octtx_parse_reset_counters(np);
+
+ np = of_find_node_by_name(np, FW_LAYOUT_NODE);
+ if (np) {
+ ret = octtx_parse_firmware_layout(np);
+ if (ret)
+ pr_err("Error parsing firmware-layout\n");
+ }
+ }
+
+ /* Read SOC@0 node to get SDK Version */
+ np = of_find_node_by_name(NULL, SOC_NODE);
+ if (!np) {
+ pr_err("soc node not available!\n");
+ return -ENODEV;
+ }
+ ret = of_property_read_string(np, "sdk-version",
+ &brd.sdk_version);
+ if (ret) {
+ pr_warn("SDK Version not available\n");
+ /* Default name is "NULL" */
+ brd.sdk_version = null_string;
+ }
+
+ brd.dev_tree_parsed = 1;
+ }
+
+ ent = proc_create("octtx_info", 0444, NULL, &oct_brd_fops);
+ if (!ent) {
+ pr_err("proc entry creation for octtx info failed\n");
+ return -ENODEV;
+ }
+ pr_info("Added /proc/octtx_info");
+
+ return 0;
+}
+
+static void __exit octtx_info_cleanup(void)
+{
+ proc_remove(ent);
+}
+
+module_init(octtx_info_init);
+module_exit(octtx_info_cleanup);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("octeontx board info");
+MODULE_AUTHOR("Sujeet Baranwal <sbaranwal@marvell.com>");
diff --git a/drivers/soc/marvell/phy_diag.c b/drivers/soc/marvell/phy_diag.c
new file mode 100644
index 000000000000..249ab2db8a11
--- /dev/null
+++ b/drivers/soc/marvell/phy_diag.c
@@ -0,0 +1,1022 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Marvell
+ *
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+#include <linux/time.h>
+#include <linux/uaccess.h>
+#include <soc/marvell/octeontx/octeontx_smc.h>
+
+
+#define PLAT_OCTEONTX_PHY_DBG_PRBS 0xc2000e00
+#define PLAT_OCTEONTX_PHY_LOOPBACK 0xc2000e01
+#define PLAT_OCTEONTX_PHY_GET_TEMP 0xc2000e02
+#define PLAT_OCTEONTX_PHY_SERDES_CFG 0xc2000e03
+#define PLAT_OCTEONTX_PHY_MDIO 0xc2000e04
+#define PLAT_OCTEONTX_PHY_EYE_CAPTURE 0xc2000e05
+#define PLAT_OCTEONTX_PHY_PKT_GEN 0xc2000e06
+#define MAX_ETH 10
+#define MAX_LMAC_PER_ETH 4
+
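+/*
+ * Generates single_open()-based debugfs file_operations named <name>_fops,
+ * wired to <name>_read and <name>_write.
+ */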
+#define DEFINE_ATTRIBUTE(__name) \
+static int __name ## _open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, __name ## _read, inode->i_private); \
+} \
+ \
+static const struct file_operations __name ## _fops = { \
+ .owner = THIS_MODULE, \
+ .open = __name ## _open, \
+ .read = seq_read, \
+ .write = __name ## _write, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+}
+
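+/*
+ * Generates a <table>_str2enum() helper that maps a token string to its
+ * enum value, returning -1 when the token is unknown.
+ */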
+#define DEFINE_STR_2_ENUM_FUNC(_conv_arr) \
+static inline int _conv_arr ## _str2enum(const char *str) \
+{ \
+ size_t idx; \
+ size_t len = ARRAY_SIZE(_conv_arr); \
+ \
+ if (!str) \
+ return -1; \
+ \
+ for (idx = 0; idx < len; idx++) { \
+ if (!strcmp(_conv_arr[idx].s, str)) \
+ return _conv_arr[idx].e; \
+ } \
+ \
+ return -1; \
+}
+
+struct dentry *phy_dbgfs_root;
+
+#define CMD_SZ 64
+char cmd_buf[CMD_SZ];
+
+static struct {
+ int eth;
+ int lmac;
+} phy_data;
+
+enum phy_sgmii_vod {
+ PHY_SGMII_VOD_14mV = 0,
+ PHY_SGMII_VOD_112mV,
+ PHY_SGMII_VOD_210mV,
+ PHY_SGMII_VOD_308mV,
+ PHY_SGMII_VOD_406mV,
+ PHY_SGMII_VOD_504mV,
+ PHY_SGMII_VOD_602mV,
+ PHY_SGMII_VOD_700mV,
+
+ PHY_SGMII_VOD_MAX
+};
+
+#define VOD(_val) {PHY_SGMII_VOD_ ## _val, #_val}
+static struct {
+ enum phy_sgmii_vod e;
+ const char *s;
+} sgmii_vod_values[] = {
+ VOD(14mV),
+ VOD(112mV),
+ VOD(210mV),
+ VOD(308mV),
+ VOD(406mV),
+ VOD(504mV),
+ VOD(602mV),
+ VOD(700mV),
+};
+DEFINE_STR_2_ENUM_FUNC(sgmii_vod_values)
+
+enum phy_prbs_cmd {
+ PHY_PRBS_START_CMD = 1,
+ PHY_PRBS_STOP_CMD,
+ PHY_PRBS_GET_DATA_CMD,
+};
+
+static struct {
+ enum phy_prbs_cmd e;
+ const char *s;
+} prbs_cmds[] = {
+ {PHY_PRBS_START_CMD, "start"},
+ {PHY_PRBS_STOP_CMD, "stop"},
+};
+DEFINE_STR_2_ENUM_FUNC(prbs_cmds)
+
+
+enum phy_prbs_side {
+ PRBS_SIDE_LINE = 0,
+ PRBS_SIDE_HOST,
+};
+
+static struct {
+ enum phy_prbs_side e;
+ const char *s;
+} prbs_sides[] = {
+ {PRBS_SIDE_LINE, "line"},
+ {PRBS_SIDE_HOST, "host"},
+};
+DEFINE_STR_2_ENUM_FUNC(prbs_sides)
+
+enum phy_prbs_direction {
+ PRBS_DIRECTION_TX = 1,
+ PRBS_DIRECTION_RX,
+ PRBS_DIRECTION_TX_RX,
+};
+
+static struct {
+ enum phy_prbs_direction e;
+ const char *s;
+} prbs_directions[] = {
+ {PRBS_DIRECTION_TX, "tx"},
+ {PRBS_DIRECTION_RX, "rx"},
+ {PRBS_DIRECTION_TX_RX, "tx-rx"},
+};
+DEFINE_STR_2_ENUM_FUNC(prbs_directions)
+
+
+enum phy_prbs_type {
+ PRBS_7 = 0,
+ PRBS_23,
+ PRBS_31,
+ PRBS_1010,
+};
+
+static struct {
+ enum phy_prbs_type e;
+ const char *s;
+} prbs_types[] = {
+ {PRBS_7, "prbs_7"},
+ {PRBS_23, "prbs_23"},
+ {PRBS_31, "prbs_31"},
+ {PRBS_1010, "prbs_1010"},
+};
+DEFINE_STR_2_ENUM_FUNC(prbs_types)
+
+/* loopback definitions */
+enum phy_loopback_cmd {
+ PHY_LOOPBACK_START_CMD = 1,
+ PHY_LOOPBACK_STOP_CMD,
+};
+
+static struct {
+ enum phy_loopback_cmd e;
+ const char *s;
+} loopback_cmds[] = {
+ {PHY_LOOPBACK_START_CMD, "start"},
+ {PHY_LOOPBACK_STOP_CMD, "stop"},
+};
+DEFINE_STR_2_ENUM_FUNC(loopback_cmds)
+
+
+enum phy_loopback_side {
+ LOOPBACK_SIDE_LINE = 0,
+ LOOPBACK_SIDE_HOST,
+};
+
+static struct {
+ enum phy_loopback_side e;
+ const char *s;
+} loopback_sides[] = {
+ {LOOPBACK_SIDE_LINE, "line"},
+ {LOOPBACK_SIDE_HOST, "host"},
+};
+DEFINE_STR_2_ENUM_FUNC(loopback_sides)
+
+enum phy_loopback_type {
+ PCS_SHALLOW = 0,
+ PCS_DEEP,
+ PMA_DEEP,
+};
+
+static struct {
+ enum phy_loopback_type e;
+ const char *s;
+} loopback_types[] = {
+ {PCS_SHALLOW, "pcs_shallow"},
+ {PCS_DEEP, "pcs_deep"},
+ {PMA_DEEP, "pma_deep"},
+};
+DEFINE_STR_2_ENUM_FUNC(loopback_types)
+
+/* eye capture definitions */
+enum phy_eye_side {
+ EYE_SIDE_LINE = 0,
+ EYE_SIDE_HOST,
+ EYE_SIDE_TEST_3,
+ EYE_SIDE_TEST_4,
+};
+
+static struct {
+ enum phy_eye_side e;
+ const char *s;
+} eye_sides[] = {
+ {EYE_SIDE_LINE, "line"},
+ {EYE_SIDE_HOST, "host"},
+ {EYE_SIDE_TEST_3, "test_3"},
+ {EYE_SIDE_TEST_4, "test_4"},
+};
+DEFINE_STR_2_ENUM_FUNC(eye_sides)
+
+enum phy_eye_type {
+ EYE_MEASURE = 0,
+ EYE_PLOT,
+};
+
+static struct {
+ enum phy_eye_type e;
+ const char *s;
+} eye_types[] = {
+ {EYE_MEASURE, "measure"},
+ {EYE_PLOT, "plot"},
+};
+DEFINE_STR_2_ENUM_FUNC(eye_types)
+
+/* pktgen definitions */
+enum phy_pktgen_cmd {
+ PHY_PKTGEN_START_CMD = 1,
+ PHY_PKTGEN_STOP_CMD,
+ PHY_PKTGEN_SET_CMD,
+ PHY_PKTGEN_GET_CMD
+};
+
+static struct {
+ enum phy_pktgen_cmd e;
+ const char *s;
+} pktgen_cmds[] = {
+ {PHY_PKTGEN_START_CMD, "start"},
+ {PHY_PKTGEN_STOP_CMD, "stop"},
+ {PHY_PKTGEN_SET_CMD, "set"},
+};
+DEFINE_STR_2_ENUM_FUNC(pktgen_cmds)
+
+enum phy_pktgen_mode {
+ PHY_PKTGEN_GENERATOR = 0,
+ PHY_PKTGEN_CHECKER,
+ PHY_PKTGEN_GEN_CHECK,
+};
+
+static struct {
+ enum phy_pktgen_mode e;
+ const char *s;
+} pktgen_modes[] = {
+ {PHY_PKTGEN_GENERATOR, "tx"},
+ {PHY_PKTGEN_CHECKER, "rx"},
+ {PHY_PKTGEN_GEN_CHECK, "tx-rx"},
+};
+DEFINE_STR_2_ENUM_FUNC(pktgen_modes)
+
+enum phy_pktgen_side {
+ PKTGEN_SIDE_LINE = 0,
+ PKTGEN_SIDE_HOST,
+};
+
+static struct {
+ enum phy_pktgen_side e;
+ const char *s;
+} pktgen_sides[] = {
+ {PKTGEN_SIDE_LINE, "line"},
+ {PKTGEN_SIDE_HOST, "host"},
+};
+DEFINE_STR_2_ENUM_FUNC(pktgen_sides)
+
+enum phy_pktgen_type {
+ PKTGEN_TYPE_SFD = 0,
+ PKTGEN_TYPE_PATTERN_CTL,
+ PKTGEN_TYPE_DIS_CRC,
+ PKTGEN_TYPE_IN_PAYLOAD,
+ PKTGEN_TYPE_FRAME_LEN_CTL,
+ PKTGEN_TYPE_NUM_PACKETS,
+ PKTGEN_TYPE_RANDOM_IPG,
+ PKTGEN_TYPE_IPG_DURATION,
+};
+
+static struct {
+ enum phy_pktgen_type e;
+ const char *s;
+} pktgen_types[] = {
+ {PKTGEN_TYPE_SFD, "sfd"},
+ {PKTGEN_TYPE_PATTERN_CTL, "pattern"},
+ {PKTGEN_TYPE_DIS_CRC, "dis_crc"},
+ {PKTGEN_TYPE_IN_PAYLOAD, "in_payload"},
+ {PKTGEN_TYPE_FRAME_LEN_CTL, "frame_len"},
+ {PKTGEN_TYPE_NUM_PACKETS, "num_packets"},
+ {PKTGEN_TYPE_RANDOM_IPG, "random_ipg"},
+ {PKTGEN_TYPE_IPG_DURATION, "ipg_duration"},
+};
+DEFINE_STR_2_ENUM_FUNC(pktgen_types)
+
+enum phy_mdio_optype {
+ CLAUSE_22 = 0,
+ CLAUSE_45,
+};
+
+static struct {
+ enum phy_mdio_optype e;
+ const char *s;
+} mdio_optype[] = {
+ {CLAUSE_22, "c22"},
+ {CLAUSE_45, "c45"},
+};
+DEFINE_STR_2_ENUM_FUNC(mdio_optype)
+
+static int copy_user_input(const char __user *buffer,
+ size_t count, char *cmd_buf, size_t buf_sz)
+{
+ size_t cnt;
+
+ cnt = (count >= buf_sz - 1) ? buf_sz - 1 : count;
+
+ memset(cmd_buf, 0, buf_sz);
+ if (copy_from_user(cmd_buf, buffer, cnt))
+ return -EFAULT;
+
+ cmd_buf[cnt] = '\0';
+ return 0;
+}
+
+static int parse_phy_mdio_op_data(char *cmd, int write,
+ int *clause, int *dev_page, int *reg, int *val)
+{
+ char *end;
+ char *token;
+ int optype;
+ int devpage;
+
+ end = skip_spaces(cmd);
+ token = strsep(&end, " \t\n");
+ if (!token)
+ return -EINVAL;
+
+ optype = mdio_optype_str2enum(token);
+ if (optype == -1)
+ return -EINVAL;
+
+ *clause = optype;
+
+ end = skip_spaces(end);
+ token = strsep(&end, " \t\n");
+ if (!token)
+ return -EINVAL;
+
+ if (kstrtoint(token, 10, &devpage))
+ return -EINVAL;
+
+ if (devpage >= 0) {
+ /* device addr or page nr are 5 bits */
+ devpage &= 0x1f;
+ *dev_page = devpage;
+ } else if (devpage != -1) {
+ return -EINVAL;
+ }
+
+ /* Cannot ignore devad when using clause 45 */
+ if (devpage == -1 && optype == CLAUSE_45)
+ return -EINVAL;
+
+ end = skip_spaces(end);
+ token = strsep(&end, " \t\n");
+ if (!token)
+ return -EINVAL;
+
+ if (kstrtoint(token, 16, reg))
+ return -EINVAL;
+
+ *reg &= (*clause == CLAUSE_22) ? 0x1f : 0xffff;
+
+ if (write) {
+ end = skip_spaces(end);
+ token = strsep(&end, " \t\n");
+ if (!token)
+ return -EINVAL;
+
+ if (kstrtoint(token, 16, val))
+ return -EINVAL;
+
+ *val &= 0xffff;
+ }
+
+ return 0;
+}
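+
+/*
+ * Expected debugfs input (whitespace separated, values illustrative):
+ *   read_reg:  "<c22|c45> <devad|page|-1> <reg-hex>"
+ *   write_reg: "<c22|c45> <devad|page|-1> <reg-hex> <val-hex>"
+ * e.g. echo "c45 1 0x0002" > read_reg reads register 0x0002 of MMD 1.
+ * For Clause 45 the device address must not be -1.
+ */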
+
+static ssize_t phy_debug_read_reg_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct arm_smccc_res res;
+ int clause;
+ int dev_page = (1 << 5);
+ int reg;
+ int val;
+ int x1;
+ int x2;
+
+ if (copy_user_input(buffer, count, cmd_buf, CMD_SZ))
+ return -EFAULT;
+
+ if (parse_phy_mdio_op_data(cmd_buf, 0, &clause,
+ &dev_page, &reg, &val)) {
+ return -EINVAL;
+ }
+
+ x1 = (dev_page << 2) | (clause << 1) | 0;
+ x2 = reg;
+
+ arm_smccc_smc(PLAT_OCTEONTX_PHY_MDIO, x1, x2,
+ phy_data.eth, phy_data.lmac, 0, 0, 0, &res);
+
+ if (res.a0) {
+ pr_warn("MDIO: Reading PHY register failed!\n");
+ return count;
+ }
+
+ val = res.a1 & 0xffff;
+
+ pr_info("MDIO: val=0x%x\n", val);
+
+ return count;
+}
+
+static int phy_debug_read_reg_read(struct seq_file *s, void *unused)
+{
+ return 0;
+}
+DEFINE_ATTRIBUTE(phy_debug_read_reg);
+
+static ssize_t phy_debug_write_reg_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct arm_smccc_res res;
+ int clause;
+ int dev_page = (1 << 5);
+ int reg;
+ int val;
+ int x1;
+ int x2;
+
+ if (copy_user_input(buffer, count, cmd_buf, CMD_SZ))
+ return -EFAULT;
+
+ if (parse_phy_mdio_op_data(cmd_buf, 1, &clause,
+ &dev_page, &reg, &val)) {
+ return -EINVAL;
+ }
+
+ x1 = (dev_page << 2) | (clause << 1) | 1;
+ x2 = (val << 16) | reg;
+
+ arm_smccc_smc(PLAT_OCTEONTX_PHY_MDIO, x1, x2,
+ phy_data.eth, phy_data.lmac, 0, 0, 0, &res);
+
+ if (res.a0)
+ pr_warn("MDIO: Writing PHY register failed!\n");
+
+ return count;
+}
+
+static int phy_debug_write_reg_read(struct seq_file *s, void *unused)
+{
+ return 0;
+}
+DEFINE_ATTRIBUTE(phy_debug_write_reg);
+
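+/*
+ * prbs debugfs file: write "start <line|host> <tx|rx|tx-rx>
+ * <prbs_7|prbs_23|prbs_31|prbs_1010>" or "stop <line|host> <tx|rx|tx-rx>";
+ * reading the file reports the host and line side error counters.
+ * Example (illustrative): echo "start host tx-rx prbs_31" > prbs
+ */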
+static ssize_t phy_debug_prbs_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct arm_smccc_res res;
+ char *end;
+ char *token;
+ int cmd;
+ int host;
+ int direction;
+ int type = 0;
+ int cfg = 0;
+
+ if (copy_user_input(buffer, count, cmd_buf, CMD_SZ))
+ return -EFAULT;
+
+ end = skip_spaces(cmd_buf);
+ token = strsep(&end, " \t\n");
+ if (!token)
+ return -EINVAL;
+
+ cmd = prbs_cmds_str2enum(token);
+ if (cmd == -1)
+ return -EINVAL;
+
+ end = skip_spaces(end);
+ token = strsep(&end, " \t\n");
+ if (!token)
+ return -EINVAL;
+
+ host = prbs_sides_str2enum(token);
+ if (host == -1)
+ return -EINVAL;
+
+ end = skip_spaces(end);
+ token = strsep(&end, " \t\n");
+ if (!token)
+ return -EINVAL;
+
+ direction = prbs_directions_str2enum(token);
+ if (direction == -1)
+ return -EINVAL;
+
+ if (cmd == PHY_PRBS_START_CMD) {
+ end = skip_spaces(end);
+ token = strsep(&end, " \t\n");
+ if (!token)
+ return -EINVAL;
+
+ type = prbs_types_str2enum(token);
+ if (type == -1)
+ return -EINVAL;
+ }
+
+ cfg |= (type << 3) | (direction << 1) | host;
+
+ arm_smccc_smc(PLAT_OCTEONTX_PHY_DBG_PRBS, cmd, cfg,
+ phy_data.eth, phy_data.lmac, 0, 0, 0, &res);
+
+ if (res.a0) {
+ pr_warn("Configuring PRBS failed!\n");
+ return count;
+ }
+
+ if (cmd == PHY_PRBS_START_CMD) {
+ pr_info("PRBS %s started: side=%s, type=%s\n",
+ prbs_directions[direction-1].s, prbs_sides[host].s, prbs_types[type].s);
+ } else
+ pr_info("PRBS stopped\n");
+ return count;
+}
+
+static int phy_debug_prbs_read(struct seq_file *s, void *unused)
+{
+ struct arm_smccc_res res;
+ int cfg;
+ int host_errors;
+ int line_errors;
+
+ cfg = 1;
+ arm_smccc_smc(PLAT_OCTEONTX_PHY_DBG_PRBS, PHY_PRBS_GET_DATA_CMD, cfg,
+ phy_data.eth, phy_data.lmac, 0, 0, 0, &res);
+
+ host_errors = res.a0;
+
+ cfg = 0;
+ arm_smccc_smc(PLAT_OCTEONTX_PHY_DBG_PRBS, PHY_PRBS_GET_DATA_CMD, cfg,
+ phy_data.eth, phy_data.lmac, 0, 0, 0, &res);
+
+ line_errors = res.a0;
+
+ seq_printf(s, "PRBS errors: host=%d line=%d\n", host_errors, line_errors);
+
+ return 0;
+}
+DEFINE_ATTRIBUTE(phy_debug_prbs);
+
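+/*
+ * loopback debugfs file: write "start [line|host]
+ * [pcs_shallow|pcs_deep|pma_deep]" or "stop [line|host]"; the side
+ * defaults to line and the type to pcs_shallow.
+ * Example (illustrative): echo "start line pma_deep" > loopback
+ */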
+static ssize_t phy_debug_loopback_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct arm_smccc_res res;
+ char *end;
+ char *token;
+ int cmd;
+ int side;
+ int type = 0;
+ int cfg = 0;
+
+ if (copy_user_input(buffer, count, cmd_buf, CMD_SZ))
+ return -EFAULT;
+
+ end = skip_spaces(cmd_buf);
+ token = strsep(&end, " \t\n");
+ if (!token)
+ return -EINVAL;
+
+ cmd = loopback_cmds_str2enum(token);
+ if (cmd == -1)
+ return -EINVAL;
+
+ end = skip_spaces(end);
+ token = strsep(&end, " \t\n");
+ if (!token)
+ return -EINVAL;
+
+ side = loopback_sides_str2enum(token);
+ /* If no side is passed, assume line side as default */
+ if (side == -1)
+ side = LOOPBACK_SIDE_LINE;
+
+ end = skip_spaces(end);
+ token = strsep(&end, " \t\n");
+ if (!token)
+ return -EINVAL;
+
+ if (cmd == PHY_LOOPBACK_START_CMD) {
+
+ type = loopback_types_str2enum(token);
+ /* If no loopback type is passed, assume shallow loopback */
+ if (type == -1)
+ type = PCS_SHALLOW;
+ }
+
+ cfg |= (type << 2) | side;
+
+ arm_smccc_smc(PLAT_OCTEONTX_PHY_LOOPBACK, cmd, cfg,
+ phy_data.eth, phy_data.lmac, 0, 0, 0, &res);
+
+ if (res.a0) {
+ if (cmd == PHY_LOOPBACK_START_CMD)
+ pr_warn("Enabling %s side %s Loopback failed!\n",
+ loopback_sides[side].s, loopback_types[type].s);
+ else
+ pr_warn("Disabling %s side Loopback failed!\n",
+ loopback_sides[side].s);
+
+ return count;
+ }
+ if (cmd == PHY_LOOPBACK_START_CMD) {
+ pr_info("Loopback %s side %s type started\n",
+ loopback_sides[side].s, loopback_types[type].s);
+ } else {
+ pr_info("Loopback %s side type stopped\n",
+ loopback_sides[side].s);
+ }
+
+ return count;
+}
+
+static int phy_debug_loopback_read(struct seq_file *s, void *unused)
+{
+ return 0;
+}
+DEFINE_ATTRIBUTE(phy_debug_loopback);
+
+/* Eye measurement */
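+/*
+ * eye debugfs file: write "<line|host|test_3|test_4> <measure|plot>",
+ * e.g. (illustrative) echo "line measure" > eye
+ */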
+static ssize_t phy_debug_eye_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct arm_smccc_res res;
+ char *end;
+ char *token;
+ int type = 0, side = 0;
+
+ if (copy_user_input(buffer, count, cmd_buf, CMD_SZ))
+ return -EFAULT;
+
+ end = skip_spaces(cmd_buf);
+ token = strsep(&end, " \t\n");
+ if (!token)
+ return -EINVAL;
+
+ side = eye_sides_str2enum(token);
+ if (side == -1)
+ return -EINVAL;
+
+ end = skip_spaces(end);
+ token = strsep(&end, " \t\n");
+ if (!token)
+ return -EINVAL;
+
+ type = eye_types_str2enum(token);
+
+ arm_smccc_smc(PLAT_OCTEONTX_PHY_EYE_CAPTURE, side, type,
+ phy_data.eth, phy_data.lmac, 0, 0, 0, &res);
+
+ if (res.a0)
+ pr_warn("Eye %s side %s failed!\n", eye_sides[side].s, eye_types[type].s);
+ else
+ pr_info("Eye %s side %s success\n", eye_sides[side].s, eye_types[type].s);
+ return count;
+}
+
+static int phy_debug_eye_read(struct seq_file *s, void *unused)
+{
+ return 0;
+}
+DEFINE_ATTRIBUTE(phy_debug_eye);
+
+/* pktgen */
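+/*
+ * pktgen debugfs file (values illustrative):
+ *   echo "start <tx|rx|tx-rx> <line|host>" > pktgen
+ *   echo "stop <tx|rx|tx-rx> <line|host>" > pktgen
+ *   echo "set <sfd|pattern|dis_crc|in_payload|frame_len|num_packets|
+ *             random_ipg|ipg_duration> <value>" > pktgen
+ * Reading the file queries the current pktgen status over SMC.
+ */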
+static ssize_t phy_debug_pktgen_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct arm_smccc_res res;
+ char *end;
+ char *token;
+ int cmd;
+ int mode;
+ int type = 0;
+ int value = 0;
+ int side;
+ int cfg = 0;
+
+ if (copy_user_input(buffer, count, cmd_buf, CMD_SZ))
+ return -EFAULT;
+
+ end = skip_spaces(cmd_buf);
+ token = strsep(&end, " \t\n");
+ if (!token)
+ return -EINVAL;
+
+ cmd = pktgen_cmds_str2enum(token);
+ if (cmd == -1)
+ return -EINVAL;
+ switch (cmd) {
+ case PHY_PKTGEN_START_CMD:
+ case PHY_PKTGEN_STOP_CMD:
+ end = skip_spaces(end);
+ token = strsep(&end, " \t\n");
+ if (!token)
+ return -EINVAL;
+ mode = pktgen_modes_str2enum(token);
+ if (mode == -1)
+ return -EINVAL;
+ end = skip_spaces(end);
+ token = strsep(&end, " \t\n");
+ if (!token)
+ return -EINVAL;
+ side = pktgen_sides_str2enum(token);
+ if (side == -1)
+ return -EINVAL;
+ cfg = ((cmd & 0xff) << 8) | (mode & 0xff);
+ pr_info("cmd=%d mode=%d side=%d cfg=0x%x\n", cmd, mode, side, cfg);
+
+ arm_smccc_smc(PLAT_OCTEONTX_PHY_PKT_GEN, cfg, side,
+ phy_data.eth, phy_data.lmac, 0, 0, 0, &res);
+ break;
+ case PHY_PKTGEN_SET_CMD:
+ end = skip_spaces(end);
+ token = strsep(&end, " \t\n");
+ if (!token)
+ return -EINVAL;
+ type = pktgen_types_str2enum(token);
+ if (type == -1)
+ return -EINVAL;
+ end = skip_spaces(end);
+ token = strsep(&end, " \t\n");
+ if (!token)
+ return -EINVAL;
+ if (kstrtoint(token, 0, &value))
+ return -EINVAL;
+ cfg = ((cmd & 0xff) << 8) | (type & 0xff);
+
+ arm_smccc_smc(PLAT_OCTEONTX_PHY_PKT_GEN, cfg, value,
+ phy_data.eth, phy_data.lmac, 0, 0, 0, &res);
+ break;
+ default:
+ pr_warn("PKTGEN failed for invalid command %d!\n", cmd);
+ return -EINVAL;
+ }
+
+ if (res.a0) {
+ pr_warn("PKTGEN command failed!\n");
+ return count;
+ }
+
+ pr_info("PKTGEN command success!\n");
+ return count;
+}
+
+static int phy_debug_pktgen_read(struct seq_file *s, void *unused)
+{
+ struct arm_smccc_res res;
+ int cmd;
+ int cfg = 0;
+
+ cmd = PHY_PKTGEN_GET_CMD;
+ cfg = (cmd & 0xff) << 8;
+ arm_smccc_smc(PLAT_OCTEONTX_PHY_PKT_GEN, cfg, 0,
+ phy_data.eth, phy_data.lmac, 0, 0, 0, &res);
+
+ seq_printf(s, "PKTGEN status: %ld\n", res.a0);
+
+ return 0;
+}
+DEFINE_ATTRIBUTE(phy_debug_pktgen);
+
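+/*
+ * serdes debugfs file: reading reports the SGMII VOD setting; writing one
+ * of 14mV/112mV/210mV/308mV/406mV/504mV/602mV/700mV changes it,
+ * e.g. (illustrative) echo "504mV" > serdes
+ */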
+static int phy_debug_serdes_read(struct seq_file *s, void *unused)
+{
+ struct arm_smccc_res res;
+ const char *vod_str;
+
+ arm_smccc_smc(PLAT_OCTEONTX_PHY_SERDES_CFG, 0, 0,
+ phy_data.eth, phy_data.lmac, 0, 0, 0, &res);
+
+ if (res.a0) {
+ seq_puts(s, "Reading SERDES config failed!\n");
+ return 0;
+ }
+
+ vod_str = sgmii_vod_values[res.a1 & 0x7].s;
+ seq_printf(s, "SERDES config: VOD=%s\n", vod_str);
+
+ return 0;
+}
+
+static ssize_t phy_debug_serdes_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct arm_smccc_res res;
+ char *token;
+ int vod_val;
+
+ if (copy_user_input(buffer, count, cmd_buf, CMD_SZ))
+ return -EFAULT;
+
+ token = strim(skip_spaces(cmd_buf));
+ if (!token)
+ return -EINVAL;
+
+ vod_val = sgmii_vod_values_str2enum(token);
+ if (vod_val == -1)
+ return -EINVAL;
+
+ vod_val &= 0x7;
+
+ arm_smccc_smc(PLAT_OCTEONTX_PHY_SERDES_CFG, 1, vod_val,
+ phy_data.eth, phy_data.lmac, 0, 0, 0, &res);
+
+ if (res.a0) {
+ pr_warn("Changing SERDES config failed!\n");
+ return count;
+ }
+
+ pr_info("New SERDES config: VOD=%s\n", sgmii_vod_values[vod_val].s);
+
+ return count;
+}
+DEFINE_ATTRIBUTE(phy_debug_serdes);
+
+static int phy_debug_temp_show(struct seq_file *s, void *unused)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(PLAT_OCTEONTX_PHY_GET_TEMP, phy_data.eth,
+ phy_data.lmac, 0, 0, 0, 0, 0, &res);
+
+ if (res.a0)
+ seq_puts(s, "Reading temperature failed!\n");
+ else
+ seq_printf(s, "Temperature: %ld\n", res.a1);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(phy_debug_temp);
+
+static int parse_eth_lmac(char *cmd, int *eth, int *lmac)
+{
+ char *end;
+ char *token;
+
+ end = skip_spaces(cmd);
+ token = strsep(&end, " \t\n");
+ if (!token)
+ return -EINVAL;
+
+ if (kstrtoint(token, 10, eth) ||
+ *eth < 0 || *eth >= MAX_ETH)
+ return -EINVAL;
+
+ end = skip_spaces(end);
+ token = strsep(&end, " \t\n");
+ if (!token)
+ return -EINVAL;
+
+ if (kstrtoint(token, 10, lmac) ||
+ *lmac < 0 || *lmac >= MAX_LMAC_PER_ETH)
+ return -EINVAL;
+
+ return 0;
+}
+
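+/*
+ * phy debugfs file: selects the PHY all other files operate on.  Write
+ * "<eth> <lmac>" in decimal (eth < MAX_ETH, lmac < MAX_LMAC_PER_ETH),
+ * e.g. (illustrative) echo "2 1" > phy
+ */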
+static int phy_debug_phy_read(struct seq_file *s, void *unused)
+{
+ seq_printf(s, "Selected PHY: @(eth=%d, lmac=%d)\n",
+ phy_data.eth, phy_data.lmac);
+ return 0;
+}
+
+static ssize_t phy_debug_phy_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ int eth;
+ int lmac;
+
+ if (copy_user_input(buffer, count, cmd_buf, CMD_SZ))
+ return -EFAULT;
+
+ if (parse_eth_lmac(cmd_buf, &eth, &lmac))
+ return -EINVAL;
+
+ phy_data.eth = eth;
+ phy_data.lmac = lmac;
+
+ pr_info("New PHY selected: @(eth=%d, lmac=%d)\n",
+ phy_data.eth, phy_data.lmac);
+
+ return count;
+}
+DEFINE_ATTRIBUTE(phy_debug_phy);
+
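+/*
+ * Everything is exposed under <debugfs>/phy_diagnostics/: phy,
+ * temperature, serdes, loopback, prbs, eye, pktgen, write_reg and
+ * read_reg.
+ */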
+static int phy_dbg_setup_debugfs(void)
+{
+ struct dentry *dbg_file;
+
+ phy_dbgfs_root = debugfs_create_dir("phy_diagnostics", NULL);
+
+ dbg_file = debugfs_create_file("phy", 0644, phy_dbgfs_root, NULL,
+ &phy_debug_phy_fops);
+ if (!dbg_file)
+ goto create_failed;
+
+ dbg_file = debugfs_create_file("temperature", 0644, phy_dbgfs_root, NULL,
+ &phy_debug_temp_fops);
+ if (!dbg_file)
+ goto create_failed;
+
+ dbg_file = debugfs_create_file("serdes", 0644, phy_dbgfs_root, NULL,
+ &phy_debug_serdes_fops);
+ if (!dbg_file)
+ goto create_failed;
+
+ dbg_file = debugfs_create_file("loopback", 0644, phy_dbgfs_root, NULL,
+ &phy_debug_loopback_fops);
+ if (!dbg_file)
+ goto create_failed;
+
+ dbg_file = debugfs_create_file("prbs", 0644, phy_dbgfs_root, NULL,
+ &phy_debug_prbs_fops);
+ if (!dbg_file)
+ goto create_failed;
+
+ dbg_file = debugfs_create_file("eye", 0644, phy_dbgfs_root, NULL,
+ &phy_debug_eye_fops);
+ if (!dbg_file)
+ goto create_failed;
+
+ dbg_file = debugfs_create_file("pktgen", 0644, phy_dbgfs_root, NULL,
+ &phy_debug_pktgen_fops);
+ if (!dbg_file)
+ goto create_failed;
+
+ dbg_file = debugfs_create_file("write_reg", 0644, phy_dbgfs_root, NULL,
+ &phy_debug_write_reg_fops);
+ if (!dbg_file)
+ goto create_failed;
+
+ dbg_file = debugfs_create_file("read_reg", 0644, phy_dbgfs_root, NULL,
+ &phy_debug_read_reg_fops);
+ if (!dbg_file)
+ goto create_failed;
+
+ return 0;
+
+create_failed:
+ pr_err("Failed to create debugfs dir/file for octeontx_phy\n");
+ debugfs_remove_recursive(phy_dbgfs_root);
+ return -1;
+}
+
+static int __init phy_dbg_init(void)
+{
+ if (octeontx_soc_check_smc() < 0) {
+ pr_info("PHY diagnostics: Not supported\n");
+ return -EPERM;
+ }
+
+ return phy_dbg_setup_debugfs();
+}
+
+static void __exit phy_dbg_exit(void)
+{
+ debugfs_remove_recursive(phy_dbgfs_root);
+}
+
+module_init(phy_dbg_init);
+module_exit(phy_dbg_exit);
+
+MODULE_AUTHOR("Damian Eppel <deppel@marvell.com>");
+MODULE_DESCRIPTION("PHY diagnostic commands for OcteonTX");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index aadaea052f51..e69e58e98814 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -212,6 +212,18 @@ config SPI_CADENCE_QUADSPI
device with a Cadence QSPI controller and want to access the
Flash as an MTD device.
+config SPI_CADENCE_XSPI
+ tristate "Cadence XSPI controller"
+ depends on (OF || COMPILE_TEST) && HAS_IOMEM
+ depends on SPI_MEM
+ help
+ Enable support for the Cadence XSPI Flash controller.
+
+ Cadence XSPI is a specialized controller for connecting an SPI
+ Flash over a bus up to 8 bits wide. Enable this option if you have a
+ device with a Cadence XSPI controller and want to access the
+ Flash as an MTD device.
+
config SPI_CLPS711X
tristate "CLPS711X host SPI controller"
depends on ARCH_CLPS711X || COMPILE_TEST
@@ -550,6 +562,15 @@ config SPI_OCTEON
SPI host driver for the hardware found on some Cavium OCTEON
SOCs.
+config SPI_OCTEONTX2
+ tristate "Marvell OcteonTX2 SPI controller"
+ depends on PCI && 64BIT && (ARM64 || COMPILE_TEST)
+ help
+ This driver supports the OcteonTX2 SPI controller in master
+ mode. It supports single, dual and quad mode transfers.
+ This controller hardware is found on some Marvell
+ OcteonTX2 SoCs.
+
config SPI_OMAP_UWIRE
tristate "OMAP1 MicroWire"
depends on ARCH_OMAP1
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 6fea5821662e..7500c1c490ad 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -32,6 +32,7 @@ obj-$(CONFIG_SPI_BITBANG) += spi-bitbang.o
obj-$(CONFIG_SPI_BUTTERFLY) += spi-butterfly.o
obj-$(CONFIG_SPI_CADENCE) += spi-cadence.o
obj-$(CONFIG_SPI_CADENCE_QUADSPI) += spi-cadence-quadspi.o
+obj-$(CONFIG_SPI_CADENCE_XSPI) += spi-cadence-xspi.o
obj-$(CONFIG_SPI_CLPS711X) += spi-clps711x.o
obj-$(CONFIG_SPI_COLDFIRE_QSPI) += spi-coldfire-qspi.o
obj-$(CONFIG_SPI_DAVINCI) += spi-davinci.o
@@ -77,6 +78,7 @@ obj-$(CONFIG_SPI_NXP_FLEXSPI) += spi-nxp-fspi.o
obj-$(CONFIG_SPI_OC_TINY) += spi-oc-tiny.o
spi-octeon-objs := spi-cavium.o spi-cavium-octeon.o
obj-$(CONFIG_SPI_OCTEON) += spi-octeon.o
+obj-$(CONFIG_SPI_OCTEONTX2) += spi-octeontx2.o
obj-$(CONFIG_SPI_OMAP_UWIRE) += spi-omap-uwire.o
obj-$(CONFIG_SPI_OMAP_100K) += spi-omap-100k.o
obj-$(CONFIG_SPI_OMAP24XX) += spi-omap2-mcspi.o
diff --git a/drivers/spi/spi-cadence-xspi.c b/drivers/spi/spi-cadence-xspi.c
new file mode 100644
index 000000000000..181a4c6b9e6e
--- /dev/null
+++ b/drivers/spi/spi-cadence-xspi.c
@@ -0,0 +1,936 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Cadence XSPI flash controller driver
+// Copyright (C) 2020-21 Cadence
+
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+#include <linux/mtd/spi-nor.h>
+#include <linux/bitfield.h>
+#include <linux/limits.h>
+#include <linux/log2.h>
+
+#define CDNS_XSPI_MAGIC_NUM_VALUE 0x6522
+#define CDNS_XSPI_MAX_BANKS 8
+#define CDNS_XSPI_NAME "cadence-xspi"
+
+/*
+ * Note: below are additional auxiliary registers to
+ * configure XSPI controller pin-strap settings
+ */
+
+/* PHY DQ timing register */
+#define CDNS_XSPI_CCP_PHY_DQ_TIMING 0x0000
+
+/* PHY DQS timing register */
+#define CDNS_XSPI_CCP_PHY_DQS_TIMING 0x0004
+
+/* PHY gate loopback control register */
+#define CDNS_XSPI_CCP_PHY_GATE_LPBCK_CTRL 0x0008
+
+/* PHY DLL slave control register */
+#define CDNS_XSPI_CCP_PHY_DLL_SLAVE_CTRL 0x0010
+
+/* DLL PHY control register */
+#define CDNS_XSPI_DLL_PHY_CTRL 0x1034
+
+/* Command registers */
+#define CDNS_XSPI_CMD_REG_0 0x0000
+#define CDNS_XSPI_CMD_REG_1 0x0004
+#define CDNS_XSPI_CMD_REG_2 0x0008
+#define CDNS_XSPI_CMD_REG_3 0x000C
+#define CDNS_XSPI_CMD_REG_4 0x0010
+#define CDNS_XSPI_CMD_REG_5 0x0014
+
+/* Command status registers */
+#define CDNS_XSPI_CMD_STATUS_REG 0x0044
+
+/* Controller status register */
+#define CDNS_XSPI_CTRL_STATUS_REG 0x0100
+#define CDNS_XSPI_INIT_COMPLETED BIT(16)
+#define CDNS_XSPI_INIT_LEGACY BIT(9)
+#define CDNS_XSPI_INIT_FAIL BIT(8)
+#define CDNS_XSPI_CTRL_BUSY BIT(7)
+
+/* Controller interrupt status register */
+#define CDNS_XSPI_INTR_STATUS_REG 0x0110
+#define CDNS_XSPI_STIG_DONE BIT(23)
+#define CDNS_XSPI_SDMA_ERROR BIT(22)
+#define CDNS_XSPI_SDMA_TRIGGER BIT(21)
+#define CDNS_XSPI_CMD_IGNRD_EN BIT(20)
+#define CDNS_XSPI_DDMA_TERR_EN BIT(18)
+#define CDNS_XSPI_CDMA_TREE_EN BIT(17)
+#define CDNS_XSPI_CTRL_IDLE_EN BIT(16)
+
+#define CDNS_XSPI_TRD_COMP_INTR_STATUS 0x0120
+#define CDNS_XSPI_TRD_ERR_INTR_STATUS 0x0130
+#define CDNS_XSPI_TRD_ERR_INTR_EN 0x0134
+
+/* Controller interrupt enable register */
+#define CDNS_XSPI_INTR_ENABLE_REG 0x0114
+#define CDNS_XSPI_INTR_EN BIT(31)
+#define CDNS_XSPI_STIG_DONE_EN BIT(23)
+#define CDNS_XSPI_SDMA_ERROR_EN BIT(22)
+#define CDNS_XSPI_SDMA_TRIGGER_EN BIT(21)
+
+#define CDNS_XSPI_INTR_MASK (CDNS_XSPI_INTR_EN | \
+ CDNS_XSPI_STIG_DONE_EN | \
+ CDNS_XSPI_SDMA_ERROR_EN | \
+ CDNS_XSPI_SDMA_TRIGGER_EN)
+
+/* Controller config register */
+#define CDNS_XSPI_CTRL_CONFIG_REG 0x0230
+#define CDNS_XSPI_CTRL_WORK_MODE GENMASK(6, 5)
+
+#define CDNS_XSPI_WORK_MODE_DIRECT 0
+#define CDNS_XSPI_WORK_MODE_STIG 1
+#define CDNS_XSPI_WORK_MODE_ACMD 3
+
+/* SDMA trigger transaction registers */
+#define CDNS_XSPI_SDMA_SIZE_REG 0x0240
+#define CDNS_XSPI_SDMA_TRD_INFO_REG 0x0244
+#define CDNS_XSPI_SDMA_DIR BIT(8)
+
+/* Controller features register */
+#define CDNS_XSPI_CTRL_FEATURES_REG 0x0F04
+#define CDNS_XSPI_NUM_BANKS GENMASK(25, 24)
+#define CDNS_XSPI_DMA_DATA_WIDTH BIT(21)
+#define CDNS_XSPI_NUM_THREADS GENMASK(3, 0)
+
+/* Controller version register */
+#define CDNS_XSPI_CTRL_VERSION_REG 0x0F00
+#define CDNS_XSPI_MAGIC_NUM GENMASK(31, 16)
+#define CDNS_XSPI_CTRL_REV GENMASK(7, 0)
+
+/* STIG Profile 1.0 instruction fields (split into registers) */
+#define CDNS_XSPI_CMD_INSTR_TYPE GENMASK(6, 0)
+#define CDNS_XSPI_CMD_P1_R1_ADDR0 GENMASK(31, 24)
+#define CDNS_XSPI_CMD_P1_R2_ADDR1 GENMASK(7, 0)
+#define CDNS_XSPI_CMD_P1_R2_ADDR2 GENMASK(15, 8)
+#define CDNS_XSPI_CMD_P1_R2_ADDR3 GENMASK(23, 16)
+#define CDNS_XSPI_CMD_P1_R2_ADDR4 GENMASK(31, 24)
+#define CDNS_XSPI_CMD_P1_R3_ADDR5 GENMASK(7, 0)
+#define CDNS_XSPI_CMD_P1_R3_CMD GENMASK(23, 16)
+#define CDNS_XSPI_CMD_P1_R3_NUM_ADDR_BYTES GENMASK(30, 28)
+#define CDNS_XSPI_CMD_P1_R4_ADDR_IOS GENMASK(1, 0)
+#define CDNS_XSPI_CMD_P1_R4_CMD_IOS GENMASK(9, 8)
+#define CDNS_XSPI_CMD_P1_R4_BANK GENMASK(14, 12)
+
+/* STIG data sequence instruction fields (split into registers) */
+#define CDNS_XSPI_CMD_DSEQ_R2_DCNT_L GENMASK(31, 16)
+#define CDNS_XSPI_CMD_DSEQ_R3_DCNT_H GENMASK(15, 0)
+#define CDNS_XSPI_CMD_DSEQ_R3_NUM_OF_DUMMY GENMASK(25, 20)
+#define CDNS_XSPI_CMD_DSEQ_R4_BANK GENMASK(14, 12)
+#define CDNS_XSPI_CMD_DSEQ_R4_DATA_IOS GENMASK(9, 8)
+#define CDNS_XSPI_CMD_DSEQ_R4_DIR BIT(4)
+
+/* STIG command status fields */
+#define CDNS_XSPI_CMD_STATUS_COMPLETED BIT(15)
+#define CDNS_XSPI_CMD_STATUS_FAILED BIT(14)
+#define CDNS_XSPI_CMD_STATUS_DQS_ERROR BIT(3)
+#define CDNS_XSPI_CMD_STATUS_CRC_ERROR BIT(2)
+#define CDNS_XSPI_CMD_STATUS_BUS_ERROR BIT(1)
+#define CDNS_XSPI_CMD_STATUS_INV_SEQ_ERROR BIT(0)
+
+#define CDNS_XSPI_STIG_DONE_FLAG BIT(0)
+#define CDNS_XSPI_TRD_STATUS 0x0104
+
+/* Helper macros for filling command registers */
+#define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_1(op, data_phase) ( \
+ FIELD_PREP(CDNS_XSPI_CMD_INSTR_TYPE, (data_phase) ? \
+ CDNS_XSPI_STIG_INSTR_TYPE_1 : CDNS_XSPI_STIG_INSTR_TYPE_0) | \
+ FIELD_PREP(CDNS_XSPI_CMD_P1_R1_ADDR0, (op)->addr.val & 0xff))
+
+#define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_2(op) ( \
+ FIELD_PREP(CDNS_XSPI_CMD_P1_R2_ADDR1, ((op)->addr.val >> 8) & 0xFF) | \
+ FIELD_PREP(CDNS_XSPI_CMD_P1_R2_ADDR2, ((op)->addr.val >> 16) & 0xFF) | \
+ FIELD_PREP(CDNS_XSPI_CMD_P1_R2_ADDR3, ((op)->addr.val >> 24) & 0xFF) | \
+ FIELD_PREP(CDNS_XSPI_CMD_P1_R2_ADDR4, ((op)->addr.val >> 32) & 0xFF))
+
+#define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_3(op) ( \
+ FIELD_PREP(CDNS_XSPI_CMD_P1_R3_ADDR5, ((op)->addr.val >> 40) & 0xFF) | \
+ FIELD_PREP(CDNS_XSPI_CMD_P1_R3_CMD, (op)->cmd.opcode) | \
+ FIELD_PREP(CDNS_XSPI_CMD_P1_R3_NUM_ADDR_BYTES, (op)->addr.nbytes))
+
+#define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_4(op, chipsel) ( \
+ FIELD_PREP(CDNS_XSPI_CMD_P1_R4_ADDR_IOS, ilog2((op)->addr.buswidth)) | \
+ FIELD_PREP(CDNS_XSPI_CMD_P1_R4_CMD_IOS, ilog2((op)->cmd.buswidth)) | \
+ FIELD_PREP(CDNS_XSPI_CMD_P1_R4_BANK, chipsel))
+
+#define CDNS_XSPI_CMD_FLD_DSEQ_CMD_1(op) \
+ FIELD_PREP(CDNS_XSPI_CMD_INSTR_TYPE, CDNS_XSPI_STIG_INSTR_TYPE_DATA_SEQ)
+
+#define CDNS_XSPI_CMD_FLD_DSEQ_CMD_2(op) \
+ FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R2_DCNT_L, (op)->data.nbytes & 0xFFFF)
+
+#define CDNS_XSPI_CMD_FLD_DSEQ_CMD_3(op) ( \
+ FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R3_DCNT_H, \
+ (op->data.nbytes >> 16) & 0xffff) | \
+ FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R3_NUM_OF_DUMMY, \
+ op->dummy.buswidth != 0 ? \
+ ((op->dummy.nbytes * 8) / op->dummy.buswidth) : \
+ 0))
+
+#define CDNS_XSPI_CMD_FLD_DSEQ_CMD_4(op, chipsel) ( \
+ FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R4_BANK, chipsel) | \
+ FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R4_DATA_IOS, \
+ ilog2((op)->data.buswidth)) | \
+ FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R4_DIR, \
+ ((op)->data.dir == SPI_MEM_DATA_IN) ? \
+ CDNS_XSPI_STIG_CMD_DIR_READ : CDNS_XSPI_STIG_CMD_DIR_WRITE))
+
+/* clock config register */
+#define CDNS_XSPI_CLK_CTRL_AUX_REG 0x2020
+#define CDNS_XSPI_CLK_ENABLE BIT(0)
+#define CDNS_XSPI_CLK_DIV GENMASK(4, 1)
+
+/* MSI-X clear interrupt register */
+#define CDNS_XSPI_SPIX_INTR_AUX 0x2000
+/* Clock macros */
+#define CDNS_XSPI_CLOCK_IO_HZ 800000000
+#define CDNS_XSPI_CLOCK_DIVIDED(div) ((CDNS_XSPI_CLOCK_IO_HZ) / (div))
+
+/* PHY default values */
+#define REGS_DLL_PHY_CTRL 0x00000707
+#define CTB_RFILE_PHY_CTRL 0x00004000
+#define RFILE_PHY_TSEL 0x00000000
+#define RFILE_PHY_DQ_TIMING 0x00000101
+#define RFILE_PHY_DQS_TIMING 0x00700404
+#define RFILE_PHY_GATE_LPBK_CTRL 0x00200030
+#define RFILE_PHY_DLL_MASTER_CTRL 0x00800000
+#define RFILE_PHY_DLL_SLAVE_CTRL 0x0000ff01
+
+/* PHY config registers */
+#define CDNS_XSPI_RF_MINICTRL_REGS_DLL_PHY_CTRL 0x1034
+#define CDNS_XSPI_PHY_CTB_RFILE_PHY_CTRL 0x0080
+#define CDNS_XSPI_PHY_CTB_RFILE_PHY_TSEL 0x0084
+#define CDNS_XSPI_PHY_DATASLICE_RFILE_PHY_DQ_TIMING 0x0000
+#define CDNS_XSPI_PHY_DATASLICE_RFILE_PHY_DQS_TIMING 0x0004
+#define CDNS_XSPI_PHY_DATASLICE_RFILE_PHY_GATE_LPBK_CTRL 0x0008
+#define CDNS_XSPI_PHY_DATASLICE_RFILE_PHY_DLL_MASTER_CTRL 0x000c
+#define CDNS_XSPI_PHY_DATASLICE_RFILE_PHY_DLL_SLAVE_CTRL 0x0010
+#define CDNS_XSPI_DATASLICE_RFILE_PHY_DLL_OBS_REG_0 0x001c
+
+#define CDNS_XSPI_DLL_RST_N BIT(24)
+#define CDNS_XSPI_DLL_LOCK BIT(0)
+
+enum cdns_xspi_stig_instr_type {
+ CDNS_XSPI_STIG_INSTR_TYPE_0,
+ CDNS_XSPI_STIG_INSTR_TYPE_1,
+ CDNS_XSPI_STIG_INSTR_TYPE_DATA_SEQ = 127,
+};
+
+enum cdns_xspi_sdma_dir {
+ CDNS_XSPI_SDMA_DIR_READ,
+ CDNS_XSPI_SDMA_DIR_WRITE,
+};
+
+enum cdns_xspi_stig_cmd_dir {
+ CDNS_XSPI_STIG_CMD_DIR_READ,
+ CDNS_XSPI_STIG_CMD_DIR_WRITE,
+};
+
+enum cdns_xspi_sdma_size {
+ CDNS_XSPI_SDMA_SIZE_8B = 0,
+ CDNS_XSPI_SDMA_SIZE_64B = 1,
+};
+
+struct cdns_xspi_dev {
+ struct platform_device *pdev;
+ struct device *dev;
+
+ void __iomem *iobase;
+ void __iomem *auxbase;
+ void __iomem *sdmabase;
+
+ int irq;
+ int cur_cs;
+ unsigned int sdmasize;
+
+ struct completion cmd_complete;
+ struct completion auto_cmd_complete;
+ struct completion sdma_complete;
+ bool sdma_error;
+
+ void *in_buffer;
+ const void *out_buffer;
+
+ u8 hw_num_banks;
+ enum cdns_xspi_sdma_size read_size;
+};
+
+static const int cdns_xspi_clk_div_list[] = {
+ 4, //0x0 = Divide by 4. SPI clock is 200 MHz.
+ 6, //0x1 = Divide by 6. SPI clock is 133.33 MHz.
+ 8, //0x2 = Divide by 8. SPI clock is 100 MHz.
+ 10, //0x3 = Divide by 10. SPI clock is 80 MHz.
+ 12, //0x4 = Divide by 12. SPI clock is 66.666 MHz.
+ 16, //0x5 = Divide by 16. SPI clock is 50 MHz.
+ 18, //0x6 = Divide by 18. SPI clock is 44.44 MHz.
+ 20, //0x7 = Divide by 20. SPI clock is 40 MHz.
+ 24, //0x8 = Divide by 24. SPI clock is 33.33 MHz.
+ 32, //0x9 = Divide by 32. SPI clock is 25 MHz.
+ 40, //0xA = Divide by 40. SPI clock is 20 MHz.
+ 50, //0xB = Divide by 50. SPI clock is 16 MHz.
+ 64, //0xC = Divide by 64. SPI clock is 12.5 MHz.
+ 128, //0xD = Divide by 128. SPI clock is 6.25 MHz.
+ -1 //End of list
+};
+
+static bool cdns_xspi_reset_dll(struct cdns_xspi_dev *cdns_xspi)
+{
+ u32 dll_cntrl = readl(cdns_xspi->iobase + CDNS_XSPI_RF_MINICTRL_REGS_DLL_PHY_CTRL);
+ u32 dll_lock;
+
+ /* Reset the DLL */
+ dll_cntrl |= CDNS_XSPI_DLL_RST_N;
+ writel(dll_cntrl, cdns_xspi->iobase + CDNS_XSPI_RF_MINICTRL_REGS_DLL_PHY_CTRL);
+
+ /* Wait for DLL lock */
+ return readl_relaxed_poll_timeout(cdns_xspi->iobase +
+ CDNS_XSPI_INTR_STATUS_REG,
+ dll_lock, ((dll_lock & CDNS_XSPI_DLL_LOCK) == 1), 10, 10000);
+}
+
+/* Static configuration of the PHY */
+static bool cdns_xspi_configure_phy(struct cdns_xspi_dev *cdns_xspi)
+{
+ writel(REGS_DLL_PHY_CTRL,
+ cdns_xspi->iobase + CDNS_XSPI_RF_MINICTRL_REGS_DLL_PHY_CTRL);
+ writel(CTB_RFILE_PHY_CTRL,
+ cdns_xspi->auxbase + CDNS_XSPI_PHY_CTB_RFILE_PHY_CTRL);
+ writel(RFILE_PHY_TSEL,
+ cdns_xspi->auxbase + CDNS_XSPI_PHY_CTB_RFILE_PHY_TSEL);
+ writel(RFILE_PHY_DQ_TIMING,
+ cdns_xspi->auxbase + CDNS_XSPI_PHY_DATASLICE_RFILE_PHY_DQ_TIMING);
+ writel(RFILE_PHY_DQS_TIMING,
+ cdns_xspi->auxbase + CDNS_XSPI_PHY_DATASLICE_RFILE_PHY_DQS_TIMING);
+ writel(RFILE_PHY_GATE_LPBK_CTRL,
+ cdns_xspi->auxbase + CDNS_XSPI_PHY_DATASLICE_RFILE_PHY_GATE_LPBK_CTRL);
+ writel(RFILE_PHY_DLL_MASTER_CTRL,
+ cdns_xspi->auxbase + CDNS_XSPI_PHY_DATASLICE_RFILE_PHY_DLL_MASTER_CTRL);
+ writel(RFILE_PHY_DLL_SLAVE_CTRL,
+ cdns_xspi->auxbase + CDNS_XSPI_PHY_DATASLICE_RFILE_PHY_DLL_SLAVE_CTRL);
+
+ return cdns_xspi_reset_dll(cdns_xspi);
+}
+
+/* Find the highest available clock that does not exceed the requested rate */
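+/*
+ * Example (illustrative): requesting 25000000 selects divider 32
+ * (800 MHz / 32 = 25 MHz); if no listed divider is slow enough, the code
+ * falls back to index 0xD (divide by 128, 6.25 MHz).
+ */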
+static bool cdns_xspi_setup_clock(struct cdns_xspi_dev *cdns_xspi, int requested_clk)
+{
+ int i = 0;
+ int clk_val;
+ u32 clk_reg;
+ bool update_clk = false;
+
+ while (cdns_xspi_clk_div_list[i] > 0) {
+ clk_val = CDNS_XSPI_CLOCK_DIVIDED(cdns_xspi_clk_div_list[i]);
+ if (clk_val <= requested_clk)
+ break;
+ i++;
+ }
+
+ if (cdns_xspi_clk_div_list[i] == -1) {
+ pr_info("Unable to find clock divider for CLK: %d - setting 6.25MHz\n",
+ requested_clk);
+ i = 0x0D;
+ } else {
+ pr_debug("Found clk div: %d, clk val: %d\n", cdns_xspi_clk_div_list[i],
+ CDNS_XSPI_CLOCK_DIVIDED(cdns_xspi_clk_div_list[i]));
+ }
+
+ clk_reg = readl(cdns_xspi->auxbase + CDNS_XSPI_CLK_CTRL_AUX_REG);
+
+ if (FIELD_GET(CDNS_XSPI_CLK_DIV, clk_reg) != i) {
+ clk_reg = FIELD_PREP(CDNS_XSPI_CLK_DIV, i);
+ clk_reg |= CDNS_XSPI_CLK_ENABLE;
+ update_clk = true;
+ }
+
+ if (update_clk)
+ writel(clk_reg, cdns_xspi->auxbase + CDNS_XSPI_CLK_CTRL_AUX_REG);
+
+ return update_clk;
+}
+
+static int cdns_xspi_wait_for_controller_idle(struct cdns_xspi_dev *cdns_xspi)
+{
+ u32 ctrl_stat;
+
+ return readl_relaxed_poll_timeout(cdns_xspi->iobase +
+ CDNS_XSPI_CTRL_STATUS_REG,
+ ctrl_stat,
+ ((ctrl_stat &
+ CDNS_XSPI_CTRL_BUSY) == 0),
+ 100, 50000);
+}
+
+static void cdns_xspi_trigger_command(struct cdns_xspi_dev *cdns_xspi,
+ u32 cmd_regs[6])
+{
+ writel(cmd_regs[5], cdns_xspi->iobase + CDNS_XSPI_CMD_REG_5);
+ writel(cmd_regs[4], cdns_xspi->iobase + CDNS_XSPI_CMD_REG_4);
+ writel(cmd_regs[3], cdns_xspi->iobase + CDNS_XSPI_CMD_REG_3);
+ writel(cmd_regs[2], cdns_xspi->iobase + CDNS_XSPI_CMD_REG_2);
+ writel(cmd_regs[1], cdns_xspi->iobase + CDNS_XSPI_CMD_REG_1);
+ writel(cmd_regs[0], cdns_xspi->iobase + CDNS_XSPI_CMD_REG_0);
+}
+
+static int cdns_xspi_check_command_status(struct cdns_xspi_dev *cdns_xspi)
+{
+ int ret = 0;
+ int retry_count = 2;
+ u32 cmd_status = readl(cdns_xspi->iobase + CDNS_XSPI_CMD_STATUS_REG);
+
+ while (retry_count) {
+ if (cmd_status & CDNS_XSPI_CMD_STATUS_COMPLETED) {
+ if ((cmd_status & CDNS_XSPI_CMD_STATUS_FAILED) != 0) {
+ if (cmd_status & CDNS_XSPI_CMD_STATUS_DQS_ERROR) {
+ dev_err(cdns_xspi->dev,
+ "Incorrect DQS pulses detected\n");
+ ret = -EPROTO;
+ break;
+ }
+ if (cmd_status & CDNS_XSPI_CMD_STATUS_CRC_ERROR) {
+ dev_err(cdns_xspi->dev,
+ "CRC error received\n");
+ ret = -EPROTO;
+ break;
+ }
+ if (cmd_status & CDNS_XSPI_CMD_STATUS_BUS_ERROR) {
+ dev_err(cdns_xspi->dev,
+ "Error resp on system DMA interface\n");
+ ret = -EPROTO;
+ break;
+ }
+ if (cmd_status & CDNS_XSPI_CMD_STATUS_INV_SEQ_ERROR) {
+ dev_err(cdns_xspi->dev,
+ "Invalid command sequence detected\n");
+ ret = -EPROTO;
+ break;
+ }
+ }
+ break;
+ } else {
+ if (retry_count == 0) {
+ dev_err(cdns_xspi->dev, "Fatal err - command not completed\n");
+ ret = -EPROTO;
+ }
+ cmd_status = readl(cdns_xspi->iobase + CDNS_XSPI_CMD_STATUS_REG);
+ retry_count--;
+ }
+ }
+
+ return ret;
+}
+
+static void cdns_xspi_set_interrupts(struct cdns_xspi_dev *cdns_xspi,
+ bool enabled)
+{
+ u32 intr_enable;
+
+ if (!cdns_xspi->irq)
+ return;
+
+ intr_enable = readl(cdns_xspi->iobase + CDNS_XSPI_INTR_ENABLE_REG);
+ if (enabled)
+ intr_enable |= CDNS_XSPI_INTR_MASK;
+ else
+ intr_enable &= ~CDNS_XSPI_INTR_MASK;
+ writel(intr_enable, cdns_xspi->iobase + CDNS_XSPI_INTR_ENABLE_REG);
+}
+
+static int cdns_xspi_controller_init(struct cdns_xspi_dev *cdns_xspi)
+{
+ u32 ctrl_ver;
+ u32 ctrl_features;
+ u16 hw_magic_num;
+
+ ctrl_ver = readl(cdns_xspi->iobase + CDNS_XSPI_CTRL_VERSION_REG);
+ hw_magic_num = FIELD_GET(CDNS_XSPI_MAGIC_NUM, ctrl_ver);
+ if (hw_magic_num != CDNS_XSPI_MAGIC_NUM_VALUE) {
+ dev_err(cdns_xspi->dev,
+ "Incorrect XSPI magic number: %x, expected: %x\n",
+ hw_magic_num, CDNS_XSPI_MAGIC_NUM_VALUE);
+ return -EIO;
+ }
+
+ writel(FIELD_PREP(CDNS_XSPI_CTRL_WORK_MODE, CDNS_XSPI_WORK_MODE_STIG),
+ cdns_xspi->iobase + CDNS_XSPI_CTRL_CONFIG_REG);
+
+ ctrl_features = readl(cdns_xspi->iobase + CDNS_XSPI_CTRL_FEATURES_REG);
+ cdns_xspi->hw_num_banks = FIELD_GET(CDNS_XSPI_NUM_BANKS, ctrl_features);
+ cdns_xspi_set_interrupts(cdns_xspi, false);
+
+ return 0;
+}
+
+static void cdns_ioreadq(void __iomem *addr, void *buf, int len)
+{
+ int i = 0;
+ int rcount = len / 8;
+ int rcount_nf = len % 8;
+ uint64_t tmp;
+ uint64_t *buf64 = (uint64_t *)buf;
+
+ if (((uint64_t)buf % 8) == 0) {
+ for (i = 0; i < rcount; i++)
+ *buf64++ = readq(addr);
+ } else {
+ for (i = 0; i < rcount; i++) {
+ tmp = readq(addr);
+ memcpy(buf+(i*8), &tmp, 8);
+ }
+ }
+
+ if (rcount_nf != 0) {
+ tmp = readq(addr);
+ memcpy(buf+(i*8), &tmp, rcount_nf);
+ }
+}
+
+static void cdns_iowriteq(void __iomem *addr, const void *buf, int len)
+{
+ int i = 0;
+ int rcount = len / 8;
+ int rcount_nf = len % 8;
+ uint64_t tmp;
+ uint64_t *buf64 = (uint64_t *)buf;
+
+ if (((uint64_t)buf % 8) == 0) {
+ for (i = 0; i < rcount; i++)
+ writeq(*buf64++, addr);
+ } else {
+ for (i = 0; i < rcount; i++) {
+ memcpy(&tmp, buf+(i*8), 8);
+ writeq(tmp, addr);
+ }
+ }
+
+ if (rcount_nf != 0) {
+ memcpy(&tmp, buf+(i*8), rcount_nf);
+ writeq(tmp, addr);
+ }
+}
+
+static void cdns_xspi_sdma_memread(struct cdns_xspi_dev *cdns_xspi,
+ enum cdns_xspi_sdma_size size, int len)
+{
+ switch (size) {
+ case CDNS_XSPI_SDMA_SIZE_8B:
+ ioread8_rep(cdns_xspi->sdmabase,
+ cdns_xspi->in_buffer, len);
+ break;
+ case CDNS_XSPI_SDMA_SIZE_64B:
+ cdns_ioreadq(cdns_xspi->sdmabase, cdns_xspi->in_buffer, len);
+ break;
+ }
+}
+
+static void cdns_xspi_sdma_memwrite(struct cdns_xspi_dev *cdns_xspi,
+ enum cdns_xspi_sdma_size size, int len)
+{
+ switch (size) {
+ case CDNS_XSPI_SDMA_SIZE_8B:
+ iowrite8_rep(cdns_xspi->sdmabase,
+ cdns_xspi->out_buffer, len);
+ break;
+ case CDNS_XSPI_SDMA_SIZE_64B:
+ cdns_iowriteq(cdns_xspi->sdmabase, cdns_xspi->out_buffer, len);
+ break;
+ }
+}
+
+static void cdns_xspi_sdma_handle(struct cdns_xspi_dev *cdns_xspi)
+{
+ u32 sdma_size, sdma_trd_info;
+ u8 sdma_dir;
+
+ sdma_size = readl(cdns_xspi->iobase + CDNS_XSPI_SDMA_SIZE_REG);
+ sdma_trd_info = readl(cdns_xspi->iobase + CDNS_XSPI_SDMA_TRD_INFO_REG);
+ sdma_dir = FIELD_GET(CDNS_XSPI_SDMA_DIR, sdma_trd_info);
+
+ switch (sdma_dir) {
+ case CDNS_XSPI_SDMA_DIR_READ:
+ cdns_xspi_sdma_memread(cdns_xspi,
+ cdns_xspi->read_size,
+ sdma_size);
+ break;
+
+ case CDNS_XSPI_SDMA_DIR_WRITE:
+ cdns_xspi_sdma_memwrite(cdns_xspi,
+ cdns_xspi->read_size,
+ sdma_size);
+ break;
+ }
+}
+
+static bool cdns_xspi_stig_ready(struct cdns_xspi_dev *cdns_xspi, bool sleep)
+{
+ u32 ctrl_stat;
+
+ return readl_relaxed_poll_timeout
+ (cdns_xspi->iobase + CDNS_XSPI_CTRL_STATUS_REG,
+ ctrl_stat,
+ ((ctrl_stat & BIT(3)) == 0),
+ sleep ? 10 : 0,
+ sleep ? 1000 : 0);
+}
+
+static bool cdns_xspi_sdma_ready(struct cdns_xspi_dev *cdns_xspi, bool sleep)
+{
+ u32 ctrl_stat;
+
+ return readl_relaxed_poll_timeout
+ (cdns_xspi->iobase + CDNS_XSPI_INTR_STATUS_REG,
+ ctrl_stat,
+ (ctrl_stat & CDNS_XSPI_SDMA_TRIGGER),
+ sleep ? 10 : 0,
+ sleep ? 1000 : 0);
+}
+
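+/*
+ * STIG command flow (as implemented below): wait for the controller to go
+ * idle, program CMD_REG_5..0 with the instruction, optionally program a
+ * second data-sequence instruction and service the resulting SDMA request,
+ * then wait for STIG completion (IRQ or polling) and check the command
+ * status register.
+ */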
+static int cdns_xspi_send_stig_command(struct cdns_xspi_dev *cdns_xspi,
+ const struct spi_mem_op *op,
+ bool data_phase,
+ bool pstore_sleep)
+{
+ u32 cmd_regs[6];
+ u32 cmd_status;
+ int ret;
+
+ ret = cdns_xspi_wait_for_controller_idle(cdns_xspi);
+ if (ret < 0)
+ return -EIO;
+
+ writel(FIELD_PREP(CDNS_XSPI_CTRL_WORK_MODE, CDNS_XSPI_WORK_MODE_STIG),
+ cdns_xspi->iobase + CDNS_XSPI_CTRL_CONFIG_REG);
+
+ cdns_xspi_set_interrupts(cdns_xspi, true);
+ cdns_xspi->sdma_error = false;
+
+ memset(cmd_regs, 0, sizeof(cmd_regs));
+ cmd_regs[1] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_1(op, data_phase);
+ cmd_regs[2] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_2(op);
+ cmd_regs[3] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_3(op);
+ cmd_regs[4] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_4(op,
+ cdns_xspi->cur_cs);
+
+ cdns_xspi_trigger_command(cdns_xspi, cmd_regs);
+
+ if (data_phase) {
+ cmd_regs[0] = CDNS_XSPI_STIG_DONE_FLAG;
+ cmd_regs[1] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_1(op);
+ cmd_regs[2] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_2(op);
+ cmd_regs[3] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_3(op);
+ cmd_regs[4] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_4(op,
+ cdns_xspi->cur_cs);
+
+ cdns_xspi->in_buffer = op->data.buf.in;
+ cdns_xspi->out_buffer = op->data.buf.out;
+
+ cdns_xspi_trigger_command(cdns_xspi, cmd_regs);
+
+ if (cdns_xspi->irq) {
+ wait_for_completion(&cdns_xspi->sdma_complete);
+ if (cdns_xspi->sdma_error) {
+ cdns_xspi_set_interrupts(cdns_xspi, false);
+ return -EIO;
+ }
+ } else {
+ if (cdns_xspi_sdma_ready(cdns_xspi, pstore_sleep))
+ return -EIO;
+ }
+ cdns_xspi_sdma_handle(cdns_xspi);
+ }
+
+ if (cdns_xspi->irq) {
+ wait_for_completion(&cdns_xspi->cmd_complete);
+ cdns_xspi_set_interrupts(cdns_xspi, false);
+ } else {
+ if (cdns_xspi_stig_ready(cdns_xspi, pstore_sleep))
+ return -EIO;
+ }
+
+ cmd_status = cdns_xspi_check_command_status(cdns_xspi);
+ if (cmd_status)
+ return -EPROTO;
+
+ return 0;
+}
+
+static int cdns_xspi_mem_op(struct cdns_xspi_dev *cdns_xspi,
+ struct spi_mem *mem,
+ const struct spi_mem_op *op,
+ bool pstore)
+{
+ enum spi_mem_data_dir dir = op->data.dir;
+
+ if (cdns_xspi->cur_cs != mem->spi->chip_select)
+ cdns_xspi->cur_cs = mem->spi->chip_select;
+
+ return cdns_xspi_send_stig_command(cdns_xspi, op,
+ (dir != SPI_MEM_NO_DATA),
+ !pstore);
+}
+
+static int cdns_xspi_mem_op_execute(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ struct cdns_xspi_dev *cdns_xspi =
+ spi_master_get_devdata(mem->spi->master);
+ struct spi_nor *nor = spi_mem_get_drvdata(mem);
+ int ret = 0;
+
+ ret = cdns_xspi_mem_op(cdns_xspi, mem, op, nor->pstore);
+
+ return ret;
+}
+
+static int cdns_xspi_adjust_mem_op_size(struct spi_mem *mem, struct spi_mem_op *op)
+{
+ struct cdns_xspi_dev *cdns_xspi =
+ spi_master_get_devdata(mem->spi->master);
+
+ op->data.nbytes = clamp_val(op->data.nbytes, 0, cdns_xspi->sdmasize);
+
+ return 0;
+}
+
+static const struct spi_controller_mem_ops cadence_xspi_mem_ops = {
+ .exec_op = cdns_xspi_mem_op_execute,
+ .adjust_op_size = cdns_xspi_adjust_mem_op_size,
+};
+
+static irqreturn_t cdns_xspi_irq_handler(int this_irq, void *dev)
+{
+ struct cdns_xspi_dev *cdns_xspi = dev;
+ u32 irq_status;
+ irqreturn_t result = IRQ_NONE;
+
+ irq_status = readl(cdns_xspi->iobase + CDNS_XSPI_INTR_STATUS_REG);
+ writel(irq_status, cdns_xspi->iobase + CDNS_XSPI_INTR_STATUS_REG);
+ writel(0x01, cdns_xspi->auxbase + CDNS_XSPI_SPIX_INTR_AUX);
+
+ if (irq_status &
+ (CDNS_XSPI_SDMA_ERROR | CDNS_XSPI_SDMA_TRIGGER |
+ CDNS_XSPI_STIG_DONE)) {
+ if (irq_status & CDNS_XSPI_SDMA_ERROR) {
+ dev_err(cdns_xspi->dev,
+ "Slave DMA transaction error\n");
+ cdns_xspi->sdma_error = true;
+ complete(&cdns_xspi->sdma_complete);
+ }
+
+ if (irq_status & CDNS_XSPI_SDMA_TRIGGER)
+ complete(&cdns_xspi->sdma_complete);
+
+ if (irq_status & CDNS_XSPI_STIG_DONE)
+ complete(&cdns_xspi->cmd_complete);
+
+ result = IRQ_HANDLED;
+ }
+
+ irq_status = readl(cdns_xspi->iobase + CDNS_XSPI_TRD_COMP_INTR_STATUS);
+ if (irq_status) {
+ writel(irq_status,
+ cdns_xspi->iobase + CDNS_XSPI_TRD_COMP_INTR_STATUS);
+
+ complete(&cdns_xspi->auto_cmd_complete);
+
+ result = IRQ_HANDLED;
+ }
+
+ return result;
+}
+
+static int cdns_xspi_of_get_plat_data(struct platform_device *pdev)
+{
+ struct device_node *node_prop = pdev->dev.of_node;
+ struct device_node *node_child;
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct cdns_xspi_dev *cdns_xspi = spi_master_get_devdata(master);
+ unsigned int cs;
+ unsigned int read_size = 0;
+
+ if (of_property_read_u32(node_prop, "cdns,read-size", &read_size))
+ dev_info(&pdev->dev, "Missing read size property, using byte access\n");
+ cdns_xspi->read_size = read_size;
+
+ for_each_child_of_node(node_prop, node_child) {
+ if (!of_device_is_available(node_child))
+ continue;
+
+ if (of_property_read_u32(node_child, "reg", &cs)) {
+ dev_err(&pdev->dev, "Couldn't get memory chip select\n");
+ of_node_put(node_child);
+ return -ENXIO;
+ } else if (cs >= CDNS_XSPI_MAX_BANKS) {
+ dev_err(&pdev->dev, "reg (cs) parameter value too large\n");
+ of_node_put(node_child);
+ return -ENXIO;
+ }
+ }
+
+ return 0;
+}
+
+static void cdns_xspi_print_phy_config(struct cdns_xspi_dev *cdns_xspi)
+{
+ struct device *dev = cdns_xspi->dev;
+
+ dev_info(dev, "PHY configuration\n");
+ dev_info(dev, " * xspi_dll_phy_ctrl: %08x\n",
+ readl(cdns_xspi->iobase + CDNS_XSPI_DLL_PHY_CTRL));
+ dev_info(dev, " * phy_dq_timing: %08x\n",
+ readl(cdns_xspi->auxbase + CDNS_XSPI_CCP_PHY_DQ_TIMING));
+ dev_info(dev, " * phy_dqs_timing: %08x\n",
+ readl(cdns_xspi->auxbase + CDNS_XSPI_CCP_PHY_DQS_TIMING));
+ dev_info(dev, " * phy_gate_loopback_ctrl: %08x\n",
+ readl(cdns_xspi->auxbase + CDNS_XSPI_CCP_PHY_GATE_LPBCK_CTRL));
+ dev_info(dev, " * phy_dll_slave_ctrl: %08x\n",
+ readl(cdns_xspi->auxbase + CDNS_XSPI_CCP_PHY_DLL_SLAVE_CTRL));
+}
+
+static int cdns_xspi_setup(struct spi_device *spi_dev)
+{
+ struct cdns_xspi_dev *cdns_xspi = spi_master_get_devdata(spi_dev->master);
+
+ cdns_xspi_setup_clock(cdns_xspi, spi_dev->max_speed_hz);
+
+ return 0;
+}
+
+static int cdns_xspi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct spi_master *master = NULL;
+ struct cdns_xspi_dev *cdns_xspi = NULL;
+ struct resource *res;
+ int ret;
+
+ master = devm_spi_alloc_master(dev, sizeof(*cdns_xspi));
+ if (!master)
+ return -ENOMEM;
+
+ master->mode_bits = SPI_3WIRE | SPI_TX_DUAL | SPI_TX_QUAD |
+ SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_OCTAL | SPI_RX_OCTAL |
+ SPI_MODE_0 | SPI_MODE_3;
+
+ master->mem_ops = &cadence_xspi_mem_ops;
+ master->setup = cdns_xspi_setup;
+ master->dev.of_node = pdev->dev.of_node;
+ master->bus_num = -1;
+
+ platform_set_drvdata(pdev, master);
+
+ cdns_xspi = spi_master_get_devdata(master);
+ cdns_xspi->pdev = pdev;
+ cdns_xspi->dev = &pdev->dev;
+ cdns_xspi->cur_cs = 0;
+
+ init_completion(&cdns_xspi->cmd_complete);
+ init_completion(&cdns_xspi->auto_cmd_complete);
+ init_completion(&cdns_xspi->sdma_complete);
+
+ ret = cdns_xspi_of_get_plat_data(pdev);
+ if (ret)
+ return -ENODEV;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "io");
+ cdns_xspi->iobase = devm_ioremap_resource(dev, res);
+ if (IS_ERR(cdns_xspi->iobase)) {
+ dev_err(dev, "Failed to remap controller base address\n");
+ return PTR_ERR(cdns_xspi->iobase);
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sdma");
+ cdns_xspi->sdmabase = devm_ioremap_resource(dev, res);
+ if (IS_ERR(cdns_xspi->sdmabase)) {
+ dev_err(dev, "Failed to remap SDMA address\n");
+ return PTR_ERR(cdns_xspi->sdmabase);
+ }
+ cdns_xspi->sdmasize = resource_size(res);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "aux");
+ cdns_xspi->auxbase = devm_ioremap_resource(dev, res);
+ if (IS_ERR(cdns_xspi->auxbase)) {
+ dev_err(dev, "Failed to remap AUX address\n");
+ return PTR_ERR(cdns_xspi->auxbase);
+ }
+
+ cdns_xspi->irq = platform_get_irq(pdev, 0);
+ if (cdns_xspi->irq < 0) {
+ dev_err(dev, "Failed to get IRQ, switching to polling mode\n");
+ cdns_xspi->irq = 0;
+ }
+
+ if (cdns_xspi->irq) {
+ ret = devm_request_irq(dev, cdns_xspi->irq, cdns_xspi_irq_handler,
+ IRQF_SHARED, pdev->name, cdns_xspi);
+ if (ret) {
+ dev_err(dev, "Failed to request IRQ: %d\n", cdns_xspi->irq);
+ return ret;
+ }
+ }
+
+ cdns_xspi_setup_clock(cdns_xspi, 25000000);
+ cdns_xspi_configure_phy(cdns_xspi);
+
+ cdns_xspi_print_phy_config(cdns_xspi);
+ ret = cdns_xspi_controller_init(cdns_xspi);
+ if (ret) {
+ dev_err(dev, "Failed to initialize controller\n");
+ return ret;
+ }
+
+ master->num_chipselect = 1 << cdns_xspi->hw_num_banks;
+
+ ret = devm_spi_register_master(dev, master);
+ if (ret) {
+ dev_err(dev, "Failed to register SPI master\n");
+ return ret;
+ }
+
+ dev_info(dev, "Successfully registered SPI master\n");
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id cdns_xspi_of_match[] = {
+ {
+ .compatible = "cdns,xspi-nor",
+ },
+ { /* end of table */}
+};
+MODULE_DEVICE_TABLE(of, cdns_xspi_of_match);
+#else
+#define cdns_xspi_of_match NULL
+#endif /* CONFIG_OF */
+
+static struct platform_driver cdns_xspi_platform_driver = {
+ .probe = cdns_xspi_probe,
+ .remove = NULL,
+ .driver = {
+ .name = CDNS_XSPI_NAME,
+ .of_match_table = cdns_xspi_of_match,
+ },
+};
+
+module_platform_driver(cdns_xspi_platform_driver);
+
+MODULE_DESCRIPTION("Cadence XSPI Controller Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" CDNS_XSPI_NAME);
+MODULE_AUTHOR("Konrad Kociolek <konrad@cadence.com>");
+MODULE_AUTHOR("Jayshri Pawar <jpawar@cadence.com>");
+MODULE_AUTHOR("Parshuram Thombare <pthombar@cadence.com>");
diff --git a/drivers/spi/spi-cavium-thunderx.c b/drivers/spi/spi-cavium-thunderx.c
index 60c0d6934654..7a8f6080ce38 100644
--- a/drivers/spi/spi-cavium-thunderx.c
+++ b/drivers/spi/spi-cavium-thunderx.c
@@ -16,6 +16,11 @@
#define SYS_FREQ_DEFAULT 700000000 /* 700 Mhz */
+#define PCI_DEVICE_ID_THUNDER_SPI 0xA00B
+#define PCI_SUBSYS_DEVID_88XX_SPI 0xA10B
+#define PCI_SUBSYS_DEVID_81XX_SPI 0xA20B
+#define PCI_SUBSYS_DEVID_83XX_SPI 0xA30B
+
static int thunderx_spi_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -104,7 +109,18 @@ static void thunderx_spi_remove(struct pci_dev *pdev)
}
static const struct pci_device_id thunderx_spi_pci_id_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xa00b) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_THUNDER_SPI,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_88XX_SPI) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_THUNDER_SPI,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_81XX_SPI) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_THUNDER_SPI,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_83XX_SPI) },
{ 0, }
};
diff --git a/drivers/spi/spi-octeontx2.c b/drivers/spi/spi-octeontx2.c
new file mode 100644
index 000000000000..8e063b2cbdd5
--- /dev/null
+++ b/drivers/spi/spi-octeontx2.c
@@ -0,0 +1,392 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Marvell OcteonTX2 SPI driver.
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ */
+
+#include <linux/of.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/spi/spi.h>
+
+#include "spi-octeontx2.h"
+
+#define DRV_NAME "spi-octeontx2"
+
+#define TBI_FREQ 100000000 /* 100 MHz */
+#define SYS_FREQ_DEFAULT 700000000 /* 700 MHz */
+
+static unsigned int tbi_clk_en = 1;
+module_param(tbi_clk_en, uint, 0644);
+MODULE_PARM_DESC(tbi_clk_en,
+ "Use Fixed Time Base 100MHz Reference Clock (0=Disable, 1=Enable [default])");
+
+static unsigned int cfg_mode_delay = 30;
+module_param(cfg_mode_delay, uint, 0644);
+MODULE_PARM_DESC(cfg_mode_delay,
+ "Delay in micro-seconds for mode change in MPI CFG register (30 [default])");
+
+static void octeontx2_spi_wait_ready(struct octeontx2_spi *p)
+{
+ union mpix_sts mpi_sts;
+ unsigned int loops = 0;
+
+ mpi_sts.u64 = 0; /* Prevents infinite loop */
+ do {
+ if (loops++)
+ __delay(500);
+ mpi_sts.u64 = readq(p->register_base + OCTEONTX2_SPI_STS(p));
+ } while (mpi_sts.s.busy);
+}
+
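+/*
+ * The SPI clock is derived as sys_freq / (2 * clkdiv); clkdiv is bumped
+ * until the resulting rate does not exceed xfer->speed_hz.  Example
+ * (illustrative): with the 100 MHz time-base reference and a 25 MHz
+ * request, clkdiv = 2 gives exactly 25 MHz.
+ */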
+static int octeontx2_spi_do_transfer(struct octeontx2_spi *p,
+ struct spi_message *msg,
+ struct spi_transfer *xfer,
+ bool last_xfer,
+ int cs)
+{
+ struct spi_device *spi = msg->spi;
+ union mpix_cfg mpi_cfg;
+ union mpix_xmit mpi_xmit;
+ unsigned int clkdiv, calc_spd;
+ int mode;
+ bool cpha, cpol;
+ const u8 *tx_buf;
+ u8 *rx_buf;
+ int len, rem;
+ int i;
+ void __iomem *wbuf_ptr = p->register_base + OCTEONTX2_SPI_WBUF(p);
+ void __iomem *rx_ptr = wbuf_ptr;
+
+ mode = spi->mode;
+ cpha = mode & SPI_CPHA;
+ cpol = mode & SPI_CPOL;
+
+ clkdiv = p->sys_freq / (2 * xfer->speed_hz);
+ /* Perform check to not exceed requested speed */
+ while (1) {
+ calc_spd = p->sys_freq / (2 * clkdiv);
+ if (calc_spd <= xfer->speed_hz)
+ break;
+ clkdiv += 1;
+ }
+
+ if ((clkdiv > 8191) || (!tbi_clk_en && (clkdiv == 1))) {
+ dev_err(&spi->dev,
+ "can't support xfer->speed_hz %d for reference clock %d\n",
+ xfer->speed_hz, p->sys_freq);
+ return -EINVAL;
+ }
+
+ mpi_cfg.u64 = 0;
+
+ mpi_cfg.s.clkdiv = clkdiv;
+ mpi_cfg.s.cshi = (mode & SPI_CS_HIGH) ? 1 : 0;
+ mpi_cfg.s.lsbfirst = (mode & SPI_LSB_FIRST) ? 1 : 0;
+ mpi_cfg.s.wireor = (mode & SPI_3WIRE) ? 1 : 0;
+ mpi_cfg.s.idlelo = cpha != cpol;
+ mpi_cfg.s.cslate = cpha ? 1 : 0;
+ mpi_cfg.s.tritx = 1;
+ mpi_cfg.s.enable = 1;
+ mpi_cfg.s.cs_sticky = 1;
+ mpi_cfg.s.legacy_dis = 1;
+ if (tbi_clk_en)
+ mpi_cfg.s.tb100_en = 1;
+
+ /* Set x1 mode as default */
+ mpi_cfg.s.iomode = 0;
+ /* Set x2 mode if either tx or rx request dual */
+ if ((xfer->tx_nbits == SPI_NBITS_DUAL) ||
+ (xfer->rx_nbits == SPI_NBITS_DUAL))
+ mpi_cfg.s.iomode = 2;
+ /* Set x4 mode if either tx or rx request quad */
+ if ((xfer->tx_nbits == SPI_NBITS_QUAD) ||
+ (xfer->rx_nbits == SPI_NBITS_QUAD))
+ mpi_cfg.s.iomode = 3;
+
+ p->cs_enax |= (0xFull << 12);
+ mpi_cfg.u64 |= p->cs_enax;
+
+ if (mpi_cfg.u64 != p->last_cfg) {
+ p->last_cfg = mpi_cfg.u64;
+ writeq(mpi_cfg.u64, p->register_base + OCTEONTX2_SPI_CFG(p));
+ mpi_cfg.u64 = readq(p->register_base + OCTEONTX2_SPI_CFG(p));
+ udelay(cfg_mode_delay); /* allow CS change to settle */
+ }
+ tx_buf = xfer->tx_buf;
+ rx_buf = xfer->rx_buf;
+ len = xfer->len;
+
+ /* Except on T96 A0, use the rcvdx register for x1 uni-directional mode */
+ if (!mpi_cfg.s.iomode && p->rcvd_present)
+ rx_ptr = p->register_base + OCTEONTX2_SPI_RCVD(p);
+
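+ /*
+ * Move data through the OCTEONTX2_SPI_MAX_BYTES (1 KB) WBUF window in
+ * full-sized chunks first; the tail is handled after the loop.
+ */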
+ while (len > OCTEONTX2_SPI_MAX_BYTES) {
+ if (tx_buf) {
+ /* 8 bytes per iteration */
+ for (i = 0; i < OCTEONTX2_SPI_MAX_BYTES / 8; i++) {
+ u64 data = *(uint64_t *)tx_buf;
+
+ tx_buf += 8;
+ writeq(data, wbuf_ptr + (8 * i));
+ }
+ }
+ mpi_xmit.u64 = 0;
+ mpi_xmit.s.csid = cs;
+ mpi_xmit.s.leavecs = 1;
+ mpi_xmit.s.txnum = tx_buf ? OCTEONTX2_SPI_MAX_BYTES : 0;
+ mpi_xmit.s.totnum = OCTEONTX2_SPI_MAX_BYTES;
+ writeq(mpi_xmit.u64, p->register_base + OCTEONTX2_SPI_XMIT(p));
+
+ octeontx2_spi_wait_ready(p);
+ if (rx_buf) {
+ /* 8 bytes per iteration */
+ for (i = 0; i < OCTEONTX2_SPI_MAX_BYTES / 8; i++) {
+ u64 v;
+
+ v = readq(rx_ptr + (8 * i));
+ *(uint64_t *)rx_buf = v;
+ rx_buf += 8;
+ }
+ }
+ len -= OCTEONTX2_SPI_MAX_BYTES;
+ }
+
+ rem = len % 8;
+
+ if (tx_buf) {
+ u64 data;
+ /* 8 bytes per iteration */
+ for (i = 0; i < len / 8; i++) {
+ data = *(uint64_t *)tx_buf;
+ tx_buf += 8;
+ writeq(data, wbuf_ptr + (8 * i));
+ }
+ /* remaining <8 bytes */
+ if (rem) {
+ data = 0;
+ memcpy(&data, tx_buf, rem);
+ writeq(data, wbuf_ptr + (8 * i));
+ }
+ }
+
+ mpi_xmit.u64 = 0;
+ mpi_xmit.s.csid = cs;
+ if (last_xfer)
+ mpi_xmit.s.leavecs = xfer->cs_change;
+ else
+ mpi_xmit.s.leavecs = !xfer->cs_change;
+ mpi_xmit.s.txnum = tx_buf ? len : 0;
+ mpi_xmit.s.totnum = len;
+ writeq(mpi_xmit.u64, p->register_base + OCTEONTX2_SPI_XMIT(p));
+
+ octeontx2_spi_wait_ready(p);
+ if (rx_buf) {
+ u64 v;
+ /* 8 bytes per iteration */
+ for (i = 0; i < len / 8; i++) {
+ v = readq(rx_ptr + (8 * i));
+ *(uint64_t *)rx_buf = v;
+ rx_buf += 8;
+ }
+ /* remaining <8 bytes */
+ if (rem) {
+ v = readq(rx_ptr + (8 * i));
+ memcpy(rx_buf, &v, rem);
+ rx_buf += rem;
+ }
+ }
+
+ if (xfer->delay_usecs)
+ udelay(xfer->delay_usecs);
+
+ return xfer->len;
+}
+
+int octeontx2_spi_transfer_one_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct octeontx2_spi *p = spi_master_get_devdata(master);
+ unsigned int total_len = 0;
+ int status = 0;
+ struct spi_transfer *xfer;
+ int cs = msg->spi->chip_select;
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ bool last_xfer = list_is_last(&xfer->transfer_list,
+ &msg->transfers);
+ int r = octeontx2_spi_do_transfer(p, msg, xfer, last_xfer, cs);
+
+ if (r < 0) {
+ status = r;
+ goto err;
+ }
+ total_len += r;
+ }
+err:
+ msg->status = status;
+ msg->actual_length = total_len;
+ spi_finalize_current_message(master);
+ return status;
+}
+
+static int octeontx2_spi_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ struct spi_master *master;
+ struct octeontx2_spi *p;
+ union mpix_sts mpi_sts;
+ int ret = -ENOENT;
+
+ /* may need to hunt for devtree entry */
+ if (!pdev->dev.of_node) {
+ struct device_node *np = of_find_node_by_name(NULL, "spi");
+
+ if (!np) {
+ ret = -ENOENT;
+ goto error;
+ }
+ pdev->dev.of_node = np;
+ of_node_put(np);
+ }
+
+ master = spi_alloc_master(dev, sizeof(struct octeontx2_spi));
+ if (!master)
+ return -ENOMEM;
+
+ p = spi_master_get_devdata(master);
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ goto error_put;
+
+ ret = pci_request_regions(pdev, DRV_NAME);
+ if (ret)
+ goto error_disable;
+
+ p->register_base = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0));
+ if (!p->register_base) {
+ ret = -EINVAL;
+ goto error_disable;
+ }
+
+ p->regs.config = 0x1000;
+ p->regs.status = 0x1008;
+ p->regs.xmit = 0x1018;
+ p->regs.wbuf = 0x1800;
+ p->regs.rcvd = 0x2800;
+ p->last_cfg = 0x0;
+
+ mpi_sts.u64 = readq(p->register_base + OCTEONTX2_SPI_STS(p));
+ p->rcvd_present = mpi_sts.u64 & 0x4 ? true : false;
+
+ /* FIXME: need a proper clocksource object for SCLK */
+ p->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(p->clk)) {
+ p->clk = devm_clk_get(dev, "sclk");
+ p->sys_freq = 0;
+ } else {
+ ret = clk_prepare_enable(p->clk);
+ if (!ret)
+ p->sys_freq = clk_get_rate(p->clk);
+ }
+
+ if (!p->sys_freq)
+ p->sys_freq = SYS_FREQ_DEFAULT;
+ if (tbi_clk_en)
+ p->sys_freq = TBI_FREQ;
+ dev_info(dev, "Reference clock is %u Hz\n", p->sys_freq);
+
+ master->num_chipselect = 4;
+ master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_CS_HIGH |
+ SPI_LSB_FIRST | SPI_3WIRE;
+ master->transfer_one_message = octeontx2_spi_transfer_one_message;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->max_speed_hz = OCTEONTX2_SPI_MAX_CLOCK_HZ;
+ master->dev.of_node = pdev->dev.of_node;
+
+ pci_set_drvdata(pdev, master);
+
+ ret = devm_spi_register_master(dev, master);
+ if (ret)
+ goto error_disable;
+
+ return 0;
+
+error_disable:
+ clk_disable_unprepare(p->clk);
+error_put:
+ spi_master_put(master);
+error:
+ return ret;
+}
+
+static void octeontx2_spi_remove(struct pci_dev *pdev)
+{
+ struct spi_master *master = pci_get_drvdata(pdev);
+ struct octeontx2_spi *p;
+
+ p = spi_master_get_devdata(master);
+ /* Put everything in a known state. */
+ if (p) {
+ clk_disable_unprepare(p->clk);
+ writeq(0, p->register_base + OCTEONTX2_SPI_CFG(p));
+ }
+
+ spi_master_put(master);
+}
+
+static const struct pci_device_id octeontx2_spi_pci_id_table[] = {
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_SPI,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_OTX2_98XX) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_SPI,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_OTX2_96XX) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_SPI,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_OTX2_95XX) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_SPI,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_OTX2_LOKI) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_SPI,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_OTX2_95MM) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_SPI,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_CN10K_A) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_SPI,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_CNF10K_A) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_SPI,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_CNF10K_B) },
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, octeontx2_spi_pci_id_table);
+
+static struct pci_driver octeontx2_spi_driver = {
+ .name = DRV_NAME,
+ .id_table = octeontx2_spi_pci_id_table,
+ .probe = octeontx2_spi_probe,
+ .remove = octeontx2_spi_remove,
+};
+
+module_pci_driver(octeontx2_spi_driver);
+
+MODULE_DESCRIPTION("OcteonTX2 SPI bus driver");
+MODULE_AUTHOR("Marvell Inc.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-octeontx2.h b/drivers/spi/spi-octeontx2.h
new file mode 100644
index 000000000000..c2d0c656d1e4
--- /dev/null
+++ b/drivers/spi/spi-octeontx2.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __SPI_OCTEONTX2_H
+#define __SPI_OCTEONTX2_H
+
+#include <linux/clk.h>
+
+#define PCI_DEVID_OCTEONTX2_SPI 0xA00B
+#define PCI_SUBSYS_DEVID_OTX2_98XX 0xB100
+#define PCI_SUBSYS_DEVID_OTX2_96XX 0xB200
+#define PCI_SUBSYS_DEVID_OTX2_95XX 0xB300
+#define PCI_SUBSYS_DEVID_OTX2_LOKI 0xB400
+#define PCI_SUBSYS_DEVID_OTX2_95MM 0xB500
+#define PCI_SUBSYS_DEVID_CN10K_A 0xB900
+#define PCI_SUBSYS_DEVID_CNF10K_A 0xBA00
+#define PCI_SUBSYS_DEVID_CNF10K_B 0xBC00
+
+#define OCTEONTX2_SPI_MAX_BYTES 1024
+#define OCTEONTX2_SPI_MAX_CLOCK_HZ 25000000
+
+struct octeontx2_spi_regs {
+ int config;
+ int status;
+ int xmit;
+ int wbuf;
+ int rcvd;
+};
+
+struct octeontx2_spi {
+ void __iomem *register_base;
+ u64 last_cfg;
+ u64 cs_enax;
+ int sys_freq;
+ bool rcvd_present;
+ struct octeontx2_spi_regs regs;
+ struct clk *clk;
+};
+
+#define OCTEONTX2_SPI_CFG(x) ((x)->regs.config)
+#define OCTEONTX2_SPI_STS(x) ((x)->regs.status)
+#define OCTEONTX2_SPI_XMIT(x) ((x)->regs.xmit)
+#define OCTEONTX2_SPI_WBUF(x) ((x)->regs.wbuf)
+#define OCTEONTX2_SPI_RCVD(x) ((x)->regs.rcvd)
+
+int octeontx2_spi_transfer_one_message(struct spi_master *master,
+ struct spi_message *msg);
+
+union mpix_cfg {
+ uint64_t u64;
+ struct mpix_cfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_50_63:14;
+ uint64_t tb100_en:1;
+ uint64_t reserved_48:1;
+ uint64_t cs_espi_en:4;
+ uint64_t reserved_36_43:8;
+ uint64_t iomode:2;
+ uint64_t reserved_32_33:2;
+ uint64_t legacy_dis:1;
+ uint64_t reserved_29_30:2;
+ uint64_t clkdiv:13;
+ uint64_t csena3:1;
+ uint64_t csena2:1;
+ uint64_t csena1:1;
+ uint64_t csena0:1;
+ uint64_t cslate:1;
+ uint64_t tritx:1;
+ uint64_t idleclks:2;
+ uint64_t cshi:1;
+ uint64_t reserved_6:1;
+ uint64_t cs_sticky:1;
+ uint64_t lsbfirst:1;
+ uint64_t wireor:1;
+ uint64_t clk_cont:1;
+ uint64_t idlelo:1;
+ uint64_t enable:1;
+#else
+ uint64_t enable:1;
+ uint64_t idlelo:1;
+ uint64_t clk_cont:1;
+ uint64_t wireor:1;
+ uint64_t lsbfirst:1;
+ uint64_t cs_sticky:1;
+ uint64_t reserved_6:1;
+ uint64_t cshi:1;
+ uint64_t idleclks:2;
+ uint64_t tritx:1;
+ uint64_t cslate:1;
+ uint64_t csena0:1;
+ uint64_t csena1:1;
+ uint64_t csena2:1;
+ uint64_t csena3:1;
+ uint64_t clkdiv:13;
+ uint64_t reserved_29_30:2;
+ uint64_t legacy_dis:1;
+ uint64_t reserved_32_33:2;
+ uint64_t iomode:2;
+ uint64_t reserved_36_43:8;
+ uint64_t cs_espi_en:4;
+ uint64_t reserved_48:1;
+ uint64_t tb100_en:1;
+ uint64_t reserved_50_63:14;
+#endif
+ } s;
+};
+
+union mpix_sts {
+ uint64_t u64;
+ struct mpix_sts_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_40_63:24;
+ uint64_t crc:8;
+ uint64_t reserved_27_31:5;
+ uint64_t crc_err:1;
+ uint64_t reserved_19_25:7;
+ uint64_t rxnum:11;
+ uint64_t reserved_2_7:6;
+ uint64_t mpi_intr:1;
+ uint64_t busy:1;
+#else
+ uint64_t busy:1;
+ uint64_t mpi_intr:1;
+ uint64_t reserved_2_7:6;
+ uint64_t rxnum:11;
+ uint64_t reserved_19_25:7;
+ uint64_t crc_err:1;
+ uint64_t reserved_27_31:5;
+ uint64_t crc:8;
+ uint64_t reserved_40_63:24;
+#endif
+ } s;
+};
+
+union mpix_xmit {
+ uint64_t u64;
+ struct mpix_xmit_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_63:1;
+ uint64_t csid:2;
+ uint64_t leavecs:1;
+ uint64_t reserved_31_59:29;
+ uint64_t txnum:11;
+ uint64_t reserved_11_19:9;
+ uint64_t totnum:11;
+#else
+ uint64_t totnum:11;
+ uint64_t reserved_11_19:9;
+ uint64_t txnum:11;
+ uint64_t reserved_31_59:29;
+ uint64_t leavecs:1;
+ uint64_t csid:2;
+ uint64_t reserved_63:1;
+#endif
+ } s;
+};
+#endif /* __SPI_OCTEONTX2_H */
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index b57b8b3cc26e..ae0cbf791d0e 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -96,10 +96,16 @@ struct orion_spi {
struct clk *clk;
struct clk *axi_clk;
const struct orion_spi_dev *devdata;
+ struct device *dev;
struct orion_child_options child[ORION_NUM_CHIPSELECTS];
};
+#ifdef CONFIG_PM
+static int orion_spi_runtime_suspend(struct device *dev);
+static int orion_spi_runtime_resume(struct device *dev);
+#endif
+
static inline void __iomem *spi_reg(struct orion_spi *orion_spi, u32 reg)
{
return orion_spi->base + reg;
@@ -369,8 +375,15 @@ orion_spi_write_read_8bit(struct spi_device *spi,
{
void __iomem *tx_reg, *rx_reg, *int_reg;
struct orion_spi *orion_spi;
+ bool cs_single_byte;
+
+ cs_single_byte = spi->mode & SPI_CS_WORD;
orion_spi = spi_master_get_devdata(spi->master);
+
+ if (cs_single_byte)
+ orion_spi_set_cs(spi, 0);
+
tx_reg = spi_reg(orion_spi, ORION_SPI_DATA_OUT_REG);
rx_reg = spi_reg(orion_spi, ORION_SPI_DATA_IN_REG);
int_reg = spi_reg(orion_spi, ORION_SPI_INT_CAUSE_REG);
@@ -384,6 +397,11 @@ orion_spi_write_read_8bit(struct spi_device *spi,
writel(0, tx_reg);
if (orion_spi_wait_till_ready(orion_spi) < 0) {
+ if (cs_single_byte) {
+ orion_spi_set_cs(spi, 1);
+			/* Satisfy the requirements of some SLIC devices */
+ udelay(4);
+ }
dev_err(&spi->dev, "TXS timed out\n");
return -1;
}
@@ -391,6 +409,12 @@ orion_spi_write_read_8bit(struct spi_device *spi,
if (rx_buf && *rx_buf)
*(*rx_buf)++ = readl(rx_reg);
+ if (cs_single_byte) {
+ orion_spi_set_cs(spi, 1);
+		/* Satisfy the requirements of some SLIC devices */
+ udelay(4);
+ }
+
return 1;
}
@@ -401,6 +425,11 @@ orion_spi_write_read_16bit(struct spi_device *spi,
void __iomem *tx_reg, *rx_reg, *int_reg;
struct orion_spi *orion_spi;
+ if (spi->mode & SPI_CS_WORD) {
+ dev_err(&spi->dev, "SPI_CS_WORD is only supported for 8 bit words\n");
+ return -1;
+ }
+
orion_spi = spi_master_get_devdata(spi->master);
tx_reg = spi_reg(orion_spi, ORION_SPI_DATA_OUT_REG);
rx_reg = spi_reg(orion_spi, ORION_SPI_DATA_IN_REG);
@@ -440,12 +469,13 @@ orion_spi_write_read(struct spi_device *spi, struct spi_transfer *xfer)
orion_spi = spi_master_get_devdata(spi->master);
/*
- * Use SPI direct write mode if base address is available. Otherwise
- * fall back to PIO mode for this transfer.
+ * Use SPI direct write mode if base address is available
+ * and SPI_CS_WORD flag is not set.
+ * Otherwise fall back to PIO mode for this transfer.
*/
vaddr = orion_spi->child[cs].direct_access.vaddr;
- if (vaddr && xfer->tx_buf && word_len == 8) {
+ if (vaddr && xfer->tx_buf && word_len == 8 && (spi->mode & SPI_CS_WORD) == 0) {
unsigned int cnt = count / 4;
unsigned int rem = count % 4;
@@ -507,7 +537,21 @@ static int orion_spi_transfer_one(struct spi_master *master,
static int orion_spi_setup(struct spi_device *spi)
{
- return orion_spi_setup_transfer(spi, NULL);
+ int ret;
+#ifdef CONFIG_PM
+ struct orion_spi *orion_spi = spi_master_get_devdata(spi->master);
+ struct device *dev = orion_spi->dev;
+
+ orion_spi_runtime_resume(dev);
+#endif
+
+ ret = orion_spi_setup_transfer(spi, NULL);
+
+#ifdef CONFIG_PM
+ orion_spi_runtime_suspend(dev);
+#endif
+
+ return ret;
}
static int orion_spi_reset(struct orion_spi *orion_spi)
@@ -558,6 +602,13 @@ static const struct orion_spi_dev armada_380_spi_dev_data = {
.is_errata_50mhz_ac = true,
};
+static const struct orion_spi_dev armada_cp110_spi_dev_data = {
+ .typ = ARMADA_SPI,
+ .max_hz = 41000000,
+ .max_divisor = 1920,
+ .prescale_mask = ARMADA_SPI_CLK_PRESCALE_MASK,
+};
+
static const struct of_device_id orion_spi_of_match_table[] = {
{
.compatible = "marvell,orion-spi",
@@ -580,6 +631,10 @@ static const struct of_device_id orion_spi_of_match_table[] = {
.data = &armada_xp_spi_dev_data,
},
{
+ .compatible = "marvell,armada-cp110-spi",
+ .data = &armada_cp110_spi_dev_data,
+ },
+ {
.compatible = "marvell,armada-xp-spi",
.data = &armada_xp_spi_dev_data,
},
@@ -616,7 +671,7 @@ static int orion_spi_probe(struct platform_device *pdev)
}
/* we support all 4 SPI modes and LSB first option */
- master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_LSB_FIRST;
+ master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_LSB_FIRST | SPI_CS_WORD;
master->set_cs = orion_spi_set_cs;
master->transfer_one = orion_spi_transfer_one;
master->num_chipselect = ORION_NUM_CHIPSELECTS;
@@ -630,6 +685,7 @@ static int orion_spi_probe(struct platform_device *pdev)
spi = spi_master_get_devdata(master);
spi->master = master;
+ spi->dev = &pdev->dev;
of_id = of_match_device(orion_spi_of_match_table, &pdev->dev);
devdata = (of_id) ? of_id->data : &orion_spi_dev_data;
diff --git a/drivers/thermal/armada_thermal.c b/drivers/thermal/armada_thermal.c
index c2ebfb5be4b3..8025bb335c79 100644
--- a/drivers/thermal/armada_thermal.c
+++ b/drivers/thermal/armada_thermal.c
@@ -4,6 +4,7 @@
*
* Copyright (C) 2013 Marvell
*/
+#include <linux/arm-smccc.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
@@ -18,6 +19,8 @@
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/interrupt.h>
+#include <linux/time.h>
+#include "soc/marvell/armada8k/fw.h"
#include "thermal_core.h"
@@ -62,6 +65,8 @@
#define STATUS_POLL_TIMEOUT_US 100000
#define OVERHEAT_INT_POLL_DELAY_MS 1000
+#define THERMAL_SUPPORTED_IN_FIRMWARE(priv)	((priv)->data->is_smc_supported)
+
struct armada_thermal_data;
/* Marvell EBU Thermal Sensor Dev Structure */
@@ -111,6 +116,12 @@ struct armada_thermal_data {
/* One sensor is in the thermal IC, the others are in the CPUs if any */
unsigned int cpu_nr;
+
+ /*
+ * Thermal sensor operations exposed as firmware SIP services and
+ * accessed via SMC
+ */
+ bool is_smc_supported;
};
struct armada_drvdata {
@@ -135,6 +146,18 @@ struct armada_thermal_sensor {
int id;
};
+static int thermal_smc(u32 addr, u32 *reg, u32 val1, u32 val2)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(MV_SIP_DFX, addr, val1, val2, 0, 0, 0, 0, &res);
+
+ if (res.a0 == 0 && reg != NULL)
+ *reg = res.a1;
+
+ return res.a0;
+}
+
static void armadaxp_init(struct platform_device *pdev,
struct armada_thermal_priv *priv)
{
@@ -206,6 +229,27 @@ static void armada375_init(struct platform_device *pdev,
static int armada_wait_sensor_validity(struct armada_thermal_priv *priv)
{
u32 reg;
+ int ret;
+ ktime_t timeout;
+
+ if (THERMAL_SUPPORTED_IN_FIRMWARE(priv)) {
+ timeout = ktime_add_us(ktime_get(), STATUS_POLL_TIMEOUT_US);
+ do {
+ ret = thermal_smc(MV_SIP_DFX_THERMAL_IS_VALID,
+ &reg, 0, 0);
+ if (ret || reg)
+ break;
+
+ usleep_range((STATUS_POLL_PERIOD_US >> 2) + 1,
+ STATUS_POLL_PERIOD_US);
+
+ } while (ktime_before(ktime_get(), timeout));
+
+ if (ret == SMCCC_RET_SUCCESS)
+ return reg ? 0 : -ETIMEDOUT;
+
+ return ret;
+ }
return regmap_read_poll_timeout(priv->syscon,
priv->data->syscon_status_off, reg,
@@ -233,11 +277,27 @@ static void armada380_init(struct platform_device *pdev,
regmap_write(priv->syscon, data->syscon_control0_off, reg);
}
-static void armada_ap806_init(struct platform_device *pdev,
+static void armada_ap80x_init(struct platform_device *pdev,
struct armada_thermal_priv *priv)
{
struct armada_thermal_data *data = priv->data;
u32 reg;
+ int ret;
+
+ /*
+	 * The ap806 thermal sensor registers are part of DFX, which is secured
+	 * by recent firmware, so the relevant registers cannot be accessed
+	 * from the non-secure world. In that case Arm Trusted Firmware exposes
+	 * the thermal operations as a firmware run-time service. If SMC
+	 * initialization succeeds, perform the remaining thermal operations
+	 * via SMC; otherwise (old firmware) fall back to regmap handling.
+ */
+	ret = thermal_smc(MV_SIP_DFX_THERMAL_INIT, NULL, 0, 0);
+ if (ret == SMCCC_RET_SUCCESS) {
+		dev_info(&pdev->dev, "using firmware SMC thermal services\n");
+ THERMAL_SUPPORTED_IN_FIRMWARE(priv) = true;
+ return;
+ }
regmap_read(priv->syscon, data->syscon_control0_off, &reg);
reg &= ~CONTROL0_TSEN_RESET;
@@ -274,11 +334,17 @@ static void armada_cp110_init(struct platform_device *pdev,
static bool armada_is_valid(struct armada_thermal_priv *priv)
{
+ int ret;
u32 reg;
if (!priv->data->is_valid_bit)
return true;
+ if (THERMAL_SUPPORTED_IN_FIRMWARE(priv)) {
+ ret = thermal_smc(MV_SIP_DFX_THERMAL_IS_VALID, &reg, 0, 0);
+ return ret ? false : reg;
+ }
+
regmap_read(priv->syscon, priv->data->syscon_status_off, &reg);
return reg & priv->data->is_valid_bit;
@@ -324,6 +390,7 @@ static int armada_select_channel(struct armada_thermal_priv *priv, int channel)
{
struct armada_thermal_data *data = priv->data;
u32 ctrl0;
+ int ret;
if (channel < 0 || channel > priv->data->cpu_nr)
return -EINVAL;
@@ -331,6 +398,16 @@ static int armada_select_channel(struct armada_thermal_priv *priv, int channel)
if (priv->current_channel == channel)
return 0;
+ if (THERMAL_SUPPORTED_IN_FIRMWARE(priv)) {
+ ret = thermal_smc(MV_SIP_DFX_THERMAL_SEL_CHANNEL,
+ NULL, channel, 0);
+ if (ret)
+ return ret;
+
+ priv->current_channel = channel;
+ goto is_valid;
+ }
+
/* Stop the measurements */
regmap_read(priv->syscon, data->syscon_control0_off, &ctrl0);
ctrl0 &= ~CONTROL0_TSEN_START;
@@ -357,6 +434,7 @@ static int armada_select_channel(struct armada_thermal_priv *priv, int channel)
ctrl0 |= CONTROL0_TSEN_START;
regmap_write(priv->syscon, data->syscon_control0_off, ctrl0);
+is_valid:
/*
* The IP has a latency of ~15ms, so after updating the selected source,
* we must absolutely wait for the sensor validity bit to ensure we read
@@ -376,6 +454,9 @@ static int armada_read_sensor(struct armada_thermal_priv *priv, int *temp)
u32 reg, div;
s64 sample, b, m;
+ if (THERMAL_SUPPORTED_IN_FIRMWARE(priv))
+ return thermal_smc(MV_SIP_DFX_THERMAL_READ, temp, 0, 0);
+
regmap_read(priv->syscon, priv->data->syscon_status_off, &reg);
reg = (reg >> priv->data->temp_shift) & priv->data->temp_mask;
if (priv->data->signed_sample)
@@ -559,7 +640,13 @@ static irqreturn_t armada_overheat_isr_thread(int irq, void *blob)
goto enable_irq;
} while (temperature >= low_threshold);
- regmap_read(priv->syscon, priv->data->dfx_irq_cause_off, &dummy);
+ if (THERMAL_SUPPORTED_IN_FIRMWARE(priv)) {
+		if (thermal_smc(MV_SIP_DFX_THERMAL_IRQ, NULL, 0, 0))
+ return IRQ_NONE;
+ } else {
+ regmap_read(priv->syscon, priv->data->dfx_irq_cause_off,
+ &dummy);
+ }
/* Notify the thermal core that the temperature is acceptable again */
thermal_zone_device_update(priv->overheat_sensor,
@@ -622,15 +709,39 @@ static const struct armada_thermal_data armada380_data = {
};
static const struct armada_thermal_data armada_ap806_data = {
- .init = armada_ap806_init,
+ .init = armada_ap80x_init,
.is_valid_bit = BIT(16),
.temp_shift = 0,
.temp_mask = 0x3ff,
.thresh_shift = 3,
.hyst_shift = 19,
.hyst_mask = 0x3,
- .coef_b = -150000LL,
- .coef_m = 423ULL,
+ .coef_b = -153400LL,
+ .coef_m = 425ULL,
+ .coef_div = 1,
+ .inverted = true,
+ .signed_sample = true,
+ .syscon_control0_off = 0x84,
+ .syscon_control1_off = 0x88,
+ .syscon_status_off = 0x8C,
+ .dfx_irq_cause_off = 0x108,
+ .dfx_irq_mask_off = 0x10C,
+ .dfx_overheat_irq = BIT(22),
+ .dfx_server_irq_mask_off = 0x104,
+ .dfx_server_irq_en = BIT(1),
+ .cpu_nr = 4,
+};
+
+static const struct armada_thermal_data armada_ap807_data = {
+ .init = armada_ap80x_init,
+ .is_valid_bit = BIT(16),
+ .temp_shift = 0,
+ .temp_mask = 0x3ff,
+ .thresh_shift = 3,
+ .hyst_shift = 19,
+ .hyst_mask = 0x3,
+ .coef_b = -128900LL,
+ .coef_m = 394ULL,
.coef_div = 1,
.inverted = true,
.signed_sample = true,
@@ -689,6 +800,10 @@ static const struct of_device_id armada_thermal_id_table[] = {
.data = &armada_ap806_data,
},
{
+ .compatible = "marvell,armada-ap807-thermal",
+ .data = &armada_ap807_data,
+ },
+ {
.compatible = "marvell,armada-cp110-thermal",
.data = &armada_cp110_data,
},
@@ -773,6 +888,27 @@ static void armada_set_sane_name(struct platform_device *pdev,
}
/*
+ * Let the firmware configure the thermal overheat threshold and hysteresis,
+ * and enable the overheat interrupt
+ */
+static int armada_fw_overheat_settings(struct armada_thermal_priv *priv,
+ int thresh_mc, int hyst_mc)
+{
+ int ret;
+
+ ret = thermal_smc(MV_SIP_DFX_THERMAL_THRESH, NULL, thresh_mc, hyst_mc);
+ if (ret)
+ return ret;
+
+ if (thresh_mc >= 0)
+ priv->current_threshold = thresh_mc;
+ if (hyst_mc >= 0)
+ priv->current_hysteresis = hyst_mc;
+
+ return 0;
+}
+
+/*
* The IP can manage to trigger interrupts on overheat situation from all the
* sensors. However, the interrupt source changes along with the last selected
* source (ie. the last read sensor), which is an inconsistent behavior. Avoid
@@ -803,11 +939,22 @@ static int armada_configure_overheat_int(struct armada_thermal_priv *priv,
if (ret)
return ret;
+ priv->overheat_sensor = tz;
+ priv->interrupt_source = sensor_id;
+
+ if (THERMAL_SUPPORTED_IN_FIRMWARE(priv)) {
+ /*
+		 * When thermal is supported in firmware, configuring the
+		 * overheat threshold and enabling the overheat interrupt
+		 * are done in one step.
+ */
+ return armada_fw_overheat_settings(priv, trips[i].temperature,
+ trips[i].hysteresis);
+ }
+
armada_set_overheat_thresholds(priv,
trips[i].temperature,
trips[i].hysteresis);
- priv->overheat_sensor = tz;
- priv->interrupt_source = sensor_id;
armada_enable_overheat_interrupt(priv);
diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
index 34ff2181afd1..948025490d20 100644
--- a/drivers/tty/serial/mvebu-uart.c
+++ b/drivers/tty/serial/mvebu-uart.c
@@ -238,6 +238,7 @@ static void mvebu_uart_rx_chars(struct uart_port *port, unsigned int status)
struct tty_port *tport = &port->state->port;
unsigned char ch = 0;
char flag = 0;
+ int ret;
do {
if (status & STAT_RX_RDY(port)) {
@@ -250,6 +251,16 @@ static void mvebu_uart_rx_chars(struct uart_port *port, unsigned int status)
port->icount.parity++;
}
+ /*
+ * For UART2, error bits are not cleared on buffer read.
+			 * This causes an interrupt loop and a system hang.
+ */
+ if (IS_EXTENDED(port) && (status & STAT_BRK_ERR)) {
+ ret = readl(port->membase + UART_STAT);
+ ret |= STAT_BRK_ERR;
+ writel(ret, port->membase + UART_STAT);
+ }
+
if (status & STAT_BRK_DET) {
port->icount.brk++;
status &= ~(STAT_FRM_ERR | STAT_PAR_ERR);
diff --git a/drivers/uio/Kconfig b/drivers/uio/Kconfig
index 202ee81cfc2b..16d713658c7f 100644
--- a/drivers/uio/Kconfig
+++ b/drivers/uio/Kconfig
@@ -165,4 +165,13 @@ config UIO_HV_GENERIC
to network and storage devices from userspace.
If you compile this as a module, it will be called uio_hv_generic.
+
+config UIO_PCI_EP
+ tristate "PCI EP driver"
+ depends on PCI
+ help
+	  Userspace I/O interface for the resources of a PCI EP function.
+	  This driver configures the PCI EP according to information
+	  received from the device tree and exposes the EP
+	  resources (BARs, MSIs) to userspace.
endif
diff --git a/drivers/uio/Makefile b/drivers/uio/Makefile
index c285dd2a4539..5cc746364835 100644
--- a/drivers/uio/Makefile
+++ b/drivers/uio/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_UIO_PRUSS) += uio_pruss.o
obj-$(CONFIG_UIO_MF624) += uio_mf624.o
obj-$(CONFIG_UIO_FSL_ELBC_GPCM) += uio_fsl_elbc_gpcm.o
obj-$(CONFIG_UIO_HV_GENERIC) += uio_hv_generic.o
+obj-$(CONFIG_UIO_PCI_EP) += uio_pci_ep.o
diff --git a/drivers/uio/uio_pci_ep.c b/drivers/uio/uio_pci_ep.c
new file mode 100644
index 000000000000..13036fd62a37
--- /dev/null
+++ b/drivers/uio/uio_pci_ep.c
@@ -0,0 +1,216 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Marvell International Ltd.
+ *
+ * UIO PCIe end point driver
+ *
+ * This driver exposes PCI EP resources to user space.
+ * It is currently coupled to the Armada PCI EP driver, but
+ * in the future it will use the standard PCI EP stack.
+ */
+
+#include <linux/armada-pcie-ep.h>
+#include <linux/device.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/pci_ids.h>
+#include <linux/pci_regs.h>
+#include <linux/uio_driver.h>
+
+struct uio_pci {
+ struct device *dev;
+ void *ep;
+ struct resource *host_map;
+};
+
+/* limit BAR maps so at least one mem region is left to map the host RAM */
+#define MAX_BAR_MAP 5
+
+/* export the BAR0/2 address/size, used by Facility */
+void __iomem *bar0_addr;
+EXPORT_SYMBOL(bar0_addr);
+size_t bar0_size;
+EXPORT_SYMBOL(bar0_size);
+void __iomem *bar2_addr;
+EXPORT_SYMBOL(bar2_addr);
+size_t bar2_size;
+EXPORT_SYMBOL(bar2_size);
+
+static int uio_pci_ep_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ void *ep;
+ struct uio_pci *uio_pci;
+ struct resource *res;
+ struct uio_info *info;
+ struct uio_mem *mem;
+ char *name;
+ int bar_id, mem_id;
+
+ ep = armada_pcie_ep_get();
+ if (!ep) {
+ dev_info(dev, "PCI EP probe deferred\n");
+ return -EPROBE_DEFER;
+ }
+
+ info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ uio_pci = devm_kzalloc(dev, sizeof(*uio_pci), GFP_KERNEL);
+ if (!uio_pci)
+ return -ENOMEM;
+
+ /* connect the objects */
+ platform_set_drvdata(pdev, info);
+ info->priv = uio_pci;
+ uio_pci->dev = dev;
+
+ /* store private data */
+ info->name = "uio_pci_ep_0";
+ info->version = "1.0.1";
+
+ /* setup the PCI EP topology. This should eventually move to the PCI
+ * EP Function driver
+ */
+ uio_pci->ep = ep;
+
+ /* Setup the BARs according to device tree */
+ for (bar_id = 0, mem_id = 0; bar_id < MAX_BAR_MAP;
+ mem_id++, bar_id++) {
+ name = devm_kzalloc(dev, 6 * sizeof(char), GFP_KERNEL);
+ if (name == NULL)
+ return -ENOMEM;
+ snprintf(name, 5, "bar%d", bar_id);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
+ if (!res) {
+			devm_kfree(dev, name);
+			dev_err(dev, "Did not find BAR-%d resource\n", bar_id);
+			return -ENODEV;
+ }
+ /* Setup the UIO memory attributes */
+ mem = &info->mem[mem_id];
+ mem->memtype = UIO_MEM_PHYS;
+ mem->size = resource_size(res);
+ mem->name = name;
+
+ if (!is_power_of_2(mem->size)) {
+			dev_err(dev, "BAR-%d size is not a power of 2\n",
+ bar_id);
+ return -EINVAL;
+ }
+
+ if (bar_id == 0) {
+ struct page *pg = alloc_pages(GFP_KERNEL | __GFP_ZERO,
+ get_order(mem->size));
+ if (!pg) {
+ dev_err(dev, "alloc RAM resource %pR failed\n",
+ res);
+ return -ENOMEM;
+ }
+ mem->internal_addr = page_address(pg);
+ bar0_addr = mem->internal_addr;
+ bar0_size = mem->size;
+ mem->addr = virt_to_phys(mem->internal_addr);
+ armada_pcie_ep_bar_map(ep, 0, bar_id,
+ (phys_addr_t)mem->addr,
+ mem->size);
+ } else {
+ mem->addr = res->start;
+ mem->internal_addr = devm_ioremap(dev, mem->addr,
+ mem->size);
+			if (!mem->internal_addr) {
+ dev_err(dev, "map BAR-%d memory %pR failed\n",
+ bar_id, res);
+ return -ENOMEM;
+ }
+
+ if (bar_id == 2) {
+ bar2_addr = mem->internal_addr;
+ bar2_size = mem->size;
+ }
+ }
+
+		/* BAR0 and BAR2 are 64-bit in HW and each consumes
+		 * two BAR slots, so skip the odd-numbered slot.
+		 */
+ if (bar_id < 4)
+ bar_id++;
+ }
+
+	/* Remap host RAM into the local memory space using shift mapping,
+	 * i.e. address 0x0 on the host becomes uio_pci->host_map->start.
+	 * Additionally map the host physical space into virtual memory
+	 * using ioremap.
+ */
+ uio_pci->host_map = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "host-map");
+ if (!uio_pci->host_map) {
+ dev_err(dev, "Device tree missing host mappings\n");
+ return -ENODEV;
+ }
+
+ /* Describe the host as a UIO space */
+ name = devm_kzalloc(dev, 16 * sizeof(char), GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+
+ snprintf(name, 16, "host-map");
+ mem = &info->mem[mem_id];
+ mem->memtype = UIO_MEM_PHYS;
+ mem->size = resource_size(uio_pci->host_map);
+ mem->name = name;
+ mem->addr = uio_pci->host_map->start;
+ mem->internal_addr = devm_ioremap_resource(dev, uio_pci->host_map);
+ if (IS_ERR(mem->internal_addr)) {
+ dev_err(dev, "map host memory %pR failed\n", uio_pci->host_map);
+ return -ENODEV;
+ }
+
+ /* register the UIO device */
+ if (uio_register_device(dev, info) != 0) {
+ dev_err(dev, "UIO registration failed\n");
+ return -ENODEV;
+ }
+
+ dev_info(dev, "Registered UIO PCI EP successfully\n");
+
+ return 0;
+}
+
+static int uio_pci_ep_remove(struct platform_device *pdev)
+{
+ struct uio_info *info = platform_get_drvdata(pdev);
+
+ uio_unregister_device(info);
+ return 0;
+}
+
+static const struct of_device_id uio_pci_ep_match[] = {
+ { .compatible = "marvell,pci-ep-uio", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, uio_pci_ep_match);
+
+static struct platform_driver uio_pci_ep_driver = {
+ .driver = {
+ .name = "marvell,pci-ep-uio",
+ .owner = THIS_MODULE,
+ .of_match_table = uio_pci_ep_match,
+ },
+ .probe = uio_pci_ep_probe,
+ .remove = uio_pci_ep_remove,
+};
+module_platform_driver(uio_pci_ep_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Yehuda Yitschak <yehuday@marvell.com>");
+MODULE_DESCRIPTION("PCI EP Function UIO driver");
diff --git a/drivers/vfio/platform/vfio_amba.c b/drivers/vfio/platform/vfio_amba.c
index 3626c2150101..62c97aa21a79 100644
--- a/drivers/vfio/platform/vfio_amba.c
+++ b/drivers/vfio/platform/vfio_amba.c
@@ -73,11 +73,13 @@ static int vfio_amba_probe(struct amba_device *adev, const struct amba_id *id)
static void vfio_amba_remove(struct amba_device *adev)
{
- struct vfio_platform_device *vdev =
- vfio_platform_remove_common(&adev->dev);
+ struct vfio_platform_device *vdev;
- kfree(vdev->name);
- kfree(vdev);
+ vdev = vfio_platform_remove_common(&adev->dev);
+ if (vdev) {
+ kfree(vdev->name);
+ kfree(vdev);
+ }
}
static const struct amba_id pl330_ids[] = {
diff --git a/fs/pstore/Kconfig b/fs/pstore/Kconfig
index 8efe60487b48..ec42ecaf82b5 100644
--- a/fs/pstore/Kconfig
+++ b/fs/pstore/Kconfig
@@ -16,7 +16,7 @@ config PSTORE
config PSTORE_DEFLATE_COMPRESS
tristate "DEFLATE (ZLIB) compression"
- default y
+ default n
depends on PSTORE
select CRYPTO_DEFLATE
help
@@ -59,7 +59,7 @@ config PSTORE_ZSTD_COMPRESS
This option enables zstd compression algorithm support.
config PSTORE_COMPRESS
- def_bool y
+ def_bool n
depends on PSTORE
depends on PSTORE_DEFLATE_COMPRESS || PSTORE_LZO_COMPRESS || \
PSTORE_LZ4_COMPRESS || PSTORE_LZ4HC_COMPRESS || \
diff --git a/include/acpi/apei.h b/include/acpi/apei.h
index 680f80960c3d..2a64def09bbf 100644
--- a/include/acpi/apei.h
+++ b/include/acpi/apei.h
@@ -33,8 +33,12 @@ extern bool ghes_disable;
#ifdef CONFIG_ACPI_APEI
void __init acpi_hest_init(void);
+void __init hest_table_set(struct acpi_table_hest *table);
+void __init bert_table_set(struct acpi_table_bert *table);
#else
static inline void acpi_hest_init(void) { return; }
+static inline void hest_table_set(struct acpi_table_hest *table) { return; }
+static inline void bert_table_set(struct acpi_table_bert *table) { return; }
#endif
typedef int (*apei_hest_func_t)(struct acpi_hest_header *hest_hdr, void *data);
diff --git a/include/linux/armada-pcie-ep.h b/include/linux/armada-pcie-ep.h
new file mode 100644
index 000000000000..f80d61723f22
--- /dev/null
+++ b/include/linux/armada-pcie-ep.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Armada PCIe EP
+ * Copyright (c) 2019, Marvell Semiconductor.
+ */
+#ifndef _ARMADA_PCIE_EP_
+#define _ARMADA_PCIE_EP_
+
+#include <linux/msi.h>
+
+/* BAR bitmaps for use with armada_pcie_ep_disable_bars */
+#define PCIE_EP_BAR0 BIT(0)
+#define PCIE_EP_BAR1 BIT(1)
+#define PCIE_EP_BAR0_64 (PCIE_EP_BAR0 | PCIE_EP_BAR1)
+#define PCIE_EP_BAR2 BIT(2)
+#define PCIE_EP_BAR3 BIT(3)
+#define PCIE_EP_BAR2_64 (PCIE_EP_BAR3 | PCIE_EP_BAR2)
+#define PCIE_EP_BAR4 BIT(4)
+#define PCIE_EP_BAR5 BIT(5)
+#define PCIE_EP_BAR4_64 (PCIE_EP_BAR4 | PCIE_EP_BAR5)
+#define PCIE_EP_BAR_ROM BIT(8) /* matches the offset, see pci.c */
+#define PCIE_EP_ALL_BARS	((BIT(9) - 1) & ~(BIT(6) | BIT(7)))
+
+void armada_pcie_ep_bar_map(void *ep, u32 func_id, int bar, phys_addr_t addr,
+ u64 size);
+void armada_pcie_ep_setup_bar(void *ep, int func_id, u32 bar_num, u32 props,
+ u64 sz);
+void armada_pcie_ep_disable_bars(void *ep, int func_id, u16 mask);
+void armada_pcie_ep_cfg_enable(void *ep, int func_id);
+int armada_pcie_ep_get_msi(void *ep, int func_id, int vec_id,
+ struct msi_msg *msg);
+int armada_pcie_ep_remap_host(void *ep, u32 func_id, u64 local_base,
+ u64 host_base, u64 size);
+void *armada_pcie_ep_get(void);
+
+#endif /* _ARMADA_PCIE_EP_ */
diff --git a/include/linux/coresight-pmu.h b/include/linux/coresight-pmu.h
index b0e35eec6499..4ac5c081af93 100644
--- a/include/linux/coresight-pmu.h
+++ b/include/linux/coresight-pmu.h
@@ -10,17 +10,27 @@
#define CORESIGHT_ETM_PMU_NAME "cs_etm"
#define CORESIGHT_ETM_PMU_SEED 0x10
-/* ETMv3.5/PTM's ETMCR config bit */
-#define ETM_OPT_CYCACC 12
-#define ETM_OPT_CTXTID 14
-#define ETM_OPT_TS 28
-#define ETM_OPT_RETSTK 29
+/*
+ * Below are the definitions of the bit offsets for the perf options; they
+ * work as arbitrary values for all ETM versions.
+ *
+ * Most of them originally come from ETMv3.5/PTM's ETMCR config; therefore,
+ * ETMv3.5/PTM doesn't define ETMCR config bits with an "ETM3_" prefix and
+ * directly uses the macros below as config bits.
+ */
+#define ETM_OPT_CYCACC 12
+#define ETM_OPT_CTXTID 14
+#define ETM_OPT_CTXTID2 15
+#define ETM_OPT_TS 28
+#define ETM_OPT_RETSTK 29
/* ETMv4 CONFIGR programming bits for the ETM OPTs */
#define ETM4_CFG_BIT_CYCACC 4
#define ETM4_CFG_BIT_CTXTID 6
+#define ETM4_CFG_BIT_VMID 7
#define ETM4_CFG_BIT_TS 11
#define ETM4_CFG_BIT_RETSTK 12
+#define ETM4_CFG_BIT_VMID_OPT 15
static inline int coresight_get_trace_id(int cpu)
{
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index 7d3c87e5b97c..85008a65e21f 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -7,6 +7,7 @@
#define _LINUX_CORESIGHT_H
#include <linux/device.h>
+#include <linux/io.h>
#include <linux/perf_event.h>
#include <linux/sched.h>
@@ -49,6 +50,7 @@ enum coresight_dev_subtype_sink {
CORESIGHT_DEV_SUBTYPE_SINK_PORT,
CORESIGHT_DEV_SUBTYPE_SINK_BUFFER,
CORESIGHT_DEV_SUBTYPE_SINK_SYSMEM,
+ CORESIGHT_DEV_SUBTYPE_SINK_PERCPU_SYSMEM,
};
enum coresight_dev_subtype_link {
@@ -115,6 +117,32 @@ struct coresight_platform_data {
};
/**
+ * struct csdev_access - Abstraction of a CoreSight device access.
+ *
+ * @io_mem : True if the device has memory mapped I/O
+ * @base : When io_mem == true, base address of the component
+ * @read : Read from the given "offset" of the given instance.
+ * @write : Write "val" to the given "offset".
+ */
+struct csdev_access {
+ bool io_mem;
+ union {
+ void __iomem *base;
+ struct {
+ u64 (*read)(u32 offset, bool relaxed, bool _64bit);
+ void (*write)(u64 val, u32 offset, bool relaxed,
+ bool _64bit);
+ };
+ };
+};
+
+#define CSDEV_ACCESS_IOMEM(_addr) \
+ ((struct csdev_access) { \
+ .io_mem = true, \
+ .base = (_addr), \
+ })
+
+/**
* struct coresight_desc - description of a component required from drivers
* @type: as defined by @coresight_dev_type.
* @subtype: as defined by @coresight_dev_subtype.
@@ -125,6 +153,7 @@ struct coresight_platform_data {
* @groups: operations specific to this component. These will end up
* in the component's sysfs sub-directory.
* @name: name for the coresight device, also shown under sysfs.
+ * @access: Describe access to the device
*/
struct coresight_desc {
enum coresight_dev_type type;
@@ -134,6 +163,7 @@ struct coresight_desc {
struct device *dev;
const struct attribute_group **groups;
const char *name;
+ struct csdev_access access;
};
/**
@@ -173,7 +203,8 @@ struct coresight_sysfs_link {
* @type: as defined by @coresight_dev_type.
* @subtype: as defined by @coresight_dev_subtype.
* @ops: generic operations for this component, as defined
- by @coresight_ops.
+ * by @coresight_ops.
+ * @access: Device i/o access abstraction for this device.
* @dev: The device entity associated to this component.
* @refcnt: keep track of what is in use.
* @orphan: true if the component has connections that haven't been linked.
@@ -195,6 +226,7 @@ struct coresight_device {
enum coresight_dev_type type;
union coresight_dev_subtype subtype;
const struct coresight_ops *ops;
+ struct csdev_access access;
struct device dev;
atomic_t *refcnt;
bool orphan;
@@ -326,23 +358,145 @@ struct coresight_ops {
};
#if IS_ENABLED(CONFIG_CORESIGHT)
+
+static inline u32 csdev_access_relaxed_read32(struct csdev_access *csa,
+ u32 offset)
+{
+ if (likely(csa->io_mem))
+ return readl_relaxed(csa->base + offset);
+
+ return csa->read(offset, true, false);
+}
+
+static inline u32 csdev_access_read32(struct csdev_access *csa, u32 offset)
+{
+ if (likely(csa->io_mem))
+ return readl(csa->base + offset);
+
+ return csa->read(offset, false, false);
+}
+
+static inline void csdev_access_relaxed_write32(struct csdev_access *csa,
+ u32 val, u32 offset)
+{
+ if (likely(csa->io_mem))
+ writel_relaxed(val, csa->base + offset);
+ else
+ csa->write(val, offset, true, false);
+}
+
+static inline void csdev_access_write32(struct csdev_access *csa, u32 val, u32 offset)
+{
+ if (likely(csa->io_mem))
+ writel(val, csa->base + offset);
+ else
+ csa->write(val, offset, false, false);
+}
+
+#ifdef CONFIG_64BIT
+
+static inline u64 csdev_access_relaxed_read64(struct csdev_access *csa,
+ u32 offset)
+{
+ if (likely(csa->io_mem))
+ return readq_relaxed(csa->base + offset);
+
+ return csa->read(offset, true, true);
+}
+
+static inline u64 csdev_access_read64(struct csdev_access *csa, u32 offset)
+{
+ if (likely(csa->io_mem))
+ return readq(csa->base + offset);
+
+ return csa->read(offset, false, true);
+}
+
+static inline void csdev_access_relaxed_write64(struct csdev_access *csa,
+ u64 val, u32 offset)
+{
+ if (likely(csa->io_mem))
+ writeq_relaxed(val, csa->base + offset);
+ else
+ csa->write(val, offset, true, true);
+}
+
+static inline void csdev_access_write64(struct csdev_access *csa, u64 val, u32 offset)
+{
+ if (likely(csa->io_mem))
+ writeq(val, csa->base + offset);
+ else
+ csa->write(val, offset, false, true);
+}
+
+#else /* !CONFIG_64BIT */
+
+static inline u64 csdev_access_relaxed_read64(struct csdev_access *csa,
+ u32 offset)
+{
+ WARN_ON(1);
+ return 0;
+}
+
+static inline u64 csdev_access_read64(struct csdev_access *csa, u32 offset)
+{
+ WARN_ON(1);
+ return 0;
+}
+
+static inline void csdev_access_relaxed_write64(struct csdev_access *csa,
+ u64 val, u32 offset)
+{
+ WARN_ON(1);
+}
+
+static inline void csdev_access_write64(struct csdev_access *csa, u64 val, u32 offset)
+{
+ WARN_ON(1);
+}
+#endif /* CONFIG_64BIT */
+
+static inline bool coresight_is_percpu_source(struct coresight_device *csdev)
+{
+ return csdev && (csdev->type == CORESIGHT_DEV_TYPE_SOURCE) &&
+ (csdev->subtype.source_subtype == CORESIGHT_DEV_SUBTYPE_SOURCE_PROC);
+}
+
+static inline bool coresight_is_percpu_sink(struct coresight_device *csdev)
+{
+ return csdev && (csdev->type == CORESIGHT_DEV_TYPE_SINK) &&
+ (csdev->subtype.sink_subtype == CORESIGHT_DEV_SUBTYPE_SINK_PERCPU_SYSMEM);
+}
+
extern struct coresight_device *
coresight_register(struct coresight_desc *desc);
extern void coresight_unregister(struct coresight_device *csdev);
extern int coresight_enable(struct coresight_device *csdev);
extern void coresight_disable(struct coresight_device *csdev);
-extern int coresight_timeout(void __iomem *addr, u32 offset,
+extern int coresight_timeout(struct csdev_access *csa, u32 offset,
int position, int value);
-extern int coresight_claim_device(void __iomem *base);
-extern int coresight_claim_device_unlocked(void __iomem *base);
+extern int coresight_claim_device(struct coresight_device *csdev);
+extern int coresight_claim_device_unlocked(struct coresight_device *csdev);
-extern void coresight_disclaim_device(void __iomem *base);
-extern void coresight_disclaim_device_unlocked(void __iomem *base);
+extern void coresight_disclaim_device(struct coresight_device *csdev);
+extern void coresight_disclaim_device_unlocked(struct coresight_device *csdev);
extern char *coresight_alloc_device_name(struct coresight_dev_list *devs,
struct device *dev);
extern bool coresight_loses_context_with_cpu(struct device *dev);
+
+u32 coresight_relaxed_read32(struct coresight_device *csdev, u32 offset);
+u32 coresight_read32(struct coresight_device *csdev, u32 offset);
+void coresight_write32(struct coresight_device *csdev, u32 val, u32 offset);
+void coresight_relaxed_write32(struct coresight_device *csdev,
+ u32 val, u32 offset);
+u64 coresight_relaxed_read64(struct coresight_device *csdev, u32 offset);
+u64 coresight_read64(struct coresight_device *csdev, u32 offset);
+void coresight_relaxed_write64(struct coresight_device *csdev,
+ u64 val, u32 offset);
+void coresight_write64(struct coresight_device *csdev, u64 val, u32 offset);
+
#else
static inline struct coresight_device *
coresight_register(struct coresight_desc *desc) { return NULL; }
@@ -350,29 +504,78 @@ static inline void coresight_unregister(struct coresight_device *csdev) {}
static inline int
coresight_enable(struct coresight_device *csdev) { return -ENOSYS; }
static inline void coresight_disable(struct coresight_device *csdev) {}
-static inline int coresight_timeout(void __iomem *addr, u32 offset,
- int position, int value) { return 1; }
-static inline int coresight_claim_device_unlocked(void __iomem *base)
+
+static inline int coresight_timeout(struct csdev_access *csa, u32 offset,
+ int position, int value)
+{
+ return 1;
+}
+
+static inline int coresight_claim_device_unlocked(struct coresight_device *csdev)
{
return -EINVAL;
}
-static inline int coresight_claim_device(void __iomem *base)
+static inline int coresight_claim_device(struct coresight_device *csdev)
{
return -EINVAL;
}
-static inline void coresight_disclaim_device(void __iomem *base) {}
-static inline void coresight_disclaim_device_unlocked(void __iomem *base) {}
+static inline void coresight_disclaim_device(struct coresight_device *csdev) {}
+static inline void coresight_disclaim_device_unlocked(struct coresight_device *csdev) {}
static inline bool coresight_loses_context_with_cpu(struct device *dev)
{
return false;
}
-#endif
+
+static inline u32 coresight_relaxed_read32(struct coresight_device *csdev, u32 offset)
+{
+ WARN_ON_ONCE(1);
+ return 0;
+}
+
+static inline u32 coresight_read32(struct coresight_device *csdev, u32 offset)
+{
+ WARN_ON_ONCE(1);
+ return 0;
+}
+
+static inline void coresight_write32(struct coresight_device *csdev, u32 val, u32 offset)
+{
+}
+
+static inline void coresight_relaxed_write32(struct coresight_device *csdev,
+ u32 val, u32 offset)
+{
+}
+
+static inline u64 coresight_relaxed_read64(struct coresight_device *csdev,
+ u32 offset)
+{
+ WARN_ON_ONCE(1);
+ return 0;
+}
+
+static inline u64 coresight_read64(struct coresight_device *csdev, u32 offset)
+{
+ WARN_ON_ONCE(1);
+ return 0;
+}
+
+static inline void coresight_relaxed_write64(struct coresight_device *csdev,
+ u64 val, u32 offset)
+{
+}
+
+static inline void coresight_write64(struct coresight_device *csdev, u64 val, u32 offset)
+{
+}
+
+#endif /* IS_ENABLED(CONFIG_CORESIGHT) */
extern int coresight_get_cpu(struct device *dev);
struct coresight_platform_data *coresight_get_platform_data(struct device *dev);
-#endif
+#endif /* _LINUX_CORESIGHT_H */
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 8fb893ed205e..b4a64ec662b6 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -179,6 +179,7 @@ enum cpuhp_state {
CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE,
CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE,
CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE,
+ CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE,
CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE,
CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE,
CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE,
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index c603237e006c..9e98752f90b5 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -585,8 +585,16 @@ extern void free_unref_page_list(struct list_head *list);
struct page_frag_cache;
extern void __page_frag_cache_drain(struct page *page, unsigned int count);
-extern void *page_frag_alloc(struct page_frag_cache *nc,
- unsigned int fragsz, gfp_t gfp_mask);
+extern void *page_frag_alloc_align(struct page_frag_cache *nc,
+ unsigned int fragsz, gfp_t gfp_mask,
+ unsigned int align_mask);
+
+static inline void *page_frag_alloc(struct page_frag_cache *nc,
+ unsigned int fragsz, gfp_t gfp_mask)
+{
+ return page_frag_alloc_align(nc, fragsz, gfp_mask, ~0u);
+}
+
extern void page_frag_free(void *addr);
#define __free_page(page) __free_pages((page), 0)
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index f5392d96d688..458d9e57d14a 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -326,6 +326,12 @@ extern bool oops_may_print(void);
void do_exit(long error_code) __noreturn;
void complete_and_exit(struct completion *, long) __noreturn;
+#ifdef CONFIG_MRVL_OCTEONTX_EL0_INTR
+struct task_struct;
+int task_cleanup_handler_add(void (*handler)(struct task_struct *));
+int task_cleanup_handler_remove(void (*handler)(struct task_struct *));
+#endif
+
/* Internal, do not use. */
int __must_check _kstrtoul(const char *s, unsigned int base, unsigned long *res);
int __must_check _kstrtol(const char *s, unsigned int base, long *res);
diff --git a/include/linux/mfd/idt82p33_reg.h b/include/linux/mfd/idt82p33_reg.h
new file mode 100644
index 000000000000..50872b0382fe
--- /dev/null
+++ b/include/linux/mfd/idt82p33_reg.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* idt82p33_reg.h
+ *
+ * Register Map - AN888_SMUforIEEE_SynchEther_82P33xxx_RevH.pdf
+ *
+ */
+#ifndef HAVE_IDT82P33_REG
+#define HAVE_IDT82P33_REG
+
+/* Register address */
+#define REG_ADDR(page, offset) (((page) << 0x7) | ((offset) & 0x7f))
+
+#define PAGE_ADDR 0x7F
+
+#define DPLL1_TOD_CNFG 0x134
+#define DPLL2_TOD_CNFG 0x1B4
+
+#define DPLL1_TOD_STS 0x10B
+#define DPLL2_TOD_STS 0x18B
+
+#define DPLL1_TOD_TRIGGER 0x115
+#define DPLL2_TOD_TRIGGER 0x195
+
+#define DPLL1_OPERATING_MODE_CNFG 0x120
+#define DPLL2_OPERATING_MODE_CNFG 0x1A0
+
+#define DPLL1_HOLDOVER_FREQ_CNFG 0x12C
+#define DPLL2_HOLDOVER_FREQ_CNFG 0x1AC
+
+#define DPLL1_PHASE_OFFSET_CNFG 0x143
+#define DPLL2_PHASE_OFFSET_CNFG 0x1C3
+
+#define DPLL1_SYNC_EDGE_CNFG 0x140
+#define DPLL2_SYNC_EDGE_CNFG 0x1C0
+
+#define DPLL1_INPUT_MODE_CNFG 0x116
+#define DPLL2_INPUT_MODE_CNFG 0x196
+
+#define DPLL1_OPERATING_STS 0x102
+#define DPLL2_OPERATING_STS 0x182
+
+#define DPLL1_CURRENT_FREQ_STS 0x103
+#define DPLL2_CURRENT_FREQ_STS 0x183
+
+#define REG_SOFT_RESET 0x381
+
+#define OUT_MUX_CNFG(outn) REG_ADDR(0x6, (0xC * (outn)))
+
+/* Register bit definitions */
+#define SYNC_TOD BIT(1)
+#define PH_OFFSET_EN BIT(7)
+#define SQUELCH_ENABLE BIT(5)
+
+/* Bit definitions for the DPLL_MODE register */
+#define PLL_MODE_SHIFT (0)
+#define PLL_MODE_MASK (0x1F)
+#define COMBO_MODE_EN BIT(5)
+#define COMBO_MODE_SHIFT (6)
+#define COMBO_MODE_MASK (0x3)
+
+/* Bit definitions for DPLL_OPERATING_STS register */
+#define OPERATING_STS_MASK (0x7)
+#define OPERATING_STS_SHIFT (0x0)
+
+/* Bit definitions for DPLL_TOD_TRIGGER register */
+#define READ_TRIGGER_MASK (0xF)
+#define READ_TRIGGER_SHIFT (0x0)
+#define WRITE_TRIGGER_MASK (0xF0)
+#define WRITE_TRIGGER_SHIFT (0x4)
+
+/* Bit definitions for REG_SOFT_RESET register */
+#define SOFT_RESET_EN BIT(7)
+
+enum pll_mode {
+ PLL_MODE_MIN = 0,
+ PLL_MODE_AUTOMATIC = PLL_MODE_MIN,
+ PLL_MODE_FORCE_FREERUN = 1,
+ PLL_MODE_FORCE_HOLDOVER = 2,
+ PLL_MODE_FORCE_LOCKED = 4,
+ PLL_MODE_FORCE_PRE_LOCKED2 = 5,
+ PLL_MODE_FORCE_PRE_LOCKED = 6,
+ PLL_MODE_FORCE_LOST_PHASE = 7,
+ PLL_MODE_DCO = 10,
+ PLL_MODE_WPH = 18,
+ PLL_MODE_MAX = PLL_MODE_WPH,
+};
+
+enum hw_tod_trig_sel {
+ HW_TOD_TRIG_SEL_MIN = 0,
+ HW_TOD_TRIG_SEL_NO_WRITE = HW_TOD_TRIG_SEL_MIN,
+ HW_TOD_TRIG_SEL_NO_READ = HW_TOD_TRIG_SEL_MIN,
+ HW_TOD_TRIG_SEL_SYNC_SEL = 1,
+ HW_TOD_TRIG_SEL_IN12 = 2,
+ HW_TOD_TRIG_SEL_IN13 = 3,
+ HW_TOD_TRIG_SEL_IN14 = 4,
+ HW_TOD_TRIG_SEL_TOD_PPS = 5,
+ HW_TOD_TRIG_SEL_TIMER_INTERVAL = 6,
+ HW_TOD_TRIG_SEL_MSB_PHASE_OFFSET_CNFG = 7,
+ HW_TOD_TRIG_SEL_MSB_HOLDOVER_FREQ_CNFG = 8,
+ HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG = 9,
+ HW_TOD_RD_TRIG_SEL_LSB_TOD_STS = HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG,
+ WR_TRIG_SEL_MAX = HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG,
+};
+
+/** @brief Enumerated type listing DPLL operational modes */
+enum dpll_state {
+ DPLL_STATE_FREERUN = 1,
+ DPLL_STATE_HOLDOVER = 2,
+ DPLL_STATE_LOCKED = 4,
+ DPLL_STATE_PRELOCKED2 = 5,
+ DPLL_STATE_PRELOCKED = 6,
+ DPLL_STATE_LOSTPHASE = 7,
+ DPLL_STATE_MAX
+};
+
+#endif
diff --git a/include/linux/mfd/idt8a340_reg.h b/include/linux/mfd/idt8a340_reg.h
new file mode 100644
index 000000000000..c356ad626b20
--- /dev/null
+++ b/include/linux/mfd/idt8a340_reg.h
@@ -0,0 +1,846 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* idt8a340_reg.h
+ *
+ * Originally generated by regen.tcl on Thu Feb 14 19:23:44 PST 2019
+ * https://github.com/richardcochran/regen
+ *
+ * Hand modified to include some HW registers.
+ * Based on 4.8.0, SCSR rev C commit a03c7ae5
+ */
+#ifndef HAVE_IDT8A340_REG
+#define HAVE_IDT8A340_REG
+
+#define PAGE_ADDR_BASE 0x0000
+#define PAGE_ADDR 0x00fc
+
+#define HW_REVISION 0x8180
+#define REV_ID 0x007a
+
+#define HW_DPLL_0 (0x8a00)
+#define HW_DPLL_1 (0x8b00)
+#define HW_DPLL_2 (0x8c00)
+#define HW_DPLL_3 (0x8d00)
+#define HW_DPLL_4 (0x8e00)
+#define HW_DPLL_5 (0x8f00)
+#define HW_DPLL_6 (0x9000)
+#define HW_DPLL_7 (0x9100)
+
+#define HW_DPLL_TOD_SW_TRIG_ADDR__0 (0x080)
+#define HW_DPLL_TOD_CTRL_1 (0x089)
+#define HW_DPLL_TOD_CTRL_2 (0x08A)
+#define HW_DPLL_TOD_OVR__0 (0x098)
+#define HW_DPLL_TOD_OUT_0__0 (0x0B0)
+
+#define HW_Q0_Q1_CH_SYNC_CTRL_0 (0xa740)
+#define HW_Q0_Q1_CH_SYNC_CTRL_1 (0xa741)
+#define HW_Q2_Q3_CH_SYNC_CTRL_0 (0xa742)
+#define HW_Q2_Q3_CH_SYNC_CTRL_1 (0xa743)
+#define HW_Q4_Q5_CH_SYNC_CTRL_0 (0xa744)
+#define HW_Q4_Q5_CH_SYNC_CTRL_1 (0xa745)
+#define HW_Q6_Q7_CH_SYNC_CTRL_0 (0xa746)
+#define HW_Q6_Q7_CH_SYNC_CTRL_1 (0xa747)
+#define HW_Q8_CH_SYNC_CTRL_0 (0xa748)
+#define HW_Q8_CH_SYNC_CTRL_1 (0xa749)
+#define HW_Q9_CH_SYNC_CTRL_0 (0xa74a)
+#define HW_Q9_CH_SYNC_CTRL_1 (0xa74b)
+#define HW_Q10_CH_SYNC_CTRL_0 (0xa74c)
+#define HW_Q10_CH_SYNC_CTRL_1 (0xa74d)
+#define HW_Q11_CH_SYNC_CTRL_0 (0xa74e)
+#define HW_Q11_CH_SYNC_CTRL_1 (0xa74f)
+
+#define SYNC_SOURCE_DPLL0_TOD_PPS 0x14
+#define SYNC_SOURCE_DPLL1_TOD_PPS 0x15
+#define SYNC_SOURCE_DPLL2_TOD_PPS 0x16
+#define SYNC_SOURCE_DPLL3_TOD_PPS 0x17
+
+#define SYNCTRL1_MASTER_SYNC_RST BIT(7)
+#define SYNCTRL1_MASTER_SYNC_TRIG BIT(5)
+#define SYNCTRL1_TOD_SYNC_TRIG BIT(4)
+#define SYNCTRL1_FBDIV_FRAME_SYNC_TRIG BIT(3)
+#define SYNCTRL1_FBDIV_SYNC_TRIG BIT(2)
+#define SYNCTRL1_Q1_DIV_SYNC_TRIG BIT(1)
+#define SYNCTRL1_Q0_DIV_SYNC_TRIG BIT(0)
+
+#define HW_Q8_CTRL_SPARE (0xa7d4)
+#define HW_Q11_CTRL_SPARE (0xa7ec)
+
+/**
+ * Select FOD5 as sync_trigger for Q8 divider.
+ * Transition from logic zero to one
+ * sets trigger to sync Q8 divider.
+ *
+ * Unused when FOD4 is driving Q8 divider (normal operation).
+ */
+#define Q9_TO_Q8_SYNC_TRIG BIT(1)
+
+/**
+ * Enable FOD5 as driver for clock and sync for Q8 divider.
+ * Enable fanout buffer for FOD5.
+ *
+ * Unused when FOD4 is driving Q8 divider (normal operation).
+ */
+#define Q9_TO_Q8_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK (BIT(0) | BIT(2))
+
+/**
+ * Select FOD6 as sync_trigger for Q11 divider.
+ * Transition from logic zero to one
+ * sets trigger to sync Q11 divider.
+ *
+ * Unused when FOD7 is driving Q11 divider (normal operation).
+ */
+#define Q10_TO_Q11_SYNC_TRIG BIT(1)
+
+/**
+ * Enable FOD6 as driver for clock and sync for Q11 divider.
+ * Enable fanout buffer for FOD6.
+ *
+ * Unused when FOD7 is driving Q11 divider (normal operation).
+ */
+#define Q10_TO_Q11_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK (BIT(0) | BIT(2))
+
+#define RESET_CTRL 0xc000
+#define SM_RESET 0x0012
+#define SM_RESET_CMD 0x5A
+
+#define GENERAL_STATUS 0xc014
+#define BOOT_STATUS 0x0000
+#define HW_REV_ID 0x000A
+#define BOND_ID 0x000B
+#define HW_CSR_ID 0x000C
+#define HW_IRQ_ID 0x000E
+
+#define MAJ_REL 0x0010
+#define MIN_REL 0x0011
+#define HOTFIX_REL 0x0012
+
+#define PIPELINE_ID 0x0014
+#define BUILD_ID 0x0018
+
+#define JTAG_DEVICE_ID 0x001c
+#define PRODUCT_ID 0x001e
+
+#define OTP_SCSR_CONFIG_SELECT 0x0022
+
+#define STATUS 0xc03c
+#define DPLL0_STATUS 0x0018
+#define DPLL1_STATUS 0x0019
+#define DPLL2_STATUS 0x001a
+#define DPLL3_STATUS 0x001b
+#define DPLL4_STATUS 0x001c
+#define DPLL5_STATUS 0x001d
+#define DPLL6_STATUS 0x001e
+#define DPLL7_STATUS 0x001f
+#define DPLL_SYS_STATUS 0x0020
+#define DPLL_SYS_APLL_STATUS 0x0021
+#define DPLL0_FILTER_STATUS 0x0044
+#define DPLL1_FILTER_STATUS 0x004c
+#define DPLL2_FILTER_STATUS 0x0054
+#define DPLL3_FILTER_STATUS 0x005c
+#define DPLL4_FILTER_STATUS 0x0064
+#define DPLL5_FILTER_STATUS 0x006c
+#define DPLL6_FILTER_STATUS 0x0074
+#define DPLL7_FILTER_STATUS 0x007c
+#define DPLLSYS_FILTER_STATUS 0x0084
+#define USER_GPIO0_TO_7_STATUS 0x008a
+#define USER_GPIO8_TO_15_STATUS 0x008b
+
+#define GPIO_USER_CONTROL 0xc160
+#define GPIO0_TO_7_OUT 0x0000
+#define GPIO8_TO_15_OUT 0x0001
+
+#define STICKY_STATUS_CLEAR 0xc164
+
+#define GPIO_TOD_NOTIFICATION_CLEAR 0xc16c
+
+#define ALERT_CFG 0xc188
+
+#define SYS_DPLL_XO 0xc194
+
+#define SYS_APLL 0xc19c
+
+#define INPUT_0 0xc1b0
+
+#define INPUT_1 0xc1c0
+
+#define INPUT_2 0xc1d0
+
+#define INPUT_3 0xc200
+
+#define INPUT_4 0xc210
+
+#define INPUT_5 0xc220
+
+#define INPUT_6 0xc230
+
+#define INPUT_7 0xc240
+
+#define INPUT_8 0xc250
+
+#define INPUT_9 0xc260
+
+#define INPUT_10 0xc280
+
+#define INPUT_11 0xc290
+
+#define INPUT_12 0xc2a0
+
+#define INPUT_13 0xc2b0
+
+#define INPUT_14 0xc2c0
+
+#define INPUT_15 0xc2d0
+
+#define REF_MON_0 0xc2e0
+
+#define REF_MON_1 0xc2ec
+
+#define REF_MON_2 0xc300
+
+#define REF_MON_3 0xc30c
+
+#define REF_MON_4 0xc318
+
+#define REF_MON_5 0xc324
+
+#define REF_MON_6 0xc330
+
+#define REF_MON_7 0xc33c
+
+#define REF_MON_8 0xc348
+
+#define REF_MON_9 0xc354
+
+#define REF_MON_10 0xc360
+
+#define REF_MON_11 0xc36c
+
+#define REF_MON_12 0xc380
+
+#define REF_MON_13 0xc38c
+
+#define REF_MON_14 0xc398
+
+#define REF_MON_15 0xc3a4
+
+#define DPLL_0 0xc3b0
+#define DPLL_CTRL_REG_0 0x0002
+#define DPLL_CTRL_REG_1 0x0003
+#define DPLL_CTRL_REG_2 0x0004
+#define DPLL_TOD_SYNC_CFG 0x0031
+#define DPLL_COMBO_SLAVE_CFG_0 0x0032
+#define DPLL_COMBO_SLAVE_CFG_1 0x0033
+#define DPLL_SLAVE_REF_CFG 0x0034
+#define DPLL_REF_MODE 0x0035
+#define DPLL_PHASE_MEASUREMENT_CFG 0x0036
+#define DPLL_MODE 0x0037
+
+#define DPLL_1 0xc400
+
+#define DPLL_2 0xc438
+
+#define DPLL_3 0xc480
+
+#define DPLL_4 0xc4b8
+
+#define DPLL_5 0xc500
+
+#define DPLL_6 0xc538
+
+#define DPLL_7 0xc580
+
+#define SYS_DPLL 0xc5b8
+
+#define DPLL_CTRL_0 0xc600
+#define DPLL_CTRL_DPLL_MANU_REF_CFG 0x0001
+#define DPLL_CTRL_DPLL_FOD_FREQ 0x001c
+#define DPLL_CTRL_COMBO_MASTER_CFG 0x003a
+
+#define DPLL_CTRL_1 0xc63c
+
+#define DPLL_CTRL_2 0xc680
+
+#define DPLL_CTRL_3 0xc6bc
+
+#define DPLL_CTRL_4 0xc700
+
+#define DPLL_CTRL_5 0xc73c
+
+#define DPLL_CTRL_6 0xc780
+
+#define DPLL_CTRL_7 0xc7bc
+
+#define SYS_DPLL_CTRL 0xc800
+
+#define DPLL_PHASE_0 0xc818
+
+/* Signed 42-bit FFO in units of 2^(-53) */
+#define DPLL_WR_PHASE 0x0000
+
+#define DPLL_PHASE_1 0xc81c
+
+#define DPLL_PHASE_2 0xc820
+
+#define DPLL_PHASE_3 0xc824
+
+#define DPLL_PHASE_4 0xc828
+
+#define DPLL_PHASE_5 0xc82c
+
+#define DPLL_PHASE_6 0xc830
+
+#define DPLL_PHASE_7 0xc834
+
+#define DPLL_FREQ_0 0xc838
+
+/* Signed 42-bit FFO in units of 2^(-53) */
+#define DPLL_WR_FREQ 0x0000
+
+#define DPLL_FREQ_1 0xc840
+
+#define DPLL_FREQ_2 0xc848
+
+#define DPLL_FREQ_3 0xc850
+
+#define DPLL_FREQ_4 0xc858
+
+#define DPLL_FREQ_5 0xc860
+
+#define DPLL_FREQ_6 0xc868
+
+#define DPLL_FREQ_7 0xc870
+
+#define DPLL_PHASE_PULL_IN_0 0xc880
+#define PULL_IN_OFFSET 0x0000 /* Signed 32 bit */
+#define PULL_IN_SLOPE_LIMIT 0x0004 /* Unsigned 24 bit */
+#define PULL_IN_CTRL 0x0007
+
+#define DPLL_PHASE_PULL_IN_1 0xc888
+
+#define DPLL_PHASE_PULL_IN_2 0xc890
+
+#define DPLL_PHASE_PULL_IN_3 0xc898
+
+#define DPLL_PHASE_PULL_IN_4 0xc8a0
+
+#define DPLL_PHASE_PULL_IN_5 0xc8a8
+
+#define DPLL_PHASE_PULL_IN_6 0xc8b0
+
+#define DPLL_PHASE_PULL_IN_7 0xc8b8
+
+#define GPIO_CFG 0xc8c0
+#define GPIO_CFG_GBL 0x0000
+
+#define GPIO_0 0xc8c2
+#define GPIO_DCO_INC_DEC 0x0000
+#define GPIO_OUT_CTRL_0 0x0001
+#define GPIO_OUT_CTRL_1 0x0002
+#define GPIO_TOD_TRIG 0x0003
+#define GPIO_DPLL_INDICATOR 0x0004
+#define GPIO_LOS_INDICATOR 0x0005
+#define GPIO_REF_INPUT_DSQ_0 0x0006
+#define GPIO_REF_INPUT_DSQ_1 0x0007
+#define GPIO_REF_INPUT_DSQ_2 0x0008
+#define GPIO_REF_INPUT_DSQ_3 0x0009
+#define GPIO_MAN_CLK_SEL_0 0x000a
+#define GPIO_MAN_CLK_SEL_1 0x000b
+#define GPIO_MAN_CLK_SEL_2 0x000c
+#define GPIO_SLAVE 0x000d
+#define GPIO_ALERT_OUT_CFG 0x000e
+#define GPIO_TOD_NOTIFICATION_CFG 0x000f
+#define GPIO_CTRL 0x0010
+
+#define GPIO_1 0xc8d4
+
+#define GPIO_2 0xc8e6
+
+#define GPIO_3 0xc900
+
+#define GPIO_4 0xc912
+
+#define GPIO_5 0xc924
+
+#define GPIO_6 0xc936
+
+#define GPIO_7 0xc948
+
+#define GPIO_8 0xc95a
+
+#define GPIO_9 0xc980
+
+#define GPIO_10 0xc992
+
+#define GPIO_11 0xc9a4
+
+#define GPIO_12 0xc9b6
+
+#define GPIO_13 0xc9c8
+
+#define GPIO_14 0xc9da
+
+#define GPIO_15 0xca00
+
+#define OUT_DIV_MUX 0xca12
+
+#define OUTPUT_0 0xca14
+/* FOD frequency output divider value */
+#define OUT_DIV 0x0000
+#define OUT_DUTY_CYCLE_HIGH 0x0004
+#define OUT_CTRL_0 0x0008
+#define OUT_CTRL_1 0x0009
+/* Phase adjustment in FOD cycles */
+#define OUT_PHASE_ADJ 0x000c
+
+#define OUTPUT_1 0xca24
+
+#define OUTPUT_2 0xca34
+
+#define OUTPUT_3 0xca44
+
+#define OUTPUT_4 0xca54
+
+#define OUTPUT_5 0xca64
+
+#define OUTPUT_6 0xca80
+
+#define OUTPUT_7 0xca90
+
+#define OUTPUT_8 0xcaa0
+
+#define OUTPUT_9 0xcab0
+
+#define OUTPUT_10 0xcac0
+
+#define OUTPUT_11 0xcad0
+
+#define SERIAL 0xcae0
+
+#define PWM_ENCODER_0 0xcb00
+
+#define PWM_ENCODER_1 0xcb08
+
+#define PWM_ENCODER_2 0xcb10
+
+#define PWM_ENCODER_3 0xcb18
+
+#define PWM_ENCODER_4 0xcb20
+
+#define PWM_ENCODER_5 0xcb28
+
+#define PWM_ENCODER_6 0xcb30
+
+#define PWM_ENCODER_7 0xcb38
+
+#define PWM_DECODER_0 0xcb40
+
+#define PWM_DECODER_1 0xcb48
+
+#define PWM_DECODER_2 0xcb50
+
+#define PWM_DECODER_3 0xcb58
+
+#define PWM_DECODER_4 0xcb60
+
+#define PWM_DECODER_5 0xcb68
+
+#define PWM_DECODER_6 0xcb70
+
+#define PWM_DECODER_7 0xcb80
+
+#define PWM_DECODER_8 0xcb88
+
+#define PWM_DECODER_9 0xcb90
+
+#define PWM_DECODER_10 0xcb98
+
+#define PWM_DECODER_11 0xcba0
+
+#define PWM_DECODER_12 0xcba8
+
+#define PWM_DECODER_13 0xcbb0
+
+#define PWM_DECODER_14 0xcbb8
+
+#define PWM_DECODER_15 0xcbc0
+
+#define PWM_USER_DATA 0xcbc8
+
+#define TOD_0 0xcbcc
+
+/* Enable TOD counter, output channel sync and even-PPS mode */
+#define TOD_CFG 0x0000
+
+#define TOD_1 0xcbce
+
+#define TOD_2 0xcbd0
+
+#define TOD_3 0xcbd2
+
+
+/* 8-bit subns, 32-bit ns, 48-bit seconds */
+#define TOD_WRITE 0x0000
+/* Counter increments after TOD write is completed */
+#define TOD_WRITE_COUNTER 0x000c
+/* TOD write trigger configuration */
+#define TOD_WRITE_SELECT_CFG_0 0x000d
+/* TOD write trigger selection */
+#define TOD_WRITE_CMD 0x000f
+
+#define TOD_WRITE_1 0xcc10
+
+#define TOD_WRITE_2 0xcc20
+
+#define TOD_WRITE_3 0xcc30
+
+#define TOD_READ_PRIMARY_0 0xcc40
+/* 8-bit subns, 32-bit ns, 48-bit seconds */
+#define TOD_READ_PRIMARY 0x0000
+/* Counter increments after TOD write is completed */
+#define TOD_READ_PRIMARY_COUNTER 0x000b
+/* Read trigger configuration */
+#define TOD_READ_PRIMARY_SEL_CFG_0 0x000c
+/* Read trigger selection */
+#define TOD_READ_PRIMARY_CMD 0x000e
+
+#define TOD_READ_PRIMARY_1 0xcc50
+
+#define TOD_READ_PRIMARY_2 0xcc60
+
+#define TOD_READ_PRIMARY_3 0xcc80
+
+#define TOD_READ_SECONDARY_0 0xcc90
+
+#define TOD_READ_SECONDARY_1 0xcca0
+
+#define TOD_READ_SECONDARY_2 0xccb0
+
+#define TOD_READ_SECONDARY_3 0xccc0
+
+#define OUTPUT_TDC_CFG 0xccd0
+
+#define OUTPUT_TDC_0 0xcd00
+
+#define OUTPUT_TDC_1 0xcd08
+
+#define OUTPUT_TDC_2 0xcd10
+
+#define OUTPUT_TDC_3 0xcd18
+
+#define INPUT_TDC 0xcd20
+
+#define SCRATCH 0xcf50
+
+#define EEPROM 0xcf68
+
+#define OTP 0xcf70
+
+#define BYTE 0xcf80
+
+/* Bit definitions for the MAJ_REL register */
+#define MAJOR_SHIFT (1)
+#define MAJOR_MASK (0x7f)
+#define PR_BUILD BIT(0)
+
+/* Bit definitions for the USER_GPIO0_TO_7_STATUS register */
+#define GPIO0_LEVEL BIT(0)
+#define GPIO1_LEVEL BIT(1)
+#define GPIO2_LEVEL BIT(2)
+#define GPIO3_LEVEL BIT(3)
+#define GPIO4_LEVEL BIT(4)
+#define GPIO5_LEVEL BIT(5)
+#define GPIO6_LEVEL BIT(6)
+#define GPIO7_LEVEL BIT(7)
+
+/* Bit definitions for the USER_GPIO8_TO_15_STATUS register */
+#define GPIO8_LEVEL BIT(0)
+#define GPIO9_LEVEL BIT(1)
+#define GPIO10_LEVEL BIT(2)
+#define GPIO11_LEVEL BIT(3)
+#define GPIO12_LEVEL BIT(4)
+#define GPIO13_LEVEL BIT(5)
+#define GPIO14_LEVEL BIT(6)
+#define GPIO15_LEVEL BIT(7)
+
+/* Bit definitions for the GPIO0_TO_7_OUT register */
+#define GPIO0_DRIVE_LEVEL BIT(0)
+#define GPIO1_DRIVE_LEVEL BIT(1)
+#define GPIO2_DRIVE_LEVEL BIT(2)
+#define GPIO3_DRIVE_LEVEL BIT(3)
+#define GPIO4_DRIVE_LEVEL BIT(4)
+#define GPIO5_DRIVE_LEVEL BIT(5)
+#define GPIO6_DRIVE_LEVEL BIT(6)
+#define GPIO7_DRIVE_LEVEL BIT(7)
+
+/* Bit definitions for the GPIO8_TO_15_OUT register */
+#define GPIO8_DRIVE_LEVEL BIT(0)
+#define GPIO9_DRIVE_LEVEL BIT(1)
+#define GPIO10_DRIVE_LEVEL BIT(2)
+#define GPIO11_DRIVE_LEVEL BIT(3)
+#define GPIO12_DRIVE_LEVEL BIT(4)
+#define GPIO13_DRIVE_LEVEL BIT(5)
+#define GPIO14_DRIVE_LEVEL BIT(6)
+#define GPIO15_DRIVE_LEVEL BIT(7)
+
+/* Bit definitions for the DPLL_TOD_SYNC_CFG register */
+#define TOD_SYNC_SOURCE_SHIFT (1)
+#define TOD_SYNC_SOURCE_MASK (0x3)
+#define TOD_SYNC_EN BIT(0)
+
+/* Bit definitions for the DPLL_MODE register */
+#define WRITE_TIMER_MODE BIT(6)
+#define PLL_MODE_SHIFT (3)
+#define PLL_MODE_MASK (0x7)
+#define STATE_MODE_SHIFT (0)
+#define STATE_MODE_MASK (0x7)
+
+/* Bit definitions for the DPLL_MANU_REF_CFG register */
+#define MANUAL_REFERENCE_SHIFT (0)
+#define MANUAL_REFERENCE_MASK (0x1f)
+
+/* Bit definitions for the GPIO_CFG_GBL register */
+#define SUPPLY_MODE_SHIFT (0)
+#define SUPPLY_MODE_MASK (0x3)
+
+/* Bit definitions for the GPIO_DCO_INC_DEC register */
+#define INCDEC_DPLL_INDEX_SHIFT (0)
+#define INCDEC_DPLL_INDEX_MASK (0x7)
+
+/* Bit definitions for the GPIO_OUT_CTRL_0 register */
+#define CTRL_OUT_0 BIT(0)
+#define CTRL_OUT_1 BIT(1)
+#define CTRL_OUT_2 BIT(2)
+#define CTRL_OUT_3 BIT(3)
+#define CTRL_OUT_4 BIT(4)
+#define CTRL_OUT_5 BIT(5)
+#define CTRL_OUT_6 BIT(6)
+#define CTRL_OUT_7 BIT(7)
+
+/* Bit definitions for the GPIO_OUT_CTRL_1 register */
+#define CTRL_OUT_8 BIT(0)
+#define CTRL_OUT_9 BIT(1)
+#define CTRL_OUT_10 BIT(2)
+#define CTRL_OUT_11 BIT(3)
+#define CTRL_OUT_12 BIT(4)
+#define CTRL_OUT_13 BIT(5)
+#define CTRL_OUT_14 BIT(6)
+#define CTRL_OUT_15 BIT(7)
+
+/* Bit definitions for the GPIO_TOD_TRIG register */
+#define TOD_TRIG_0 BIT(0)
+#define TOD_TRIG_1 BIT(1)
+#define TOD_TRIG_2 BIT(2)
+#define TOD_TRIG_3 BIT(3)
+
+/* Bit definitions for the GPIO_DPLL_INDICATOR register */
+#define IND_DPLL_INDEX_SHIFT (0)
+#define IND_DPLL_INDEX_MASK (0x7)
+
+/* Bit definitions for the GPIO_LOS_INDICATOR register */
+#define REFMON_INDEX_SHIFT (0)
+#define REFMON_INDEX_MASK (0xf)
+/* Active level of LOS indicator, 0=low 1=high */
+#define ACTIVE_LEVEL BIT(4)
+
+/* Bit definitions for the GPIO_REF_INPUT_DSQ_0 register */
+#define DSQ_INP_0 BIT(0)
+#define DSQ_INP_1 BIT(1)
+#define DSQ_INP_2 BIT(2)
+#define DSQ_INP_3 BIT(3)
+#define DSQ_INP_4 BIT(4)
+#define DSQ_INP_5 BIT(5)
+#define DSQ_INP_6 BIT(6)
+#define DSQ_INP_7 BIT(7)
+
+/* Bit definitions for the GPIO_REF_INPUT_DSQ_1 register */
+#define DSQ_INP_8 BIT(0)
+#define DSQ_INP_9 BIT(1)
+#define DSQ_INP_10 BIT(2)
+#define DSQ_INP_11 BIT(3)
+#define DSQ_INP_12 BIT(4)
+#define DSQ_INP_13 BIT(5)
+#define DSQ_INP_14 BIT(6)
+#define DSQ_INP_15 BIT(7)
+
+/* Bit definitions for the GPIO_REF_INPUT_DSQ_2 register */
+#define DSQ_DPLL_0 BIT(0)
+#define DSQ_DPLL_1 BIT(1)
+#define DSQ_DPLL_2 BIT(2)
+#define DSQ_DPLL_3 BIT(3)
+#define DSQ_DPLL_4 BIT(4)
+#define DSQ_DPLL_5 BIT(5)
+#define DSQ_DPLL_6 BIT(6)
+#define DSQ_DPLL_7 BIT(7)
+
+/* Bit definitions for the GPIO_REF_INPUT_DSQ_3 register */
+#define DSQ_DPLL_SYS BIT(0)
+#define GPIO_DSQ_LEVEL BIT(1)
+
+/* Bit definitions for the GPIO_TOD_NOTIFICATION_CFG register */
+#define DPLL_TOD_SHIFT (0)
+#define DPLL_TOD_MASK (0x3)
+#define TOD_READ_SECONDARY BIT(2)
+#define GPIO_ASSERT_LEVEL BIT(3)
+
+/* Bit definitions for the GPIO_CTRL register */
+#define GPIO_FUNCTION_EN BIT(0)
+#define GPIO_CMOS_OD_MODE BIT(1)
+#define GPIO_CONTROL_DIR BIT(2)
+#define GPIO_PU_PD_MODE BIT(3)
+#define GPIO_FUNCTION_SHIFT (4)
+#define GPIO_FUNCTION_MASK (0xf)
+
+/* Bit definitions for the OUT_CTRL_1 register */
+#define OUT_SYNC_DISABLE BIT(7)
+#define SQUELCH_VALUE BIT(6)
+#define SQUELCH_DISABLE BIT(5)
+#define PAD_VDDO_SHIFT (2)
+#define PAD_VDDO_MASK (0x7)
+#define PAD_CMOSDRV_SHIFT (0)
+#define PAD_CMOSDRV_MASK (0x3)
+
+/* Bit definitions for the TOD_CFG register */
+#define TOD_EVEN_PPS_MODE BIT(2)
+#define TOD_OUT_SYNC_ENABLE BIT(1)
+#define TOD_ENABLE BIT(0)
+
+/* Bit definitions for the TOD_WRITE_SELECT_CFG_0 register */
+#define WR_PWM_DECODER_INDEX_SHIFT (4)
+#define WR_PWM_DECODER_INDEX_MASK (0xf)
+#define WR_REF_INDEX_SHIFT (0)
+#define WR_REF_INDEX_MASK (0xf)
+
+/* Bit definitions for the TOD_WRITE_CMD register */
+#define TOD_WRITE_SELECTION_SHIFT (0)
+#define TOD_WRITE_SELECTION_MASK (0xf)
+/* 4.8.7 */
+#define TOD_WRITE_TYPE_SHIFT (4)
+#define TOD_WRITE_TYPE_MASK (0x3)
+
+/* Bit definitions for the TOD_READ_PRIMARY_SEL_CFG_0 register */
+#define RD_PWM_DECODER_INDEX_SHIFT (4)
+#define RD_PWM_DECODER_INDEX_MASK (0xf)
+#define RD_REF_INDEX_SHIFT (0)
+#define RD_REF_INDEX_MASK (0xf)
+
+/* Bit definitions for the TOD_READ_PRIMARY_CMD register */
+#define TOD_READ_TRIGGER_MODE BIT(4)
+#define TOD_READ_TRIGGER_SHIFT (0)
+#define TOD_READ_TRIGGER_MASK (0xf)
+
+/* Bit definitions for the DPLL_CTRL_COMBO_MASTER_CFG register */
+#define COMBO_MASTER_HOLD BIT(0)
+
+/* Bit definitions for DPLL_SYS_STATUS register */
+#define DPLL_SYS_STATE_MASK (0xf)
+
+/* Bit definitions for SYS_APLL_STATUS register */
+#define SYS_APLL_LOSS_LOCK_LIVE_MASK BIT(0)
+#define SYS_APLL_LOSS_LOCK_LIVE_LOCKED 0
+#define SYS_APLL_LOSS_LOCK_LIVE_UNLOCKED 1
+
+/* Bit definitions for the DPLL0_STATUS register */
+#define DPLL_STATE_MASK (0xf)
+#define DPLL_STATE_SHIFT (0x0)
+
+/* Values of DPLL_N.DPLL_MODE.PLL_MODE */
+enum pll_mode {
+ PLL_MODE_MIN = 0,
+ PLL_MODE_PLL = PLL_MODE_MIN,
+ PLL_MODE_WRITE_PHASE = 1,
+ PLL_MODE_WRITE_FREQUENCY = 2,
+ PLL_MODE_GPIO_INC_DEC = 3,
+ PLL_MODE_SYNTHESIS = 4,
+ PLL_MODE_PHASE_MEASUREMENT = 5,
+ PLL_MODE_DISABLED = 6,
+ PLL_MODE_MAX = PLL_MODE_DISABLED,
+};
+
+/* Values of DPLL_CTRL_n.DPLL_MANU_REF_CFG.MANUAL_REFERENCE */
+enum manual_reference {
+ MANU_REF_MIN = 0,
+ MANU_REF_CLK0 = MANU_REF_MIN,
+ MANU_REF_CLK1,
+ MANU_REF_CLK2,
+ MANU_REF_CLK3,
+ MANU_REF_CLK4,
+ MANU_REF_CLK5,
+ MANU_REF_CLK6,
+ MANU_REF_CLK7,
+ MANU_REF_CLK8,
+ MANU_REF_CLK9,
+ MANU_REF_CLK10,
+ MANU_REF_CLK11,
+ MANU_REF_CLK12,
+ MANU_REF_CLK13,
+ MANU_REF_CLK14,
+ MANU_REF_CLK15,
+ MANU_REF_WRITE_PHASE,
+ MANU_REF_WRITE_FREQUENCY,
+ MANU_REF_XO_DPLL,
+ MANU_REF_MAX = MANU_REF_XO_DPLL,
+};
+
+enum hw_tod_write_trig_sel {
+ HW_TOD_WR_TRIG_SEL_MIN = 0,
+ HW_TOD_WR_TRIG_SEL_MSB = HW_TOD_WR_TRIG_SEL_MIN,
+ HW_TOD_WR_TRIG_SEL_RESERVED = 1,
+ HW_TOD_WR_TRIG_SEL_TOD_PPS = 2,
+ HW_TOD_WR_TRIG_SEL_IRIGB_PPS = 3,
+ HW_TOD_WR_TRIG_SEL_PWM_PPS = 4,
+ HW_TOD_WR_TRIG_SEL_GPIO = 5,
+ HW_TOD_WR_TRIG_SEL_FOD_SYNC = 6,
+ WR_TRIG_SEL_MAX = HW_TOD_WR_TRIG_SEL_FOD_SYNC,
+};
+
+enum scsr_read_trig_sel {
+ /* CANCEL CURRENT TOD READ; MODULE BECOMES IDLE - NO TRIGGER OCCURS */
+ SCSR_TOD_READ_TRIG_SEL_DISABLE = 0,
+ /* TRIGGER IMMEDIATELY */
+ SCSR_TOD_READ_TRIG_SEL_IMMEDIATE = 1,
+ /* TRIGGER ON RISING EDGE OF INTERNAL TOD PPS SIGNAL */
+ SCSR_TOD_READ_TRIG_SEL_TODPPS = 2,
+ /* TRIGGER ON RISING EDGE OF SELECTED REFERENCE INPUT */
+ SCSR_TOD_READ_TRIG_SEL_REFCLK = 3,
+ /* TRIGGER ON RISING EDGE OF SELECTED PWM DECODER 1PPS OUTPUT */
+ SCSR_TOD_READ_TRIG_SEL_PWMPPS = 4,
+ SCSR_TOD_READ_TRIG_SEL_RESERVED = 5,
+ /* TRIGGER WHEN WRITE FREQUENCY EVENT OCCURS */
+ SCSR_TOD_READ_TRIG_SEL_WRITEFREQUENCYEVENT = 6,
+ /* TRIGGER ON SELECTED GPIO */
+ SCSR_TOD_READ_TRIG_SEL_GPIO = 7,
+ SCSR_TOD_READ_TRIG_SEL_MAX = SCSR_TOD_READ_TRIG_SEL_GPIO,
+};
+
+/* Values STATUS.DPLL_SYS_STATUS.DPLL_SYS_STATE */
+enum dpll_state {
+ DPLL_STATE_MIN = 0,
+ DPLL_STATE_FREERUN = DPLL_STATE_MIN,
+ DPLL_STATE_LOCKACQ = 1,
+ DPLL_STATE_LOCKREC = 2,
+ DPLL_STATE_LOCKED = 3,
+ DPLL_STATE_HOLDOVER = 4,
+ DPLL_STATE_OPEN_LOOP = 5,
+ DPLL_STATE_MAX = DPLL_STATE_OPEN_LOOP,
+};
+
+/* 4.8.7 only */
+enum scsr_tod_write_trig_sel {
+ SCSR_TOD_WR_TRIG_SEL_DISABLE = 0,
+ SCSR_TOD_WR_TRIG_SEL_IMMEDIATE = 1,
+ SCSR_TOD_WR_TRIG_SEL_REFCLK = 2,
+ SCSR_TOD_WR_TRIG_SEL_PWMPPS = 3,
+ SCSR_TOD_WR_TRIG_SEL_TODPPS = 4,
+ SCSR_TOD_WR_TRIG_SEL_SYNCFOD = 5,
+ SCSR_TOD_WR_TRIG_SEL_GPIO = 6,
+ SCSR_TOD_WR_TRIG_SEL_MAX = SCSR_TOD_WR_TRIG_SEL_GPIO,
+};
+
+/* 4.8.7 only */
+enum scsr_tod_write_type_sel {
+ SCSR_TOD_WR_TYPE_SEL_ABSOLUTE = 0,
+ SCSR_TOD_WR_TYPE_SEL_DELTA_PLUS = 1,
+ SCSR_TOD_WR_TYPE_SEL_DELTA_MINUS = 2,
+ SCSR_TOD_WR_TYPE_SEL_MAX = SCSR_TOD_WR_TYPE_SEL_DELTA_MINUS,
+};
+#endif
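
The TOD command registers above combine a trigger-selection nibble (bits [3:0]) with a mode bit (bit 4). A minimal sketch of composing the TOD_READ_PRIMARY_CMD byte follows; only the register offsets and bit macros come from this header, the surrounding function and any register-write helper are assumptions.

#include <linux/types.h>

/* Illustrative only: build the command byte for an immediately triggered
 * TOD read.  The trigger source occupies bits [3:0]; TOD_READ_TRIGGER_MODE
 * (bit 4) selects the trigger mode and is left at its reset value here.
 */
static inline u8 example_tod_read_cmd(void)
{
	return (SCSR_TOD_READ_TRIG_SEL_IMMEDIATE & TOD_READ_TRIGGER_MASK)
		<< TOD_READ_TRIGGER_SHIFT;
}

/* The byte would then be written at TOD_READ_PRIMARY_0 + TOD_READ_PRIMARY_CMD
 * through whatever register-access helper the consuming driver provides. */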
diff --git a/include/linux/mfd/rsmu.h b/include/linux/mfd/rsmu.h
new file mode 100644
index 000000000000..f8a4ba8dcc9d
--- /dev/null
+++ b/include/linux/mfd/rsmu.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Multi-function driver for the IDT ClockMatrix(TM) and 82p33xxx families of
+ * timing and synchronization devices.
+ *
+ * Copyright (C) 2019 Integrated Device Technology, Inc., a Renesas Company.
+ */
+
+#ifndef __LINUX_MFD_RSMU_H
+#define __LINUX_MFD_RSMU_H
+
+/* We only support ClockMatrix and Sabre for now */
+enum rsmu_type {
+ RSMU_CM = 0,
+ RSMU_SABRE = 1,
+ RSMU_NONE = 2,
+};
+
+/**
+ *
+ * struct rsmu_pdata - platform data structure for MFD cell devices.
+ *
+ * @lock: Mutex used by cell devices to make sure a series of requests
+ * are not interrupted.
+ *
+ * @type: RSMU device type.
+ *
+ * @index: Device index.
+ */
+struct rsmu_pdata {
+ enum rsmu_type type;
+ struct mutex *lock;
+ u8 index;
+};
+
+/**
+ * NOTE: the functions below are not intended for use outside
+ * of the IDT synchronization management unit drivers
+ */
+extern int rsmu_write(struct device *dev, u16 reg, u8 *buf, u16 size);
+extern int rsmu_read(struct device *dev, u16 reg, u8 *buf, u16 size);
+#endif /* __LINUX_MFD_RSMU_H */
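
A minimal sketch of how an MFD cell driver might use the accessors above, taking the shared mutex around the access; the device pointer and register offset are illustrative assumptions, only rsmu_read() and struct rsmu_pdata come from this header.

#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/mfd/rsmu.h>

/* Read one byte through the shared MFD accessor (sketch only). */
static int example_rsmu_read_byte(struct device *mfd_dev,
				  struct rsmu_pdata *pdata,
				  u16 reg, u8 *val)
{
	int err;

	mutex_lock(pdata->lock);	/* serialize multi-register sequences */
	err = rsmu_read(mfd_dev, reg, val, 1);
	mutex_unlock(pdata->lock);

	return err;
}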
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index 29aa50711626..53840a361b5a 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -166,6 +166,11 @@ struct mmc_request {
struct mmc_card;
+#if IS_ENABLED(CONFIG_MMC_PSTORE)
+void mmc_wait_for_pstore_req(struct mmc_host *host, struct mmc_request *mrq);
+int mmc_claim_host_async(struct mmc_host *host);
+#endif
+
void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq);
int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd,
int retries);
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 40d7e98fc990..9d4e518f69f7 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -173,6 +173,18 @@ struct mmc_host_ops {
*/
int (*multi_io_quirk)(struct mmc_card *card,
unsigned int direction, int blk_size);
+
+#if IS_ENABLED(CONFIG_MMC_PSTORE)
+ /*
+ * The following two APIs are introduced to support mmcpstore
+ * functionality: a cleanup API to terminate ongoing and pending
+ * requests before posting a panic write, and a polling API to
+ * ensure that the write succeeds before the kernel dies.
+ */
+ void (*req_cleanup_pending)(struct mmc_host *host);
+ int (*req_completion_poll)(struct mmc_host *host,
+ unsigned long timeout);
+#endif
};
struct mmc_cqe_ops {
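
A sketch of how a host controller driver might wire up the two new callbacks; every my_host_* helper below is hypothetical, and the real abort/poll logic is controller specific.

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/mmc/host.h>

/* Placeholder for a controller-specific "transfer finished?" check. */
static bool my_host_xfer_done(struct mmc_host *host)
{
	return true;	/* a real driver reads its controller status here */
}

#if IS_ENABLED(CONFIG_MMC_PSTORE)
static void my_host_req_cleanup_pending(struct mmc_host *host)
{
	/* controller specific: abort/flush queued and in-flight requests
	 * so the panic write can be issued immediately */
}

static int my_host_req_completion_poll(struct mmc_host *host,
				       unsigned long timeout)
{
	while (!my_host_xfer_done(host)) {
		if (!timeout--)
			return -ETIMEDOUT;
		udelay(1);
	}
	return 0;
}
#endif

static const struct mmc_host_ops my_host_ops = {
	/* ...regular callbacks elided... */
#if IS_ENABLED(CONFIG_MMC_PSTORE)
	.req_cleanup_pending	= my_host_req_cleanup_pending,
	.req_completion_poll	= my_host_req_completion_poll,
#endif
};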
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index 60bac2c0ec45..b5ea207fc7c4 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -387,6 +387,7 @@ struct spi_nor {
} dirmap;
void *priv;
+ bool pstore;
};
static inline void spi_nor_set_flash_node(struct spi_nor *nor,
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h
index 5ba475ca9078..096394200a74 100644
--- a/include/linux/pci-acpi.h
+++ b/include/linux/pci-acpi.h
@@ -10,6 +10,7 @@
#define _PCI_ACPI_H_
#include <linux/acpi.h>
+#include <linux/pci.h>
#ifdef CONFIG_ACPI
extern acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev);
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 08725a262f32..fe5ae7e5f24a 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -137,6 +137,7 @@ typedef enum {
PHY_INTERFACE_MODE_TRGMII,
PHY_INTERFACE_MODE_1000BASEX,
PHY_INTERFACE_MODE_2500BASEX,
+ PHY_INTERFACE_MODE_2500BASET,
PHY_INTERFACE_MODE_RXAUI,
PHY_INTERFACE_MODE_XAUI,
/* 10GBASE-R, XFI, SFI - single lane 10G Serdes */
@@ -144,6 +145,8 @@ typedef enum {
PHY_INTERFACE_MODE_USXGMII,
/* 10GBASE-KR - with Clause 73 AN */
PHY_INTERFACE_MODE_10GKR,
+ /* 5GBASE-KR - Single lane 5G Serdes */
+ PHY_INTERFACE_MODE_5GKR,
PHY_INTERFACE_MODE_MAX,
} phy_interface_t;
@@ -207,6 +210,8 @@ static inline const char *phy_modes(phy_interface_t interface)
return "1000base-x";
case PHY_INTERFACE_MODE_2500BASEX:
return "2500base-x";
+ case PHY_INTERFACE_MODE_2500BASET:
+ return "2500base-t";
case PHY_INTERFACE_MODE_RXAUI:
return "rxaui";
case PHY_INTERFACE_MODE_XAUI:
@@ -217,6 +222,8 @@ static inline const char *phy_modes(phy_interface_t interface)
return "usxgmii";
case PHY_INTERFACE_MODE_10GKR:
return "10gbase-kr";
+ case PHY_INTERFACE_MODE_5GKR:
+ return "5gbase-kr";
default:
return "unknown";
}
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index acbf1875ad50..87022f5dfa74 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2802,7 +2802,26 @@ void skb_queue_purge(struct sk_buff_head *list);
unsigned int skb_rbtree_purge(struct rb_root *root);
-void *netdev_alloc_frag(unsigned int fragsz);
+void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask);
+
+/**
+ * netdev_alloc_frag - allocate a page fragment
+ * @fragsz: fragment size
+ *
+ * Allocates a frag from a page for receive buffer.
+ * Uses GFP_ATOMIC allocations.
+ */
+static inline void *netdev_alloc_frag(unsigned int fragsz)
+{
+ return __netdev_alloc_frag_align(fragsz, ~0u);
+}
+
+static inline void *netdev_alloc_frag_align(unsigned int fragsz,
+ unsigned int align)
+{
+ WARN_ON_ONCE(!is_power_of_2(align));
+ return __netdev_alloc_frag_align(fragsz, -align);
+}
struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
gfp_t gfp_mask);
@@ -2861,7 +2880,20 @@ static inline void skb_free_frag(void *addr)
page_frag_free(addr);
}
-void *napi_alloc_frag(unsigned int fragsz);
+void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask);
+
+static inline void *napi_alloc_frag(unsigned int fragsz)
+{
+ return __napi_alloc_frag_align(fragsz, ~0u);
+}
+
+static inline void *napi_alloc_frag_align(unsigned int fragsz,
+ unsigned int align)
+{
+ WARN_ON_ONCE(!is_power_of_2(align));
+ return __napi_alloc_frag_align(fragsz, -align);
+}
+
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
unsigned int length, gfp_t gfp_mask);
static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
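
The align_mask trick above relies on -align being equal to ~(align - 1) for a power-of-two alignment, so page_frag_alloc_align() can round the fragment offset down with a single AND, while ~0u leaves the offset untouched. A compile-time sketch of the identity:

#include <linux/build_bug.h>

static inline void netdev_frag_align_demo(void)
{
	/* for power-of-two align, -align == ~(align - 1) ... */
	BUILD_BUG_ON((unsigned int)-64 != ~63u);
	/* ... so masking rounds an offset down to the requested alignment */
	BUILD_BUG_ON((0x1234u & (unsigned int)-64) != 0x1200u);
}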
diff --git a/include/linux/soc/marvell/llc.h b/include/linux/soc/marvell/llc.h
new file mode 100644
index 000000000000..6983e897445b
--- /dev/null
+++ b/include/linux/soc/marvell/llc.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTx2 LLC driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MARVELL_LLC_H__
+#define __MARVELL_LLC_H__
+
+int octeontx2_llc_unlock(phys_addr_t addr, int size);
+int octeontx2_llc_lock(phys_addr_t addr, int size);
+
+#endif
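
A sketch of pinning a buffer's lines into the last-level cache with the helpers above and releasing them again; virt_to_phys() is used only for illustration, a real caller would pass whatever physical range it manages.

#include <linux/io.h>
#include <linux/soc/marvell/llc.h>

static int example_llc_pin(void *buf, int size)
{
	int err;

	err = octeontx2_llc_lock(virt_to_phys(buf), size);
	if (err)
		return err;

	/* ... use the latency-critical buffer ... */

	return octeontx2_llc_unlock(virt_to_phys(buf), size);
}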
diff --git a/include/linux/soc/marvell/octeontx2/asm.h b/include/linux/soc/marvell/octeontx2/asm.h
new file mode 100644
index 000000000000..602da9d443bb
--- /dev/null
+++ b/include/linux/soc/marvell/octeontx2/asm.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (C) 2020 Marvell.
+ */
+
+#ifndef __SOC_OTX2_ASM_H
+#define __SOC_OTX2_ASM_H
+
+#if defined(CONFIG_ARM64)
+/*
+ * otx2_lmt_flush is used for the LMT store operation.
+ * On the OcteonTX2 platform, CPT instruction enqueue and
+ * NIX packet send are only possible via LMTST
+ * operations, which use the LDEOR instruction targeting
+ * the coprocessor address.
+ */
+#define otx2_lmt_flush(ioaddr) \
+({ \
+ u64 result = 0; \
+ __asm__ volatile(".cpu generic+lse\n" \
+ "ldeor xzr, %x[rf], [%[rs]]" \
+ : [rf]"=r" (result) \
+ : [rs]"r" (ioaddr)); \
+ (result); \
+})
+/*
+ * STEORL store to memory with release semantics.
+ * This will avoid using DMB barrier after each LMTST
+ * operation.
+ */
+#define cn10k_lmt_flush(val, addr) \
+({ \
+ __asm__ volatile(".cpu generic+lse\n" \
+ "steorl %x[rf],[%[rs]]" \
+ : [rf] "+r"(val) \
+ : [rs] "r"(addr)); \
+})
+
+static inline u64 otx2_atomic64_fetch_add(u64 incr, u64 *ptr)
+{
+ u64 result;
+
+ asm volatile (
+ ".cpu generic+lse\n"
+ "ldadda %x[i], %x[r], [%[b]]"
+ : [r] "=r" (result), "+m" (*ptr)
+ : [i] "r" (incr), [b] "r" (ptr)
+ : "memory");
+ return result;
+}
+
+#else
+#define otx2_lmt_flush(ioaddr) ({ 0; })
+#define cn10k_lmt_flush(val, addr) ({ addr = val; })
+#define otx2_atomic64_fetch_add ({0; })
+#endif
+
+#endif /* __SOC_OTX2_ASM_H */
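
A usage sketch for the helpers above, assuming a mapped LMT I/O address and a shared counter; the function and argument names are placeholders, only otx2_atomic64_fetch_add() and otx2_lmt_flush() come from this header.

#include <linux/types.h>
#include <linux/soc/marvell/octeontx2/asm.h>

static inline void example_lmtst_send(u64 *counter, u64 lmt_ioaddr)
{
	u64 old;

	/* LDADDA: bump the shared counter and get the previous value */
	old = otx2_atomic64_fetch_add(1, counter);
	(void)old;

	/* LDEOR to the coprocessor window: this is what actually submits
	 * the queued CPT/NIX descriptors on OcteonTX2 */
	otx2_lmt_flush(lmt_ioaddr);
}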
diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h
index 010d58159887..0ea9f50f36a4 100644
--- a/include/net/flow_offload.h
+++ b/include/net/flow_offload.h
@@ -234,6 +234,8 @@ struct flow_action_entry {
u32 index;
u32 burst;
u64 rate_bytes_ps;
+ u64 burst_pkt;
+ u64 rate_pkt_ps;
u32 mtu;
} police;
struct { /* FLOW_ACTION_CT */
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 1042c449e7db..c01ad564a58f 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -1300,6 +1300,20 @@ static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
}
+struct psched_pktrate {
+ u64 rate_pkts_ps; /* packets per second */
+ u32 mult;
+ u8 shift;
+};
+
+static inline u64 psched_pkt2t_ns(const struct psched_pktrate *r,
+ unsigned int pkt_num)
+{
+ return ((u64)pkt_num * r->mult) >> r->shift;
+}
+
+void psched_ppscfg_precompute(struct psched_pktrate *r, u64 pktrate64);
+
/* Mini Qdisc serves for specific needs of ingress/clsact Qdisc.
* The fast path only needs to access filter list and to update stats
*/
diff --git a/include/net/tc_act/tc_police.h b/include/net/tc_act/tc_police.h
index 6d1e26b709b5..3b48e993bf76 100644
--- a/include/net/tc_act/tc_police.h
+++ b/include/net/tc_act/tc_police.h
@@ -10,10 +10,13 @@ struct tcf_police_params {
s64 tcfp_burst;
u32 tcfp_mtu;
s64 tcfp_mtu_ptoks;
+ s64 tcfp_pkt_burst;
struct psched_ratecfg rate;
bool rate_present;
struct psched_ratecfg peak;
bool peak_present;
+ struct psched_pktrate ppsrate;
+ bool pps_present;
struct rcu_head rcu;
};
@@ -24,6 +27,7 @@ struct tcf_police {
spinlock_t tcfp_lock ____cacheline_aligned_in_smp;
s64 tcfp_toks;
s64 tcfp_ptoks;
+ s64 tcfp_pkttoks;
s64 tcfp_t_c;
};
@@ -107,4 +111,50 @@ static inline u32 tcf_police_tcfp_mtu(const struct tc_action *act)
return params->tcfp_mtu;
}
+static inline u64 tcf_police_rate_pkt_ps(const struct tc_action *act)
+{
+ struct tcf_police *police = to_police(act);
+ struct tcf_police_params *params;
+
+ params = rcu_dereference_bh_rtnl(police->params);
+ return params->ppsrate.rate_pkts_ps;
+}
+
+static inline u32 tcf_police_burst_pkt(const struct tc_action *act)
+{
+ struct tcf_police *police = to_police(act);
+ struct tcf_police_params *params;
+ u32 burst;
+
+ params = rcu_dereference_bh_rtnl(police->params);
+
+ /*
+ * "rate" pkts "burst" nanoseconds
+ * ------------ * -------------------
+ * 1 second 2^6 ticks
+ *
+ * ------------------------------------
+ * NSEC_PER_SEC nanoseconds
+ * ------------------------
+ * 2^6 ticks
+ *
+ * "rate" pkts "burst" nanoseconds 2^6 ticks
+ * = ------------ * ------------------- * ------------------------
+ * 1 second 2^6 ticks NSEC_PER_SEC nanoseconds
+ *
+ * "rate" * "burst"
+ * = ---------------- pkts/nanosecond
+ * NSEC_PER_SEC^2
+ *
+ *
+ * "rate" * "burst"
+ * = ---------------- pkts/second
+ * NSEC_PER_SEC
+ */
+ burst = div_u64(params->tcfp_pkt_burst * params->ppsrate.rate_pkts_ps,
+ NSEC_PER_SEC);
+
+ return burst;
+}
+
#endif /* __NET_TC_POLICE_H */
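
A worked instance of the conversion in tcf_police_burst_pkt(), with illustrative numbers and assuming tcfp_pkt_burst holds the burst window in nanoseconds, as the derivation in the comment above implies.

#include <linux/math64.h>
#include <linux/time64.h>

static inline u32 burst_pkt_demo(void)
{
	u64 rate_pkts_ps = 2000;			/* 2000 packets per second */
	u64 tcfp_pkt_burst = 50 * NSEC_PER_MSEC;	/* 50 ms burst window */

	/* 50,000,000 ns * 2000 pkt/s / 1,000,000,000 ns/s = 100 packets */
	return div_u64(tcfp_pkt_burst * rate_pkts_ps, NSEC_PER_SEC);
}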
diff --git a/include/soc/marvell/armada8k/fw.h b/include/soc/marvell/armada8k/fw.h
new file mode 100644
index 000000000000..e646212a3796
--- /dev/null
+++ b/include/soc/marvell/armada8k/fw.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Marvell International Ltd.
+ */
+
+#ifndef _SOC_MARVELL_ARMADA8K_FW_H
+#define _SOC_MARVELL_ARMADA8K_FW_H
+
+/* FW related definitions */
+#define MV_SIP_DFX 0x82000014
+
+#define MV_SIP_DFX_THERMAL_INIT 1
+#define MV_SIP_DFX_THERMAL_READ 2
+#define MV_SIP_DFX_THERMAL_IS_VALID 3
+#define MV_SIP_DFX_THERMAL_IRQ 4
+#define MV_SIP_DFX_THERMAL_THRESH 5
+#define MV_SIP_DFX_THERMAL_SEL_CHANNEL 6
+
+#define MV_SIP_DFX_SREAD 20
+#define MV_SIP_DFX_SWRITE 21
+
+#endif /* _SOC_MARVELL_ARMADA8K_FW_H */
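
A sketch of issuing one of the DFX thermal sub-commands through the SiP SMC ID above; the register/return-value layout (status in a0, value in a1) is an assumption for illustration, not taken from the firmware interface itself.

#include <linux/arm-smccc.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <soc/marvell/armada8k/fw.h>

static int example_thermal_read(u32 *raw)
{
	struct arm_smccc_res res;

	arm_smccc_smc(MV_SIP_DFX, MV_SIP_DFX_THERMAL_READ,
		      0, 0, 0, 0, 0, 0, &res);
	if (res.a0)
		return -EIO;		/* assumed: non-zero a0 means failure */

	*raw = res.a1;			/* assumed: reading returned in a1 */
	return 0;
}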
diff --git a/include/soc/marvell/octeontx/octeontx_smc.h b/include/soc/marvell/octeontx/octeontx_smc.h
new file mode 100644
index 000000000000..7f98572f2fbb
--- /dev/null
+++ b/include/soc/marvell/octeontx/octeontx_smc.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (C) 2021 Marvell
+ *
+ */
+
+#ifndef _SOC_MRVL_OCTEONTX_SMC_H
+#define _SOC_MRVL_OCTEONTX_SMC_H
+
+#include <linux/errno.h>
+#include <linux/arm-smccc.h>
+#include <asm/cputype.h>
+
+/* Data and defines for SMC call */
+#define OCTEONTX_ARM_SMC_SVC_UID 0xc200ff01
+
+/* This is the expected OCTEONTX response to the SVC UID command */
+/** Check the software version and compatibility of ATF
+ *
+ * The call verifies the ATF instance running on the system.
+ *
+ * @return
+ * 0 (T9x) or 2 (cn10k) on success
+ * error code on failure
+ *
+ */
+static inline int octeontx_soc_check_smc(void)
+{
+#define CPU_MODEL_CN10KX_PART 0xd49
+
+ const int octeontx_svc_uuid[] = {
+ 0x6ff498cf,
+ 0x5a4e9cfa,
+ 0x2f2a3aa4,
+ 0x5945b105,
+ };
+
+ struct arm_smccc_res res;
+
+ /* Is it OCTEONTX on the other side of SMC monitor? */
+ arm_smccc_smc(OCTEONTX_ARM_SMC_SVC_UID, 0, 0, 0, 0, 0, 0, 0, &res);
+ if (res.a0 != octeontx_svc_uuid[0] || res.a1 != octeontx_svc_uuid[1] ||
+ res.a2 != octeontx_svc_uuid[2] || res.a3 != octeontx_svc_uuid[3])
+ return -EPERM;
+
+ if (MIDR_PARTNUM(read_cpuid_id()) == CPU_MODEL_CN10KX_PART)
+ return 2;
+
+ return 0;
+}
+
+static inline bool is_soc_cn10kx(void)
+{
+ if (MIDR_PARTNUM(read_cpuid_id()) == CPU_MODEL_CN10KX_PART)
+ return 1;
+ return 0;
+}
+
+#endif /* _SOC_MRVL_OCTEONTX_SMC_H */
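
A sketch of gating a driver path on the check above; the probe context and the log message are illustrative, the return-value convention (0 for T9x, 2 for cn10k, negative on failure) comes from the header itself.

#include <linux/printk.h>
#include <soc/marvell/octeontx/octeontx_smc.h>

static int example_probe_gate(void)
{
	int ret = octeontx_soc_check_smc();

	if (ret < 0)
		return ret;		/* not running on OcteonTX ATF */

	if (ret == 2 || is_soc_cn10kx())
		pr_info("running on a cn10k SoC\n");

	return 0;
}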
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index b95d3c485d27..33bdc13b81cb 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -1101,10 +1101,15 @@ enum perf_callchain_context {
/**
* PERF_RECORD_AUX::flags bits
*/
-#define PERF_AUX_FLAG_TRUNCATED 0x01 /* record was truncated to fit */
-#define PERF_AUX_FLAG_OVERWRITE 0x02 /* snapshot from overwrite mode */
-#define PERF_AUX_FLAG_PARTIAL 0x04 /* record contains gaps */
-#define PERF_AUX_FLAG_COLLISION 0x08 /* sample collided with another */
+#define PERF_AUX_FLAG_TRUNCATED 0x01 /* record was truncated to fit */
+#define PERF_AUX_FLAG_OVERWRITE 0x02 /* snapshot from overwrite mode */
+#define PERF_AUX_FLAG_PARTIAL 0x04 /* record contains gaps */
+#define PERF_AUX_FLAG_COLLISION 0x08 /* sample collided with another */
+#define PERF_AUX_FLAG_PMU_FORMAT_TYPE_MASK 0xff00 /* PMU specific trace format type */
+
+/* CoreSight PMU AUX buffer formats */
+#define PERF_AUX_FLAG_CORESIGHT_FORMAT_CORESIGHT 0x0000 /* Default for backward compatibility */
+#define PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW 0x0100 /* Raw format of the source */
#define PERF_FLAG_FD_NO_GROUP (1UL << 0)
#define PERF_FLAG_FD_OUTPUT (1UL << 1)
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index 88f4bf0047e7..1a168a326701 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -190,6 +190,8 @@ enum {
TCA_POLICE_PAD,
TCA_POLICE_RATE64,
TCA_POLICE_PEAKRATE64,
+ TCA_POLICE_PKTRATE64,
+ TCA_POLICE_PKTBURST64,
__TCA_POLICE_MAX
#define TCA_POLICE_RESULT TCA_POLICE_RESULT
};
diff --git a/include/uapi/linux/rsmu.h b/include/uapi/linux/rsmu.h
new file mode 100644
index 000000000000..02c9e38e335d
--- /dev/null
+++ b/include/uapi/linux/rsmu.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * Driver for the IDT ClockMatrix(TM) and 82p33xxx families of
+ * timing and synchronization devices.
+ *
+ * Copyright (C) 2019 Integrated Device Technology, Inc., a Renesas Company.
+ */
+
+#ifndef __UAPI_LINUX_RSMU_CDEV_H
+#define __UAPI_LINUX_RSMU_CDEV_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/* Set dpll combomode */
+struct rsmu_combomode {
+ __u8 dpll;
+ __u8 mode;
+};
+
+/* Get dpll state */
+struct rsmu_get_state {
+ __u8 dpll;
+ __u8 state;
+};
+
+/* Get dpll ffo (fractional frequency offset) in ppqt */
+struct rsmu_get_ffo {
+ __u8 dpll;
+ __s64 ffo;
+};
+
+/*
+ * RSMU IOCTL List
+ */
+#define RSMU_MAGIC '?'
+
+/**
+ * @Description
+ * ioctl to set SMU combo mode.
+ *
+ * @Parameters
+ * pointer to struct rsmu_combomode that contains dpll combomode setting
+ */
+#define RSMU_SET_COMBOMODE _IOW(RSMU_MAGIC, 1, struct rsmu_combomode)
+
+/**
+ * @Description
+ * ioctl to get SMU dpll state.
+ *
+ * @Parameters
+ * pointer to struct rsmu_get_state that contains dpll state
+ */
+#define RSMU_GET_STATE _IOR(RSMU_MAGIC, 2, struct rsmu_get_state)
+
+/**
+ * @Description
+ * ioctl to get SMU dpll ffo.
+ *
+ * @Parameters
+ * pointer to struct rsmu_get_ffo that contains dpll ffo in ppqt
+ */
+#define RSMU_GET_FFO _IOR(RSMU_MAGIC, 3, struct rsmu_get_ffo)
+#endif /* __UAPI_LINUX_RSMU_CDEV_H */
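
A userspace sketch exercising the ioctls above; the /dev/rsmu0 node name is an assumption, use whatever node the rsmu character-device driver actually creates.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/rsmu.h>

int main(void)
{
	struct rsmu_get_state gs = { .dpll = 0 };
	int fd = open("/dev/rsmu0", O_RDWR);	/* assumed node name */

	if (fd < 0 || ioctl(fd, RSMU_GET_STATE, &gs) < 0) {
		perror("rsmu");
		return 1;
	}

	printf("dpll0 state: %u\n", gs.state);
	close(fd);
	return 0;
}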
diff --git a/kernel/exit.c b/kernel/exit.c
index d13d67fc5f4e..ac023ce2de46 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -710,6 +710,68 @@ static void check_stack_usage(void)
static inline void check_stack_usage(void) {}
#endif
+#ifdef CONFIG_MRVL_OCTEONTX_EL0_INTR
+struct task_cleanup_handler {
+ void (*handler)(struct task_struct *task);
+ struct list_head list;
+};
+
+static DEFINE_MUTEX(task_cleanup_handlers_mutex);
+static LIST_HEAD(task_cleanup_handlers);
+
+int task_cleanup_handler_add(void (*handler)(struct task_struct *))
+{
+ struct task_cleanup_handler *newhandler;
+
+ newhandler = (struct task_cleanup_handler *)
+ kmalloc(sizeof(struct task_cleanup_handler), GFP_KERNEL);
+ if (newhandler == NULL)
+ return -1;
+ newhandler->handler = handler;
+ mutex_lock(&task_cleanup_handlers_mutex);
+ list_add(&newhandler->list, &task_cleanup_handlers);
+ mutex_unlock(&task_cleanup_handlers_mutex);
+ return 0;
+}
+EXPORT_SYMBOL(task_cleanup_handler_add);
+
+int task_cleanup_handler_remove(void (*handler)(struct task_struct *))
+{
+ struct list_head *pos, *tmppos;
+ struct task_cleanup_handler *curr_task_cleanup_handler;
+ int retval = -1;
+
+ mutex_lock(&task_cleanup_handlers_mutex);
+ list_for_each_safe(pos, tmppos, &task_cleanup_handlers) {
+ curr_task_cleanup_handler
+ = list_entry(pos, struct task_cleanup_handler, list);
+ if (curr_task_cleanup_handler->handler == handler) {
+ list_del(pos);
+ kfree(curr_task_cleanup_handler);
+ retval = 0;
+ }
+ }
+ mutex_unlock(&task_cleanup_handlers_mutex);
+ return retval;
+}
+EXPORT_SYMBOL(task_cleanup_handler_remove);
+
+static void task_cleanup_handlers_call(struct task_struct *task)
+{
+ struct list_head *pos;
+ struct task_cleanup_handler *curr_task_cleanup_handler;
+
+ mutex_lock(&task_cleanup_handlers_mutex);
+ list_for_each(pos, &task_cleanup_handlers) {
+ curr_task_cleanup_handler =
+ list_entry(pos, struct task_cleanup_handler, list);
+ if (curr_task_cleanup_handler->handler != NULL)
+ curr_task_cleanup_handler->handler(task);
+ }
+ mutex_unlock(&task_cleanup_handlers_mutex);
+}
+#endif
+
void __noreturn do_exit(long code)
{
struct task_struct *tsk = current;
@@ -795,6 +857,10 @@ void __noreturn do_exit(long code)
tsk->exit_code = code;
taskstats_exit(tsk, group_dead);
+#ifdef CONFIG_MRVL_OCTEONTX_EL0_INTR
+ task_cleanup_handlers_call(tsk);
+#endif
+
exit_mm();
if (group_dead)
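
A sketch of a module registering one of the per-task cleanup hooks added above; the extern declarations are repeated here only for illustration, the real prototypes live wherever the Marvell EL0-interrupt support declares them.

#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>

extern int task_cleanup_handler_add(void (*handler)(struct task_struct *));
extern int task_cleanup_handler_remove(void (*handler)(struct task_struct *));

static void example_task_teardown(struct task_struct *task)
{
	/* release any per-task hardware state owned by this driver */
	pr_debug("cleaning up after pid %d\n", task->pid);
}

static int __init example_init(void)
{
	return task_cleanup_handler_add(example_task_teardown);
}

static void __exit example_exit(void)
{
	task_cleanup_handler_remove(example_task_teardown);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");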
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f3418edb136b..5c209a331499 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5075,8 +5075,9 @@ void __page_frag_cache_drain(struct page *page, unsigned int count)
}
EXPORT_SYMBOL(__page_frag_cache_drain);
-void *page_frag_alloc(struct page_frag_cache *nc,
- unsigned int fragsz, gfp_t gfp_mask)
+void *page_frag_alloc_align(struct page_frag_cache *nc,
+ unsigned int fragsz, gfp_t gfp_mask,
+ unsigned int align_mask)
{
unsigned int size = PAGE_SIZE;
struct page *page;
@@ -5128,11 +5129,12 @@ refill:
}
nc->pagecnt_bias--;
+ offset &= align_mask;
nc->offset = offset;
return nc->va + offset;
}
-EXPORT_SYMBOL(page_frag_alloc);
+EXPORT_SYMBOL(page_frag_alloc_align);
/*
* Frees a page fragment allocated out of either a compound or order 0 page.
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 48b6438f2a3d..9b47f8ba0972 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -374,29 +374,23 @@ struct napi_alloc_cache {
static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
-static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
+static void *__alloc_frag_align(unsigned int fragsz, gfp_t gfp_mask,
+ unsigned int align_mask)
{
struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
- return page_frag_alloc(&nc->page, fragsz, gfp_mask);
+ return page_frag_alloc_align(&nc->page, fragsz, gfp_mask, align_mask);
}
-void *napi_alloc_frag(unsigned int fragsz)
+void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
{
fragsz = SKB_DATA_ALIGN(fragsz);
- return __napi_alloc_frag(fragsz, GFP_ATOMIC);
+ return __alloc_frag_align(fragsz, GFP_ATOMIC, align_mask);
}
-EXPORT_SYMBOL(napi_alloc_frag);
+EXPORT_SYMBOL(__napi_alloc_frag_align);
-/**
- * netdev_alloc_frag - allocate a page fragment
- * @fragsz: fragment size
- *
- * Allocates a frag from a page for receive buffer.
- * Uses GFP_ATOMIC allocations.
- */
-void *netdev_alloc_frag(unsigned int fragsz)
+void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
{
struct page_frag_cache *nc;
void *data;
@@ -404,15 +398,15 @@ void *netdev_alloc_frag(unsigned int fragsz)
fragsz = SKB_DATA_ALIGN(fragsz);
if (in_irq() || irqs_disabled()) {
nc = this_cpu_ptr(&netdev_alloc_cache);
- data = page_frag_alloc(nc, fragsz, GFP_ATOMIC);
+ data = page_frag_alloc_align(nc, fragsz, GFP_ATOMIC, align_mask);
} else {
local_bh_disable();
- data = __napi_alloc_frag(fragsz, GFP_ATOMIC);
+ data = __alloc_frag_align(fragsz, GFP_ATOMIC, align_mask);
local_bh_enable();
}
return data;
}
-EXPORT_SYMBOL(netdev_alloc_frag);
+EXPORT_SYMBOL(__netdev_alloc_frag_align);
/**
* __netdev_alloc_skb - allocate an skbuff for rx on a specific device
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 8d8452b1cdd4..0fab8de176d2 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -42,6 +42,8 @@ static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = {
[TCA_POLICE_RESULT] = { .type = NLA_U32 },
[TCA_POLICE_RATE64] = { .type = NLA_U64 },
[TCA_POLICE_PEAKRATE64] = { .type = NLA_U64 },
+ [TCA_POLICE_PKTRATE64] = { .type = NLA_U64, .min = 1 },
+ [TCA_POLICE_PKTBURST64] = { .type = NLA_U64, .min = 1 },
};
static int tcf_police_init(struct net *net, struct nlattr *nla,
@@ -61,6 +63,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
bool exists = false;
u32 index;
u64 rate64, prate64;
+ u64 pps, ppsburst;
if (nla == NULL)
return -EINVAL;
@@ -142,6 +145,21 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
}
}
+ if ((tb[TCA_POLICE_PKTRATE64] && !tb[TCA_POLICE_PKTBURST64]) ||
+ (!tb[TCA_POLICE_PKTRATE64] && tb[TCA_POLICE_PKTBURST64])) {
+ NL_SET_ERR_MSG(extack,
+ "Both or neither packet-per-second burst and rate must be provided");
+ err = -EINVAL;
+ goto failure;
+ }
+
+ if (tb[TCA_POLICE_PKTRATE64] && R_tab) {
+ NL_SET_ERR_MSG(extack,
+ "packet-per-second and byte-per-second rate limits not allowed in same action");
+ err = -EINVAL;
+ goto failure;
+ }
+
new = kzalloc(sizeof(*new), GFP_KERNEL);
if (unlikely(!new)) {
err = -ENOMEM;
@@ -183,6 +201,14 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
if (tb[TCA_POLICE_AVRATE])
new->tcfp_ewma_rate = nla_get_u32(tb[TCA_POLICE_AVRATE]);
+ if (tb[TCA_POLICE_PKTRATE64]) {
+ pps = nla_get_u64(tb[TCA_POLICE_PKTRATE64]);
+ ppsburst = nla_get_u64(tb[TCA_POLICE_PKTBURST64]);
+ new->pps_present = true;
+ new->tcfp_pkt_burst = PSCHED_TICKS2NS(ppsburst);
+ psched_ppscfg_precompute(&new->ppsrate, pps);
+ }
+
spin_lock_bh(&police->tcf_lock);
spin_lock_bh(&police->tcfp_lock);
police->tcfp_t_c = ktime_get_ns();
@@ -217,8 +243,8 @@ static int tcf_police_act(struct sk_buff *skb, const struct tc_action *a,
struct tcf_result *res)
{
struct tcf_police *police = to_police(a);
+ s64 now, toks, ppstoks = 0, ptoks = 0;
struct tcf_police_params *p;
- s64 now, toks, ptoks = 0;
int ret;
tcf_lastuse_update(&police->tcf_tm);
@@ -236,7 +262,7 @@ static int tcf_police_act(struct sk_buff *skb, const struct tc_action *a,
}
if (qdisc_pkt_len(skb) <= p->tcfp_mtu) {
- if (!p->rate_present) {
+ if (!p->rate_present && !p->pps_present) {
ret = p->tcfp_result;
goto end;
}
@@ -251,14 +277,23 @@ static int tcf_police_act(struct sk_buff *skb, const struct tc_action *a,
ptoks -= (s64)psched_l2t_ns(&p->peak,
qdisc_pkt_len(skb));
}
- toks += police->tcfp_toks;
- if (toks > p->tcfp_burst)
- toks = p->tcfp_burst;
- toks -= (s64)psched_l2t_ns(&p->rate, qdisc_pkt_len(skb));
- if ((toks|ptoks) >= 0) {
+ if (p->rate_present) {
+ toks += police->tcfp_toks;
+ if (toks > p->tcfp_burst)
+ toks = p->tcfp_burst;
+ toks -= (s64)psched_l2t_ns(&p->rate, qdisc_pkt_len(skb));
+ } else if (p->pps_present) {
+ ppstoks = min_t(s64, now - police->tcfp_t_c, p->tcfp_pkt_burst);
+ ppstoks += police->tcfp_pkttoks;
+ if (ppstoks > p->tcfp_pkt_burst)
+ ppstoks = p->tcfp_pkt_burst;
+ ppstoks -= (s64)psched_pkt2t_ns(&p->ppsrate, 1);
+ }
+ if ((toks | ptoks | ppstoks) >= 0) {
police->tcfp_t_c = now;
police->tcfp_toks = toks;
police->tcfp_ptoks = ptoks;
+ police->tcfp_pkttoks = ppstoks;
spin_unlock_bh(&police->tcfp_lock);
ret = p->tcfp_result;
goto inc_drops;
@@ -331,6 +366,16 @@ static int tcf_police_dump(struct sk_buff *skb, struct tc_action *a,
TCA_POLICE_PAD))
goto nla_put_failure;
}
+ if (p->pps_present) {
+ if (nla_put_u64_64bit(skb, TCA_POLICE_PKTRATE64,
+ police->params->ppsrate.rate_pkts_ps,
+ TCA_POLICE_PAD))
+ goto nla_put_failure;
+ if (nla_put_u64_64bit(skb, TCA_POLICE_PKTBURST64,
+ PSCHED_NS2TICKS(p->tcfp_pkt_burst),
+ TCA_POLICE_PAD))
+ goto nla_put_failure;
+ }
if (nla_put(skb, TCA_POLICE_TBF, sizeof(opt), &opt))
goto nla_put_failure;
if (p->tcfp_result &&
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index b8ffb7e4f696..a36af74c01fd 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -3677,6 +3677,8 @@ int tc_setup_flow_action(struct flow_action *flow_action,
entry->police.burst = tcf_police_burst(act);
entry->police.rate_bytes_ps =
tcf_police_rate_bytes_ps(act);
+ entry->police.burst_pkt = tcf_police_burst_pkt(act);
+ entry->police.rate_pkt_ps = tcf_police_rate_pkt_ps(act);
entry->police.mtu = tcf_police_tcfp_mtu(act);
entry->police.index = act->tcfa_index;
} else if (is_tcf_ct(act)) {
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 5d5391adb667..0a485996c763 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -1383,6 +1383,48 @@ void dev_shutdown(struct net_device *dev)
WARN_ON(timer_pending(&dev->watchdog_timer));
}
+/**
+ * psched_ratecfg_precompute__() - Pre-compute values for reciprocal division
+ * @rate: Rate to compute reciprocal division values of
+ * @mult: Multiplier for reciprocal division
+ * @shift: Shift for reciprocal division
+ *
+ * The multiplier and shift for reciprocal division by rate are stored
+ * in mult and shift.
+ *
+ * The deal here is to replace a divide by a reciprocal one
+ * in fast path (a reciprocal divide is a multiply and a shift)
+ *
+ * Normal formula would be :
+ * time_in_ns = (NSEC_PER_SEC * len) / rate_bps
+ *
+ * We compute mult/shift to use instead :
+ * time_in_ns = (len * mult) >> shift;
+ *
+ * We try to get the highest possible mult value for accuracy,
+ * but have to make sure no overflows will ever happen.
+ *
+ * reciprocal_value() is not used here because it doesn't handle 64-bit values.
+ */
+static void psched_ratecfg_precompute__(u64 rate, u32 *mult, u8 *shift)
+{
+ u64 factor = NSEC_PER_SEC;
+
+ *mult = 1;
+ *shift = 0;
+
+ if (rate <= 0)
+ return;
+
+ for (;;) {
+ *mult = div64_u64(factor, rate);
+ if (*mult & (1U << 31) || factor & (1ULL << 63))
+ break;
+ factor <<= 1;
+ (*shift)++;
+ }
+}
+
void psched_ratecfg_precompute(struct psched_ratecfg *r,
const struct tc_ratespec *conf,
u64 rate64)
@@ -1392,34 +1434,17 @@ void psched_ratecfg_precompute(struct psched_ratecfg *r,
r->mpu = conf->mpu;
r->rate_bytes_ps = max_t(u64, conf->rate, rate64);
r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK);
- r->mult = 1;
- /*
- * The deal here is to replace a divide by a reciprocal one
- * in fast path (a reciprocal divide is a multiply and a shift)
- *
- * Normal formula would be :
- * time_in_ns = (NSEC_PER_SEC * len) / rate_bps
- *
- * We compute mult/shift to use instead :
- * time_in_ns = (len * mult) >> shift;
- *
- * We try to get the highest possible mult value for accuracy,
- * but have to make sure no overflows will ever happen.
- */
- if (r->rate_bytes_ps > 0) {
- u64 factor = NSEC_PER_SEC;
-
- for (;;) {
- r->mult = div64_u64(factor, r->rate_bytes_ps);
- if (r->mult & (1U << 31) || factor & (1ULL << 63))
- break;
- factor <<= 1;
- r->shift++;
- }
- }
+ psched_ratecfg_precompute__(r->rate_bytes_ps, &r->mult, &r->shift);
}
EXPORT_SYMBOL(psched_ratecfg_precompute);
+void psched_ppscfg_precompute(struct psched_pktrate *r, u64 pktrate64)
+{
+ r->rate_pkts_ps = pktrate64;
+ psched_ratecfg_precompute__(r->rate_pkts_ps, &r->mult, &r->shift);
+}
+EXPORT_SYMBOL(psched_ppscfg_precompute);
+
static void mini_qdisc_rcu_func(struct rcu_head *head)
{
}
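
psched_ppscfg_precompute() reuses the same mult/shift reciprocal trick documented above, so the per-packet time in the fast path is a multiply and a shift instead of a 64-bit divide. A small sketch, assuming only the declarations this patch adds to net/sch_generic.h:

#include <net/sch_generic.h>

static inline u64 pps_precompute_demo(void)
{
	struct psched_pktrate r;

	psched_ppscfg_precompute(&r, 1000);	/* 1000 packets per second */

	/* one packet "costs" ~NSEC_PER_SEC / 1000 = ~1,000,000 ns */
	return psched_pkt2t_ns(&r, 1);
}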
diff --git a/tools/include/linux/coresight-pmu.h b/tools/include/linux/coresight-pmu.h
index b0e35eec6499..4ac5c081af93 100644
--- a/tools/include/linux/coresight-pmu.h
+++ b/tools/include/linux/coresight-pmu.h
@@ -10,17 +10,27 @@
#define CORESIGHT_ETM_PMU_NAME "cs_etm"
#define CORESIGHT_ETM_PMU_SEED 0x10
-/* ETMv3.5/PTM's ETMCR config bit */
-#define ETM_OPT_CYCACC 12
-#define ETM_OPT_CTXTID 14
-#define ETM_OPT_TS 28
-#define ETM_OPT_RETSTK 29
+/*
+ * Below are the definitions of the bit offsets for the perf options; they
+ * work as arbitrary values across all ETM versions.
+ *
+ * Most of them originally come from ETMv3.5/PTM's ETMCR config, so
+ * ETMv3.5/PTM doesn't define its ETMCR config bits with an "ETM3_" prefix
+ * and directly uses the macros below as config bits.
+ */
+#define ETM_OPT_CYCACC 12
+#define ETM_OPT_CTXTID 14
+#define ETM_OPT_CTXTID2 15
+#define ETM_OPT_TS 28
+#define ETM_OPT_RETSTK 29
/* ETMv4 CONFIGR programming bits for the ETM OPTs */
#define ETM4_CFG_BIT_CYCACC 4
#define ETM4_CFG_BIT_CTXTID 6
+#define ETM4_CFG_BIT_VMID 7
#define ETM4_CFG_BIT_TS 11
#define ETM4_CFG_BIT_RETSTK 12
+#define ETM4_CFG_BIT_VMID_OPT 15
static inline int coresight_get_trace_id(int cpu)
{
diff --git a/tools/perf/arch/arm/util/cs-etm.c b/tools/perf/arch/arm/util/cs-etm.c
index cad7bf783413..b4885289160e 100644
--- a/tools/perf/arch/arm/util/cs-etm.c
+++ b/tools/perf/arch/arm/util/cs-etm.c
@@ -67,6 +67,7 @@ static int cs_etm_set_context_id(struct auxtrace_record *itr,
char path[PATH_MAX];
int err = -EINVAL;
u32 val;
+ u64 contextid;
ptr = container_of(itr, struct cs_etm_recording, itr);
cs_etm_pmu = ptr->cs_etm_pmu;
@@ -86,25 +87,59 @@ static int cs_etm_set_context_id(struct auxtrace_record *itr,
goto out;
}
+ /* The user has configured PID tracing; respect it. */
+ contextid = evsel->core.attr.config &
+ (BIT(ETM_OPT_CTXTID) | BIT(ETM_OPT_CTXTID2));
+
/*
- * TRCIDR2.CIDSIZE, bit [9-5], indicates whether contextID tracing
- * is supported:
- * 0b00000 Context ID tracing is not supported.
- * 0b00100 Maximum of 32-bit Context ID size.
- * All other values are reserved.
+ * If user doesn't configure the contextid format, parse PMU format and
+ * enable PID tracing according to the "contextid" format bits:
+ *
+ * If bit ETM_OPT_CTXTID is set, trace CONTEXTIDR_EL1;
+ * If bit ETM_OPT_CTXTID2 is set, trace CONTEXTIDR_EL2.
*/
- val = BMVAL(val, 5, 9);
- if (!val || val != 0x4) {
- err = -EINVAL;
- goto out;
+ if (!contextid)
+ contextid = perf_pmu__format_bits(&cs_etm_pmu->format,
+ "contextid");
+
+ if (contextid & BIT(ETM_OPT_CTXTID)) {
+ /*
+ * TRCIDR2.CIDSIZE, bit [9-5], indicates whether contextID
+ * tracing is supported:
+ * 0b00000 Context ID tracing is not supported.
+ * 0b00100 Maximum of 32-bit Context ID size.
+ * All other values are reserved.
+ */
+ val = BMVAL(val, 5, 9);
+ if (!val || val != 0x4) {
+ pr_err("%s: CONTEXTIDR_EL1 isn't supported\n",
+ CORESIGHT_ETM_PMU_NAME);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+ if (contextid & BIT(ETM_OPT_CTXTID2)) {
+ /*
+ * TRCIDR2.VMIDOPT[30:29] != 0 and
+ * TRCIDR2.VMIDSIZE[14:10] == 0b00100 (32bit virtual contextid)
+ * We can't support CONTEXTIDR in VMID if the size of the
+ * virtual context id is < 32bit.
+ * Any value of VMIDSIZE >= 4 (i.e., >= 32 bit) is fine for us.
+ */
+ if (!BMVAL(val, 29, 30) || BMVAL(val, 10, 14) < 4) {
+ pr_err("%s: CONTEXTIDR_EL2 isn't supported\n",
+ CORESIGHT_ETM_PMU_NAME);
+ err = -EINVAL;
+ goto out;
+ }
}
/* All good, let the kernel know */
- evsel->core.attr.config |= (1 << ETM_OPT_CTXTID);
+ evsel->core.attr.config |= contextid;
err = 0;
out:
-
return err;
}
@@ -156,6 +191,10 @@ out:
return err;
}
+#define ETM_SET_OPT_CTXTID (1 << 0)
+#define ETM_SET_OPT_TS (1 << 1)
+#define ETM_SET_OPT_MASK (ETM_SET_OPT_CTXTID | ETM_SET_OPT_TS)
+
static int cs_etm_set_option(struct auxtrace_record *itr,
struct evsel *evsel, u32 option)
{
@@ -169,17 +208,17 @@ static int cs_etm_set_option(struct auxtrace_record *itr,
!cpu_map__has(online_cpus, i))
continue;
- if (option & ETM_OPT_CTXTID) {
+ if (option & ETM_SET_OPT_CTXTID) {
err = cs_etm_set_context_id(itr, evsel, i);
if (err)
goto out;
}
- if (option & ETM_OPT_TS) {
+ if (option & ETM_SET_OPT_TS) {
err = cs_etm_set_timestamp(itr, evsel, i);
if (err)
goto out;
}
- if (option & ~(ETM_OPT_CTXTID | ETM_OPT_TS))
+ if (option & ~(ETM_SET_OPT_MASK))
/* Nothing else is currently supported */
goto out;
}
@@ -406,7 +445,7 @@ static int cs_etm_recording_options(struct auxtrace_record *itr,
evsel__set_sample_bit(cs_etm_evsel, CPU);
err = cs_etm_set_option(itr, cs_etm_evsel,
- ETM_OPT_CTXTID | ETM_OPT_TS);
+ ETM_SET_OPT_CTXTID | ETM_SET_OPT_TS);
if (err)
goto out;
}
@@ -485,7 +524,9 @@ static u64 cs_etmv4_get_config(struct auxtrace_record *itr)
config |= BIT(ETM4_CFG_BIT_TS);
if (config_opts & BIT(ETM_OPT_RETSTK))
config |= BIT(ETM4_CFG_BIT_RETSTK);
-
+ if (config_opts & BIT(ETM_OPT_CTXTID2))
+ config |= BIT(ETM4_CFG_BIT_VMID) |
+ BIT(ETM4_CFG_BIT_VMID_OPT);
return config;
}
@@ -572,7 +613,7 @@ static void cs_etm_get_metadata(int cpu, u32 *offset,
struct auxtrace_record *itr,
struct perf_record_auxtrace_info *info)
{
- u32 increment;
+ u32 increment, nr_trc_params;
u64 magic;
struct cs_etm_recording *ptr =
container_of(itr, struct cs_etm_recording, itr);
@@ -607,6 +648,7 @@ static void cs_etm_get_metadata(int cpu, u32 *offset,
/* How much space was used */
increment = CS_ETMV4_PRIV_MAX;
+ nr_trc_params = CS_ETMV4_PRIV_MAX - CS_ETMV4_TRCCONFIGR;
} else {
magic = __perf_cs_etmv3_magic;
/* Get configuration register */
@@ -624,11 +666,13 @@ static void cs_etm_get_metadata(int cpu, u32 *offset,
/* How much space was used */
increment = CS_ETM_PRIV_MAX;
+ nr_trc_params = CS_ETM_PRIV_MAX - CS_ETM_ETMCR;
}
/* Build generic header portion */
info->priv[*offset + CS_ETM_MAGIC] = magic;
info->priv[*offset + CS_ETM_CPU] = cpu;
+ info->priv[*offset + CS_ETM_NR_TRC_PARAMS] = nr_trc_params;
/* Where the next CPU entry should start from */
*offset += increment;
}
@@ -643,6 +687,8 @@ static int cs_etm_info_fill(struct auxtrace_record *itr,
u64 nr_cpu, type;
struct perf_cpu_map *cpu_map;
struct perf_cpu_map *event_cpus = session->evlist->core.cpus;
+ struct evlist *evlist = session->evlist;
+ struct evsel *evsel, *cs_etm_evsel = NULL;
struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL);
struct cs_etm_recording *ptr =
container_of(itr, struct cs_etm_recording, itr);
@@ -674,11 +720,45 @@ static int cs_etm_info_fill(struct auxtrace_record *itr,
/* First fill out the session header */
info->type = PERF_AUXTRACE_CS_ETM;
- info->priv[CS_HEADER_VERSION_0] = 0;
+ info->priv[CS_HEADER_VERSION] = CS_HEADER_CURRENT_VERSION;
info->priv[CS_PMU_TYPE_CPUS] = type << 32;
info->priv[CS_PMU_TYPE_CPUS] |= nr_cpu;
info->priv[CS_ETM_SNAPSHOT] = ptr->snapshot_mode;
+ /* Find the etm_pmu event from the event list */
+ evlist__for_each_entry(evlist, evsel) {
+ if (evsel->core.attr.type == cs_etm_pmu->type) {
+ cs_etm_evsel = evsel;
+ break;
+ }
+ }
+
+ /* From the etm_pmu event, determine whether the sink supports
+ * formatted trace by reading the sink's FFSR register
+ * exposed through sysfs.
+ */
+ if (cs_etm_evsel) {
+ struct evsel_config_term *term;
+ char path[PATH_MAX], *sink;
+ int ret;
+ u32 val;
+
+ list_for_each_entry(term, &cs_etm_evsel->config_terms, list) {
+ if (term->type != EVSEL__CONFIG_TERM_DRV_CFG)
+ continue;
+
+ sink = term->val.str;
+ snprintf(path, PATH_MAX, "sink_%s/mgmt/ffsr", sink);
+ ret = perf_pmu__scan_file(cs_etm_pmu, path, "%x", &val);
+ if (ret != 1) {
+ pr_err("%s: can't read file %s\n",
+ CORESIGHT_ETM_PMU_NAME, path);
+ break;
+ }
+ info->priv[CS_SINK_FORMATTED] = val & (1 << 4) ? 0 : 1;
+ }
+ }
+
offset = CS_ETM_SNAPSHOT + 1;
for (i = 0; i < cpu__max_cpu() && offset < priv_size; i++)
diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
index cd007cc9c283..4052c9ce6e2f 100644
--- a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
+++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
@@ -6,6 +6,7 @@
* Author: Mathieu Poirier <mathieu.poirier@linaro.org>
*/
+#include <linux/coresight-pmu.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/zalloc.h>
@@ -419,19 +420,10 @@ cs_etm_decoder__buffer_range(struct cs_etm_queue *etmq,
packet->last_instr_subtype = elem->last_i_subtype;
packet->last_instr_cond = elem->last_instr_cond;
- switch (elem->last_i_type) {
- case OCSD_INSTR_BR:
- case OCSD_INSTR_BR_INDIRECT:
+ if (elem->last_i_type == OCSD_INSTR_BR || elem->last_i_type == OCSD_INSTR_BR_INDIRECT)
packet->last_instr_taken_branch = elem->last_instr_exec;
- break;
- case OCSD_INSTR_ISB:
- case OCSD_INSTR_DSB_DMB:
- case OCSD_INSTR_WFI_WFE:
- case OCSD_INSTR_OTHER:
- default:
+ else
packet->last_instr_taken_branch = false;
- break;
- }
packet->last_instr_size = elem->last_instr_sz;
@@ -500,13 +492,42 @@ cs_etm_decoder__set_tid(struct cs_etm_queue *etmq,
const ocsd_generic_trace_elem *elem,
const uint8_t trace_chan_id)
{
- pid_t tid;
+ pid_t tid = -1;
+ static u64 pid_fmt;
+ int ret;
+
+ /*
+ * As all the ETMs run at the same exception level, the system should
+ * have the same PID format across CPUs. So cache the PID format
+ * and reuse it for sequential decoding.
+ */
+ if (!pid_fmt) {
+ ret = cs_etm__get_pid_fmt(trace_chan_id, &pid_fmt);
+ if (ret)
+ return OCSD_RESP_FATAL_SYS_ERR;
+ }
+
+ /*
+ * Process the PE_CONTEXT packets if we have a valid contextID or VMID.
+ * If the kernel is running at EL2, the PID is traced in CONTEXTIDR_EL2
+ * as the VMID; bit ETM_OPT_CTXTID2 is set in this case.
+ */
+ switch (pid_fmt) {
+ case BIT(ETM_OPT_CTXTID):
+ if (elem->context.ctxt_id_valid)
+ tid = elem->context.context_id;
+ break;
+ case BIT(ETM_OPT_CTXTID2):
+ if (elem->context.vmid_valid)
+ tid = elem->context.vmid;
+ break;
+ default:
+ break;
+ }
- /* Ignore PE_CONTEXT packets that don't have a valid contextID */
- if (!elem->context.ctxt_id_valid)
+ if (tid == -1)
return OCSD_RESP_CONT;
- tid = elem->context.context_id;
if (cs_etm__etmq_set_tid(etmq, tid, trace_chan_id))
return OCSD_RESP_FATAL_SYS_ERR;
@@ -572,6 +593,8 @@ static ocsd_datapath_resp_t cs_etm_decoder__gen_trace_elem_printer(
case OCSD_GEN_TRC_ELEM_EVENT:
case OCSD_GEN_TRC_ELEM_SWTRACE:
case OCSD_GEN_TRC_ELEM_CUSTOM:
+ case OCSD_GEN_TRC_ELEM_SYNC_MARKER:
+ case OCSD_GEN_TRC_ELEM_MEMTRANS:
default:
break;
}
diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
index a2a369e2fbb6..bbfa9f6806d0 100644
--- a/tools/perf/util/cs-etm.c
+++ b/tools/perf/util/cs-etm.c
@@ -7,6 +7,7 @@
*/
#include <linux/bitops.h>
+#include <linux/coresight-pmu.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/log2.h>
@@ -64,6 +65,7 @@ struct cs_etm_auxtrace {
u64 **metadata;
u64 kernel_start;
unsigned int pmu_type;
+ unsigned int sink_formatted;
};
struct cs_etm_traceid_queue {
@@ -156,6 +158,47 @@ int cs_etm__get_cpu(u8 trace_chan_id, int *cpu)
return 0;
}
+/*
+ * The returned PID format is presented by two bits:
+ *
+ * Bit ETM_OPT_CTXTID: CONTEXTIDR or CONTEXTIDR_EL1 is traced;
+ * Bit ETM_OPT_CTXTID2: CONTEXTIDR_EL2 is traced.
+ *
+ * It's possible that the two bits ETM_OPT_CTXTID and ETM_OPT_CTXTID2
+ * are enabled at the same time when the session runs on an EL2 kernel.
+ * This means the CONTEXTIDR_EL1 and CONTEXTIDR_EL2 both will be
+ * recorded in the trace data, the tool will selectively use
+ * CONTEXTIDR_EL2 as PID.
+ */
+int cs_etm__get_pid_fmt(u8 trace_chan_id, u64 *pid_fmt)
+{
+ struct int_node *inode;
+ u64 *metadata, val;
+
+ inode = intlist__find(traceid_list, trace_chan_id);
+ if (!inode)
+ return -EINVAL;
+
+ metadata = inode->priv;
+
+ if (metadata[CS_ETM_MAGIC] == __perf_cs_etmv3_magic) {
+ val = metadata[CS_ETM_ETMCR];
+ /* CONTEXTIDR is traced */
+ if (val & BIT(ETM_OPT_CTXTID))
+ *pid_fmt = BIT(ETM_OPT_CTXTID);
+ } else {
+ val = metadata[CS_ETMV4_TRCCONFIGR];
+ /* CONTEXTIDR_EL2 is traced */
+ if (val & (BIT(ETM4_CFG_BIT_VMID) | BIT(ETM4_CFG_BIT_VMID_OPT)))
+ *pid_fmt = BIT(ETM_OPT_CTXTID2);
+ /* CONTEXTIDR_EL1 is traced */
+ else if (val & BIT(ETM4_CFG_BIT_CTXTID))
+ *pid_fmt = BIT(ETM_OPT_CTXTID);
+ }
+
+ return 0;
+}
+
void cs_etm__etmq_set_traceid_queue_timestamp(struct cs_etm_queue *etmq,
u8 trace_chan_id)
{
@@ -449,6 +492,7 @@ static int cs_etm__init_trace_params(struct cs_etm_trace_params *t_params,
static int cs_etm__init_decoder_params(struct cs_etm_decoder_params *d_params,
struct cs_etm_queue *etmq,
+ struct cs_etm_auxtrace *etm,
enum cs_etm_decoder_operation mode)
{
int ret = -EINVAL;
@@ -459,7 +503,7 @@ static int cs_etm__init_decoder_params(struct cs_etm_decoder_params *d_params,
d_params->packet_printer = cs_etm__packet_dump;
d_params->operation = mode;
d_params->data = etmq;
- d_params->formatted = true;
+ d_params->formatted = etm->sink_formatted;
d_params->fsyncs = false;
d_params->hsyncs = false;
d_params->frame_aligned = true;
@@ -494,7 +538,7 @@ static void cs_etm__dump_event(struct cs_etm_auxtrace *etm,
goto out_free;
/* Set decoder parameters to simply print the trace packets */
- if (cs_etm__init_decoder_params(&d_params, NULL,
+ if (cs_etm__init_decoder_params(&d_params, NULL, etm,
CS_ETM_OPERATION_PRINT))
goto out_free;
@@ -735,7 +779,7 @@ static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm)
goto out_free;
/* Set decoder parameters to decode trace packets */
- if (cs_etm__init_decoder_params(&d_params, etmq,
+ if (cs_etm__init_decoder_params(&d_params, etmq, etm,
CS_ETM_OPERATION_DECODE))
goto out_free;
@@ -2435,7 +2479,7 @@ static bool cs_etm__is_timeless_decoding(struct cs_etm_auxtrace *etm)
}
static const char * const cs_etm_global_header_fmts[] = {
- [CS_HEADER_VERSION_0] = " Header version %llx\n",
+ [CS_HEADER_VERSION] = " Header version %llx\n",
[CS_PMU_TYPE_CPUS] = " PMU type/num cpus %llx\n",
[CS_ETM_SNAPSHOT] = " Snapshot %llx\n",
};
@@ -2443,6 +2487,7 @@ static const char * const cs_etm_global_header_fmts[] = {
static const char * const cs_etm_priv_fmts[] = {
[CS_ETM_MAGIC] = " Magic number %llx\n",
[CS_ETM_CPU] = " CPU %lld\n",
+ [CS_ETM_NR_TRC_PARAMS] = " NR_TRC_PARAMS %llx\n",
[CS_ETM_ETMCR] = " ETMCR %llx\n",
[CS_ETM_ETMTRACEIDR] = " ETMTRACEIDR %llx\n",
[CS_ETM_ETMCCER] = " ETMCCER %llx\n",
@@ -2452,6 +2497,7 @@ static const char * const cs_etm_priv_fmts[] = {
static const char * const cs_etmv4_priv_fmts[] = {
[CS_ETM_MAGIC] = " Magic number %llx\n",
[CS_ETM_CPU] = " CPU %lld\n",
+ [CS_ETM_NR_TRC_PARAMS] = " NR_TRC_PARAMS %llx\n",
[CS_ETMV4_TRCCONFIGR] = " TRCCONFIGR %llx\n",
[CS_ETMV4_TRCTRACEIDR] = " TRCTRACEIDR %llx\n",
[CS_ETMV4_TRCIDR0] = " TRCIDR0 %llx\n",
@@ -2461,26 +2507,167 @@ static const char * const cs_etmv4_priv_fmts[] = {
[CS_ETMV4_TRCAUTHSTATUS] = " TRCAUTHSTATUS %llx\n",
};
-static void cs_etm__print_auxtrace_info(__u64 *val, int num)
+static const char * const param_unk_fmt =
+ " Unknown parameter [%d] %llx\n";
+static const char * const magic_unk_fmt =
+ " Magic number Unknown %llx\n";
+
+static int cs_etm__print_cpu_metadata_v0(__u64 *val, int *offset)
{
- int i, j, cpu = 0;
+ int i = *offset, j, nr_params = 0, fmt_offset;
+ __u64 magic;
- for (i = 0; i < CS_HEADER_VERSION_0_MAX; i++)
- fprintf(stdout, cs_etm_global_header_fmts[i], val[i]);
+ /* check magic value */
+ magic = val[i + CS_ETM_MAGIC];
+ if ((magic != __perf_cs_etmv3_magic) &&
+ (magic != __perf_cs_etmv4_magic)) {
+ /* failure - note bad magic value */
+ fprintf(stdout, magic_unk_fmt, magic);
+ return -EINVAL;
+ }
+
+ /* print common header block */
+ fprintf(stdout, cs_etm_priv_fmts[CS_ETM_MAGIC], val[i++]);
+ fprintf(stdout, cs_etm_priv_fmts[CS_ETM_CPU], val[i++]);
+
+ if (magic == __perf_cs_etmv3_magic) {
+ nr_params = CS_ETM_NR_TRC_PARAMS_V0;
+ fmt_offset = CS_ETM_ETMCR;
+ /* after common block, offset format index past NR_PARAMS */
+ for (j = fmt_offset; j < nr_params + fmt_offset; j++, i++)
+ fprintf(stdout, cs_etm_priv_fmts[j], val[i]);
+ } else if (magic == __perf_cs_etmv4_magic) {
+ nr_params = CS_ETMV4_NR_TRC_PARAMS_V0;
+ fmt_offset = CS_ETMV4_TRCCONFIGR;
+ /* after common block, offset format index past NR_PARAMS */
+ for (j = fmt_offset; j < nr_params + fmt_offset; j++, i++)
+ fprintf(stdout, cs_etmv4_priv_fmts[j], val[i]);
+ }
+ *offset = i;
+ return 0;
+}
+
+static int cs_etm__print_cpu_metadata_v1(__u64 *val, int *offset)
+{
+ int i = *offset, j, total_params = 0;
+ __u64 magic;
+
+ magic = val[i + CS_ETM_MAGIC];
+ /* total params to print is NR_PARAMS + common block size for v1 */
+ total_params = val[i + CS_ETM_NR_TRC_PARAMS] + CS_ETM_COMMON_BLK_MAX_V1;
- for (i = CS_HEADER_VERSION_0_MAX; cpu < num; cpu++) {
- if (val[i] == __perf_cs_etmv3_magic)
- for (j = 0; j < CS_ETM_PRIV_MAX; j++, i++)
+ if (magic == __perf_cs_etmv3_magic) {
+ for (j = 0; j < total_params; j++, i++) {
+ /* if newer record - could be excess params */
+ if (j >= CS_ETM_PRIV_MAX)
+ fprintf(stdout, param_unk_fmt, j, val[i]);
+ else
fprintf(stdout, cs_etm_priv_fmts[j], val[i]);
- else if (val[i] == __perf_cs_etmv4_magic)
- for (j = 0; j < CS_ETMV4_PRIV_MAX; j++, i++)
+ }
+ } else if (magic == __perf_cs_etmv4_magic) {
+ for (j = 0; j < total_params; j++, i++) {
+ /* if newer record - could be excess params */
+ if (j >= CS_ETMV4_PRIV_MAX)
+ fprintf(stdout, param_unk_fmt, j, val[i]);
+ else
fprintf(stdout, cs_etmv4_priv_fmts[j], val[i]);
- else
- /* failure.. return */
+ }
+ } else {
+ /* failure - note bad magic value and error out */
+ fprintf(stdout, magic_unk_fmt, magic);
+ return -EINVAL;
+ }
+ *offset = i;
+ return 0;
+}
+
+static void cs_etm__print_auxtrace_info(__u64 *val, int num)
+{
+ int i, cpu = 0, version, err;
+
+ /* bail out early on bad header version */
+ version = val[0];
+ if (version > CS_HEADER_CURRENT_VERSION) {
+ /* failure.. return */
+ fprintf(stdout, " Unknown Header Version = %x, ", version);
+ fprintf(stdout, "Version supported <= %x\n", CS_HEADER_CURRENT_VERSION);
+ return;
+ }
+
+ for (i = 0; i < CS_HEADER_VERSION_MAX; i++)
+ fprintf(stdout, cs_etm_global_header_fmts[i], val[i]);
+
+ for (i = CS_HEADER_VERSION_MAX; cpu < num; cpu++) {
+ if (version == 0)
+ err = cs_etm__print_cpu_metadata_v0(val, &i);
+ else if (version == 1)
+ err = cs_etm__print_cpu_metadata_v1(val, &i);
+ if (err)
return;
}
}
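
To make the version 1 walk above concrete, the following standalone sketch (illustration only, not part of the patch; the constants and sample values are simplified stand-ins for the cs-etm.h definitions) builds a hypothetical v1 priv buffer for one ETMv4 CPU that carries one more parameter than the reader knows about, and prints it with the same unknown-parameter fallback:

#include <inttypes.h>
#include <stdio.h>

/* Simplified stand-ins for the cs-etm.h layout (assumed for illustration). */
#define HDR_WORDS         4   /* version, pmu/cpus, sink_formatted, snapshot */
#define COMMON_WORDS      3   /* magic, cpu, nr_trc_params                   */
#define V4_PARAMS_KNOWN   2   /* params this toy reader understands          */
#define ETMV4_MAGIC       0x4040404040404040ULL

static const char * const v4_fmts[V4_PARAMS_KNOWN] = {
        "  TRCCONFIGR    %" PRIx64 "\n",
        "  TRCTRACEIDR   %" PRIx64 "\n",
};

int main(void)
{
        /* Hypothetical v1 buffer: one ETMv4 cpu carrying 3 params. */
        uint64_t buf[] = {
                1, 0x800000001ULL, 1, 0,       /* global header                */
                ETMV4_MAGIC, 0, 3,             /* magic, cpu, nr_trc_params    */
                0x1000, 0x10, 0xdead,          /* params; the last is "excess" */
        };
        int i = HDR_WORDS;
        uint64_t nr_params = buf[i + 2];
        uint64_t j;

        printf("  Magic number   %" PRIx64 "\n", buf[i]);
        printf("  CPU            %" PRIu64 "\n", buf[i + 1]);
        printf("  NR_TRC_PARAMS  %" PRIu64 "\n", nr_params);
        i += COMMON_WORDS;
        for (j = 0; j < nr_params; j++, i++) {
                if (j >= V4_PARAMS_KNOWN)
                        printf("  Unknown parameter [%" PRIu64 "] %" PRIx64 "\n",
                               j, buf[i]);
                else
                        printf(v4_fmts[j], buf[i]);
        }
        return 0;
}
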
+/*
+ * Read a single cpu parameter block from the auxtrace_info priv block.
+ *
+ * For version 1 there is a per cpu nr_params entry. If we are handling a
+ * version 1 file, then there may be fewer, the same, or more params
+ * indicated by this value than the compile-time number we understand.
+ *
+ * For a version 0 info block, the number of params is fixed, and we need
+ * to fill out the nr_params value in the metadata we create.
+ */
+static u64 *cs_etm__create_meta_blk(u64 *buff_in, int *buff_in_offset,
+ int out_blk_size, int nr_params_v0)
+{
+ u64 *metadata = NULL;
+ int hdr_version;
+ int nr_in_params, nr_out_params, nr_cmn_params;
+ int i, k;
+
+ metadata = zalloc(sizeof(*metadata) * out_blk_size);
+ if (!metadata)
+ return NULL;
+
+ /* read the current block's start index and the header version */
+ i = *buff_in_offset;
+ hdr_version = buff_in[CS_HEADER_VERSION];
+
+ if (!hdr_version) {
+ /* read version 0 info block into a version 1 metadata block */
+ nr_in_params = nr_params_v0;
+ metadata[CS_ETM_MAGIC] = buff_in[i + CS_ETM_MAGIC];
+ metadata[CS_ETM_CPU] = buff_in[i + CS_ETM_CPU];
+ metadata[CS_ETM_NR_TRC_PARAMS] = nr_in_params;
+ /* remaining block params at offset +1 from source */
+ for (k = 0; k < nr_in_params; k++)
+ metadata[CS_ETM_COMMON_BLK_MAX_V1 + k] = buff_in[i + CS_ETM_CPU + 1 + k];
+ /* version 0 has 2 common params */
+ nr_cmn_params = 2;
+ } else {
+ /* read version 1 info block - input and output nr_params may differ */
+ /* version 1 has 3 common params */
+ nr_cmn_params = 3;
+ nr_in_params = buff_in[i + CS_ETM_NR_TRC_PARAMS];
+
+ /* if input has more params than output - skip excess */
+ nr_out_params = nr_in_params + nr_cmn_params;
+ if (nr_out_params > out_blk_size)
+ nr_out_params = out_blk_size;
+
+ for (k = CS_ETM_MAGIC; k < nr_out_params; k++)
+ metadata[k] = buff_in[i + k];
+
+ /* record the actual nr params we copied */
+ metadata[CS_ETM_NR_TRC_PARAMS] = nr_out_params - nr_cmn_params;
+ }
+
+ /* advance the input offset by the number of input params consumed */
+ i += nr_in_params + nr_cmn_params;
+ *buff_in_offset = i;
+ return metadata;
+}
+
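To make the version 0 path's index arithmetic concrete: the intended mapping copies the two v0 common words, inserts the new param-count word, and lands every trace parameter one slot later than in the source. A minimal standalone sketch of that shift (the constants are simplified assumptions, not the real cs-etm.h values):

#include <inttypes.h>
#include <stdio.h>
#include <string.h>

#define V0_COMMON 2     /* magic, cpu                */
#define V1_COMMON 3     /* magic, cpu, nr_trc_params */

/* Copy a v0 per-cpu block carrying nr_params trace params into v1 shape. */
static void v0_to_v1(const uint64_t *in, uint64_t *out, uint64_t nr_params)
{
        out[0] = in[0];                 /* magic                      */
        out[1] = in[1];                 /* cpu                        */
        out[2] = nr_params;             /* newly inserted param count */
        /* every trace param lands one slot later than in the v0 source */
        memcpy(&out[V1_COMMON], &in[V0_COMMON], nr_params * sizeof(*in));
}

int main(void)
{
        const uint64_t v0_blk[] = { 0x4040404040404040ULL, 1, 0x1000, 0x10 };
        uint64_t v1_blk[V1_COMMON + 2] = { 0 };
        unsigned int k;

        v0_to_v1(v0_blk, v1_blk, 2);
        for (k = 0; k < V1_COMMON + 2; k++)
                printf("v1[%u] = %#" PRIx64 "\n", k, v1_blk[k]);
        return 0;
}
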
int cs_etm__process_auxtrace_info(union perf_event *event,
struct perf_session *session)
{
@@ -2492,11 +2679,12 @@ int cs_etm__process_auxtrace_info(union perf_event *event,
int info_header_size;
int total_size = auxtrace_info->header.size;
int priv_size = 0;
- int num_cpu;
- int err = 0, idx = -1;
- int i, j, k;
+ int num_cpu, trcidr_idx;
+ int err = 0;
+ int i, j;
u64 *ptr, *hdr = NULL;
u64 **metadata = NULL;
+ u64 hdr_version;
/*
* sizeof(auxtrace_info_event::type) +
@@ -2512,16 +2700,21 @@ int cs_etm__process_auxtrace_info(union perf_event *event,
/* First the global part */
ptr = (u64 *) auxtrace_info->priv;
- /* Look for version '0' of the header */
- if (ptr[0] != 0)
+ /* Look for the version of the header */
+ hdr_version = ptr[0];
+ if (hdr_version > CS_HEADER_CURRENT_VERSION) {
+ /* print routine will print an error on bad version */
+ if (dump_trace)
+ cs_etm__print_auxtrace_info(auxtrace_info->priv, 0);
return -EINVAL;
+ }
- hdr = zalloc(sizeof(*hdr) * CS_HEADER_VERSION_0_MAX);
+ hdr = zalloc(sizeof(*hdr) * CS_HEADER_VERSION_MAX);
if (!hdr)
return -ENOMEM;
/* Extract header information - see cs-etm.h for format */
- for (i = 0; i < CS_HEADER_VERSION_0_MAX; i++)
+ for (i = 0; i < CS_HEADER_VERSION_MAX; i++)
hdr[i] = ptr[i];
num_cpu = hdr[CS_PMU_TYPE_CPUS] & 0xffffffff;
pmu_type = (unsigned int) ((hdr[CS_PMU_TYPE_CPUS] >> 32) &
@@ -2552,35 +2745,31 @@ int cs_etm__process_auxtrace_info(union perf_event *event,
*/
for (j = 0; j < num_cpu; j++) {
if (ptr[i] == __perf_cs_etmv3_magic) {
- metadata[j] = zalloc(sizeof(*metadata[j]) *
- CS_ETM_PRIV_MAX);
- if (!metadata[j]) {
- err = -ENOMEM;
- goto err_free_metadata;
- }
- for (k = 0; k < CS_ETM_PRIV_MAX; k++)
- metadata[j][k] = ptr[i + k];
+ metadata[j] =
+ cs_etm__create_meta_blk(ptr, &i,
+ CS_ETM_PRIV_MAX,
+ CS_ETM_NR_TRC_PARAMS_V0);
/* The traceID is our handle */
- idx = metadata[j][CS_ETM_ETMTRACEIDR];
- i += CS_ETM_PRIV_MAX;
+ trcidr_idx = CS_ETM_ETMTRACEIDR;
+
} else if (ptr[i] == __perf_cs_etmv4_magic) {
- metadata[j] = zalloc(sizeof(*metadata[j]) *
- CS_ETMV4_PRIV_MAX);
- if (!metadata[j]) {
- err = -ENOMEM;
- goto err_free_metadata;
- }
- for (k = 0; k < CS_ETMV4_PRIV_MAX; k++)
- metadata[j][k] = ptr[i + k];
+ metadata[j] =
+ cs_etm__create_meta_blk(ptr, &i,
+ CS_ETMV4_PRIV_MAX,
+ CS_ETMV4_NR_TRC_PARAMS_V0);
/* The traceID is our handle */
- idx = metadata[j][CS_ETMV4_TRCTRACEIDR];
- i += CS_ETMV4_PRIV_MAX;
+ trcidr_idx = CS_ETMV4_TRCTRACEIDR;
+ }
+
+ if (!metadata[j]) {
+ err = -ENOMEM;
+ goto err_free_metadata;
}
/* Get an RB node for this CPU */
- inode = intlist__findnew(traceid_list, idx);
+ inode = intlist__findnew(traceid_list, metadata[j][trcidr_idx]);
/* Something went wrong, no need to continue */
if (!inode) {
@@ -2601,7 +2790,7 @@ int cs_etm__process_auxtrace_info(union perf_event *event,
}
/*
- * Each of CS_HEADER_VERSION_0_MAX, CS_ETM_PRIV_MAX and
+ * Each of CS_HEADER_VERSION_MAX, CS_ETM_PRIV_MAX and
* CS_ETMV4_PRIV_MAX mark how many double words are in the
* global metadata, and each cpu's metadata respectively.
* The following tests if the correct number of double words was
@@ -2628,6 +2817,7 @@ int cs_etm__process_auxtrace_info(union perf_event *event,
etm->num_cpu = num_cpu;
etm->pmu_type = pmu_type;
+ etm->sink_formatted = (hdr[CS_SINK_FORMATTED] & 0x1);
etm->snapshot_mode = (hdr[CS_ETM_SNAPSHOT] != 0);
etm->metadata = metadata;
etm->auxtrace_type = auxtrace_info->type;
@@ -2703,6 +2893,12 @@ err_free_traceid_list:
intlist__delete(traceid_list);
err_free_hdr:
zfree(&hdr);
-
+ /*
+ * At this point, at a minimum we have a valid header. Dump the rest of
+ * the info section - the print routines will error out on structural
+ * issues.
+ */
+ if (dump_trace)
+ cs_etm__print_auxtrace_info(auxtrace_info->priv, num_cpu);
return err;
}
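
The double-word accounting noted above is what the priv size test checks. As a rough sketch of how a version 1 block's expected size adds up (a hypothetical helper, assuming the layout described in cs-etm.h; it is not the patch's own code):

#include <inttypes.h>
#include <stdio.h>

/*
 * Hypothetical helper: expected priv size in bytes of a version 1 info
 * block - a fixed global header, then per cpu a common block plus that
 * cpu's NR_TRC_PARAMS trace parameters.
 */
static size_t v1_expected_priv_size(const uint64_t *priv, int num_cpu,
                                    int hdr_words, int common_words)
{
        size_t words = hdr_words;
        int cpu;

        for (cpu = 0; cpu < num_cpu; cpu++) {
                /* NR_TRC_PARAMS is the third word of each per-cpu block */
                uint64_t nr_params = priv[words + 2];

                words += common_words + (size_t)nr_params;
        }
        return words * sizeof(uint64_t);
}

int main(void)
{
        /* one ETMv4-style cpu block with 7 params after a 4-word header */
        const uint64_t sample[] = {
                1, 0x800000001ULL, 1, 0,
                0x4040404040404040ULL, 0, 7, 1, 2, 3, 4, 5, 6, 7,
        };

        printf("expected priv size: %zu bytes\n",
               v1_expected_priv_size(sample, 1, 4, 3));
        return 0;
}
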
diff --git a/tools/perf/util/cs-etm.h b/tools/perf/util/cs-etm.h
index 4ad925d6d799..41a798d90d91 100644
--- a/tools/perf/util/cs-etm.h
+++ b/tools/perf/util/cs-etm.h
@@ -17,23 +17,38 @@ struct perf_session;
*/
enum {
/* Starting with 0x0 */
- CS_HEADER_VERSION_0,
+ CS_HEADER_VERSION,
/* PMU->type (32 bit), total # of CPUs (32 bit) */
CS_PMU_TYPE_CPUS,
+ CS_SINK_FORMATTED,
CS_ETM_SNAPSHOT,
- CS_HEADER_VERSION_0_MAX,
+ CS_HEADER_VERSION_MAX,
};
+/*
+ * Update the version for the new format.
+ *
+ * The new version 1 format adds a param count to the per cpu metadata.
+ * This makes it easy to add new metadata parameters.
+ * It requires that new params are always added after the current ones.
+ * It also allows a client reader to handle file versions that differ by
+ * checking the number of params in the file against the number expected.
+ */
+#define CS_HEADER_CURRENT_VERSION 1
+
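As a purely hypothetical illustration of the append-only rule described above (the EXAMPLE_* names are invented, not from cs-etm.h): appending a new parameter keeps the existing ones at the same indices, so no version bump is needed - the per-cpu NR_TRC_PARAMS value tells readers how many parameters each file actually carries.

enum {
        EXAMPLE_COMMON_BLK = 3,         /* stands in for CS_ETM_COMMON_BLK_MAX_V1 */
        EXAMPLE_PARAM_A = EXAMPLE_COMMON_BLK,
        EXAMPLE_PARAM_B,
        EXAMPLE_PARAM_C,                /* newly appended - older readers print it
                                         * via the unknown-parameter fallback     */
        EXAMPLE_PRIV_MAX,
};
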
/* Beginning of header common to both ETMv3 and V4 */
enum {
CS_ETM_MAGIC,
CS_ETM_CPU,
+ /* Number of trace config params in following ETM specific block */
+ CS_ETM_NR_TRC_PARAMS,
+ CS_ETM_COMMON_BLK_MAX_V1,
};
/* ETMv3/PTM metadata */
enum {
/* Dynamic, configurable parameters */
- CS_ETM_ETMCR = CS_ETM_CPU + 1,
+ CS_ETM_ETMCR = CS_ETM_COMMON_BLK_MAX_V1,
CS_ETM_ETMTRACEIDR,
/* RO, taken from sysFS */
CS_ETM_ETMCCER,
@@ -41,10 +56,13 @@ enum {
CS_ETM_PRIV_MAX,
};
+/* Define the fixed version 0 length - allows the new format reader to read old files. */
+#define CS_ETM_NR_TRC_PARAMS_V0 (CS_ETM_ETMIDR - CS_ETM_ETMCR + 1)
+
/* ETMv4 metadata */
enum {
/* Dynamic, configurable parameters */
- CS_ETMV4_TRCCONFIGR = CS_ETM_CPU + 1,
+ CS_ETMV4_TRCCONFIGR = CS_ETM_COMMON_BLK_MAX_V1,
CS_ETMV4_TRCTRACEIDR,
/* RO, taken from sysFS */
CS_ETMV4_TRCIDR0,
@@ -55,6 +73,9 @@ enum {
CS_ETMV4_PRIV_MAX,
};
+/* Define the fixed version 0 length - allows the new format reader to read old files. */
+#define CS_ETMV4_NR_TRC_PARAMS_V0 (CS_ETMV4_TRCAUTHSTATUS - CS_ETMV4_TRCCONFIGR + 1)
+
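If dropped into a perf tools source file that includes util/cs-etm.h, the following compile-time checks (a sketch, assuming CS_ETM_ETMIDR and CS_ETMV4_TRCAUTHSTATUS remain the last entries before their respective *_PRIV_MAX values) tie the version 0 counts back to the enums above:

_Static_assert(CS_ETM_COMMON_BLK_MAX_V1 + CS_ETM_NR_TRC_PARAMS_V0 ==
               CS_ETM_PRIV_MAX,
               "ETMv3 v0 param count covers the whole per-cpu block");
_Static_assert(CS_ETM_COMMON_BLK_MAX_V1 + CS_ETMV4_NR_TRC_PARAMS_V0 ==
               CS_ETMV4_PRIV_MAX,
               "ETMv4 v0 param count covers the whole per-cpu block");
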
/*
* ETMv3 exception encoding number:
 * See Embedded Trace Macrocell specification (ARM IHI 0014Q)
@@ -162,7 +183,7 @@ struct cs_etm_packet_queue {
#define BMVAL(val, lsb, msb) ((val & GENMASK(msb, lsb)) >> lsb)
-#define CS_ETM_HEADER_SIZE (CS_HEADER_VERSION_0_MAX * sizeof(u64))
+#define CS_ETM_HEADER_SIZE (CS_HEADER_VERSION_MAX * sizeof(u64))
#define __perf_cs_etmv3_magic 0x3030303030303030ULL
#define __perf_cs_etmv4_magic 0x4040404040404040ULL
@@ -173,6 +194,7 @@ struct cs_etm_packet_queue {
int cs_etm__process_auxtrace_info(union perf_event *event,
struct perf_session *session);
int cs_etm__get_cpu(u8 trace_chan_id, int *cpu);
+int cs_etm__get_pid_fmt(u8 trace_chan_id, u64 *pid_fmt);
int cs_etm__etmq_set_tid(struct cs_etm_queue *etmq,
pid_t tid, u8 trace_chan_id);
bool cs_etm__etmq_is_timeless(struct cs_etm_queue *etmq);