-rw-r--r--  Documentation/devicetree/bindings/arm/coresight.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/edac/marvell-sdei-ghes.txt | 69
-rw-r--r--  Documentation/devicetree/bindings/gpio/gpio-i2c.txt | 32
-rw-r--r--  Documentation/devicetree/bindings/mmc/cavium-mmc.txt | 58
-rw-r--r--  Documentation/devicetree/bindings/perf/arm_smmuv3_pmu.txt | 34
-rw-r--r--  MAINTAINERS | 7
-rw-r--r--  arch/arm64/Kconfig | 35
-rw-r--r--  arch/arm64/include/asm/cpucaps.h | 4
-rw-r--r--  arch/arm64/include/asm/cputype.h | 5
-rw-r--r--  arch/arm64/include/asm/irq.h | 9
-rw-r--r--  arch/arm64/include/asm/mmu_context.h | 6
-rw-r--r--  arch/arm64/kernel/cpu_errata.c | 58
-rw-r--r--  arch/arm64/kernel/entry.S | 3
-rw-r--r--  arch/arm64/mm/context.c | 79
-rw-r--r--  drivers/acpi/apei/ghes.c | 64
-rw-r--r--  drivers/acpi/apei/hest.c | 25
-rw-r--r--  drivers/clk/clk-scmi.c | 10
-rw-r--r--  drivers/clk/clk.c | 113
-rw-r--r--  drivers/crypto/Makefile | 2
-rw-r--r--  drivers/firmware/arm_scmi/clock.c | 17
-rw-r--r--  drivers/firmware/arm_scmi/mailbox.c | 4
-rw-r--r--  drivers/firmware/arm_scmi/perf.c | 23
-rw-r--r--  drivers/gpio/Kconfig | 12
-rw-r--r--  drivers/gpio/Makefile | 1
-rw-r--r--  drivers/gpio/gpio-i2c.c | 206
-rw-r--r--  drivers/gpio/gpio-thunderx.c | 307
-rw-r--r--  drivers/hwtracing/coresight/Makefile | 3
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm-perf.c | 2
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm4x-sysfs.c | 20
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm4x.c | 97
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm4x.h | 6
-rw-r--r--  drivers/hwtracing/coresight/coresight-priv.h | 58
-rw-r--r--  drivers/hwtracing/coresight/coresight-quirks.c | 107
-rw-r--r--  drivers/hwtracing/coresight/coresight-tmc-etr.c | 385
-rw-r--r--  drivers/hwtracing/coresight/coresight-tmc.c | 67
-rw-r--r--  drivers/hwtracing/coresight/coresight-tmc.h | 130
-rw-r--r--  drivers/hwtracing/coresight/coresight.c | 45
-rw-r--r--  drivers/i2c/busses/i2c-octeon-core.c | 24
-rw-r--r--  drivers/i2c/busses/i2c-octeon-core.h | 14
-rw-r--r--  drivers/i2c/busses/i2c-thunderx-pcidrv.c | 6
-rw-r--r--  drivers/iommu/arm-smmu-v3.c | 41
-rw-r--r--  drivers/mailbox/Kconfig | 8
-rw-r--r--  drivers/mailbox/Makefile | 2
-rw-r--r--  drivers/mailbox/mvl_mhu.c | 357
-rw-r--r--  drivers/misc/Kconfig | 8
-rw-r--r--  drivers/misc/Makefile | 2
-rw-r--r--  drivers/misc/otx_bphy_ctr.c | 283
-rw-r--r--  drivers/mmc/host/cavium-octeon.c | 10
-rw-r--r--  drivers/mmc/host/cavium-thunderx.c | 148
-rw-r--r--  drivers/mmc/host/cavium.c | 1025
-rw-r--r--  drivers/mmc/host/cavium.h | 151
-rw-r--r--  drivers/mtd/spi-nor/macronix.c | 2
-rw-r--r--  drivers/mtd/spi-nor/micron-st.c | 5
-rw-r--r--  drivers/net/ethernet/cavium/thunder/Makefile | 10
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c | 36
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 5
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 31
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/Makefile | 4
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx.c | 639
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx.h | 43
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h | 76
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/common.h | 5
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mbox.h | 706
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/npc.h | 152
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h | 313
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/ptp.c | 241
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/ptp.h | 32
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.c | 322
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.h | 249
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c | 375
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c | 544
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c | 1231
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_fixes.c | 1019
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_fixes.h | 21
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 882
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c | 230
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c | 759
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c | 1185
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_ptp.c | 37
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h | 270
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c | 51
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_sso.c | 1322
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h | 50
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_tim.c | 340
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_validation.c | 826
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_validation.h | 72
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/Makefile | 5
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c | 40
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h | 118
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c | 787
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c | 631
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 606
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c | 212
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h | 20
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h | 13
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_smqvf.c | 285
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c | 135
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h | 3
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c | 76
-rw-r--r--  drivers/of/dynamic.c | 10
-rw-r--r--  drivers/pci/controller/Kconfig | 8
-rw-r--r--  drivers/pci/controller/Makefile | 1
-rw-r--r--  drivers/pci/controller/pci-octeontx2-pem.c | 490
-rw-r--r--  drivers/pci/quirks.c | 2
-rw-r--r--  drivers/perf/Kconfig | 2
-rw-r--r--  drivers/perf/arm_smmuv3_pmu.c | 13
-rw-r--r--  drivers/soc/Kconfig | 1
-rw-r--r--  drivers/soc/Makefile | 1
-rw-r--r--  drivers/soc/marvell/Kconfig | 106
-rw-r--r--  drivers/soc/marvell/Makefile | 5
-rw-r--r--  drivers/soc/marvell/mdio_debugfs.c | 119
-rw-r--r--  drivers/soc/marvell/octeontx2-dpi/Makefile | 8
-rw-r--r--  drivers/soc/marvell/octeontx2-dpi/dpi.c | 526
-rw-r--r--  drivers/soc/marvell/octeontx2-dpi/dpi.h | 246
-rw-r--r--  drivers/soc/marvell/octeontx2-ghes/Makefile | 8
-rw-r--r--  drivers/soc/marvell/octeontx2-ghes/otx2-sdei-ghes.c | 605
-rw-r--r--  drivers/soc/marvell/octeontx2-ghes/otx2-sdei-ghes.h | 70
-rw-r--r--  drivers/soc/marvell/octeontx2-rm/Makefile | 11
-rw-r--r--  drivers/soc/marvell/octeontx2-rm/domain_sysfs.c | 830
-rw-r--r--  drivers/soc/marvell/octeontx2-rm/domain_sysfs.h | 18
-rw-r--r--  drivers/soc/marvell/octeontx2-rm/otx2_rm.c | 1581
-rw-r--r--  drivers/soc/marvell/octeontx2-rm/otx2_rm.h | 95
-rw-r--r--  drivers/soc/marvell/octeontx2-rm/quota.c | 192
-rw-r--r--  drivers/soc/marvell/octeontx2-rm/quota.h | 90
-rw-r--r--  drivers/spi/Kconfig | 9
-rw-r--r--  drivers/spi/Makefile | 1
-rw-r--r--  drivers/spi/spi-cavium-thunderx.c | 18
-rw-r--r--  drivers/spi/spi-octeontx2.c | 362
-rw-r--r--  drivers/spi/spi-octeontx2.h | 148
-rw-r--r--  include/acpi/apei.h | 2
-rw-r--r--  include/linux/clk-provider.h | 1
-rw-r--r--  include/linux/coresight.h | 17
-rw-r--r--  include/linux/kernel.h | 6
-rw-r--r--  include/linux/scmi_protocol.h | 2
-rw-r--r--  kernel/exit.c | 66
135 files changed, 24166 insertions, 706 deletions
diff --git a/Documentation/devicetree/bindings/arm/coresight.txt b/Documentation/devicetree/bindings/arm/coresight.txt
index 846f6daae71b..83e99eb4aec3 100644
--- a/Documentation/devicetree/bindings/arm/coresight.txt
+++ b/Documentation/devicetree/bindings/arm/coresight.txt
@@ -117,6 +117,10 @@ its hardware characteristcs.
* arm,scatter-gather: boolean. Indicates that the TMC-ETR can safely
use the SG mode on this system.
+ * cache-lock: Enables cache locking for the allocated trace buffer.
+ As of now, this is supported only for secure buffer allocations
+   on the Marvell OcteonTx2 platform.
+
* Optional property for CATU :
* interrupts : Exactly one SPI may be listed for reporting the address
error
diff --git a/Documentation/devicetree/bindings/edac/marvell-sdei-ghes.txt b/Documentation/devicetree/bindings/edac/marvell-sdei-ghes.txt
new file mode 100644
index 000000000000..97fc8b2a5254
--- /dev/null
+++ b/Documentation/devicetree/bindings/edac/marvell-sdei-ghes.txt
@@ -0,0 +1,69 @@
+* Marvell OcteonTX SoC sdei-ghes node
+
+The sdei-ghes node is defined to describe resources to the sdei-ghes module.
+
+Abstract
+--------
+
+The Generic Hardware Error Source (GHES) allows for non-standard errors to be
+reported to the system (please refer to the ACPI specification).
+
+The standard GHES driver requires the presence of ACPI tables, and accompanying
+kernel ACPI support, which is not available to systems utilizing Device Tree.
+
+The OcteonTX2 sdei-ghes module provides the required Hardware Error Source Table
+(HEST) to the kernel, allowing the GHES driver to load.
+
+Additionally, this module presents GHES devices to the system, which allows
+the firmware (ATF) to report RAS errors.
+
+The following error sources are supported:
+
+ MDC - OcteonTX Memory Diagnostic Controller
+ MCC - OcteonTX Memory Common Controller
+ LMC - OcteonTX Local Memory DDR4 Controller
+
+Device Tree sdei-ghes binding
+-----------------------------
+
+Required properties:
+- compatible : Shall be "marvell,sdei-ghes".
+
+Required properties for mdc subnode:
+- reg : Shall contain three entries, one for each of:
+ - GHES Error Status Address (ACPI 4.0a, sec 17.3.2.6)
+ - GHES Error Status Block (ACPI 4.0a, sec 17.3.2.6.1)
+ - ring buffer for communication with firmware
+
+- event-id : SDEI event ID for receiving firmware notifications
+
+Example
+-------
+
+ sdei-ghes {
+ compatible = "marvell,sdei-ghes";
+ #address-cells = <2>;
+ #size-cells = <1>;
+ memory-region = <&ghes_hest_reserved>;
+ ranges = <0x0 0x00000000 0x0 0x7f020000 0x00100>,
+ <0x0 0x00000100 0x0 0x7f020100 0x00700>,
+ <0x0 0x00000800 0x0 0x7f020800 0x08000>;
+ mdc@0 {
+ reg = <0x0 0x00000000 0x008>,
+ <0x0 0x00000100 0x100>,
+ <0x0 0x00000800 0x800>;
+ event-id = <0x40000000>;
+ };
+ mcc@8 {
+ reg = <0x0 0x00000008 0x008>,
+ <0x0 0x00000200 0x100>,
+ <0x0 0x00001000 0x800>;
+ event-id = <0x40000001>;
+ };
+ lmc@10 {
+ reg = <0x0 0x00000010 0x008>,
+ <0x0 0x00000300 0x100>,
+ <0x0 0x00001800 0x800>;
+ event-id = <0x40000002>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/gpio/gpio-i2c.txt b/Documentation/devicetree/bindings/gpio/gpio-i2c.txt
new file mode 100644
index 000000000000..ad2743bc65ed
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/gpio-i2c.txt
@@ -0,0 +1,32 @@
+Device-Tree bindings for gpio i2c driver
+
+A simple parameterized no-irq of_driven i2c->gpio expander,
+cut down from gpio-pcf857x.c to be totally device-tree driven.
+
+Suitable for any "memory-like" device, where a 1-byte i2c read yields data
+which can safely be written back, possibly with a bit changed, with the
+effect of changing only the output level of that bit's GPIO pin.
+
+Required properties:
+ - compatible = "gpio-i2c"
+ - reg = i2c-address
+ - gpio-controller
+ - #gpio-cells = <1>;
+ - ngpios = number of pins modeled
+
+Example nodes:
+
+&i2c_bus {
+ gpio1: cpld@6c {
+ compatible = "gpio-i2c";
+ gpio-controller;
+ reg = <0x6c>;
+ #gpio-cells = <1>;
+ ngpios = <160>; // 8bits for each reg 0..0x13
+ };
+};
+
+_some_device_ {
+ // pin controlled by bitmask 0x2 of byte 0x4 of gpio1
+ enable-gpios = <&gpio1 33>;
+};
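
For reference, the flat pin number used in a gpio specifier maps onto the
device's registers exactly as the driver does it; a minimal sketch (the helper
name is illustrative only and not part of the binding or the driver):

	/* pin 33 -> byte 33 >> 3 = 0x4, bit 33 & 7 = 1, i.e. bitmask 0x2 */
	static inline void pin_to_reg(unsigned int pin,
				      unsigned int *byte, unsigned int *mask)
	{
		*byte = pin >> 3;		/* 8 pins per 1-byte register */
		*mask = 1u << (pin & 7);	/* bit within that register */
	}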
diff --git a/Documentation/devicetree/bindings/mmc/cavium-mmc.txt b/Documentation/devicetree/bindings/mmc/cavium-mmc.txt
index 1433e6201dff..21ed6d4fedcc 100644
--- a/Documentation/devicetree/bindings/mmc/cavium-mmc.txt
+++ b/Documentation/devicetree/bindings/mmc/cavium-mmc.txt
@@ -17,16 +17,56 @@ Required properties:
- clocks : phandle
Optional properties:
- - for cd, bus-width and additional generic mmc parameters
- please refer to mmc.txt within this directory
+ - for cd, bus-width, vmmc-supply, vqmmc-supply, and additional generic
+ mmc parameters please refer to mmc.txt within this directory
- cavium,cmd-clk-skew : number of coprocessor clocks before sampling command
- cavium,dat-clk-skew : number of coprocessor clocks before sampling data
Deprecated properties:
-- spi-max-frequency : use max-frequency instead
-- cavium,bus-max-width : use bus-width instead
-- power-gpios : use vmmc-supply instead
-- cavium,octeon-6130-mmc-slot : use mmc-slot instead
+ - spi-max-frequency : use max-frequency instead
+ - cavium,bus-max-width : use bus-width instead
+ - power-gpios : use vmmc-supply instead
+ - cavium,octeon-6130-mmc-slot : use mmc-slot instead
+
+GPIO control via vmmc-supply & vqmmc-supply:
+ Two types of regulator object can be specified as mmc properties,
+ typically regulator-fixed controlled by GPIO pins.
+
+ Octeon/OcteonTX chips commonly use GPIO8 as an MMC-reset pin.
+ In systems which may boot from MMC, it starts as input, and is gently
+ pulled up/down by board logic to indicate the active sense of the
+ signal. Chip reset then drives the signal in the opposite direction
+ to effect a reset of target devices.
+ Device tree should model this with a vmmc-supply regulator, gated by
+ GPIO8, so GPIO8 is driven in the non-reset direction when MMC devices
+ are probed, and held there until rmmod/shutdown/suspend.
+ This allows a warm reboot to reset the MMC devices.
+
+ Octeon/OcteonTX MMC supports up to 3 mmc slots, but any
+ level-shifting to accommodate different signal voltages is
+ done by external hardware, under control of an optional
+ vqmmc regulator object, typically controlled by GPIO.
+
+ If any mmc-slots have a vqmmc-supply property, it is taken as a warning
+ that we must switch carefully between slots (unless they have the same
+ vqmmc object), tri-stating MMC signals to avoid any transient states
+ as level-shifters are enabled/disabled.
+
+ Even when so-called bi-directional level shifters are used,
+ this technique should be employed when different bus widths are used
+ on different slots: disable the level shifters so that non-selected
+ 4-wide slots do not present a non-uniform impedance across DATA0-7
+ and CMD while 8-wide targets are being accessed.
+
+ Note that it's not possible to specify multiple regulators
+ controlled by the same GPIO pin but with different active states.
+ If one GPIO line is required to switch voltage/routing between
+ different mmc-slots, specify a vqmmc-supply on one slot, but
+ not the other. The regulator_disable call on leaving that slot
+ will implicitly switch the state to support the unmarked slot.
+
+ There's no need to list vqmmc-supply if all the mmc-slots on
+ a board run at the same voltage and have the same width.
Examples:
mmc_1_4: mmc@1,4 {
@@ -40,7 +80,8 @@ Examples:
compatible = "mmc-slot";
reg = <0>;
vmmc-supply = <&mmc_supply_3v3>;
- max-frequency = <42000000>;
+ vqmmc-supply = <&vqmmc_3v3>;
+ max-frequency = <52000000>;
bus-width = <4>;
cap-sd-highspeed;
};
@@ -49,7 +90,8 @@ Examples:
compatible = "mmc-slot";
reg = <1>;
vmmc-supply = <&mmc_supply_3v3>;
- max-frequency = <42000000>;
+ vqmmc-supply = <&vqmmc_1v8>;
+ max-frequency = <100000000>;
bus-width = <8>;
cap-mmc-highspeed;
non-removable;
diff --git a/Documentation/devicetree/bindings/perf/arm_smmuv3_pmu.txt b/Documentation/devicetree/bindings/perf/arm_smmuv3_pmu.txt
new file mode 100644
index 000000000000..39c95d5d03ef
--- /dev/null
+++ b/Documentation/devicetree/bindings/perf/arm_smmuv3_pmu.txt
@@ -0,0 +1,34 @@
+* ARM SMMU-V3 PMU (Performance Monitor Extension)
+
+Required properties:
+
+- compatible: (standard compatible string) should be:
+ "arm,smmu-pmu-v3"
+
+- reg: (standard registers property) physical address and
+ size of the configuration register blocks.
+
+ An SMMU-V3 PMU can have one or more Performance Monitor
+ Counter Groups (PMCG). Each PMCG has one 4K register page
+ and, optionally, one additional 4K page.
+
+- msi-parent: See the generic MSI binding described in
+ devicetree/bindings/interrupt-controller/msi.txt
+ for a description of the msi-parent property.
+Example:
+
+ smmupmcg0@30100000 {
+ compatible = "arm,smmu-pmu-v3";
+
+ /* Single configuration register page */
+ reg = <0x30100000 0x1000>;
+ msi-parent = <&its 0x10>;
+ };
+
+ smmupmcg0@830000100000 {
+ compatible = "arm,smmu-pmu-v3";
+
+ /* Additional optional configuration register page */
+ reg = <0x30100000 0x1000>, <0x30110000 0x1000>;
+ msi-parent = <&its 0x11>;
+ };
diff --git a/MAINTAINERS b/MAINTAINERS
index 80f67140e4f7..5c4192ea3f55 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7118,6 +7118,11 @@ F: drivers/net/wan/pc300too.c
F: drivers/net/wan/pci200syn.c
F: drivers/net/wan/wanxl*
+GENERIC I2C GPIO DRIVER
+M: Peter Swain <pswain@marvell.com>
+S: Maintained
+F: drivers/gpio/gpio-i2c.c
+
GENERIC INCLUDE/ASM HEADER FILES
M: Arnd Bergmann <arnd@arndb.de>
L: linux-arch@vger.kernel.org
@@ -10147,7 +10152,7 @@ M: Arnaud Ebalard <arno@natisbad.org>
M: Srujana Challa <schalla@marvell.com>
L: linux-crypto@vger.kernel.org
S: Maintained
-F: drivers/crypto/marvell/
+F: drivers/crypto/marvell/cesa/
MARVELL GIGABIT ETHERNET DRIVERS (skge/sky2)
M: Mirko Lindner <mlindner@marvell.com>
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 5d513f461957..8f4d68c5c88f 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -695,6 +695,31 @@ config CAVIUM_TX2_ERRATUM_219
If unsure, say Y.
+config CAVIUM_ERRATUM_36890
+ bool "Cavium erratum 36890"
+ default y
+ help
+ Enable workaround for erratum 36890. On all ThunderX T88xx and
+ OcteonTX1 T81/T83 and some OcteonTX2 chips, the "dc zva" instruction
+ does not always work correctly. This happens when two VAs map to
+ the same PA, including when the two VAs are identical but have
+ different ASIDs. The fix is to disable "dc zva" in userspace.
+
+ If unsure, say Y.
+
+config MRVL_ERRATUM_37119
+ bool "Marvell erratum 37119"
+ default y
+ help
+ Enable workaround for erratum 37119. On some OcteonTX2 chips,
+ using the SSO workslots in userspace can cause problems
+ because the caches are not invalidated correctly. To work around
+ this erratum, a local TLBI is placed before the eret.
+ This TLBI does not need to invalidate anything, so a TLBI against
+ ASID 0 is used.
+
+ If unsure, say Y.
+
config QCOM_FALKOR_ERRATUM_1003
bool "Falkor E1003: Incorrect translation due to ASID change"
default y
@@ -780,6 +805,14 @@ config FUJITSU_ERRATUM_010001
endmenu
+config MRVL_OCTEONTX_EL0_INTR
+ bool "Handle interrupts in EL0 via EL3"
+ default y
+ help
+ Handle certain interrupts in EL0 with the help of EL3 firmware to
+ achieve low latency without breaking task isolation.
+ Implemented and tested on OcteonTx and its successor CPUs.
choice
prompt "Page size"
@@ -1125,7 +1158,7 @@ config XEN
config FORCE_MAX_ZONEORDER
int
- default "14" if (ARM64_64K_PAGES && TRANSPARENT_HUGEPAGE)
+ default "14" if (ARM64_64K_PAGES && (TRANSPARENT_HUGEPAGE || (ARCH_THUNDER && PREEMPT_RT)))
default "12" if (ARM64_16K_PAGES && TRANSPARENT_HUGEPAGE)
default "11"
help
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index 8eb5a088ae65..f02bf15dcc70 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -61,7 +61,9 @@
#define ARM64_HAS_AMU_EXTN 51
#define ARM64_HAS_ADDRESS_AUTH 52
#define ARM64_HAS_GENERIC_AUTH 53
+#define ARM64_WORKAROUND_CAVIUM_36890 54
+#define ARM64_WORKAROUND_MRVL_37119 55
-#define ARM64_NCAPS 54
+#define ARM64_NCAPS 56
#endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 7219cddeba66..ed15902a08fb 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -79,6 +79,9 @@
#define CAVIUM_CPU_PART_THUNDERX_83XX 0x0A3
#define CAVIUM_CPU_PART_THUNDERX2 0x0AF
+#define MRVL_CPU_PART_OCTEONTX2_96XX 0x0B2
+#define MRVL_CPU_PART_OCTEONTX2_95XX 0x0B3
+
#define BRCM_CPU_PART_BRAHMA_B53 0x100
#define BRCM_CPU_PART_VULCAN 0x516
@@ -109,6 +112,8 @@
#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
#define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
#define MIDR_CAVIUM_THUNDERX2 MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX2)
+#define MIDR_MRVL_OCTEONTX2_96XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, MRVL_CPU_PART_OCTEONTX2_96XX)
+#define MIDR_MRVL_OCTEONTX2_95XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, MRVL_CPU_PART_OCTEONTX2_95XX)
#define MIDR_BRAHMA_B53 MIDR_CPU_MODEL(ARM_CPU_IMP_BRCM, BRCM_CPU_PART_BRAHMA_B53)
#define MIDR_BRCM_VULCAN MIDR_CPU_MODEL(ARM_CPU_IMP_BRCM, BRCM_CPU_PART_VULCAN)
#define MIDR_QCOM_FALKOR_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR_V1)
diff --git a/arch/arm64/include/asm/irq.h b/arch/arm64/include/asm/irq.h
index b2b0c6405eb0..882de50937e9 100644
--- a/arch/arm64/include/asm/irq.h
+++ b/arch/arm64/include/asm/irq.h
@@ -6,6 +6,15 @@
#include <asm-generic/irq.h>
+/* Platforms with multiple SR-IOV capable PCI devices will
+ * need a large number of MSI-X vectors, hence keep this number
+ * fairly high.
+ */
+#ifdef CONFIG_PCI_MSI
+#undef NR_IRQS
+#define NR_IRQS 65536
+#endif
+
struct pt_regs;
static inline int nr_legacy_irqs(void)
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index ab46187c6300..acb2a4258010 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -250,6 +250,12 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
void verify_cpu_asid_bits(void);
void post_ttbr_update_workaround(void);
+#ifdef CONFIG_MRVL_OCTEONTX_EL0_INTR
+int lock_context(struct mm_struct *mm, int index);
+int unlock_context_by_index(int index);
+bool unlock_context_by_mm(struct mm_struct *mm);
+#endif
+
#endif /* !__ASSEMBLY__ */
#endif /* !__ASM_MMU_CONTEXT_H */
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index f9387c125232..51d0d62454a5 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -106,6 +106,23 @@ cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *cap)
sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
}
+#ifdef CONFIG_CAVIUM_ERRATUM_36890
+static void
+cpu_enable_trap_zva_access(const struct arm64_cpu_capabilities *__unused)
+{
+ /*
+ * Clear SCTLR_EL2.DZE or SCTLR_EL1.DZE depending
+ * on if we are in EL2.
+ */
+ if (!is_kernel_in_hyp_mode())
+ sysreg_clear_set(sctlr_el1, SCTLR_EL1_DZE, 0);
+ else
+ sysreg_clear_set(sctlr_el2, SCTLR_EL1_DZE, 0);
+
+ return;
+}
+#endif
+
atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);
#include <asm/mmu_context.h>
@@ -684,6 +701,32 @@ static const struct midr_range cavium_erratum_30115_cpus[] = {
};
#endif
+#ifdef CONFIG_CAVIUM_ERRATUM_36890
+static const struct midr_range cavium_erratum_36890_cpus[] = {
+ /* Cavium ThunderX, T88 all passes */
+ MIDR_ALL_VERSIONS(MIDR_THUNDERX),
+ /* Cavium ThunderX, T81 all passes */
+ MIDR_ALL_VERSIONS(MIDR_THUNDERX_81XX),
+ /* Cavium ThunderX, T83 all passes */
+ MIDR_ALL_VERSIONS(MIDR_THUNDERX_83XX),
+ /* Marvell OcteonTX 2, 96xx pass A0, A1, and B0 */
+ MIDR_RANGE(MIDR_MRVL_OCTEONTX2_96XX, 0, 0, 1, 0),
+ /* Marvell OcteonTX 2, 95 pass A0/A1 */
+ MIDR_RANGE(MIDR_MRVL_OCTEONTX2_95XX, 0, 0, 0, 1),
+ {},
+};
+#endif
+
+#ifdef CONFIG_MRVL_ERRATUM_37119
+static const struct midr_range mrvl_erratum_37119_cpus[] = {
+ /* Marvell OcteonTX 2, 96xx pass A0, A1, and B0 */
+ MIDR_RANGE(MIDR_MRVL_OCTEONTX2_96XX, 0, 0, 1, 0),
+ /* Marvell OcteonTX 2, 95 pass A0/A1 */
+ MIDR_RANGE(MIDR_MRVL_OCTEONTX2_95XX, 0, 0, 0, 1),
+ {},
+};
+#endif
+
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
static const struct arm64_cpu_capabilities qcom_erratum_1003_list[] = {
{
@@ -849,6 +892,21 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
ERRATA_MIDR_RANGE_LIST(cavium_erratum_30115_cpus),
},
#endif
+#ifdef CONFIG_CAVIUM_ERRATUM_36890
+ {
+ .desc = "Cavium erratum 36890",
+ .capability = ARM64_WORKAROUND_CAVIUM_36890,
+ ERRATA_MIDR_RANGE_LIST(cavium_erratum_36890_cpus),
+ .cpu_enable = cpu_enable_trap_zva_access,
+ },
+#endif
+#ifdef CONFIG_MRVL_ERRATUM_37119
+ {
+ .desc = "Marvell erratum 37119",
+ .capability = ARM64_WORKAROUND_MRVL_37119,
+ ERRATA_MIDR_RANGE_LIST(mrvl_erratum_37119_cpus),
+ },
+#endif
{
.desc = "Mismatched cache type (CTR_EL0)",
.capability = ARM64_MISMATCHED_CACHE_TYPE,
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index ddcde093c433..d297fc9867a5 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -369,6 +369,9 @@ alternative_else_nop_endif
ldr lr, [sp, #S_LR]
add sp, sp, #S_FRAME_SIZE // restore sp
+#ifdef CONFIG_MRVL_ERRATUM_37119
+alternative_insn nop, "tlbi aside1, xzr", ARM64_WORKAROUND_MRVL_37119
+#endif
.if \el == 0
alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index 9b26f9a88724..60795e962f2c 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -25,6 +25,13 @@ static unsigned long *asid_map;
static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
+
+#ifdef CONFIG_MRVL_OCTEONTX_EL0_INTR
+#define LOCKED_ASIDS_COUNT 128
+
+static u64 locked_asids[LOCKED_ASIDS_COUNT];
+#endif
+
static cpumask_t tlb_flush_pending;
#define ASID_MASK (~GENMASK(asid_bits - 1, 0))
@@ -115,6 +122,14 @@ static void flush_context(void)
per_cpu(reserved_asids, i) = asid;
}
+#ifdef CONFIG_MRVL_OCTEONTX_EL0_INTR
+ /* Set bits for locked ASIDs. */
+ for (i = 0; i < LOCKED_ASIDS_COUNT; i++) {
+ asid = locked_asids[i];
+ if (asid != 0)
+ __set_bit(asid & ~ASID_MASK, asid_map);
+ }
+#endif
/*
* Queue a TLB invalidation for each CPU to perform on next
* context-switch
@@ -122,9 +137,61 @@ static void flush_context(void)
cpumask_setall(&tlb_flush_pending);
}
+#ifdef CONFIG_MRVL_OCTEONTX_EL0_INTR
+int lock_context(struct mm_struct *mm, int index)
+{
+ unsigned long flags;
+ u64 asid;
+
+ if ((index < 0) || (index >= LOCKED_ASIDS_COUNT))
+ return -1;
+ raw_spin_lock_irqsave(&cpu_asid_lock, flags);
+ asid = atomic64_read(&mm->context.id);
+ locked_asids[index] = asid;
+ raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
+ return 0;
+}
+EXPORT_SYMBOL(lock_context);
+
+int unlock_context_by_index(int index)
+{
+ unsigned long flags;
+
+ if ((index < 0) || (index >= LOCKED_ASIDS_COUNT))
+ return -1;
+ raw_spin_lock_irqsave(&cpu_asid_lock, flags);
+ locked_asids[index] = 0;
+ raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
+ return 0;
+}
+EXPORT_SYMBOL(unlock_context_by_index);
+
+bool unlock_context_by_mm(struct mm_struct *mm)
+{
+ int i;
+ unsigned long flags;
+ bool hit = false;
+ u64 asid;
+
+ raw_spin_lock_irqsave(&cpu_asid_lock, flags);
+ asid = atomic64_read(&mm->context.id);
+
+ for (i = 0; i < LOCKED_ASIDS_COUNT; i++) {
+ if (locked_asids[i] == asid) {
+ hit = true;
+ locked_asids[i] = 0;
+ }
+ }
+
+ raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
+
+ return hit;
+}
+#endif
+
static bool check_update_reserved_asid(u64 asid, u64 newasid)
{
- int cpu;
+ int i, cpu;
bool hit = false;
/*
@@ -143,6 +210,16 @@ static bool check_update_reserved_asid(u64 asid, u64 newasid)
}
}
+#ifdef CONFIG_MRVL_OCTEONTX_EL0_INTR
+ /* Same mechanism for locked ASIDs */
+ for (i = 0; i < LOCKED_ASIDS_COUNT; i++) {
+ if (locked_asids[i] == asid) {
+ hit = true;
+ locked_asids[i] = newasid;
+ }
+ }
+#endif
+
return hit;
}
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 24c9642e8fc7..75740bb762d9 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -40,6 +40,7 @@
#include <linux/sched/clock.h>
#include <linux/uuid.h>
#include <linux/ras.h>
+#include <linux/of.h>
#include <acpi/actbl1.h>
#include <acpi/ghes.h>
@@ -213,6 +214,24 @@ static void ghes_ack_error(struct acpi_hest_generic_v2 *gv2)
apei_write(val, &gv2->read_ack_register);
}
+/*
+ * Check the device tree for support of the sdei-ghes driver.
+ * This driver supports GHES in the absence of ACPI.
+ *
+ * Returns:
+ *   true if sdei-ghes support is found in the device tree, else false
+ */
+static bool sdei_ghes_present_dt(void)
+{
+ struct device_node *np;
+
+ np = of_find_node_by_name(NULL, "sdei-ghes");
+ of_node_put(np);
+
+ return !!np;
+}
+
static struct ghes *ghes_new(struct acpi_hest_generic *generic)
{
struct ghes *ghes;
@@ -224,6 +243,20 @@ static struct ghes *ghes_new(struct acpi_hest_generic *generic)
return ERR_PTR(-ENOMEM);
ghes->generic = generic;
+
+ /* If sdei-ghes is present (via device tree), ACPI mappings are not
+ * available and will be relegated to 'early_mem_remap()'. However, any
+ * such outstanding 'early' mappings will be detected as leaks during
+ * late kernel initialization - see 'check_early_ioremap_leak()'.
+ * Since this mapping is a 'sanity check' only (the mapping isn't used),
+ * skip this step to avoid it being detected as an [errant] leak.
+ * Notes:
+ * * the presence of the Device Tree disables ACPI
+ * * the status register is actually mapped at run-time, when accessed
+ */
+ if (sdei_ghes_present_dt())
+ goto skip_map_status;
+
if (is_hest_type_generic_v2(ghes)) {
rc = map_gen_v2(ghes);
if (rc)
@@ -233,6 +266,8 @@ static struct ghes *ghes_new(struct acpi_hest_generic *generic)
rc = apei_map_generic_address(&generic->error_status_address);
if (rc)
goto err_unmap_read_ack_addr;
+
+skip_map_status:
error_block_length = generic->error_block_length;
if (error_block_length > GHES_ESTATUS_MAX_SIZE) {
pr_warn(FW_WARN GHES_PFX
@@ -250,6 +285,9 @@ static struct ghes *ghes_new(struct acpi_hest_generic *generic)
return ghes;
err_unmap_status_addr:
+ /* if sdei-ghes is present, status was not mapped - skip the UNmap */
+ if (sdei_ghes_present_dt())
+ goto err_free;
apei_unmap_generic_address(&generic->error_status_address);
err_unmap_read_ack_addr:
if (is_hest_type_generic_v2(ghes))
@@ -262,6 +300,9 @@ err_free:
static void ghes_fini(struct ghes *ghes)
{
kfree(ghes->estatus);
+ /* if sdei-ghes is present, status was not mapped - skip the UNmap */
+ if (sdei_ghes_present_dt())
+ return;
apei_unmap_generic_address(&ghes->generic->error_status_address);
if (is_hest_type_generic_v2(ghes))
unmap_gen_v2(ghes);
@@ -1312,7 +1353,8 @@ static int __init ghes_init(void)
{
int rc;
- if (acpi_disabled)
+ /* permit GHES initialization if either ACPI or SDEI_GHES is present */
+ if (acpi_disabled && !sdei_ghes_present_dt())
return -ENODEV;
switch (hest_disable) {
@@ -1336,15 +1378,17 @@ static int __init ghes_init(void)
if (rc)
goto err;
- rc = apei_osc_setup();
- if (rc == 0 && osc_sb_apei_support_acked)
- pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit and WHEA _OSC.\n");
- else if (rc == 0 && !osc_sb_apei_support_acked)
- pr_info(GHES_PFX "APEI firmware first mode is enabled by WHEA _OSC.\n");
- else if (rc && osc_sb_apei_support_acked)
- pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit.\n");
- else
- pr_info(GHES_PFX "Failed to enable APEI firmware first mode.\n");
+ if (!acpi_disabled) {
+ rc = apei_osc_setup();
+ if (rc == 0 && osc_sb_apei_support_acked)
+ pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit and WHEA _OSC.\n");
+ else if (rc == 0 && !osc_sb_apei_support_acked)
+ pr_info(GHES_PFX "APEI firmware first mode is enabled by WHEA _OSC.\n");
+ else if (rc && osc_sb_apei_support_acked)
+ pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit.\n");
+ else
+ pr_info(GHES_PFX "Failed to enable APEI firmware first mode.\n");
+ }
return 0;
err:
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
index 822402480f7d..8ee612231bf0 100644
--- a/drivers/acpi/apei/hest.c
+++ b/drivers/acpi/apei/hest.c
@@ -235,8 +235,12 @@ void __init acpi_hest_init(void)
return;
}
- status = acpi_get_table(ACPI_SIG_HEST, 0,
- (struct acpi_table_header **)&hest_tab);
+ /* HEST table may have been initialized by hest_table_set() */
+ if (hest_tab)
+ status = AE_OK;
+ else
+ status = acpi_get_table(ACPI_SIG_HEST, 0,
+ (struct acpi_table_header **)&hest_tab);
if (status == AE_NOT_FOUND) {
hest_disable = HEST_NOT_FOUND;
return;
@@ -253,13 +257,17 @@ void __init acpi_hest_init(void)
if (!ghes_disable) {
rc = apei_hest_parse(hest_parse_ghes_count, &ghes_count);
- if (rc)
+ if (rc) {
+ pr_err(HEST_PFX "Failed to parse GHES entries\n");
goto err;
+ }
if (ghes_count)
rc = hest_ghes_dev_register(ghes_count);
- if (rc)
+ if (rc) {
+ pr_err(HEST_PFX "Failed to register GHES devices\n");
goto err;
+ }
}
pr_info(HEST_PFX "Table parsing has been initialized.\n");
@@ -267,3 +275,12 @@ void __init acpi_hest_init(void)
err:
hest_disable = HEST_DISABLED;
}
+
+/*
+ * This allows the HEST to be initialized externally, in the absence of ACPI.
+ */
+void __init hest_table_set(struct acpi_table_hest *table)
+{
+ hest_tab = table;
+}
+
diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c
index c754dfbb73fd..5e90c6637106 100644
--- a/drivers/clk/clk-scmi.c
+++ b/drivers/clk/clk-scmi.c
@@ -86,6 +86,15 @@ static void scmi_clk_disable(struct clk_hw *hw)
clk->handle->clk_ops->disable(clk->handle, clk->id);
}
+static int scmi_get_available_rates(struct clk_hw *hw, u64 *rate)
+{
+ struct scmi_clk *clk = to_scmi_clk(hw);
+
+ return clk->handle->clk_ops->available_rates(clk->handle,
+ clk->id, rate);
+}
+
+
static const struct clk_ops scmi_clk_ops = {
.recalc_rate = scmi_clk_recalc_rate,
.round_rate = scmi_clk_round_rate,
@@ -98,6 +107,7 @@ static const struct clk_ops scmi_clk_ops = {
*/
.prepare = scmi_clk_enable,
.unprepare = scmi_clk_disable,
+ .get_available_rates = scmi_get_available_rates,
};
static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk)
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 407f6919604c..341710d3f868 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -21,6 +21,7 @@
#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/clkdev.h>
+#include <linux/uaccess.h>
#include "clk.h"
@@ -3193,6 +3194,116 @@ static int clk_max_rate_show(struct seq_file *s, void *data)
}
DEFINE_SHOW_ATTRIBUTE(clk_max_rate);
+static ssize_t clock_freq_get(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ char buf[32];
+ unsigned int len;
+ struct clk_core *core = file->f_inode->i_private;
+ u32 clk_cur_frq = clk_core_get_rate_recalc(core);
+
+ len = snprintf(buf, sizeof(buf), "%u\n", clk_cur_frq);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t clock_freq_set(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ char buf[32];
+ ssize_t len;
+ struct clk_core *core = file->f_inode->i_private;
+ u32 new_clk_frq;
+ /* No parent */
+ unsigned long parent_rate = 0;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ /* Make the buffer a valid string that we can not overrun */
+ buf[len] = '\0';
+ if (kstrtouint(buf, 0, &new_clk_frq))
+ return -EINVAL;
+
+ core->ops->set_rate(core->hw, new_clk_frq, parent_rate);
+ return count;
+}
+
+static int clock_freq_dummy(struct seq_file *s, void *unused)
+{
+ return 0;
+}
+
+static int clock_freq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, clock_freq_dummy,
+ inode->i_private);
+}
+
+/* File operations for reading or setting various device clock frequencies */
+static const struct file_operations clock_freq_fops = {
+ .open = clock_freq_open,
+ .read = clock_freq_get,
+ .write = clock_freq_set,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+int clk_core_get_available_rate(struct clk_core *core, u64 *rates)
+{
+ if (core->ops->get_available_rates)
+ return core->ops->get_available_rates(core->hw, rates);
+ return 0;
+}
+
+static ssize_t available_clock_freq_get(struct file *file,
+ char __user *user_buf, size_t count, loff_t *ppos)
+{
+ char buf[256];
+ u64 rates_buf[16];
+ u32 index = 0, i;
+ int no_of_freqs;
+
+ struct clk_core *core = file->f_inode->i_private;
+
+ no_of_freqs = clk_core_get_available_rate(core, rates_buf);
+
+ /* No freq found */
+ if (no_of_freqs <= 0 || no_of_freqs > 16) {
+ pr_err("Failed to get available frequencies\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < no_of_freqs; i++)
+ index += snprintf(&buf[index],
+ sizeof(buf) - index, "%llu ", rates_buf[i]);
+
+ index += snprintf(&buf[index], sizeof(buf) - index, "\n");
+ return simple_read_from_buffer(user_buf, count, ppos, buf, index);
+}
+
+static int available_clock_freq_dummy(struct seq_file *s, void *unused)
+{
+ return 0;
+}
+
+static int available_clock_freq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, available_clock_freq_dummy,
+ inode->i_private);
+}
+
+/*
+ * File operations for reading all valid possible frequencies the device
+ * clock could support.
+ */
+static const struct file_operations available_clock_freq_fops = {
+ .open = available_clock_freq_open,
+ .read = available_clock_freq_get,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
{
struct dentry *root;
@@ -3205,6 +3316,8 @@ static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
debugfs_create_file("clk_rate", clk_rate_mode, root, core,
&clk_rate_fops);
+ debugfs_create_file("clk_available_freq", 0444, root, core,
+ &available_clock_freq_fops);
debugfs_create_file("clk_min_rate", 0444, root, core, &clk_min_rate_fops);
debugfs_create_file("clk_max_rate", 0444, root, core, &clk_max_rate_fops);
debugfs_create_ulong("clk_accuracy", 0444, root, &core->accuracy);
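
With these hooks in place, the rates a clock provider supports can be read
back from user space: assuming debugfs is mounted at its usual location,
reading /sys/kernel/debug/clk/<clock-name>/clk_available_freq prints the
space-separated list returned by the provider's get_available_rates callback.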
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 944ed7226e37..766b170fd11d 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -18,7 +18,7 @@ obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o
obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
-obj-$(CONFIG_CRYPTO_DEV_MARVELL) += marvell/
+obj-$(CONFIG_CRYPTO_DEV_MARVELL_CESA) += marvell/cesa/
obj-$(CONFIG_CRYPTO_DEV_MEDIATEK) += mediatek/
obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o
obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c
index 4c2227662b26..da04833aaca5 100644
--- a/drivers/firmware/arm_scmi/clock.c
+++ b/drivers/firmware/arm_scmi/clock.c
@@ -194,6 +194,22 @@ err:
return ret;
}
+static int scmi_clock_available_rates(const struct scmi_handle *handle,
+ u32 clk_id, u64 *rates)
+{
+ struct clock_info *cinfo = handle->clk_priv;
+ struct scmi_clock_info *clk = cinfo->clk + clk_id;
+
+ if (!rates)
+ return -EINVAL;
+
+ /* Copy all the rates into user specified buffer */
+ memcpy(rates, &clk->list.rates[0],
+ clk->list.num_rates * sizeof(*rates));
+
+ return clk->list.num_rates;
+}
+
static int
scmi_clock_rate_get(const struct scmi_handle *handle, u32 clk_id, u64 *value)
{
@@ -309,6 +325,7 @@ static struct scmi_clk_ops clk_ops = {
.rate_set = scmi_clock_rate_set,
.enable = scmi_clock_enable,
.disable = scmi_clock_disable,
+ .available_rates = scmi_clock_available_rates,
};
static int scmi_clock_protocol_init(struct scmi_handle *handle)
diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/mailbox.c
index 73077bbc4ad9..79559d6653f1 100644
--- a/drivers/firmware/arm_scmi/mailbox.c
+++ b/drivers/firmware/arm_scmi/mailbox.c
@@ -178,7 +178,7 @@ static struct scmi_transport_ops scmi_mailbox_ops = {
const struct scmi_desc scmi_mailbox_desc = {
.ops = &scmi_mailbox_ops,
- .max_rx_timeout_ms = 30, /* We may increase this if required */
+ .max_rx_timeout_ms = 300, /* We may increase this if required */
.max_msg = 20, /* Limited by MBOX_TX_QUEUE_LEN */
- .max_msg_size = 128,
+ .max_msg_size = 256,
};
diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
index 34f3a917dd8d..6b59202d3d5c 100644
--- a/drivers/firmware/arm_scmi/perf.c
+++ b/drivers/firmware/arm_scmi/perf.c
@@ -593,6 +593,23 @@ static int scmi_dev_domain_id(struct device *dev)
return clkspec.args[0];
}
+static uint32_t roundoff_to_nearest_100(uint32_t freq)
+{
+ uint32_t quotient, remainder;
+ uint32_t freq_mhz;
+
+ freq_mhz = (freq / 1000000);
+ quotient = freq_mhz / 100;
+ remainder = freq_mhz % 100;
+
+ if (remainder >= 50)
+ freq_mhz = quotient * 100 + 100;
+ else
+ freq_mhz = quotient * 100;
+
+ return freq_mhz * 1000000;
+}
+
static int scmi_dvfs_device_opps_add(const struct scmi_handle *handle,
struct device *dev)
{
@@ -611,6 +628,12 @@ static int scmi_dvfs_device_opps_add(const struct scmi_handle *handle,
for (opp = dom->opp, idx = 0; idx < dom->opp_count; idx++, opp++) {
freq = opp->perf * dom->mult_factor;
+ /*
+ * Marvell specific: need to round off to the nearest hundred
+ * if the calculated frequency is not a multiple of 100 in MHz
+ */
+ freq = roundoff_to_nearest_100(freq);
+
ret = dev_pm_opp_add(dev, freq, 0);
if (ret) {
dev_warn(dev, "failed to add opp %luHz\n", freq);
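
(Worked example of the rounding above, with illustrative values: an OPP
reported as 2147483648 Hz, i.e. ~2147 MHz, is rounded down to 2100000000 Hz,
while 2450000000 Hz is rounded up to 2500000000 Hz.)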
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 1b96169d84f7..7444e029fb08 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -892,6 +892,18 @@ config GPIO_GW_PLD
Say yes here to provide access to the Gateworks I2C PLD GPIO
Expander. This is used at least on the Cambria GW2358-4.
+config GPIO_I2C
+ tristate "Generic I2C->GPIO no-irq expander"
+ depends on OF
+ default n
+ help
+ Select this option to enable GPIO for simple I2C devices,
+ parameterized by device-tree, and having no interrupts.
+ Developed to model a custom board's CPLD, but may be useful
+ for various hardware where i2c-poking flips external pins.
+
+ If unsure, say N.
+
config GPIO_MAX7300
tristate "Maxim MAX7300 GPIO expander"
select GPIO_MAX730X
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index b2cfc21a97f3..7b32453a2b73 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -63,6 +63,7 @@ obj-$(CONFIG_GPIO_GRGPIO) += gpio-grgpio.o
obj-$(CONFIG_GPIO_GW_PLD) += gpio-gw-pld.o
obj-$(CONFIG_GPIO_HLWD) += gpio-hlwd.o
obj-$(CONFIG_HTC_EGPIO) += gpio-htc-egpio.o
+obj-$(CONFIG_GPIO_I2C) += gpio-i2c.o
obj-$(CONFIG_GPIO_ICH) += gpio-ich.o
obj-$(CONFIG_GPIO_INTEL_MID) += gpio-intel-mid.o
obj-$(CONFIG_GPIO_IOP) += gpio-iop.o
diff --git a/drivers/gpio/gpio-i2c.c b/drivers/gpio/gpio-i2c.c
new file mode 100644
index 000000000000..f24b7df4c69d
--- /dev/null
+++ b/drivers/gpio/gpio-i2c.c
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * simple parameterized no-irq of_driven i2c->gpio expander,
+ * cut down from gpio-pcf857x.c to be totally device-tree driven.
+ *
+ * Suitable for any "memory-like" device, where a 1-byte i2c read yields data
+ * which can safely be written back, possibly with a bit changed, with the
+ * effect of changing only the output level of that bit's GPIO pin.
+ *
+ * Copyright (C) 2016 Cavium Inc.
+ * Copyright (C) 2019 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+struct gpio_i2c_platform_data {
+ unsigned int i2c_addr;
+ unsigned int pins;
+};
+
+
+static const struct of_device_id gpio_i2c_of_table[] = {
+ { .compatible = "gpio-i2c" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gpio_i2c_of_table);
+
+struct gpio_i2c {
+ unsigned int i2c_addr;
+ struct gpio_chip chip;
+ struct i2c_client *client;
+ struct mutex lock; /* protect 'out' */
+ u8 out[]; /* software latch */
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int gpio_i2c_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct gpio_i2c *gpio = container_of(chip, struct gpio_i2c, chip);
+ int value;
+ unsigned int byte = (offset >> 3);
+ unsigned int bit = (offset & 7);
+
+ mutex_lock(&gpio->lock);
+ value = i2c_smbus_read_byte_data(gpio->client, byte);
+ mutex_unlock(&gpio->lock);
+ return (value < 0) ? 0 : ((value >> bit) & 1);
+}
+
+static int gpio_i2c_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ struct gpio_i2c *gpio = container_of(chip, struct gpio_i2c, chip);
+ unsigned int byte = (offset >> 3);
+ unsigned int bit = (offset & 7);
+ unsigned int mask = (1 << bit);
+ int status;
+ u8 was;
+
+ mutex_lock(&gpio->lock);
+ was = i2c_smbus_read_byte_data(gpio->client, byte);
+ if (value)
+ was |= mask;
+ else
+ was &= ~mask;
+ status = i2c_smbus_write_byte_data(gpio->client, byte, was);
+ gpio->out[byte] = was;
+ mutex_unlock(&gpio->lock);
+
+ return status;
+}
+
+static void gpio_i2c_set(struct gpio_chip *chip, unsigned int offset, int value)
+{
+ gpio_i2c_output(chip, offset, value);
+}
+
+/* for open-drain: set as input by letting output go high */
+static int gpio_i2c_input(struct gpio_chip *chip, unsigned int offset)
+{
+ return gpio_i2c_output(chip, offset, 1);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int gpio_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct gpio_i2c_platform_data *pdata = dev_get_platdata(&client->dev);
+ struct device_node *np = client->dev.of_node;
+ struct gpio_i2c *gpio;
+ u32 pins;
+ u32 i2c_addr;
+ int status;
+
+ if (np) {
+ status = of_property_read_u32(np, "reg", &i2c_addr);
+ if (status < 0) {
+ dev_dbg(&client->dev, "missing reg property\n");
+ return status;
+ }
+ status = of_property_read_u32(np, "ngpios", &pins);
+ if (status < 0) {
+ dev_dbg(&client->dev, "missing ngpios property\n");
+ return status;
+ }
+ } else if (pdata) {
+ i2c_addr = pdata->i2c_addr;
+ pins = pdata->pins;
+ } else {
+ dev_dbg(&client->dev, "no platform data\n");
+ return -EPROBE_DEFER;
+ }
+
+ /* Allocate, initialize, and register this gpio_chip. */
+ gpio = devm_kzalloc(&client->dev,
+ sizeof(*gpio) + (pins + 7) / 8, GFP_KERNEL);
+ if (!gpio)
+ return -ENOMEM;
+
+ mutex_init(&gpio->lock);
+
+ gpio->i2c_addr = i2c_addr;
+ gpio->chip.base = -1;
+ gpio->chip.can_sleep = true;
+ gpio->chip.parent = &client->dev;
+ gpio->chip.owner = THIS_MODULE;
+ gpio->chip.get = gpio_i2c_get;
+ gpio->chip.set = gpio_i2c_set;
+ gpio->chip.direction_input = gpio_i2c_input;
+ gpio->chip.direction_output = gpio_i2c_output;
+ gpio->chip.ngpio = pins;
+ gpio->chip.label = client->name;
+ gpio->client = client;
+ gpio->client->addr = i2c_addr;
+ i2c_set_clientdata(client, gpio);
+
+ status = gpiochip_add(&gpio->chip);
+ if (status < 0)
+ goto fail;
+
+ dev_info(&client->dev, "probed\n");
+
+ return 0;
+
+fail:
+ dev_dbg(&client->dev, "probe error %d for '%s'\n", status,
+ client->name);
+
+ return status;
+}
+
+static int gpio_i2c_remove(struct i2c_client *client)
+{
+ struct gpio_i2c *gpio = i2c_get_clientdata(client);
+ int status = 0;
+
+ gpiochip_remove(&gpio->chip);
+ return status;
+}
+
+/* this must _exist_ for i2c_device_probe() to call our probe, may be empty */
+static struct i2c_device_id empty_id_table[] = {
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, empty_id_table);
+
+static struct i2c_driver gpio_i2c_driver = {
+ .driver = {
+ .name = "gpio-i2c",
+ .of_match_table = of_match_ptr(gpio_i2c_of_table),
+ },
+ .probe = gpio_i2c_probe,
+ .remove = gpio_i2c_remove,
+ .id_table = empty_id_table,
+};
+
+module_i2c_driver(gpio_i2c_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Brownell");
+MODULE_AUTHOR("Peter Swain <pswain@cavium.com>");
+/*
+ * arguably this name defies convention, but correct(?) alias has been
+ * taken by the inverse function in
+ * drivers/i2c/busses/i2c-gpio.c:MODULE_ALIAS("platform:i2c-gpio");
+ */
+MODULE_ALIAS("platform:gpio-i2c");
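
A hypothetical consumer sketch (not part of this patch): a driver whose DT
node carries the enable-gpios property from the binding example can drive
that CPLD bit through the standard gpiod API; since the expander sits behind
I2C, the cansleep variant must be used.

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/gpio/consumer.h>

	static int some_device_enable(struct device *dev)
	{
		struct gpio_desc *en;

		/* looks up "enable-gpios" in the consumer's DT node */
		en = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
		if (IS_ERR(en))
			return PTR_ERR(en);

		gpiod_set_value_cansleep(en, 1);	/* backed by an I2C write */
		return 0;
	}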
diff --git a/drivers/gpio/gpio-thunderx.c b/drivers/gpio/gpio-thunderx.c
index 9f66deab46ea..f9d70122d45a 100644
--- a/drivers/gpio/gpio-thunderx.c
+++ b/drivers/gpio/gpio-thunderx.c
@@ -16,7 +16,16 @@
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <asm-generic/msi.h>
-
+#ifdef CONFIG_MRVL_OCTEONTX_EL0_INTR
+#include <linux/arm-smccc.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/moduleparam.h>
+#include <linux/uaccess.h>
+#include <linux/mmu_context.h>
+#include <linux/ioctl.h>
+#include <linux/fs.h>
+#endif
#define GPIO_RX_DAT 0x0
#define GPIO_TX_SET 0x8
@@ -32,17 +41,63 @@
#define GPIO_BIT_CFG_FIL_CNT_SHIFT 4
#define GPIO_BIT_CFG_FIL_SEL_SHIFT 8
#define GPIO_BIT_CFG_TX_OD BIT(12)
-#define GPIO_BIT_CFG_PIN_SEL_MASK GENMASK(25, 16)
+#define GPIO_BIT_CFG_PIN_SEL_MASK GENMASK(26, 16)
#define GPIO_INTR 0x800
#define GPIO_INTR_INTR BIT(0)
#define GPIO_INTR_INTR_W1S BIT(1)
#define GPIO_INTR_ENA_W1C BIT(2)
#define GPIO_INTR_ENA_W1S BIT(3)
#define GPIO_2ND_BANK 0x1400
+#define MRVL_OCTEONTX2_96XX_PARTNUM 0xB2
+
#define GLITCH_FILTER_400NS ((4u << GPIO_BIT_CFG_FIL_SEL_SHIFT) | \
(9u << GPIO_BIT_CFG_FIL_CNT_SHIFT))
+#ifdef CONFIG_MRVL_OCTEONTX_EL0_INTR
+#define DEVICE_NAME "otx-gpio-ctr"
+#define OTX_IOC_MAGIC 0xF2
+#define MAX_GPIO 80
+
+static struct device *otx_device;
+static struct class *otx_class;
+static struct cdev *otx_cdev;
+static dev_t otx_dev;
+static DEFINE_SPINLOCK(el3_inthandler_lock);
+static int gpio_in_use;
+static int gpio_installed[MAX_GPIO];
+static struct thread_info *gpio_installed_threads[MAX_GPIO];
+static struct task_struct *gpio_installed_tasks[MAX_GPIO];
+
+/* THUNDERX SMC definitions */
+/* X1 - gpio_num, X2 - sp, X3 - cpu, X4 - ttbr0 */
+#define THUNDERX_INSTALL_GPIO_INT 0xC2000801
+/* X1 - gpio_num */
+#define THUNDERX_REMOVE_GPIO_INT 0xC2000802
+
+struct intr_hand {
+ u64 mask;
+ char name[50];
+ u64 coffset;
+ u64 soffset;
+ irqreturn_t (*handler)(int, void *);
+};
+
+struct otx_gpio_usr_data {
+ u64 isr_base;
+ u64 sp;
+ u64 cpu;
+ u64 gpio_num;
+};
+
+
+#define OTX_IOC_SET_GPIO_HANDLER \
+ _IOW(OTX_IOC_MAGIC, 1, struct otx_gpio_usr_data)
+
+#define OTX_IOC_CLR_GPIO_HANDLER \
+ _IO(OTX_IOC_MAGIC, 2)
+#endif
+
struct thunderx_gpio;
struct thunderx_line {
@@ -62,6 +117,159 @@ struct thunderx_gpio {
int base_msi;
};
+#ifdef CONFIG_MRVL_OCTEONTX_EL0_INTR
+static inline int __install_el3_inthandler(unsigned long gpio_num,
+ unsigned long sp,
+ unsigned long cpu,
+ unsigned long ttbr0)
+{
+ struct arm_smccc_res res;
+ unsigned long flags;
+ int retval = -1;
+
+ spin_lock_irqsave(&el3_inthandler_lock, flags);
+ if (!gpio_installed[gpio_num]) {
+ lock_context(current->group_leader->mm, gpio_num);
+ arm_smccc_smc(THUNDERX_INSTALL_GPIO_INT, gpio_num,
+ sp, cpu, ttbr0, 0, 0, 0, &res);
+ if (res.a0 == 0) {
+ gpio_installed[gpio_num] = 1;
+ gpio_installed_threads[gpio_num]
+ = current_thread_info();
+ gpio_installed_tasks[gpio_num]
+ = current->group_leader;
+ retval = 0;
+ } else {
+ unlock_context_by_index(gpio_num);
+ }
+ }
+ spin_unlock_irqrestore(&el3_inthandler_lock, flags);
+ return retval;
+}
+
+static inline int __remove_el3_inthandler(unsigned long gpio_num)
+{
+ struct arm_smccc_res res;
+ unsigned long flags;
+ unsigned int retval;
+
+ spin_lock_irqsave(&el3_inthandler_lock, flags);
+ if (gpio_installed[gpio_num]) {
+ arm_smccc_smc(THUNDERX_REMOVE_GPIO_INT, gpio_num,
+ 0, 0, 0, 0, 0, 0, &res);
+ gpio_installed[gpio_num] = 0;
+ gpio_installed_threads[gpio_num] = NULL;
+ gpio_installed_tasks[gpio_num] = NULL;
+ unlock_context_by_index(gpio_num);
+ retval = 0;
+ } else {
+ retval = -1;
+ }
+ spin_unlock_irqrestore(&el3_inthandler_lock, flags);
+ return retval;
+}
+
+static long otx_dev_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+{
+ int err = 0;
+ struct otx_gpio_usr_data gpio_usr;
+ u64 gpio_ttbr, gpio_isr_base, gpio_sp, gpio_cpu, gpio_num;
+ int ret;
+ //struct task_struct *task = current;
+
+ if (!gpio_in_use)
+ return -EINVAL;
+
+ if (_IOC_TYPE(cmd) != OTX_IOC_MAGIC)
+ return -ENOTTY;
+
+ if (_IOC_DIR(cmd) & _IOC_READ)
+ err = !access_ok((void __user *)arg, _IOC_SIZE(cmd));
+ else if (_IOC_DIR(cmd) & _IOC_WRITE)
+ err = !access_ok((void __user *)arg, _IOC_SIZE(cmd));
+
+ if (err)
+ return -EFAULT;
+
+ switch (cmd) {
+ case OTX_IOC_SET_GPIO_HANDLER: /*Install GPIO ISR handler*/
+ ret = copy_from_user(&gpio_usr, (void *)arg, _IOC_SIZE(cmd));
+ if (gpio_usr.gpio_num >= MAX_GPIO)
+ return -EINVAL;
+ if (ret)
+ return -EFAULT;
+ gpio_ttbr = 0;
+ //TODO: reserve an ASID to avoid ASID rollovers
+ asm volatile("mrs %0, ttbr0_el1\n\t" : "=r"(gpio_ttbr));
+ gpio_isr_base = gpio_usr.isr_base;
+ gpio_sp = gpio_usr.sp;
+ gpio_cpu = gpio_usr.cpu;
+ gpio_num = gpio_usr.gpio_num;
+ ret = __install_el3_inthandler(gpio_num, gpio_sp,
+ gpio_cpu, gpio_isr_base);
+ if (ret != 0)
+ return -EEXIST;
+ break;
+ case OTX_IOC_CLR_GPIO_HANDLER: /*Clear GPIO ISR handler*/
+ gpio_usr.gpio_num = arg;
+ if (gpio_usr.gpio_num >= MAX_GPIO)
+ return -EINVAL;
+ ret = __remove_el3_inthandler(gpio_usr.gpio_num);
+ if (ret != 0)
+ return -ENOENT;
+ break;
+ default:
+ return -ENOTTY;
+ }
+ return 0;
+}
+
+static void cleanup_el3_irqs(struct task_struct *task)
+{
+ int i;
+
+ for (i = 0; i < MAX_GPIO; i++) {
+ if (gpio_installed[i] &&
+ gpio_installed_tasks[i] &&
+ (gpio_installed_tasks[i] == task)) {
+ pr_alert("Exiting, removing handler for GPIO %d\n",
+ i);
+ __remove_el3_inthandler(i);
+ pr_alert("Exited, removed handler for GPIO %d\n",
+ i);
+ } else {
+ if (gpio_installed[i] &&
+ (gpio_installed_threads[i]
+ == current_thread_info()))
+ pr_alert(
+ "Exiting, thread info matches, not removing handler for GPIO %d\n",
+ i);
+ }
+ }
+}
+
+static int otx_dev_open(struct inode *inode, struct file *fp)
+{
+ gpio_in_use = 1;
+ return 0;
+}
+
+static int otx_dev_release(struct inode *inode, struct file *fp)
+{
+ if (gpio_in_use == 0)
+ return -EINVAL;
+ gpio_in_use = 0;
+ return 0;
+}
+
+static const struct file_operations fops = {
+ .owner = THIS_MODULE,
+ .open = otx_dev_open,
+ .release = otx_dev_release,
+ .unlocked_ioctl = otx_dev_ioctl
+};
+#endif
+
static unsigned int bit_cfg_reg(unsigned int line)
{
return 8 * line + GPIO_BIT_CFG;
@@ -104,16 +312,17 @@ static int thunderx_gpio_request(struct gpio_chip *chip, unsigned int line)
static int thunderx_gpio_dir_in(struct gpio_chip *chip, unsigned int line)
{
struct thunderx_gpio *txgpio = gpiochip_get_data(chip);
+ unsigned long flags;
if (!thunderx_gpio_is_gpio(txgpio, line))
return -EIO;
- raw_spin_lock(&txgpio->lock);
+ raw_spin_lock_irqsave(&txgpio->lock, flags);
clear_bit(line, txgpio->invert_mask);
clear_bit(line, txgpio->od_mask);
writeq(txgpio->line_entries[line].fil_bits,
txgpio->register_base + bit_cfg_reg(line));
- raw_spin_unlock(&txgpio->lock);
+ raw_spin_unlock_irqrestore(&txgpio->lock, flags);
return 0;
}
@@ -135,11 +344,12 @@ static int thunderx_gpio_dir_out(struct gpio_chip *chip, unsigned int line,
{
struct thunderx_gpio *txgpio = gpiochip_get_data(chip);
u64 bit_cfg = txgpio->line_entries[line].fil_bits | GPIO_BIT_CFG_TX_OE;
+ unsigned long flags;
if (!thunderx_gpio_is_gpio(txgpio, line))
return -EIO;
- raw_spin_lock(&txgpio->lock);
+ raw_spin_lock_irqsave(&txgpio->lock, flags);
thunderx_gpio_set(chip, line, value);
@@ -151,7 +361,7 @@ static int thunderx_gpio_dir_out(struct gpio_chip *chip, unsigned int line,
writeq(bit_cfg, txgpio->register_base + bit_cfg_reg(line));
- raw_spin_unlock(&txgpio->lock);
+ raw_spin_unlock_irqrestore(&txgpio->lock, flags);
return 0;
}
@@ -188,11 +398,12 @@ static int thunderx_gpio_set_config(struct gpio_chip *chip,
int ret = -ENOTSUPP;
struct thunderx_gpio *txgpio = gpiochip_get_data(chip);
void __iomem *reg = txgpio->register_base + (bank * GPIO_2ND_BANK) + GPIO_TX_SET;
+ unsigned long flags;
if (!thunderx_gpio_is_gpio(txgpio, line))
return -EIO;
- raw_spin_lock(&txgpio->lock);
+ raw_spin_lock_irqsave(&txgpio->lock, flags);
orig_invert = test_bit(line, txgpio->invert_mask);
new_invert = orig_invert;
orig_od = test_bit(line, txgpio->od_mask);
@@ -243,7 +454,7 @@ static int thunderx_gpio_set_config(struct gpio_chip *chip,
default:
break;
}
- raw_spin_unlock(&txgpio->lock);
+ raw_spin_unlock_irqrestore(&txgpio->lock, flags);
/*
* If currently output and OPEN_DRAIN changed, install the new
@@ -330,6 +541,7 @@ static int thunderx_gpio_irq_set_type(struct irq_data *d,
struct thunderx_line *txline =
&txgpio->line_entries[irqd_to_hwirq(d)];
u64 bit_cfg;
+ unsigned long flags;
irqd_set_trigger_type(d, flow_type);
@@ -342,7 +554,7 @@ static int thunderx_gpio_irq_set_type(struct irq_data *d,
irq_set_handler_locked(d, handle_fasteoi_mask_irq);
}
- raw_spin_lock(&txgpio->lock);
+ raw_spin_lock_irqsave(&txgpio->lock, flags);
if (flow_type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW)) {
bit_cfg |= GPIO_BIT_CFG_PIN_XOR;
set_bit(txline->line, txgpio->invert_mask);
@@ -351,7 +563,7 @@ static int thunderx_gpio_irq_set_type(struct irq_data *d,
}
clear_bit(txline->line, txgpio->od_mask);
writeq(bit_cfg, txgpio->register_base + bit_cfg_reg(txline->line));
- raw_spin_unlock(&txgpio->lock);
+ raw_spin_unlock_irqrestore(&txgpio->lock, flags);
return IRQ_SET_MASK_OK;
}
@@ -470,7 +682,13 @@ static int thunderx_gpio_probe(struct pci_dev *pdev,
u64 c = readq(txgpio->register_base + GPIO_CONST);
ngpio = c & GPIO_CONST_GPIOS_MASK;
- txgpio->base_msi = (c >> 8) & 0xff;
+
+ /* Workaround for all passes of T96xx */
+ if (((pdev->subsystem_device >> 8) & 0xFF) ==
+     MRVL_OCTEONTX2_96XX_PARTNUM)
+ txgpio->base_msi = 0x36;
+ else
+ txgpio->base_msi = (c >> 8) & 0xff;
}
txgpio->msix_entries = devm_kcalloc(dev,
@@ -561,7 +779,65 @@ static int thunderx_gpio_probe(struct pci_dev *pdev,
dev_info(dev, "ThunderX GPIO: %d lines with base %d.\n",
ngpio, chip->base);
+
+#ifdef CONFIG_MRVL_OCTEONTX_EL0_INTR
+ /* Register task cleanup handler */
+ err = task_cleanup_handler_add(cleanup_el3_irqs);
+ if (err != 0) {
+ dev_err(dev, "Failed to register cleanup handler: %d\n", err);
+ goto cleanup_handler_err;
+ }
+
+ /* create a character device */
+ err = alloc_chrdev_region(&otx_dev, 1, 1, DEVICE_NAME);
+ if (err != 0) {
+ dev_err(dev, "Failed to create device: %d\n", err);
+ goto alloc_chrdev_err;
+ }
+
+ otx_cdev = cdev_alloc();
+ if (!otx_cdev) {
+ err = -ENODEV;
+ goto cdev_alloc_err;
+ }
+
+ cdev_init(otx_cdev, &fops);
+ err = cdev_add(otx_cdev, otx_dev, 1);
+ if (err < 0) {
+ err = -ENODEV;
+ goto cdev_add_err;
+ }
+
+ /* create new class for sysfs*/
+ otx_class = class_create(THIS_MODULE, DEVICE_NAME);
+ if (IS_ERR(otx_class)) {
+ err = -ENODEV;
+ goto class_create_err;
+ }
+
+ otx_device = device_create(otx_class, NULL, otx_dev, NULL,
+ DEVICE_NAME);
+ if (IS_ERR(otx_device)) {
+ err = -ENODEV;
+ goto device_create_err;
+ }
+#endif
+
return 0;
+
+#ifdef CONFIG_MRVL_OCTEONTX_EL0_INTR
+device_create_err:
+ class_destroy(otx_class);
+
+class_create_err:
+cdev_add_err:
+ cdev_del(otx_cdev);
+cdev_alloc_err:
+ unregister_chrdev_region(otx_dev, 1);
+alloc_chrdev_err:
+ task_cleanup_handler_remove(cleanup_el3_irqs);
+cleanup_handler_err:
+#endif
out:
pci_set_drvdata(pdev, NULL);
return err;
@@ -579,6 +855,15 @@ static void thunderx_gpio_remove(struct pci_dev *pdev)
irq_domain_remove(txgpio->chip.irq.domain);
pci_set_drvdata(pdev, NULL);
+
+#ifdef CONFIG_MRVL_OCTEONTX_EL0_INTR
+ device_destroy(otx_class, otx_dev);
+ class_destroy(otx_class);
+ cdev_del(otx_cdev);
+ unregister_chrdev_region(otx_dev, 1);
+
+ task_cleanup_handler_remove(cleanup_el3_irqs);
+#endif
}
static const struct pci_device_id thunderx_gpio_id_table[] = {
diff --git a/drivers/hwtracing/coresight/Makefile b/drivers/hwtracing/coresight/Makefile
index 0e3e72f0f510..73e0ae3a4339 100644
--- a/drivers/hwtracing/coresight/Makefile
+++ b/drivers/hwtracing/coresight/Makefile
@@ -2,7 +2,8 @@
#
# Makefile for CoreSight drivers.
#
-obj-$(CONFIG_CORESIGHT) += coresight.o coresight-etm-perf.o coresight-platform.o
+obj-$(CONFIG_CORESIGHT) += coresight.o coresight-etm-perf.o coresight-platform.o \
+ coresight-quirks.o
obj-$(CONFIG_CORESIGHT_LINK_AND_SINK_TMC) += coresight-tmc.o \
coresight-tmc-etf.o \
coresight-tmc-etr.o
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
index 84f1dcb69827..190ed6fdde73 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -223,7 +223,7 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
id = (u32)event->attr.config2;
sink = coresight_get_sink_by_id(id);
} else {
- sink = coresight_get_enabled_sink(true);
+ sink = coresight_get_enabled_sink(NULL, true);
}
if (!sink)
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
index ce41482431f9..f0840809e40c 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
@@ -2336,8 +2336,26 @@ static u32 etmv4_cross_read(const struct device *dev, u32 offset)
/*
* smp cross call ensures the CPU will be powered up before
* accessing the ETMv4 trace core registers
+ *
+ * Note: When task isolation is enabled, the target cpu is
+ * always the primary core, so the above assumption that the
+ * cpu associated with the ETM is powered up during register
+ * writes no longer holds.
+ * On the other hand, routing the access through an smp call
+ * still preserves atomicity.
*/
- smp_call_function_single(drvdata->cpu, do_smp_cross_read, &reg, 1);
+ smp_call_function_single(drvdata->rc_cpu, do_smp_cross_read, &reg, 1);
+
+ /*
+ * OcteonTx2 h/w reports ETMv4.2 but supports the Ignore Packet
+ * feature of ETMv4.3; treat this h/w as ETMv4.3 compatible.
+ */
+ if ((offset == TRCIDR1) &&
+ (drvdata->etm_options & CSETM_QUIRK_TREAT_ETMv43)) {
+ reg.data &= ~0xF0;
+ reg.data |= 0x30;
+ }
+
return reg.data;
}
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index 13c362cddd6a..e697f57f7d48 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -104,6 +104,43 @@ struct etm4_enable_arg {
int rc;
};
+/* Raw enable/disable APIs for ETM sync insertion */
+
+static void etm4_enable_raw(struct coresight_device *csdev)
+{
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ CS_UNLOCK(drvdata->base);
+
+ etm4_os_unlock(drvdata);
+
+ /* Enable the trace unit */
+ writel(1, drvdata->base + TRCPRGCTLR);
+
+ dsb(sy);
+ isb();
+
+ CS_LOCK(drvdata->base);
+}
+
+static void etm4_disable_raw(struct coresight_device *csdev)
+{
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ CS_UNLOCK(drvdata->base);
+ /*
+ * Make sure everything completes before disabling, as recommended
+ * by section 7.3.77 ("TRCVICTLR, ViewInst Main Control Register,
+ * SSTATUS") of ARM IHI 0064D
+ */
+ dsb(sy);
+ isb();
+
+ writel_relaxed(0x0, drvdata->base + TRCPRGCTLR);
+
+ CS_LOCK(drvdata->base);
+}
+
static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
{
int i, rc;
@@ -221,6 +258,20 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
done:
CS_LOCK(drvdata->base);
+ /* For supporting SW sync insertion */
+ if (!is_etm_sync_mode_hw()) {
+ /* ETM sync insertions are gated in the
+ * ETR timer handler based on hw state.
+ */
+ drvdata->csdev->hw_state = USR_START;
+
+ /* Global timer handler not being associated with
+ * a specific ETM core, need to know the current
+ * list of active ETMs.
+ */
+ coresight_etm_active_enable(drvdata->cpu);
+ }
+
dev_dbg(etm_dev, "cpu: %d enable smp call done: %d\n",
drvdata->cpu, rc);
return rc;
@@ -420,9 +471,16 @@ static int etm4_enable_sysfs(struct coresight_device *csdev)
/*
* Executing etm4_enable_hw on the cpu whose ETM is being enabled
* ensures that register writes occur when cpu is powered.
+ *
+ * Note: When task isolation is enabled, the target cpu is
+ * always the primary core, so the above assumption that the
+ * cpu associated with the ETM is powered up during register
+ * writes no longer holds.
+ * On the other hand, routing the access through an smp call
+ * still preserves atomicity.
*/
arg.drvdata = drvdata;
- ret = smp_call_function_single(drvdata->cpu,
+ ret = smp_call_function_single(drvdata->rc_cpu,
etm4_enable_hw_smp_call, &arg, 1);
if (!ret)
ret = arg.rc;
@@ -517,6 +575,12 @@ static void etm4_disable_hw(void *info)
CS_LOCK(drvdata->base);
+ /* For supporting SW sync insertion */
+ if (!is_etm_sync_mode_hw()) {
+ drvdata->csdev->hw_state = USR_STOP;
+ coresight_etm_active_disable(drvdata->cpu);
+ }
+
dev_dbg(&drvdata->csdev->dev,
"cpu: %d disable smp call done\n", drvdata->cpu);
}
@@ -562,8 +626,15 @@ static void etm4_disable_sysfs(struct coresight_device *csdev)
/*
* Executing etm4_disable_hw on the cpu whose ETM is being disabled
* ensures that register writes occur when cpu is powered.
+ *
+ * Note: When task isolation is enabled, the target cpu is
+ * always the primary core, so the above assumption that the
+ * cpu associated with the ETM is powered up during register
+ * writes no longer holds.
+ * On the other hand, routing the access through an smp call
+ * still preserves atomicity.
*/
- smp_call_function_single(drvdata->cpu, etm4_disable_hw, drvdata, 1);
+ smp_call_function_single(drvdata->rc_cpu, etm4_disable_hw, drvdata, 1);
spin_unlock(&drvdata->spinlock);
cpus_read_unlock();
@@ -604,6 +675,8 @@ static const struct coresight_ops_source etm4_source_ops = {
.trace_id = etm4_trace_id,
.enable = etm4_enable,
.disable = etm4_disable,
+ .enable_raw = etm4_enable_raw,
+ .disable_raw = etm4_disable_raw,
};
static const struct coresight_ops etm4_cs_ops = {
@@ -668,6 +741,16 @@ static void etm4_init_arch_data(void *info)
/* base architecture of trace unit */
etmidr1 = readl_relaxed(drvdata->base + TRCIDR1);
+
+ /*
+ * OcteonTx2 h/w reports ETMv4.2 but supports the Ignore Packet
+ * feature of ETMv4.3; treat this h/w as ETMv4.3 compatible.
+ */
+ if (drvdata->etm_options & CSETM_QUIRK_TREAT_ETMv43) {
+ etmidr1 &= ~0xF0;
+ etmidr1 |= 0x30;
+ }
+
/*
* TRCARCHMIN, bits[7:4] architecture the minor version number
* TRCARCHMAJ, bits[11:8] architecture major versin number
@@ -1502,10 +1585,14 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
if (!desc.name)
return -ENOMEM;
+ /* Update the smp target cpu */
+ drvdata->rc_cpu = is_etm_sync_mode_sw_global() ? SYNC_GLOBAL_CORE :
+ drvdata->cpu;
+
cpus_read_lock();
etmdrvdata[drvdata->cpu] = drvdata;
- if (smp_call_function_single(drvdata->cpu,
+ if (smp_call_function_single(drvdata->rc_cpu,
etm4_init_arch_data, drvdata, 1))
dev_err(dev, "ETM arch init failed\n");
@@ -1533,6 +1620,9 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
}
adev->dev.platform_data = pdata;
+ /* Enable fixes for Silicon issues */
+ drvdata->etm_options = coresight_get_etm_quirks(OCTEONTX_CN9XXX_ETM);
+
desc.type = CORESIGHT_DEV_TYPE_SOURCE;
desc.subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
desc.ops = &etm4_cs_ops;
@@ -1583,6 +1673,7 @@ static const struct amba_id etm4_ids[] = {
CS_AMBA_ID(0x000bb95a), /* Cortex-A72 */
CS_AMBA_ID(0x000bb959), /* Cortex-A73 */
CS_AMBA_UCI_ID(0x000bb9da, uci_id_etm4),/* Cortex-A35 */
+ CS_AMBA_ID(0x000cc210), /* Marvell-OcteonTx-CN9xxx */
CS_AMBA_UCI_ID(0x000f0205, uci_id_etm4),/* Qualcomm Kryo */
CS_AMBA_UCI_ID(0x000f0211, uci_id_etm4),/* Qualcomm Kryo */
CS_AMBA_ID(0x000bb802), /* Qualcomm Kryo 385 Cortex-A55 */
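
The TRCIDR1 fixups earlier in this file only rewrite the architecture minor version field (TRCARCHMIN, bits [7:4]). A small stand-alone C sketch of that masking follows; the sample register value is invented purely for illustration and is not taken from real hardware:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical TRCIDR1 value reporting ETMv4.2 (TRCARCHMIN = 2). */
        uint32_t etmidr1 = 0x4100f424;

        etmidr1 &= ~0xF0;   /* clear TRCARCHMIN, bits [7:4] */
        etmidr1 |= 0x30;    /* report minor version 3, i.e. ETMv4.3 */

        printf("TRCARCHMAJ=%u TRCARCHMIN=%u\n",
               (etmidr1 >> 8) & 0xF, (etmidr1 >> 4) & 0xF);
        return 0;
    }
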
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
index 47729e04aac7..cb049125cfea 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.h
+++ b/drivers/hwtracing/coresight/coresight-etm4x.h
@@ -356,7 +356,11 @@ struct etmv4_save_state {
* @csdev: Component vitals needed by the framework.
* @spinlock: Only one at a time pls.
* @mode: This tracer's mode, i.e sysFS, Perf or disabled.
+ * @etm_options: Bitmask of options to manage ETMv4 Silicon issues
* @cpu: The cpu this component is affined to.
+ * @rc_cpu: The cpu on which remote function calls can be run
+ * In certain kernel configurations, some cores are not expected
+ * to be interrupted and we need a fallback target cpu.
* @arch: ETM version number.
* @nr_pe: The number of processing entity available for tracing.
* @nr_pe_cmp: The number of processing entity comparator inputs that are
@@ -413,7 +417,9 @@ struct etmv4_drvdata {
struct coresight_device *csdev;
spinlock_t spinlock;
local_t mode;
+ u32 etm_options;
int cpu;
+ int rc_cpu;
u8 arch;
u8 nr_pe;
u8 nr_pe_cmp;
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index 890f9a5c97c6..6fc63729c6d5 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -69,6 +69,47 @@ static DEVICE_ATTR_RO(name)
extern const u32 barrier_pkt[4];
#define CORESIGHT_BARRIER_PKT_SIZE (sizeof(barrier_pkt))
+/* Marvell OcteonTx CN9xxx device */
+#define OCTEONTX_CN9XXX_ETR 0x000cc213
+/* Marvell OcteonTx CN9xxx ETM device */
+#define OCTEONTX_CN9XXX_ETM 0x000cc210
+
+/* Coresight Hardware quirks */
+#define CSETR_QUIRK_BUFFSIZE_8BX (0x1U << 0) /* 8 byte size multiplier */
+#define CSETR_QUIRK_SECURE_BUFF (0x1U << 1) /* Trace buffer is Secure */
+#define CSETR_QUIRK_RESET_CTL_REG (0x1U << 2) /* Reset CTL on reset */
+#define CSETR_QUIRK_NO_STOP_FLUSH (0x1U << 3) /* No Stop on flush */
+#define CSETM_QUIRK_SW_SYNC (0x1U << 4) /* No Hardware sync */
+#define CSETM_QUIRK_TREAT_ETMv43 (0x1U << 5) /* ETMv4.2 as ETMv4.3 */
+
+/* ETM sync insertion modes
+ * 1. MODE_HW
+ * Sync insertion is done by hardware without any software intervention
+ *
+ * 2. MODE_SW_GLOBAL
+ * sync insertion runs from common timer handler on primary core
+ *
+ * 3. MODE_SW_PER_CORE
+ * sync insertion runs from per core timer handler
+ *
+ * When hardware doesn't support sync insertion, we fall back to software based
+ * ones. Typically, GLOBAL mode would be preferred when the traced cores are
+ * running performance critical applications and cannot be interrupted,
+ * but at the same time there would be a small loss of trace data during the
+ * insertion sequence as well.
+ *
+ * For the sake of simplicity, in GLOBAL mode, common timer handler is
+ * always expected to run on primary core(core 0).
+ */
+#define SYNC_GLOBAL_CORE 0 /* Core 0 */
+
+enum etm_sync_mode {
+ SYNC_MODE_INVALID,
+ SYNC_MODE_HW,
+ SYNC_MODE_SW_GLOBAL,
+ SYNC_MODE_SW_PER_CORE,
+};
+
enum etm_addr_type {
ETM_ADDR_TYPE_NONE,
ETM_ADDR_TYPE_SINGLE,
@@ -81,6 +122,7 @@ enum cs_mode {
CS_MODE_DISABLED,
CS_MODE_SYSFS,
CS_MODE_PERF,
+ CS_MODE_READ_PREVBOOT,
};
/**
@@ -148,7 +190,8 @@ static inline void coresight_write_reg_pair(void __iomem *addr, u64 val,
void coresight_disable_path(struct list_head *path);
int coresight_enable_path(struct list_head *path, u32 mode, void *sink_data);
struct coresight_device *coresight_get_sink(struct list_head *path);
-struct coresight_device *coresight_get_enabled_sink(bool reset);
+struct coresight_device *coresight_get_enabled_sink(struct coresight_device *cs,
+ bool reset);
struct coresight_device *coresight_get_sink_by_id(u32 id);
struct list_head *coresight_build_path(struct coresight_device *csdev,
struct coresight_device *sink);
@@ -212,6 +255,19 @@ static inline void *coresight_get_uci_data(const struct amba_id *id)
}
void coresight_release_platform_data(struct coresight_platform_data *pdata);
+
+/* Coresight ETM/ETR hardware quirks */
+u32 coresight_get_etr_quirks(unsigned int id);
+u32 coresight_get_etm_quirks(unsigned int id);
+
+/* ETM software sync insertion */
+bool is_etm_sync_mode_hw(void);
+bool is_etm_sync_mode_sw_global(void);
+
+void coresight_etm_active_enable(int cpu);
+void coresight_etm_active_disable(int cpu);
+cpumask_t coresight_etm_active_list(void);
+
struct coresight_device *
coresight_find_csdev_by_fwnode(struct fwnode_handle *r_fwnode);
void coresight_set_assoc_ectdev_mutex(struct coresight_device *csdev,
diff --git a/drivers/hwtracing/coresight/coresight-quirks.c b/drivers/hwtracing/coresight/coresight-quirks.c
new file mode 100644
index 000000000000..acea9957a206
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-quirks.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+#include <linux/coresight.h>
+#include "coresight-priv.h"
+
+u32 coresight_get_etr_quirks(unsigned int id)
+{
+ u32 options = 0; /* reset */
+
+ if (midr_is_cpu_model_range(read_cpuid_id(),
+ MIDR_MRVL_OCTEONTX2_96XX,
+ MIDR_CPU_VAR_REV(0, 0),
+ MIDR_CPU_VAR_REV(3, 0)) ||
+ midr_is_cpu_model_range(read_cpuid_id(),
+ MIDR_MRVL_OCTEONTX2_95XX,
+ MIDR_CPU_VAR_REV(0, 0),
+ MIDR_CPU_VAR_REV(2, 0)))
+ options |= CSETR_QUIRK_BUFFSIZE_8BX |
+ CSETR_QUIRK_RESET_CTL_REG |
+ CSETR_QUIRK_NO_STOP_FLUSH;
+
+ if (id == OCTEONTX_CN9XXX_ETR)
+ options |= CSETR_QUIRK_SECURE_BUFF;
+
+ return options;
+}
+
+u32 coresight_get_etm_quirks(unsigned int id)
+{
+ u32 options = 0; /* reset */
+
+ if (id == OCTEONTX_CN9XXX_ETM)
+ options |= CSETM_QUIRK_TREAT_ETMv43;
+
+ return options;
+}
+
+static bool is_etm_has_hw_sync(void)
+{
+ /* Check if hardware supports sync insertion */
+ if (midr_is_cpu_model_range(read_cpuid_id(),
+ MIDR_MRVL_OCTEONTX2_96XX,
+ MIDR_CPU_VAR_REV(0, 0),
+ MIDR_CPU_VAR_REV(3, 0)) ||
+ midr_is_cpu_model_range(read_cpuid_id(),
+ MIDR_MRVL_OCTEONTX2_95XX,
+ MIDR_CPU_VAR_REV(0, 0),
+ MIDR_CPU_VAR_REV(2, 0)))
+ return false;
+ else
+ return true;
+}
+
+/* APIs for choosing the sync insertion mode */
+static int coresight_get_etm_sync_mode(void)
+{
+ /* Check if hardware supports sync insertion */
+ if (is_etm_has_hw_sync())
+ return SYNC_MODE_HW;
+
+ /* Find the software based sync insertion mode */
+#ifdef CONFIG_TASK_ISOLATION
+ return SYNC_MODE_SW_GLOBAL;
+#else
+ return SYNC_MODE_SW_PER_CORE;
+#endif
+}
+
+/* APIs for enabling fixes for CSETM_QUIRK_SW_SYNC
+ *
+ * Note: Driver options are not used as is done for other quirks,
+ * since the fix is spread across multiple (ETM/ETR) driver files.
+ */
+
+bool is_etm_sync_mode_hw(void)
+{
+ return coresight_get_etm_sync_mode() == SYNC_MODE_HW;
+}
+
+bool is_etm_sync_mode_sw_global(void)
+{
+ return coresight_get_etm_sync_mode() == SYNC_MODE_SW_GLOBAL;
+}
+
+/* Support functions for managing active ETM list used by
+ * global mode sync insertion.
+ *
+ * Note: It is assumed that all accessor functions
+ * on etm_active_list should be called in an atomic context
+ */
+
+static cpumask_t etm_active_list; /* Bitmap of active ETMs cpu wise */
+
+void coresight_etm_active_enable(int cpu)
+{
+ cpumask_set_cpu(cpu, &etm_active_list);
+}
+
+void coresight_etm_active_disable(int cpu)
+{
+ cpumask_clear_cpu(cpu, &etm_active_list);
+}
+
+cpumask_t coresight_etm_active_list(void)
+{
+ return etm_active_list;
+}
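
As a rough orientation for how the helpers in this new file are meant to be consumed (the ETM and TMC probe paths later in this series do exactly this), here is a minimal sketch; the struct is trimmed to the fields involved and is not the real drvdata layout:

    #include <linux/types.h>
    #include <linux/printk.h>
    #include "coresight-priv.h"

    /* Sketch only: probe-time consumption of the quirk and sync-mode helpers. */
    struct example_drvdata {
        u32 etr_options;
        int cpu;
        int rc_cpu;
    };

    static void example_apply_quirks(struct example_drvdata *d)
    {
        d->etr_options = coresight_get_etr_quirks(OCTEONTX_CN9XXX_ETR);

        /* Remote-call target: primary core when the global SW sync timer
         * is used, otherwise the core owning this component.
         */
        d->rc_cpu = is_etm_sync_mode_sw_global() ? SYNC_GLOBAL_CORE : d->cpu;

        if (d->etr_options & CSETR_QUIRK_SECURE_BUFF)
            pr_debug("trace buffer is kept in secure memory\n");
    }
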
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 625882bc8b08..18b73dd6304f 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -16,6 +16,7 @@
#include <linux/vmalloc.h>
#include "coresight-catu.h"
#include "coresight-etm-perf.h"
+#include <linux/arm-smccc.h>
#include "coresight-priv.h"
#include "coresight-tmc.h"
@@ -108,6 +109,232 @@ struct etr_sg_table {
dma_addr_t hwaddr;
};
+/* SW mode sync insertion interval
+ *
+ * The sync insertion interval per 1MB assumes trace data is generated
+ * at 4 bits/cycle with a cycle period of 0.4 ns, and that at least
+ * 4 syncs occur per buffer wrap.
+ *
+ * One limitation of fixing only 4 syncs per buffer wrap is that
+ * we might lose 1/4 of the initial buffer data due to lack of sync.
+ * On the other hand, we can reduce the sync insertion frequency
+ * by increasing the buffer size, which seems to be a good compromise.
+ */
+#define SYNC_TICK_NS_PER_MB 200000 /* 200us */
+#define SYNCS_PER_FILL 4
+
+/* Global mode timer management */
+
+/**
+ * struct tmc_etr_tsync_global - Global mode timer
+ * @drvdata_cpumap: cpu to tmc drvdata map
+ * @timer: global timer shared by all cores
+ * @tick: global timer tick period
+ * @active_count: timer reference count
+ */
+static struct tmc_etr_tsync_global {
+ struct tmc_drvdata *drvdata_cpumap[NR_CPUS];
+ struct hrtimer timer;
+ int active_count;
+ u64 tick;
+} tmc_etr_tsync_global;
+
+/* Accessor functions for tsync global */
+void tmc_etr_add_cpumap(struct tmc_drvdata *drvdata)
+{
+ tmc_etr_tsync_global.drvdata_cpumap[drvdata->cpu] = drvdata;
+}
+
+static inline struct tmc_drvdata *cpu_to_tmcdrvdata(int cpu)
+{
+ return tmc_etr_tsync_global.drvdata_cpumap[cpu];
+}
+
+static inline struct hrtimer *tmc_etr_tsync_global_timer(void)
+{
+ return &tmc_etr_tsync_global.timer;
+}
+
+static inline void tmc_etr_tsync_global_tick(u64 tick)
+{
+ tmc_etr_tsync_global.tick = tick;
+}
+
+/* Reference counting is assumed to always be done from
+ * an atomic context.
+ */
+static inline int tmc_etr_tsync_global_addref(void)
+{
+ return ++tmc_etr_tsync_global.active_count;
+}
+
+static inline int tmc_etr_tsync_global_delref(void)
+{
+ return --tmc_etr_tsync_global.active_count;
+}
+
+/* Sync insertion API */
+static void tmc_etr_insert_sync(struct tmc_drvdata *drvdata)
+{
+ struct coresight_device *sdev = drvdata->etm_source;
+ struct etr_tsync_data *syncd = &drvdata->tsync_data;
+ int err = 0, len;
+ u64 rwp;
+
+ /* We have three contenders for ETM control.
+ * 1. User initiated ETM control
+ * 2. Timer sync initiated ETM control
+ * 3. No stop on flush initiated ETM control
+ * They all run in an atomic context on the same core, either
+ * the core the ETM is associated with or the primary core, and
+ * are therefore mutually exclusive.
+ *
+ * To avoid any sync insertion while the ETM is disabled by the
+ * user, we rely on the device hw_state; for example, the hrtimer
+ * can still be active after the user has disabled the ETM.
+ */
+ if (!sdev || sdev->hw_state != USR_START)
+ return;
+
+ rwp = tmc_read_rwp(drvdata);
+ if (!syncd->prev_rwp)
+ goto sync_insert;
+
+ if (syncd->prev_rwp <= rwp) {
+ len = rwp - syncd->prev_rwp;
+ } else { /* Buffer wrapped */
+ goto sync_insert;
+ }
+
+ /* Check if we reached buffer threshold */
+ if (len < syncd->len_thold)
+ goto skip_insert;
+
+ /* Software based sync insertion procedure */
+sync_insert:
+ /* Disable source */
+ if (likely(sdev && source_ops(sdev)->disable_raw))
+ source_ops(sdev)->disable_raw(sdev);
+ else
+ err = -EINVAL;
+
+ /* Enable source */
+ if (likely(sdev && source_ops(sdev)->enable_raw))
+ source_ops(sdev)->enable_raw(sdev);
+ else
+ err = -EINVAL;
+
+ if (!err) {
+ /* Mark the write pointer of sync insertion */
+ syncd->prev_rwp = tmc_read_rwp(drvdata);
+ }
+
+skip_insert:
+ return;
+}
+
+/* Timer handler APIs */
+
+static enum hrtimer_restart tmc_etr_timer_handler_percore(struct hrtimer *t)
+{
+ struct tmc_drvdata *drvdata;
+
+ drvdata = container_of(t, struct tmc_drvdata, timer);
+ hrtimer_forward_now(t, ns_to_ktime(drvdata->tsync_data.tick));
+ tmc_etr_insert_sync(drvdata);
+ return HRTIMER_RESTART;
+}
+
+static enum hrtimer_restart tmc_etr_timer_handler_global(struct hrtimer *t)
+{
+ cpumask_t active_mask;
+ int cpu;
+
+ hrtimer_forward_now(t, ns_to_ktime(tmc_etr_tsync_global.tick));
+
+ active_mask = coresight_etm_active_list();
+ /* Run sync insertions for all active ETMs */
+ for_each_cpu(cpu, &active_mask)
+ tmc_etr_insert_sync(cpu_to_tmcdrvdata(cpu));
+
+ return HRTIMER_RESTART;
+}
+
+/* Timer init API common for both global and per core mode */
+void tmc_etr_timer_init(struct tmc_drvdata *drvdata)
+{
+ struct hrtimer *timer;
+
+ timer = is_etm_sync_mode_sw_global() ?
+ tmc_etr_tsync_global_timer() : &drvdata->timer;
+ hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+}
+
+/* Timer setup API common for both global and per core mode
+ *
+ * Global mode: Timer gets started only if it is not already active.
+ * The number of users is managed by reference counting.
+ * Percore mode: Timer is always started.
+ *
+ * Always executed in an atomic context, either in an IPI handler
+ * on a remote core or with irqs disabled on the local core.
+ */
+static void tmc_etr_timer_setup(void *data)
+{
+ struct tmc_drvdata *drvdata = data;
+ struct hrtimer *timer;
+ bool mode_global;
+ u64 tick;
+
+ tick = drvdata->tsync_data.tick;
+ mode_global = is_etm_sync_mode_sw_global();
+ if (mode_global) {
+ if (tmc_etr_tsync_global_addref() == 1) {
+ /* Start only if we are the first user */
+ tmc_etr_tsync_global_tick(tick); /* Configure tick */
+ } else {
+ dev_dbg(&drvdata->csdev->dev, "global timer active already\n");
+ return;
+ }
+ }
+
+ timer = mode_global ?
+ tmc_etr_tsync_global_timer() : &drvdata->timer;
+ timer->function = mode_global ?
+ tmc_etr_timer_handler_global : tmc_etr_timer_handler_percore;
+ dev_dbg(&drvdata->csdev->dev, "Starting sync timer, mode:%s period:%lld ns\n",
+ mode_global ? "global" : "percore", tick);
+ hrtimer_start(timer, ns_to_ktime(tick), HRTIMER_MODE_REL_PINNED);
+}
+
+/* Timer cancel API common for both global and per core mode
+ *
+ * Global mode: Timer gets cancelled only if there are no other users
+ * Percore mode: Timer gets cancelled always
+ *
+ * Always executed in an atomic context, either in an IPI handler
+ * on a remote core or with irqs disabled on the local core.
+ */
+static void tmc_etr_timer_cancel(void *data)
+{
+ struct tmc_drvdata *drvdata = data;
+ struct hrtimer *timer;
+ bool mode_global;
+
+ mode_global = is_etm_sync_mode_sw_global();
+ if (mode_global) {
+ if (tmc_etr_tsync_global_delref() != 0) {
+ /* Nothing to do if we are not the last user */
+ return;
+ }
+ }
+
+ timer = mode_global ?
+ tmc_etr_tsync_global_timer() : &drvdata->timer;
+ hrtimer_cancel(timer);
+}
+
/*
* tmc_etr_sg_table_entries: Total number of table entries required to map
* @nr_pages system pages.
@@ -593,6 +820,9 @@ static int tmc_etr_alloc_flat_buf(struct tmc_drvdata *drvdata,
{
struct etr_flat_buf *flat_buf;
struct device *real_dev = drvdata->csdev->dev.parent;
+ dma_addr_t s_paddr = 0;
+ int buff_sec_mapped = 0;
+ int ret;
/* We cannot reuse existing pages for flat buf */
if (pages)
@@ -609,12 +839,44 @@ static int tmc_etr_alloc_flat_buf(struct tmc_drvdata *drvdata,
return -ENOMEM;
}
+ if (!(drvdata->etr_options & CSETR_QUIRK_SECURE_BUFF))
+ goto skip_secure_buffer;
+
+ /* Register driver allocated dma buffer for necessary
+ * mapping in the secure world
+ */
+ if (tmc_register_drvbuf(drvdata, flat_buf->daddr, etr_buf->size)) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ buff_sec_mapped = 1;
+
+ /* Allocate secure trace buffer */
+ if (tmc_alloc_secbuf(drvdata, etr_buf->size, &s_paddr)) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+skip_secure_buffer:
flat_buf->size = etr_buf->size;
flat_buf->dev = &drvdata->csdev->dev;
etr_buf->hwaddr = flat_buf->daddr;
+ etr_buf->s_paddr = s_paddr;
etr_buf->mode = ETR_MODE_FLAT;
etr_buf->private = flat_buf;
return 0;
+
+err:
+ if (s_paddr)
+ tmc_free_secbuf(drvdata, s_paddr, etr_buf->size);
+ if (buff_sec_mapped)
+ tmc_unregister_drvbuf(drvdata, flat_buf->daddr, etr_buf->size);
+ dma_free_coherent(&drvdata->csdev->dev, etr_buf->size, flat_buf->vaddr,
+ flat_buf->daddr);
+ kfree(flat_buf);
+
+ return ret;
}
static void tmc_etr_free_flat_buf(struct etr_buf *etr_buf)
@@ -955,10 +1217,17 @@ static void __tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
CS_UNLOCK(drvdata->base);
+ if (drvdata->etr_options & CSETR_QUIRK_RESET_CTL_REG)
+ tmc_disable_hw(drvdata);
+
/* Wait for TMCSReady bit to be set */
tmc_wait_for_tmcready(drvdata);
- writel_relaxed(etr_buf->size / 4, drvdata->base + TMC_RSZ);
+ if (drvdata->etr_options & CSETR_QUIRK_BUFFSIZE_8BX)
+ writel_relaxed(etr_buf->size / 8, drvdata->base + TMC_RSZ);
+ else
+ writel_relaxed(etr_buf->size / 4, drvdata->base + TMC_RSZ);
writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
axictl = readl_relaxed(drvdata->base + TMC_AXICTL);
@@ -975,7 +1244,10 @@ static void __tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
axictl |= TMC_AXICTL_SCT_GAT_MODE;
writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
- tmc_write_dba(drvdata, etr_buf->hwaddr);
+ if (drvdata->etr_options & CSETR_QUIRK_SECURE_BUFF)
+ tmc_write_dba(drvdata, etr_buf->s_paddr);
+ else
+ tmc_write_dba(drvdata, etr_buf->hwaddr);
/*
* If the TMC pointers must be programmed before the session,
* we have to set it properly (i.e, RRP/RWP to base address and
@@ -983,7 +1255,10 @@ static void __tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
*/
if (tmc_etr_has_cap(drvdata, TMC_ETR_SAVE_RESTORE)) {
tmc_write_rrp(drvdata, etr_buf->hwaddr);
- tmc_write_rwp(drvdata, etr_buf->hwaddr);
+ if (drvdata->etr_options & CSETR_QUIRK_SECURE_BUFF)
+ tmc_write_rwp(drvdata, etr_buf->s_paddr);
+ else
+ tmc_write_rwp(drvdata, etr_buf->hwaddr);
sts = readl_relaxed(drvdata->base + TMC_STS) & ~TMC_STS_FULL;
writel_relaxed(sts, drvdata->base + TMC_STS);
}
@@ -1127,6 +1402,20 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
struct etr_buf *sysfs_buf = NULL, *new_buf = NULL, *free_buf = NULL;
+ if (!is_etm_sync_mode_hw()) {
+ /* Calculate parameters for sync insertion */
+ drvdata->tsync_data.len_thold =
+ drvdata->size / (SYNCS_PER_FILL);
+ drvdata->tsync_data.tick =
+ (drvdata->size / SZ_1M) * SYNC_TICK_NS_PER_MB;
+ drvdata->tsync_data.prev_rwp = 0;
+ if (!drvdata->tsync_data.tick) {
+ drvdata->tsync_data.tick = SYNC_TICK_NS_PER_MB;
+ dev_warn(&drvdata->csdev->dev,
+ "Trace bufer size not sufficient, sync insertion can fail\n");
+ }
+ }
+
/*
* If we are enabling the ETR from disabled state, we need to make
* sure we have a buffer with the right size. The etr_buf is not reset
@@ -1174,6 +1463,9 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
drvdata->sysfs_buf = new_buf;
}
+ if (drvdata->mode == CS_MODE_READ_PREVBOOT)
+ goto out;
+
ret = tmc_etr_enable_hw(drvdata, drvdata->sysfs_buf);
if (!ret) {
drvdata->mode = CS_MODE_SYSFS;
@@ -1182,6 +1474,10 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ if (!ret && !is_etm_sync_mode_hw() &&
+ (drvdata->mode != CS_MODE_READ_PREVBOOT))
+ smp_call_function_single(drvdata->rc_cpu, tmc_etr_timer_setup,
+ drvdata, true);
/* Free memory outside the spinlock if need be */
if (free_buf)
tmc_etr_free_sysfs_buf(free_buf);
@@ -1653,22 +1949,67 @@ static int tmc_disable_etr_sink(struct coresight_device *csdev)
spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ if (!is_etm_sync_mode_hw())
+ smp_call_function_single(drvdata->rc_cpu, tmc_etr_timer_cancel,
+ drvdata, true);
+
dev_dbg(&csdev->dev, "TMC-ETR disabled\n");
return 0;
}
+void tmc_register_source(struct coresight_device *csdev, void *source)
+{
+ struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ drvdata->etm_source = source;
+}
+
+void tmc_unregister_source(struct coresight_device *csdev)
+{
+ struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ drvdata->etm_source = NULL;
+}
+
static const struct coresight_ops_sink tmc_etr_sink_ops = {
.enable = tmc_enable_etr_sink,
.disable = tmc_disable_etr_sink,
.alloc_buffer = tmc_alloc_etr_buffer,
.update_buffer = tmc_update_etr_buffer,
.free_buffer = tmc_free_etr_buffer,
+ .register_source = tmc_register_source,
+ .unregister_source = tmc_unregister_source,
};
const struct coresight_ops tmc_etr_cs_ops = {
.sink_ops = &tmc_etr_sink_ops,
};
+
+/* APIs to manage ETM start/stop when ETR stop on flush is broken */
+
+void tmc_flushstop_etm_off(void *data)
+{
+ struct tmc_drvdata *drvdata = data;
+ struct coresight_device *sdev = drvdata->etm_source;
+
+ if (sdev->hw_state == USR_START) {
+ source_ops(sdev)->disable_raw(sdev);
+ sdev->hw_state = SW_STOP;
+ }
+}
+
+void tmc_flushstop_etm_on(void *data)
+{
+ struct tmc_drvdata *drvdata = data;
+ struct coresight_device *sdev = drvdata->etm_source;
+
+ if (sdev->hw_state == SW_STOP) { /* Restore the user configured state */
+ source_ops(sdev)->enable_raw(sdev);
+ sdev->hw_state = USR_START;
+ }
+}
+
int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
{
int ret = 0;
@@ -1678,6 +2019,20 @@ int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
return -EINVAL;
+ if (drvdata->mode == CS_MODE_READ_PREVBOOT) {
+ /* Initialize drvdata for reading trace data from last boot */
+ ret = tmc_enable_etr_sink_sysfs(drvdata->csdev);
+ if (ret)
+ return ret;
+ /* Update the buffer offset, len */
+ tmc_etr_sync_sysfs_buf(drvdata);
+ return 0;
+ }
+
+ if (drvdata->etr_options & CSETR_QUIRK_NO_STOP_FLUSH)
+ smp_call_function_single(drvdata->rc_cpu, tmc_flushstop_etm_off,
+ drvdata, true);
+
spin_lock_irqsave(&drvdata->spinlock, flags);
if (drvdata->reading) {
ret = -EBUSY;
@@ -1702,6 +2057,13 @@ int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ if (ret && drvdata->etr_options & CSETR_QUIRK_NO_STOP_FLUSH) {
+ dev_warn(&drvdata->csdev->dev, "ETM wrongly stopped\n");
+ /* Restore back on error */
+ smp_call_function_single(drvdata->rc_cpu, tmc_flushstop_etm_on,
+ drvdata, true);
+ }
+
return ret;
}
@@ -1742,3 +2104,22 @@ int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
return 0;
}
+
+int tmc_copy_secure_buffer(struct tmc_drvdata *drvdata,
+ char *bufp, size_t len)
+{
+ struct arm_smccc_res res;
+ uint64_t offset;
+ char *vaddr;
+ struct etr_buf *etr_buf = drvdata->etr_buf;
+
+ tmc_etr_buf_get_data(etr_buf, 0, 0, &vaddr);
+ offset = bufp - vaddr;
+
+ arm_smccc_smc(OCTEONTX_TRC_COPY_TO_DRVBUF, etr_buf->hwaddr + offset,
+ etr_buf->s_paddr + offset, len, 0, 0, 0, 0, &res);
+ if (res.a0 != SMCCC_RET_SUCCESS)
+ return -EFAULT;
+
+ return 0;
+}
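
To make the SYNC_TICK_NS_PER_MB / SYNCS_PER_FILL comment near the top of this file concrete, the per-enable sync parameters reduce to two numbers derived from the buffer size. A stand-alone calculation (constants copied from the patch, buffer sizes chosen only as examples):

    #include <stdio.h>

    #define SZ_1M                   (1024ULL * 1024)
    #define SYNC_TICK_NS_PER_MB     200000ULL      /* 200 us per MB of buffer */
    #define SYNCS_PER_FILL          4ULL

    int main(void)
    {
        unsigned long long sizes[] = { SZ_1M, 4 * SZ_1M, 16 * SZ_1M };

        for (int i = 0; i < 3; i++) {
            unsigned long long tick = (sizes[i] / SZ_1M) * SYNC_TICK_NS_PER_MB;
            unsigned long long thold = sizes[i] / SYNCS_PER_FILL;

            /* e.g. a 16 MB ETR buffer gives a 3.2 ms timer tick and a
             * 4 MB write-pointer threshold between sync insertions. */
            printf("size=%llu MB tick=%llu ns threshold=%llu bytes\n",
                   sizes[i] / SZ_1M, tick, thold);
        }
        return 0;
    }
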
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index 1cf82fa58289..408306736fb8 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -46,8 +46,12 @@ void tmc_flush_and_stop(struct tmc_drvdata *drvdata)
u32 ffcr;
ffcr = readl_relaxed(drvdata->base + TMC_FFCR);
- ffcr |= TMC_FFCR_STOP_ON_FLUSH;
- writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
+
+ if (!(drvdata->etr_options & CSETR_QUIRK_NO_STOP_FLUSH)) {
+ /* It is assumed that the ETM is stopped as an alternative */
+ ffcr |= TMC_FFCR_STOP_ON_FLUSH;
+ writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
+ }
ffcr |= BIT(TMC_FFCR_FLUSHMAN_BIT);
writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
/* Ensure flush completes */
@@ -57,7 +61,8 @@ void tmc_flush_and_stop(struct tmc_drvdata *drvdata)
"timeout while waiting for completion of Manual Flush\n");
}
- tmc_wait_for_tmcready(drvdata);
+ if (!(drvdata->etr_options & CSETR_QUIRK_NO_STOP_FLUSH))
+ tmc_wait_for_tmcready(drvdata);
}
void tmc_enable_hw(struct tmc_drvdata *drvdata)
@@ -148,6 +153,11 @@ static int tmc_open(struct inode *inode, struct file *file)
struct tmc_drvdata *drvdata = container_of(file->private_data,
struct tmc_drvdata, miscdev);
+ if (drvdata->buf == NULL) {
+ drvdata->mode = CS_MODE_READ_PREVBOOT;
+ dev_info(&drvdata->csdev->dev, "TMC read mode for previous boot\n");
+ }
+
ret = tmc_read_prepare(drvdata);
if (ret)
return ret;
@@ -183,6 +193,10 @@ static ssize_t tmc_read(struct file *file, char __user *data, size_t len,
if (actual <= 0)
return 0;
+ if ((drvdata->etr_options & CSETR_QUIRK_SECURE_BUFF) &&
+ tmc_copy_secure_buffer(drvdata, bufp, len))
+ return -EFAULT;
+
if (copy_to_user(data, bufp, actual)) {
dev_dbg(&drvdata->csdev->dev,
"%s: copy_to_user failed\n", __func__);
@@ -346,9 +360,27 @@ static ssize_t buffer_size_store(struct device *dev,
static DEVICE_ATTR_RW(buffer_size);
+static ssize_t tracebuffer_size_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ return -EINVAL;
+}
+
+static ssize_t tracebuffer_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ unsigned long val = drvdata->size;
+
+ return sprintf(buf, "%#lx\n", val);
+}
+static DEVICE_ATTR_RW(tracebuffer_size);
+
static struct attribute *coresight_tmc_attrs[] = {
&dev_attr_trigger_cntr.attr,
&dev_attr_buffer_size.attr,
+ &dev_attr_tracebuffer_size.attr,
NULL,
};
@@ -425,8 +457,15 @@ static int tmc_etr_setup_caps(struct device *parent, u32 devid, void *dev_caps)
static u32 tmc_etr_get_default_buffer_size(struct device *dev)
{
u32 size;
+ struct tmc_drvdata *drvdata;
- if (fwnode_property_read_u32(dev->fwnode, "arm,buffer-size", &size))
+ drvdata = dev_get_drvdata(dev);
+ if (drvdata->etr_options & CSETR_QUIRK_SECURE_BUFF) {
+ if (tmc_get_cpu_tracebufsize(drvdata, &size) || !size)
+ return 0;
+ } else {
+ size = SZ_1M;
+ }
return size;
}
@@ -461,17 +500,35 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
spin_lock_init(&drvdata->spinlock);
+ drvdata->cpu = coresight_get_cpu(dev);
+
+ /* Enable fixes for Silicon issues */
+ drvdata->etr_options = coresight_get_etr_quirks(OCTEONTX_CN9XXX_ETR);
+
+ /* Update the smp target cpu */
+ drvdata->rc_cpu = is_etm_sync_mode_sw_global() ? SYNC_GLOBAL_CORE :
+ drvdata->cpu;
+ if (!is_etm_sync_mode_hw()) {
+ tmc_etr_add_cpumap(drvdata); /* Used for global sync mode */
+ tmc_etr_timer_init(drvdata);
+ }
+
devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID);
drvdata->config_type = BMVAL(devid, 6, 7);
drvdata->memwidth = tmc_get_memwidth(devid);
/* This device is not associated with a session */
drvdata->pid = -1;
+ drvdata->formatter_en = !(readl_relaxed(drvdata->base + TMC_FFSR) &
+ TMC_FFSR_FT_NOT_PRESENT);
if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
drvdata->size = tmc_etr_get_default_buffer_size(dev);
else
drvdata->size = readl_relaxed(drvdata->base + TMC_RSZ) * 4;
+ /* Keep cache lock disabled by default */
+ drvdata->cache_lock_en = false;
+
desc.dev = dev;
desc.groups = coresight_tmc_groups;
@@ -546,6 +603,8 @@ static const struct amba_id tmc_ids[] = {
CS_AMBA_ID(0x000bb9e9),
/* Coresight SoC 600 TMC-ETF */
CS_AMBA_ID(0x000bb9ea),
+ /* Marvell OcteonTx CN9xxx */
+ CS_AMBA_ID_DATA(OCTEONTX_CN9XXX_ETR, (unsigned long)OCTEONTX_CN9XXX_ETR_CAPS),
{ 0, 0},
};
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
index 71de978575f3..4c0128bf8156 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.h
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -12,6 +12,7 @@
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
+#include <linux/arm-smccc.h>
#define TMC_RSZ 0x004
#define TMC_STS 0x00c
@@ -75,6 +76,9 @@
#define TMC_AXICTL_AXCACHE_OS (0xf << 2)
#define TMC_AXICTL_ARCACHE_OS (0xf << 16)
+/* TMC_FFSR - 0x300 */
+#define TMC_FFSR_FT_NOT_PRESENT BIT(4)
+
/* TMC_FFCR - 0x304 */
#define TMC_FFCR_FLUSHMAN_BIT 6
#define TMC_FFCR_EN_FMT BIT(0)
@@ -130,6 +134,29 @@ enum tmc_mem_intf_width {
#define CORESIGHT_SOC_600_ETR_CAPS \
(TMC_ETR_SAVE_RESTORE | TMC_ETR_AXI_ARCACHE)
+/* Marvell OcteonTx CN9xxx TMC-ETR unadvertised capabilities */
+#define OCTEONTX_CN9XXX_ETR_CAPS \
+ (TMC_ETR_SAVE_RESTORE)
+
+/* SMC call ids for managing the secure trace buffer */
+
+/* Args: x1 - size, x2 - cpu, x3 - llc lock flag
+ * Returns: x0 - status, x1 - secure buffer address
+ */
+#define OCTEONTX_TRC_ALLOC_SBUF 0xc2000c05
+/* Args: x1 - non secure buffer address, x2 - size */
+#define OCTEONTX_TRC_REGISTER_DRVBUF 0xc2000c06
+/* Args: x1 - dst(non secure), x2 - src(secure), x3 - size */
+#define OCTEONTX_TRC_COPY_TO_DRVBUF 0xc2000c07
+/* Args: x1 - secure buffer address, x2 - size */
+#define OCTEONTX_TRC_FREE_SBUF 0xc2000c08
+/* Args: x1 - non secure buffer address, x2 - size */
+#define OCTEONTX_TRC_UNREGISTER_DRVBUF 0xc2000c09
+/* Args: Nil
+ * Returns: cpu trace buffer size
+ */
+#define OCTEONTX_TRC_GET_CPU_BUFSIZE 0xc2000c0a
+
enum etr_mode {
ETR_MODE_FLAT, /* Uses contiguous flat buffer */
ETR_MODE_ETR_SG, /* Uses in-built TMC ETR SG mechanism */
@@ -145,6 +172,7 @@ struct etr_buf_operations;
* @full : Trace data overflow
* @size : Size of the buffer.
* @hwaddr : Address to be programmed in the TMC:DBA{LO,HI}
+ * @s_paddr : Secure trace buffer
* @offset : Offset of the trace data in the buffer for consumption.
* @len : Available trace data @buf (may round up to the beginning).
* @ops : ETR buffer operations for the mode.
@@ -156,6 +184,7 @@ struct etr_buf {
bool full;
ssize_t size;
dma_addr_t hwaddr;
+ dma_addr_t s_paddr;
unsigned long offset;
s64 len;
const struct etr_buf_operations *ops;
@@ -163,17 +192,32 @@ struct etr_buf {
};
/**
+ * struct etr_tsync_data - Timer based sync insertion data management
+ * @syncs_per_fill: syncs inserted per buffer wrap
+ * @prev_rwp: writepointer for the last sync insertion
+ * @len_thold: Buffer length threshold for inserting syncs
+ * @tick: Tick interval in ns
+ */
+struct etr_tsync_data {
+ int syncs_per_fill;
+ u64 prev_rwp;
+ u64 len_thold;
+ u64 tick;
+};
+
+/**
* struct tmc_drvdata - specifics associated to an TMC component
* @base: memory mapped base address for this component.
* @csdev: component vitals needed by the framework.
* @miscdev: specifics to handle "/dev/xyz.tmc" entry.
* @spinlock: only one at a time pls.
* @pid: Process ID of the process being monitored by the session
* that is using this component.
* @buf: Snapshot of the trace data for ETF/ETB.
* @etr_buf: details of buffer used in TMC-ETR
* @len: size of the available trace for ETF/ETB.
* @size: trace buffer size for this TMC (common for all modes).
+ * @formatter_en: Formatter enable/disable status
+ * @cache_lock_en: Cache lock status
* @mode: how this TMC is being used.
* @config_type: TMC variant, must be of type @tmc_config_type.
* @memwidth: width of the memory interface databus, in bytes.
@@ -184,6 +228,14 @@ struct etr_buf {
* @idr_mutex: Access serialisation for idr.
* @sysfs_buf: SYSFS buffer for ETR.
* @perf_buf: PERF buffer for ETR.
+ * @etr_options: Bitmask of options to manage Silicon issues
+ * @cpu: CPU id this component is associated with
+ * @rc_cpu: The cpu on which remote function calls can be run
+ * In certain kernel configurations, some cores are not expected
+ * to be interrupted and we need a fallback target cpu.
+ * @etm_source: ETM source associated with this ETR
+ * @etr_tsync_data: Timer based sync insertion data
+ * @timer: Timer for initiating sync insertion
*/
struct tmc_drvdata {
void __iomem *base;
@@ -196,6 +248,8 @@ struct tmc_drvdata {
char *buf; /* TMC ETB */
struct etr_buf *etr_buf; /* TMC ETR */
};
+ bool formatter_en;
+ bool cache_lock_en;
u32 len;
u32 size;
u32 mode;
@@ -207,6 +261,12 @@ struct tmc_drvdata {
struct mutex idr_mutex;
struct etr_buf *sysfs_buf;
struct etr_buf *perf_buf;
+ u32 etr_options;
+ int cpu;
+ int rc_cpu;
+ void *etm_source;
+ struct etr_tsync_data tsync_data;
+ struct hrtimer timer;
};
struct etr_buf_operations {
@@ -268,6 +328,8 @@ ssize_t tmc_etb_get_sysfs_trace(struct tmc_drvdata *drvdata,
/* ETR functions */
int tmc_read_prepare_etr(struct tmc_drvdata *drvdata);
int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata);
+void tmc_etr_timer_init(struct tmc_drvdata *drvdata);
+void tmc_etr_add_cpumap(struct tmc_drvdata *drvdata);
extern const struct coresight_ops tmc_etr_cs_ops;
ssize_t tmc_etr_get_sysfs_trace(struct tmc_drvdata *drvdata,
loff_t pos, size_t len, char **bufpp);
@@ -325,4 +387,68 @@ tmc_sg_table_buf_size(struct tmc_sg_table *sg_table)
struct coresight_device *tmc_etr_get_catu_device(struct tmc_drvdata *drvdata);
+static inline int tmc_get_cpu_tracebufsize(struct tmc_drvdata *drvdata,
+ u32 *len)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(OCTEONTX_TRC_GET_CPU_BUFSIZE, 0, 0, 0,
+ 0, 0, 0, 0, &res);
+ if (res.a0 != SMCCC_RET_SUCCESS)
+ return -EFAULT;
+
+ *len = (u32)res.a1;
+ return 0;
+}
+
+static inline int tmc_alloc_secbuf(struct tmc_drvdata *drvdata,
+ size_t len, dma_addr_t *s_paddr)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(OCTEONTX_TRC_ALLOC_SBUF, len, drvdata->cpu,
+ drvdata->cache_lock_en, 0, 0, 0, 0, &res);
+ if (res.a0 != SMCCC_RET_SUCCESS)
+ return -EFAULT;
+
+ *s_paddr = res.a1;
+ return 0;
+}
+
+static inline int tmc_free_secbuf(struct tmc_drvdata *drvdata,
+ dma_addr_t s_paddr, size_t len)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(OCTEONTX_TRC_FREE_SBUF, s_paddr, len,
+ 0, 0, 0, 0, 0, &res);
+ return 0;
+}
+
+static inline int tmc_register_drvbuf(struct tmc_drvdata *drvdata,
+ dma_addr_t paddr, size_t len)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(OCTEONTX_TRC_REGISTER_DRVBUF, paddr, len,
+ 0, 0, 0, 0, 0, &res);
+ if (res.a0 != SMCCC_RET_SUCCESS)
+ return -EFAULT;
+
+ return 0;
+}
+
+static inline int tmc_unregister_drvbuf(struct tmc_drvdata *drvdata,
+ dma_addr_t paddr, size_t len)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(OCTEONTX_TRC_UNREGISTER_DRVBUF, paddr, len,
+ 0, 0, 0, 0, 0, &res);
+ return 0;
+}
+
+int tmc_copy_secure_buffer(struct tmc_drvdata *drvdata,
+ char *bufp, size_t len);
#endif
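
The SMC helpers above imply a fixed ordering when a secure trace buffer is in use. A hedged sketch of that lifecycle follows (error handling trimmed; this is not the literal driver flow — see tmc_etr_alloc_flat_buf() and tmc_read() for the real call sites):

    /* Sketch, kernel context: register the non-secure DMA buffer first so
     * firmware can later copy into it, then allocate the secure buffer the
     * ETR actually writes to; reads go through OCTEONTX_TRC_COPY_TO_DRVBUF. */
    static int example_secure_trace_cycle(struct tmc_drvdata *drvdata,
                                          dma_addr_t drv_paddr, size_t size)
    {
        dma_addr_t s_paddr;
        int ret;

        ret = tmc_register_drvbuf(drvdata, drv_paddr, size);
        if (ret)
            return ret;

        ret = tmc_alloc_secbuf(drvdata, size, &s_paddr);
        if (ret) {
            tmc_unregister_drvbuf(drvdata, drv_paddr, size);
            return ret;
        }

        /* ... trace session runs with TMC:DBA programmed to s_paddr ... */

        tmc_free_secbuf(drvdata, s_paddr, size);
        tmc_unregister_drvbuf(drvdata, drv_paddr, size);
        return 0;
    }
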
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index 8f5e62f02444..2b274ef6b79b 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -20,6 +20,8 @@
#include <linux/pm_runtime.h>
#include "coresight-etm-perf.h"
+#include <asm/cputype.h>
+
#include "coresight-priv.h"
static DEFINE_MUTEX(coresight_mutex);
@@ -566,6 +568,7 @@ static int coresight_enabled_sink(struct device *dev, const void *data)
/**
* coresight_get_enabled_sink - returns the first enabled sink found on the bus
+ * @s: source device; if it is connected to a single-source ETR, that
+ *     ETR is returned directly as the sink
* @deactivate: Whether the 'enable_sink' flag should be reset
*
* When operated from perf the deactivate parameter should be set to 'true'.
@@ -576,10 +579,30 @@ static int coresight_enabled_sink(struct device *dev, const void *data)
* parameter should be set to 'false', hence mandating users to explicitly
* clear the flag.
*/
-struct coresight_device *coresight_get_enabled_sink(bool deactivate)
+struct coresight_device *coresight_get_enabled_sink(struct coresight_device *s,
+ bool deactivate)
{
+ struct coresight_device *child;
struct device *dev = NULL;
+ if (s == NULL)
+ goto skip_single_source;
+
+ /* If the connected port is an ETR with a single trace source,
+  * there is nothing further to search.
+  */
+ if (s->pdata->nr_outport == 1) {
+ child = s->pdata->conns[0].child_dev;
+ if (child &&
+     child->type == CORESIGHT_DEV_TYPE_SINK &&
+     child->subtype.sink_subtype == CORESIGHT_DEV_SUBTYPE_SINK_BUFFER &&
+     child->pdata->nr_inport == 1 &&
+     child->activated) {
+ if (deactivate)
+ child->activated = false;
+ return child;
+ }
+ }
+
+skip_single_source:
dev = bus_find_device(&coresight_bustype, NULL, &deactivate,
coresight_enabled_sink);
@@ -827,12 +850,19 @@ int coresight_enable(struct coresight_device *csdev)
* Search for a valid sink for this session but don't reset the
* "enable_sink" flag in sysFS. Users get to do that explicitly.
*/
- sink = coresight_get_enabled_sink(false);
+ sink = coresight_get_enabled_sink(csdev, false);
if (!sink) {
ret = -EINVAL;
goto out;
}
+ if (!is_etm_sync_mode_hw() && sink_ops(sink)->register_source) {
+ /* Add a reference to the source,
+  * used by the sync insertion logic in the ETR driver.
+  */
+ sink_ops(sink)->register_source(sink, csdev);
+ }
+
path = coresight_build_path(csdev, sink);
if (IS_ERR(path)) {
pr_err("building path(s) failed\n");
@@ -884,6 +914,7 @@ EXPORT_SYMBOL_GPL(coresight_enable);
void coresight_disable(struct coresight_device *csdev)
{
int cpu, ret;
+ struct coresight_device *sink;
struct list_head *path = NULL;
mutex_lock(&coresight_mutex);
@@ -910,6 +941,16 @@ void coresight_disable(struct coresight_device *csdev)
break;
}
+ if (!is_etm_sync_mode_hw()) {
+ sink = coresight_get_enabled_sink(csdev, false);
+ if (!sink)
+ goto out;
+ /* Remove the source reference
+  * used by the sync insertion logic in the ETR driver.
+  */
+ if (sink_ops(sink)->unregister_source)
+ sink_ops(sink)->unregister_source(sink);
+ }
+
coresight_disable_path(path);
coresight_release_path(path);
diff --git a/drivers/i2c/busses/i2c-octeon-core.c b/drivers/i2c/busses/i2c-octeon-core.c
index d9607905dc2f..75e3779bb650 100644
--- a/drivers/i2c/busses/i2c-octeon-core.c
+++ b/drivers/i2c/busses/i2c-octeon-core.c
@@ -17,6 +17,7 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/pci.h>
#include "i2c-octeon-core.h"
@@ -658,7 +659,16 @@ out:
void octeon_i2c_set_clock(struct octeon_i2c *i2c)
{
int tclk, thp_base, inc, thp_idx, mdiv_idx, ndiv_idx, foscl, diff;
- int thp = 0x18, mdiv = 2, ndiv = 0, delta_hz = 1000000;
+ /* starting value on search for lowest diff */
+ const int huge_delta = 1000000;
+ /*
+ * Find divisors that produce the target frequency; start with a large
+ * delta to cover a wider range of divisors. Note: thp = TCLK half period.
+ */
+ int thp = 0x18, mdiv = 2, ndiv = 0, delta_hz = huge_delta;
+
+ if (octeon_i2c_is_otx2(to_pci_dev(i2c->dev)))
+ thp = 0x3;
for (ndiv_idx = 0; ndiv_idx < 8 && delta_hz != 0; ndiv_idx++) {
/*
@@ -672,17 +682,25 @@ void octeon_i2c_set_clock(struct octeon_i2c *i2c)
*/
tclk = i2c->twsi_freq * (mdiv_idx + 1) * 10;
tclk *= (1 << ndiv_idx);
- thp_base = (i2c->sys_freq / (tclk * 2)) - 1;
+ if (octeon_i2c_is_otx2(to_pci_dev(i2c->dev)))
+ thp_base = (i2c->sys_freq / tclk) - 2;
+ else
+ thp_base = (i2c->sys_freq / (tclk * 2)) - 1;
for (inc = 0; inc <= 1; inc++) {
thp_idx = thp_base + inc;
if (thp_idx < 5 || thp_idx > 0xff)
continue;
- foscl = i2c->sys_freq / (2 * (thp_idx + 1));
+ if (octeon_i2c_is_otx2(to_pci_dev(i2c->dev)))
+ foscl = i2c->sys_freq / (thp_idx + 2);
+ else
+ foscl = i2c->sys_freq /
+ (2 * (thp_idx + 1));
foscl = foscl / (1 << ndiv_idx);
foscl = foscl / (mdiv_idx + 1) / 10;
diff = abs(foscl - i2c->twsi_freq);
+ /* Use it if smaller diff from target */
if (diff < delta_hz) {
delta_hz = diff;
thp = thp_idx;
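
For OcteonTX2 the half-period arithmetic above changes from sys_freq / (2 * (thp + 1)) to sys_freq / (thp + 2). A quick stand-alone check of what that gives for a 100 MHz reference clock and a 100 kHz bus, with the divisor indices picked by hand purely to show the arithmetic (the driver searches them automatically):

    #include <stdio.h>

    int main(void)
    {
        int sys_freq = 100000000;      /* 100 MHz TWSI reference (OcteonTX2) */
        int twsi_freq = 100000;        /* 100 kHz target bus frequency */
        int mdiv_idx = 2, ndiv_idx = 0; /* hand-picked divisor indices */

        /* tclk and thp_base as computed in octeon_i2c_set_clock() */
        int tclk = twsi_freq * (mdiv_idx + 1) * 10 * (1 << ndiv_idx);
        int thp = sys_freq / tclk - 2;

        /* OcteonTX2 branch: foscl = sys_freq/(thp+2)/2^ndiv/(mdiv+1)/10 */
        int foscl = sys_freq / (thp + 2) / (1 << ndiv_idx) / (mdiv_idx + 1) / 10;

        printf("thp=%d -> foscl=%d Hz (target %d Hz)\n", thp, foscl, twsi_freq);
        return 0;
    }
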
diff --git a/drivers/i2c/busses/i2c-octeon-core.h b/drivers/i2c/busses/i2c-octeon-core.h
index 9bb9f64fdda0..15d379bf1d3c 100644
--- a/drivers/i2c/busses/i2c-octeon-core.h
+++ b/drivers/i2c/busses/i2c-octeon-core.h
@@ -211,6 +211,20 @@ static inline void octeon_i2c_write_int(struct octeon_i2c *i2c, u64 data)
octeon_i2c_writeq_flush(data, i2c->twsi_base + TWSI_INT(i2c));
}
+#define PCI_SUBSYS_DEVID_9XXX 0xB
+/**
+ * octeon_i2c_is_otx2 - check for chip ID
+ * @pdev: PCI dev structure
+ *
+ * Returns TRUE if OcteonTX2, FALSE otherwise.
+ */
+static inline bool octeon_i2c_is_otx2(struct pci_dev *pdev)
+{
+ u32 chip_id = (pdev->subsystem_device >> 12) & 0xF;
+
+ return (chip_id == PCI_SUBSYS_DEVID_9XXX);
+}
+
/* Prototypes */
irqreturn_t octeon_i2c_isr(int irq, void *dev_id);
int octeon_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num);
diff --git a/drivers/i2c/busses/i2c-thunderx-pcidrv.c b/drivers/i2c/busses/i2c-thunderx-pcidrv.c
index 12c90aa0900e..9fd903196b6c 100644
--- a/drivers/i2c/busses/i2c-thunderx-pcidrv.c
+++ b/drivers/i2c/busses/i2c-thunderx-pcidrv.c
@@ -205,6 +205,12 @@ static int thunder_i2c_probe_pci(struct pci_dev *pdev,
if (ret)
goto error;
+ /*
+ * For OcteonTX2 chips, set reference frequency to 100MHz
+ * as refclk_src in TWSI_MODE register defaults to 100MHz.
+ */
+ if (octeon_i2c_is_otx2(pdev))
+ i2c->sys_freq = 100000000;
octeon_i2c_set_clock(i2c);
i2c->adap = thunderx_i2c_ops;
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index af21d24a09e8..7e20820ddeea 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -88,6 +88,12 @@
#define IDR5_VAX GENMASK(11, 10)
#define IDR5_VAX_52_BIT 1
+#define ARM_SMMU_IIDR 0x18
+#define IIDR_CN96XX_A0 0x2b20034c
+#define IIDR_CN96XX_B0 0x2b20134c
+#define IIDR_CN95XX_A0 0x2b30034c
+#define IIDR_CN95XX_A1 0x2b30134c
+
#define ARM_SMMU_CR0 0x20
#define CR0_ATSCHK (1 << 4)
#define CR0_CMDQEN (1 << 3)
@@ -652,6 +658,7 @@ struct arm_smmu_device {
#define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0)
#define ARM_SMMU_OPT_PAGE0_REGS_ONLY (1 << 1)
+#define ARM_SMMU_OPT_FORCE_QDRAIN (1 << 2)
u32 options;
struct arm_smmu_cmdq cmdq;
@@ -1455,7 +1462,20 @@ static int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu,
* d. Advance the hardware prod pointer
* Control dependency ordering from the entries becoming valid.
*/
- writel_relaxed(prod, cmdq->q.prod_reg);
+ if (smmu->options & ARM_SMMU_OPT_FORCE_QDRAIN) {
+ u32 p, c = readl_relaxed(cmdq->q.cons_reg);
+
+ while (Q_IDX(&llq, c) != Q_IDX(&llq, prod) ||
+        Q_WRP(&llq, c) != Q_WRP(&llq, prod)) {
+ p = Q_IDX(&llq, c) | Q_WRP(&llq, c);
+ p++;
+ writel_relaxed(p, cmdq->q.prod_reg);
+ do {
+ cpu_relax();
+ c = readl_relaxed(cmdq->q.cons_reg);
+ } while (Q_IDX(&llq, c) != Q_IDX(&llq, p));
+ }
+ } else {
+ writel_relaxed(prod, cmdq->q.prod_reg);
+ }
/*
* e. Tell the next owner we're done
@@ -3911,6 +3931,25 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n",
smmu->ias, smmu->oas, smmu->features);
+
+ /* Options based on implementation */
+ reg = readl_relaxed(smmu->base + ARM_SMMU_IIDR);
+
+ /* Marvell Octeontx2 SMMU wrongly issues unsupported
+ * 64 byte memory reads under certain conditions for
+ * reading commands from the command queue.
+ * Force command queue drain for every two writes,
+ * so that SMMU issues only 32 byte reads.
+ */
+ switch (reg) {
+ case IIDR_CN96XX_A0:
+ case IIDR_CN96XX_B0:
+ case IIDR_CN95XX_A0:
+ case IIDR_CN95XX_A1:
+ smmu->options |= ARM_SMMU_OPT_FORCE_QDRAIN;
+ break;
+ }
+
return 0;
}
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index 5a577a6734cf..12255621ae62 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -22,6 +22,14 @@ config IMX_MBOX
help
Mailbox implementation for i.MX Messaging Unit (MU).
+config MVL_MHU
+ tristate "MVL MHU Mailbox"
+ help
+ Say Y here if you want to build the MVL MHU controller driver.
+ The controller has 2 mailbox channels, the last of which can be
+ used in secure mode only. This mailbox uses internal CPC RAM for
+ the mailbox memory.
+
config PLATFORM_MHU
tristate "Platform MHU Mailbox"
depends on OF
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index 2e4364ef5c47..bc14ce3a5bb9 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -11,6 +11,8 @@ obj-$(CONFIG_IMX_MBOX) += imx-mailbox.o
obj-$(CONFIG_ARMADA_37XX_RWTM_MBOX) += armada-37xx-rwtm-mailbox.o
+obj-$(CONFIG_MVL_MHU) += mvl_mhu.o
+
obj-$(CONFIG_PLATFORM_MHU) += platform_mhu.o
obj-$(CONFIG_PL320_MBOX) += pl320-ipc.o
diff --git a/drivers/mailbox/mvl_mhu.c b/drivers/mailbox/mvl_mhu.c
new file mode 100644
index 000000000000..332710f5ed32
--- /dev/null
+++ b/drivers/mailbox/mvl_mhu.c
@@ -0,0 +1,357 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for mailbox in OcteonTx2
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2019 Marvell
+ *
+ */
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+
+#define MHU_NUM_PCHANS 1
+
+#define BAR0 0
+#define SCP_INDEX 0x0
+#define DEV_AP0 0x2
+#define SCP_TO_AP_INTERRUPT 2
+#define DRV_NAME "mbox-thunderx"
+
+#define XCPX_DEVY_XCP_MBOX_LINT_OFFSET 0x000E1C00
+#define XCP_TO_DEV_XCP_MBOX_LINT(xcp_core, device_id) \
+ (XCPX_DEVY_XCP_MBOX_LINT_OFFSET | \
+ ((uint64_t)(xcp_core) << 36) | \
+ ((uint64_t)(device_id) << 4))
+
+#define AP0_TO_SCP_MBOX_LINT XCP_TO_DEV_XCP_MBOX_LINT(SCP_INDEX, DEV_AP0)
+
+/*
+ * Doorbell-Register: XCP(0..1)_DEV(0..7)_XCP_MBOX
+ * Communication data from devices to XCP. When written, sets
+ * XCP(0..1)_DEV(0..7)_XCP_MBOX.
+ * Note: it doesn't matter what is written into this register;
+ * writing anything raises an interrupt at the target.
+ */
+
+#define DONT_CARE_DATA 0xFF
+#define XCPX_DEVY_XCP_MBOX_OFFSET 0x000E1000
+#define XCP_TO_DEV_XCP_MBOX(xcp_core, device_id) \
+ (XCPX_DEVY_XCP_MBOX_OFFSET | \
+ ((uint64_t)(xcp_core) << 36) | \
+ ((uint64_t)(device_id) << 4))
+
+/* AP0-to-SCP doorbell */
+#define AP0_TO_SCP_MBOX XCP_TO_DEV_XCP_MBOX(SCP_INDEX, DEV_AP0)
+
+/* Register offset: Enable interrupt from SCP to AP */
+#define XCP0_XCP_DEV2_MBOX_RINT_ENA_W1S 0x000D1C60
+
+/* Rx interrupt from SCP to Non-secure AP (linux kernel) */
+#define XCPX_XCP_DEVY_MBOX_RINT_OFFSET 0x000D1C00
+#define XCPX_XCP_DEVY_MBOX_RINT(xcp_core, device_id) \
+ (XCPX_XCP_DEVY_MBOX_RINT_OFFSET | \
+ ((uint64_t)(xcp_core) << 36) | \
+ ((uint64_t)(device_id) << 4))
+
+/* The interrupt status register */
+#define SCP_TO_AP0_MBOX_RINT XCPX_XCP_DEVY_MBOX_RINT(SCP_INDEX, DEV_AP0)
+
+struct mvl_mhu_link {
+ unsigned int irq;
+ void __iomem *tx_reg;
+ void __iomem *rx_reg;
+ void __iomem *shared_mem;
+ struct device *dev;
+};
+
+struct mvl_mhu {
+ void __iomem *base;
+ struct mvl_mhu_link mlink[MHU_NUM_PCHANS];
+ struct mbox_chan chan[MHU_NUM_PCHANS];
+ struct mbox_controller mbox;
+ struct device *dev;
+ void __iomem *payload;
+ const char *name;
+};
+
+/**
+ * MVL MHU Mailbox platform specific configuration
+ *
+ * @num_pchans: Maximum number of physical channels
+ * @num_doorbells: Maximum number of doorbells per physical channel
+ * @support_doorbells: True if the controller supports doorbell mode
+ */
+struct mvl_mhu_mbox_pdata {
+ unsigned int num_pchans;
+ unsigned int num_doorbells;
+ bool support_doorbells;
+};
+
+/**
+ * MVL MHU Mailbox allocated channel information
+ *
+ * @mhu: Pointer to parent mailbox device
+ * @pchan: Physical channel within which this doorbell resides in
+ * @doorbell: doorbell number pertaining to this channel
+ */
+struct mvl_mhu_channel {
+ struct mvl_mhu *mhu;
+ unsigned int pchan;
+ unsigned int doorbell;
+};
+
+/* Sources of interrupt */
+enum {
+ INDEX_INT_SRC_SCMI_TX,
+ INDEX_INT_SRC_AVS_STS,
+ INDEX_INT_SRC_NONE,
+};
+
+/* information of interrupts from SCP */
+struct int_src_data_s {
+ uint64_t int_src_cnt;
+ uint64_t int_src_data;
+};
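+/*
+ * The SCP-side shared memory is assumed to be laid out as an array of
+ * these entries, indexed by the interrupt sources above (a layout
+ * inferred from the accesses in the rx handler below).
+ */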
+
+/* bottom half of rx interrupt */
+static irqreturn_t mvl_mhu_rx_interrupt_thread(int irq, void *p)
+{
+ struct mbox_chan *chan = p;
+ u64 val, scmi_tx_cnt, avs_failure_cnt;
+ struct mvl_mhu_link *mlink = chan->con_priv;
+ struct int_src_data_s *data =
+ (struct int_src_data_s *)mlink->shared_mem;
+
+ /*
+ * Local copy of the event counters. A mismatch between the received
+ * count and the local copy means additional events have been flagged
+ * that need to be handled by the AP.
+ */
+ static u64 event_counter[INDEX_INT_SRC_NONE] = {0};
+
+ /* scmi interrupt */
+ scmi_tx_cnt = readq(&data[INDEX_INT_SRC_SCMI_TX].int_src_cnt);
+ if (event_counter[INDEX_INT_SRC_SCMI_TX] != scmi_tx_cnt) {
+ mbox_chan_received_data(chan, (void *)&val);
+ /* Update the memory to prepare for next */
+ event_counter[INDEX_INT_SRC_SCMI_TX] = scmi_tx_cnt;
+ }
+
+ /* AVS failures */
+ avs_failure_cnt = readq(&data[INDEX_INT_SRC_AVS_STS].int_src_cnt);
+ if (event_counter[INDEX_INT_SRC_AVS_STS] != avs_failure_cnt) {
+ pr_err("!!! FATAL ERROR IN AVS BUS !!! FATAL ERROR IN AVS BUS !!!\n");
+ /* Update the memory to prepare for next */
+ event_counter[INDEX_INT_SRC_AVS_STS] = avs_failure_cnt;
+ }
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mvl_mhu_rx_interrupt(int irq, void *p)
+{
+ struct mbox_chan *chan = p;
+ struct mvl_mhu_link *mlink = chan->con_priv;
+ void __iomem *base = mlink->tx_reg;
+ u64 val;
+
+ /* Read interrupt status register */
+ val = readq_relaxed(base + SCP_TO_AP0_MBOX_RINT);
+ if (!val)
+ return IRQ_NONE;
+
+ /* Clear the interrupt : Write on clear */
+ writeq_relaxed(0x1, base + SCP_TO_AP0_MBOX_RINT);
+
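+ /* Wake the threaded handler, which inspects the shared-memory event counters */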
+ return IRQ_WAKE_THREAD;
+}
+
+static bool mvl_mhu_last_tx_done(struct mbox_chan *chan)
+{
+ struct mvl_mhu_link *mlink = chan->con_priv;
+ void __iomem *base = mlink->tx_reg;
+ u64 val = readq_relaxed(base + SCP_TO_AP0_MBOX_RINT);
+
+ return (val == 0);
+}
+
+static int mvl_mhu_send_data(struct mbox_chan *chan, void *data)
+{
+ struct mvl_mhu_link *mlink = chan->con_priv;
+ void __iomem *base = mlink->tx_reg;
+
+ writeq_relaxed(DONT_CARE_DATA, base + AP0_TO_SCP_MBOX);
+ return 0;
+}
+
+static int mvl_mhu_startup(struct mbox_chan *chan)
+{
+ int ret = 0;
+ struct mvl_mhu_link *mlink = chan->con_priv;
+
+ /* register interrupts */
+ ret = request_threaded_irq(mlink->irq, mvl_mhu_rx_interrupt,
+ mvl_mhu_rx_interrupt_thread, 0,
+ DRV_NAME, chan);
+ if (ret)
+ dev_err(mlink->dev, "request_irq failed:%d\n", ret);
+
+ /* Enable interrupt from SCP to NS_AP */
+ writeq_relaxed(0x1, mlink->tx_reg + XCP0_XCP_DEV2_MBOX_RINT_ENA_W1S);
+
+ return ret;
+}
+
+static const struct mbox_chan_ops mvl_mhu_ops = {
+ .send_data = mvl_mhu_send_data,
+ .startup = mvl_mhu_startup,
+ .last_tx_done = mvl_mhu_last_tx_done,
+};
+
+static const struct mvl_mhu_mbox_pdata mvl_mhu_pdata = {
+ .num_pchans = 1,
+ .num_doorbells = 1,
+ .support_doorbells = false,
+};
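+
+/*
+ * Illustrative (hypothetical) device-tree node consumed by this driver;
+ * only "mbox-name" and "shmem" are actually read in probe below:
+ *
+ *	mailbox {
+ *		mbox-name = "mvl-mhu";
+ *		shmem = <&scp_shmem>;
+ *	};
+ */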
+
+static int mvl_mhu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ int i, err, ret, nvec;
+ struct mvl_mhu *mhu;
+ struct resource res;
+ resource_size_t size;
+ struct device *dev = &pdev->dev;
+ struct device_node *shmem, *np = dev->of_node;
+
+ if (!np)
+ return -ENODEV;
+
+ if (mvl_mhu_pdata.num_pchans > MHU_NUM_PCHANS) {
+ dev_err(dev, "Number of physical channel can't exceed %d\n",
+ MHU_NUM_PCHANS);
+ return -EINVAL;
+ }
+ dev->platform_data = (void *)&mvl_mhu_pdata;
+
+ mhu = devm_kzalloc(dev, sizeof(*mhu), GFP_KERNEL);
+ if (!mhu)
+ return -ENOMEM;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device: err %d\n", err);
+ return err;
+ }
+
+ ret = pci_request_region(pdev, BAR0, DRV_NAME);
+ if (ret) {
+ dev_err(dev, "Failed requested region PCI dev err:%d\n", err);
+ return ret;
+ }
+
+ mhu->base = pcim_iomap(pdev, BAR0, pci_resource_len(pdev, BAR0));
+ if (!mhu->base) {
+ dev_err(dev, "Failed to iomap PCI device: err %d\n", err);
+ return -EINVAL;
+ }
+
+ err = of_property_read_string(np, "mbox-name", &mhu->name);
+ if (err)
+ mhu->name = np->full_name;
+
+ /* get shared memory details between NS AP & SCP */
+ shmem = of_parse_phandle(np, "shmem", 0);
+ ret = of_address_to_resource(shmem, 0, &res);
+ of_node_put(shmem);
+ if (ret) {
+ dev_err(dev, "failed to get CPC COMMON payload mem resource\n");
+ return ret;
+ }
+ size = resource_size(&res);
+
+ mhu->payload = devm_ioremap(dev, res.start, size);
+ if (!mhu->payload) {
+ dev_err(dev, "failed to ioremap CPC COMMON payload\n");
+ return -EADDRNOTAVAIL;
+ }
+
+ mhu->dev = dev;
+ mhu->mbox.dev = dev;
+ mhu->mbox.chans = &mhu->chan[0];
+ mhu->mbox.num_chans = MHU_NUM_PCHANS;
+ mhu->mbox.txdone_irq = false;
+ mhu->mbox.txdone_poll = true;
+ mhu->mbox.txpoll_period = 1;
+ mhu->mbox.ops = &mvl_mhu_ops;
+
+ pci_set_drvdata(pdev, mhu);
+
+ err = mbox_controller_register(&mhu->mbox);
+ if (err) {
+ dev_err(dev, "Failed to register mailboxes %d\n", err);
+ return err;
+ }
+
+ nvec = pci_alloc_irq_vectors(pdev, 0, 3, PCI_IRQ_MSIX);
+ if (nvec < 0) {
+ dev_err(dev, "irq vectors allocation failed:%d\n", nvec);
+ return nvec;
+ }
+
+ for (i = 0; i < mvl_mhu_pdata.num_pchans; i++) {
+ mhu->mlink[i].tx_reg = mhu->base;
+ mhu->mlink[i].rx_reg = mhu->base;
+ mhu->mlink[i].dev = dev;
+ mhu->mlink[i].shared_mem = mhu->payload;
+ mhu->chan[i].con_priv = &mhu->mlink[i];
+ /* MSI-X vector used for the SCP-to-AP interrupt */
+ mhu->mlink[i].irq = pci_irq_vector(pdev, SCP_TO_AP_INTERRUPT);
+ }
+
+ return 0;
+}
+
+static void mvl_mhu_remove(struct pci_dev *pdev)
+{
+ struct mvl_mhu *mhu = pci_get_drvdata(pdev);
+
+ pci_free_irq_vectors(pdev);
+ mbox_controller_unregister(&mhu->mbox);
+ pcim_iounmap(pdev, mhu->base);
+ pci_release_region(pdev, BAR0);
+}
+
+static const struct pci_device_id mvl_mhu_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xA067) },
+ { 0, } /* end of table */
+};
+
+static struct pci_driver mvl_mhu_driver = {
+ .name = "mvl_mhu",
+ .id_table = mvl_mhu_ids,
+ .probe = mvl_mhu_probe,
+ .remove = mvl_mhu_remove,
+};
+module_pci_driver(mvl_mhu_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MVL MHU Driver");
+MODULE_AUTHOR("Sujeet Baranwal <sbaranwal@marvell.com>");
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 99e151475d8f..ae9f2ab2bea0 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -465,6 +465,14 @@ config PVPANIC
a paravirtualized device provided by QEMU; it lets a virtual machine
(guest) communicate panic events to the host.
+config MARVELL_OTX_BPHY_CTR
+ bool "Marvell OcteonTX BPHY Control driver"
+ help
+   Enable the BPHY control driver, which handles ioctl calls that
+   install or remove EL3 IRQ handlers using SMC calls. This allows
+   selected BPHY interrupts to be handled directly in user space
+   without kernel intervention.
+
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 9abf2923d831..6abc55e8ac64 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -58,3 +58,5 @@ obj-$(CONFIG_PVPANIC) += pvpanic.o
obj-$(CONFIG_HABANA_AI) += habanalabs/
obj-$(CONFIG_UACCE) += uacce/
obj-$(CONFIG_XILINX_SDFEC) += xilinx_sdfec.o
+obj-$(CONFIG_MARVELL_OTX_BPHY_CTR) += otx_bphy_ctr.o
diff --git a/drivers/misc/otx_bphy_ctr.c b/drivers/misc/otx_bphy_ctr.c
new file mode 100644
index 000000000000..b568e4acc13e
--- /dev/null
+++ b/drivers/misc/otx_bphy_ctr.c
@@ -0,0 +1,283 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2016, 2018 Cavium Inc.
+ */
+#include <linux/init.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/arm-smccc.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/moduleparam.h>
+#include <linux/uaccess.h>
+#include <linux/mmu_context.h>
+#include <linux/ioctl.h>
+#include <linux/fs.h>
+
+#define DEVICE_NAME "otx-bphy-ctr"
+#define OTX_IOC_MAGIC 0xF3
+#define MAX_IRQ 27
+
+static struct device *otx_device;
+static struct class *otx_class;
+static struct cdev *otx_cdev;
+static dev_t otx_dev;
+static DEFINE_SPINLOCK(el3_inthandler_lock);
+static int in_use;
+static int irq_installed[MAX_IRQ];
+static struct thread_info *irq_installed_threads[MAX_IRQ];
+static struct task_struct *irq_installed_tasks[MAX_IRQ];
+
+/* SMC definitions */
+/* X1 - irq_num, X2 - sp, X3 - cpu, X4 - ttbr0 */
+#define OCTEONTX_INSTALL_BPHY_PSM_ERRINT 0xc2000803
+/* X1 - irq_num */
+#define OCTEONTX_REMOVE_BPHY_PSM_ERRINT 0xc2000804
+
+struct otx_irq_usr_data {
+ u64 isr_base;
+ u64 sp;
+ u64 cpu;
+ u64 irq_num;
+};
+
+
+#define OTX_IOC_SET_BPHY_HANDLER \
+ _IOW(OTX_IOC_MAGIC, 1, struct otx_irq_usr_data)
+
+#define OTX_IOC_CLR_BPHY_HANDLER \
+ _IO(OTX_IOC_MAGIC, 2)
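+
+/*
+ * Illustrative user-space usage (a sketch; the device path and values
+ * shown are assumptions, not part of this driver):
+ *
+ *	int fd = open("/dev/otx-bphy-ctr", O_RDWR);
+ *	struct otx_irq_usr_data d = {
+ *		.isr_base = isr_va, .sp = stack_va, .cpu = 0, .irq_num = 5,
+ *	};
+ *	ioctl(fd, OTX_IOC_SET_BPHY_HANDLER, &d);
+ *	...
+ *	ioctl(fd, OTX_IOC_CLR_BPHY_HANDLER, 5);
+ */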
+
+static inline int __install_el3_inthandler(unsigned long irq_num,
+ unsigned long sp,
+ unsigned long cpu,
+ unsigned long ttbr0)
+{
+ struct arm_smccc_res res;
+ unsigned long flags;
+ int retval = -1;
+
+ spin_lock_irqsave(&el3_inthandler_lock, flags);
+
+ if (!irq_installed[irq_num]) {
+ lock_context(current->group_leader->mm, irq_num);
+ arm_smccc_smc(OCTEONTX_INSTALL_BPHY_PSM_ERRINT, irq_num,
+ sp, cpu, ttbr0, 0, 0, 0, &res);
+ if (res.a0 == 0) {
+ irq_installed[irq_num] = 1;
+ irq_installed_threads[irq_num]
+ = current_thread_info();
+ irq_installed_tasks[irq_num]
+ = current->group_leader;
+ retval = 0;
+ } else {
+ unlock_context_by_index(irq_num);
+ }
+ }
+ spin_unlock_irqrestore(&el3_inthandler_lock, flags);
+ return retval;
+}
+
+static inline int __remove_el3_inthandler(unsigned long irq_num)
+{
+ struct arm_smccc_res res;
+ unsigned long flags;
+ int retval;
+
+ spin_lock_irqsave(&el3_inthandler_lock, flags);
+
+ if (irq_installed[irq_num]) {
+ arm_smccc_smc(OCTEONTX_REMOVE_BPHY_PSM_ERRINT, irq_num,
+ 0, 0, 0, 0, 0, 0, &res);
+ irq_installed[irq_num] = 0;
+ irq_installed_threads[irq_num] = NULL;
+ irq_installed_tasks[irq_num] = NULL;
+ unlock_context_by_index(irq_num);
+ retval = 0;
+ } else {
+ retval = -1;
+ }
+ spin_unlock_irqrestore(&el3_inthandler_lock, flags);
+ return retval;
+}
+
+static long otx_dev_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+{
+ int err = 0;
+ struct otx_irq_usr_data irq_usr;
+ u64 irq_ttbr, irq_isr_base, irq_sp, irq_cpu, irq_num;
+ int ret;
+
+ if (!in_use)
+ return -EINVAL;
+
+ if (_IOC_TYPE(cmd) != OTX_IOC_MAGIC)
+ return -ENOTTY;
+
+ /* access_ok() no longer takes a direction argument */
+ if (_IOC_DIR(cmd) & (_IOC_READ | _IOC_WRITE))
+ err = !access_ok((void __user *)arg, _IOC_SIZE(cmd));
+
+ if (err)
+ return -EFAULT;
+
+ switch (cmd) {
+ case OTX_IOC_SET_BPHY_HANDLER: /* Install ISR handler */
+ ret = copy_from_user(&irq_usr, (void *)arg, _IOC_SIZE(cmd));
+ if (ret)
+ return -EFAULT;
+ if (irq_usr.irq_num >= MAX_IRQ)
+ return -EINVAL;
+ irq_ttbr = 0;
+ /* TODO: reserve an ASID to avoid ASID rollovers */
+ asm volatile("mrs %0, ttbr0_el1\n\t" : "=r"(irq_ttbr));
+ irq_isr_base = irq_usr.isr_base;
+ irq_sp = irq_usr.sp;
+ irq_cpu = irq_usr.cpu;
+ irq_num = irq_usr.irq_num;
+ ret = __install_el3_inthandler(irq_num, irq_sp,
+ irq_cpu, irq_isr_base);
+ if (ret != 0)
+ return -EEXIST;
+ break;
+ case OTX_IOC_CLR_BPHY_HANDLER: /* Clear ISR handler */
+ irq_usr.irq_num = arg;
+ if (irq_usr.irq_num >= MAX_IRQ)
+ return -EINVAL;
+ ret = __remove_el3_inthandler(irq_usr.irq_num);
+ if (ret != 0)
+ return -ENOENT;
+ break;
+ default:
+ return -ENOTTY;
+ }
+ return 0;
+}
+
+static void cleanup_el3_irqs(struct task_struct *task)
+{
+ int i;
+
+ for (i = 0; i < MAX_IRQ; i++) {
+ if (irq_installed[i] &&
+ irq_installed_tasks[i] &&
+ (irq_installed_tasks[i] == task)) {
+ pr_alert("Exiting, removing handler for BPHY IRQ %d\n",
+ i);
+ __remove_el3_inthandler(i);
+ pr_alert("Exited, removed handler for BPHY IRQ %d\n",
+ i);
+ } else {
+ if (irq_installed[i] &&
+ (irq_installed_threads[i]
+ == current_thread_info()))
+ pr_alert("Exiting, thread info matches, not removing handler for BPHY IRQ %d\n", i);
+ }
+ }
+}
+
+static int otx_dev_open(struct inode *inode, struct file *fp)
+{
+ in_use = 1;
+ return 0;
+}
+
+static int otx_dev_release(struct inode *inode, struct file *fp)
+{
+ if (in_use == 0)
+ return -EINVAL;
+ in_use = 0;
+ return 0;
+}
+
+static const struct file_operations fops = {
+ .owner = THIS_MODULE,
+ .open = otx_dev_open,
+ .release = otx_dev_release,
+ .unlocked_ioctl = otx_dev_ioctl
+};
+
+static int __init otx_ctr_dev_init(void)
+{
+ int err = 0;
+
+ /* create a character device */
+ err = alloc_chrdev_region(&otx_dev, 1, 1, DEVICE_NAME);
+ if (err != 0) {
+ pr_err("Failed to create device: %d\n", err);
+ goto alloc_chrdev_err;
+ }
+
+ otx_cdev = cdev_alloc();
+ if (!otx_cdev) {
+ err = -ENODEV;
+ goto cdev_alloc_err;
+ }
+
+ cdev_init(otx_cdev, &fops);
+ err = cdev_add(otx_cdev, otx_dev, 1);
+ if (err < 0) {
+ err = -ENODEV;
+ goto cdev_add_err;
+ }
+
+ /* create a new class for sysfs */
+ otx_class = class_create(THIS_MODULE, DEVICE_NAME);
+ if (IS_ERR(otx_class)) {
+ err = -ENODEV;
+ goto class_create_err;
+ }
+
+ otx_device = device_create(otx_class, NULL, otx_dev, NULL,
+ DEVICE_NAME);
+ if (IS_ERR(otx_device)) {
+ err = -ENODEV;
+ goto device_create_err;
+ }
+
+ /* Register task cleanup handler */
+ err = task_cleanup_handler_add(cleanup_el3_irqs);
+ if (err != 0) {
+ dev_err(otx_device, "Failed to register cleanup handler: %d\n", err);
+ goto cleanup_handler_err;
+ }
+
+ return err;
+
+cleanup_handler_err:
+ device_destroy(otx_class, otx_dev);
+
+device_create_err:
+ class_destroy(otx_class);
+
+class_create_err:
+cdev_add_err:
+ cdev_del(otx_cdev);
+cdev_alloc_err:
+ unregister_chrdev_region(otx_dev, 1);
+alloc_chrdev_err:
+ return err;
+}
+
+static void __exit otx_ctr_dev_exit(void)
+{
+ device_destroy(otx_class, otx_dev);
+ class_destroy(otx_class);
+ cdev_del(otx_cdev);
+ unregister_chrdev_region(otx_dev, 1);
+
+ task_cleanup_handler_remove(cleanup_el3_irqs);
+}
+
+module_init(otx_ctr_dev_init);
+module_exit(otx_ctr_dev_exit);
+
+MODULE_DESCRIPTION("Marvell OTX Control Device Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/cavium-octeon.c b/drivers/mmc/host/cavium-octeon.c
index e299cdd1e619..442d1bab3047 100644
--- a/drivers/mmc/host/cavium-octeon.c
+++ b/drivers/mmc/host/cavium-octeon.c
@@ -236,8 +236,8 @@ static int octeon_mmc_probe(struct platform_device *pdev)
/* Only CMD_DONE, DMA_DONE, CMD_ERR, DMA_ERR */
for (i = 1; i <= 4; i++) {
ret = devm_request_irq(&pdev->dev, mmc_irq[i],
- cvm_mmc_interrupt,
- 0, cvm_mmc_irq_names[i], host);
+ cvm_mmc_interrupt, IRQF_NO_THREAD,
+ cvm_mmc_irq_names[i], host);
if (ret < 0) {
dev_err(&pdev->dev, "Error: devm_request_irq %d\n",
mmc_irq[i]);
@@ -246,8 +246,8 @@ static int octeon_mmc_probe(struct platform_device *pdev)
}
} else {
ret = devm_request_irq(&pdev->dev, mmc_irq[0],
- cvm_mmc_interrupt, 0, KBUILD_MODNAME,
- host);
+ cvm_mmc_interrupt, IRQF_NO_THREAD,
+ KBUILD_MODNAME, host);
if (ret < 0) {
dev_err(&pdev->dev, "Error: devm_request_irq %d\n",
mmc_irq[0]);
@@ -266,7 +266,7 @@ static int octeon_mmc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, host);
i = 0;
- for_each_child_of_node(node, cn) {
+ for_each_available_child_of_node(node, cn) {
host->slot_pdev[i] =
of_platform_device_create(cn, NULL, &pdev->dev);
if (!host->slot_pdev[i]) {
diff --git a/drivers/mmc/host/cavium-thunderx.c b/drivers/mmc/host/cavium-thunderx.c
index 76013bbbcff3..c75535504447 100644
--- a/drivers/mmc/host/cavium-thunderx.c
+++ b/drivers/mmc/host/cavium-thunderx.c
@@ -15,6 +15,8 @@
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/bitfield.h>
#include "cavium.h"
static void thunder_mmc_acquire_bus(struct cvm_mmc_host *host)
@@ -31,6 +33,8 @@ static void thunder_mmc_int_enable(struct cvm_mmc_host *host, u64 val)
{
writeq(val, host->base + MIO_EMM_INT(host));
writeq(val, host->base + MIO_EMM_INT_EN_SET(host));
+ writeq(MIO_EMM_DMA_INT_DMA,
+ host->dma_base + MIO_EMM_DMA_INT(host));
}
static int thunder_mmc_register_interrupts(struct cvm_mmc_host *host,
@@ -45,14 +49,125 @@ static int thunder_mmc_register_interrupts(struct cvm_mmc_host *host,
/* register interrupts */
for (i = 0; i < nvec; i++) {
ret = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, i),
- cvm_mmc_interrupt,
- 0, cvm_mmc_irq_names[i], host);
+ cvm_mmc_interrupt, 0,
+ cvm_mmc_irq_names[i], host);
if (ret)
return ret;
}
return 0;
}
+/* Calibration evaluates the per-tap delay */
+static void thunder_calibrate_mmc(struct cvm_mmc_host *host)
+{
+ u32 retries = 10;
+ u32 delay = 4;
+ unsigned int ps;
+ const char *how = "default";
+
+ if (is_mmc_8xxx(host))
+ return;
+
+ /* set _DEBUG[CLK_ON]=1 as workaround for clock issue */
+ if (is_mmc_otx2_A0(host) || is_mmc_95xx(host))
+ writeq(1, host->base + MIO_EMM_DEBUG(host));
+
+ if (is_mmc_otx2_A0(host)) {
+ /*
+ * Operation of up to 100 MHz may be achieved by skipping the
+ * steps that establish the tap delays and instead assuming
+ * that MIO_EMM_TAP[DELAY] returns 0x4 indicating 78 pS/tap.
+ */
+ } else {
+ u64 tap;
+ u64 emm_cfg = readq(host->base + MIO_EMM_CFG(host));
+ u64 tcfg;
+ u64 emm_io_ctl;
+ u64 emm_switch;
+ u64 emm_wdog;
+ u64 emm_sts_mask;
+ u64 emm_debug;
+ u64 emm_timing;
+ u64 emm_rca;
+
+ /*
+ * MIO_EMM_CFG[BUS_ENA] must be zero for calibration,
+ * but that resets whole host, so save state.
+ */
+ emm_io_ctl = readq(host->base + MIO_EMM_IO_CTL(host));
+ emm_switch = readq(host->base + MIO_EMM_SWITCH(host));
+ emm_wdog = readq(host->base + MIO_EMM_WDOG(host));
+ emm_sts_mask =
+ readq(host->base + MIO_EMM_STS_MASK(host));
+ emm_debug = readq(host->base + MIO_EMM_DEBUG(host));
+ emm_timing = readq(host->base + MIO_EMM_TIMING(host));
+ emm_rca = readq(host->base + MIO_EMM_RCA(host));
+
+ /* reset controller */
+ tcfg = emm_cfg;
+ tcfg &= ~MIO_EMM_CFG_BUS_ENA;
+ writeq(tcfg, host->base + MIO_EMM_CFG(host));
+ udelay(1);
+
+ /* restart with phantom slot 3 */
+ tcfg |= FIELD_PREP(MIO_EMM_CFG_BUS_ENA, 1ull << 3);
+ writeq(tcfg, host->base + MIO_EMM_CFG(host));
+ mdelay(1);
+
+ /* Start calibration */
+ writeq(0, host->base + MIO_EMM_CALB(host));
+ udelay(5);
+ writeq(START_CALIBRATION, host->base + MIO_EMM_CALB(host));
+ udelay(5);
+
+ do {
+ /* wait for approximately 300 coprocessor clock cycles */
+ udelay(5);
+ tap = readq(host->base + MIO_EMM_TAP(host));
+ } while (!tap && retries--);
+
+ /* leave calibration mode */
+ writeq(0, host->base + MIO_EMM_CALB(host));
+ udelay(5);
+
+ if (retries <= 0 || !tap) {
+ how = "fallback";
+ } else {
+ /* calculate the per-tap delay */
+ delay = tap & MIO_EMM_TAP_DELAY;
+ how = "calibrated";
+ }
+
+ /* restore old state */
+ writeq(emm_cfg, host->base + MIO_EMM_CFG(host));
+ mdelay(1);
+ writeq(emm_rca, host->base + MIO_EMM_RCA(host));
+ writeq(emm_timing, host->base + MIO_EMM_TIMING(host));
+ writeq(emm_debug, host->base + MIO_EMM_DEBUG(host));
+ writeq(emm_sts_mask,
+ host->base + MIO_EMM_STS_MASK(host));
+ writeq(emm_wdog, host->base + MIO_EMM_WDOG(host));
+ writeq(emm_switch, host->base + MIO_EMM_SWITCH(host));
+ writeq(emm_io_ctl, host->base + MIO_EMM_IO_CTL(host));
+ mdelay(1);
+
+ }
+
+ /*
+ * Scale measured/guessed calibration value to pS:
+ * The delay value is multiplied by 10 ns (i.e. 10000 ps) and then
+ * divided by the total number of taps to estimate the delay per
+ * tap in picoseconds. The nominal value is 125 ps per tap.
+ */
+ ps = (delay * PS_10000) / TOTAL_NO_OF_TAPS;
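+ /* e.g. TAP[DELAY] = 4 gives roughly 78 ps per tap, assuming 512 total taps */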
+ if (host->per_tap_delay != ps) {
+ dev_info(host->dev, "%s delay:%d per_tap_delay:%dpS\n",
+ how, delay, ps);
+ host->per_tap_delay = ps;
+ host->delay_logged = 0;
+ }
+}
+
static int thunder_mmc_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
@@ -83,6 +198,7 @@ static int thunder_mmc_probe(struct pci_dev *pdev,
/* On ThunderX these are identical */
host->dma_base = host->base;
+ host->pdev = pdev;
host->reg_off = 0x2000;
host->reg_off_dma = 0x160;
@@ -111,24 +227,32 @@ static int thunder_mmc_probe(struct pci_dev *pdev,
host->need_irq_handler_lock = true;
host->last_slot = -1;
- ret = dma_set_mask(dev, DMA_BIT_MASK(48));
if (ret)
goto error;
/*
* Clear out any pending interrupts that may be left over from
* bootloader. Writing 1 to the bits clears them.
+ * Clear the DMA FIFO after disabling interrupts, then acknowledge any dangling events
*/
- writeq(127, host->base + MIO_EMM_INT_EN(host));
- writeq(3, host->base + MIO_EMM_DMA_INT_ENA_W1C(host));
- /* Clear DMA FIFO */
- writeq(BIT_ULL(16), host->base + MIO_EMM_DMA_FIFO_CFG(host));
+ writeq(~0, host->base + MIO_EMM_INT(host));
+ writeq(~0, host->dma_base + MIO_EMM_DMA_INT_ENA_W1C(host));
+ writeq(~0, host->base + MIO_EMM_INT_EN_CLR(host));
+ writeq(MIO_EMM_DMA_FIFO_CFG_CLR,
+ host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
+ writeq(~0, host->dma_base + MIO_EMM_DMA_INT(host));
ret = thunder_mmc_register_interrupts(host, pdev);
if (ret)
goto error;
- for_each_child_of_node(node, child_node) {
+ /* Run calibration to determine the per-tap delay, which is later
+ * used to compute the tap values programmed into
+ * MIO_EMM_TIMING.
+ */
+ thunder_calibrate_mmc(host);
+
+ for_each_available_child_of_node(node, child_node) {
/*
* mmc_of_parse and devm* require one device per slot.
* Create a dummy device per slot and set the node pointer to
@@ -141,12 +265,15 @@ static int thunder_mmc_probe(struct pci_dev *pdev,
if (!host->slot_pdev[i])
continue;
+ dev_info(dev, "Probing slot %d\n", i);
+
ret = cvm_mmc_of_slot_probe(&host->slot_pdev[i]->dev, host);
if (ret)
goto error;
}
i++;
}
+
dev_info(dev, "probed\n");
return 0;
@@ -176,8 +303,11 @@ static void thunder_mmc_remove(struct pci_dev *pdev)
cvm_mmc_of_slot_remove(host->slot[i]);
dma_cfg = readq(host->dma_base + MIO_EMM_DMA_CFG(host));
- dma_cfg &= ~MIO_EMM_DMA_CFG_EN;
+ dma_cfg |= MIO_EMM_DMA_CFG_CLR;
writeq(dma_cfg, host->dma_base + MIO_EMM_DMA_CFG(host));
+ do {
+ dma_cfg = readq(host->dma_base + MIO_EMM_DMA_CFG(host));
+ } while (dma_cfg & MIO_EMM_DMA_CFG_EN);
clk_disable_unprepare(host->clk);
pci_release_regions(pdev);
diff --git a/drivers/mmc/host/cavium.c b/drivers/mmc/host/cavium.c
index 89deb451e0ac..08973178549a 100644
--- a/drivers/mmc/host/cavium.c
+++ b/drivers/mmc/host/cavium.c
@@ -25,6 +25,8 @@
#include <linux/regulator/consumer.h>
#include <linux/scatterlist.h>
#include <linux/time.h>
+#include <linux/iommu.h>
+#include <linux/swiotlb.h>
#include "cavium.h"
@@ -38,6 +40,8 @@ const char *cvm_mmc_irq_names[] = {
"MMC Switch Error",
"MMC DMA int Fifo",
"MMC DMA int",
+ "MMC NCB Fault",
+ "MMC RAS",
};
/*
@@ -71,7 +75,7 @@ static struct cvm_mmc_cr_type cvm_mmc_cr_types[] = {
{0, 1}, /* CMD16 */
{1, 1}, /* CMD17 */
{1, 1}, /* CMD18 */
- {3, 1}, /* CMD19 */
+ {2, 1}, /* CMD19 */
{2, 1}, /* CMD20 */
{0, 0}, /* CMD21 */
{0, 0}, /* CMD22 */
@@ -118,6 +122,156 @@ static struct cvm_mmc_cr_type cvm_mmc_cr_types[] = {
{0, 0} /* CMD63 */
};
+static int tapdance;
+module_param(tapdance, int, 0644);
+MODULE_PARM_DESC(tapdance, "adjust bus-timing: (0=mid-eye, positive=Nth_fastest_tap)");
+
+static int clk_scale = 100;
+module_param(clk_scale, int, 0644);
+MODULE_PARM_DESC(clk_scale, "percent scale data_/cmd_out taps (default 100)");
+
+static bool fixed_timing;
+module_param(fixed_timing, bool, 0444);
+MODULE_PARM_DESC(fixed_timing, "use fixed data_/cmd_out taps");
+
+static bool ddr_cmd_taps;
+module_param(ddr_cmd_taps, bool, 0644);
+MODULE_PARM_DESC(ddr_cmd_taps, "reduce cmd_out_taps in DDR modes, as before");
+
+static bool cvm_is_mmc_timing_ddr(struct cvm_mmc_slot *slot)
+{
+ if ((slot->mmc->ios.timing == MMC_TIMING_UHS_DDR50) ||
+ (slot->mmc->ios.timing == MMC_TIMING_MMC_DDR52) ||
+ (slot->mmc->ios.timing == MMC_TIMING_MMC_HS400))
+ return true;
+ else
+ return false;
+}
+
+static void cvm_mmc_set_timing(struct cvm_mmc_slot *slot)
+{
+ if (!is_mmc_otx2(slot->host))
+ return;
+
+ writeq(slot->taps, slot->host->base + MIO_EMM_TIMING(slot->host));
+}
+
+static int tout(struct cvm_mmc_slot *slot, int ps, int hint)
+{
+ struct cvm_mmc_host *host = slot->host;
+ struct mmc_host *mmc = slot->mmc;
+ int tap_ps = host->per_tap_delay;
+ int timing = mmc->ios.timing;
+ static int old_scale;
+ int taps;
+
+ if (fixed_timing)
+ return hint;
+
+ if (!hint)
+ hint = 63;
+
+ if (!tap_ps)
+ return hint;
+
+ taps = min((int)(ps * clk_scale) / (tap_ps * 100), 63);
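+ /* e.g. ps = 2500, tap_ps = 78, clk_scale = 100 -> taps = min(32, 63) = 32 */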
+
+ /* when modparam is adjusted, re-announce timing */
+ if (old_scale != clk_scale) {
+ host->delay_logged = 0;
+ old_scale = clk_scale;
+ }
+
+ if (!test_and_set_bit(timing,
+ &host->delay_logged))
+ dev_info(host->dev, "mmc%d.ios_timing:%d %dpS hint:%d taps:%d\n",
+ mmc->index, timing, ps, hint, taps);
+
+ return taps;
+}
+
+static int cvm_mmc_configure_delay(struct cvm_mmc_slot *slot)
+{
+ struct cvm_mmc_host *host = slot->host;
+ struct mmc_host *mmc = slot->mmc;
+
+ pr_debug("slot%d.configure_delay\n", slot->bus_id);
+
+ if (is_mmc_8xxx(host)) {
+ /* MIO_EMM_SAMPLE only exists up to T83XX */
+ u64 emm_sample =
+ FIELD_PREP(MIO_EMM_SAMPLE_CMD_CNT, slot->cmd_cnt) |
+ FIELD_PREP(MIO_EMM_SAMPLE_DAT_CNT, slot->data_cnt);
+ writeq(emm_sample, host->base + MIO_EMM_SAMPLE(host));
+ } else {
+ int half = MAX_NO_OF_TAPS / 2;
+ int cin = FIELD_GET(MIO_EMM_TIMING_CMD_IN, slot->taps);
+ int din = FIELD_GET(MIO_EMM_TIMING_DATA_IN, slot->taps);
+ int cout, dout;
+
+ if (!slot->taps)
+ cin = din = half;
+ /*
+ * EMM_CMD hold time from rising edge of EMMC_CLK.
+ * Typically 3.0 ns at frequencies < 26 MHz.
+ * Typically 3.0 ns at frequencies <= 52 MHz SDR.
+ * Typically 2.5 ns at frequencies <= 52 MHz DDR.
+ * Typically 0.8 ns at frequencies > 52 MHz SDR.
+ * Typically 0.4 ns at frequencies > 52 MHz DDR.
+ */
+ switch (mmc->ios.timing) {
+ case MMC_TIMING_LEGACY:
+ default:
+ if (mmc->card && mmc_card_mmc(mmc->card))
+ cout = tout(slot, 5000, 39);
+ else
+ cout = tout(slot, 8000, 63);
+ break;
+ case MMC_TIMING_UHS_SDR12:
+ cout = tout(slot, 3000, 39);
+ break;
+ case MMC_TIMING_MMC_HS:
+ cout = tout(slot, 2500, 32);
+ break;
+ case MMC_TIMING_SD_HS:
+ case MMC_TIMING_UHS_SDR25:
+ case MMC_TIMING_UHS_SDR50:
+ cout = tout(slot, 2000, 26);
+ break;
+ case MMC_TIMING_UHS_DDR50:
+ case MMC_TIMING_MMC_DDR52:
+ cout = tout(slot, 1500, 20);
+ break;
+ case MMC_TIMING_UHS_SDR104:
+ case MMC_TIMING_MMC_HS200:
+ case MMC_TIMING_MMC_HS400:
+ cout = tout(slot, 800, 10);
+ break;
+ }
+
+ if (!is_mmc_95xx(host)) {
+ if (!cvm_is_mmc_timing_ddr(slot))
+ dout = cout;
+ else if (ddr_cmd_taps)
+ cout = dout = cout / 2;
+ else
+ dout = cout / 2;
+ } else
+ dout = tout(slot, 800, 10);
+
+ slot->taps =
+ FIELD_PREP(MIO_EMM_TIMING_CMD_IN, cin) |
+ FIELD_PREP(MIO_EMM_TIMING_CMD_OUT, cout) |
+ FIELD_PREP(MIO_EMM_TIMING_DATA_IN, din) |
+ FIELD_PREP(MIO_EMM_TIMING_DATA_OUT, dout);
+
+ pr_debug("slot%d.taps %llx\n", slot->bus_id, slot->taps);
+ cvm_mmc_set_timing(slot);
+ }
+
+ return 0;
+}
+
static struct cvm_mmc_cr_mods cvm_mmc_get_cr_mods(struct mmc_command *cmd)
{
struct cvm_mmc_cr_type *cr;
@@ -175,14 +329,14 @@ static void check_switch_errors(struct cvm_mmc_host *host)
dev_err(host->dev, "Switch bus width error\n");
}
-static void clear_bus_id(u64 *reg)
+static inline void clear_bus_id(u64 *reg)
{
u64 bus_id_mask = GENMASK_ULL(61, 60);
*reg &= ~bus_id_mask;
}
-static void set_bus_id(u64 *reg, int bus_id)
+static inline void set_bus_id(u64 *reg, int bus_id)
{
clear_bus_id(reg);
*reg |= FIELD_PREP(GENMASK(61, 60), bus_id);
@@ -193,25 +347,69 @@ static int get_bus_id(u64 reg)
return FIELD_GET(GENMASK_ULL(61, 60), reg);
}
-/*
- * We never set the switch_exe bit since that would interfere
- * with the commands send by the MMC core.
- */
-static void do_switch(struct cvm_mmc_host *host, u64 emm_switch)
+/* save old slot details, switch power */
+static bool pre_switch(struct cvm_mmc_host *host, u64 emm_switch)
{
- int retries = 100;
- u64 rsp_sts;
- int bus_id;
+ int bus_id = get_bus_id(emm_switch);
+ struct cvm_mmc_slot *slot = host->slot[bus_id];
+ struct cvm_mmc_slot *old_slot;
+ bool same_vqmmc = false;
- /*
- * Modes setting only taken from slot 0. Work around that hardware
- * issue by first switching to slot 0.
+ if (host->last_slot == bus_id)
+ return false;
+
+ /* when VQMMC is switched, tri-state CMDn over any slot change
+ * to avoid transient states on D0-7 or CLK from level-shifters
*/
- bus_id = get_bus_id(emm_switch);
- clear_bus_id(&emm_switch);
- writeq(emm_switch, host->base + MIO_EMM_SWITCH(host));
+ if (host->use_vqmmc) {
+ writeq(1ull << 3, host->base + MIO_EMM_CFG(host));
+ udelay(10);
+ }
+
+ if (host->last_slot >= 0 && host->slot[host->last_slot]) {
+ old_slot = host->slot[host->last_slot];
+ old_slot->cached_switch =
+ readq(host->base + MIO_EMM_SWITCH(host));
+ old_slot->cached_rca = readq(host->base + MIO_EMM_RCA(host));
+
+ same_vqmmc = (slot->mmc->supply.vqmmc ==
+ old_slot->mmc->supply.vqmmc);
+ if (!same_vqmmc && !IS_ERR_OR_NULL(old_slot->mmc->supply.vqmmc))
+ regulator_disable(old_slot->mmc->supply.vqmmc);
+ }
+
+ if (!same_vqmmc && !IS_ERR_OR_NULL(slot->mmc->supply.vqmmc)) {
+ int e = regulator_enable(slot->mmc->supply.vqmmc);
+
+ if (e)
+ dev_err(host->dev, "mmc-slot@%d.vqmmc err %d\n",
+ bus_id, e);
+ }
+
+ host->last_slot = slot->bus_id;
+
+ return true;
+}
+
+static void post_switch(struct cvm_mmc_host *host, u64 emm_switch)
+{
+ int bus_id = get_bus_id(emm_switch);
+ struct cvm_mmc_slot *slot = host->slot[bus_id];
+
+ if (host->use_vqmmc) {
+ /* enable new CMDn */
+ writeq(1ull << bus_id, host->base + MIO_EMM_CFG(host));
+ udelay(10);
+ }
+
+ writeq(slot->cached_rca, host->base + MIO_EMM_RCA(host));
+}
+
+static inline void mode_switch(struct cvm_mmc_host *host, u64 emm_switch)
+{
+ u64 rsp_sts;
+ int retries = 100;
- set_bus_id(&emm_switch, bus_id);
writeq(emm_switch, host->base + MIO_EMM_SWITCH(host));
/* wait for the switch to finish */
@@ -221,15 +419,49 @@ static void do_switch(struct cvm_mmc_host *host, u64 emm_switch)
break;
udelay(10);
} while (--retries);
+}
+
+/*
+ * We never set the switch_exe bit since that would interfere
+ * with the commands send by the MMC core.
+ */
+static void do_switch(struct cvm_mmc_host *host, u64 emm_switch)
+{
+ int bus_id = get_bus_id(emm_switch);
+ struct cvm_mmc_slot *slot = host->slot[bus_id];
+ bool slot_changed = pre_switch(host, emm_switch);
+
+ /*
+ * Modes setting only taken from slot 0. Work around that hardware
+ * issue by first switching to slot 0.
+ */
+ if (bus_id) {
+ u64 switch0 = emm_switch;
+
+ clear_bus_id(&switch0);
+ mode_switch(host, switch0);
+ }
+
+ mode_switch(host, emm_switch);
check_switch_errors(host);
+ if (slot_changed)
+ post_switch(host, emm_switch);
+ slot->cached_switch = emm_switch;
+ if (emm_switch & MIO_EMM_SWITCH_CLK)
+ slot->cmd6_pending = false;
}
+/* need to change hardware state to match software requirements? */
static bool switch_val_changed(struct cvm_mmc_slot *slot, u64 new_val)
{
/* Match BUS_ID, HS_TIMING, BUS_WIDTH, POWER_CLASS, CLK_HI, CLK_LO */
- u64 match = 0x3001070fffffffffull;
+ /* For 9xxx add HS200_TIMING and HS400_TIMING */
+ u64 match = (is_mmc_otx2(slot->host)) ?
+ 0x3007070fffffffffull : 0x3001070fffffffffull;
+ if (!slot->host->powered)
+ return true;
return (slot->cached_switch & match) != (new_val & match);
}
@@ -247,58 +479,62 @@ static void set_wdog(struct cvm_mmc_slot *slot, unsigned int ns)
writeq(timeout, slot->host->base + MIO_EMM_WDOG(slot->host));
}
+static void emmc_io_drive_setup(struct cvm_mmc_slot *slot)
+{
+ u64 ioctl_cfg;
+ struct cvm_mmc_host *host = slot->host;
+
+ /* Setup drive and slew only for 9x */
+ if (is_mmc_otx2(host)) {
+ if ((slot->drive < 0) || (slot->slew < 0))
+ return;
+ /* Setup the emmc interface current drive
+ * strength & clk slew rate.
+ */
+ ioctl_cfg = FIELD_PREP(MIO_EMM_IO_CTL_DRIVE, slot->drive) |
+ FIELD_PREP(MIO_EMM_IO_CTL_SLEW, slot->slew);
+ writeq(ioctl_cfg, host->base + MIO_EMM_IO_CTL(host));
+ }
+}
+
static void cvm_mmc_reset_bus(struct cvm_mmc_slot *slot)
{
struct cvm_mmc_host *host = slot->host;
u64 emm_switch, wdog;
- emm_switch = readq(slot->host->base + MIO_EMM_SWITCH(host));
- emm_switch &= ~(MIO_EMM_SWITCH_EXE | MIO_EMM_SWITCH_ERR0 |
- MIO_EMM_SWITCH_ERR1 | MIO_EMM_SWITCH_ERR2);
+ emm_switch = readq(host->base + MIO_EMM_SWITCH(host));
+ emm_switch &= ~(MIO_EMM_SWITCH_EXE | MIO_EMM_SWITCH_ERRS);
set_bus_id(&emm_switch, slot->bus_id);
- wdog = readq(slot->host->base + MIO_EMM_WDOG(host));
- do_switch(slot->host, emm_switch);
-
- slot->cached_switch = emm_switch;
+ wdog = readq(host->base + MIO_EMM_WDOG(host));
+ do_switch(host, emm_switch);
+ host->powered = true;
msleep(20);
- writeq(wdog, slot->host->base + MIO_EMM_WDOG(host));
+ writeq(wdog, host->base + MIO_EMM_WDOG(host));
}
/* Switch to another slot if needed */
static void cvm_mmc_switch_to(struct cvm_mmc_slot *slot)
{
struct cvm_mmc_host *host = slot->host;
- struct cvm_mmc_slot *old_slot;
- u64 emm_sample, emm_switch;
if (slot->bus_id == host->last_slot)
return;
- if (host->last_slot >= 0 && host->slot[host->last_slot]) {
- old_slot = host->slot[host->last_slot];
- old_slot->cached_switch = readq(host->base + MIO_EMM_SWITCH(host));
- old_slot->cached_rca = readq(host->base + MIO_EMM_RCA(host));
- }
-
- writeq(slot->cached_rca, host->base + MIO_EMM_RCA(host));
- emm_switch = slot->cached_switch;
- set_bus_id(&emm_switch, slot->bus_id);
- do_switch(host, emm_switch);
+ do_switch(host, slot->cached_switch);
+ host->powered = true;
- emm_sample = FIELD_PREP(MIO_EMM_SAMPLE_CMD_CNT, slot->cmd_cnt) |
- FIELD_PREP(MIO_EMM_SAMPLE_DAT_CNT, slot->dat_cnt);
- writeq(emm_sample, host->base + MIO_EMM_SAMPLE(host));
-
- host->last_slot = slot->bus_id;
+ emmc_io_drive_setup(slot);
+ cvm_mmc_configure_delay(slot);
}
-static void do_read(struct cvm_mmc_host *host, struct mmc_request *req,
+static void do_read(struct cvm_mmc_slot *slot, struct mmc_request *req,
u64 dbuf)
{
- struct sg_mapping_iter *smi = &host->smi;
+ struct cvm_mmc_host *host = slot->host;
+ struct sg_mapping_iter *smi = &slot->smi;
int data_len = req->data->blocks * req->data->blksz;
int bytes_xfered, shift = -1;
u64 dat = 0;
@@ -365,7 +601,7 @@ static void set_cmd_response(struct cvm_mmc_host *host, struct mmc_request *req,
}
}
-static int get_dma_dir(struct mmc_data *data)
+static inline int get_dma_dir(struct mmc_data *data)
{
return (data->flags & MMC_DATA_WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
}
@@ -374,6 +610,9 @@ static int finish_dma_single(struct cvm_mmc_host *host, struct mmc_data *data)
{
data->bytes_xfered = data->blocks * data->blksz;
data->error = 0;
+
+ writeq(MIO_EMM_DMA_FIFO_CFG_CLR,
+ host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
return 1;
}
@@ -382,6 +621,7 @@ static int finish_dma_sg(struct cvm_mmc_host *host, struct mmc_data *data)
{
u64 fifo_cfg;
int count;
+ void __iomem *dma_intp = host->dma_base + MIO_EMM_DMA_INT(host);
/* Check if there are any pending requests left */
fifo_cfg = readq(host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
@@ -392,8 +632,16 @@ static int finish_dma_sg(struct cvm_mmc_host *host, struct mmc_data *data)
data->bytes_xfered = data->blocks * data->blksz;
data->error = 0;
- /* Clear and disable FIFO */
- writeq(BIT_ULL(16), host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
+ writeq(MIO_EMM_DMA_FIFO_CFG_CLR,
+ host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
+
+ /* on read, wait for internal buffer to flush out to mem */
+ if (get_dma_dir(data) == DMA_FROM_DEVICE) {
+ while (!(readq(dma_intp) & MIO_EMM_DMA_INT_DMA))
+ udelay(10);
+ writeq(MIO_EMM_DMA_INT_DMA, dma_intp);
+ }
+
dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
return 1;
}
@@ -415,7 +663,8 @@ static int check_status(u64 rsp_sts)
if (rsp_sts & MIO_EMM_RSP_STS_RSP_TIMEOUT ||
rsp_sts & MIO_EMM_RSP_STS_BLK_TIMEOUT)
return -ETIMEDOUT;
- if (rsp_sts & MIO_EMM_RSP_STS_DBUF_ERR)
+ if (rsp_sts & MIO_EMM_RSP_STS_DBUF_ERR ||
+ rsp_sts & MIO_EMM_RSP_STS_BLK_CRC_ERR)
return -EIO;
return 0;
}
@@ -435,16 +684,24 @@ static void cleanup_dma(struct cvm_mmc_host *host, u64 rsp_sts)
irqreturn_t cvm_mmc_interrupt(int irq, void *dev_id)
{
struct cvm_mmc_host *host = dev_id;
- struct mmc_request *req;
+ struct mmc_request *req = NULL;
+ struct cvm_mmc_slot *slot = NULL;
unsigned long flags = 0;
u64 emm_int, rsp_sts;
bool host_done;
+ int bus_id;
if (host->need_irq_handler_lock)
spin_lock_irqsave(&host->irq_handler_lock, flags);
else
__acquire(&host->irq_handler_lock);
+ rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
+ bus_id = get_bus_id(rsp_sts);
+ slot = host->slot[bus_id];
+ if (slot)
+ req = slot->current_req;
+
/* Clear interrupt bits (write 1 clears ). */
emm_int = readq(host->base + MIO_EMM_INT(host));
writeq(emm_int, host->base + MIO_EMM_INT(host));
@@ -452,25 +709,32 @@ irqreturn_t cvm_mmc_interrupt(int irq, void *dev_id)
if (emm_int & MIO_EMM_INT_SWITCH_ERR)
check_switch_errors(host);
- req = host->current_req;
if (!req)
goto out;
- rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
+ /*
+ * DMA_PEND means the DMA has stalled (e.g. on CRC errors).
+ * Start the teardown; an interrupt arrives on completion and the MMC stack retries.
+ */
+ if ((rsp_sts & MIO_EMM_RSP_STS_DMA_PEND) && slot->dma_active) {
+ cleanup_dma(host, rsp_sts);
+ goto out;
+ }
+
/*
* dma_val set means DMA is still in progress. Don't touch
* the request and wait for the interrupt indicating that
* the DMA is finished.
*/
- if ((rsp_sts & MIO_EMM_RSP_STS_DMA_VAL) && host->dma_active)
+ if ((rsp_sts & MIO_EMM_RSP_STS_DMA_VAL) && slot->dma_active)
goto out;
- if (!host->dma_active && req->data &&
+ if (!slot->dma_active && req->data &&
(emm_int & MIO_EMM_INT_BUF_DONE)) {
unsigned int type = (rsp_sts >> 7) & 3;
if (type == 1)
- do_read(host, req, rsp_sts & MIO_EMM_RSP_STS_DBUF);
+ do_read(slot, req, rsp_sts & MIO_EMM_RSP_STS_DBUF);
else if (type == 2)
do_write(req);
}
@@ -480,12 +744,16 @@ irqreturn_t cvm_mmc_interrupt(int irq, void *dev_id)
emm_int & MIO_EMM_INT_CMD_ERR ||
emm_int & MIO_EMM_INT_DMA_ERR;
+ /* Add NCB_FLT interrupt for octtx2 */
+ if (is_mmc_otx2(host))
+ host_done = host_done || emm_int & MIO_EMM_INT_NCB_FLT;
+
if (!(host_done && req->done))
goto no_req_done;
req->cmd->error = check_status(rsp_sts);
- if (host->dma_active && req->data)
+ if (slot->dma_active && req->data)
if (!finish_dma(host, req->data))
goto no_req_done;
@@ -494,7 +762,18 @@ irqreturn_t cvm_mmc_interrupt(int irq, void *dev_id)
(rsp_sts & MIO_EMM_RSP_STS_DMA_PEND))
cleanup_dma(host, rsp_sts);
- host->current_req = NULL;
+ /* follow CMD6 timing/width with IMMEDIATE switch */
+ if (slot && slot->cmd6_pending) {
+ if (host_done && !req->cmd->error) {
+ do_switch(host, slot->want_switch);
+ emmc_io_drive_setup(slot);
+ cvm_mmc_configure_delay(slot);
+ } else if (slot) {
+ slot->cmd6_pending = false;
+ }
+ }
+
+ slot->current_req = NULL;
req->done(req);
no_req_done:
@@ -609,9 +888,9 @@ static u64 prepare_dma_sg(struct cvm_mmc_host *host, struct mmc_data *data)
error:
WARN_ON_ONCE(1);
+ writeq(MIO_EMM_DMA_FIFO_CFG_CLR,
+ host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
- /* Disable FIFO */
- writeq(BIT_ULL(16), host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
return 0;
}
@@ -653,7 +932,11 @@ static void cvm_mmc_dma_request(struct mmc_host *mmc,
struct cvm_mmc_slot *slot = mmc_priv(mmc);
struct cvm_mmc_host *host = slot->host;
struct mmc_data *data;
- u64 emm_dma, addr;
+ u64 emm_dma, addr, int_enable_mask = 0;
+ int seg;
+
+ /* cleared by successful termination */
+ mrq->cmd->error = -EINVAL;
if (!mrq->data || !mrq->data->sg || !mrq->data->sg_len ||
!mrq->stop || mrq->stop->opcode != MMC_STOP_TRANSMISSION) {
@@ -662,17 +945,27 @@ static void cvm_mmc_dma_request(struct mmc_host *mmc,
goto error;
}
+ /* Unaligned multi-block DMA is problematic, so reject any unaligned segment */
+ for (seg = 0; seg < mrq->data->sg_len; seg++) {
+ struct scatterlist *sg = &mrq->data->sg[seg];
+ u64 align = (sg->offset | sg->length);
+
+ if (!(align & 7))
+ continue;
+ dev_err(mmc_dev(mmc),
+ "Error: 64-bit alignment required\n");
+ goto error;
+ }
+
cvm_mmc_switch_to(slot);
data = mrq->data;
+
pr_debug("DMA request blocks: %d block_size: %d total_size: %d\n",
data->blocks, data->blksz, data->blocks * data->blksz);
if (data->timeout_ns)
set_wdog(slot, data->timeout_ns);
- WARN_ON(host->current_req);
- host->current_req = mrq;
-
emm_dma = prepare_ext_dma(mmc, mrq);
addr = prepare_dma(host, data);
if (!addr) {
@@ -680,9 +973,19 @@ static void cvm_mmc_dma_request(struct mmc_host *mmc,
goto error;
}
- host->dma_active = true;
- host->int_enable(host, MIO_EMM_INT_CMD_ERR | MIO_EMM_INT_DMA_DONE |
- MIO_EMM_INT_DMA_ERR);
+ mrq->host = mmc;
+ WARN_ON(slot->current_req);
+ slot->current_req = mrq;
+ slot->dma_active = true;
+
+ int_enable_mask = MIO_EMM_INT_CMD_ERR | MIO_EMM_INT_DMA_DONE |
+ MIO_EMM_INT_DMA_ERR;
+
+ /* Add NCB_FLT interrupt for octtx2 */
+ if (is_mmc_otx2(host))
+ int_enable_mask |= MIO_EMM_INT_NCB_FLT;
+
+ host->int_enable(host, int_enable_mask);
if (host->dmar_fixup)
host->dmar_fixup(host, mrq->cmd, data, addr);
@@ -700,22 +1003,22 @@ static void cvm_mmc_dma_request(struct mmc_host *mmc,
return;
error:
- mrq->cmd->error = -EINVAL;
if (mrq->done)
mrq->done(mrq);
host->release_bus(host);
}
-static void do_read_request(struct cvm_mmc_host *host, struct mmc_request *mrq)
+static void do_read_request(struct cvm_mmc_slot *slot, struct mmc_request *mrq)
{
- sg_miter_start(&host->smi, mrq->data->sg, mrq->data->sg_len,
+ sg_miter_start(&slot->smi, mrq->data->sg, mrq->data->sg_len,
SG_MITER_ATOMIC | SG_MITER_TO_SG);
}
-static void do_write_request(struct cvm_mmc_host *host, struct mmc_request *mrq)
+static void do_write_request(struct cvm_mmc_slot *slot, struct mmc_request *mrq)
{
+ struct cvm_mmc_host *host = slot->host;
unsigned int data_len = mrq->data->blocks * mrq->data->blksz;
- struct sg_mapping_iter *smi = &host->smi;
+ struct sg_mapping_iter *smi = &slot->smi;
unsigned int bytes_xfered;
int shift = 56;
u64 dat = 0;
@@ -749,6 +1052,51 @@ static void do_write_request(struct cvm_mmc_host *host, struct mmc_request *mrq)
sg_miter_stop(smi);
}
+static void cvm_mmc_track_switch(struct cvm_mmc_slot *slot, u32 cmd_arg)
+{
+ u8 how = (cmd_arg >> 24) & 3;
+ u8 where = (u8)(cmd_arg >> 16);
+ u8 val = (u8)(cmd_arg >> 8);
+
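+ /* e.g. CMD6 arg 0x03B90100: how = 3 (write byte), where = 0xB9 (HS_TIMING), val = 1 */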
+ slot->want_switch = slot->cached_switch;
+
+ /*
+ * Track ext_csd byte writes (how == 3) to critical entries so we
+ * follow up with the matching MIO_EMM_SWITCH adjustment before
+ * any further mmc/core interaction at the old settings.
+ * Current mmc/core logic (Linux 4.14) does not use the set/clear
+ * access modes (how == 1 or 2), which would require more complex
+ * logic to track the intent of a change.
+ */
+
+ if (how != 3)
+ return;
+
+ switch (where) {
+ case EXT_CSD_BUS_WIDTH:
+ slot->want_switch &= ~MIO_EMM_SWITCH_BUS_WIDTH;
+ slot->want_switch |=
+ FIELD_PREP(MIO_EMM_SWITCH_BUS_WIDTH, val);
+ break;
+ case EXT_CSD_POWER_CLASS:
+ slot->want_switch &= ~MIO_EMM_SWITCH_POWER_CLASS;
+ slot->want_switch |=
+ FIELD_PREP(MIO_EMM_SWITCH_POWER_CLASS, val);
+ break;
+ case EXT_CSD_HS_TIMING:
+ slot->want_switch &= ~MIO_EMM_SWITCH_TIMING;
+ if (val)
+ slot->want_switch |=
+ FIELD_PREP(MIO_EMM_SWITCH_TIMING,
+ (1 << (val - 1)));
+ break;
+ default:
+ return;
+ }
+
+ slot->cmd6_pending = true;
+}
+
static void cvm_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct cvm_mmc_slot *slot = mmc_priv(mmc);
@@ -777,23 +1125,27 @@ static void cvm_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
mods = cvm_mmc_get_cr_mods(cmd);
- WARN_ON(host->current_req);
- host->current_req = mrq;
+ WARN_ON(slot->current_req);
+ mrq->host = mmc;
+ slot->current_req = mrq;
if (cmd->data) {
if (cmd->data->flags & MMC_DATA_READ)
- do_read_request(host, mrq);
+ do_read_request(slot, mrq);
else
- do_write_request(host, mrq);
+ do_write_request(slot, mrq);
if (cmd->data->timeout_ns)
set_wdog(slot, cmd->data->timeout_ns);
} else
set_wdog(slot, 0);
- host->dma_active = false;
+ slot->dma_active = false;
host->int_enable(host, MIO_EMM_INT_CMD_DONE | MIO_EMM_INT_CMD_ERR);
+ if (cmd->opcode == MMC_SWITCH)
+ cvm_mmc_track_switch(slot, cmd->arg);
+
emm_cmd = FIELD_PREP(MIO_EMM_CMD_VAL, 1) |
FIELD_PREP(MIO_EMM_CMD_CTYPE_XOR, mods.ctype_xor) |
FIELD_PREP(MIO_EMM_CMD_RTYPE_XOR, mods.rtype_xor) |
@@ -819,37 +1171,259 @@ retry:
if (!retries)
dev_err(host->dev, "Bad status: %llx before command write\n", rsp_sts);
writeq(emm_cmd, host->base + MIO_EMM_CMD(host));
+ if (cmd->opcode == MMC_SWITCH)
+ udelay(1300);
+}
+
+static void cvm_mmc_wait_done(struct mmc_request *cvm_mrq)
+{
+ complete(&cvm_mrq->completion);
+}
+
+static int cvm_mmc_r1_cmd(struct mmc_host *mmc, u32 *statp, u32 opcode)
+{
+ static struct mmc_command cmd = {};
+ static struct mmc_request cvm_mrq = {};
+
+ if (!opcode)
+ opcode = MMC_SEND_STATUS;
+ cmd.opcode = opcode;
+ if (mmc->card)
+ cmd.arg = mmc->card->rca << 16;
+ else
+ cmd.arg = 1 << 16;
+ cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
+ cmd.data = NULL;
+ cvm_mrq.cmd = &cmd;
+
+ init_completion(&cvm_mrq.completion);
+ cvm_mrq.done = cvm_mmc_wait_done;
+
+ cvm_mmc_request(mmc, &cvm_mrq);
+ if (!wait_for_completion_timeout(&cvm_mrq.completion,
+ msecs_to_jiffies(10))) {
+ mmc_abort_tuning(mmc, opcode);
+ return -ETIMEDOUT;
+ }
+
+ if (statp)
+ *statp = cmd.resp[0];
+
+ return cvm_mrq.cmd->error;
+}
+
+static int cvm_mmc_data_tuning(struct mmc_host *mmc, u32 *statp, u32 opcode)
+{
+ int err = 0;
+ u8 *ext_csd;
+ static struct mmc_command cmd = {};
+ static struct mmc_data data = {};
+ static struct mmc_request cvm_mrq = {};
+ static struct scatterlist sg;
+ struct cvm_mmc_slot *slot = mmc_priv(mmc);
+ struct mmc_card *card = mmc->card;
+
+ if (!(slot->cached_switch & MIO_EMM_SWITCH_HS400_TIMING)) {
+ int edetail = -EINVAL;
+ int core_opinion;
+
+ core_opinion =
+ mmc_send_tuning(mmc, opcode, &edetail);
+
+ /* only accept mmc/core opinion when it's happy */
+ if (!core_opinion)
+ return core_opinion;
+ }
+
+ /* EXT_CSD is only available from spec version 4 onwards */
+ if (card && card->csd.mmca_vsn <= CSD_SPEC_VER_3)
+ return -EOPNOTSUPP;
+ /*
+ * As the ext_csd is so large and mostly unused, we don't store the
+ * raw block in mmc_card.
+ */
+ ext_csd = kzalloc(BLKSZ_EXT_CSD, GFP_KERNEL);
+ if (!ext_csd)
+ return -ENOMEM;
+
+ cvm_mrq.cmd = &cmd;
+ cvm_mrq.data = &data;
+ cmd.data = &data;
+
+ cmd.opcode = MMC_SEND_EXT_CSD;
+ cmd.arg = 0;
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+
+ data.blksz = BLKSZ_EXT_CSD;
+ data.blocks = 1;
+ data.flags = MMC_DATA_READ;
+ data.sg = &sg;
+ data.sg_len = 1;
+
+ sg_init_one(&sg, ext_csd, BLKSZ_EXT_CSD);
+
+ /* set timeout */
+ if (card) {
+ /* SD cards use a 100 multiplier rather than 10 */
+ u32 mult = mmc_card_sd(card) ? 100 : 10;
+
+ data.timeout_ns = card->csd.taac_ns * mult;
+ data.timeout_clks = card->csd.taac_clks * mult;
+ } else {
+ data.timeout_ns = 50 * NSEC_PER_MSEC;
+ }
+
+ init_completion(&cvm_mrq.completion);
+ cvm_mrq.done = cvm_mmc_wait_done;
+
+ cvm_mmc_request(mmc, &cvm_mrq);
+ if (!wait_for_completion_timeout(&cvm_mrq.completion,
+ msecs_to_jiffies(100))) {
+ mmc_abort_tuning(mmc, cmd.opcode);
+ err = -ETIMEDOUT;
+ }
+
+ data.sg_len = 0; /* FIXME: catch over-time completions? */
+ kfree(ext_csd);
+
+ if (err)
+ return err;
+
+ if (statp)
+ *statp = cvm_mrq.cmd->resp[0];
+
+ return cvm_mrq.cmd->error;
+}
+
+/* adjusters for the 4 otx2 delay line taps */
+struct adj {
+ const char *name;
+ u64 mask;
+ int (*test)(struct mmc_host *mmc, u32 *statp, u32 opcode);
+ u32 opcode;
+ bool ddr_only;
+};
+
+static int adjust_tuning(struct mmc_host *mmc, struct adj *adj, u32 opcode)
+{
+ int err, start_run = -1, best_run = 0, best_start = -1;
+ int last_good = -1;
+ bool prev_ok = false;
+ u64 timing, tap;
+ struct cvm_mmc_slot *slot = mmc_priv(mmc);
+ struct cvm_mmc_host *host = slot->host;
+ char how[MAX_NO_OF_TAPS+1] = "";
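+ /* 'how' ends up like "--++++++@+++--": '+' pass, '-' fail, '@' chosen tap */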
+
+ /* loop over range+1 to simplify processing */
+ for (tap = 0; tap <= MAX_NO_OF_TAPS; tap++, prev_ok = !err) {
+ if (tap < MAX_NO_OF_TAPS) {
+ timing = readq(host->base + MIO_EMM_TIMING(host));
+ timing &= ~adj->mask;
+ timing |= (tap << __bf_shf(adj->mask));
+ writeq(timing, host->base + MIO_EMM_TIMING(host));
+
+ err = adj->test(mmc, NULL, opcode);
+
+ how[tap] = "-+"[!err];
+ if (!err)
+ last_good = tap;
+ } else {
+ /*
+ * Putting the end+1 case inside the loop simplifies the
+ * logic, allowing 'prev_ok' to handle a sweet spot in
+ * tuning that extends all the way to the upper wall.
+ */
+ err = -EINVAL;
+ }
+
+ if (!err) {
+ /*
+ * If no CRC/etc errors in response, but previous
+ * failed, note the start of a new run
+ */
+ if (!prev_ok)
+ start_run = tap;
+ } else if (prev_ok) {
+ int run = tap - 1 - start_run;
+
+ /* did we just exit a wider sweet spot? */
+ if (start_run >= 0 && run > best_run) {
+ best_start = start_run;
+ best_run = run;
+ }
+ }
+ }
+
+ if (best_start < 0) {
+ dev_warn(host->dev, "%s %lldMHz tuning %s failed\n",
+ mmc_hostname(mmc), slot->clock / 1000000, adj->name);
+ return -EINVAL;
+ }
+
+ tap = best_start + best_run / 2;
+ how[tap] = '@';
+ if (tapdance) {
+ tap = last_good - tapdance;
+ how[tap] = 'X';
+ }
+ dev_dbg(host->dev, "%s/%s %d/%lld/%d %s\n",
+ mmc_hostname(mmc), adj->name,
+ best_start, tap, best_start + best_run,
+ how);
+ slot->taps &= ~adj->mask;
+ slot->taps |= (tap << __bf_shf(adj->mask));
+ cvm_mmc_set_timing(slot);
+ return 0;
+}
+
+static u32 max_supported_frequency(struct cvm_mmc_host *host)
+{
+ /* Default maximum frequency is 52 MHz for chips prior to 9X */
+ u32 max_frequency = MHZ_52;
+
+ if (is_mmc_otx2(host)) {
+ /* Default max frequency is 200MHz for 9X chips */
+ max_frequency = MHZ_200;
+
+ /* This erratum applies only to pass A0 */
+ if (is_mmc_otx2_A0(host))
+ max_frequency = MHZ_100;
+ else if (is_mmc_otx2_C0(host))
+ max_frequency = MHZ_150;
+ }
+ return max_frequency;
}
static void cvm_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
+
struct cvm_mmc_slot *slot = mmc_priv(mmc);
struct cvm_mmc_host *host = slot->host;
int clk_period = 0, power_class = 10, bus_width = 0;
- u64 clock, emm_switch;
+ u64 clock, emm_switch, mode;
+ u32 max_f;
+
+ if (ios->power_mode == MMC_POWER_OFF) {
+ if (host->powered) {
+ cvm_mmc_reset_bus(slot);
+ if (host->global_pwr_gpiod)
+ host->set_shared_power(host, 0);
+ else if (!IS_ERR_OR_NULL(mmc->supply.vmmc))
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
+ host->powered = false;
+ }
+ set_wdog(slot, 0);
+ return;
+ }
host->acquire_bus(host);
cvm_mmc_switch_to(slot);
- /* Set the power state */
- switch (ios->power_mode) {
- case MMC_POWER_ON:
- break;
-
- case MMC_POWER_OFF:
- cvm_mmc_reset_bus(slot);
- if (host->global_pwr_gpiod)
- host->set_shared_power(host, 0);
- else if (!IS_ERR(mmc->supply.vmmc))
- mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
- break;
-
- case MMC_POWER_UP:
+ if (ios->power_mode == MMC_POWER_UP) {
if (host->global_pwr_gpiod)
host->set_shared_power(host, 1);
- else if (!IS_ERR(mmc->supply.vmmc))
+ else if (!IS_ERR_OR_NULL(mmc->supply.vmmc))
mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
- break;
}
/* Convert bus width to HW definition */
@@ -866,41 +1440,201 @@ static void cvm_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
}
/* DDR is available for 4/8 bit bus width */
- if (ios->bus_width && ios->timing == MMC_TIMING_MMC_DDR52)
- bus_width |= 4;
+ switch (ios->timing) {
+ case MMC_TIMING_UHS_DDR50:
+ case MMC_TIMING_MMC_DDR52:
+ if (ios->bus_width)
+ bus_width |= 4;
+ break;
+ case MMC_TIMING_MMC_HS400:
+ if (ios->bus_width & 2)
+ bus_width |= 4;
+ break;
+ }
/* Change the clock frequency. */
clock = ios->clock;
- if (clock > 52000000)
- clock = 52000000;
+ max_f = max_supported_frequency(host);
+
+ if (clock < mmc->f_min)
+ clock = mmc->f_min;
+ if (clock > max_f)
+ clock = max_f;
+
slot->clock = clock;
- if (clock)
- clk_period = (host->sys_freq + clock - 1) / (2 * clock);
+ if (clock) {
+ clk_period = host->sys_freq / (2 * clock);
+ /* check to not exceed requested speed */
+ while (1) {
+ int hz = host->sys_freq / (2 * clk_period);
- emm_switch = FIELD_PREP(MIO_EMM_SWITCH_HS_TIMING,
- (ios->timing == MMC_TIMING_MMC_HS)) |
+ if (hz <= clock)
+ break;
+ clk_period++;
+ }
+ }
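+ /* e.g. a 700 MHz sclk with a 52 MHz request settles at clk_period = 7 (~50 MHz); the sclk value is illustrative */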
+
+ emm_switch =
FIELD_PREP(MIO_EMM_SWITCH_BUS_WIDTH, bus_width) |
FIELD_PREP(MIO_EMM_SWITCH_POWER_CLASS, power_class) |
FIELD_PREP(MIO_EMM_SWITCH_CLK_HI, clk_period) |
FIELD_PREP(MIO_EMM_SWITCH_CLK_LO, clk_period);
+ switch (ios->timing) {
+ case MMC_TIMING_LEGACY:
+ break;
+ case MMC_TIMING_MMC_HS:
+ case MMC_TIMING_SD_HS:
+ case MMC_TIMING_UHS_SDR12:
+ case MMC_TIMING_UHS_SDR25:
+ case MMC_TIMING_UHS_SDR50:
+ case MMC_TIMING_UHS_SDR104:
+ case MMC_TIMING_UHS_DDR50:
+ case MMC_TIMING_MMC_DDR52:
+ emm_switch |= FIELD_PREP(MIO_EMM_SWITCH_HS_TIMING, 1);
+ break;
+ case MMC_TIMING_MMC_HS200:
+ emm_switch |= FIELD_PREP(MIO_EMM_SWITCH_HS200_TIMING, 1);
+ break;
+ case MMC_TIMING_MMC_HS400:
+ emm_switch |= FIELD_PREP(MIO_EMM_SWITCH_HS400_TIMING, 1);
+ break;
+ }
set_bus_id(&emm_switch, slot->bus_id);
+ pr_debug("mmc-slot%d trying switch %llx w%lld hs%lld hs200:%lld hs400:%lld\n",
+ slot->bus_id, emm_switch,
+ FIELD_GET(MIO_EMM_SWITCH_BUS_WIDTH, emm_switch),
+ FIELD_GET(MIO_EMM_SWITCH_HS_TIMING, emm_switch),
+ FIELD_GET(MIO_EMM_SWITCH_HS200_TIMING, emm_switch),
+ FIELD_GET(MIO_EMM_SWITCH_HS400_TIMING, emm_switch));
+
if (!switch_val_changed(slot, emm_switch))
goto out;
set_wdog(slot, 0);
do_switch(host, emm_switch);
+
+ mode = readq(host->base + MIO_EMM_MODE(host, slot->bus_id));
+ pr_debug("mmc-slot%d mode %llx w%lld hs%lld hs200:%lld hs400:%lld\n",
+ slot->bus_id, mode,
+ (mode >> 40) & 7, (mode >> 48) & 1,
+ (mode >> 49) & 1, (mode >> 50) & 1);
+
slot->cached_switch = emm_switch;
+ host->powered = true;
+ cvm_mmc_configure_delay(slot);
out:
host->release_bus(host);
}
+static struct adj adj[] = {
+ { "CMD_IN", MIO_EMM_TIMING_CMD_IN,
+ cvm_mmc_r1_cmd, MMC_SEND_STATUS, },
+ { "DATA_IN", MIO_EMM_TIMING_DATA_IN,
+ cvm_mmc_data_tuning, },
+ { NULL, },
+};
+
+static int cvm_scan_tuning(struct mmc_host *mmc, u32 opcode)
+{
+ struct cvm_mmc_slot *slot = mmc_priv(mmc);
+ struct adj *a;
+ int ret;
+
+ for (a = adj; a->name; a++) {
+ if (a->ddr_only && !cvm_is_mmc_timing_ddr(slot))
+ continue;
+
+ ret = adjust_tuning(mmc, a,
+ a->opcode ?: opcode);
+
+ if (ret)
+ return ret;
+ }
+
+ cvm_mmc_set_timing(slot);
+ return 0;
+}
+
+static int cvm_execute_tuning(struct mmc_host *mmc, u32 opcode)
+{
+ struct cvm_mmc_slot *slot = mmc_priv(mmc);
+ struct cvm_mmc_host *host = slot->host;
+ int clk_period, hz;
+
+ int ret;
+
+ do {
+ u64 emm_switch =
+ readq(host->base + MIO_EMM_MODE(host, slot->bus_id));
+
+ clk_period = FIELD_GET(MIO_EMM_SWITCH_CLK_LO, emm_switch);
+ dev_info(slot->host->dev, "%s re-tuning\n",
+ mmc_hostname(mmc));
+ ret = cvm_scan_tuning(mmc, opcode);
+ if (ret) {
+ int inc = clk_period >> 3;
+
+ if (!inc)
+ inc++;
+ clk_period += inc;
+ hz = host->sys_freq / (2 * clk_period);
+ pr_debug("clk_period %d += %d, now %d Hz\n",
+ clk_period - inc, inc, hz);
+
+ if (hz < 400000)
+ break;
+
+ slot->clock = hz;
+ mmc->ios.clock = hz;
+
+ emm_switch &= ~MIO_EMM_SWITCH_CLK_LO;
+ emm_switch |= FIELD_PREP(MIO_EMM_SWITCH_CLK_LO,
+ clk_period);
+ emm_switch &= ~MIO_EMM_SWITCH_CLK_HI;
+ emm_switch |= FIELD_PREP(MIO_EMM_SWITCH_CLK_HI,
+ clk_period);
+ do_switch(host, emm_switch);
+ }
+ } while (ret);
+
+ return ret;
+}
+
+static int cvm_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct cvm_mmc_slot *slot = mmc_priv(mmc);
+
+ return cvm_mmc_configure_delay(slot);
+}
+
+static void cvm_mmc_reset(struct mmc_host *mmc)
+{
+ struct cvm_mmc_slot *slot = mmc_priv(mmc);
+ struct cvm_mmc_host *host = slot->host;
+ u64 r;
+
+ cvm_mmc_reset_bus(slot);
+
+ r = FIELD_PREP(MIO_EMM_CMD_VAL, 1) |
+ FIELD_PREP(MIO_EMM_CMD_BUS_ID, slot->bus_id);
+
+ writeq(r, host->base + MIO_EMM_CMD(host));
+
+ do {
+ r = readq(host->base + MIO_EMM_RSP_STS(host));
+ } while (!(r & MIO_EMM_RSP_STS_CMD_DONE));
+}
+
static const struct mmc_host_ops cvm_mmc_ops = {
.request = cvm_mmc_request,
.set_ios = cvm_mmc_set_ios,
.get_ro = mmc_gpio_get_ro,
.get_cd = mmc_gpio_get_cd,
+ .hw_reset = cvm_mmc_reset,
+ .execute_tuning = cvm_execute_tuning,
+ .prepare_hs400_tuning = cvm_prepare_hs400_tuning,
};
static void cvm_mmc_set_clock(struct cvm_mmc_slot *slot, unsigned int clock)
@@ -917,7 +1651,7 @@ static int cvm_mmc_init_lowlevel(struct cvm_mmc_slot *slot)
struct cvm_mmc_host *host = slot->host;
u64 emm_switch;
- /* Enable this bus slot. */
+ /* Enable this bus slot. Overridden when vqmmc switching is engaged */
host->emm_cfg |= (1ull << slot->bus_id);
writeq(host->emm_cfg, slot->host->base + MIO_EMM_CFG(host));
udelay(10);
@@ -933,8 +1667,8 @@ static int cvm_mmc_init_lowlevel(struct cvm_mmc_slot *slot)
/* Make the changes take effect on this bus slot. */
set_bus_id(&emm_switch, slot->bus_id);
do_switch(host, emm_switch);
-
slot->cached_switch = emm_switch;
+ host->powered = true;
/*
* Set watchdog timeout value and default reset value
@@ -953,7 +1687,7 @@ static int cvm_mmc_of_parse(struct device *dev, struct cvm_mmc_slot *slot)
u32 id, cmd_skew = 0, dat_skew = 0, bus_width = 0;
struct device_node *node = dev->of_node;
struct mmc_host *mmc = slot->mmc;
- u64 clock_period;
+ u32 max_frequency, current_drive, clk_slew;
int ret;
ret = of_property_read_u32(node, "reg", &id);
@@ -962,8 +1696,14 @@ static int cvm_mmc_of_parse(struct device *dev, struct cvm_mmc_slot *slot)
return ret;
}
- if (id >= CAVIUM_MAX_MMC || slot->host->slot[id]) {
- dev_err(dev, "Invalid reg property on %pOF\n", node);
+ if (id >= CAVIUM_MAX_MMC) {
+ dev_err(dev, "Invalid reg=<%d> property on %pOF\n", id, node);
+ return -EINVAL;
+ }
+
+ if (slot->host->slot[id]) {
+ dev_err(dev, "Duplicate reg=<%d> property on %pOF\n",
+ id, node);
return -EINVAL;
}
@@ -974,7 +1714,7 @@ static int cvm_mmc_of_parse(struct device *dev, struct cvm_mmc_slot *slot)
* Legacy Octeon firmware has no regulator entry, fall-back to
* a hard-coded voltage to get a sane OCR.
*/
- if (IS_ERR(mmc->supply.vmmc))
+ if (IS_ERR_OR_NULL(mmc->supply.vmmc))
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
/* Common MMC bindings */
@@ -982,7 +1722,7 @@ static int cvm_mmc_of_parse(struct device *dev, struct cvm_mmc_slot *slot)
if (ret)
return ret;
- /* Set bus width */
+ /* Set bus width from obsolete properties, if unset */
if (!(mmc->caps & (MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA))) {
of_property_read_u32(node, "cavium,bus-max-width", &bus_width);
if (bus_width == 8)
@@ -991,19 +1731,40 @@ static int cvm_mmc_of_parse(struct device *dev, struct cvm_mmc_slot *slot)
mmc->caps |= MMC_CAP_4_BIT_DATA;
}
+ max_frequency = max_supported_frequency(slot->host);
+
/* Set maximum and minimum frequency */
if (!mmc->f_max)
of_property_read_u32(node, "spi-max-frequency", &mmc->f_max);
- if (!mmc->f_max || mmc->f_max > 52000000)
- mmc->f_max = 52000000;
- mmc->f_min = 400000;
+ if (!mmc->f_max || mmc->f_max > max_frequency)
+ mmc->f_max = max_frequency;
+ mmc->f_min = KHZ_400;
/* Sampling register settings, period in picoseconds */
- clock_period = 1000000000000ull / slot->host->sys_freq;
of_property_read_u32(node, "cavium,cmd-clk-skew", &cmd_skew);
of_property_read_u32(node, "cavium,dat-clk-skew", &dat_skew);
- slot->cmd_cnt = (cmd_skew + clock_period / 2) / clock_period;
- slot->dat_cnt = (dat_skew + clock_period / 2) / clock_period;
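+ /*
+ * On ThunderX 8xxx and OcteonTX2 the skew properties appear to be taken
+ * as raw tap counts; on older parts they are in picoseconds and are
+ * converted using the input clock period.
+ */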
+ if (is_mmc_8xxx(slot->host) || is_mmc_otx2(slot->host)) {
+ slot->cmd_cnt = cmd_skew;
+ slot->data_cnt = dat_skew;
+ } else {
+ u64 clock_period = 1000000000000ull / slot->host->sys_freq;
+
+ slot->cmd_cnt = (cmd_skew + clock_period / 2) / clock_period;
+ slot->data_cnt = (dat_skew + clock_period / 2) / clock_period;
+ }
+
+ /* Get current drive and clk skew */
+ ret = of_property_read_u32(node, "cavium,drv-strength", &current_drive);
+ if (ret)
+ slot->drive = -1;
+ else
+ slot->drive = current_drive;
+
+ ret = of_property_read_u32(node, "cavium,clk-slew", &clk_slew);
+ if (ret)
+ slot->slew = -1;
+ else
+ slot->slew = clk_slew;
return id;
}
@@ -1012,6 +1773,7 @@ int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
{
struct cvm_mmc_slot *slot;
struct mmc_host *mmc;
+ struct iommu_domain *dom;
int ret, id;
mmc = mmc_alloc_host(sizeof(struct cvm_mmc_slot), dev);
@@ -1030,16 +1792,19 @@ int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
/* Set up host parameters */
mmc->ops = &cvm_mmc_ops;
+ mmc->caps |= MMC_CAP_ERASE | MMC_CAP_BUS_WIDTH_TEST;
+ mmc->caps |= MMC_CAP_CMD23 | MMC_CAP_POWER_OFF_CARD;
+
/*
- * We only have a 3.3v supply, we cannot support any
- * of the UHS modes. We do support the high speed DDR
- * modes up to 52MHz.
+ * We only have a 3.3v supply for the slots, so we cannot
+ * support any of the UHS modes. We do support the
+ * high-speed DDR modes up to 52MHz.
*
* Disable bounce buffers for max_segs = 1
*/
- mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
- MMC_CAP_ERASE | MMC_CAP_CMD23 | MMC_CAP_POWER_OFF_CARD |
- MMC_CAP_3_3V_DDR;
+
+ if (!is_mmc_otx2(host))
+ mmc->caps |= MMC_CAP_3_3V_DDR;
if (host->use_sg)
mmc->max_segs = 16;
@@ -1055,14 +1820,30 @@ int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
/* DMA block count field is 15 bits */
mmc->max_blk_count = 32767;
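+
+ /*
+ * Behind an identity-mapped IOMMU domain DMA may bounce through
+ * SWIOTLB, so cap segment and request sizes to what a single
+ * SWIOTLB mapping (IO_TLB_SEGSIZE slots) can cover.
+ */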
+ dom = iommu_get_domain_for_dev(dev->parent);
+ if (dom && dom->type == IOMMU_DOMAIN_IDENTITY) {
+ unsigned int max_size = (1 << IO_TLB_SHIFT) * IO_TLB_SEGSIZE;
+
+ if (mmc->max_seg_size > max_size)
+ mmc->max_seg_size = max_size;
+
+ max_size *= mmc->max_segs;
+
+ if (mmc->max_req_size > max_size)
+ mmc->max_req_size = max_size;
+ }
+
+ mmc_can_retune(mmc);
+
slot->clock = mmc->f_min;
slot->bus_id = id;
slot->cached_rca = 1;
host->acquire_bus(host);
host->slot[id] = slot;
- cvm_mmc_switch_to(slot);
+ host->use_vqmmc |= !IS_ERR_OR_NULL(slot->mmc->supply.vqmmc);
cvm_mmc_init_lowlevel(slot);
+ cvm_mmc_switch_to(slot);
host->release_bus(host);
ret = mmc_add_host(mmc);
diff --git a/drivers/mmc/host/cavium.h b/drivers/mmc/host/cavium.h
index f3eea5eaa678..ec10ed476128 100644
--- a/drivers/mmc/host/cavium.h
+++ b/drivers/mmc/host/cavium.h
@@ -19,8 +19,47 @@
#include <linux/of.h>
#include <linux/scatterlist.h>
#include <linux/semaphore.h>
+#include <linux/pci.h>
#define CAVIUM_MAX_MMC 4
+#define BLKSZ_EXT_CSD 512
+#define MRVL_OCTEONTX2_96XX_PARTNUM 0xB2
+
+/* Subsystem Device ID */
+#define PCI_SUBSYS_DEVID_8XXX 0xA
+#define PCI_SUBSYS_DEVID_9XXX 0xB
+#define PCI_SUBSYS_DEVID_95XX 0xB3
+
+/* Chip revision Id */
+#define REV_ID_0 0
+#define REV_ID_2 2
+
+#define KHZ_400 (400000)
+#define MHZ_26 (26000000)
+#define MHZ_52 (52000000)
+#define MHZ_100 (100000000)
+#define MHZ_150 (150000000)
+#define MHZ_200 (200000000)
+
+/* octtx2: emmc interface io current drive strength */
+#define MILLI_AMP_2 (0x0)
+#define MILLI_AMP_4 (0x1)
+#define MILLI_AMP_8 (0x2)
+#define MILLI_AMP_16 (0x3)
+
+/* octtx2: emmc interface io clk skew */
+#define LOW_SLEW_RATE (0x0)
+#define HIGH_SLEW_RATE (0x1)
+
+/* octtx2: emmc interface calibration */
+#define START_CALIBRATION (0x1)
+#define TOTAL_NO_OF_TAPS (512)
+#define PS_10000 (10 * 1000)
+#define PS_5000 (5000)
+#define PS_2500 (2500)
+#define PS_400 (400)
+#define MAX_NO_OF_TAPS 64
+
/* DMA register addresses */
#define MIO_EMM_DMA_FIFO_CFG(x) (0x00 + x->reg_off_dma)
@@ -33,8 +72,17 @@
#define MIO_EMM_DMA_INT_ENA_W1S(x) (0x40 + x->reg_off_dma)
#define MIO_EMM_DMA_INT_ENA_W1C(x) (0x48 + x->reg_off_dma)
+/* octtx2 specific registers */
+#define MIO_EMM_CALB(x) (0xC0 + x->reg_off)
+#define MIO_EMM_TAP(x) (0xC8 + x->reg_off)
+#define MIO_EMM_TIMING(x) (0xD0 + x->reg_off)
+#define MIO_EMM_DEBUG(x) (0xF8 + x->reg_off)
+
/* register addresses */
#define MIO_EMM_CFG(x) (0x00 + x->reg_off)
+#define MIO_EMM_MODE(x, s) (0x08 + 8*(s) + (x)->reg_off)
+/* octtx2 specific register */
+#define MIO_EMM_IO_CTL(x) (0x40 + x->reg_off)
#define MIO_EMM_SWITCH(x) (0x48 + x->reg_off)
#define MIO_EMM_DMA(x) (0x50 + x->reg_off)
#define MIO_EMM_CMD(x) (0x58 + x->reg_off)
@@ -56,6 +104,7 @@ struct cvm_mmc_host {
struct device *dev;
void __iomem *base;
void __iomem *dma_base;
+ struct pci_dev *pdev;
int reg_off;
int reg_off_dma;
u64 emm_cfg;
@@ -64,12 +113,10 @@ struct cvm_mmc_host {
struct clk *clk;
int sys_freq;
- struct mmc_request *current_req;
- struct sg_mapping_iter smi;
- bool dma_active;
bool use_sg;
-
bool has_ciu3;
+ bool powered;
+ bool use_vqmmc; /* must disable slots over switch */
bool big_dma_addr;
bool need_irq_handler_lock;
spinlock_t irq_handler_lock;
@@ -80,6 +127,9 @@ struct cvm_mmc_host {
struct cvm_mmc_slot *slot[CAVIUM_MAX_MMC];
struct platform_device *slot_pdev[CAVIUM_MAX_MMC];
+ /* octtx2 specific */
+ unsigned int per_tap_delay; /* per-tap delay in picoseconds */
+ unsigned long delay_logged; /* per-ios.timing bitmask */
void (*set_shared_power)(struct cvm_mmc_host *, int);
void (*acquire_bus)(struct cvm_mmc_host *);
@@ -94,16 +144,27 @@ struct cvm_mmc_host {
struct cvm_mmc_slot {
struct mmc_host *mmc; /* slot-level mmc_core object */
struct cvm_mmc_host *host; /* common hw for all slots */
+ struct mmc_request *current_req;
u64 clock;
+ u32 ecount, gcount;
u64 cached_switch;
u64 cached_rca;
- unsigned int cmd_cnt; /* sample delay */
- unsigned int dat_cnt; /* sample delay */
+ struct sg_mapping_iter smi;
+ bool dma_active;
+
+ u64 taps; /* otx2: MIO_EMM_TIMING */
+ unsigned int cmd_cnt; /* otx: sample cmd in delay */
+ unsigned int data_cnt; /* otx: sample data in delay */
+
+ int drive; /* Current drive */
+ int slew; /* clock skew */
int bus_id;
+ bool cmd6_pending;
+ u64 want_switch;
};
struct cvm_mmc_cr_type {
@@ -161,6 +222,21 @@ struct cvm_mmc_cr_mods {
#define MIO_EMM_DMA_CFG_SIZE GENMASK_ULL(55, 36)
#define MIO_EMM_DMA_CFG_ADR GENMASK_ULL(35, 0)
+#define MIO_EMM_CFG_BUS_ENA GENMASK_ULL(3, 0)
+
+#define MIO_EMM_IO_CTL_DRIVE GENMASK_ULL(3, 2)
+#define MIO_EMM_IO_CTL_SLEW BIT_ULL(0)
+
+#define MIO_EMM_CALB_START BIT_ULL(0)
+#define MIO_EMM_TAP_DELAY GENMASK_ULL(7, 0)
+
+#define MIO_EMM_TIMING_CMD_IN GENMASK_ULL(53, 48)
+#define MIO_EMM_TIMING_CMD_OUT GENMASK_ULL(37, 32)
+#define MIO_EMM_TIMING_DATA_IN GENMASK_ULL(21, 16)
+#define MIO_EMM_TIMING_DATA_OUT GENMASK_ULL(5, 0)
+
+#define MIO_EMM_INT_NCB_RAS BIT_ULL(8)
+#define MIO_EMM_INT_NCB_FLT BIT_ULL(7)
#define MIO_EMM_INT_SWITCH_ERR BIT_ULL(6)
#define MIO_EMM_INT_SWITCH_DONE BIT_ULL(5)
#define MIO_EMM_INT_DMA_ERR BIT_ULL(4)
@@ -169,6 +245,9 @@ struct cvm_mmc_cr_mods {
#define MIO_EMM_INT_CMD_DONE BIT_ULL(1)
#define MIO_EMM_INT_BUF_DONE BIT_ULL(0)
+#define MIO_EMM_DMA_INT_FIFO BIT_ULL(1)
+#define MIO_EMM_DMA_INT_DMA BIT_ULL(0)
+
#define MIO_EMM_RSP_STS_BUS_ID GENMASK_ULL(61, 60)
#define MIO_EMM_RSP_STS_CMD_VAL BIT_ULL(59)
#define MIO_EMM_RSP_STS_SWITCH_VAL BIT_ULL(58)
@@ -200,9 +279,14 @@ struct cvm_mmc_cr_mods {
#define MIO_EMM_SWITCH_ERR0 BIT_ULL(58)
#define MIO_EMM_SWITCH_ERR1 BIT_ULL(57)
#define MIO_EMM_SWITCH_ERR2 BIT_ULL(56)
+#define MIO_EMM_SWITCH_ERRS GENMASK_ULL(58, 56)
+#define MIO_EMM_SWITCH_HS400_TIMING BIT_ULL(50)
+#define MIO_EMM_SWITCH_HS200_TIMING BIT_ULL(49)
#define MIO_EMM_SWITCH_HS_TIMING BIT_ULL(48)
+#define MIO_EMM_SWITCH_TIMING GENMASK_ULL(50, 48)
#define MIO_EMM_SWITCH_BUS_WIDTH GENMASK_ULL(42, 40)
#define MIO_EMM_SWITCH_POWER_CLASS GENMASK_ULL(35, 32)
+#define MIO_EMM_SWITCH_CLK GENMASK_ULL(31, 0)
#define MIO_EMM_SWITCH_CLK_HI GENMASK_ULL(31, 16)
#define MIO_EMM_SWITCH_CLK_LO GENMASK_ULL(15, 0)
@@ -210,6 +294,61 @@ struct cvm_mmc_cr_mods {
irqreturn_t cvm_mmc_interrupt(int irq, void *dev_id);
int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host);
int cvm_mmc_of_slot_remove(struct cvm_mmc_slot *slot);
+
extern const char *cvm_mmc_irq_names[];
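+
+/*
+ * SoC identification helpers: the chip family is decoded from bits of the
+ * PCI subsystem device ID (family in bits 15:12, part number in bits 15:8),
+ * combined with the PCI revision ID for stepping checks.
+ */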
+static inline bool is_mmc_8xxx(struct cvm_mmc_host *host)
+{
+#ifdef CONFIG_ARM64
+ struct pci_dev *pdev = host->pdev;
+ u32 chip_id = (pdev->subsystem_device >> 12) & 0xF;
+
+ return (chip_id == PCI_SUBSYS_DEVID_8XXX);
+#else
+ return false;
+#endif
+}
+
+static inline bool is_mmc_otx2(struct cvm_mmc_host *host)
+{
+#ifdef CONFIG_ARM64
+ struct pci_dev *pdev = host->pdev;
+ u32 chip_id = (pdev->subsystem_device >> 12) & 0xF;
+
+ return (chip_id == PCI_SUBSYS_DEVID_9XXX);
+#else
+ return false;
+#endif
+}
+
+static inline bool is_mmc_otx2_A0(struct cvm_mmc_host *host)
+{
+#ifdef CONFIG_ARM64
+ struct pci_dev *pdev = host->pdev;
+ u32 chip_id = (pdev->subsystem_device >> 8) & 0xFF;
+
+ return (pdev->revision == 0x00) &&
+ (chip_id == MRVL_OCTEONTX2_96XX_PARTNUM);
+#else
+ return false;
+#endif
+}
+
+static inline bool is_mmc_otx2_C0(struct cvm_mmc_host *host)
+{
+ struct pci_dev *pdev = host->pdev;
+ u32 chip_id = (pdev->subsystem_device >> 12) & 0xF;
+
+ return (pdev->revision == REV_ID_2) &&
+ (chip_id == PCI_SUBSYS_DEVID_9XXX);
+}
+
+static inline bool is_mmc_95xx(struct cvm_mmc_host *host)
+{
+ struct pci_dev *pdev = host->pdev;
+ u32 chip_id = (pdev->subsystem_device >> 8) & 0xFF;
+
+ return (chip_id == PCI_SUBSYS_DEVID_95XX);
+}
+
#endif
diff --git a/drivers/mtd/spi-nor/macronix.c b/drivers/mtd/spi-nor/macronix.c
index ab0f963d630c..b0728a659515 100644
--- a/drivers/mtd/spi-nor/macronix.c
+++ b/drivers/mtd/spi-nor/macronix.c
@@ -75,7 +75,7 @@ static const struct flash_info macronix_parts[] = {
SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
{ "mx66l1g45g", INFO(0xc2201b, 0, 64 * 1024, 2048,
SECT_4K | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ) },
+ SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
{ "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048,
SPI_NOR_QUAD_READ) },
};
diff --git a/drivers/mtd/spi-nor/micron-st.c b/drivers/mtd/spi-nor/micron-st.c
index 6c034b9718e2..f1dcf73f2640 100644
--- a/drivers/mtd/spi-nor/micron-st.c
+++ b/drivers/mtd/spi-nor/micron-st.c
@@ -59,7 +59,7 @@ static const struct flash_info st_parts[] = {
SPI_NOR_4BIT_BP | SPI_NOR_BP3_SR_BIT6) },
{ "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048,
SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
- NO_CHIP_ERASE) },
+ NO_CHIP_ERASE | SPI_NOR_4B_OPCODES)},
{ "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048,
SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
NO_CHIP_ERASE) },
@@ -69,6 +69,9 @@ static const struct flash_info st_parts[] = {
{ "mt25qu02g", INFO(0x20bb22, 0, 64 * 1024, 4096,
SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
NO_CHIP_ERASE) },
+ { "mt25ql02g", INFO(0x20ba22, 0, 64 * 1024, 4096,
+ SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
+ SPI_NOR_4B_OPCODES | NO_CHIP_ERASE) },
{ "m25p05", INFO(0x202010, 0, 32 * 1024, 2, 0) },
{ "m25p10", INFO(0x202011, 0, 32 * 1024, 4, 0) },
diff --git a/drivers/net/ethernet/cavium/thunder/Makefile b/drivers/net/ethernet/cavium/thunder/Makefile
index 2fc6142d1634..6cf739e31883 100644
--- a/drivers/net/ethernet/cavium/thunder/Makefile
+++ b/drivers/net/ethernet/cavium/thunder/Makefile
@@ -5,9 +5,9 @@
obj-$(CONFIG_THUNDER_NIC_RGX) += thunder_xcv.o
obj-$(CONFIG_THUNDER_NIC_BGX) += thunder_bgx.o
-obj-$(CONFIG_THUNDER_NIC_PF) += nicpf.o
-obj-$(CONFIG_THUNDER_NIC_VF) += nicvf.o
+obj-$(CONFIG_THUNDER_NIC_PF) += thunder_nic.o
+obj-$(CONFIG_THUNDER_NIC_VF) += thunder_nicvf.o
-nicpf-y := nic_main.o
-nicvf-y := nicvf_main.o nicvf_queues.o
-nicvf-y += nicvf_ethtool.o
+thunder_nic-y := nic_main.o
+thunder_nicvf-y := nicvf_main.o nicvf_queues.o
+thunder_nicvf-y += nicvf_ethtool.o
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index 83dabcffc789..4c0dade7fe68 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -512,25 +512,40 @@ static int nicvf_set_ringparam(struct net_device *netdev,
static int nicvf_get_rss_hash_opts(struct nicvf *nic,
struct ethtool_rxnfc *info)
{
+ u64 rss_cfg = nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);
+
info->data = 0;
+ if (!(rss_cfg & BIT_ULL(RSS_HASH_IP)))
+ return 0;
+
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+
switch (info->flow_type) {
case TCP_V4_FLOW:
case TCP_V6_FLOW:
+ if (rss_cfg & BIT_ULL(RSS_HASH_TCP))
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
case UDP_V4_FLOW:
case UDP_V6_FLOW:
+ if (rss_cfg & BIT_ULL(RSS_HASH_UDP))
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
case SCTP_V4_FLOW:
case SCTP_V6_FLOW:
- info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- /* Fall through */
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
case IPV4_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
case IPV6_FLOW:
- info->data |= RXH_IP_SRC | RXH_IP_DST;
break;
default:
return -EINVAL;
}
-
return 0;
}
@@ -596,19 +611,6 @@ static int nicvf_set_rss_hash_opts(struct nicvf *nic,
return -EINVAL;
}
break;
- case SCTP_V4_FLOW:
- case SCTP_V6_FLOW:
- switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
- case 0:
- rss_cfg &= ~(1ULL << RSS_HASH_L4ETC);
- break;
- case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
- rss_cfg |= (1ULL << RSS_HASH_L4ETC);
- break;
- default:
- return -EINVAL;
- }
- break;
case IPV4_FLOW:
case IPV6_FLOW:
rss_cfg = RSS_HASH_IP;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 069e7413f1ef..5e8fa534aef4 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -359,7 +359,7 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
/* Release additional page references held for recycling */
head = 0;
while (head < rbdr->pgcnt) {
- pgcache = &rbdr->pgcache[head];
+ pgcache = &rbdr->pgcache[head++];
if (pgcache->page && page_ref_count(pgcache->page) != 0) {
if (rbdr->is_xdp) {
page_ref_sub(pgcache->page,
@@ -367,9 +367,8 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
}
put_page(pgcache->page);
}
- head++;
}
-
+ kfree(rbdr->pgcache);
/* Free RBDR ring */
nicvf_free_q_desc_mem(nic, &rbdr->dmem);
}
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 8ff28ed04b7f..bb939eb9d225 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -80,6 +80,7 @@ static struct bgx *bgx_vnic[MAX_BGX_THUNDER];
static int lmac_count; /* Total no of LMACs in system */
static int bgx_xaui_check_link(struct lmac *lmac);
+static int bgx_lmac_sgmii_init(struct bgx *bgx, struct lmac *lmac);
/* Supported devices */
static const struct pci_device_id bgx_id_table[] = {
@@ -579,6 +580,14 @@ static void bgx_sgmii_change_link_state(struct lmac *lmac)
}
bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL, misc_ctl);
bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG, port_cfg);
+ if (!bgx->is_rgx) {
+ bgx_reg_modify(bgx, lmac->lmacid, BGX_GMP_PCS_MRX_CTL,
+ PCS_MRX_CTL_RESET);
+ if (bgx_poll_reg(bgx, lmac->lmacid, BGX_GMP_PCS_MRX_CTL,
+ PCS_MRX_CTL_RESET, true)) {
+ dev_err(&bgx->pdev->dev, "BGX PCS reset not completed\n");
+ }
+ }
/* Restore CMR config settings */
cmr_cfg |= (rx_en ? CMR_PKT_RX_EN : 0) | (tx_en ? CMR_PKT_TX_EN : 0);
@@ -592,35 +601,21 @@ static void bgx_lmac_handler(struct net_device *netdev)
{
struct lmac *lmac = container_of(netdev, struct lmac, netdev);
struct phy_device *phydev;
- int link_changed = 0;
if (!lmac)
return;
phydev = lmac->phydev;
- if (!phydev->link && lmac->last_link)
- link_changed = -1;
-
- if (phydev->link &&
- (lmac->last_duplex != phydev->duplex ||
- lmac->last_link != phydev->link ||
- lmac->last_speed != phydev->speed)) {
- link_changed = 1;
- }
+ if (phydev->link == 1)
+ lmac->link_up = true;
+ else
+ lmac->link_up = false;
lmac->last_link = phydev->link;
lmac->last_speed = phydev->speed;
lmac->last_duplex = phydev->duplex;
- if (!link_changed)
- return;
-
- if (link_changed > 0)
- lmac->link_up = true;
- else
- lmac->link_up = false;
-
if (lmac->is_sgmii)
bgx_sgmii_change_link_state(lmac);
else
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
index 1b25948c662b..c9e10fb84612 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
@@ -8,4 +8,6 @@ obj-$(CONFIG_OCTEONTX2_AF) += octeontx2_af.o
octeontx2_mbox-y := mbox.o
octeontx2_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
- rvu_reg.o rvu_npc.o rvu_debugfs.o
+ rvu_reg.o rvu_npc.o rvu_debugfs.o rvu_validation.o rvu_sso.o \
+ rvu_tim.o rvu_cpt.o rvu_npc_fs.o ptp.o rvu_ptp.o rvu_fixes.o \
+ rvu_sdp.o
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index a4e65da8d95b..a059575060af 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -14,22 +14,27 @@
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include "cgx.h"
+#include "rvu.h"
#define DRV_NAME "octeontx2-cgx"
#define DRV_STRING "Marvell OcteonTX2 CGX/MAC Driver"
+#define CGX_RX_STAT_GLOBAL_INDEX 9
+
/**
* struct lmac
* @wq_cmd_cmplt: waitq to keep the process blocked until cmd completion
* @cmd_lock: Lock to serialize the command interface
* @resp: command response
* @link_info: link related information
+ * @mac_to_index_bmap: MAC address to CGX table index mapping
* @event_cb: callback for linkchange events
* @event_cb_lock: lock for serializing callback with unregister
* @cmd_pend: flag set before new command is started
@@ -43,6 +48,7 @@ struct lmac {
struct mutex cmd_lock;
u64 resp;
struct cgx_link_user_info link_info;
+ struct rsrc_bmap mac_to_index_bmap;
struct cgx_event_cb event_cb;
spinlock_t event_cb_lock;
bool cmd_pend;
@@ -186,11 +192,100 @@ int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
cfg | CGX_DMAC_CAM_ADDR_ENABLE | ((u64)lmac_id << 49));
cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
- cfg |= CGX_DMAC_CTL0_CAM_ENABLE;
+ cfg |= (CGX_DMAC_CTL0_CAM_ENABLE | CGX_DMAC_BCAST_MODE |
+ CGX_DMAC_MCAST_MODE);
+ cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
+
+ return 0;
+}
+
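+/*
+ * Install an extra unicast DMAC filter: allocate a free slot in the
+ * per-LMAC bitmap and program the matching CAM entry. The CAM is shared
+ * by all LMACs, so the absolute entry is lmac_id * per-LMAC size + index.
+ */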
+int cgx_lmac_addr_add(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
+ int index, idx;
+ u64 cfg = 0;
+
+ /* Get available index where entry is to be installed */
+ idx = rvu_alloc_rsrc(&lmac->mac_to_index_bmap);
+ if (idx < 0)
+ return idx;
+
+ /* Calculate real index of CGX DMAC table */
+ index = lmac_id * lmac->mac_to_index_bmap.max + idx;
+
+ cfg = mac2u64(mac_addr);
+ cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
+ cfg |= ((u64)lmac_id << 49);
+ cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg);
+
+ cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+ cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE |
+ CGX_DMAC_CAM_ACCEPT);
+ cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
+
+ return idx;
+}
+EXPORT_SYMBOL(cgx_lmac_addr_add);
+
+int cgx_lmac_addr_reset(u8 cgx_id, u8 lmac_id)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
+ u8 index = 0;
+ u64 cfg;
+
+ /* Restore index 0 to its default init value as done during
+ * cgx_lmac_init
+ */
+ set_bit(0, lmac->mac_to_index_bmap.bmap);
+
+ index = lmac_id * lmac->mac_to_index_bmap.max + index;
+ cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), 0);
+
+ /* Reset CGXX_CMRX_RX_DMAC_CTL0 register to default state */
+ cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+ cfg &= ~CGX_DMAC_CAM_ACCEPT;
+ cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE);
cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
return 0;
}
+EXPORT_SYMBOL(cgx_lmac_addr_reset);
+
+int cgx_lmac_addr_del(u8 cgx_id, u8 lmac_id, u8 index)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
+
+ /* Validate the index */
+ if (index >= lmac->mac_to_index_bmap.max)
+ return -EINVAL;
+
+ /* Skip deletion for reserved index i.e. index 0 */
+ if (index == 0)
+ return 0;
+
+ rvu_free_rsrc(&lmac->mac_to_index_bmap, index);
+
+ index = lmac_id * lmac->mac_to_index_bmap.max + index;
+ cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), 0);
+
+ return 0;
+}
+EXPORT_SYMBOL(cgx_lmac_addr_del);
+
+int cgx_lmac_addr_max_entries_get(u8 cgx_id, u8 lmac_id)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
+
+ if (lmac)
+ return lmac->mac_to_index_bmap.max;
+
+ return 0;
+}
+EXPORT_SYMBOL(cgx_lmac_addr_max_entries_get);
u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id)
{
@@ -260,8 +355,8 @@ void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
if (enable) {
/* Enable promiscuous mode on LMAC */
cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
- cfg &= ~(CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE);
- cfg |= CGX_DMAC_BCAST_MODE;
+ cfg &= ~CGX_DMAC_CAM_ACCEPT;
+ cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE);
cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
cfg = cgx_read(cgx, 0,
@@ -316,6 +411,11 @@ int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat)
if (!cgx || lmac_id >= cgx->lmac_count)
return -ENODEV;
+
+ /* pass lmac as 0 for CGX_CMR_RX_STAT9-12 */
+ if (idx >= CGX_RX_STAT_GLOBAL_INDEX)
+ lmac_id = 0;
+
*rx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_STAT0 + (idx * 8));
return 0;
}
@@ -330,6 +430,92 @@ int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat)
return 0;
}
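+
+/*
+ * Clear all RX/TX statistics counters of an LMAC. RX_STAT9..12 are
+ * CGX-global, so they are addressed with lmac_id 0.
+ */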
+int cgx_stats_rst(void *cgxd, int lmac_id)
+{
+ struct cgx *cgx = cgxd;
+ int stat_id;
+
+ if (!cgx || lmac_id >= cgx->lmac_count)
+ return -ENODEV;
+
+ for (stat_id = 0; stat_id < CGX_RX_STATS_COUNT; stat_id++) {
+ if (stat_id >= CGX_RX_STAT_GLOBAL_INDEX)
+ /* pass lmac as 0 for CGX_CMR_RX_STAT9-12 */
+ cgx_write(cgx, 0,
+ (CGXX_CMRX_RX_STAT0 + (stat_id * 8)), 0);
+ else
+ cgx_write(cgx, lmac_id,
+ (CGXX_CMRX_RX_STAT0 + (stat_id * 8)), 0);
+ }
+
+ for (stat_id = 0; stat_id < CGX_TX_STATS_COUNT; stat_id++)
+ cgx_write(cgx, lmac_id, CGXX_CMRX_TX_STAT0 + (stat_id * 8), 0);
+
+ return 0;
+}
+
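+/*
+ * Number of per-lane FEC counter pairs to accumulate for this LMAC mode:
+ * none for SGMII-class modes, one for single-lane modes, four for 40G-R,
+ * and two (BASE-R) or one (RS-FEC) for 50G-R.
+ */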
+static int cgx_set_fec_stats_count(struct cgx_link_user_info *linfo)
+{
+ if (linfo->fec) {
+ switch (linfo->lmac_type_id) {
+ case LMAC_MODE_SGMII:
+ case LMAC_MODE_XAUI:
+ case LMAC_MODE_RXAUI:
+ case LMAC_MODE_QSGMII:
+ return 0;
+ case LMAC_MODE_10G_R:
+ case LMAC_MODE_25G_R:
+ case LMAC_MODE_100G_R:
+ case LMAC_MODE_USXGMII:
+ return 1;
+ case LMAC_MODE_40G_R:
+ return 4;
+ case LMAC_MODE_50G_R:
+ if (linfo->fec == OTX2_FEC_BASER)
+ return 2;
+ else
+ return 1;
+ }
+ }
+ return 0;
+}
+
+int cgx_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp)
+{
+ int stats, fec_stats_count = 0;
+ int corr_reg, uncorr_reg;
+ struct cgx *cgx = cgxd;
+
+ if (!cgx || lmac_id >= cgx->lmac_count)
+ return -ENODEV;
+ fec_stats_count =
+ cgx_set_fec_stats_count(&cgx->lmac_idmap[lmac_id]->link_info);
+ if (cgx->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_BASER) {
+ corr_reg = CGXX_SPUX_LNX_FEC_CORR_BLOCKS;
+ uncorr_reg = CGXX_SPUX_LNX_FEC_UNCORR_BLOCKS;
+ } else {
+ corr_reg = CGXX_SPUX_RSFEC_CORR;
+ uncorr_reg = CGXX_SPUX_RSFEC_UNCORR;
+ }
+ for (stats = 0; stats < fec_stats_count; stats++) {
+ rsp->fec_corr_blks +=
+ cgx_read(cgx, lmac_id, corr_reg + (stats * 8));
+ rsp->fec_uncorr_blks +=
+ cgx_read(cgx, lmac_id, uncorr_reg + (stats * 8));
+ }
+ return 0;
+}
+
+u64 cgx_get_lmac_tx_fifo_status(void *cgxd, int lmac_id)
+{
+ struct cgx *cgx = cgxd;
+
+ if (!cgx || lmac_id >= cgx->lmac_count)
+ return 0;
+ return cgx_read(cgx, lmac_id, CGXX_CMRX_TX_FIFO_LEN);
+}
+EXPORT_SYMBOL(cgx_get_lmac_tx_fifo_status);
+
int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable)
{
struct cgx *cgx = cgxd;
@@ -340,9 +526,9 @@ int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable)
cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
if (enable)
- cfg |= CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN;
+ cfg |= DATA_PKT_RX_EN | DATA_PKT_TX_EN;
else
- cfg &= ~(CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN);
+ cfg &= ~(DATA_PKT_RX_EN | DATA_PKT_TX_EN);
cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
return 0;
}
@@ -367,8 +553,21 @@ int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable)
return !!(last & DATA_PKT_TX_EN);
}
-int cgx_lmac_get_pause_frm(void *cgxd, int lmac_id,
- u8 *tx_pause, u8 *rx_pause)
+static int cgx_lmac_get_higig2_pause_frm_status(void *cgxd, int lmac_id,
+ u8 *tx_pause, u8 *rx_pause)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_HG2_CONTROL);
+
+ *rx_pause = !!(cfg & CGXX_SMUX_HG2_CONTROL_RX_ENABLE);
+ *tx_pause = !!(cfg & CGXX_SMUX_HG2_CONTROL_TX_ENABLE);
+ return 0;
+}
+
+int cgx_lmac_get_pause_frm_status(void *cgxd, int lmac_id,
+ u8 *tx_pause, u8 *rx_pause)
{
struct cgx *cgx = cgxd;
u64 cfg;
@@ -376,6 +575,10 @@ int cgx_lmac_get_pause_frm(void *cgxd, int lmac_id,
if (!cgx || lmac_id >= cgx->lmac_count)
return -ENODEV;
+ if (is_higig2_enabled(cgxd, lmac_id))
+ return cgx_lmac_get_higig2_pause_frm_status(cgxd, lmac_id,
+ tx_pause, rx_pause);
+
cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
*rx_pause = !!(cfg & CGX_SMUX_RX_FRM_CTL_CTL_BCK);
@@ -384,15 +587,52 @@ int cgx_lmac_get_pause_frm(void *cgxd, int lmac_id,
return 0;
}
-int cgx_lmac_set_pause_frm(void *cgxd, int lmac_id,
- u8 tx_pause, u8 rx_pause)
+static int cgx_lmac_enadis_higig2_pause_frm(void *cgxd, int lmac_id,
+ u8 tx_pause, u8 rx_pause)
{
struct cgx *cgx = cgxd;
u64 cfg;
- if (!cgx || lmac_id >= cgx->lmac_count)
- return -ENODEV;
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_HG2_CONTROL);
+ cfg &= ~CGXX_SMUX_HG2_CONTROL_RX_ENABLE;
+ cfg |= rx_pause ? CGXX_SMUX_HG2_CONTROL_RX_ENABLE : 0x0;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_HG2_CONTROL, cfg);
+
+ /* Forward PAUSE information to TX block */
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
+ cfg |= rx_pause ? CGX_SMUX_RX_FRM_CTL_CTL_BCK : 0x0;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_HG2_CONTROL);
+ cfg &= ~CGXX_SMUX_HG2_CONTROL_TX_ENABLE;
+ cfg |= tx_pause ? CGXX_SMUX_HG2_CONTROL_TX_ENABLE : 0x0;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_HG2_CONTROL, cfg);
+
+ /* allow intra packet hg2 generation */
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL);
+ cfg &= ~CGXX_SMUX_TX_PAUSE_PKT_HG2_INTRA_EN;
+ cfg |= tx_pause ? CGXX_SMUX_TX_PAUSE_PKT_HG2_INTRA_EN : 0x0;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL, cfg);
+
+ cfg = cgx_read(cgx, 0, CGXX_CMR_RX_OVR_BP);
+ if (tx_pause) {
+ cfg &= ~CGX_CMR_RX_OVR_BP_EN(lmac_id);
+ } else {
+ cfg |= CGX_CMR_RX_OVR_BP_EN(lmac_id);
+ cfg &= ~CGX_CMR_RX_OVR_BP_BP(lmac_id);
+ }
+ cgx_write(cgx, 0, CGXX_CMR_RX_OVR_BP, cfg);
+ return 0;
+}
+
+static int cgx_lmac_enadis_8023_pause_frm(void *cgxd, int lmac_id,
+ u8 tx_pause, u8 rx_pause)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
cfg |= rx_pause ? CGX_SMUX_RX_FRM_CTL_CTL_BCK : 0x0;
@@ -411,9 +651,58 @@ int cgx_lmac_set_pause_frm(void *cgxd, int lmac_id,
cfg &= ~CGX_CMR_RX_OVR_BP_BP(lmac_id);
}
cgx_write(cgx, 0, CGXX_CMR_RX_OVR_BP, cfg);
+
return 0;
}
+int cgx_lmac_enadis_pause_frm(void *cgxd, int lmac_id,
+ u8 tx_pause, u8 rx_pause)
+{
+ struct cgx *cgx = cgxd;
+
+ if (!cgx || lmac_id >= cgx->lmac_count)
+ return -ENODEV;
+
+ if (is_higig2_enabled(cgxd, lmac_id))
+ return cgx_lmac_enadis_higig2_pause_frm(cgxd, lmac_id,
+ tx_pause, rx_pause);
+
+ return cgx_lmac_enadis_8023_pause_frm(cgxd, lmac_id,
+ tx_pause, rx_pause);
+}
+EXPORT_SYMBOL(cgx_lmac_enadis_pause_frm);
+
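+/*
+ * Enable or disable hardware RX timestamp insertion on both the GMP
+ * (SGMII/QSGMII) and SMU (10G and above) receive paths.
+ */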
+void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ if (!cgx)
+ return;
+
+ if (enable) {
+ /* Enable inbound PTP timestamping */
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg |= CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg |= CGX_SMUX_RX_FRM_CTL_PTP_MODE;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ } else {
+ /* Disable inbound PTP stamping */
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg &= ~CGX_SMUX_RX_FRM_CTL_PTP_MODE;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ }
+}
+EXPORT_SYMBOL(cgx_lmac_ptp_config);
+
static void cgx_lmac_pause_frm_config(struct cgx *cgx, int lmac_id, bool enable)
{
u64 cfg;
@@ -443,6 +732,12 @@ static void cgx_lmac_pause_frm_config(struct cgx *cgx, int lmac_id, bool enable)
cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL,
cfg | (DEFAULT_PAUSE_TIME / 2));
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL);
+ cfg = FIELD_SET(HG2_INTRA_INTERVAL, (DEFAULT_PAUSE_TIME / 2),
+ cfg);
+ cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL,
+ cfg);
+
cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_TIME,
DEFAULT_PAUSE_TIME);
@@ -461,10 +756,18 @@ static void cgx_lmac_pause_frm_config(struct cgx *cgx, int lmac_id, bool enable)
cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_HG2_CONTROL);
+ cfg &= ~CGXX_SMUX_HG2_CONTROL_RX_ENABLE;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_HG2_CONTROL, cfg);
+
/* Disable pause frames transmission */
cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_HG2_CONTROL);
+ cfg &= ~CGXX_SMUX_HG2_CONTROL_TX_ENABLE;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_HG2_CONTROL, cfg);
}
}
@@ -553,6 +856,7 @@ static inline void cgx_link_usertable_init(void)
cgx_speed_mbps[CGX_LINK_25G] = 25000;
cgx_speed_mbps[CGX_LINK_40G] = 40000;
cgx_speed_mbps[CGX_LINK_50G] = 50000;
+ cgx_speed_mbps[CGX_LINK_80G] = 80000;
cgx_speed_mbps[CGX_LINK_100G] = 100000;
cgx_lmactype_string[LMAC_MODE_SGMII] = "SGMII";
@@ -567,6 +871,155 @@ static inline void cgx_link_usertable_init(void)
cgx_lmactype_string[LMAC_MODE_USXGMII] = "USXGMII";
}
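+
+/*
+ * Map an ethtool SPEED_* value onto the firmware CGX_LINK_* table index;
+ * 80 Gbps has no SPEED_* macro, so the raw value is matched instead.
+ */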
+static inline int cgx_link_usertable_index_map(int speed)
+{
+ switch (speed) {
+ case SPEED_10:
+ return CGX_LINK_10M;
+ case SPEED_100:
+ return CGX_LINK_100M;
+ case SPEED_1000:
+ return CGX_LINK_1G;
+ case SPEED_2500:
+ return CGX_LINK_2HG;
+ case SPEED_5000:
+ return CGX_LINK_5G;
+ case SPEED_10000:
+ return CGX_LINK_10G;
+ case SPEED_20000:
+ return CGX_LINK_20G;
+ case SPEED_25000:
+ return CGX_LINK_25G;
+ case SPEED_40000:
+ return CGX_LINK_40G;
+ case SPEED_50000:
+ return CGX_LINK_50G;
+ case 80000:
+ return CGX_LINK_80G;
+ case SPEED_100000:
+ return CGX_LINK_100G;
+ case SPEED_UNKNOWN:
+ return CGX_LINK_NONE;
+ }
+ return CGX_LINK_NONE;
+}
+
+static void set_mod_args(struct cgx_set_link_mode_args *args,
+ u32 speed, u8 duplex, u8 autoneg, u64 mode)
+{
+ /* firmware duplex encoding is reversed w.r.t. ethtool: 0 = full, 1 = half */
+ args->duplex = duplex;
+ args->speed = speed;
+ args->mode = mode;
+ args->an = autoneg;
+ args->ports = 0;
+}
+
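+/*
+ * Translate a single ethtool link-mode bit into firmware speed/duplex/AN
+ * and CGX_MODE_* arguments. Several standard ethtool bits are reused here
+ * for Marvell-specific modes (e.g. 10000baseLRM selects 80GAUI_C2C).
+ */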
+static void otx2_map_ethtool_link_modes(u64 bitmask,
+ struct cgx_set_link_mode_args *args)
+{
+ switch (bitmask) {
+ case BIT_ULL(ETHTOOL_LINK_MODE_10baseT_Half_BIT):
+ set_mod_args(args, 10, 1, 1, BIT_ULL(CGX_MODE_SGMII));
+ break;
+ case BIT_ULL(ETHTOOL_LINK_MODE_10baseT_Full_BIT):
+ set_mod_args(args, 10, 0, 1, BIT_ULL(CGX_MODE_SGMII));
+ break;
+ case BIT_ULL(ETHTOOL_LINK_MODE_100baseT_Half_BIT):
+ set_mod_args(args, 100, 1, 1, BIT_ULL(CGX_MODE_SGMII));
+ break;
+ case BIT_ULL(ETHTOOL_LINK_MODE_100baseT_Full_BIT):
+ set_mod_args(args, 100, 0, 1, BIT_ULL(CGX_MODE_SGMII));
+ break;
+ case BIT_ULL(ETHTOOL_LINK_MODE_1000baseT_Half_BIT):
+ set_mod_args(args, 1000, 1, 1, BIT_ULL(CGX_MODE_SGMII));
+ break;
+ case BIT_ULL(ETHTOOL_LINK_MODE_1000baseT_Full_BIT):
+ set_mod_args(args, 1000, 0, 1, BIT_ULL(CGX_MODE_SGMII));
+ break;
+ case BIT_ULL(ETHTOOL_LINK_MODE_1000baseX_Full_BIT):
+ set_mod_args(args, 1000, 0, 0, BIT_ULL(CGX_MODE_1000_BASEX));
+ break;
+ case BIT_ULL(ETHTOOL_LINK_MODE_10000baseT_Full_BIT):
+ set_mod_args(args, 1000, 0, 1, BIT_ULL(CGX_MODE_QSGMII));
+ break;
+ case BIT_ULL(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT):
+ set_mod_args(args, 10000, 0, 0, BIT_ULL(CGX_MODE_10G_C2C));
+ break;
+ case BIT_ULL(ETHTOOL_LINK_MODE_10000baseR_FEC_BIT):
+ set_mod_args(args, 10000, 0, 0, BIT_ULL(CGX_MODE_10G_C2M));
+ break;
+ case BIT_ULL(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT):
+ set_mod_args(args, 10000, 0, 1, BIT_ULL(CGX_MODE_10G_KR));
+ break;
+ case BIT_ULL(ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT):
+ set_mod_args(args, 20000, 0, 0, BIT_ULL(CGX_MODE_20G_C2C));
+ break;
+ case BIT_ULL(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT):
+ set_mod_args(args, 25000, 0, 0, BIT_ULL(CGX_MODE_25G_C2C));
+ break;
+ case BIT_ULL(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT):
+ set_mod_args(args, 25000, 0, 0, BIT_ULL(CGX_MODE_25G_C2M));
+ break;
+ case BIT_ULL(ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT):
+ set_mod_args(args, 25000, 0, 0, BIT_ULL(CGX_MODE_25G_2_C2C));
+ break;
+ case BIT_ULL(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT):
+ set_mod_args(args, 25000, 0, 1, BIT_ULL(CGX_MODE_25G_CR));
+ break;
+ case BIT_ULL(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT):
+ set_mod_args(args, 25000, 0, 1, BIT_ULL(CGX_MODE_25G_KR));
+ break;
+ case BIT_ULL(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT):
+ set_mod_args(args, 40000, 0, 0, BIT_ULL(CGX_MODE_40G_C2C));
+ break;
+ case BIT_ULL(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT):
+ set_mod_args(args, 40000, 0, 0, BIT_ULL(CGX_MODE_40G_C2M));
+ break;
+ case BIT_ULL(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT):
+ set_mod_args(args, 40000, 0, 1, BIT_ULL(CGX_MODE_40G_CR4));
+ break;
+ case BIT_ULL(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT):
+ set_mod_args(args, 40000, 0, 1, BIT_ULL(CGX_MODE_40G_KR4));
+ break;
+ case BIT_ULL(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT):
+ set_mod_args(args, 40000, 0, 0, BIT_ULL(CGX_MODE_40GAUI_C2C));
+ break;
+ case BIT_ULL(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT):
+ set_mod_args(args, 50000, 0, 0, BIT_ULL(CGX_MODE_50G_C2C));
+ break;
+ case BIT_ULL(ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT):
+ set_mod_args(args, 50000, 0, 0, BIT_ULL(CGX_MODE_50G_4_C2C));
+ break;
+ case BIT_ULL(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT):
+ set_mod_args(args, 50000, 0, 0, BIT_ULL(CGX_MODE_50G_C2M));
+ break;
+ case BIT_ULL(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT):
+ set_mod_args(args, 50000, 0, 1, BIT_ULL(CGX_MODE_50G_CR));
+ break;
+ case BIT_ULL(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT):
+ set_mod_args(args, 50000, 0, 1, BIT_ULL(CGX_MODE_50G_KR));
+ break;
+ case BIT_ULL(ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT):
+ set_mod_args(args, 80000, 0, 0, BIT_ULL(CGX_MODE_80GAUI_C2C));
+ break;
+ case BIT_ULL(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT):
+ set_mod_args(args, 100000, 0, 0, BIT_ULL(CGX_MODE_100G_C2C));
+ break;
+ case BIT_ULL(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT):
+ set_mod_args(args, 100000, 0, 0, BIT_ULL(CGX_MODE_100G_C2M));
+ break;
+ case BIT_ULL(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT):
+ set_mod_args(args, 100000, 0, 1, BIT_ULL(CGX_MODE_100G_CR4));
+ break;
+ case BIT_ULL(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT):
+ set_mod_args(args, 100000, 0, 1, BIT_ULL(CGX_MODE_100G_KR4));
+ break;
+ default:
+ set_mod_args(args, 0, 1, 0, BIT_ULL(CGX_MODE_MAX));
+ break;
+ }
+}
static inline void link_status_user_format(u64 lstat,
struct cgx_link_user_info *linfo,
struct cgx *cgx, u8 lmac_id)
@@ -576,6 +1029,9 @@ static inline void link_status_user_format(u64 lstat,
linfo->link_up = FIELD_GET(RESP_LINKSTAT_UP, lstat);
linfo->full_duplex = FIELD_GET(RESP_LINKSTAT_FDUPLEX, lstat);
linfo->speed = cgx_speed_mbps[FIELD_GET(RESP_LINKSTAT_SPEED, lstat)];
+ linfo->an = FIELD_GET(RESP_LINKSTAT_AN, lstat);
+ linfo->fec = FIELD_GET(RESP_LINKSTAT_FEC, lstat);
+ linfo->port = FIELD_GET(RESP_LINKSTAT_PORT, lstat);
linfo->lmac_type_id = cgx_get_lmac_type(cgx, lmac_id);
lmac_string = cgx_lmactype_string[linfo->lmac_type_id];
strncpy(linfo->lmac_type, lmac_string, LMACTYPE_STR_LEN - 1);
@@ -603,6 +1059,8 @@ static inline void cgx_link_change_handler(u64 lstat,
lmac->link_info = event.link_uinfo;
linfo = &lmac->link_info;
+ if (err_type == CGX_ERR_SPEED_CHANGE_INVALID)
+ return;
/* Ensure callback doesn't get unregistered until we finish it */
spin_lock(&lmac->event_cb_lock);
@@ -631,7 +1089,8 @@ static inline bool cgx_cmdresp_is_linkevent(u64 event)
id = FIELD_GET(EVTREG_ID, event);
if (id == CGX_CMD_LINK_BRING_UP ||
- id == CGX_CMD_LINK_BRING_DOWN)
+ id == CGX_CMD_LINK_BRING_DOWN ||
+ id == CGX_CMD_MODE_CHANGE)
return true;
else
return false;
@@ -675,7 +1134,7 @@ static irqreturn_t cgx_fwi_event_handler(int irq, void *data)
/* Release thread waiting for completion */
lmac->cmd_pend = false;
- wake_up_interruptible(&lmac->wq_cmd_cmplt);
+ wake_up(&lmac->wq_cmd_cmplt);
break;
case CGX_EVT_ASYNC:
if (cgx_event_is_linkevent(event))
@@ -746,6 +1205,94 @@ int cgx_get_fwdata_base(u64 *base)
return err;
}
+int cgx_set_fec(u64 fec, int cgx_id, int lmac_id)
+{
+ u64 req = 0, resp;
+ struct cgx *cgx;
+ int err = 0;
+
+ cgx = cgx_get_pdata(cgx_id);
+ if (!cgx)
+ return -ENXIO;
+
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_SET_FEC, req);
+ req = FIELD_SET(CMDSETFEC, fec, req);
+ err = cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
+ if (!err) {
+ cgx->lmac_idmap[lmac_id]->link_info.fec =
+ FIELD_GET(RESP_LINKSTAT_FEC, resp);
+ return cgx->lmac_idmap[lmac_id]->link_info.fec;
+ }
+ return err;
+}
+
+int cgx_set_phy_mod_type(int mod, void *cgxd, int lmac_id)
+{
+ struct cgx *cgx = cgxd;
+ u64 req = 0, resp;
+
+ if (!cgx)
+ return -ENODEV;
+
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_SET_PHY_MOD_TYPE, req);
+ req = FIELD_SET(CMDSETPHYMODTYPE, mod, req);
+ return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
+}
+
+int cgx_get_phy_mod_type(void *cgxd, int lmac_id)
+{
+ struct cgx *cgx = cgxd;
+ u64 req = 0, resp;
+ int err;
+
+ if (!cgx)
+ return -ENODEV;
+
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_PHY_MOD_TYPE, req);
+ err = cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
+ if (!err)
+ return FIELD_GET(RESP_GETPHYMODTYPE, resp);
+ return err;
+}
+
+int cgx_get_phy_fec_stats(void *cgxd, int lmac_id)
+{
+ struct cgx *cgx = cgxd;
+ u64 req = 0, resp;
+
+ if (!cgx)
+ return -ENODEV;
+
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_PHY_FEC_STATS, req);
+ return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
+}
+
+int cgx_set_link_mode(void *cgxd, struct cgx_set_link_mode_args args,
+ int cgx_id, int lmac_id)
+{
+ struct cgx *cgx = cgxd;
+ u64 req = 0, resp;
+ int err = 0;
+
+ if (!cgx)
+ return -ENODEV;
+
+ if (args.mode)
+ otx2_map_ethtool_link_modes(args.mode, &args);
+ if (!args.speed && args.duplex && !args.an)
+ return -EINVAL;
+
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_MODE_CHANGE, req);
+ req = FIELD_SET(CMDMODECHANGE_SPEED,
+ cgx_link_usertable_index_map(args.speed), req);
+ req = FIELD_SET(CMDMODECHANGE_DUPLEX, args.duplex, req);
+ req = FIELD_SET(CMDMODECHANGE_AN, args.an, req);
+ req = FIELD_SET(CMDMODECHANGE_PORT, args.ports, req);
+ req = FIELD_SET(CMDMODECHANGE_FLAGS, args.mode, req);
+ err = cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
+ return err;
+}
+
static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable)
{
u64 req = 0;
@@ -807,6 +1354,17 @@ static void cgx_lmac_linkup_work(struct work_struct *work)
}
}
+int cgx_set_link_state(void *cgxd, int lmac_id, bool enable)
+{
+ struct cgx *cgx = cgxd;
+
+ if (!cgx)
+ return -ENODEV;
+
+ return cgx_fwi_link_change(cgx, lmac_id, enable);
+}
+EXPORT_SYMBOL(cgx_set_link_state);
+
int cgx_lmac_linkup_start(void *cgxd)
{
struct cgx *cgx = cgxd;
@@ -819,6 +1377,37 @@ int cgx_lmac_linkup_start(void *cgxd)
return 0;
}
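+
+/*
+ * HiGig2 and 802.3 pause are mutually exclusive: switch the pause-frame
+ * configuration around the CGX_CMD_HIGIG firmware request accordingly.
+ */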
+void cgx_lmac_enadis_higig2(void *cgxd, int lmac_id, bool enable)
+{
+ struct cgx *cgx = cgxd;
+ u64 req = 0, resp;
+
+ /* disable 802.3 pause frames before enabling higig2 */
+ if (enable) {
+ cgx_lmac_enadis_8023_pause_frm(cgxd, lmac_id, false, false);
+ cgx_lmac_enadis_higig2_pause_frm(cgxd, lmac_id, true, true);
+ }
+
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_HIGIG, req);
+ req = FIELD_SET(CMDREG_ENABLE, enable, req);
+ cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
+
+ /* re-enable 802.3 pause frames once higig2 is disabled */
+ if (!enable) {
+ cgx_lmac_enadis_higig2_pause_frm(cgxd, lmac_id, false, false);
+ cgx_lmac_enadis_8023_pause_frm(cgxd, lmac_id, true, true);
+ }
+}
+
+bool is_higig2_enabled(void *cgxd, int lmac_id)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
+ return (cfg & CGXX_SMUX_TX_CTL_HIGIG_EN);
+}
+
static int cgx_lmac_init(struct cgx *cgx)
{
struct lmac *lmac;
@@ -838,6 +1427,15 @@ static int cgx_lmac_init(struct cgx *cgx)
sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i);
lmac->lmac_id = i;
lmac->cgx = cgx;
+ lmac->mac_to_index_bmap.max =
+ MAX_DMAC_ENTRIES_PER_CGX / cgx->lmac_count;
+ err = rvu_alloc_bitmap(&lmac->mac_to_index_bmap);
+ if (err)
+ return err;
+
+ /* Reserve first entry for default MAC address */
+ set_bit(0, lmac->mac_to_index_bmap.bmap);
+
init_waitqueue_head(&lmac->wq_cmd_cmplt);
mutex_init(&lmac->cmd_lock);
spin_lock_init(&lmac->event_cb_lock);
@@ -877,6 +1475,7 @@ static int cgx_lmac_exit(struct cgx *cgx)
if (!lmac)
continue;
free_irq(pci_irq_vector(cgx->pdev, CGX_LMAC_FWI + i * 9), lmac);
+ kfree(lmac->mac_to_index_bmap.bmap);
kfree(lmac->name);
kfree(lmac);
}
@@ -929,6 +1528,13 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
cgx->cgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24)
& CGX_ID_MASK;
+ /* Skip probe if CGX is not mapped to NIX */
+ if (!is_cgx_mapped_to_nix(pdev->subsystem_device, cgx->cgx_id)) {
+ dev_notice(dev, "skip cgx%d probe\n", cgx->cgx_id);
+ err = -ENOMEM;
+ goto err_free_irq_vectors;
+ }
+
/* init wq for processing linkup requests */
INIT_WORK(&cgx->cgx_cmd_work, cgx_lmac_linkup_work);
cgx->cgx_cmd_workq = alloc_workqueue("cgx_cmd_workq", 0, 0);
@@ -965,8 +1571,11 @@ static void cgx_remove(struct pci_dev *pdev)
{
struct cgx *cgx = pci_get_drvdata(pdev);
- cgx_lmac_exit(cgx);
- list_del(&cgx->cgx_list);
+ if (cgx) {
+ cgx_lmac_exit(cgx);
+ list_del(&cgx->cgx_list);
+ }
+
pci_free_irq_vectors(pdev);
pci_release_regions(pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
index 394f96591feb..6f323397b0dc 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -22,6 +22,7 @@
#define CGX_ID_MASK 0x7
#define MAX_LMAC_PER_CGX 4
+#define MAX_DMAC_ENTRIES_PER_CGX 32
#define CGX_FIFO_LEN 65536 /* 64K for both Rx & Tx */
#define CGX_OFFSET(x) ((x) * MAX_LMAC_PER_CGX)
@@ -47,28 +48,44 @@
#define CGX_DMAC_CAM_ADDR_ENABLE BIT_ULL(48)
#define CGXX_CMRX_RX_DMAC_CAM1 0x400
#define CGX_RX_DMAC_ADR_MASK GENMASK_ULL(47, 0)
+#define CGXX_CMRX_TX_FIFO_LEN 0x618
+#define CGXX_CMRX_TX_LMAC_IDLE BIT_ULL(14)
+#define CGXX_CMRX_TX_LMAC_E_IDLE BIT_ULL(29)
#define CGXX_CMRX_TX_STAT0 0x700
#define CGXX_SCRATCH0_REG 0x1050
#define CGXX_SCRATCH1_REG 0x1058
#define CGX_CONST 0x2000
#define CGXX_SPUX_CONTROL1 0x10000
+#define CGXX_SPUX_LNX_FEC_CORR_BLOCKS 0x10700
+#define CGXX_SPUX_LNX_FEC_UNCORR_BLOCKS 0x10800
+#define CGXX_SPUX_RSFEC_CORR 0x10088
+#define CGXX_SPUX_RSFEC_UNCORR 0x10090
+
#define CGXX_SPUX_CONTROL1_LBK BIT_ULL(14)
#define CGXX_GMP_PCS_MRX_CTL 0x30000
#define CGXX_GMP_PCS_MRX_CTL_LBK BIT_ULL(14)
#define CGXX_SMUX_RX_FRM_CTL 0x20020
#define CGX_SMUX_RX_FRM_CTL_CTL_BCK BIT_ULL(3)
+#define CGX_SMUX_RX_FRM_CTL_PTP_MODE BIT_ULL(12)
#define CGXX_GMP_GMI_RXX_FRM_CTL 0x38028
#define CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK BIT_ULL(3)
+#define CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE BIT_ULL(12)
#define CGXX_SMUX_TX_CTL 0x20178
+#define CGXX_SMUX_TX_CTL_HIGIG_EN BIT_ULL(8)
#define CGXX_SMUX_TX_PAUSE_PKT_TIME 0x20110
#define CGXX_SMUX_TX_PAUSE_PKT_INTERVAL 0x20120
+#define CGXX_SMUX_TX_PAUSE_PKT_HG2_INTRA_EN BIT_ULL(32)
+#define HG2_INTRA_INTERVAL GENMASK_ULL(31, 16)
#define CGXX_GMP_GMI_TX_PAUSE_PKT_TIME 0x38230
#define CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL 0x38248
#define CGX_SMUX_TX_CTL_L2P_BP_CONV BIT_ULL(7)
#define CGXX_CMR_RX_OVR_BP 0x130
#define CGX_CMR_RX_OVR_BP_EN(X) BIT_ULL(((X) + 8))
#define CGX_CMR_RX_OVR_BP_BP(X) BIT_ULL(((X) + 4))
+#define CGXX_SMUX_HG2_CONTROL 0x20210
+#define CGXX_SMUX_HG2_CONTROL_TX_ENABLE BIT_ULL(18)
+#define CGXX_SMUX_HG2_CONTROL_RX_ENABLE BIT_ULL(17)
#define CGX_COMMAND_REG CGXX_SCRATCH1_REG
#define CGX_EVENT_REG CGXX_SCRATCH0_REG
@@ -124,10 +141,17 @@ int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id);
int cgx_lmac_evh_unregister(void *cgxd, int lmac_id);
int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat);
int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat);
+int cgx_stats_rst(void *cgxd, int lmac_id);
+int cgx_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp);
+u64 cgx_get_lmac_tx_fifo_status(void *cgxd, int lmac_id);
int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable);
int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable);
int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr);
+int cgx_lmac_addr_reset(u8 cgx_id, u8 lmac_id);
u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id);
+int cgx_lmac_addr_add(u8 cgx_id, u8 lmac_id, u8 *mac_addr);
+int cgx_lmac_addr_del(u8 cgx_id, u8 lmac_id, u8 index);
+int cgx_lmac_addr_max_entries_get(u8 cgx_id, u8 lmac_id);
void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable);
void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable);
int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable);
@@ -135,8 +159,19 @@ int cgx_get_link_info(void *cgxd, int lmac_id,
struct cgx_link_user_info *linfo);
int cgx_lmac_linkup_start(void *cgxd);
int cgx_get_fwdata_base(u64 *base);
-int cgx_lmac_get_pause_frm(void *cgxd, int lmac_id,
- u8 *tx_pause, u8 *rx_pause);
-int cgx_lmac_set_pause_frm(void *cgxd, int lmac_id,
- u8 tx_pause, u8 rx_pause);
+int cgx_set_fec(u64 fec, int cgx_id, int lmac_id);
+int cgx_set_link_mode(void *cgxd, struct cgx_set_link_mode_args args,
+ int cgx_id, int lmac_id);
+int cgx_lmac_get_pause_frm_status(void *cgxd, int lmac_id,
+ u8 *tx_pause, u8 *rx_pause);
+int cgx_lmac_enadis_pause_frm(void *cgxd, int lmac_id,
+ u8 tx_pause, u8 rx_pause);
+void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable);
+int cgx_set_link_state(void *cgxd, int lmac_id, bool enable);
+int cgx_set_phy_mod_type(int mod, void *cgxd, int lmac_id);
+int cgx_get_phy_mod_type(void *cgxd, int lmac_id);
+
+int cgx_get_phy_fec_stats(void *cgxd, int lmac_id);
+void cgx_lmac_enadis_higig2(void *cgxd, int lmac_id, bool enable);
+bool is_higig2_enabled(void *cgxd, int lmac_id);
#endif /* CGX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
index c3702fa58b6b..90bb6c6fc7a0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
@@ -43,7 +43,13 @@ enum cgx_error_type {
CGX_ERR_TRAINING_FAIL,
CGX_ERR_RX_EQU_FAIL,
CGX_ERR_SPUX_BER_FAIL,
- CGX_ERR_SPUX_RSFEC_ALGN_FAIL, /* = 22 */
+ CGX_ERR_SPUX_RSFEC_ALGN_FAIL,
+ CGX_ERR_SPUX_MARKER_LOCK_FAIL,
+ CGX_ERR_SET_FEC_INVALID,
+ CGX_ERR_SET_FEC_FAIL,
+ CGX_ERR_MODULE_INVALID,
+ CGX_ERR_MODULE_NOT_PRESENT,
+ CGX_ERR_SPEED_CHANGE_INVALID,
};
/* LINK speed types */
@@ -59,10 +65,42 @@ enum cgx_link_speed {
CGX_LINK_25G,
CGX_LINK_40G,
CGX_LINK_50G,
+ CGX_LINK_80G,
CGX_LINK_100G,
CGX_LINK_SPEED_MAX,
};
+enum CGX_MODE_ {
+ CGX_MODE_SGMII,
+ CGX_MODE_1000_BASEX,
+ CGX_MODE_QSGMII,
+ CGX_MODE_10G_C2C,
+ CGX_MODE_10G_C2M,
+ CGX_MODE_10G_KR,
+ CGX_MODE_20G_C2C,
+ CGX_MODE_25G_C2C,
+ CGX_MODE_25G_C2M,
+ CGX_MODE_25G_2_C2C,
+ CGX_MODE_25G_CR,
+ CGX_MODE_25G_KR,
+ CGX_MODE_40G_C2C,
+ CGX_MODE_40G_C2M,
+ CGX_MODE_40G_CR4,
+ CGX_MODE_40G_KR4,
+ CGX_MODE_40GAUI_C2C,
+ CGX_MODE_50G_C2C,
+ CGX_MODE_50G_C2M,
+ CGX_MODE_50G_4_C2C,
+ CGX_MODE_50G_CR,
+ CGX_MODE_50G_KR,
+ CGX_MODE_80GAUI_C2C,
+ CGX_MODE_100G_C2C,
+ CGX_MODE_100G_C2M,
+ CGX_MODE_100G_CR4,
+ CGX_MODE_100G_KR4,
+ CGX_MODE_MAX /* = 27 */
+};
+
/* REQUEST ID types. Input to firmware */
enum cgx_cmd_id {
CGX_CMD_NONE,
@@ -75,12 +113,25 @@ enum cgx_cmd_id {
CGX_CMD_INTERNAL_LBK,
CGX_CMD_EXTERNAL_LBK,
CGX_CMD_HIGIG,
- CGX_CMD_LINK_STATE_CHANGE,
+ CGX_CMD_LINK_STAT_CHANGE,
CGX_CMD_MODE_CHANGE, /* hot plug support */
CGX_CMD_INTF_SHUTDOWN,
CGX_CMD_GET_MKEX_PRFL_SIZE,
CGX_CMD_GET_MKEX_PRFL_ADDR,
CGX_CMD_GET_FWD_BASE, /* get base address of shared FW data */
+ CGX_CMD_GET_LINK_MODES, /* Supported Link Modes */
+ CGX_CMD_SET_LINK_MODE,
+ CGX_CMD_GET_SUPPORTED_FEC,
+ CGX_CMD_SET_FEC,
+ CGX_CMD_GET_AN,
+ CGX_CMD_SET_AN,
+ CGX_CMD_GET_ADV_LINK_MODES,
+ CGX_CMD_GET_ADV_FEC,
+ CGX_CMD_GET_PHY_MOD_TYPE, /* line-side modulation type: NRZ or PAM4 */
+ CGX_CMD_SET_PHY_MOD_TYPE,
+ CGX_CMD_PRBS,
+ CGX_CMD_DISPLAY_EYE,
+ CGX_CMD_GET_PHY_FEC_STATS,
};
/* async event ids */
@@ -171,13 +222,19 @@ struct cgx_lnk_sts {
uint64_t full_duplex:1;
uint64_t speed:4; /* cgx_link_speed */
uint64_t err_type:10;
- uint64_t reserved2:39;
+ uint64_t an:1; /* AN supported or not */
+ uint64_t fec:2; /* FEC type if enabled, if not 0 */
+ uint64_t port:8;
+ uint64_t reserved2:28;
};
#define RESP_LINKSTAT_UP GENMASK_ULL(9, 9)
#define RESP_LINKSTAT_FDUPLEX GENMASK_ULL(10, 10)
#define RESP_LINKSTAT_SPEED GENMASK_ULL(14, 11)
#define RESP_LINKSTAT_ERRTYPE GENMASK_ULL(24, 15)
+#define RESP_LINKSTAT_AN GENMASK_ULL(25, 25)
+#define RESP_LINKSTAT_FEC GENMASK_ULL(27, 26)
+#define RESP_LINKSTAT_PORT GENMASK_ULL(35, 28)
/* scratchx(1) CSR used for non-secure SW->ATF communication
* This CSR acts as a command register
@@ -198,5 +255,18 @@ struct cgx_lnk_sts {
#define CMDLINKCHANGE_LINKUP BIT_ULL(8)
#define CMDLINKCHANGE_FULLDPLX BIT_ULL(9)
#define CMDLINKCHANGE_SPEED GENMASK_ULL(13, 10)
+#define CMDSETFEC GENMASK_ULL(9, 8)
+/* command argument to be passed for cmd ID - CGX_CMD_MODE_CHANGE */
+#define CMDMODECHANGE_SPEED GENMASK_ULL(11, 8)
+#define CMDMODECHANGE_DUPLEX GENMASK_ULL(12, 12)
+#define CMDMODECHANGE_AN GENMASK_ULL(13, 13)
+#define CMDMODECHANGE_PORT GENMASK_ULL(21, 14)
+#define CMDMODECHANGE_FLAGS GENMASK_ULL(63, 22)
+
+/* command argument to be passed for cmd ID - CGX_CMD_SET_PHY_MOD_TYPE */
+#define CMDSETPHYMODTYPE GENMASK_ULL(8, 8)
+
+/* response to cmd ID - CGX_CMD_GET_PHY_MOD_TYPE */
+#define RESP_GETPHYMODTYPE GENMASK_ULL(9, 9)
#endif /* __CGX_FW_INTF_H__ */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h
index f48eb66ed021..1f4c5ef9bbdd 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h
@@ -162,6 +162,8 @@ enum nix_scheduler {
#define NIX_RX_ACTIONOP_UCAST_IPSEC (0x2ull)
#define NIX_RX_ACTIONOP_MCAST (0x3ull)
#define NIX_RX_ACTIONOP_RSS (0x4ull)
+/* Use action set in default unicast entry */
+#define NIX_RX_ACTION_DEFAULT (0xfull)
/* NIX TX action operation*/
#define NIX_TX_ACTIONOP_DROP (0x0ull)
@@ -179,12 +181,15 @@ enum nix_scheduler {
#define NIX_INTF_TYPE_CGX 0
#define NIX_INTF_TYPE_LBK 1
+#define NIX_INTF_TYPE_SDP 2
#define MAX_LMAC_PKIND 12
#define NIX_LINK_CGX_LMAC(a, b) (0 + 4 * (a) + (b))
#define NIX_LINK_LBK(a) (12 + (a))
#define NIX_CHAN_CGX_LMAC_CHX(a, b, c) (0x800 + 0x100 * (a) + 0x10 * (b) + (c))
#define NIX_CHAN_LBK_CHX(a, b) (0 + 0x100 * (a) + (b))
+#define NIX_CHAN_SDP_CH_START (0x700ull)
+#define NIX_CHAN_SDP_CHX(a) (NIX_CHAN_SDP_CH_START + (a))
/* NIX LSO format indices.
* As of now TSO is the only one using, so statically assigning indices.
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 6dfd0f90cd70..7bd9db85fca4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -86,7 +86,7 @@ struct mbox_msghdr {
#define OTX2_MBOX_REQ_SIG (0xdead)
#define OTX2_MBOX_RSP_SIG (0xbeef)
u16 sig; /* Signature, for validating corrupted msgs */
-#define OTX2_MBOX_VERSION (0x0001)
+#define OTX2_MBOX_VERSION (0x0004)
u16 ver; /* Version of msg's structure for this ID */
u16 next_msgoff; /* Offset of next msg within mailbox region */
int rc; /* Msg process'ed response code */
@@ -125,9 +125,12 @@ static inline struct mbox_msghdr *otx2_mbox_alloc_msg(struct otx2_mbox *mbox,
M(READY, 0x001, ready, msg_req, ready_msg_rsp) \
M(ATTACH_RESOURCES, 0x002, attach_resources, rsrc_attach, msg_rsp) \
M(DETACH_RESOURCES, 0x003, detach_resources, rsrc_detach, msg_rsp) \
+M(FREE_RSRC_CNT, 0x004, free_rsrc_cnt, msg_req, free_rsrcs_rsp) \
M(MSIX_OFFSET, 0x005, msix_offset, msg_req, msix_offset_rsp) \
M(VF_FLR, 0x006, vf_flr, msg_req, msg_rsp) \
+M(PTP_OP, 0x007, ptp_op, ptp_req, ptp_rsp) \
M(GET_HW_CAP, 0x008, get_hw_cap, msg_req, get_hw_cap_rsp) \
+M(NDC_SYNC_OP, 0x009, ndc_sync_op, ndc_sync_op, msg_rsp) \
/* CGX mbox IDs (range 0x200 - 0x3FF) */ \
M(CGX_START_RXTX, 0x200, cgx_start_rxtx, msg_req, msg_rsp) \
M(CGX_STOP_RXTX, 0x201, cgx_stop_rxtx, msg_req, msg_rsp) \
@@ -143,8 +146,29 @@ M(CGX_STOP_LINKEVENTS, 0x208, cgx_stop_linkevents, msg_req, msg_rsp) \
M(CGX_GET_LINKINFO, 0x209, cgx_get_linkinfo, msg_req, cgx_link_info_msg) \
M(CGX_INTLBK_ENABLE, 0x20A, cgx_intlbk_enable, msg_req, msg_rsp) \
M(CGX_INTLBK_DISABLE, 0x20B, cgx_intlbk_disable, msg_req, msg_rsp) \
+M(CGX_PTP_RX_ENABLE, 0x20C, cgx_ptp_rx_enable, msg_req, msg_rsp) \
+M(CGX_PTP_RX_DISABLE, 0x20D, cgx_ptp_rx_disable, msg_req, msg_rsp) \
M(CGX_CFG_PAUSE_FRM, 0x20E, cgx_cfg_pause_frm, cgx_pause_frm_cfg, \
cgx_pause_frm_cfg) \
+M(CGX_FW_DATA_GET, 0x20F, cgx_get_aux_link_info, msg_req, cgx_fw_data) \
+M(CGX_FEC_SET, 0x210, cgx_set_fec_param, fec_mode, fec_mode) \
+M(CGX_MAC_ADDR_ADD, 0x211, cgx_mac_addr_add, cgx_mac_addr_add_req, \
+ cgx_mac_addr_add_rsp) \
+M(CGX_MAC_ADDR_DEL, 0x212, cgx_mac_addr_del, cgx_mac_addr_del_req, \
+ msg_rsp) \
+M(CGX_MAC_MAX_ENTRIES_GET, 0x213, cgx_mac_max_entries_get, msg_req, \
+ cgx_max_dmac_entries_get_rsp) \
+M(CGX_SET_LINK_STATE, 0x214, cgx_set_link_state, \
+ cgx_set_link_state_msg, msg_rsp) \
+M(CGX_GET_PHY_MOD_TYPE, 0x215, cgx_get_phy_mod_type, msg_req, \
+ cgx_phy_mod_type) \
+M(CGX_SET_PHY_MOD_TYPE, 0x216, cgx_set_phy_mod_type, cgx_phy_mod_type, \
+ msg_rsp) \
+M(CGX_FEC_STATS, 0x217, cgx_fec_stats, msg_req, cgx_fec_stats_rsp) \
+M(CGX_SET_LINK_MODE, 0x218, cgx_set_link_mode, cgx_set_link_mode_req,\
+ cgx_set_link_mode_rsp) \
+M(CGX_GET_PHY_FEC_STATS, 0x219, cgx_get_phy_fec_stats, msg_req, msg_rsp) \
+M(CGX_STATS_RST, 0x21A, cgx_stats_rst, msg_req, msg_rsp) \
/* NPA mbox IDs (range 0x400 - 0x5FF) */ \
M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, \
npa_lf_alloc_req, npa_lf_alloc_rsp) \
@@ -152,8 +176,42 @@ M(NPA_LF_FREE, 0x401, npa_lf_free, msg_req, msg_rsp) \
M(NPA_AQ_ENQ, 0x402, npa_aq_enq, npa_aq_enq_req, npa_aq_enq_rsp) \
M(NPA_HWCTX_DISABLE, 0x403, npa_hwctx_disable, hwctx_disable_req, msg_rsp)\
/* SSO/SSOW mbox IDs (range 0x600 - 0x7FF) */ \
+M(SSO_LF_ALLOC, 0x600, sso_lf_alloc, \
+ sso_lf_alloc_req, sso_lf_alloc_rsp) \
+M(SSO_LF_FREE, 0x601, sso_lf_free, \
+ sso_lf_free_req, msg_rsp) \
+M(SSOW_LF_ALLOC, 0x602, ssow_lf_alloc, \
+ ssow_lf_alloc_req, msg_rsp) \
+M(SSOW_LF_FREE, 0x603, ssow_lf_free, \
+ ssow_lf_free_req, msg_rsp) \
+M(SSO_HW_SETCONFIG, 0x604, sso_hw_setconfig, \
+ sso_hw_setconfig, msg_rsp) \
+M(SSO_GRP_SET_PRIORITY, 0x605, sso_grp_set_priority, \
+ sso_grp_priority, msg_rsp) \
+M(SSO_GRP_GET_PRIORITY, 0x606, sso_grp_get_priority, \
+ sso_info_req, sso_grp_priority) \
+M(SSO_WS_CACHE_INV, 0x607, sso_ws_cache_inv, msg_req, msg_rsp) \
+M(SSO_GRP_QOS_CONFIG, 0x608, sso_grp_qos_config, sso_grp_qos_cfg, msg_rsp)\
+M(SSO_GRP_GET_STATS, 0x609, sso_grp_get_stats, sso_info_req, sso_grp_stats)\
+M(SSO_HWS_GET_STATS, 0x610, sso_hws_get_stats, sso_info_req, sso_hws_stats)\
/* TIM mbox IDs (range 0x800 - 0x9FF) */ \
+M(TIM_LF_ALLOC, 0x800, tim_lf_alloc, \
+ tim_lf_alloc_req, tim_lf_alloc_rsp) \
+M(TIM_LF_FREE, 0x801, tim_lf_free, tim_ring_req, msg_rsp) \
+M(TIM_CONFIG_RING, 0x802, tim_config_ring, tim_config_req, msg_rsp)\
+M(TIM_ENABLE_RING, 0x803, tim_enable_ring, tim_ring_req, tim_enable_rsp)\
+M(TIM_DISABLE_RING, 0x804, tim_disable_ring, tim_ring_req, msg_rsp) \
/* CPT mbox IDs (range 0xA00 - 0xBFF) */ \
+M(CPT_LF_ALLOC, 0xA00, cpt_lf_alloc, cpt_lf_alloc_req_msg, \
+ cpt_lf_alloc_rsp_msg) \
+M(CPT_LF_FREE, 0xA01, cpt_lf_free, msg_req, msg_rsp) \
+M(CPT_RD_WR_REGISTER, 0xA02, cpt_rd_wr_register, cpt_rd_wr_reg_msg, \
+ cpt_rd_wr_reg_msg) \
+M(CPT_SET_CRYPTO_GRP, 0xA03, cpt_set_crypto_grp, \
+ cpt_set_crypto_grp_req_msg, \
+ cpt_set_crypto_grp_req_msg) \
+M(CPT_INLINE_IPSEC_CFG, 0xA04, cpt_inline_ipsec_cfg, \
+ cpt_inline_ipsec_cfg_msg, msg_rsp) \
/* NPC mbox IDs (range 0x6000 - 0x7FFF) */ \
M(NPC_MCAM_ALLOC_ENTRY, 0x6000, npc_mcam_alloc_entry, npc_mcam_alloc_entry_req,\
npc_mcam_alloc_entry_rsp) \
@@ -184,10 +242,19 @@ M(NPC_MCAM_ALLOC_AND_WRITE_ENTRY, 0x600b, npc_mcam_alloc_and_write_entry, \
npc_mcam_alloc_and_write_entry_rsp) \
M(NPC_GET_KEX_CFG, 0x600c, npc_get_kex_cfg, \
msg_req, npc_get_kex_cfg_rsp) \
+M(NPC_INSTALL_FLOW, 0x600d, npc_install_flow, \
+ npc_install_flow_req, npc_install_flow_rsp) \
+M(NPC_DELETE_FLOW, 0x600e, npc_delete_flow, \
+ npc_delete_flow_req, msg_rsp) \
+M(NPC_MCAM_READ_ENTRY, 0x600f, npc_mcam_read_entry, \
+ npc_mcam_read_entry_req, \
+ npc_mcam_read_entry_rsp) \
+M(NPC_SET_PKIND, 0x6010, npc_set_pkind, \
+ npc_set_pkind, msg_rsp) \
/* NIX mbox IDs (range 0x8000 - 0xFFFF) */ \
M(NIX_LF_ALLOC, 0x8000, nix_lf_alloc, \
nix_lf_alloc_req, nix_lf_alloc_rsp) \
-M(NIX_LF_FREE, 0x8001, nix_lf_free, msg_req, msg_rsp) \
+M(NIX_LF_FREE, 0x8001, nix_lf_free, nix_lf_free_req, msg_rsp) \
M(NIX_AQ_ENQ, 0x8002, nix_aq_enq, nix_aq_enq_req, nix_aq_enq_rsp) \
M(NIX_HWCTX_DISABLE, 0x8003, nix_hwctx_disable, \
hwctx_disable_req, msg_rsp) \
@@ -196,7 +263,8 @@ M(NIX_TXSCH_ALLOC, 0x8004, nix_txsch_alloc, \
M(NIX_TXSCH_FREE, 0x8005, nix_txsch_free, nix_txsch_free_req, msg_rsp) \
M(NIX_TXSCHQ_CFG, 0x8006, nix_txschq_cfg, nix_txschq_config, msg_rsp) \
M(NIX_STATS_RST, 0x8007, nix_stats_rst, msg_req, msg_rsp) \
-M(NIX_VTAG_CFG, 0x8008, nix_vtag_cfg, nix_vtag_config, msg_rsp) \
+M(NIX_VTAG_CFG, 0x8008, nix_vtag_cfg, nix_vtag_config, \
+ nix_vtag_config_rsp) \
M(NIX_RSS_FLOWKEY_CFG, 0x8009, nix_rss_flowkey_cfg, \
nix_rss_flowkey_cfg, \
nix_rss_flowkey_cfg_rsp) \
@@ -212,15 +280,22 @@ M(NIX_SET_RX_CFG, 0x8010, nix_set_rx_cfg, nix_rx_cfg, msg_rsp) \
M(NIX_LSO_FORMAT_CFG, 0x8011, nix_lso_format_cfg, \
nix_lso_format_cfg, \
nix_lso_format_cfg_rsp) \
-M(NIX_RXVLAN_ALLOC, 0x8012, nix_rxvlan_alloc, msg_req, msg_rsp) \
+M(NIX_LF_PTP_TX_ENABLE, 0x8013, nix_lf_ptp_tx_enable, msg_req, msg_rsp) \
+M(NIX_LF_PTP_TX_DISABLE, 0x8014, nix_lf_ptp_tx_disable, msg_req, msg_rsp) \
+M(NIX_SET_VLAN_TPID, 0x8015, nix_set_vlan_tpid, nix_set_vlan_tpid, msg_rsp) \
M(NIX_BP_ENABLE, 0x8016, nix_bp_enable, nix_bp_cfg_req, \
nix_bp_cfg_rsp) \
M(NIX_BP_DISABLE, 0x8017, nix_bp_disable, nix_bp_cfg_req, msg_rsp) \
M(NIX_GET_MAC_ADDR, 0x8018, nix_get_mac_addr, msg_req, nix_get_mac_addr_rsp) \
+M(NIX_INLINE_IPSEC_CFG, 0x8019, nix_inline_ipsec_cfg, \
+ nix_inline_ipsec_cfg, msg_rsp) \
+M(NIX_INLINE_IPSEC_LF_CFG, 0x801a, nix_inline_ipsec_lf_cfg, \
+ nix_inline_ipsec_lf_cfg, msg_rsp)
/* Messages initiated by AF (range 0xC00 - 0xDFF) */
#define MBOX_UP_CGX_MESSAGES \
-M(CGX_LINK_EVENT, 0xC00, cgx_link_event, cgx_link_info_msg, msg_rsp)
+M(CGX_LINK_EVENT, 0xC00, cgx_link_event, cgx_link_info_msg, msg_rsp) \
+M(CGX_PTP_RX_INFO, 0xC01, cgx_ptp_rx_info, cgx_ptp_rx_info_msg, msg_rsp)
enum {
#define M(_name, _id, _1, _2, _3) MBOX_MSG_ ## _name = _id,
@@ -293,6 +368,21 @@ struct rsrc_detach {
u8 cptlfs:1;
};
+/*
+ * Number of resources available to the caller.
+ * In reply to MBOX_MSG_FREE_RSRC_CNT.
+ */
+struct free_rsrcs_rsp {
+ struct mbox_msghdr hdr;
+ u16 schq[NIX_TXSCH_LVL_CNT];
+ u16 sso;
+ u16 tim;
+ u16 ssow;
+ u16 cpt;
+ u8 npa;
+ u8 nix;
+};
+
#define MSIX_VECTOR_INVALID 0xFFFF
#define MAX_RVU_BLKLF_CNT 256
@@ -326,6 +416,11 @@ struct cgx_stats_rsp {
u64 tx_stats[CGX_TX_STATS_COUNT];
};
+struct cgx_fec_stats_rsp {
+ struct mbox_msghdr hdr;
+ u64 fec_corr_blks;
+ u64 fec_uncorr_blks;
+};
/* Structure for requesting the operation for
* setting/getting mac address in the CGX interface
*/
@@ -334,11 +429,46 @@ struct cgx_mac_addr_set_or_get {
u8 mac_addr[ETH_ALEN];
};
+/* Structure for requesting the operation to
+ * add DMAC filter entry into CGX interface
+ */
+struct cgx_mac_addr_add_req {
+ struct mbox_msghdr hdr;
+ u8 mac_addr[ETH_ALEN];
+};
+
+/* Structure for the response to the operation that
+ * adds a DMAC filter entry to the CGX interface
+ */
+struct cgx_mac_addr_add_rsp {
+ struct mbox_msghdr hdr;
+ u8 index;
+};
+
+/* Structure for requesting the operation to
+ * delete DMAC filter entry from CGX interface
+ */
+struct cgx_mac_addr_del_req {
+ struct mbox_msghdr hdr;
+ u8 index;
+};
+
+/* Structure for the response to the query for the
+ * maximum number of supported DMAC filter entries
+ */
+struct cgx_max_dmac_entries_get_rsp {
+ struct mbox_msghdr hdr;
+ u8 max_dmac_filters;
+};
+
struct cgx_link_user_info {
uint64_t link_up:1;
uint64_t full_duplex:1;
uint64_t lmac_type_id:4;
uint64_t speed:20; /* speed in Mbps */
+ uint64_t an:1; /* AN supported or not */
+ uint64_t fec:2; /* FEC type if enabled else 0 */
+ uint64_t port:8;
#define LMACTYPE_STR_LEN 16
char lmac_type[LMACTYPE_STR_LEN];
};
@@ -348,6 +478,11 @@ struct cgx_link_info_msg {
struct cgx_link_user_info link_info;
};
+struct cgx_ptp_rx_info_msg {
+ struct mbox_msghdr hdr;
+ u8 ptp_en;
+};
+
struct cgx_pause_frm_cfg {
struct mbox_msghdr hdr;
u8 set;
@@ -357,6 +492,97 @@ struct cgx_pause_frm_cfg {
u8 tx_pause;
};
+struct sfp_eeprom_s {
+#define SFP_EEPROM_SIZE 256
+ u16 sff_id;
+ u8 buf[SFP_EEPROM_SIZE];
+ u64 reserved;
+};
+
+enum fec_type {
+ OTX2_FEC_NONE,
+ OTX2_FEC_BASER,
+ OTX2_FEC_RS,
+};
+
+struct phy_s {
+ struct {
+ u64 can_change_mod_type : 1;
+ u64 mod_type : 1;
+ u64 has_fec_stats : 1;
+ } misc;
+ struct fec_stats_s {
+ u32 rsfec_corr_cws;
+ u32 rsfec_uncorr_cws;
+ u32 brfec_corr_blks;
+ u32 brfec_uncorr_blks;
+ } fec_stats;
+};
+
+struct cgx_lmac_fwdata_s {
+ u16 rw_valid;
+ u64 supported_fec;
+ u64 supported_an;
+ u64 supported_link_modes;
+ /* only applicable if AN is supported */
+ u64 advertised_fec;
+ u64 advertised_link_modes;
+ /* Only applicable if SFP/QSFP slot is present */
+ struct sfp_eeprom_s sfp_eeprom;
+ struct phy_s phy;
+#define LMAC_FWDATA_RESERVED_MEM 1021
+ u64 reserved[LMAC_FWDATA_RESERVED_MEM];
+};
+
+struct cgx_fw_data {
+ struct mbox_msghdr hdr;
+ struct cgx_lmac_fwdata_s fwdata;
+};
+
+struct fec_mode {
+ struct mbox_msghdr hdr;
+ int fec;
+};
+
+struct cgx_set_link_state_msg {
+ struct mbox_msghdr hdr;
+ u8 enable; /* '1' for link up, '0' for link down */
+};
+
+struct cgx_phy_mod_type {
+ struct mbox_msghdr hdr;
+ int mod;
+};
+
+struct npc_set_pkind {
+ struct mbox_msghdr hdr;
+#define OTX2_PRIV_FLAGS_DEFAULT BIT_ULL(0)
+#define OTX2_PRIV_FLAGS_EDSA BIT_ULL(1)
+#define OTX2_PRIV_FLAGS_HIGIG BIT_ULL(2)
+#define OTX2_PRIV_FLAGS_CUSTOM BIT_ULL(63)
+ u64 mode;
+#define PKIND_TX BIT_ULL(0)
+#define PKIND_RX BIT_ULL(1)
+ u8 dir;
+ u8 pkind; /* valid only when the custom flag is set */
+};
+
+struct cgx_set_link_mode_args {
+ u32 speed;
+ u8 duplex;
+ u8 an;
+ u8 ports;
+ u64 mode;
+};
+
+struct cgx_set_link_mode_req {
+ struct mbox_msghdr hdr;
+ struct cgx_set_link_mode_args args;
+};
+
+struct cgx_set_link_mode_rsp {
+ struct mbox_msghdr hdr;
+ int status;
+};
/* NPA mbox message formats */
/* NPA mailbox error codes
@@ -453,6 +679,21 @@ enum nix_af_status {
NIX_AF_ERR_LSO_CFG_FAIL = -418,
NIX_AF_INVAL_NPA_PF_FUNC = -419,
NIX_AF_INVAL_SSO_PF_FUNC = -420,
+ NIX_AF_ERR_TX_VTAG_NOSPC = -421,
+ NIX_AF_ERR_RX_VTAG_INUSE = -422,
+ NIX_AF_ERR_PTP_CONFIG_FAIL = -423,
+};
+
+/* For NIX RX vtag action */
+enum nix_rx_vtag0_type {
+ NIX_AF_LFX_RX_VTAG_TYPE0, /* reserved for rx vlan offload */
+ NIX_AF_LFX_RX_VTAG_TYPE1,
+ NIX_AF_LFX_RX_VTAG_TYPE2,
+ NIX_AF_LFX_RX_VTAG_TYPE3,
+ NIX_AF_LFX_RX_VTAG_TYPE4,
+ NIX_AF_LFX_RX_VTAG_TYPE5,
+ NIX_AF_LFX_RX_VTAG_TYPE6,
+ NIX_AF_LFX_RX_VTAG_TYPE7,
};
/* For NIX LF context alloc and init */
@@ -469,6 +710,8 @@ struct nix_lf_alloc_req {
u16 sso_func;
u64 rx_cfg; /* See NIX_AF_LF(0..127)_RX_CFG */
u64 way_mask;
+#define NIX_LF_RSS_TAG_LSB_AS_ADDER BIT_ULL(0)
+ u64 flags;
};
struct nix_lf_alloc_rsp {
@@ -485,6 +728,14 @@ struct nix_lf_alloc_rsp {
u8 lf_tx_stats; /* NIX_AF_CONST1::LF_TX_STATS */
u16 cints; /* NIX_AF_CONST2::CINTS */
u16 qints; /* NIX_AF_CONST2::QINTS */
+ u8 hw_rx_tstamp_en;
+};
+
+struct nix_lf_free_req {
+ struct mbox_msghdr hdr;
+#define NIX_LF_DISABLE_FLOWS BIT_ULL(0)
+#define NIX_LF_DONT_FREE_TX_VTAG BIT_ULL(1)
+ u64 flags;
};
/* NIX AQ enqueue msg */
@@ -577,14 +828,40 @@ struct nix_vtag_config {
union {
/* valid when cfg_type is '0' */
struct {
- /* tx vlan0 tag(C-VLAN) */
- u64 vlan0;
- /* tx vlan1 tag(S-VLAN) */
- u64 vlan1;
- /* insert tx vlan tag */
- u8 insert_vlan :1;
- /* insert tx double vlan tag */
- u8 double_vlan :1;
+ u64 vtag0;
+ u64 vtag1;
+
+ /* cfg_vtag0 & cfg_vtag1 fields are valid
+ * when free_vtag0 & free_vtag1 are '0's.
+ */
+ /* cfg_vtag0 = 1 to configure vtag0 */
+ u8 cfg_vtag0 :1;
+ /* cfg_vtag1 = 1 to configure vtag1 */
+ u8 cfg_vtag1 :1;
+
+ /* vtag0_idx & vtag1_idx are only valid when
+ * both cfg_vtag0 & cfg_vtag1 are '0's.
+ * These fields are used along with free_vtag0
+ * & free_vtag1 to free the NIX LF's tx_vlan
+ * configuration.
+ *
+ * They denote the indices of the tx_vtag def
+ * registers that need to be cleared and freed.
+ */
+ int vtag0_idx;
+ int vtag1_idx;
+
+ /* free_vtag0 & free_vtag1 fields are valid
+ * when cfg_vtag0 & cfg_vtag1 are '0's.
+ */
+ /* free_vtag0 = 1 clears vtag0 configuration
+ * vtag0_idx denotes the index to be cleared.
+ */
+ u8 free_vtag0 :1;
+ /* free_vtag1 = 1 clears vtag1 configuration
+ * vtag1_idx denotes the index to be cleared.
+ */
+ u8 free_vtag1 :1;
} tx;
/* valid when cfg_type is '1' */
@@ -599,6 +876,19 @@ struct nix_vtag_config {
};
};
+struct nix_vtag_config_rsp {
+ struct mbox_msghdr hdr;
+ int vtag0_idx;
+ int vtag1_idx;
+ /* Indices of the tx_vtag def registers used to configure
+ * tx vtag0 & vtag1 headers. These indices are valid only
+ * when the nix_vtag_config mbox requested vtag0 and/or
+ * vtag1 configuration.
+ */
+};
+
+#define NIX_FLOW_KEY_TYPE_L3_L4_MASK (~(0xf << 28))
+
struct nix_rss_flowkey_cfg {
struct mbox_msghdr hdr;
int mcam_index; /* MCAM entry index to modify */
@@ -620,6 +910,10 @@ struct nix_rss_flowkey_cfg {
#define NIX_FLOW_KEY_TYPE_INNR_UDP BIT(15)
#define NIX_FLOW_KEY_TYPE_INNR_SCTP BIT(16)
#define NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC BIT(17)
+#define NIX_FLOW_KEY_TYPE_L4_DST_ONLY BIT(28)
+#define NIX_FLOW_KEY_TYPE_L4_SRC_ONLY BIT(29)
+#define NIX_FLOW_KEY_TYPE_L3_DST_ONLY BIT(30)
+#define NIX_FLOW_KEY_TYPE_L3_SRC_ONLY BIT(31)
u32 flowkey_cfg; /* Flowkey types selected */
u8 group; /* RSS context or group */
};
@@ -691,6 +985,148 @@ struct nix_lso_format_cfg_rsp {
u8 lso_format_idx;
};
+struct nix_set_vlan_tpid {
+ struct mbox_msghdr hdr;
+#define NIX_VLAN_TYPE_INNER 0
+#define NIX_VLAN_TYPE_OUTER 1
+ u8 vlan_type;
+ u16 tpid;
+};
+
+/* Global NIX inline IPSec configuration */
+struct nix_inline_ipsec_cfg {
+ struct mbox_msghdr hdr;
+ u32 cpt_credit;
+ struct {
+ u8 egrp;
+ u8 opcode;
+ } gen_cfg;
+ struct {
+ u16 cpt_pf_func;
+ u8 cpt_slot;
+ } inst_qsel;
+ u8 enable;
+};
+
+/* Per NIX LF inline IPSec configuration */
+struct nix_inline_ipsec_lf_cfg {
+ struct mbox_msghdr hdr;
+ u64 sa_base_addr;
+ struct {
+ u32 tag_const;
+ u16 lenm1_max;
+ u8 sa_pow2_size;
+ u8 tt;
+ } ipsec_cfg0;
+ struct {
+ u32 sa_idx_max;
+ u8 sa_idx_w;
+ } ipsec_cfg1;
+ u8 enable;
+};
+
+/* SSO mailbox error codes
+ * Range 501 - 600.
+ */
+enum sso_af_status {
+ SSO_AF_ERR_PARAM = -501,
+ SSO_AF_ERR_LF_INVALID = -502,
+ SSO_AF_ERR_AF_LF_ALLOC = -503,
+ SSO_AF_ERR_GRP_EBUSY = -504,
+ SSO_AF_INVAL_NPA_PF_FUNC = -505,
+};
+
+struct sso_lf_alloc_req {
+ struct mbox_msghdr hdr;
+ int node;
+ u16 hwgrps;
+};
+
+struct sso_lf_alloc_rsp {
+ struct mbox_msghdr hdr;
+ u32 xaq_buf_size;
+ u32 xaq_wq_entries;
+ u32 in_unit_entries;
+ u16 hwgrps;
+};
+
+struct sso_lf_free_req {
+ struct mbox_msghdr hdr;
+ int node;
+ u16 hwgrps;
+};
+
+struct sso_hw_setconfig {
+ struct mbox_msghdr hdr;
+ u32 npa_aura_id;
+ u16 npa_pf_func;
+ u16 hwgrps;
+};
+
+struct sso_info_req {
+ struct mbox_msghdr hdr;
+ union {
+ u16 grp;
+ u16 hws;
+ };
+};
+
+struct sso_grp_priority {
+ struct mbox_msghdr hdr;
+ u16 grp;
+ u8 priority;
+ u8 affinity;
+ u8 weight;
+};
+
+/* SSOW mailbox error codes
+ * Range 601 - 700.
+ */
+enum ssow_af_status {
+ SSOW_AF_ERR_PARAM = -601,
+ SSOW_AF_ERR_LF_INVALID = -602,
+ SSOW_AF_ERR_AF_LF_ALLOC = -603,
+};
+
+struct ssow_lf_alloc_req {
+ struct mbox_msghdr hdr;
+ int node;
+ u16 hws;
+};
+
+struct ssow_lf_free_req {
+ struct mbox_msghdr hdr;
+ int node;
+ u16 hws;
+};
+
+struct sso_grp_qos_cfg {
+ struct mbox_msghdr hdr;
+ u16 grp;
+ u32 xaq_limit;
+ u16 taq_thr;
+ u16 iaq_thr;
+};
+
+struct sso_grp_stats {
+ struct mbox_msghdr hdr;
+ u16 grp;
+ u64 ws_pc;
+ u64 ext_pc;
+ u64 wa_pc;
+ u64 ts_pc;
+ u64 ds_pc;
+ u64 dq_pc;
+ u64 aw_status;
+ u64 page_cnt;
+};
+
+struct sso_hws_stats {
+ struct mbox_msghdr hdr;
+ u16 hws;
+ u64 arbitration;
+};
+
struct nix_bp_cfg_req {
struct mbox_msghdr hdr;
u16 chan_base; /* Starting channel number */
@@ -723,6 +1159,7 @@ enum npc_af_status {
NPC_MCAM_ALLOC_DENIED = -702,
NPC_MCAM_ALLOC_FAILED = -703,
NPC_MCAM_PERM_DENIED = -704,
+ NPC_AF_ERR_HIGIG_CONFIG_FAIL = -705,
};
struct npc_mcam_alloc_entry_req {
@@ -858,4 +1295,247 @@ struct npc_get_kex_cfg_rsp {
u8 mkex_pfl_name[MKEX_NAME_LEN];
};
+enum ptp_op {
+ PTP_OP_ADJFINE = 0, /* adjfine(req.scaled_ppm); */
+ PTP_OP_GET_CLOCK = 1, /* rsp.clk = get_clock() */
+};
+
+struct ptp_req {
+ struct mbox_msghdr hdr;
+ u8 op;
+ s64 scaled_ppm;
+ u8 is_pmu;
+};
+
+struct ptp_rsp {
+ struct mbox_msghdr hdr;
+ u64 clk;
+ u64 tsc;
+};
+
+enum header_fields {
+ NPC_DMAC,
+ NPC_SMAC,
+ NPC_ETYPE,
+ NPC_OUTER_VID,
+ NPC_TOS,
+ NPC_SIP_IPV4,
+ NPC_DIP_IPV4,
+ NPC_SIP_IPV6,
+ NPC_DIP_IPV6,
+ NPC_SPORT_TCP,
+ NPC_DPORT_TCP,
+ NPC_SPORT_UDP,
+ NPC_DPORT_UDP,
+ NPC_HEADER_FIELDS_MAX,
+};
+
+struct flow_msg {
+ unsigned char dmac[6];
+ unsigned char smac[6];
+ __be16 etype;
+ __be16 vlan_etype;
+ __be16 vlan_tci;
+ union {
+ __be32 ip4src;
+ __be32 ip6src[4];
+ };
+ union {
+ __be32 ip4dst;
+ __be32 ip6dst[4];
+ };
+ u8 tos;
+ u8 ip_ver;
+ u8 ip_proto;
+ u8 tc;
+ __be16 sport;
+ __be16 dport;
+};
+
+struct npc_install_flow_req {
+ struct mbox_msghdr hdr;
+ struct flow_msg packet;
+ struct flow_msg mask;
+ u64 features;
+ u16 entry;
+ u16 channel;
+ u8 intf;
+ u8 set_cntr; /* if a counter is available, set it for this entry */
+ u8 default_rule;
+ u8 append; /* overwrite (0) or append (1) this flow to the default rule */
+ u16 vf;
+ /* action */
+ u32 index;
+ u16 match_id;
+ u8 flow_key_alg;
+ u8 op;
+ /* vtag rx action */
+ u8 vtag0_type;
+ u8 vtag0_valid;
+ u8 vtag1_type;
+ u8 vtag1_valid;
+ /* vtag tx action */
+ u16 vtag0_def;
+ u8 vtag0_op;
+ u16 vtag1_def;
+ u8 vtag1_op;
+};
+
+struct npc_install_flow_rsp {
+ struct mbox_msghdr hdr;
+ int counter; /* negative if no counter, else the counter number */
+};
+
+struct npc_delete_flow_req {
+ struct mbox_msghdr hdr;
+ u16 entry;
+ u16 start; /* Disable a range of entries */
+ u16 end;
+ u8 all; /* PF + VFs */
+};
+
+struct npc_mcam_read_entry_req {
+ struct mbox_msghdr hdr;
+ u16 entry; /* MCAM entry to read */
+};
+
+struct npc_mcam_read_entry_rsp {
+ struct mbox_msghdr hdr;
+ struct mcam_entry entry_data;
+ u8 intf;
+ u8 enable;
+};
+
+/* TIM mailbox error codes
+ * Range 801 - 900.
+ */
+enum tim_af_status {
+ TIM_AF_NO_RINGS_LEFT = -801,
+ TIM_AF_INVAL_NPA_PF_FUNC = -802,
+ TIM_AF_INVAL_SSO_PF_FUNC = -803,
+ TIM_AF_RING_STILL_RUNNING = -804,
+ TIM_AF_LF_INVALID = -805,
+ TIM_AF_CSIZE_NOT_ALIGNED = -806,
+ TIM_AF_CSIZE_TOO_SMALL = -807,
+ TIM_AF_CSIZE_TOO_BIG = -808,
+ TIM_AF_INTERVAL_TOO_SMALL = -809,
+ TIM_AF_INVALID_BIG_ENDIAN_VALUE = -810,
+ TIM_AF_INVALID_CLOCK_SOURCE = -811,
+ TIM_AF_GPIO_CLK_SRC_NOT_ENABLED = -812,
+ TIM_AF_INVALID_BSIZE = -813,
+ TIM_AF_INVALID_ENABLE_PERIODIC = -814,
+ TIM_AF_INVALID_ENABLE_DONTFREE = -815,
+ TIM_AF_ENA_DONTFRE_NSET_PERIODIC = -816,
+ TIM_AF_RING_ALREADY_DISABLED = -817,
+};
+
+enum tim_clk_srcs {
+ TIM_CLK_SRCS_TENNS = 0,
+ TIM_CLK_SRCS_GPIO = 1,
+ TIM_CLK_SRCS_GTI = 2,
+ TIM_CLK_SRCS_PTP = 3,
+ TIM_CLK_SRSC_INVALID,
+};
+
+enum tim_gpio_edge {
+ TIM_GPIO_NO_EDGE = 0,
+ TIM_GPIO_LTOH_TRANS = 1,
+ TIM_GPIO_HTOL_TRANS = 2,
+ TIM_GPIO_BOTH_TRANS = 3,
+ TIM_GPIO_INVALID,
+};
+
+struct tim_lf_alloc_req {
+ struct mbox_msghdr hdr;
+ u16 ring;
+ u16 npa_pf_func;
+ u16 sso_pf_func;
+};
+
+struct tim_ring_req {
+ struct mbox_msghdr hdr;
+ u16 ring;
+};
+
+struct tim_config_req {
+ struct mbox_msghdr hdr;
+ u16 ring;
+ u8 bigendian;
+ u8 clocksource;
+ u8 enableperiodic;
+ u8 enabledontfreebuffer;
+ u32 bucketsize;
+ u32 chunksize;
+ u32 interval;
+};
+
+struct tim_lf_alloc_rsp {
+ struct mbox_msghdr hdr;
+ u64 tenns_clk;
+};
+
+struct tim_enable_rsp {
+ struct mbox_msghdr hdr;
+ u64 timestarted;
+ u32 currentbucket;
+};
+
+struct ndc_sync_op {
+ struct mbox_msghdr hdr;
+ u8 nix_lf_tx_sync;
+ u8 nix_lf_rx_sync;
+ u8 npa_lf_sync;
+};
+
+/* CPT mailbox error codes
+ * Range 901 - 1000.
+ */
+enum cpt_af_status {
+ CPT_AF_ERR_PARAM = -901,
+ CPT_AF_ERR_GRP_INVALID = -902,
+ CPT_AF_ERR_LF_INVALID = -903,
+ CPT_AF_ERR_ACCESS_DENIED = -904,
+ CPT_AF_ERR_SSO_PF_FUNC_INVALID = -905,
+ CPT_AF_ERR_NIX_PF_FUNC_INVALID = -906,
+ CPT_AF_ERR_INLINE_IPSEC_INB_ENA = -907,
+ CPT_AF_ERR_INLINE_IPSEC_OUT_ENA = -908
+};
+
+/* CPT mbox message formats */
+
+struct cpt_rd_wr_reg_msg {
+ struct mbox_msghdr hdr;
+ u64 reg_offset;
+ u64 *ret_val;
+ u64 val;
+ u8 is_write;
+};
+
+struct cpt_set_crypto_grp_req_msg {
+ struct mbox_msghdr hdr;
+ u8 crypto_eng_grp;
+};
+
+struct cpt_lf_alloc_req_msg {
+ struct mbox_msghdr hdr;
+ u16 nix_pf_func;
+ u16 sso_pf_func;
+};
+
+struct cpt_lf_alloc_rsp_msg {
+ struct mbox_msghdr hdr;
+ u8 crypto_eng_grp;
+};
+
+#define CPT_INLINE_INBOUND 0
+#define CPT_INLINE_OUTBOUND 1
+struct cpt_inline_ipsec_cfg_msg {
+ struct mbox_msghdr hdr;
+ u8 enable;
+ u8 slot;
+ u8 dir;
+ u16 sso_pf_func; /* inbound path SSO_PF_FUNC */
+ u16 nix_pf_func; /* outbound path NIX_PF_FUNC */
+};
+
#endif /* MBOX_H */
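The M() entries above follow the driver's existing X-macro pattern: each line names a mailbox message, its numeric ID, a handler suffix, and the request and response structure types, and the whole list is expanded several times with different definitions of M(). A reduced, self-contained sketch of that pattern; the DEMO_* names are made up for illustration, and the real list and rvu_mbox_handler_* prototypes are defined elsewhere in the driver.

/* Illustrative only. */
#define DEMO_MESSAGES						\
M(DEMO_READY,  0x001, demo_ready,  msg_req, msg_rsp)		\
M(DEMO_ATTACH, 0x002, demo_attach, msg_req, msg_rsp)

/* Expansion 1: build the message-ID enum */
enum {
#define M(_name, _id, _fn_name, _req_type, _rsp_type) MBOX_MSG_ ## _name = _id,
DEMO_MESSAGES
#undef M
};

/* Expansion 2: declare one AF handler prototype per message */
#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
int rvu_mbox_handler_ ## _fn_name(struct rvu *rvu,			\
				  struct _req_type *req,		\
				  struct _rsp_type *rsp);
DEMO_MESSAGES
#undef M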
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
index 3803af9231c6..04cf6334b388 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -53,8 +53,13 @@ enum npc_kpu_lb_ltype {
NPC_LT_LB_CUSTOM1 = 0xF,
};
+/* Don't modify ltypes up to IP6_EXT, otherwise length and checksum of IP
+ * headers may not be checked correctly. IPv4 ltypes and IPv6 ltypes must
+ * differ only at bit 0 so mask 0xE can be used to detect extended headers.
+ */
enum npc_kpu_lc_ltype {
- NPC_LT_LC_IP = 1,
+ NPC_LT_LC_PTP = 1,
+ NPC_LT_LC_IP,
NPC_LT_LC_IP_OPT,
NPC_LT_LC_IP6,
NPC_LT_LC_IP6_EXT,
@@ -62,7 +67,6 @@ enum npc_kpu_lc_ltype {
NPC_LT_LC_RARP,
NPC_LT_LC_MPLS,
NPC_LT_LC_NSH,
- NPC_LT_LC_PTP,
NPC_LT_LC_FCOE,
NPC_LT_LC_CUSTOM0 = 0xE,
NPC_LT_LC_CUSTOM1 = 0xF,
@@ -77,6 +81,8 @@ enum npc_kpu_ld_ltype {
NPC_LT_LD_ICMP,
NPC_LT_LD_SCTP,
NPC_LT_LD_ICMP6,
+ NPC_LT_LD_CUSTOM0,
+ NPC_LT_LD_CUSTOM1,
NPC_LT_LD_IGMP = 8,
NPC_LT_LD_ESP,
NPC_LT_LD_AH,
@@ -85,8 +91,6 @@ enum npc_kpu_ld_ltype {
NPC_LT_LD_NSH,
NPC_LT_LD_TU_MPLS_IN_NSH,
NPC_LT_LD_TU_MPLS_IN_IP,
- NPC_LT_LD_CUSTOM0 = 0xE,
- NPC_LT_LD_CUSTOM1 = 0xF,
};
enum npc_kpu_le_ltype {
@@ -139,6 +143,13 @@ enum npc_kpu_lh_ltype {
NPC_LT_LH_CUSTOM1 = 0xF,
};
+enum npc_pkind_type {
+ NPC_TX_HIGIG_PKIND = 60ULL,
+ NPC_RX_HIGIG_PKIND,
+ NPC_RX_EDSA_PKIND,
+ NPC_TX_DEF_PKIND,
+};
+
struct npc_kpu_profile_cam {
u8 state;
u8 state_mask;
@@ -148,7 +159,7 @@ struct npc_kpu_profile_cam {
u16 dp1_mask;
u16 dp2;
u16 dp2_mask;
-};
+} __packed;
struct npc_kpu_profile_action {
u8 errlev;
@@ -168,7 +179,7 @@ struct npc_kpu_profile_action {
u8 mask;
u8 right;
u8 shift;
-};
+} __packed;
struct npc_kpu_profile {
int cam_entries;
@@ -296,11 +307,44 @@ struct nix_rx_action {
#endif
};
+struct nix_tx_action {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 rsvd_63_48 :16;
+ u64 match_id :16;
+ u64 index :20;
+ u64 rsvd_11_8 :8;
+ u64 op :4;
+#else
+ u64 op :4;
+ u64 rsvd_11_8 :8;
+ u64 index :20;
+ u64 match_id :16;
+ u64 rsvd_63_48 :16;
+#endif
+};
+
+/* NPC_AF_INTFX_KEX_CFG field masks */
+#define NPC_PARSE_NIBBLE GENMASK_ULL(30, 0)
+
/* NIX Receive Vtag Action Structure */
-#define VTAG0_VALID_BIT BIT_ULL(15)
-#define VTAG0_TYPE_MASK GENMASK_ULL(14, 12)
-#define VTAG0_LID_MASK GENMASK_ULL(10, 8)
-#define VTAG0_RELPTR_MASK GENMASK_ULL(7, 0)
+#define RX_VTAG0_VALID_BIT BIT_ULL(15)
+#define RX_VTAG0_TYPE_MASK GENMASK_ULL(14, 12)
+#define RX_VTAG0_LID_MASK GENMASK_ULL(10, 8)
+#define RX_VTAG0_RELPTR_MASK GENMASK_ULL(7, 0)
+#define RX_VTAG1_VALID_BIT BIT_ULL(47)
+#define RX_VTAG1_TYPE_MASK GENMASK_ULL(46, 44)
+#define RX_VTAG1_LID_MASK GENMASK_ULL(42, 40)
+#define RX_VTAG1_RELPTR_MASK GENMASK_ULL(39, 32)
+
+/* NIX Transmit Vtag Action Structure */
+#define TX_VTAG0_DEF_MASK GENMASK_ULL(25, 16)
+#define TX_VTAG0_OP_MASK GENMASK_ULL(13, 12)
+#define TX_VTAG0_LID_MASK GENMASK_ULL(10, 8)
+#define TX_VTAG0_RELPTR_MASK GENMASK_ULL(7, 0)
+#define TX_VTAG1_DEF_MASK GENMASK_ULL(57, 48)
+#define TX_VTAG1_OP_MASK GENMASK_ULL(45, 44)
+#define TX_VTAG1_LID_MASK GENMASK_ULL(42, 40)
+#define TX_VTAG1_RELPTR_MASK GENMASK_ULL(39, 32)
struct npc_mcam_kex {
/* MKEX Profile Header */
@@ -320,4 +364,92 @@ struct npc_mcam_kex {
u64 intf_ld_flags[NPC_MAX_INTF][NPC_MAX_LD][NPC_MAX_LFL];
} __packed;
+struct npc_kpu_fwdata {
+ int entries;
+ /* What follows is:
+ * struct npc_kpu_profile_cam[entries];
+ * struct npc_kpu_profile_action[entries];
+ */
+ u8 data[0];
+} __packed;
+
+struct npc_lt_def {
+ u8 ltype_mask;
+ u8 ltype_match;
+ u8 lid;
+} __packed;
+
+struct npc_lt_def_ipsec {
+ u8 ltype_mask;
+ u8 ltype_match;
+ u8 lid;
+ u8 spi_offset;
+ u8 spi_nz;
+} __packed;
+
+struct npc_lt_def_cfg {
+ struct npc_lt_def rx_ol2;
+ struct npc_lt_def rx_oip4;
+ struct npc_lt_def rx_iip4;
+ struct npc_lt_def rx_oip6;
+ struct npc_lt_def rx_iip6;
+ struct npc_lt_def rx_otcp;
+ struct npc_lt_def rx_itcp;
+ struct npc_lt_def rx_oudp;
+ struct npc_lt_def rx_iudp;
+ struct npc_lt_def rx_osctp;
+ struct npc_lt_def rx_isctp;
+ struct npc_lt_def_ipsec rx_ipsec[2];
+ struct npc_lt_def pck_ol2;
+ struct npc_lt_def pck_oip4;
+ struct npc_lt_def pck_oip6;
+ struct npc_lt_def pck_iip4;
+} __packed;
+
+/* Loadable KPU profile firmware data */
+struct npc_kpu_profile_fwdata {
+#define KPU_SIGN 0x00666f727075706b
+#define KPU_NAME_LEN 32
+/** Maximum number of custom KPU entries supported by the built-in profile. */
+#define KPU_MAX_CST_ENT 2
+ /* KPU Profile Header */
+ u64 signature; /* "kpuprof\0" (8 bytes/ASCII characters) */
+ u8 name[KPU_NAME_LEN]; /* KPU Profile name */
+ u64 version; /* KPU profile version */
+ u8 kpus;
+ u8 reserved[7];
+
+ /* Default MKEX profile to be used with this KPU profile. May be
+ * overridden with the mkex_profile module parameter. The format is the
+ * same as for a standalone MKEX profile, to streamline processing.
+ */
+ struct npc_mcam_kex mkex;
+ /* LTYPE values for specific HW offloaded protocols. */
+ struct npc_lt_def_cfg lt_def;
+ /* Dynamically sized data:
+ * Custom KPU CAM and ACTION configuration entries.
+ * struct npc_kpu_fwdata kpu[kpus];
+ */
+ u8 data[0];
+} __packed;
+
+struct rvu_npc_mcam_rule {
+ struct flow_msg packet;
+ struct flow_msg mask;
+ u8 intf;
+ union {
+ struct nix_tx_action tx_action;
+ struct nix_rx_action rx_action;
+ };
+ u64 vtag_action;
+ struct list_head list;
+ u64 features;
+ u16 owner;
+ u16 entry;
+ u16 cntr;
+ bool has_cntr;
+ u8 default_rule;
+ bool enable;
+};
+
#endif /* NPC_H */
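The RX/TX vtag action masks above all describe fields of a single 64-bit action word programmed alongside an MCAM entry. A short sketch of packing an RX vtag action with the <linux/bitfield.h> helpers; the helper name and parameters are assumptions for illustration, not code from the patch.

#include <linux/bitfield.h>

/* Build an RX vtag action that captures/strips the outer VLAN via type 0. */
static inline u64 npc_build_rx_vtag0_action(u8 type, u8 lid, u8 relptr)
{
	u64 act = 0;

	act |= RX_VTAG0_VALID_BIT;				/* bit 15     */
	act |= FIELD_PREP(RX_VTAG0_TYPE_MASK, type);		/* bits 14:12 */
	act |= FIELD_PREP(RX_VTAG0_LID_MASK, lid);		/* bits 10:8  */
	act |= FIELD_PREP(RX_VTAG0_RELPTR_MASK, relptr);	/* bits 7:0   */
	return act;
}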
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
index aa2727e6211a..728c54cadd0f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
@@ -11,7 +11,10 @@
#ifndef NPC_PROFILE_H
#define NPC_PROFILE_H
-#define NPC_KPU_PROFILE_VER 0x0000000100050000
+#define NPC_KPU_PROFILE_VER 0x0000000100050000
+#define NPC_KPU_VER_MAJ(ver) (u16)(((ver) >> 32) & 0xFFFF)
+#define NPC_KPU_VER_MIN(ver) (u16)(((ver) >> 16) & 0xFFFF)
+#define NPC_KPU_VER_PATCH(ver) (u16)((ver) & 0xFFFF)
#define NPC_IH_W 0x8000
#define NPC_IH_UTAG 0x2000
@@ -140,6 +143,12 @@
#define NPC_DSA_EXTEND 0x1000
#define NPC_DSA_EDSA 0x8000
+#define NPC_KEXOF_DMAC 8
+#define MKEX_SIGN 0x19bbfdbd15f
+#define KEX_LD_CFG(bytesm1, hdr_ofs, ena, flags_ena, key_ofs) \
+ (((bytesm1) << 16) | ((hdr_ofs) << 8) | ((ena) << 7) | \
+ ((flags_ena) << 6) | ((key_ofs) & 0x3F))
+
enum npc_kpu_parser_state {
NPC_S_NA = 0,
NPC_S_KPU1_ETHER,
@@ -418,6 +427,27 @@ enum NPC_ERRLEV_E {
NPC_ERRLEV_ENUM_LAST = 16,
};
+#define NPC_KPU_NOP_CAM \
+ { \
+ NPC_S_NA, 0xff, \
+ 0x0000, \
+ 0x0000, \
+ 0x0000, \
+ 0x0000, \
+ 0x0000, \
+ 0x0000, \
+ }
+
+#define NPC_KPU_NOP_ACTION \
+ { \
+ NPC_ERRLEV_RE, NPC_EC_NOERR, \
+ 0, 0, 0, 0, 0, \
+ NPC_S_NA, 0, 0, \
+ NPC_LID_LA, NPC_LT_NA, \
+ 0, \
+ 0, 0, 0, 0, \
+ }
+
static struct npc_kpu_profile_action ikpu_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
@@ -998,6 +1028,8 @@ static struct npc_kpu_profile_action ikpu_action_entries[] = {
};
static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU1_ETHER, 0xff,
NPC_ETYPE_IP,
@@ -1667,6 +1699,8 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
};
static struct npc_kpu_profile_cam kpu2_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU2_CTAG, 0xff,
NPC_ETYPE_IP,
@@ -2795,6 +2829,8 @@ static struct npc_kpu_profile_cam kpu2_cam_entries[] = {
};
static struct npc_kpu_profile_cam kpu3_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU3_CTAG, 0xff,
NPC_ETYPE_IP,
@@ -3914,6 +3950,8 @@ static struct npc_kpu_profile_cam kpu3_cam_entries[] = {
};
static struct npc_kpu_profile_cam kpu4_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU4_MPLS, 0xff,
NPC_MPLS_S,
@@ -4007,6 +4045,8 @@ static struct npc_kpu_profile_cam kpu4_cam_entries[] = {
};
static struct npc_kpu_profile_cam kpu5_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU5_IP, 0xff,
0x0000,
@@ -4577,6 +4617,8 @@ static struct npc_kpu_profile_cam kpu5_cam_entries[] = {
};
static struct npc_kpu_profile_cam kpu6_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU6_IP6_EXT, 0xff,
0x0000,
@@ -4922,6 +4964,8 @@ static struct npc_kpu_profile_cam kpu6_cam_entries[] = {
};
static struct npc_kpu_profile_cam kpu7_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU7_IP6_EXT, 0xff,
0x0000,
@@ -5141,6 +5185,8 @@ static struct npc_kpu_profile_cam kpu7_cam_entries[] = {
};
static struct npc_kpu_profile_cam kpu8_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU8_TCP, 0xff,
0x0000,
@@ -5873,6 +5919,8 @@ static struct npc_kpu_profile_cam kpu8_cam_entries[] = {
};
static struct npc_kpu_profile_cam kpu9_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU9_TU_MPLS_IN_GRE, 0xff,
NPC_MPLS_S,
@@ -6335,6 +6383,8 @@ static struct npc_kpu_profile_cam kpu9_cam_entries[] = {
};
static struct npc_kpu_profile_cam kpu10_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU10_TU_MPLS, 0xff,
NPC_MPLS_S,
@@ -6500,6 +6550,8 @@ static struct npc_kpu_profile_cam kpu10_cam_entries[] = {
};
static struct npc_kpu_profile_cam kpu11_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU11_TU_ETHER, 0xff,
NPC_ETYPE_IP,
@@ -6809,6 +6861,8 @@ static struct npc_kpu_profile_cam kpu11_cam_entries[] = {
};
static struct npc_kpu_profile_cam kpu12_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU12_TU_IP, 0xff,
NPC_IPNH_TCP,
@@ -7064,6 +7118,8 @@ static struct npc_kpu_profile_cam kpu12_cam_entries[] = {
};
static struct npc_kpu_profile_cam kpu13_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU13_TU_IP6_EXT, 0xff,
0x0000,
@@ -7076,6 +7132,8 @@ static struct npc_kpu_profile_cam kpu13_cam_entries[] = {
};
static struct npc_kpu_profile_cam kpu14_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU14_TU_IP6_EXT, 0xff,
0x0000,
@@ -7088,6 +7146,8 @@ static struct npc_kpu_profile_cam kpu14_cam_entries[] = {
};
static struct npc_kpu_profile_cam kpu15_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU15_TU_TCP, 0xff,
0x0000,
@@ -7289,6 +7349,8 @@ static struct npc_kpu_profile_cam kpu15_cam_entries[] = {
};
static struct npc_kpu_profile_cam kpu16_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU16_TCP_DATA, 0xff,
0x0000,
@@ -7346,6 +7408,8 @@ static struct npc_kpu_profile_cam kpu16_cam_entries[] = {
};
static struct npc_kpu_profile_action kpu1_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 3, 0,
@@ -7963,6 +8027,8 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
};
static struct npc_kpu_profile_action kpu2_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 2, 0,
@@ -8966,6 +9032,8 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
};
static struct npc_kpu_profile_action kpu3_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 1, 0,
@@ -9961,6 +10029,8 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
};
static struct npc_kpu_profile_action kpu4_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 0,
@@ -10044,6 +10114,8 @@ static struct npc_kpu_profile_action kpu4_action_entries[] = {
};
static struct npc_kpu_profile_action kpu5_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_LC, NPC_EC_IP_TTL_0,
0, 0, 0, 0, 1,
@@ -10551,6 +10623,8 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = {
};
static struct npc_kpu_profile_action kpu6_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
@@ -10858,6 +10932,8 @@ static struct npc_kpu_profile_action kpu6_action_entries[] = {
};
static struct npc_kpu_profile_action kpu7_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
@@ -11053,6 +11129,8 @@ static struct npc_kpu_profile_action kpu7_action_entries[] = {
};
static struct npc_kpu_profile_action kpu8_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_LD, NPC_EC_TCP_FLAGS_FIN_ONLY,
0, 0, 0, 0, 1,
@@ -11704,6 +11782,8 @@ static struct npc_kpu_profile_action kpu8_action_entries[] = {
};
static struct npc_kpu_profile_action kpu9_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 0,
@@ -12115,6 +12195,8 @@ static struct npc_kpu_profile_action kpu9_action_entries[] = {
};
static struct npc_kpu_profile_action kpu10_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 1, 0,
@@ -12262,6 +12344,8 @@ static struct npc_kpu_profile_action kpu10_action_entries[] = {
};
static struct npc_kpu_profile_action kpu11_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 0, 0,
@@ -12537,6 +12621,8 @@ static struct npc_kpu_profile_action kpu11_action_entries[] = {
};
static struct npc_kpu_profile_action kpu12_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 12, 0, 2, 0,
@@ -12764,6 +12850,8 @@ static struct npc_kpu_profile_action kpu12_action_entries[] = {
};
static struct npc_kpu_profile_action kpu13_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
@@ -12775,6 +12863,8 @@ static struct npc_kpu_profile_action kpu13_action_entries[] = {
};
static struct npc_kpu_profile_action kpu14_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
@@ -12786,6 +12876,8 @@ static struct npc_kpu_profile_action kpu14_action_entries[] = {
};
static struct npc_kpu_profile_action kpu15_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_LG, NPC_EC_TCP_FLAGS_FIN_ONLY,
0, 0, 0, 0, 1,
@@ -12965,6 +13057,8 @@ static struct npc_kpu_profile_action kpu15_action_entries[] = {
};
static struct npc_kpu_profile_action kpu16_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
@@ -13114,4 +13208,221 @@ static struct npc_kpu_profile npc_kpu_profiles[] = {
},
};
+static struct npc_lt_def_cfg npc_lt_defaults = {
+ .rx_ol2 = {
+ .lid = NPC_LID_LA,
+ .ltype_match = NPC_LT_LA_ETHER,
+ .ltype_mask = 0x0F,
+ },
+ .rx_oip4 = {
+ .lid = NPC_LID_LC,
+ .ltype_match = NPC_LT_LC_IP,
+ .ltype_mask = 0x0E,
+ },
+ .rx_iip4 = {
+ .lid = NPC_LID_LG,
+ .ltype_match = NPC_LT_LG_TU_IP,
+ .ltype_mask = 0x0F,
+ },
+ .rx_oip6 = {
+ .lid = NPC_LID_LC,
+ .ltype_match = NPC_LT_LC_IP6,
+ .ltype_mask = 0x0E,
+ },
+ .rx_iip6 = {
+ .lid = NPC_LID_LG,
+ .ltype_match = NPC_LT_LG_TU_IP6,
+ .ltype_mask = 0x0F,
+ },
+ .rx_otcp = {
+ .lid = NPC_LID_LD,
+ .ltype_match = NPC_LT_LD_TCP,
+ .ltype_mask = 0x0F,
+ },
+ .rx_itcp = {
+ .lid = NPC_LID_LH,
+ .ltype_match = NPC_LT_LH_TU_TCP,
+ .ltype_mask = 0x0F,
+ },
+ .rx_oudp = {
+ .lid = NPC_LID_LD,
+ .ltype_match = NPC_LT_LD_UDP,
+ .ltype_mask = 0x0F,
+ },
+ .rx_iudp = {
+ .lid = NPC_LID_LH,
+ .ltype_match = NPC_LT_LH_TU_UDP,
+ .ltype_mask = 0x0F,
+ },
+ .rx_osctp = {
+ .lid = NPC_LID_LD,
+ .ltype_match = NPC_LT_LD_SCTP,
+ .ltype_mask = 0x0F,
+ },
+ .rx_isctp = {
+ .lid = NPC_LID_LH,
+ .ltype_match = NPC_LT_LH_TU_SCTP,
+ .ltype_mask = 0x0F,
+ },
+ .rx_ipsec = {
+ {
+ .lid = NPC_LID_LD,
+ .ltype_match = NPC_LT_LD_ESP,
+ .ltype_mask = 0x0F,
+ },
+ {
+ .spi_offset = 8,
+ .lid = NPC_LID_LH,
+ .ltype_match = NPC_LT_LH_TU_ESP,
+ .ltype_mask = 0x0F,
+ },
+ },
+ .pck_ol2 = {
+ .lid = NPC_LID_LA,
+ .ltype_match = NPC_LT_LA_ETHER,
+ .ltype_mask = 0x0F,
+ },
+ .pck_oip4 = {
+ .lid = NPC_LID_LC,
+ .ltype_match = NPC_LT_LC_IP,
+ .ltype_mask = 0x0E,
+ },
+ .pck_iip4 = {
+ .lid = NPC_LID_LG,
+ .ltype_match = NPC_LT_LG_TU_IP,
+ .ltype_mask = 0x0F,
+ },
+};
+
+static struct npc_mcam_kex npc_mkex_default = {
+ .mkex_sign = MKEX_SIGN,
+ .name = "default",
+ .kpu_version = NPC_KPU_PROFILE_VER,
+ .keyx_cfg = {
+ /* nibble: LA..LE (ltype only) + Channel */
+ [NIX_INTF_RX] = ((u64)NPC_MCAM_KEY_X2 << 32) | 0x249207,
+ [NIX_INTF_TX] = ((u64)NPC_MCAM_KEY_X2 << 32) | 0x249200,
+ },
+ .intf_lid_lt_ld = {
+ /* Default RX MCAM KEX profile */
+ [NIX_INTF_RX] = {
+ [NPC_LID_LA] = {
+ /* Layer A: Ethernet: */
+ [NPC_LT_LA_ETHER] = {
+ /* DMAC: 6 bytes, KW1[47:0] */
+ KEX_LD_CFG(0x05, 0x0, 0x1, 0x0, NPC_KEXOF_DMAC),
+ /* Ethertype: 2 bytes, KW0[47:32] */
+ KEX_LD_CFG(0x01, 0xc, 0x1, 0x0, 0x4),
+ },
+ /* Layer A: HiGig2: */
+ [NPC_LT_LA_HIGIG2_ETHER] = {
+ /* Classification: 2 bytes, KW1[15:0] */
+ KEX_LD_CFG(0x01, 0x8, 0x1, 0x0, NPC_KEXOF_DMAC),
+ /* VID: 2 bytes, KW1[31:16] */
+ KEX_LD_CFG(0x01, 0xc, 0x1, 0x0,
+ NPC_KEXOF_DMAC + 2),
+ },
+ },
+ [NPC_LID_LB] = {
+ /* Layer B: Single VLAN (CTAG) */
+ /* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */
+ [NPC_LT_LB_CTAG] = {
+ KEX_LD_CFG(0x03, 0x2, 0x1, 0x0, 0x4),
+ },
+ /* Layer B: Stacked VLAN (STAG|QinQ) */
+ [NPC_LT_LB_STAG_QINQ] = {
+ /* Outer VLAN: 2 bytes, KW0[63:48] */
+ KEX_LD_CFG(0x01, 0x2, 0x1, 0x0, 0x6),
+ /* Ethertype: 2 bytes, KW0[47:32] */
+ KEX_LD_CFG(0x01, 0x8, 0x1, 0x0, 0x4),
+ },
+ },
+ [NPC_LID_LC] = {
+ /* Layer C: IPv4 */
+ [NPC_LT_LC_IP] = {
+ /* SIP+DIP: 8 bytes, KW2[63:0] */
+ KEX_LD_CFG(0x07, 0xc, 0x1, 0x0, 0x10),
+ /* TOS: 1 byte, KW1[63:56] */
+ KEX_LD_CFG(0x0, 0x1, 0x1, 0x0, 0xf),
+ },
+ },
+ [NPC_LID_LD] = {
+ /* Layer D: UDP */
+ [NPC_LT_LD_UDP] = {
+ /* SPORT: 2 bytes, KW3[15:0] */
+ KEX_LD_CFG(0x1, 0x0, 0x1, 0x0, 0x18),
+ /* DPORT: 2 bytes, KW3[31:16] */
+ KEX_LD_CFG(0x1, 0x2, 0x1, 0x0, 0x1a),
+ },
+ /* Layer D: TCP */
+ [NPC_LT_LD_TCP] = {
+ /* SPORT: 2 bytes, KW3[15:0] */
+ KEX_LD_CFG(0x1, 0x0, 0x1, 0x0, 0x18),
+ /* DPORT: 2 bytes, KW3[31:16] */
+ KEX_LD_CFG(0x1, 0x2, 0x1, 0x0, 0x1a),
+ },
+ },
+ },
+
+ /* Default TX MCAM KEX profile */
+ [NIX_INTF_TX] = {
+ [NPC_LID_LA] = {
+ /* Layer A: Ethernet: */
+ [NPC_LT_LA_IH_NIX_ETHER] = {
+ /* PF_FUNC: 2 bytes, KW0[47:32] */
+ KEX_LD_CFG(0x01, 0x0, 0x1, 0x0, 0x4),
+ /* DMAC: 6 bytes, KW1[63:16] */
+ KEX_LD_CFG(0x05, 0x8, 0x1, 0x0, 0xa),
+ },
+ /* Layer A: HiGig2: */
+ [NPC_LT_LA_IH_NIX_HIGIG2_ETHER] = {
+ /* PF_FUNC: 2 bytes, KW0[47:32] */
+ KEX_LD_CFG(0x01, 0x0, 0x1, 0x0, 0x4),
+ /* VID: 2 bytes, KW1[31:16] */
+ KEX_LD_CFG(0x01, 0x10, 0x1, 0x0, 0xa),
+ },
+ },
+ [NPC_LID_LB] = {
+ /* Layer B: Single VLAN (CTAG) */
+ [NPC_LT_LB_CTAG] = {
+ /* CTAG VLAN[2..3] KW0[63:48] */
+ KEX_LD_CFG(0x01, 0x2, 0x1, 0x0, 0x6),
+ /* CTAG VLAN[2..3] KW1[15:0] */
+ KEX_LD_CFG(0x01, 0x4, 0x1, 0x0, 0x8),
+ },
+ /* Layer B: Stacked VLAN (STAG|QinQ) */
+ [NPC_LT_LB_STAG_QINQ] = {
+ /* Outer VLAN: 2 bytes, KW0[63:48] */
+ KEX_LD_CFG(0x01, 0x2, 0x1, 0x0, 0x6),
+ /* Outer VLAN: 2 Bytes, KW1[15:0] */
+ KEX_LD_CFG(0x01, 0x8, 0x1, 0x0, 0x8),
+ },
+ },
+ [NPC_LID_LC] = {
+ /* Layer C: IPv4 */
+ [NPC_LT_LC_IP] = {
+ /* SIP+DIP: 8 bytes, KW2[63:0] */
+ KEX_LD_CFG(0x07, 0xc, 0x1, 0x0, 0x10),
+ },
+ },
+ [NPC_LID_LD] = {
+ /* Layer D: UDP */
+ [NPC_LT_LD_UDP] = {
+ /* SPORT: 2 bytes, KW3[15:0] */
+ KEX_LD_CFG(0x1, 0x0, 0x1, 0x0, 0x18),
+ /* DPORT: 2 bytes, KW3[31:16] */
+ KEX_LD_CFG(0x1, 0x2, 0x1, 0x0, 0x1a),
+ },
+ /* Layer D: TCP */
+ [NPC_LT_LD_TCP] = {
+ /* SPORT: 2 bytes, KW3[15:0] */
+ KEX_LD_CFG(0x1, 0x0, 0x1, 0x0, 0x18),
+ /* DPORT: 2 bytes, KW3[31:16] */
+ KEX_LD_CFG(0x1, 0x2, 0x1, 0x0, 0x1a),
+ },
+ },
+ },
+ },
+};
+
#endif /* NPC_PROFILE_H */
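KEX_LD_CFG() packs one extract descriptor into a configuration word: bytes-minus-one starting at bit 16, the header offset at bit 8, the extract enable at bit 7, the per-flag enable at bit 6 and the key offset in bits 5:0. A worked example using the DMAC entry from the default RX profile above, plus a compile-time check; the check function is a sketch and not part of the patch.

/* KEX_LD_CFG(0x05, 0x0, 0x1, 0x0, NPC_KEXOF_DMAC)
 * = (0x05 << 16) | (0x0 << 8) | (0x1 << 7) | (0x0 << 6) | (8 & 0x3F)
 * = 0x50088
 * i.e. extract 6 bytes (bytesm1 = 5) from header offset 0 into key byte
 * offset 8, with extraction enabled and per-flag extraction disabled.
 */
#include <linux/build_bug.h>

static inline void npc_kex_ld_cfg_check(void)
{
	BUILD_BUG_ON(KEX_LD_CFG(0x05, 0x0, 0x1, 0x0, NPC_KEXOF_DMAC) != 0x50088);
}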
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
new file mode 100644
index 000000000000..c85ee22b3d2b
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
@@ -0,0 +1,241 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell PTP driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "ptp.h"
+
+#define DRV_NAME "Marvell PTP Driver"
+
+#define PCI_DEVID_OCTEONTX2_PTP 0xA00C
+#define PCI_SUBSYS_DEVID_OCTX2_98xx_PTP 0xB100
+#define PCI_SUBSYS_DEVID_OCTX2_96XX_PTP 0xB200
+#define PCI_SUBSYS_DEVID_OCTX2_95XX_PTP 0xB300
+#define PCI_SUBSYS_DEVID_OCTX2_LOKI_PTP 0xB400
+#define PCI_DEVID_OCTEONTX2_RST 0xA085
+
+#define PCI_PTP_BAR_NO 0
+#define PCI_RST_BAR_NO 0
+
+#define PTP_CLOCK_CFG 0xF00ULL
+#define PTP_CLOCK_CFG_PTP_EN BIT(0)
+#define PTP_CLOCK_LO 0xF08ULL
+#define PTP_CLOCK_HI 0xF10ULL
+#define PTP_CLOCK_COMP 0xF18ULL
+
+#define RST_BOOT 0x1600ULL
+#define CLOCK_BASE_RATE 50000000ULL
+
+static u64 get_clock_rate(void)
+{
+ u64 ret = CLOCK_BASE_RATE * 16;
+ struct pci_dev *pdev;
+ void __iomem *base;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_RST, NULL);
+ if (!pdev)
+ goto error;
+
+ base = pci_ioremap_bar(pdev, PCI_RST_BAR_NO);
+ if (!base)
+ goto error_put_pdev;
+
+ ret = CLOCK_BASE_RATE * ((readq(base + RST_BOOT) >> 33) & 0x3f);
+
+ iounmap(base);
+
+error_put_pdev:
+ pci_dev_put(pdev);
+
+error:
+ return ret;
+}
+
+struct ptp *ptp_get(void)
+{
+ struct pci_dev *pdev;
+ struct ptp *ptp;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_PTP, NULL);
+ if (!pdev)
+ return ERR_PTR(-ENODEV);
+
+ ptp = pci_get_drvdata(pdev);
+ if (!ptp)
+ ptp = ERR_PTR(-EPROBE_DEFER);
+ if (IS_ERR(ptp))
+ pci_dev_put(pdev);
+
+ return ptp;
+}
+
+void ptp_put(struct ptp *ptp)
+{
+ if (!ptp)
+ return;
+
+ pci_dev_put(ptp->pdev);
+}
+
+int ptp_adjfine(struct ptp *ptp, long scaled_ppm)
+{
+ bool neg_adj = false;
+ u64 comp;
+ u64 adj;
+ s64 ppb;
+
+ if (scaled_ppm < 0) {
+ neg_adj = true;
+ scaled_ppm = -scaled_ppm;
+ }
+
+ /* The hardware adds the clock compensation value to the PTP clock
+ * on every coprocessor clock cycle. By convention it represents the
+ * number of nanoseconds between cycles, held as a 64-bit fixed-point
+ * value whose upper 32 bits are whole nanoseconds and whose lower
+ * 32 bits are fractions of a nanosecond.
+ * scaled_ppm is the requested frequency adjustment in parts per
+ * million, with a 16-bit fractional part; it is converted to parts
+ * per billion (ppb) below.
+ * The new compensation value is calculated with 64-bit fixed-point
+ * arithmetic using the formula
+ *   comp = tbase + tbase * scaled_ppm / (1M * 2^16)
+ * where tbase is the base compensation value computed at
+ * initialization (tbase = 1 / clock rate in Hz, i.e. one cycle
+ * period). The result is then written to the PTP compensation register.
+ */
+ comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
+ /* convert scaled_ppm to ppb */
+ ppb = 1 + scaled_ppm;
+ ppb *= 125;
+ ppb >>= 13;
+ adj = comp * ppb;
+ adj = div_u64(adj, 1000000000ull);
+ comp = neg_adj ? comp - adj : comp + adj;
+
+ writeq(comp, ptp->reg_base + PTP_CLOCK_COMP);
+
+ return 0;
+}
+
+static inline u64 get_tsc(bool is_pmu)
+{
+ return is_pmu ? read_sysreg(pmccntr_el0) : read_sysreg(cntvct_el0);
+}
+
+int ptp_get_clock(struct ptp *ptp, bool is_pmu, u64 *clk, u64 *tsc)
+{
+ u64 end, start;
+ u8 retries = 0;
+
+ do {
+ start = get_tsc(0);
+ *tsc = get_tsc(is_pmu);
+ *clk = readq(ptp->reg_base + PTP_CLOCK_HI);
+ end = get_tsc(0);
+ retries++;
+ } while (((end - start) > 50) && retries < 5);
+
+ return 0;
+}
+
+static int ptp_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ struct ptp *ptp;
+ u64 clock_comp;
+ u64 clock_cfg;
+ int err;
+
+ ptp = devm_kzalloc(dev, sizeof(*ptp), GFP_KERNEL);
+ if (!ptp) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ ptp->pdev = pdev;
+
+ err = pcim_enable_device(pdev);
+ if (err)
+ goto error_free;
+
+ err = pcim_iomap_regions(pdev, 1 << PCI_PTP_BAR_NO, pci_name(pdev));
+ if (err)
+ goto error_free;
+
+ ptp->reg_base = pcim_iomap_table(pdev)[PCI_PTP_BAR_NO];
+
+ ptp->clock_rate = get_clock_rate();
+
+ clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
+ clock_cfg |= PTP_CLOCK_CFG_PTP_EN;
+ writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
+
+ clock_comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
+ writeq(clock_comp, ptp->reg_base + PTP_CLOCK_COMP);
+
+ pci_set_drvdata(pdev, ptp);
+
+ return 0;
+
+error_free:
+ devm_kfree(dev, ptp);
+
+error:
+ /* For `ptp_get()` we need to differentiate between the case
+ * when the core has not tried to probe this device and the case when
+ * the probe failed. In the latter case we pretend that the
+ * initialization was successful and keep the error in
+ * `dev->driver_data`.
+ */
+ pci_set_drvdata(pdev, ERR_PTR(err));
+ return 0;
+}
+
+static void ptp_remove(struct pci_dev *pdev)
+{
+ struct ptp *ptp = pci_get_drvdata(pdev);
+ u64 clock_cfg;
+
+ if (IS_ERR_OR_NULL(ptp))
+ return;
+
+ clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
+ clock_cfg &= ~PTP_CLOCK_CFG_PTP_EN;
+ writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
+}
+
+static const struct pci_device_id ptp_id_table[] = {
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_OCTX2_98xx_PTP) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_OCTX2_96XX_PTP) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_OCTX2_95XX_PTP) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_OCTX2_LOKI_PTP) },
+ { 0, }
+};
+
+struct pci_driver ptp_driver = {
+ .name = DRV_NAME,
+ .id_table = ptp_id_table,
+ .probe = ptp_probe,
+ .remove = ptp_remove,
+};
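The fixed-point arithmetic in ptp_adjfine() can be sanity-checked outside the kernel. The sketch below is a simplified, standalone version: it assumes an example 100 MHz clock, and omits the neg_adj handling and the "1 +" offset used by the driver.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t clock_rate = 100000000ULL;	/* assumed 100 MHz clock  */
	int64_t scaled_ppm = 65536;		/* +1 ppm, i.e. ppm << 16 */

	/* base compensation: nanoseconds per cycle in 32.32 fixed point */
	uint64_t comp = (1000000000ULL << 32) / clock_rate;

	/* scaled_ppm -> ppb:  * 1000 / 2^16  ==  * 125 >> 13 */
	int64_t ppb = scaled_ppm * 125 >> 13;

	/* correct the compensation by ppb parts per billion */
	uint64_t adj = comp * (uint64_t)ppb / 1000000000ULL;

	/* prints comp = 0xa00000000 and roughly comp * (1 + 1e-6) */
	printf("comp=%#llx adjusted=%#llx\n",
	       (unsigned long long)comp, (unsigned long long)(comp + adj));
	return 0;
}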
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.h b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
new file mode 100644
index 000000000000..91a7b8c9c27b
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell PTP driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef PTP_H
+#define PTP_H
+
+#include <linux/timecounter.h>
+#include <linux/time64.h>
+#include <linux/spinlock.h>
+
+struct ptp {
+ struct pci_dev *pdev;
+ void __iomem *reg_base;
+ u32 clock_rate;
+};
+
+struct ptp *ptp_get(void);
+void ptp_put(struct ptp *ptp);
+
+int ptp_adjfine(struct ptp *ptp, long scaled_ppm);
+int ptp_get_clock(struct ptp *ptp, bool is_pmu, u64 *clki, u64 *tsc);
+
+extern struct pci_driver ptp_driver;
+
+#endif
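The intended usage pattern for this small interface (suggested by the PTP_OP mailbox messages handled by the AF driver) is to grab a reference to the PTP block once at probe time, forward adjfine/get-clock requests to it, and drop the reference at teardown. A hedged sketch; the rvu->ptp field name and the error handling here are assumptions, not the exact rvu.c code.

static int example_ptp_attach(struct rvu *rvu)
{
	struct ptp *ptp = ptp_get();

	if (IS_ERR(ptp))
		return PTR_ERR(ptp);	/* -ENODEV or -EPROBE_DEFER */

	rvu->ptp = ptp;
	return 0;
}

static void example_ptp_detach(struct rvu *rvu)
{
	ptp_put(rvu->ptp);
	rvu->ptp = NULL;
}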
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 557e4292c846..0ddb8f77cc17 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -18,6 +18,7 @@
#include "cgx.h"
#include "rvu.h"
#include "rvu_reg.h"
+#include "ptp.h"
#define DRV_NAME "octeontx2-af"
#define DRV_STRING "Marvell OcteonTX2 RVU Admin Function Driver"
@@ -54,6 +55,10 @@ static char *mkex_profile; /* MKEX profile name */
module_param(mkex_profile, charp, 0000);
MODULE_PARM_DESC(mkex_profile, "MKEX profile name string");
+static char *kpu_profile; /* KPU profile name */
+module_param(kpu_profile, charp, 0000);
+MODULE_PARM_DESC(kpu_profile, "KPU profile name string");
+
static void rvu_setup_hw_capabilities(struct rvu *rvu)
{
struct rvu_hwinfo *hw = rvu->hw;
@@ -64,14 +69,15 @@ static void rvu_setup_hw_capabilities(struct rvu *rvu)
hw->cap.nix_tx_link_bp = true;
hw->cap.nix_rx_multicast = true;
- if (is_rvu_96xx_B0(rvu)) {
+ if (is_rvu_96xx_B0(rvu) || is_rvu_95xx_A0(rvu) ||
+ is_rvu_post_96xx_C0(rvu) || is_rvu_95xx_B0(rvu)) {
hw->cap.nix_fixed_txschq_mapping = true;
hw->cap.nix_txsch_per_cgx_lmac = 4;
hw->cap.nix_txsch_per_lbk_lmac = 132;
hw->cap.nix_txsch_per_sdp_lmac = 76;
hw->cap.nix_shaping = false;
hw->cap.nix_tx_link_bp = false;
- if (is_rvu_96xx_A0(rvu))
+ if (is_rvu_96xx_A0(rvu) || is_rvu_95xx_A0(rvu))
hw->cap.nix_rx_multicast = false;
}
}
@@ -424,14 +430,14 @@ static void rvu_check_block_implemented(struct rvu *rvu)
static void rvu_setup_rvum_blk_revid(struct rvu *rvu)
{
rvu_write64(rvu, BLKADDR_RVUM,
- RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM),
- RVU_BLK_RVUM_REVID);
+ RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM),
+ RVU_BLK_RVUM_REVID);
}
static void rvu_clear_rvum_blk_revid(struct rvu *rvu)
{
rvu_write64(rvu, BLKADDR_RVUM,
- RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM), 0x00);
+ RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM), 0x00);
}
int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf)
@@ -653,6 +659,7 @@ static void rvu_free_hw_resources(struct rvu *rvu)
rvu_npa_freemem(rvu);
rvu_npc_freemem(rvu);
rvu_nix_freemem(rvu);
+ rvu_sso_freemem(rvu);
/* Free block LF bitmaps */
for (id = 0; id < BLK_COUNT; id++) {
@@ -660,7 +667,7 @@ static void rvu_free_hw_resources(struct rvu *rvu)
kfree(block->lf.bmap);
}
- /* Free MSIX bitmaps */
+ /* Free MSIX and TIM bitmaps */
for (id = 0; id < hw->total_pfs; id++) {
pfvf = &rvu->pf[id];
kfree(pfvf->msix.bmap);
@@ -705,6 +712,7 @@ static void rvu_setup_pfvf_macaddress(struct rvu *rvu)
} else {
eth_random_addr(pfvf->mac_addr);
}
+ ether_addr_copy(pfvf->default_mac, pfvf->mac_addr);
/* Assign MAC address to VFs */
rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
@@ -719,6 +727,7 @@ static void rvu_setup_pfvf_macaddress(struct rvu *rvu)
} else {
eth_random_addr(pfvf->mac_addr);
}
+ ether_addr_copy(pfvf->default_mac, pfvf->mac_addr);
}
}
}
@@ -909,7 +918,9 @@ init:
mutex_init(&rvu->rsrc_lock);
- rvu_fwdata_init(rvu);
+ err = rvu_fwdata_init(rvu);
+ if (err)
+ goto msix_err;
err = rvu_setup_msix_resources(rvu);
if (err)
@@ -953,8 +964,26 @@ init:
if (err)
goto nix_err;
+ err = rvu_sso_init(rvu);
+ if (err)
+ goto sso_err;
+
+ err = rvu_tim_init(rvu);
+ if (err)
+ goto sso_err;
+
+ err = rvu_cpt_init(rvu);
+ if (err)
+ goto sso_err;
+
+ err = rvu_sdp_init(rvu);
+ if (err)
+ goto sso_err;
+
return 0;
+sso_err:
+ rvu_sso_freemem(rvu);
nix_err:
rvu_nix_freemem(rvu);
npa_err:
@@ -1022,7 +1051,7 @@ int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req,
/* Get current count of a RVU block's LF/slots
* provisioned to a given RVU func.
*/
-static u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blktype)
+u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blktype)
{
switch (blktype) {
case BLKTYPE_NPA:
@@ -1051,7 +1080,7 @@ bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype)
pfvf = rvu_get_pfvf(rvu, pcifunc);
/* Check if this PFFUNC has a LF of type blktype attached */
- if (!rvu_get_rsrc_mapcount(pfvf, blktype))
+ if (blktype != BLKTYPE_SSO && !rvu_get_rsrc_mapcount(pfvf, blktype))
return false;
return true;
@@ -1062,6 +1091,9 @@ static int rvu_lookup_rsrc(struct rvu *rvu, struct rvu_block *block,
{
u64 val;
+ if (block->type == BLKTYPE_TIM)
+ return rvu_tim_lookup_rsrc(rvu, block, pcifunc, slot);
+
val = ((u64)pcifunc << 24) | (slot << 16) | (1ULL << 13);
rvu_write64(rvu, block->addr, block->lookup_reg, val);
/* Wait for the lookup to finish */
@@ -1090,6 +1122,9 @@ static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype)
if (blkaddr < 0)
return;
+ if (blkaddr == BLKADDR_NIX0)
+ rvu_nix_reset_mac(pfvf, pcifunc);
+
block = &hw->block[blkaddr];
num_lfs = rvu_get_rsrc_mapcount(pfvf, block->type);
@@ -1212,6 +1247,12 @@ static int rvu_check_rsrc_availability(struct rvu *rvu,
struct rvu_block *block;
int free_lfs, mappedlfs;
+ if (rvu_check_rsrc_policy(rvu, req, pcifunc)) {
+ dev_err(rvu->dev, "Func 0x%x: Resource policy check failed\n",
+ pcifunc);
+ return -EINVAL;
+ }
+
/* Only one NPA LF can be attached */
if (req->npalf && !rvu_get_rsrc_mapcount(pfvf, BLKTYPE_NPA)) {
block = &hw->block[BLKADDR_NPA];
@@ -1326,12 +1367,12 @@ int rvu_mbox_handler_attach_resources(struct rvu *rvu,
goto exit;
/* Now attach the requested resources */
- if (attach->npalf)
- rvu_attach_block(rvu, pcifunc, BLKTYPE_NPA, 1);
-
if (attach->nixlf)
rvu_attach_block(rvu, pcifunc, BLKTYPE_NIX, 1);
+ if (attach->npalf)
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_NPA, 1);
+
if (attach->sso) {
/* RVU func doesn't know which exact LF or slot is attached
* to it, it always sees as slot 0,1,2. So for a 'modify'
@@ -1499,6 +1540,13 @@ int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req,
return 0;
}
+int rvu_ndc_sync(struct rvu *rvu, int lfblkaddr, int lfidx, u64 lfoffset)
+{
+ /* Sync cached info for this LF in NDC to LLC/DRAM */
+ rvu_write64(rvu, lfblkaddr, lfoffset, BIT_ULL(12) | lfidx);
+ return rvu_poll_reg(rvu, lfblkaddr, lfoffset, BIT_ULL(12), true);
+}
+
int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req,
struct get_hw_cap_rsp *rsp)
{
@@ -1510,6 +1558,65 @@ int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req,
return 0;
}
+int rvu_mbox_handler_ndc_sync_op(struct rvu *rvu,
+ struct ndc_sync_op *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int err, lfidx, lfblkaddr;
+
+ if (req->npa_lf_sync) {
+ /* Get NPA LF data */
+ lfblkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
+ if (lfblkaddr < 0)
+ return NPA_AF_ERR_AF_LF_INVALID;
+
+ lfidx = rvu_get_lf(rvu, &hw->block[lfblkaddr], pcifunc, 0);
+ if (lfidx < 0)
+ return NPA_AF_ERR_AF_LF_INVALID;
+
+ /* Sync NPA NDC */
+ err = rvu_ndc_sync(rvu, lfblkaddr,
+ lfidx, NPA_AF_NDC_SYNC);
+ if (err)
+ dev_err(rvu->dev,
+ "NDC-NPA sync failed for LF %u\n", lfidx);
+ }
+
+ if (!req->nix_lf_tx_sync && !req->nix_lf_rx_sync)
+ return 0;
+
+ /* Get NIX LF data */
+ lfblkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (lfblkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ lfidx = rvu_get_lf(rvu, &hw->block[lfblkaddr], pcifunc, 0);
+ if (lfidx < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ if (req->nix_lf_tx_sync) {
+ /* Sync NIX TX NDC */
+ err = rvu_ndc_sync(rvu, lfblkaddr,
+ lfidx, NIX_AF_NDC_TX_SYNC);
+ if (err)
+ dev_err(rvu->dev,
+ "NDC-NIX-TX sync failed for LF %u\n", lfidx);
+ }
+
+ if (req->nix_lf_rx_sync) {
+ /* Sync NIX RX NDC */
+ err = rvu_ndc_sync(rvu, lfblkaddr,
+ lfidx, NIX_AF_NDC_RX_SYNC);
+ if (err)
+ dev_err(rvu->dev,
+ "NDC-NIX-RX sync failed for LF %u\n", lfidx);
+ }
+
+ return 0;
+}
+
static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid,
struct mbox_msghdr *req)
{
@@ -1916,6 +2023,94 @@ static void rvu_enable_mbox_intr(struct rvu *rvu)
INTR_MASK(hw->total_pfs) & ~1ULL);
}
+static void rvu_npa_lf_mapped_nix_lf_teardown(struct rvu *rvu, u16 pcifunc)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *nix_block;
+ struct rsrc_detach detach;
+ u16 nix_pcifunc;
+ int blkaddr, lf;
+ u64 regval;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return;
+
+ nix_block = &hw->block[blkaddr];
+ for (lf = 0; lf < nix_block->lf.max; lf++) {
+ /* Loop through all the NIX LFs and check whether the NPA LF
+ * identified by pcifunc is in use by this NIX LF.
+ */
+ regval = rvu_read64(rvu, blkaddr, NIX_AF_LFX_CFG(lf));
+ if ((regval & 0xFFFF) != pcifunc)
+ continue;
+
+ nix_pcifunc = nix_block->fn_map[lf];
+
+ /* Skip NIX LF attached to the pcifunc as it is already
+ * quiesced.
+ */
+ if (nix_pcifunc == pcifunc)
+ continue;
+
+ detach.partial = true;
+ detach.nixlf = true;
+ /* Teardown the NIX LF. */
+ rvu_nix_lf_teardown(rvu, nix_pcifunc, blkaddr, lf);
+ rvu_lf_reset(rvu, nix_block, lf);
+ /* Detach the NIX LF. */
+ rvu_detach_rsrcs(rvu, &detach, nix_pcifunc);
+ }
+}
+
+static void rvu_npa_lf_mapped_sso_lf_teardown(struct rvu *rvu, u16 pcifunc)
+{
+ u16 *pcifunc_arr;
+ u16 sso_pcifunc, match_cnt = 0;
+ struct rvu_block *sso_block;
+ struct rsrc_detach detach;
+ int blkaddr, lf;
+ u64 regval;
+ size_t len;
+
+ len = sizeof(*pcifunc_arr) * (rvu->hw->total_pfs + rvu->hw->total_vfs);
+ pcifunc_arr = kmalloc(len, GFP_KERNEL);
+ if (!pcifunc_arr)
+ return;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0) {
+ kfree(pcifunc_arr);
+ return;
+ }
+
+ sso_block = &rvu->hw->block[blkaddr];
+ for (lf = 0; lf < sso_block->lf.max; lf++) {
+ regval = rvu_read64(rvu, blkaddr, SSO_AF_XAQX_GMCTL(lf));
+ if ((regval & 0xFFFF) != pcifunc)
+ continue;
+
+ sso_pcifunc = sso_block->fn_map[lf];
+ regval = rvu_read64(rvu, blkaddr, sso_block->lfcfg_reg |
+ (lf << sso_block->lfshift));
+ /* Save SSO PF_FUNC info to detach all LFs of that PF_FUNC at
+ * once later.
+ */
+ rvu_sso_lf_teardown(rvu, sso_pcifunc, lf, regval & 0xF);
+ rvu_lf_reset(rvu, sso_block, lf);
+ pcifunc_arr[match_cnt] = sso_pcifunc;
+ match_cnt++;
+ }
+
+ detach.partial = true;
+ detach.sso = true;
+
+ for (sso_pcifunc = 0; sso_pcifunc < match_cnt; sso_pcifunc++)
+ rvu_detach_rsrcs(rvu, &detach, pcifunc_arr[sso_pcifunc]);
+
+ kfree(pcifunc_arr);
+}
+
static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr)
{
struct rvu_block *block;
@@ -1935,14 +2130,36 @@ static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr)
/* Cleanup LF and reset it */
if (block->addr == BLKADDR_NIX0)
rvu_nix_lf_teardown(rvu, pcifunc, block->addr, lf);
- else if (block->addr == BLKADDR_NPA)
+ else if (block->addr == BLKADDR_NPA) {
+ rvu_npa_lf_mapped_nix_lf_teardown(rvu, pcifunc);
+ rvu_npa_lf_mapped_sso_lf_teardown(rvu, pcifunc);
rvu_npa_lf_teardown(rvu, pcifunc, lf);
+ } else if (block->addr == BLKADDR_SSO)
+ rvu_sso_lf_teardown(rvu, pcifunc, lf, slot);
+ else if (block->addr == BLKADDR_SSOW)
+ rvu_ssow_lf_teardown(rvu, pcifunc, lf, slot);
+ else if (block->addr == BLKADDR_TIM)
+ rvu_tim_lf_teardown(rvu, pcifunc, lf, slot);
err = rvu_lf_reset(rvu, block, lf);
if (err) {
dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n",
block->addr, lf);
}
+
+ if (block->addr == BLKADDR_SSO)
+ rvu_sso_hwgrp_config_thresh(rvu, block->addr, lf);
+ }
+}
+
+static void rvu_sso_pfvf_rst(struct rvu *rvu, u16 pcifunc)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ struct rvu_hwinfo *hw = rvu->hw;
+
+ if (pfvf->sso_uniq_ident) {
+ rvu_free_rsrc(&hw->sso.pfvf_ident, pfvf->sso_uniq_ident);
+ pfvf->sso_uniq_ident = 0;
}
}
@@ -1961,6 +2178,7 @@ static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSO);
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NPA);
rvu_detach_rsrcs(rvu, NULL, pcifunc);
+ rvu_sso_pfvf_rst(rvu, pcifunc);
mutex_unlock(&rvu->flr_lock);
}
@@ -2130,6 +2348,11 @@ static void rvu_unregister_interrupts(struct rvu *rvu)
{
int irq;
+ rvu_npa_unregister_interrupts(rvu);
+ rvu_nix_unregister_interrupts(rvu);
+ rvu_sso_unregister_interrupts(rvu);
+ rvu_cpt_unregister_interrupts(rvu);
+
/* Disable the Mbox interrupt */
rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C,
INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
@@ -2206,6 +2429,10 @@ static int rvu_register_interrupts(struct rvu *rvu)
rvu->irq_allocated[RVU_AF_INT_VEC_MBOX] = true;
+ /* Clear TRPEND bit for all PFs */
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_AF_PFTRPEND, INTR_MASK(rvu->hw->total_pfs));
+
/* Enable mailbox interrupts from all PFs */
rvu_enable_mbox_intr(rvu);
@@ -2337,8 +2564,24 @@ static int rvu_register_interrupts(struct rvu *rvu)
goto fail;
}
rvu->irq_allocated[offset] = true;
- return 0;
+ ret = rvu_npa_register_interrupts(rvu);
+ if (ret)
+ goto fail;
+
+ ret = rvu_nix_register_interrupts(rvu);
+ if (ret)
+ goto fail;
+
+ ret = rvu_sso_register_interrupts(rvu);
+ if (ret)
+ goto fail;
+
+ ret = rvu_cpt_register_interrupts(rvu);
+ if (ret)
+ goto fail;
+
+ return 0;
fail:
rvu_unregister_interrupts(rvu);
return ret;
@@ -2437,7 +2680,7 @@ static void rvu_enable_afvf_intr(struct rvu *rvu)
#define PCI_DEVID_OCTEONTX2_LBK 0xA061
-static int lbk_get_num_chans(void)
+int rvu_get_num_lbk_chans(void)
{
struct pci_dev *pdev;
void __iomem *base;
@@ -2472,7 +2715,7 @@ static int rvu_enable_sriov(struct rvu *rvu)
return 0;
}
- chans = lbk_get_num_chans();
+ chans = rvu_get_num_lbk_chans();
if (chans < 0)
return chans;
@@ -2523,6 +2766,8 @@ static void rvu_update_module_params(struct rvu *rvu)
strscpy(rvu->mkex_pfl_name,
mkex_profile ? mkex_profile : default_pfl_name, MKEX_NAME_LEN);
+ strscpy(rvu->kpu_pfl_name,
+ kpu_profile ? kpu_profile : default_pfl_name, KPU_NAME_LEN);
}
static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -2564,6 +2809,18 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
pci_set_master(pdev);
+ rvu->ptp = ptp_get();
+ if (IS_ERR(rvu->ptp)) {
+ err = PTR_ERR(rvu->ptp);
+ if (err == -EPROBE_DEFER) {
+ dev_err(dev,
+ "PTP driver not loaded, deferring probe\n");
+ goto err_release_regions;
+ }
+ rvu->ptp = NULL;
+ }
+
+ pci_set_master(pdev);
/* Map Admin function CSRs */
rvu->afreg_base = pcim_iomap(pdev, PCI_AF_REG_BAR_NUM, 0);
@@ -2571,7 +2828,7 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (!rvu->afreg_base || !rvu->pfreg_base) {
dev_err(dev, "Unable to map admin function CSRs, aborting\n");
err = -ENOMEM;
- goto err_release_regions;
+ goto err_put_ptp;
}
/* Store module params in rvu structure */
@@ -2586,7 +2843,7 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = rvu_setup_hw_resources(rvu);
if (err)
- goto err_release_regions;
+ goto err_put_ptp;
/* Init mailbox btw AF and PFs */
err = rvu_mbox_init(rvu, &rvu->afpf_wq_info, TYPE_AFPF,
@@ -2604,16 +2861,22 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_flr;
rvu_setup_rvum_blk_revid(rvu);
-
- /* Enable AF's VFs (if any) */
- err = rvu_enable_sriov(rvu);
+ err = rvu_policy_init(rvu);
if (err)
goto err_irq;
/* Initialize debugfs */
rvu_dbg_init(rvu);
+ /* Enable AF's VFs (if any) */
+ err = rvu_enable_sriov(rvu);
+ if (err)
+ goto err_policy;
+
return 0;
+
+err_policy:
+ rvu_policy_destroy(rvu);
err_irq:
rvu_unregister_interrupts(rvu);
err_flr:
@@ -2626,6 +2889,8 @@ err_hwsetup:
rvu_reset_all_blocks(rvu);
rvu_free_hw_resources(rvu);
rvu_clear_rvum_blk_revid(rvu);
+err_put_ptp:
+ ptp_put(rvu->ptp);
err_release_regions:
pci_release_regions(pdev);
err_disable_device:
@@ -2642,6 +2907,7 @@ static void rvu_remove(struct pci_dev *pdev)
struct rvu *rvu = pci_get_drvdata(pdev);
rvu_dbg_exit(rvu);
+ rvu_policy_destroy(rvu);
rvu_unregister_interrupts(rvu);
rvu_flr_wq_destroy(rvu);
rvu_cgx_exit(rvu);
@@ -2651,6 +2917,7 @@ static void rvu_remove(struct pci_dev *pdev)
rvu_reset_all_blocks(rvu);
rvu_free_hw_resources(rvu);
rvu_clear_rvum_blk_revid(rvu);
+ ptp_put(rvu->ptp);
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
@@ -2676,9 +2943,19 @@ static int __init rvu_init_module(void)
if (err < 0)
return err;
+ err = pci_register_driver(&ptp_driver);
+ if (err < 0)
+ goto ptp_err;
+
err = pci_register_driver(&rvu_driver);
if (err < 0)
- pci_unregister_driver(&cgx_driver);
+ goto rvu_err;
+
+ return 0;
+rvu_err:
+ pci_unregister_driver(&ptp_driver);
+ptp_err:
+ pci_unregister_driver(&cgx_driver);
return err;
}
@@ -2686,6 +2963,7 @@ static int __init rvu_init_module(void)
static void __exit rvu_cleanup_module(void)
{
pci_unregister_driver(&rvu_driver);
+ pci_unregister_driver(&ptp_driver);
pci_unregister_driver(&cgx_driver);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index dcf25a092008..4308ddace641 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -15,11 +15,14 @@
#include "rvu_struct.h"
#include "common.h"
#include "mbox.h"
+#include "npc.h"
+#include "rvu_validation.h"
/* PCI device IDs */
#define PCI_DEVID_OCTEONTX2_RVU_AF 0xA065
/* Subsystem Device ID */
+#define PCI_SUBSYS_DEVID_98XX 0xB100
#define PCI_SUBSYS_DEVID_96XX 0xB200
/* PCI BAR nos */
@@ -42,6 +45,10 @@ struct dump_ctx {
bool all;
};
+struct cpt_dump_ctx {
+ char e_type[NAME_SIZE];
+};
+
struct rvu_debugfs {
struct dentry *root;
struct dentry *cgx_root;
@@ -50,11 +57,16 @@ struct rvu_debugfs {
struct dentry *npa;
struct dentry *nix;
struct dentry *npc;
+ struct dentry *sso;
+ struct dentry *sso_hwgrp;
+ struct dentry *sso_hws;
+ struct dentry *cpt;
struct dump_ctx npa_aura_ctx;
struct dump_ctx npa_pool_ctx;
struct dump_ctx nix_cq_ctx;
struct dump_ctx nix_rq_ctx;
struct dump_ctx nix_sq_ctx;
+ struct cpt_dump_ctx cpt_ctx;
int npa_qsize_id;
int nix_qsize_id;
};
@@ -104,6 +116,68 @@ struct nix_mce_list {
int max;
};
+/* list of known and supported fields in packet header and
+ * fields present in key structure.
+ */
+enum key_fields {
+ NPC_CHAN = NPC_HEADER_FIELDS_MAX, /* Valid when Rx */
+ NPC_PF_FUNC, /* Valid when Tx */
+ NPC_ERRLEV,
+ NPC_ERRCODE,
+ NPC_LXMB,
+ NPC_LA,
+ NPC_LB,
+ NPC_LC,
+ NPC_LD,
+ NPC_LE,
+ NPC_LF,
+ NPC_LG,
+ NPC_LH,
+ /* ether type for untagged frame */
+ NPC_ETYPE_ETHER,
+ /* ether type for single tagged frame */
+ NPC_ETYPE_TAG1,
+ /* ether type for double tagged frame */
+ NPC_ETYPE_TAG2,
+ /* outer vlan tci for single tagged frame */
+ NPC_VLAN_TAG1,
+ /* outer vlan tci for double tagged frame */
+ NPC_VLAN_TAG2,
+ /* other header fields programmed for extraction but not of interest to us */
+ NPC_UNKNOWN,
+ NPC_KEY_FIELDS_MAX,
+};
+
+/* layer meta data to uniquely identify a packet header field */
+struct npc_layer_mdata {
+ u8 lid;
+ u8 ltype;
+ u8 hdr;
+ u8 key;
+ u8 len;
+};
+
+/* Structure to represent a field present in the
+ * generated key. A key field may be present anywhere and can
+ * be of any size in the generated key. Once this structure
+ * is populated for the fields of interest, a field's presence
+ * and location (if present) can be determined.
+ */
+struct npc_key_field {
+ /* Masks where all set bits indicate position
+ * of a field in the key
+ */
+ u64 kw_mask[NPC_MAX_KWS_IN_KEY];
+ /* Number of words in the key a field spans. If a field is
+ * of 16 bytes and key offset is 4 then the field will use
+ * 4 bytes in KW0, 8 bytes in KW1 and 4 bytes in KW2 and
+ * nr_kws will be 3(KW0, KW1 and KW2).
+ * nr_kws will be 3 (KW0, KW1 and KW2).
+ int nr_kws;
+ /* used by packet header fields */
+ struct npc_layer_mdata layer_mdata;
+};
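The worked example in the comment above (a 16-byte field at key offset 4 spanning KW0..KW2) follows from treating the generated key as consecutive 8-byte words. A small standalone sketch of that arithmetic, with hypothetical helper names:

/* Number of 8-byte key words spanned by a field at the given byte
 * offset and length in the generated key. field_nr_kws(4, 16) == 3,
 * matching the KW0/KW1/KW2 example above.
 */
#include <stdio.h>

static int field_nr_kws(unsigned int key_off, unsigned int field_len)
{
	unsigned int first_kw = key_off / 8;
	unsigned int last_kw = (key_off + field_len - 1) / 8;

	return last_kw - first_kw + 1;
}

int main(void)
{
	printf("nr_kws = %d\n", field_nr_kws(4, 16));	/* prints 3 */
	return 0;
}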
+
struct npc_mcam {
struct rsrc_bmap counters;
struct mutex lock; /* MCAM entries and counters update lock */
@@ -127,6 +201,40 @@ struct npc_mcam {
u16 hprio_count;
u16 hprio_end;
u16 rx_miss_act_cntr; /* Counter for RX MISS action */
+ /* fields present in the generated key */
+ struct npc_key_field tx_key_fields[NPC_KEY_FIELDS_MAX];
+ struct npc_key_field rx_key_fields[NPC_KEY_FIELDS_MAX];
+ u64 tx_features;
+ u64 rx_features;
+ struct list_head mcam_rules;
+};
+
+/* KPU profile adapter structure which is used to translate between built-in and
+ * firmware KPU profile structures.
+ */
+struct npc_kpu_profile_adapter {
+ const char *name;
+ u64 version;
+ struct npc_lt_def_cfg *lt_def;
+ struct npc_kpu_profile_action *ikpu; /* array[pkinds] */
+ struct npc_kpu_profile *kpu; /* array[kpus] */
+ struct npc_mcam_kex *mkex;
+ bool custom; /* true if loadable profile used */
+ size_t pkinds;
+ size_t kpus;
+};
+
+struct sso_rsrc {
+ u8 sso_hws;
+ u16 sso_hwgrps;
+ u16 sso_xaq_num_works;
+ u16 sso_xaq_buf_size;
+ u16 sso_iue;
+ u64 iaq_rsvd;
+ u64 iaq_max;
+ u64 taq_rsvd;
+ u64 taq_max;
+ struct rsrc_bmap pfvf_ident;
};
/* Structure for per RVU func info ie PF/VF */
@@ -138,6 +246,7 @@ struct rvu_pfvf {
u16 cptlfs;
u16 timlfs;
u8 cgx_lmac;
+ u8 sso_uniq_ident;
/* Block LF's MSIX vector info */
struct rsrc_bmap msix; /* Bitmap for MSIX vector alloc */
@@ -169,19 +278,23 @@ struct rvu_pfvf {
u16 maxlen;
u16 minlen;
+ bool hw_rx_tstamp_en; /* Is rx_tstamp enabled */
+ u8 pf_set_vf_cfg;
u8 mac_addr[ETH_ALEN]; /* MAC address of this PF/VF */
+ u8 default_mac[ETH_ALEN]; /* MAC address from FWdata */
/* Broadcast pkt replication info */
u16 bcast_mce_idx;
struct nix_mce_list bcast_mce_list;
- /* VLAN offload */
- struct mcam_entry entry;
- int rxvlan_index;
- bool rxvlan;
+ /* For resource limits */
+ struct pci_dev *pdev;
+ struct kobject *limits_kobj;
bool cgx_in_use; /* this PF/VF using CGX? */
int cgx_users; /* number of cgx users - used only by PFs */
+
+ struct rvu_npc_mcam_rule *def_rule;
};
struct nix_txsch {
@@ -218,12 +331,21 @@ struct nix_lso {
u8 in_use;
};
+struct nix_txvlan {
+#define NIX_TX_VTAG_DEF_MAX 0x400
+ struct rsrc_bmap rsrc;
+ u16 *entry2pfvf_map;
+ struct mutex rsrc_lock; /* Serialize resource alloc/free */
+};
+
struct nix_hw {
struct nix_txsch txsch[NIX_TXSCH_LVL_CNT]; /* Tx schedulers */
struct nix_mcast mcast;
struct nix_flowkey flowkey;
struct nix_mark_format mark_format;
struct nix_lso lso;
+ struct nix_txvlan txvlan;
+ void *tx_stall;
};
/* RVU block's capabilities or functionality,
@@ -257,6 +379,7 @@ struct rvu_hwinfo {
struct nix_hw *nix0;
struct npc_pkind pkind;
struct npc_mcam mcam;
+ struct sso_rsrc sso;
};
struct mbox_wq_info {
@@ -280,6 +403,9 @@ struct rvu_fwdata {
#define VF_MACNUM_MAX 256
u64 pf_macs[PF_MACNUM_MAX];
u64 vf_macs[VF_MACNUM_MAX];
+#define CGX_MAX 3
+#define CGX_LMACS_MAX 4
+ struct cgx_lmac_fwdata_s cgx_fw_data[CGX_MAX][CGX_LMACS_MAX];
u64 sclk;
u64 rclk;
u64 mcam_addr;
@@ -289,6 +415,8 @@ struct rvu_fwdata {
u64 reserved[FWDATA_RESERVED_MEM];
};
+struct ptp;
+
struct rvu {
void __iomem *afreg_base;
void __iomem *pfreg_base;
@@ -297,6 +425,7 @@ struct rvu {
struct rvu_hwinfo *hw;
struct rvu_pfvf *pf;
struct rvu_pfvf *hwvf;
+ struct rvu_limits pf_limits;
struct mutex rsrc_lock; /* Serialize resource alloc/free */
int vfs; /* Number of VFs attached to RVU */
@@ -333,9 +462,17 @@ struct rvu {
struct mutex cgx_cfg_lock; /* serialize cgx configuration */
char mkex_pfl_name[MKEX_NAME_LEN]; /* Configured MKEX profile name */
+ char kpu_pfl_name[KPU_NAME_LEN]; /* Configured KPU profile name */
/* Firmware data */
struct rvu_fwdata *fwdata;
+ void *kpu_fwdata;
+ size_t kpu_fwdata_sz;
+
+ /* NPC KPU data */
+ struct npc_kpu_profile_adapter kpu;
+
+ struct ptp *ptp;
#ifdef CONFIG_DEBUG_FS
struct rvu_debugfs rvu_dbg;
@@ -363,20 +500,46 @@ static inline u64 rvupf_read64(struct rvu *rvu, u64 offset)
}
/* Silicon revisions */
+
+static inline bool is_rvu_post_96xx_C0(struct rvu *rvu)
+{
+ struct pci_dev *pdev = rvu->pdev;
+
+ return (pdev->revision == 0x08) || (pdev->revision == 0x30);
+}
+
static inline bool is_rvu_96xx_A0(struct rvu *rvu)
{
struct pci_dev *pdev = rvu->pdev;
- return (pdev->revision == 0x00) &&
- (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX);
+ return (pdev->revision == 0x00);
}
static inline bool is_rvu_96xx_B0(struct rvu *rvu)
{
struct pci_dev *pdev = rvu->pdev;
- return ((pdev->revision == 0x00) || (pdev->revision == 0x01)) &&
- (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX);
+ return (pdev->revision == 0x00) || (pdev->revision == 0x01);
+}
+
+static inline bool is_rvu_95xx_B0(struct rvu *rvu)
+{
+ struct pci_dev *pdev = rvu->pdev;
+
+ return (pdev->revision == 0x14);
+}
+
+static inline bool is_rvu_95xx_A0(struct rvu *rvu)
+{
+ struct pci_dev *pdev = rvu->pdev;
+
+ return (pdev->revision == 0x10) || (pdev->revision == 0x11);
+}
+
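+/* CGX instance 0 is treated as NIX-mapped on all parts; CGX instances
+ * other than 0 are treated as NIX-mapped only on 96xx and 98xx
+ * subsystem IDs, per the check below.
+ */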
+static inline bool is_cgx_mapped_to_nix(unsigned short id, u8 cgx_id)
+{
+ return !(cgx_id && !(id == PCI_SUBSYS_DEVID_96XX ||
+ id == PCI_SUBSYS_DEVID_98XX));
}
/* Function Prototypes
@@ -399,6 +562,7 @@ void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id);
int rvu_rsrc_free_count(struct rsrc_bmap *rsrc);
int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc);
bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc);
+u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blktype);
int rvu_get_pf(u16 pcifunc);
struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc);
void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf);
@@ -408,6 +572,9 @@ int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot);
int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf);
int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc);
int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero);
+u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blkid);
+int rvu_get_num_lbk_chans(void);
+int rvu_ndc_sync(struct rvu *rvu, int lfblkid, int lfidx, u64 lfoffset);
/* RVU HW reg validation */
enum regmap_block {
@@ -444,15 +611,30 @@ int rvu_cgx_exit(struct rvu *rvu);
void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu);
int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start);
void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable);
+void rvu_cgx_enadis_higig2(struct rvu *rvu, int pf, bool enable);
+bool rvu_cgx_is_higig2_enabled(struct rvu *rvu, int pf);
int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start);
int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id, int index,
int rxtxflag, u64 *stat);
+void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc);
+bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc);
+/* SSO APIs */
+int rvu_sso_init(struct rvu *rvu);
+void rvu_sso_freemem(struct rvu *rvu);
+int rvu_sso_register_interrupts(struct rvu *rvu);
+void rvu_sso_unregister_interrupts(struct rvu *rvu);
+int rvu_sso_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot_id);
+int rvu_ssow_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot_id);
+void rvu_sso_hwgrp_config_thresh(struct rvu *rvu, int blkaddr, int lf);
+
/* NPA APIs */
int rvu_npa_init(struct rvu *rvu);
void rvu_npa_freemem(struct rvu *rvu);
void rvu_npa_lf_teardown(struct rvu *rvu, u16 pcifunc, int npalf);
int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
struct npa_aq_enq_rsp *rsp);
+int rvu_npa_register_interrupts(struct rvu *rvu);
+void rvu_npa_unregister_interrupts(struct rvu *rvu);
/* NIX APIs */
bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc);
@@ -463,12 +645,17 @@ void rvu_nix_freemem(struct rvu *rvu);
int rvu_get_nixlf_count(struct rvu *rvu);
void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int npalf);
int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr);
+int rvu_nix_register_interrupts(struct rvu *rvu);
+void rvu_nix_unregister_interrupts(struct rvu *rvu);
+void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc);
+bool rvu_nix_is_ptp_tx_enabled(struct rvu *rvu, u16 pcifunc);
/* NPC APIs */
int rvu_npc_init(struct rvu *rvu);
void rvu_npc_freemem(struct rvu *rvu);
int rvu_npc_get_pkind(struct rvu *rvu, u16 pf);
void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf);
+int npc_config_ts_kpuaction(struct rvu *rvu, int pf, u16 pcifunc, bool en);
void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, u64 chan, u8 *mac_addr);
void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
@@ -478,8 +665,8 @@ void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, u64 chan);
void rvu_npc_disable_bcast_entry(struct rvu *rvu, u16 pcifunc);
-int rvu_npc_update_rxvlan(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
+void rvu_npc_free_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
@@ -498,4 +685,48 @@ void rvu_dbg_exit(struct rvu *rvu);
static inline void rvu_dbg_init(struct rvu *rvu) {}
static inline void rvu_dbg_exit(struct rvu *rvu) {}
#endif
+
+int npc_flow_steering_init(struct rvu *rvu, int blkaddr);
+const char *npc_get_field_name(u8 hdr);
+bool rvu_npc_write_default_rule(struct rvu *rvu, int blkaddr, int nixlf,
+ u16 pcifunc, u8 intf, struct mcam_entry *entry,
+ int *entry_index);
+int npc_mcam_verify_channel(struct rvu *rvu, u16 pcifunc, u8 intf, u16 channel);
+int npc_get_bank(struct npc_mcam *mcam, int index);
+void npc_mcam_enable_flows(struct rvu *rvu, u16 target);
+void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index, bool enable);
+
+/* CPT APIs */
+int rvu_cpt_init(struct rvu *rvu);
+int rvu_cpt_register_interrupts(struct rvu *rvu);
+void rvu_cpt_unregister_interrupts(struct rvu *rvu);
+
+/* TIM APIs */
+int rvu_tim_init(struct rvu *rvu);
+int rvu_tim_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot);
+
+/* SDP APIs */
+int rvu_sdp_init(struct rvu *rvu);
+bool is_sdp_pf(u16 pcifunc);
+
+/* HW workarounds/fixes */
+#include "npc.h"
+void rvu_nix_txsch_lock(struct nix_hw *nix_hw);
+void rvu_nix_txsch_unlock(struct nix_hw *nix_hw);
+void rvu_nix_update_link_credits(struct rvu *rvu, int blkaddr,
+ int link, u64 ncredits);
+
+void rvu_nix_update_sq_smq_mapping(struct rvu *rvu, int blkaddr, int nixlf,
+ u16 sq, u16 smq);
+void rvu_nix_txsch_config_changed(struct nix_hw *nix_hw);
+ssize_t rvu_nix_get_tx_stall_counters(struct rvu *rvu,
+ char __user *buffer, loff_t *ppos);
+int rvu_nix_fixes_init(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr);
+void rvu_nix_fixes_exit(struct rvu *rvu, struct nix_hw *nix_hw);
+int rvu_tim_lookup_rsrc(struct rvu *rvu, struct rvu_block *block,
+ u16 pcifunc, int slot);
+int rvu_npc_get_tx_nibble_cfg(struct rvu *rvu, u64 nibble_ena);
+bool is_parse_nibble_config_valid(struct rvu *rvu,
+ struct npc_mcam_kex *mcam_kex);
#endif /* RVU_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index f3c82e489897..8e8b44c7f42b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -185,12 +185,8 @@ static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
clear_bit(pfid, &pfmap);
/* check if notification is enabled */
- if (!test_bit(pfid, &rvu->pf_notify_bmap)) {
- dev_info(rvu->dev, "cgx %d: lmac %d Link status %s\n",
- event->cgx_id, event->lmac_id,
- linfo->link_up ? "UP" : "DOWN");
+ if (!test_bit(pfid, &rvu->pf_notify_bmap))
continue;
- }
/* Send mbox message to PF */
msg = otx2_mbox_alloc_msg_cgx_link_event(rvu, pfid);
@@ -286,7 +282,7 @@ int rvu_cgx_init(struct rvu *rvu)
rvu->cgx_cnt_max = cgx_get_cgxcnt_max();
if (!rvu->cgx_cnt_max) {
dev_info(rvu->dev, "No CGX devices found!\n");
- return -ENODEV;
+ return 0;
}
rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt_max *
@@ -354,7 +350,7 @@ int rvu_cgx_exit(struct rvu *rvu)
* VF's of mapped PF and other PFs are not allowed. This fn() checks
* whether a PFFUNC is permitted to do the config or not.
*/
-static bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc)
+inline bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc)
{
if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
@@ -380,6 +376,33 @@ void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable)
cgx_lmac_enadis_rx_pause_fwding(cgxd, lmac_id, false);
}
+void rvu_cgx_enadis_higig2(struct rvu *rvu, int pf, bool enable)
+{
+ u8 cgx_id, lmac_id;
+ void *cgxd;
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ cgx_lmac_enadis_higig2(cgxd, lmac_id, enable);
+}
+
+bool rvu_cgx_is_higig2_enabled(struct rvu *rvu, int pf)
+{
+ u8 cgx_id, lmac_id;
+ void *cgxd;
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return false;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+
+ return is_higig2_enabled(cgxd, lmac_id);
+}
+
int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
{
int pf = rvu_get_pf(pcifunc);
@@ -395,6 +418,31 @@ int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
return 0;
}
+void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc)
+{
+ int pf = rvu_get_pf(pcifunc);
+ int i = 0, lmac_count = 0;
+ u8 max_dmac_filters;
+ u8 cgx_id, lmac_id;
+ void *cgx_dev;
+
+ if (!is_cgx_config_permitted(rvu, pcifunc))
+ return;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgx_dev = cgx_get_pdata(cgx_id);
+ lmac_count = cgx_get_lmac_cnt(cgx_dev);
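+ /* DMAC filter entries of a CGX are split evenly across its LMACs */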
+ max_dmac_filters = MAX_DMAC_ENTRIES_PER_CGX / lmac_count;
+
+ for (i = 0; i < max_dmac_filters; i++)
+ cgx_lmac_addr_del(cgx_id, lmac_id, i);
+
+ /* cgx_lmac_addr_del() does not clear the entry at index 0,
+ * so clear it explicitly here.
+ */
+ cgx_lmac_addr_reset(cgx_id, lmac_id);
+}
+
int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
@@ -445,36 +493,135 @@ int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req,
return 0;
}
+int rvu_mbox_handler_cgx_stats_rst(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ struct rvu_pfvf *parent_pf;
+ u8 cgx_idx, lmac;
+ void *cgxd;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -ENODEV;
+
+ parent_pf = &rvu->pf[pf];
+ /* To ensure that resetting CGX stats won't affect VF stats,
+ * check that the CGX interface is used only by the PF.
+ * If not, return.
+ */
+ if (parent_pf->cgx_users > 1) {
+ dev_info(rvu->dev, "CGX busy, could not reset statistics\n");
+ return 0;
+ }
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
+ cgxd = rvu_cgx_pdata(cgx_idx, rvu);
+
+ return cgx_stats_rst(cgxd, lmac);
+}
+
+int rvu_mbox_handler_cgx_fec_stats(struct rvu *rvu,
+ struct msg_req *req,
+ struct cgx_fec_stats_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_idx, lmac;
+ int err = 0;
+ void *cgxd;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -EPERM;
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
+
+ cgxd = rvu_cgx_pdata(cgx_idx, rvu);
+ err = cgx_get_fec_stats(cgxd, lmac, rsp);
+ return err;
+}
+
int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
struct cgx_mac_addr_set_or_get *req,
struct cgx_mac_addr_set_or_get *rsp)
{
int pf = rvu_get_pf(req->hdr.pcifunc);
+ struct rvu_pfvf *pfvf;
u8 cgx_id, lmac_id;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ pfvf = &rvu->pf[pf];
+ memcpy(pfvf->mac_addr, req->mac_addr, ETH_ALEN);
cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr);
return 0;
}
-int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
- struct cgx_mac_addr_set_or_get *req,
- struct cgx_mac_addr_set_or_get *rsp)
+int rvu_mbox_handler_cgx_mac_addr_add(struct rvu *rvu,
+ struct cgx_mac_addr_add_req *req,
+ struct cgx_mac_addr_add_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+ int rc = 0;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -EPERM;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ rc = cgx_lmac_addr_add(cgx_id, lmac_id, req->mac_addr);
+ if (rc >= 0) {
+ rsp->index = rc;
+ return 0;
+ }
+
+ return rc;
+}
+
+int rvu_mbox_handler_cgx_mac_addr_del(struct rvu *rvu,
+ struct cgx_mac_addr_del_req *req,
+ struct msg_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -EPERM;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ return cgx_lmac_addr_del(cgx_id, lmac_id, req->index);
+}
+
+int rvu_mbox_handler_cgx_mac_max_entries_get(struct rvu *rvu,
+ struct msg_req *req,
+ struct cgx_max_dmac_entries_get_rsp *rsp)
{
int pf = rvu_get_pf(req->hdr.pcifunc);
u8 cgx_id, lmac_id;
- int rc = 0, i;
- u64 cfg;
+
+ /* If the msg is received from a PF that is not mapped to a CGX LMAC,
+ * or from a VF, then no DMAC filter entries are allocated at the
+ * CGX level, so return zero.
+ */
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) {
+ rsp->max_dmac_filters = 0;
+ return 0;
+ }
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ rsp->max_dmac_filters = cgx_lmac_addr_max_entries_get(cgx_id, lmac_id);
+ return 0;
+}
- rsp->hdr.rc = rc;
- cfg = cgx_lmac_addr_get(cgx_id, lmac_id);
- /* copy 48 bit mac address to req->mac_addr */
+int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
+ struct cgx_mac_addr_set_or_get *req,
+ struct cgx_mac_addr_set_or_get *rsp)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+ int i;
+
+ /* copy 48 bit mac address to rsp->mac_addr */
for (i = 0; i < ETH_ALEN; i++)
- rsp->mac_addr[i] = cfg >> (ETH_ALEN - 1 - i) * 8;
+ rsp->mac_addr[i] = pfvf->mac_addr[i];
+
return 0;
}
@@ -509,6 +656,85 @@ int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
return 0;
}
+static void cgx_notify_up_ptp_info(struct rvu *rvu, int pf, bool enable)
+{
+ struct cgx_ptp_rx_info_msg *msg;
+ int err;
+
+ /* Send mbox message to PF */
+ msg = otx2_mbox_alloc_msg_cgx_ptp_rx_info(rvu, pf);
+ if (!msg) {
+ dev_err(rvu->dev, "ptp notification to pf %d failed\n", pf);
+ return;
+ }
+
+ msg->ptp_en = enable;
+ otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pf);
+ err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pf);
+ if (err)
+ dev_err(rvu->dev, "ptp notification to pf %d failed\n", pf);
+}
+
+int rvu_mbox_handler_cgx_ptp_rx_enable(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+ u16 pcifunc = req->hdr.pcifunc;
+ int pf = rvu_get_pf(pcifunc);
+ u8 cgx_id, lmac_id;
+ void *cgxd;
+
+ if (!is_cgx_config_permitted(rvu, pcifunc))
+ return -EPERM;
+
+ /* Silicon does not support enabling time stamp in higig mode */
+ if (rvu_cgx_is_higig2_enabled(rvu, pf))
+ return NIX_AF_ERR_PTP_CONFIG_FAIL;
+
+ cgx_notify_up_ptp_info(rvu, pf, true);
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+
+ cgx_lmac_ptp_config(cgxd, lmac_id, true);
+ /* Inform NPC that packets to be parsed by this PF
+ * will have their data shifted by 8B
+ */
+ if (npc_config_ts_kpuaction(rvu, pf, pcifunc, true))
+ return -EINVAL;
+ /* This flag is required to clean up CGX conf if app gets killed */
+ pfvf->hw_rx_tstamp_en = true;
+
+ return 0;
+}
+
+int rvu_mbox_handler_cgx_ptp_rx_disable(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+ u16 pcifunc = req->hdr.pcifunc;
+ int pf = rvu_get_pf(pcifunc);
+ u8 cgx_id, lmac_id;
+ void *cgxd;
+
+ if (!is_cgx_config_permitted(rvu, pcifunc))
+ return -EPERM;
+
+ cgx_notify_up_ptp_info(rvu, pf, false);
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+
+ cgx_lmac_ptp_config(cgxd, lmac_id, false);
+ /* Inform NPC that 8B shift is cancelled */
+ if (npc_config_ts_kpuaction(rvu, pf, pcifunc, false))
+ return -EINVAL;
+
+ pfvf->hw_rx_tstamp_en = false;
+
+ return 0;
+}
+
static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en)
{
int pf = rvu_get_pf(pcifunc);
@@ -553,7 +779,7 @@ int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req,
pf = rvu_get_pf(req->hdr.pcifunc);
if (!is_pf_cgxmapped(rvu, pf))
- return -ENODEV;
+ return -EPERM;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
@@ -606,11 +832,40 @@ int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
if (req->set)
- cgx_lmac_set_pause_frm(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
- req->tx_pause, req->rx_pause);
+ cgx_lmac_enadis_pause_frm(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
+ req->tx_pause, req->rx_pause);
else
- cgx_lmac_get_pause_frm(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
- &rsp->tx_pause, &rsp->rx_pause);
+ cgx_lmac_get_pause_frm_status(rvu_cgx_pdata(cgx_id, rvu),
+ lmac_id, &rsp->tx_pause,
+ &rsp->rx_pause);
+ return 0;
+}
+
+int rvu_mbox_handler_cgx_get_aux_link_info(struct rvu *rvu, struct msg_req *req,
+ struct cgx_fw_data *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ if (!rvu->fwdata)
+ return -ENXIO;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ memcpy(&rsp->fwdata, &rvu->fwdata->cgx_fw_data[cgx_id][lmac_id],
+ sizeof(struct cgx_lmac_fwdata_s));
+ return 0;
+}
+
+int rvu_mbox_handler_cgx_set_fec_param(struct rvu *rvu,
+ struct fec_mode *req,
+ struct fec_mode *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ rsp->fec = cgx_set_fec(req->fec, cgx_id, lmac_id);
return 0;
}
@@ -705,3 +960,81 @@ exit:
mutex_unlock(&rvu->cgx_cfg_lock);
return err;
}
+
+int rvu_mbox_handler_cgx_set_link_state(struct rvu *rvu,
+ struct cgx_set_link_state_msg *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ u8 cgx_id, lmac_id;
+ int pf, err;
+
+ pf = rvu_get_pf(pcifunc);
+
+ if (!is_cgx_config_permitted(rvu, pcifunc))
+ return -EPERM;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ err = cgx_set_link_state(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
+ !!req->enable);
+ if (err)
+ dev_warn(rvu->dev, "Cannot set link state to %s, err %d\n",
+ (req->enable) ? "enable" : "disable", err);
+
+ return err;
+}
+
+int rvu_mbox_handler_cgx_set_phy_mod_type(struct rvu *rvu,
+ struct cgx_phy_mod_type *req,
+ struct msg_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ return cgx_set_phy_mod_type(req->mod, rvu_cgx_pdata(cgx_id, rvu),
+ lmac_id);
+}
+
+int rvu_mbox_handler_cgx_get_phy_mod_type(struct rvu *rvu, struct msg_req *req,
+ struct cgx_phy_mod_type *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ rsp->mod = cgx_get_phy_mod_type(rvu_cgx_pdata(cgx_id, rvu), lmac_id);
+ if (rsp->mod < 0)
+ return rsp->mod;
+ return 0;
+}
+
+int rvu_mbox_handler_cgx_get_phy_fec_stats(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ return cgx_get_phy_fec_stats(rvu_cgx_pdata(cgx_id, rvu), lmac_id);
+}
+
+int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu,
+ struct cgx_set_link_mode_req *req,
+ struct cgx_set_link_mode_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_idx, lmac;
+ void *cgxd;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -EPERM;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
+
+ cgxd = rvu_cgx_pdata(cgx_idx, rvu);
+
+ rsp->status = cgx_set_link_mode(cgxd, req->args, cgx_idx, lmac);
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
new file mode 100644
index 000000000000..f65af65efa66
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
@@ -0,0 +1,544 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/pci.h>
+#include "rvu_struct.h"
+#include "rvu_reg.h"
+#include "mbox.h"
+#include "rvu.h"
+
+/* CPT PF device id */
+#define PCI_DEVID_OTX2_CPT_PF 0xA0FD
+
+/* Maximum supported microcode groups */
+#define CPT_MAX_ENGINE_GROUPS 8
+
+/* Invalid engine group */
+#define INVALID_ENGINE_GRP 0xFF
+
+/* Number of engine group for symmetric crypto */
+static int crypto_eng_grp = INVALID_ENGINE_GRP;
+
+/* CPT PF number */
+static int cpt_pf_num = -1;
+
+/* Fault interrupt names */
+static const char *cpt_flt_irq_name[2] = { "CPTAF FLT0", "CPTAF FLT1" };
+
+static irqreturn_t rvu_cpt_af_flt_intr_handler(int irq, void *ptr)
+{
+ struct rvu *rvu = (struct rvu *) ptr;
+ u64 reg0, reg1;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0));
+ reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1));
+ dev_err(rvu->dev, "Received CPTAF FLT irq : 0x%llx, 0x%llx",
+ reg0, reg1);
+
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(0), reg0);
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(1), reg1);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rvu_cpt_af_rvu_intr_handler(int irq, void *ptr)
+{
+ struct rvu *rvu = (struct rvu *) ptr;
+ int blkaddr;
+ u64 reg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_RVU_INT);
+ dev_err(rvu->dev, "Received CPTAF RVU irq : 0x%llx", reg);
+
+ rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT, reg);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rvu_cpt_af_ras_intr_handler(int irq, void *ptr)
+{
+ struct rvu *rvu = (struct rvu *) ptr;
+ int blkaddr;
+ u64 reg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_RAS_INT);
+ dev_err(rvu->dev, "Received CPTAF RAS irq : 0x%llx", reg);
+
+ rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT, reg);
+ return IRQ_HANDLED;
+}
+
+static int rvu_cpt_do_register_interrupt(struct rvu *rvu, int irq_offs,
+ irq_handler_t handler,
+ const char *name)
+{
+ int ret = 0;
+
+ ret = request_irq(pci_irq_vector(rvu->pdev, irq_offs), handler, 0,
+ name, rvu);
+ if (ret) {
+ dev_err(rvu->dev, "RVUAF: %s irq registration failed", name);
+ goto err;
+ }
+
+ WARN_ON(rvu->irq_allocated[irq_offs]);
+ rvu->irq_allocated[irq_offs] = true;
+err:
+ return ret;
+}
+
+void rvu_cpt_unregister_interrupts(struct rvu *rvu)
+{
+ int blkaddr, i, offs;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0);
+ if (blkaddr < 0)
+ return;
+
+ offs = rvu_read64(rvu, blkaddr, CPT_PRIV_AF_INT_CFG) & 0x7FF;
+ if (!offs) {
+ dev_warn(rvu->dev,
+ "Failed to get CPT_AF_INT vector offsets\n");
+ return;
+ }
+
+ /* Disable all CPT AF interrupts */
+ for (i = 0; i < 2; i++)
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(i), 0x1);
+ rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1C, 0x1);
+ rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1C, 0x1);
+
+ for (i = 0; i < CPT_AF_INT_VEC_CNT; i++)
+ if (rvu->irq_allocated[offs + i]) {
+ free_irq(pci_irq_vector(rvu->pdev, offs + i), rvu);
+ rvu->irq_allocated[offs + i] = false;
+ }
+}
+
+static bool is_cpt_pf(u16 pcifunc)
+{
+ if (rvu_get_pf(pcifunc) != cpt_pf_num)
+ return false;
+ if (pcifunc & RVU_PFVF_FUNC_MASK)
+ return false;
+
+ return true;
+}
+
+static bool is_cpt_vf(u16 pcifunc)
+{
+ if (rvu_get_pf(pcifunc) != cpt_pf_num)
+ return false;
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK))
+ return false;
+
+ return true;
+}
+
+int rvu_cpt_init(struct rvu *rvu)
+{
+ struct pci_dev *pdev;
+ int i;
+
+ for (i = 0; i < rvu->hw->total_pfs; i++) {
+ pdev = pci_get_domain_bus_and_slot(
+ pci_domain_nr(rvu->pdev->bus), i + 1, 0);
+ if (!pdev)
+ continue;
+
+ if (pdev->device == PCI_DEVID_OTX2_CPT_PF) {
+ cpt_pf_num = i;
+ put_device(&pdev->dev);
+ break;
+ }
+
+ put_device(&pdev->dev);
+ }
+
+ return 0;
+}
+
+int rvu_cpt_register_interrupts(struct rvu *rvu)
+{
+
+ int i, offs, blkaddr, ret = 0;
+
+ if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
+ return 0;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ offs = rvu_read64(rvu, blkaddr, CPT_PRIV_AF_INT_CFG) & 0x7FF;
+ if (!offs) {
+ dev_warn(rvu->dev,
+ "Failed to get CPT_AF_INT vector offsets\n");
+ return 0;
+ }
+
+ for (i = CPT_AF_INT_VEC_FLT0; i < CPT_AF_INT_VEC_RVU; i++) {
+ ret = rvu_cpt_do_register_interrupt(rvu, offs + i,
+ rvu_cpt_af_flt_intr_handler,
+ cpt_flt_irq_name[i]);
+ if (ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), 0x1);
+ }
+
+ ret = rvu_cpt_do_register_interrupt(rvu, offs + CPT_AF_INT_VEC_RVU,
+ rvu_cpt_af_rvu_intr_handler,
+ "CPTAF RVU");
+ if (ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1S, 0x1);
+
+ ret = rvu_cpt_do_register_interrupt(rvu, offs + CPT_AF_INT_VEC_RAS,
+ rvu_cpt_af_ras_intr_handler,
+ "CPTAF RAS");
+ if (ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1S, 0x1);
+
+ return 0;
+err:
+ rvu_cpt_unregister_interrupts(rvu);
+ return ret;
+}
+
+int rvu_mbox_handler_cpt_lf_alloc(struct rvu *rvu,
+ struct cpt_lf_alloc_req_msg *req,
+ struct cpt_lf_alloc_rsp_msg *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ int num_lfs, slot, grp_mask;
+ struct rvu_block *block;
+ int cptlf, blkaddr;
+ u64 val;
+
+ if (crypto_eng_grp == INVALID_ENGINE_GRP)
+ return CPT_AF_ERR_GRP_INVALID;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, pcifunc);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ block = &rvu->hw->block[blkaddr];
+ num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
+ block->type);
+ if (!num_lfs)
+ return CPT_AF_ERR_LF_INVALID;
+
+ /* Check if requested 'CPTLF <=> NIXLF' mapping is valid */
+ if (req->nix_pf_func) {
+ /* If default, use 'this' CPTLF's PFFUNC */
+ if (req->nix_pf_func == RVU_DEFAULT_PF_FUNC)
+ req->nix_pf_func = pcifunc;
+ if (!is_pffunc_map_valid(rvu, req->nix_pf_func, BLKTYPE_NIX))
+ return CPT_AF_ERR_NIX_PF_FUNC_INVALID;
+ }
+
+ /* Check if requested 'CPTLF <=> SSOLF' mapping is valid */
+ if (req->sso_pf_func) {
+ /* If default, use 'this' CPTLF's PFFUNC */
+ if (req->sso_pf_func == RVU_DEFAULT_PF_FUNC)
+ req->sso_pf_func = pcifunc;
+ if (!is_pffunc_map_valid(rvu, req->sso_pf_func, BLKTYPE_SSO))
+ return CPT_AF_ERR_SSO_PF_FUNC_INVALID;
+ }
+
+ for (slot = 0; slot < num_lfs; slot++) {
+ cptlf = rvu_get_lf(rvu, block, pcifunc, slot);
+ if (cptlf < 0)
+ return CPT_AF_ERR_LF_INVALID;
+
+ /* Set CPT LF group and priority */
+ grp_mask = 1 << crypto_eng_grp;
+ val = (u64) grp_mask << 48 | 1;
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);
+
+ /* Set CPT LF NIX_PF_FUNC and SSO_PF_FUNC */
+ val = (u64) req->nix_pf_func << 48 |
+ (u64) req->sso_pf_func << 32;
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), val);
+ }
+
+ /* Set SSO_PF_FUNC_OVRD for inline IPSec */
+ rvu_write64(rvu, blkaddr, CPT_AF_ECO, 0x1);
+
+ rsp->crypto_eng_grp = crypto_eng_grp;
+ return 0;
+}
+
+int rvu_mbox_handler_cpt_lf_free(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_block *block;
+ int cptlf, blkaddr;
+ int num_lfs, slot;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, pcifunc);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ block = &rvu->hw->block[blkaddr];
+ num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
+ block->type);
+ if (!num_lfs)
+ return CPT_AF_ERR_LF_INVALID;
+
+ for (slot = 0; slot < num_lfs; slot++) {
+ cptlf = rvu_get_lf(rvu, block, pcifunc, slot);
+ if (cptlf < 0)
+ return CPT_AF_ERR_LF_INVALID;
+
+ /* Reset CPT LF group and priority */
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), 0x0);
+ /* Reset CPT LF NIX_PF_FUNC and SSO_PF_FUNC */
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), 0x0);
+ }
+
+ return 0;
+}
+
+static int cpt_inline_ipsec_cfg_inbound(struct rvu *rvu, int blkaddr, u8 cptlf,
+ u8 enable, u16 sso_pf_func,
+ u16 nix_pf_func)
+{
+ u64 val;
+
+ val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf));
+ if (enable && (val & BIT_ULL(16))) {
+ /* IPSec inline outbound path is already enabled for a given
+ * CPT LF, HRM states that inline inbound & outbound paths
+ * must not be enabled at the same time for a given CPT LF
+ */
+ return CPT_AF_ERR_INLINE_IPSEC_INB_ENA;
+ }
+ /* Check if requested 'CPTLF <=> SSOLF' mapping is valid */
+ if (sso_pf_func && !is_pffunc_map_valid(rvu, sso_pf_func, BLKTYPE_SSO))
+ return CPT_AF_ERR_SSO_PF_FUNC_INVALID;
+
+ /* Set PF_FUNC_INST */
+ if (enable)
+ val |= BIT_ULL(9);
+ else
+ val &= ~BIT_ULL(9);
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);
+
+ if (sso_pf_func) {
+ /* Set SSO_PF_FUNC */
+ val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf));
+ val |= (u64)sso_pf_func << 32;
+ val |= (u64)nix_pf_func << 48;
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), val);
+ }
+
+ return 0;
+}
+
+static int cpt_inline_ipsec_cfg_outbound(struct rvu *rvu, int blkaddr,
+ u8 cptlf, u8 enable,
+ u16 nix_pf_func)
+{
+ u64 val;
+
+ val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf));
+ if (enable && (val & BIT_ULL(9))) {
+ /* IPSec inline inbound path is already enabled for a given
+ * CPT LF, HRM states that inline inbound & outbound paths
+ * must not be enabled at the same time for a given CPT LF
+ */
+ return CPT_AF_ERR_INLINE_IPSEC_OUT_ENA;
+ }
+
+ /* Check if requested 'CPTLF <=> NIXLF' mapping is valid */
+ if (nix_pf_func && !is_pffunc_map_valid(rvu, nix_pf_func, BLKTYPE_NIX))
+ return CPT_AF_ERR_NIX_PF_FUNC_INVALID;
+
+ /* Set PF_FUNC_INST */
+ if (enable)
+ val |= BIT_ULL(16);
+ else
+ val &= ~BIT_ULL(16);
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);
+
+ if (nix_pf_func) {
+ /* Set NIX_PF_FUNC */
+ val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf));
+ val |= (u64)nix_pf_func << 48;
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), val);
+ }
+
+ return 0;
+}
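Both helpers above, together with cpt_lf_alloc(), program the same pair of per-LF registers. Collecting the bit positions they rely on in one place makes the layout easier to follow; the names below are illustrative and inferred only from the shifts used in this file, not quoted from hardware documentation.

/* Per-LF control register fields as used in this file (inferred from
 * the code above).
 */
#include <stdint.h>

/* CPT_AF_LFX_CTL */
#define LFX_CTL_PRI		(1ULL << 0)	/* priority bit set by cpt_lf_alloc */
#define LFX_CTL_INLINE_INB	(1ULL << 9)	/* inline inbound IPsec enable */
#define LFX_CTL_INLINE_OUTB	(1ULL << 16)	/* inline outbound IPsec enable */
#define LFX_CTL_ENG_GRPS(m)	((uint64_t)(m) << 48)	/* engine-group mask */

/* CPT_AF_LFX_CTL2 */
#define LFX_CTL2_SSO_PF_FUNC(x)	((uint64_t)(x) << 32)
#define LFX_CTL2_NIX_PF_FUNC(x)	((uint64_t)(x) << 48)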
+
+int rvu_mbox_handler_cpt_inline_ipsec_cfg(struct rvu *rvu,
+ struct cpt_inline_ipsec_cfg_msg *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_block *block;
+ int cptlf, blkaddr;
+ int num_lfs, ret;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, pcifunc);
+ if (blkaddr < 0)
+ return CPT_AF_ERR_LF_INVALID;
+
+ block = &rvu->hw->block[blkaddr];
+ num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
+ block->type);
+ if (req->slot >= num_lfs)
+ return CPT_AF_ERR_LF_INVALID;
+
+ cptlf = rvu_get_lf(rvu, block, pcifunc, req->slot);
+ if (cptlf < 0)
+ return CPT_AF_ERR_LF_INVALID;
+
+ switch (req->dir) {
+ case CPT_INLINE_INBOUND:
+ ret = cpt_inline_ipsec_cfg_inbound(rvu, blkaddr, cptlf,
+ req->enable,
+ req->sso_pf_func,
+ req->nix_pf_func);
+ break;
+
+ case CPT_INLINE_OUTBOUND:
+ ret = cpt_inline_ipsec_cfg_outbound(rvu, blkaddr, cptlf,
+ req->enable,
+ req->nix_pf_func);
+ break;
+
+ default:
+
+ return CPT_AF_ERR_PARAM;
+ }
+
+ return ret;
+}
+
+int rvu_mbox_handler_cpt_set_crypto_grp(struct rvu *rvu,
+ struct cpt_set_crypto_grp_req_msg *req,
+ struct cpt_set_crypto_grp_req_msg *rsp)
+{
+ /* This message is accepted only if sent from CPT PF */
+ if (!is_cpt_pf(req->hdr.pcifunc))
+ return CPT_AF_ERR_ACCESS_DENIED;
+
+ rsp->crypto_eng_grp = req->crypto_eng_grp;
+
+ if (req->crypto_eng_grp != INVALID_ENGINE_GRP &&
+ req->crypto_eng_grp >= CPT_MAX_ENGINE_GROUPS)
+ return CPT_AF_ERR_GRP_INVALID;
+
+ crypto_eng_grp = req->crypto_eng_grp;
+ return 0;
+}
+
+int rvu_mbox_handler_cpt_rd_wr_register(struct rvu *rvu,
+ struct cpt_rd_wr_reg_msg *req,
+ struct cpt_rd_wr_reg_msg *rsp)
+{
+ int blkaddr, num_lfs, offs, lf;
+ struct rvu_block *block;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ /* This message is accepted only if sent from CPT PF/VF */
+ if (!is_cpt_pf(req->hdr.pcifunc) &&
+ !is_cpt_vf(req->hdr.pcifunc))
+ return CPT_AF_ERR_ACCESS_DENIED;
+
+ rsp->reg_offset = req->reg_offset;
+ rsp->ret_val = req->ret_val;
+ rsp->is_write = req->is_write;
+
+ if (req->hdr.pcifunc & RVU_PFVF_FUNC_MASK) {
+ /* Registers that can be accessed from VF */
+ switch (req->reg_offset & 0xFF000) {
+ case CPT_AF_LFX_CTL(0):
+ offs = req->reg_offset & 0xFFF;
+ if (offs % 8)
+ goto error;
+ lf = offs >> 3;
+ break;
+
+ default:
+ goto error;
+ }
+
+ block = &rvu->hw->block[blkaddr];
+ num_lfs = rvu_get_rsrc_mapcount(
+ rvu_get_pfvf(rvu, req->hdr.pcifunc),
+ block->type);
+ if (lf >= num_lfs)
+ /* Slot is not valid for that VF */
+ goto error;
+
+ /* Need to translate CPT LF slot to global number because
+ * VFs use local numbering from 0 to number of LFs - 1
+ */
+ lf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr],
+ req->hdr.pcifunc, lf);
+ if (lf < 0)
+ goto error;
+
+ req->reg_offset &= 0xFF000;
+ req->reg_offset += lf << 3;
+ rsp->reg_offset = req->reg_offset;
+ } else {
+ /* Registers that can be accessed from PF */
+ switch (req->reg_offset & 0xFF000) {
+ case CPT_AF_PF_FUNC:
+ case CPT_AF_BLK_RST:
+ case CPT_AF_CONSTANTS1:
+ if (req->reg_offset & 0xFFF)
+ goto error;
+ break;
+
+ case CPT_AF_EXEX_STS(0):
+ case CPT_AF_EXEX_CTL(0):
+ case CPT_AF_EXEX_CTL2(0):
+ case CPT_AF_EXEX_UCODE_BASE(0):
+ offs = req->reg_offset & 0xFFF;
+ if ((offs % 8) || (offs >> 3) > 127)
+ goto error;
+ break;
+
+ default:
+ goto error;
+ }
+ }
+
+ if (req->is_write)
+ rvu_write64(rvu, blkaddr, req->reg_offset, req->val);
+ else
+ rsp->val = rvu_read64(rvu, blkaddr, req->reg_offset);
+
+ return 0;
+error:
+ /* Access to register denied */
+ return CPT_AF_ERR_ACCESS_DENIED;
+}
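For VF callers, rvu_mbox_handler_cpt_rd_wr_register() above rewrites the requested offset from the VF's local LF slot to the global LF number before touching hardware. A compact sketch of that translation, with the global-LF lookup stubbed out by a hypothetical helper:

/* Sketch of the VF register-offset rewrite: keep the register block
 * (0xFF000), validate the 8-byte-aligned LF index in the low 12 bits,
 * and substitute the global LF number. lookup_global_lf() stands in
 * for rvu_get_lf().
 */
#include <stdint.h>

static int lookup_global_lf(int local_slot)
{
	return local_slot + 4;	/* placeholder mapping for illustration */
}

static int64_t vf_reg_offset_to_global(uint64_t reg_offset)
{
	uint64_t offs = reg_offset & 0xFFF;	/* offset within the block */
	int global_lf;

	if (offs % 8)
		return -1;			/* must address a whole LF entry */

	global_lf = lookup_global_lf(offs >> 3);
	if (global_lf < 0)
		return -1;

	return (int64_t)((reg_offset & 0xFF000) | ((uint64_t)global_lf << 3));
}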
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index 77adad4adb1b..cc32e800375e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -1259,6 +1259,15 @@ static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused)
RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write);
+static ssize_t rvu_dbg_nix_tx_stall_hwissue_display(struct file *filp,
+ char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_nix_get_tx_stall_counters(filp->private_data, buffer, ppos);
+}
+
+RVU_DEBUG_FOPS(nix_tx_stall_hwissue, nix_tx_stall_hwissue_display, NULL);
+
static void rvu_dbg_nix_init(struct rvu *rvu)
{
const struct device *dev = &rvu->pdev->dev;
@@ -1310,6 +1319,13 @@ static void rvu_dbg_nix_init(struct rvu *rvu)
if (!pfile)
goto create_failed;
+ if (is_rvu_96xx_A0(rvu)) {
+ pfile = debugfs_create_file("tx_stall_hwissue", 0600, rvu->rvu_dbg.nix,
+ rvu, &rvu_dbg_nix_tx_stall_hwissue_fops);
+ if (!pfile)
+ goto create_failed;
+ }
+
return;
create_failed:
dev_err(dev, "Failed to create debugfs dir/file for NIX\n");
@@ -1650,6 +1666,173 @@ static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp,
RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL);
+static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
+ struct rvu_npc_mcam_rule *rule)
+{
+ u8 bit;
+
+ for_each_set_bit(bit, (unsigned long *)&rule->features, 64) {
+ seq_printf(s, "\t%s ", npc_get_field_name(bit));
+ switch (bit) {
+ case NPC_DMAC:
+ seq_printf(s, "%pM ", rule->packet.dmac);
+ seq_printf(s, "%pM\n", rule->mask.dmac);
+ break;
+ case NPC_SMAC:
+ seq_printf(s, "%pM ", rule->packet.smac);
+ seq_printf(s, "%pM\n", rule->mask.smac);
+ break;
+ case NPC_ETYPE:
+ seq_printf(s, "0x%x ", ntohs(rule->packet.etype));
+ seq_printf(s, "0x%x\n", ntohs(rule->mask.etype));
+ break;
+ case NPC_OUTER_VID:
+ seq_printf(s, "%d ", ntohs(rule->packet.vlan_tci));
+ seq_printf(s, "mask 0x%x\n",
+ ntohs(rule->mask.vlan_tci));
+ break;
+ case NPC_TOS:
+ seq_printf(s, "%d ", rule->packet.tos);
+ seq_printf(s, "mask 0x%x\n", rule->mask.tos);
+ break;
+ case NPC_SIP_IPV4:
+ seq_printf(s, "%pI4 ", &rule->packet.ip4src);
+ seq_printf(s, "%pI4\n", &rule->mask.ip4src);
+ break;
+ case NPC_DIP_IPV4:
+ seq_printf(s, "%pI4 ", &rule->packet.ip4dst);
+ seq_printf(s, "%pI4\n", &rule->mask.ip4dst);
+ break;
+ case NPC_SIP_IPV6:
+ seq_printf(s, "%pI6 ", rule->packet.ip6src);
+ seq_printf(s, "%pI6\n", rule->mask.ip6src);
+ break;
+ case NPC_DIP_IPV6:
+ seq_printf(s, "%pI6 ", rule->packet.ip6dst);
+ seq_printf(s, "%pI6\n", rule->mask.ip6dst);
+ break;
+ case NPC_SPORT_TCP:
+ case NPC_SPORT_UDP:
+ seq_printf(s, "%d ", ntohs(rule->packet.sport));
+ seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.sport));
+ break;
+ case NPC_DPORT_TCP:
+ case NPC_DPORT_UDP:
+ seq_printf(s, "%d ", ntohs(rule->packet.dport));
+ seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport));
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static void rvu_dbg_npc_mcam_show_action(struct seq_file *s,
+ struct rvu_npc_mcam_rule *rule)
+{
+ if (rule->intf == NIX_INTF_TX) {
+ switch (rule->tx_action.op) {
+ case NIX_TX_ACTIONOP_DROP:
+ seq_puts(s, "\taction: Drop\n");
+ break;
+ case NIX_TX_ACTIONOP_UCAST_DEFAULT:
+ seq_puts(s, "\taction: Unicast to default channel\n");
+ break;
+ case NIX_TX_ACTIONOP_UCAST_CHAN:
+ seq_printf(s, "\taction: Unicast to channel %d\n",
+ rule->tx_action.index);
+ break;
+ case NIX_TX_ACTIONOP_MCAST:
+ seq_puts(s, "\taction: Multicast\n");
+ break;
+ case NIX_TX_ACTIONOP_DROP_VIOL:
+ seq_puts(s, "\taction: Lockdown Violation Drop\n");
+ break;
+ default:
+ break;
+ }
+ } else {
+ switch (rule->rx_action.op) {
+ case NIX_RX_ACTIONOP_DROP:
+ seq_puts(s, "\taction: Drop\n");
+ break;
+ case NIX_RX_ACTIONOP_UCAST:
+ seq_printf(s, "\taction: Direct to queue %d\n",
+ rule->rx_action.index);
+ break;
+ case NIX_RX_ACTIONOP_RSS:
+ seq_puts(s, "\taction: RSS\n");
+ break;
+ case NIX_RX_ACTIONOP_UCAST_IPSEC:
+ seq_puts(s, "\taction: Unicast ipsec\n");
+ break;
+ case NIX_RX_ACTIONOP_MCAST:
+ seq_puts(s, "\taction: Multicast\n");
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
+{
+ struct rvu_npc_mcam_rule *iter;
+ struct rvu *rvu = s->private;
+ struct npc_mcam *mcam;
+ int pf, vf = -1;
+ int blkaddr;
+ u16 target;
+ u64 hits;
+
+ mcam = &rvu->hw->mcam;
+
+ list_for_each_entry(iter, &mcam->mcam_rules, list) {
+ pf = (iter->owner >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
+ seq_printf(s, "\n\tPF%d ", pf);
+
+ if (iter->owner & RVU_PFVF_FUNC_MASK) {
+ vf = (iter->owner & RVU_PFVF_FUNC_MASK) - 1;
+ seq_printf(s, "VF%d", vf);
+ }
+ seq_puts(s, "\n");
+
+ if (iter->intf == NIX_INTF_RX) {
+ seq_puts(s, "\tdirection: RX\n");
+ target = iter->rx_action.pf_func;
+ pf = (target >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
+ seq_printf(s, "\ttarget: PF%d ", pf);
+
+ if (target & RVU_PFVF_FUNC_MASK) {
+ vf = (target & RVU_PFVF_FUNC_MASK) - 1;
+ seq_printf(s, "VF%d", vf);
+ }
+ seq_puts(s, "\n");
+ } else {
+ seq_puts(s, "\tdirection: TX\n");
+ }
+
+ seq_printf(s, "\tmcam entry: %d\n", iter->entry);
+ rvu_dbg_npc_mcam_show_flows(s, iter);
+ rvu_dbg_npc_mcam_show_action(s, iter);
+ seq_printf(s, "\tenabled: %s\n", iter->enable ? "yes" : "no");
+
+ if (!iter->has_cntr)
+ continue;
+ seq_printf(s, "\tcounter: %d\n", iter->cntr);
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return 0;
+ hits = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(iter->cntr));
+ seq_printf(s, "\thits: %lld\n", hits);
+ }
+
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(npc_mcam_rules, npc_mcam_show_rules, NULL);
+
static void rvu_dbg_npc_init(struct rvu *rvu)
{
const struct device *dev = &rvu->pdev->dev;
@@ -1664,6 +1847,11 @@ static void rvu_dbg_npc_init(struct rvu *rvu)
if (!pfile)
goto create_failed;
+ pfile = debugfs_create_file("mcam_rules", 0444, rvu->rvu_dbg.npc,
+ rvu, &rvu_dbg_npc_mcam_rules_fops);
+ if (!pfile)
+ goto create_failed;
+
pfile = debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc,
rvu, &rvu_dbg_npc_rx_miss_act_fops);
if (!pfile)
@@ -1676,6 +1864,1045 @@ create_failed:
debugfs_remove_recursive(rvu->rvu_dbg.npc);
}
+static int parse_sso_cmd_buffer(char *cmd_buf, size_t *count,
+ const char __user *buffer, int *ssolf,
+ bool *all)
+{
+ int ret, bytes_not_copied;
+ char *cmd_buf_tmp;
+ char *subtoken;
+
+ bytes_not_copied = copy_from_user(cmd_buf, buffer, *count);
+ if (bytes_not_copied)
+ return -EFAULT;
+
+ cmd_buf[*count] = '\0';
+ cmd_buf_tmp = strchr(cmd_buf, '\n');
+
+ if (cmd_buf_tmp) {
+ *cmd_buf_tmp = '\0';
+ *count = cmd_buf_tmp - cmd_buf + 1;
+ }
+
+ subtoken = strsep(&cmd_buf, " ");
+ if (subtoken && strcmp(subtoken, "all") == 0) {
+ *all = true;
+ } else {
+ ret = subtoken ? kstrtoint(subtoken, 10, ssolf) : -EINVAL;
+ if (ret < 0)
+ return ret;
+ }
+ if (cmd_buf)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void sso_hwgrp_display_iq_list(struct rvu *rvu, int ssolf, u16 idx,
+ u16 tail_idx, u8 queue_type)
+{
+ const char *queue[3] = {"DQ", "CQ", "AQ"};
+ int blkaddr;
+ u64 reg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return;
+
+ pr_info("SSO HWGGRP[%d] [%s] Chain queue head[%d]", ssolf,
+ queue[queue_type], idx);
+ pr_info("SSO HWGGRP[%d] [%s] Chain queue tail[%d]", ssolf,
+ queue[queue_type], tail_idx);
+ pr_info("--------------------------------------------------\n");
+ do {
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_IENTX_TAG(idx));
+ pr_info("SSO HWGGRP[%d] [%s] IE[%d] TAG 0x%llx\n", ssolf,
+ queue[queue_type], idx, reg);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_IENTX_GRP(idx));
+ pr_info("SSO HWGGRP[%d] [%s] IE[%d] GRP 0x%llx\n", ssolf,
+ queue[queue_type], idx, reg);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_IENTX_PENDTAG(idx));
+ pr_info("SSO HWGGRP[%d] [%s] IE[%d] PENDTAG 0x%llx\n", ssolf,
+ queue[queue_type], idx, reg);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_IENTX_LINKS(idx));
+ pr_info("SSO HWGGRP[%d] [%s] IE[%d] LINKS 0x%llx\n", ssolf,
+ queue[queue_type], idx, reg);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_IENTX_QLINKS(idx));
+ pr_info("SSO HWGGRP[%d] [%s] IE[%d] QLINKS 0x%llx\n", ssolf,
+ queue[queue_type], idx, reg);
+ pr_info("--------------------------------------------------\n");
+ if (idx == tail_idx)
+ break;
+ idx = reg & 0x1FFF;
+ } while (idx != 0x1FFF);
+}
+
+static void sso_hwgrp_display_taq_list(struct rvu *rvu, int ssolf, u8 wae_head,
+ u16 ent_head, u8 wae_used, u8 taq_lines)
+{
+ int i, blkaddr;
+ u64 reg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return;
+
+ pr_info("--------------------------------------------------\n");
+ do {
+ for (i = wae_head; i < taq_lines && wae_used; i++) {
+ reg = rvu_read64(rvu, blkaddr,
+ SSO_AF_TAQX_WAEY_TAG(ent_head, i));
+ pr_info("SSO HWGGRP[%d] TAQ[%d] WAE[%d] TAG 0x%llx\n",
+ ssolf, ent_head, i, reg);
+
+ reg = rvu_read64(rvu, blkaddr,
+ SSO_AF_TAQX_WAEY_WQP(ent_head, i));
+ pr_info("SSO HWGGRP[%d] TAQ[%d] WAE[%d] WQP 0x%llx\n",
+ ssolf, ent_head, i, reg);
+ wae_used--;
+ }
+
+ reg = rvu_read64(rvu, blkaddr,
+ SSO_AF_TAQX_LINK(ent_head));
+ pr_info("SSO HWGGRP[%d] TAQ[%d] LINK 0x%llx\n",
+ ssolf, ent_head, reg);
+ ent_head = reg & 0x7FF;
+ pr_info("--------------------------------------------------\n");
+ } while (ent_head && wae_used);
+}
+
+static int read_sso_pc(struct rvu *rvu)
+{
+ int blkaddr;
+ u64 reg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return -ENODEV;
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_ACTIVE_CYCLES0);
+ pr_info("SSO Add-Work active cycles %lld\n", reg);
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_ACTIVE_CYCLES1);
+ pr_info("SSO Get-Work active cycles %lld\n", reg);
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_ACTIVE_CYCLES2);
+ pr_info("SSO Work-Slot active cycles %lld\n", reg);
+ pr_info("\n");
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_NOS_CNT) & 0x1FFF;
+ pr_info("SSO work-queue entries on the no-schedule list %lld\n", reg);
+ pr_info("\n");
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_AW_READ_ARB);
+ pr_info("SSO XAQ reads outstanding %lld\n",
+ (reg >> 24) & 0x3F);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_XAQ_REQ_PC);
+ pr_info("SSO XAQ reads requests %lld\n", reg);
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_XAQ_LATENCY_PC);
+ pr_info("SSO XAQ read latency cycles %lld\n", reg);
+ pr_info("\n");
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_AW_WE);
+ pr_info("SSO IAQ reserved %lld\n",
+ (reg >> 16) & 0x3FFF);
+ pr_info("SSO IAQ total %lld\n", reg & 0x3FFF);
+ pr_info("\n");
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_TAQ_CNT);
+ pr_info("SSO TAQ reserved %lld\n",
+ (reg >> 16) & 0x7FF);
+ pr_info("SSO TAQ total %lld\n", reg & 0x7FF);
+ pr_info("\n");
+
+ return 0;
+}
+
+/* Reads SSO hwgrp performance counters */
+static void read_sso_hwgrp_pc(struct rvu *rvu, int ssolf, bool all)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int blkaddr, max_id;
+ u64 reg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return;
+
+ block = &hw->block[blkaddr];
+ if (ssolf < 0 || ssolf >= block->lf.max) {
+ pr_info("Invalid SSOLF(HWGRP), valid range is 0-%d\n",
+ block->lf.max - 1);
+ return;
+ }
+ max_id = block->lf.max;
+
+ if (all)
+ ssolf = 0;
+ else
+ max_id = ssolf + 1;
+
+ pr_info("==================================================\n");
+ for (; ssolf < max_id; ssolf++) {
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_WS_PC(ssolf));
+ pr_info("SSO HWGGRP[%d] Work-Schedule PC 0x%llx\n", ssolf,
+ reg);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_EXT_PC(ssolf));
+ pr_info("SSO HWGGRP[%d] External Schedule PC 0x%llx\n", ssolf,
+ reg);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_WA_PC(ssolf));
+ pr_info("SSO HWGGRP[%d] Work-Add PC 0x%llx\n", ssolf,
+ reg);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_TS_PC(ssolf));
+ pr_info("SSO HWGGRP[%d] Tag Switch PC 0x%llx\n", ssolf,
+ reg);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_DS_PC(ssolf));
+ pr_info("SSO HWGGRP[%d] Deschedule PC 0x%llx\n", ssolf,
+ reg);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_DQ_PC(ssolf));
+ pr_info("SSO HWGGRP[%d] Work-Descheduled PC 0x%llx\n", ssolf,
+ reg);
+
+ reg = rvu_read64(rvu, blkaddr,
+ SSO_AF_HWGRPX_PAGE_CNT(ssolf));
+ pr_info("SSO HWGGRP[%d] In-use Page Count 0x%llx\n", ssolf,
+ reg);
+ pr_info("==================================================\n");
+ }
+}
+
+/* Reads SSO hwgrp Threshold */
+static void read_sso_hwgrp_thresh(struct rvu *rvu, int ssolf, bool all)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int blkaddr, max_id;
+ u64 reg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return;
+
+ block = &hw->block[blkaddr];
+ if (ssolf < 0 || ssolf >= block->lf.max) {
+ pr_info("Invalid SSOLF(HWGRP), valid range is 0-%d\n",
+ block->lf.max - 1);
+ return;
+ }
+ max_id = block->lf.max;
+
+ if (all)
+ ssolf = 0;
+ else
+ max_id = ssolf + 1;
+
+ pr_info("==================================================\n");
+ for (; ssolf < max_id; ssolf++) {
+ reg = rvu_read64(rvu, blkaddr,
+ SSO_AF_HWGRPX_IAQ_THR(ssolf));
+ pr_info("SSO HWGGRP[%d] IAQ Threshold 0x%llx\n", ssolf,
+ reg);
+
+ reg = rvu_read64(rvu, blkaddr,
+ SSO_AF_HWGRPX_TAQ_THR(ssolf));
+ pr_info("SSO HWGGRP[%d] TAQ Threshold 0x%llx\n", ssolf,
+ reg);
+
+ reg = rvu_read64(rvu, blkaddr,
+ SSO_AF_HWGRPX_XAQ_AURA(ssolf));
+ pr_info("SSO HWGGRP[%d] XAQ Aura 0x%llx\n", ssolf,
+ reg);
+
+ reg = rvu_read64(rvu, blkaddr,
+ SSO_AF_HWGRPX_XAQ_LIMIT(ssolf));
+ pr_info("SSO HWGGRP[%d] XAQ Limit 0x%llx\n", ssolf,
+ reg);
+
+ reg = rvu_read64(rvu, blkaddr,
+ SSO_AF_HWGRPX_IU_ACCNT(ssolf));
+ pr_info("SSO HWGGRP[%d] IU Account Index 0x%llx\n", ssolf,
+ reg);
+
+ reg = rvu_read64(rvu, blkaddr,
+ SSO_AF_IU_ACCNTX_CFG(reg & 0xFF));
+ pr_info("SSO HWGGRP[%d] IU Accounting Cfg 0x%llx\n", ssolf,
+ reg);
+ pr_info("==================================================\n");
+ }
+}
+
+/* Reads SSO hwgrp TAQ list */
+static void read_sso_hwgrp_taq_list(struct rvu *rvu, int ssolf, bool all)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u8 taq_entries, wae_head;
+ struct rvu_block *block;
+ u16 ent_head, cl_used;
+ int blkaddr, max_id;
+ u64 reg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return;
+
+ block = &hw->block[blkaddr];
+ if (ssolf < 0 || ssolf >= block->lf.max) {
+ pr_info("Invalid SSOLF(HWGRP), valid range is 0-%d\n",
+ block->lf.max - 1);
+ return;
+ }
+ max_id = block->lf.max;
+
+ if (all)
+ ssolf = 0;
+ else
+ max_id = ssolf + 1;
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_CONST);
+ taq_entries = (reg >> 48) & 0xFF;
+ pr_info("==================================================\n");
+ for (; ssolf < max_id; ssolf++) {
+ pr_info("++++++++++++++++++++++++++++++++++++++++++++++++++\n");
+		pr_info("SSO HWGGRP[%d] Transitory Output Admission Queue\n",
+			ssolf);
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_TOAQX_STATUS(ssolf));
+ pr_info("SSO HWGGRP[%d] TOAQ Status 0x%llx\n", ssolf,
+ reg);
+ ent_head = (reg >> 12) & 0x7FF;
+ cl_used = (reg >> 32) & 0x7FF;
+ if (reg & BIT_ULL(61) && cl_used) {
+ pr_info("SSO HWGGRP[%d] TOAQ CL_USED 0x%x\n",
+ ssolf, cl_used);
+ sso_hwgrp_display_taq_list(rvu, ssolf, ent_head, 0,
+ cl_used * taq_entries,
+ taq_entries);
+ }
+ pr_info("++++++++++++++++++++++++++++++++++++++++++++++++++\n");
+		pr_info("SSO HWGGRP[%d] Transitory Input Admission Queue\n",
+			ssolf);
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_TIAQX_STATUS(ssolf));
+ pr_info("SSO HWGGRP[%d] TIAQ Status 0x%llx\n", ssolf,
+ reg);
+ wae_head = (reg >> 60) & 0xF;
+ cl_used = (reg >> 32) & 0x7FFF;
+ ent_head = (reg >> 12) & 0x7FF;
+ if (reg & BIT_ULL(61) && cl_used) {
+ pr_info("SSO HWGGRP[%d] TIAQ WAE_USED 0x%x\n",
+ ssolf, cl_used);
+ sso_hwgrp_display_taq_list(rvu, ssolf, ent_head,
+ wae_head, cl_used,
+ taq_entries);
+ }
+ pr_info("++++++++++++++++++++++++++++++++++++++++++++++++++\n");
+ pr_info("==================================================\n");
+ }
+}
+
+/* Reads SSO hwgrp IAQ list */
+static void read_sso_hwgrp_iaq_list(struct rvu *rvu, int ssolf, bool all)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ u16 head_idx, tail_idx;
+ int blkaddr, max_id;
+ u64 reg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return;
+
+ block = &hw->block[blkaddr];
+ if (ssolf < 0 || ssolf >= block->lf.max) {
+ pr_info("Invalid SSOLF(HWGRP), valid range is 0-%d\n",
+ block->lf.max - 1);
+ return;
+ }
+ max_id = block->lf.max;
+
+ if (all)
+ ssolf = 0;
+ else
+ max_id = ssolf + 1;
+ pr_info("==================================================\n");
+ for (; ssolf < max_id; ssolf++) {
+ pr_info("++++++++++++++++++++++++++++++++++++++++++++++++++\n");
+ pr_info("SSO HWGGRP[%d] Deschedule Queue(DQ)\n", ssolf);
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_IPL_DESCHEDX(ssolf));
+ pr_info("SSO HWGGRP[%d] DQ List 0x%llx\n", ssolf,
+ reg);
+ head_idx = (reg >> 13) & 0x1FFF;
+ tail_idx = reg & 0x1FFF;
+ if (reg & (BIT_ULL(26) | BIT_ULL(27)))
+ sso_hwgrp_display_iq_list(rvu, ssolf, head_idx,
+ tail_idx, 0);
+ pr_info("++++++++++++++++++++++++++++++++++++++++++++++++++\n");
+ pr_info("SSO HWGGRP[%d] Conflict Queue(CQ)\n", ssolf);
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_IPL_CONFX(ssolf));
+ pr_info("SSO HWGGRP[%d] CQ List 0x%llx\n", ssolf,
+ reg);
+ head_idx = (reg >> 13) & 0x1FFF;
+ tail_idx = reg & 0x1FFF;
+ if (reg & (BIT_ULL(26) | BIT_ULL(27)))
+ sso_hwgrp_display_iq_list(rvu, ssolf, head_idx,
+ tail_idx, 1);
+ pr_info("++++++++++++++++++++++++++++++++++++++++++++++++++\n");
+ pr_info("SSO HWGGRP[%d] Admission Queue(AQ)\n", ssolf);
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_IPL_IAQX(ssolf));
+ pr_info("SSO HWGGRP[%d] AQ List 0x%llx\n", ssolf,
+ reg);
+ head_idx = (reg >> 13) & 0x1FFF;
+ tail_idx = reg & 0x1FFF;
+ if (reg & (BIT_ULL(26) | BIT_ULL(27)))
+ sso_hwgrp_display_iq_list(rvu, ssolf, head_idx,
+ tail_idx, 2);
+ pr_info("++++++++++++++++++++++++++++++++++++++++++++++++++\n");
+ pr_info("==================================================\n");
+ }
+}
+
+/* Reads SSO hwgrp IENT list */
+static int read_sso_hwgrp_ient_list(struct rvu *rvu)
+{
+ const char *tt_c[4] = {"SSO_TT_ORDERED_", "SSO_TT_ATOMIC__",
+ "SSO_TT_UNTAGGED", "SSO_TT_EMPTY___"};
+ struct rvu_hwinfo *hw = rvu->hw;
+ int max_idx = hw->sso.sso_iue;
+ u64 pendtag, qlinks, links;
+ int len, idx, blkaddr;
+ u64 tag, grp, wqp;
+ char str[300];
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return -ENODEV;
+
+ for (idx = 0; idx < max_idx; idx++) {
+ len = 0;
+ tag = rvu_read64(rvu, blkaddr, SSO_AF_IENTX_TAG(idx));
+ grp = rvu_read64(rvu, blkaddr, SSO_AF_IENTX_GRP(idx));
+ pendtag = rvu_read64(rvu, blkaddr,
+ SSO_AF_IENTX_PENDTAG(idx));
+ links = rvu_read64(rvu, blkaddr, SSO_AF_IENTX_LINKS(idx));
+ qlinks = rvu_read64(rvu, blkaddr,
+ SSO_AF_IENTX_QLINKS(idx));
+ wqp = rvu_read64(rvu, blkaddr, SSO_AF_IENTX_WQP(idx));
+ len = snprintf(str + len, 300,
+ "SSO IENT[%4d] TT [%s] HWGRP [%3lld] ", idx,
+ tt_c[(tag >> 32) & 0x3], (grp >> 48) & 0x1f);
+ len += snprintf(str + len, 300 - len,
+ "TAG [0x%010llx] GRP [0x%016llx] ", tag, grp);
+ len += snprintf(str + len, 300 - len, "PENDTAG [0x%010llx] ",
+ pendtag);
+ len += snprintf(str + len, 300 - len,
+ "LINKS [0x%016llx] QLINKS [0x%010llx] ", links,
+ qlinks);
+ snprintf(str + len, 300 - len, "WQP [0x%016llx]\n", wqp);
+ pr_info("%s", str);
+ }
+
+ return 0;
+}
+
+/* Reads SSO hwgrp free list */
+static int read_sso_hwgrp_free_list(struct rvu *rvu)
+{
+ int blkaddr;
+ u64 reg;
+ u8 idx;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return -ENODEV;
+
+ pr_info("==================================================\n");
+ for (idx = 0; idx < 4; idx++) {
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_IPL_FREEX(idx));
+ pr_info("SSO FREE LIST[%d]\n", idx);
+ pr_info("qnum_head : %lld qnum_tail : %lld\n",
+ (reg >> 58) & 0x3, (reg >> 56) & 0x3);
+ pr_info("queue_cnt : %llx\n", (reg >> 26) & 0x7fff);
+ pr_info("queue_val : %lld queue_head : %4lld queue_tail %4lld\n"
+ , (reg >> 40) & 0x1, (reg >> 13) & 0x1fff,
+ reg & 0x1fff);
+ pr_info("==================================================\n");
+ }
+
+ return 0;
+}
+
+/* Reads SSO HWS (work-slot) state */
+static void read_sso_hws_info(struct rvu *rvu, int ssowlf, bool all)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int blkaddr;
+ int max_id;
+ u64 reg;
+ u8 mask;
+ u8 set;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSOW, 0);
+ if (blkaddr < 0)
+ return;
+
+ block = &hw->block[blkaddr];
+ if (ssowlf < 0 || ssowlf >= block->lf.max) {
+ pr_info("Invalid SSOWLF(HWS), valid range is 0-%d\n",
+ block->lf.max - 1);
+ return;
+ }
+ max_id = block->lf.max;
+
+ if (all)
+ ssowlf = 0;
+ else
+ max_id = ssowlf + 1;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return;
+
+ pr_info("==================================================\n");
+ for (; ssowlf < max_id; ssowlf++) {
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWSX_ARB(ssowlf));
+ pr_info("SSOW HWS[%d] Arbitration State 0x%llx\n", ssowlf,
+ reg);
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWSX_GMCTL(ssowlf));
+ pr_info("SSOW HWS[%d] Guest Machine Control 0x%llx\n", ssowlf,
+ reg);
+ for (set = 0; set < 2; set++)
+ for (mask = 0; mask < 4; mask++) {
+ reg = rvu_read64(rvu, blkaddr,
+ SSO_AF_HWSX_SX_GRPMSKX(ssowlf,
+ set,
+ mask));
+ pr_info(
+ "SSOW HWS[%d] SET[%d] Group Mask[%d] 0x%llx\n",
+ ssowlf, set, mask, reg);
+ }
+ pr_info("==================================================\n");
+ }
+}
+
+typedef void (*sso_dump_cb)(struct rvu *rvu, int ssolf, bool all);
+
+static ssize_t rvu_dbg_sso_cmd_parser(struct file *filp,
+ const char __user *buffer, size_t count,
+ loff_t *ppos, char *lf_type,
+ char *file_nm, sso_dump_cb fn)
+{
+ struct rvu *rvu = filp->private_data;
+ bool all = false;
+ char *cmd_buf;
+ int lf = 0;
+
+ if ((*ppos != 0) || !count)
+ return -EINVAL;
+
+ cmd_buf = kzalloc(count + 1, GFP_KERNEL);
+ if (!cmd_buf)
+		return -ENOMEM;
+
+ if (parse_sso_cmd_buffer(cmd_buf, &count, buffer,
+ &lf, &all) < 0) {
+ pr_info("Usage: echo [<%s>/all] > %s\n", lf_type, file_nm);
+ } else {
+ fn(rvu, lf, all);
+ }
+ kfree(cmd_buf);
+
+ return count;
+}
+
+/* SSO debugfs APIs */
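+/* Illustrative usage (the path prefix depends on the debugfs root created
+ * in rvu_dbg_init):
+ *   echo 2 > .../sso/hwgrp/sso_hwgrp_pc
+ *   echo all > .../sso/hws/sso_hws_info
+ * The resulting dumps are emitted to the kernel log via pr_info().
+ */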
+static ssize_t rvu_dbg_sso_pc_display(struct file *filp,
+ char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return read_sso_pc(filp->private_data);
+}
+
+static ssize_t rvu_dbg_sso_hwgrp_pc_display(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_sso_cmd_parser(filp, buffer, count, ppos, "hwgrp",
+ "sso_hwgrp_pc", read_sso_hwgrp_pc);
+}
+
+static ssize_t rvu_dbg_sso_hwgrp_thresh_display(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_sso_cmd_parser(filp, buffer, count, ppos, "hwgrp",
+ "sso_hwgrp_thresh", read_sso_hwgrp_thresh);
+}
+
+static ssize_t rvu_dbg_sso_hwgrp_taq_wlk_display(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_sso_cmd_parser(filp, buffer, count, ppos, "hwgrp",
+ "sso_hwgrp_taq_wlk", read_sso_hwgrp_taq_list);
+}
+
+static ssize_t rvu_dbg_sso_hwgrp_iaq_wlk_display(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_sso_cmd_parser(filp, buffer, count, ppos, "hwgrp",
+ "sso_hwgrp_iaq_wlk", read_sso_hwgrp_iaq_list);
+}
+
+static ssize_t rvu_dbg_sso_hwgrp_ient_wlk_display(struct file *filp,
+ char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return read_sso_hwgrp_ient_list(filp->private_data);
+}
+
+static ssize_t rvu_dbg_sso_hwgrp_fl_wlk_display(struct file *filp,
+ char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return read_sso_hwgrp_free_list(filp->private_data);
+}
+
+static ssize_t rvu_dbg_sso_hws_info_display(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_sso_cmd_parser(filp, buffer, count, ppos, "hws",
+ "sso_hws_info", read_sso_hws_info);
+}
+
+RVU_DEBUG_FOPS(sso_pc, sso_pc_display, NULL);
+RVU_DEBUG_FOPS(sso_hwgrp_pc, NULL, sso_hwgrp_pc_display);
+RVU_DEBUG_FOPS(sso_hwgrp_thresh, NULL, sso_hwgrp_thresh_display);
+RVU_DEBUG_FOPS(sso_hwgrp_taq_wlk, NULL, sso_hwgrp_taq_wlk_display);
+RVU_DEBUG_FOPS(sso_hwgrp_iaq_wlk, NULL, sso_hwgrp_iaq_wlk_display);
+RVU_DEBUG_FOPS(sso_hwgrp_ient_wlk, sso_hwgrp_ient_wlk_display, NULL);
+RVU_DEBUG_FOPS(sso_hwgrp_fl_wlk, sso_hwgrp_fl_wlk_display, NULL);
+RVU_DEBUG_FOPS(sso_hws_info, NULL, sso_hws_info_display);
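+
+/* Files registered with only a read op (sso_pc, sso_hwgrp_ient_walk and
+ * sso_hwgrp_free_list_walk) dump on read; the rest take an LF index or
+ * "all" on write and then dump to the kernel log.
+ */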
+
+static void rvu_dbg_sso_init(struct rvu *rvu)
+{
+ const struct device *dev = &rvu->pdev->dev;
+ struct dentry *pfile;
+
+ rvu->rvu_dbg.sso = debugfs_create_dir("sso", rvu->rvu_dbg.root);
+ if (!rvu->rvu_dbg.sso)
+ return;
+
+ rvu->rvu_dbg.sso_hwgrp = debugfs_create_dir("hwgrp", rvu->rvu_dbg.sso);
+ if (!rvu->rvu_dbg.sso_hwgrp)
+ return;
+
+ rvu->rvu_dbg.sso_hws = debugfs_create_dir("hws", rvu->rvu_dbg.sso);
+ if (!rvu->rvu_dbg.sso_hws)
+ return;
+
+ pfile = debugfs_create_file("sso_pc", 0600,
+ rvu->rvu_dbg.sso, rvu,
+ &rvu_dbg_sso_pc_fops);
+ if (!pfile)
+ goto create_failed;
+
+ pfile = debugfs_create_file("sso_hwgrp_pc", 0600,
+ rvu->rvu_dbg.sso_hwgrp, rvu,
+ &rvu_dbg_sso_hwgrp_pc_fops);
+ if (!pfile)
+ goto create_failed;
+
+ pfile = debugfs_create_file("sso_hwgrp_thresh", 0600,
+ rvu->rvu_dbg.sso_hwgrp, rvu,
+ &rvu_dbg_sso_hwgrp_thresh_fops);
+ if (!pfile)
+ goto create_failed;
+
+ pfile = debugfs_create_file("sso_hwgrp_taq_walk", 0600,
+ rvu->rvu_dbg.sso_hwgrp, rvu,
+ &rvu_dbg_sso_hwgrp_taq_wlk_fops);
+ if (!pfile)
+ goto create_failed;
+
+ pfile = debugfs_create_file("sso_hwgrp_iaq_walk", 0600,
+ rvu->rvu_dbg.sso_hwgrp, rvu,
+ &rvu_dbg_sso_hwgrp_iaq_wlk_fops);
+ if (!pfile)
+ goto create_failed;
+
+ pfile = debugfs_create_file("sso_hwgrp_ient_walk", 0600,
+ rvu->rvu_dbg.sso_hwgrp, rvu,
+ &rvu_dbg_sso_hwgrp_ient_wlk_fops);
+ if (!pfile)
+ goto create_failed;
+
+ pfile = debugfs_create_file("sso_hwgrp_free_list_walk", 0600,
+ rvu->rvu_dbg.sso_hwgrp, rvu,
+ &rvu_dbg_sso_hwgrp_fl_wlk_fops);
+ if (!pfile)
+ goto create_failed;
+
+ pfile = debugfs_create_file("sso_hws_info", 0600,
+ rvu->rvu_dbg.sso_hws, rvu,
+ &rvu_dbg_sso_hws_info_fops);
+ if (!pfile)
+ goto create_failed;
+
+ return;
+
+create_failed:
+ dev_err(dev, "Failed to create debugfs dir/file for SSO\n");
+ debugfs_remove_recursive(rvu->rvu_dbg.sso);
+}
+
+/* CPT debugfs APIs */
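+
+/* The engine-type selector written to cpt_engines_sts / cpt_engines_info
+ * must be one of "SE", "IE", "AE" or "all"; a subsequent read of the same
+ * file then reports only the selected engines.
+ */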
+static int parse_cpt_cmd_buffer(char *cmd_buf, size_t *count,
+ const char __user *buffer, char *e_type)
+{
+ int bytes_not_copied;
+ char *cmd_buf_tmp;
+ char *subtoken;
+
+ bytes_not_copied = copy_from_user(cmd_buf, buffer, *count);
+ if (bytes_not_copied)
+ return -EFAULT;
+
+ cmd_buf[*count] = '\0';
+ cmd_buf_tmp = strchr(cmd_buf, '\n');
+
+ if (cmd_buf_tmp) {
+ *cmd_buf_tmp = '\0';
+ *count = cmd_buf_tmp - cmd_buf + 1;
+ }
+
+ subtoken = strsep(&cmd_buf, " ");
+ if (subtoken)
+ strcpy(e_type, subtoken);
+ else
+ return -EINVAL;
+
+ if (cmd_buf)
+ return -EINVAL;
+
+ if (strcmp(e_type, "SE") && strcmp(e_type, "IE") &&
+ strcmp(e_type, "AE") && strcmp(e_type, "all"))
+ return -EINVAL;
+
+ return 0;
+}
+
+static ssize_t rvu_dbg_cpt_cmd_parser(struct file *filp,
+ const char __user *buffer, size_t count,
+ loff_t *ppos)
+{
+ struct seq_file *s = filp->private_data;
+ struct rvu *rvu = s->private;
+ char *cmd_buf;
+ int ret = 0;
+
+ if ((*ppos != 0) || !count)
+ return -EINVAL;
+
+ cmd_buf = kzalloc(count + 1, GFP_KERNEL);
+ if (!cmd_buf)
+		return -ENOMEM;
+
+ if (parse_cpt_cmd_buffer(cmd_buf, &count, buffer,
+ rvu->rvu_dbg.cpt_ctx.e_type) < 0)
+ ret = -EINVAL;
+
+ kfree(cmd_buf);
+
+ if (ret)
+ return -EINVAL;
+
+ return count;
+}
+
+static ssize_t rvu_dbg_cpt_engines_sts_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_cpt_cmd_parser(filp, buffer, count, ppos);
+}
+
+static int rvu_dbg_cpt_engines_sts_display(struct seq_file *filp, void *unused)
+{
+ u64 busy_sts[2] = {0}, free_sts[2] = {0};
+ struct rvu *rvu = filp->private;
+ u16 max_ses, max_ies, max_aes;
+ u32 e_min = 0, e_max = 0, e;
+ int blkaddr;
+ char *e_type;
+ u64 reg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0);
+ if (blkaddr < 0)
+ return -ENODEV;
+
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
+ max_ses = reg & 0xffff;
+ max_ies = (reg >> 16) & 0xffff;
+ max_aes = (reg >> 32) & 0xffff;
+
+ e_type = rvu->rvu_dbg.cpt_ctx.e_type;
+
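+	/* Engines are indexed with SEs first, then IEs, then AEs (counts
+	 * come from CPT_AF_CONSTANTS1 read above), so derive the
+	 * [e_min, e_max] window for the requested engine type.
+	 */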
+ if (strcmp(e_type, "SE") == 0) {
+ e_min = 0;
+ e_max = max_ses - 1;
+ } else if (strcmp(e_type, "IE") == 0) {
+ e_min = max_ses;
+ e_max = max_ses + max_ies - 1;
+ } else if (strcmp(e_type, "AE") == 0) {
+ e_min = max_ses + max_ies;
+ e_max = max_ses + max_ies + max_aes - 1;
+ } else if (strcmp(e_type, "all") == 0) {
+ e_min = 0;
+ e_max = max_ses + max_ies + max_aes - 1;
+ } else {
+ return -EINVAL;
+ }
+
+ for (e = e_min; e <= e_max; e++) {
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e));
+ if (reg & 0x1) {
+ if (e < max_ses)
+ busy_sts[0] |= 1ULL << e;
+ else if (e >= max_ses)
+ busy_sts[1] |= 1ULL << (e - max_ses);
+ }
+ if (reg & 0x2) {
+ if (e < max_ses)
+ free_sts[0] |= 1ULL << e;
+ else if (e >= max_ses)
+ free_sts[1] |= 1ULL << (e - max_ses);
+ }
+ }
+ seq_printf(filp, "FREE STS : 0x%016llx 0x%016llx\n", free_sts[1],
+ free_sts[0]);
+ seq_printf(filp, "BUSY STS : 0x%016llx 0x%016llx\n", busy_sts[1],
+ busy_sts[0]);
+
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(cpt_engines_sts, cpt_engines_sts_display,
+ cpt_engines_sts_write);
+
+static ssize_t rvu_dbg_cpt_engines_info_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_cpt_cmd_parser(filp, buffer, count, ppos);
+}
+
+static int rvu_dbg_cpt_engines_info_display(struct seq_file *filp, void *unused)
+{
+ struct rvu *rvu = filp->private;
+ u16 max_ses, max_ies, max_aes;
+ u32 e_min, e_max, e;
+ int blkaddr;
+ char *e_type;
+ u64 reg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0);
+ if (blkaddr < 0)
+ return -ENODEV;
+
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
+ max_ses = reg & 0xffff;
+ max_ies = (reg >> 16) & 0xffff;
+ max_aes = (reg >> 32) & 0xffff;
+
+ e_type = rvu->rvu_dbg.cpt_ctx.e_type;
+
+ if (strcmp(e_type, "SE") == 0) {
+ e_min = 0;
+ e_max = max_ses - 1;
+ } else if (strcmp(e_type, "IE") == 0) {
+ e_min = max_ses;
+ e_max = max_ses + max_ies - 1;
+ } else if (strcmp(e_type, "AE") == 0) {
+ e_min = max_ses + max_ies;
+ e_max = max_ses + max_ies + max_aes - 1;
+ } else if (strcmp(e_type, "all") == 0) {
+ e_min = 0;
+ e_max = max_ses + max_ies + max_aes - 1;
+ } else {
+ return -EINVAL;
+ }
+
+ seq_puts(filp, "===========================================\n");
+ for (e = e_min; e <= e_max; e++) {
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(e));
+ seq_printf(filp, "CPT Engine[%u] Group Enable 0x%02llx\n", e,
+ reg & 0xff);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_ACTIVE(e));
+ seq_printf(filp, "CPT Engine[%u] Active Info 0x%llx\n", e,
+ reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(e));
+ seq_printf(filp, "CPT Engine[%u] Control 0x%llx\n", e,
+ reg);
+ seq_puts(filp, "===========================================\n");
+ }
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(cpt_engines_info, cpt_engines_info_display,
+ cpt_engines_info_write);
+
+static int rvu_dbg_cpt_lfs_info_display(struct seq_file *filp, void *unused)
+{
+ struct rvu *rvu = filp->private;
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int blkaddr;
+ u64 reg;
+ u32 lf;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0);
+ if (blkaddr < 0)
+ return -ENODEV;
+
+ block = &hw->block[blkaddr];
+ if (!block->lf.bmap)
+ return -ENODEV;
+
+ seq_puts(filp, "===========================================\n");
+ for (lf = 0; lf < block->lf.max; lf++) {
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(lf));
+ seq_printf(filp, "CPT Lf[%u] CTL 0x%llx\n", lf, reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(lf));
+ seq_printf(filp, "CPT Lf[%u] CTL2 0x%llx\n", lf, reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_PTR_CTL(lf));
+ seq_printf(filp, "CPT Lf[%u] PTR_CTL 0x%llx\n", lf, reg);
+ reg = rvu_read64(rvu, blkaddr, block->lfcfg_reg |
+ (lf << block->lfshift));
+ seq_printf(filp, "CPT Lf[%u] CFG 0x%llx\n", lf, reg);
+ seq_puts(filp, "===========================================\n");
+ }
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(cpt_lfs_info, cpt_lfs_info_display, NULL);
+
+static int rvu_dbg_cpt_err_info_display(struct seq_file *filp, void *unused)
+{
+ struct rvu *rvu = filp->private;
+ u64 reg0, reg1;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0);
+ if (blkaddr < 0)
+ return -ENODEV;
+
+ reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0));
+ reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1));
+ seq_printf(filp, "CPT_AF_FLTX_INT: 0x%llx 0x%llx\n", reg0, reg1);
+ reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(0));
+ reg1 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(1));
+ seq_printf(filp, "CPT_AF_PSNX_EXE: 0x%llx 0x%llx\n", reg0, reg1);
+ reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_LF(0));
+ seq_printf(filp, "CPT_AF_PSNX_LF: 0x%llx\n", reg0);
+ reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RVU_INT);
+ seq_printf(filp, "CPT_AF_RVU_INT: 0x%llx\n", reg0);
+ reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RAS_INT);
+ seq_printf(filp, "CPT_AF_RAS_INT: 0x%llx\n", reg0);
+ reg0 = rvu_read64(rvu, blkaddr, CPT_AF_EXE_ERR_INFO);
+ seq_printf(filp, "CPT_AF_EXE_ERR_INFO: 0x%llx\n", reg0);
+
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(cpt_err_info, cpt_err_info_display, NULL);
+
+static int rvu_dbg_cpt_pc_display(struct seq_file *filp, void *unused)
+{
+ struct rvu *rvu;
+ int blkaddr;
+ u64 reg;
+
+ rvu = filp->private;
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0);
+ if (blkaddr < 0)
+ return -ENODEV;
+
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_REQ_PC);
+ seq_printf(filp, "CPT instruction requests %llu\n", reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_LATENCY_PC);
+ seq_printf(filp, "CPT instruction latency %llu\n", reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_REQ_PC);
+ seq_printf(filp, "CPT NCB read requests %llu\n", reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_LATENCY_PC);
+ seq_printf(filp, "CPT NCB read latency %llu\n", reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_UC_PC);
+ seq_printf(filp, "CPT read requests caused by UC fills %llu\n", reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_ACTIVE_CYCLES_PC);
+ seq_printf(filp, "CPT active cycles pc %llu\n", reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_CPTCLK_CNT);
+ seq_printf(filp, "CPT clock count pc %llu\n", reg);
+
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(cpt_pc, cpt_pc_display, NULL);
+
+static void rvu_dbg_cpt_init(struct rvu *rvu)
+{
+ const struct device *dev = &rvu->pdev->dev;
+ struct dentry *pfile;
+
+ rvu->rvu_dbg.cpt = debugfs_create_dir("cpt", rvu->rvu_dbg.root);
+ if (!rvu->rvu_dbg.cpt)
+ return;
+
+ pfile = debugfs_create_file("cpt_pc", 0600,
+ rvu->rvu_dbg.cpt, rvu,
+ &rvu_dbg_cpt_pc_fops);
+ if (!pfile)
+ goto create_failed;
+
+ pfile = debugfs_create_file("cpt_engines_sts", 0600,
+ rvu->rvu_dbg.cpt, rvu,
+ &rvu_dbg_cpt_engines_sts_fops);
+ if (!pfile)
+ goto create_failed;
+
+ pfile = debugfs_create_file("cpt_engines_info", 0600,
+ rvu->rvu_dbg.cpt, rvu,
+ &rvu_dbg_cpt_engines_info_fops);
+ if (!pfile)
+ goto create_failed;
+
+ pfile = debugfs_create_file("cpt_lfs_info", 0600,
+ rvu->rvu_dbg.cpt, rvu,
+ &rvu_dbg_cpt_lfs_info_fops);
+ if (!pfile)
+ goto create_failed;
+
+ pfile = debugfs_create_file("cpt_err_info", 0600,
+ rvu->rvu_dbg.cpt, rvu,
+ &rvu_dbg_cpt_err_info_fops);
+ if (!pfile)
+ goto create_failed;
+
+ return;
+
+create_failed:
+ dev_err(dev, "Failed to create debugfs dir/file for CPT\n");
+ debugfs_remove_recursive(rvu->rvu_dbg.cpt);
+}
+
void rvu_dbg_init(struct rvu *rvu)
{
struct device *dev = &rvu->pdev->dev;
@@ -1695,6 +2922,10 @@ void rvu_dbg_init(struct rvu *rvu)
rvu_dbg_nix_init(rvu);
rvu_dbg_cgx_init(rvu);
rvu_dbg_npc_init(rvu);
+ rvu_dbg_sso_init(rvu);
+
+ if (is_block_implemented(rvu->hw, BLKADDR_CPT0))
+ rvu_dbg_cpt_init(rvu);
return;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_fixes.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_fixes.c
new file mode 100644
index 000000000000..2925d1554ad9
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_fixes.c
@@ -0,0 +1,1019 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2019 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kthread.h>
+#include <linux/pci.h>
+#include <linux/cpu.h>
+#include <linux/sched/signal.h>
+
+#include "rvu_struct.h"
+#include "rvu_reg.h"
+#include "common.h"
+#include "mbox.h"
+#include "rvu.h"
+#include "cgx.h"
+#include "rvu_fixes.h"
+
+#define OTX2_MAX_CQ_CNT 64
+
+struct nix_tx_stall {
+ struct rvu *rvu;
+ int blkaddr;
+ int smq_count;
+ int tl4_count;
+ int tl3_count;
+ int tl2_count;
+ int sq_count;
+ u16 *smq_tl2_map;
+ u16 *tl4_tl2_map;
+ u16 *tl3_tl2_map;
+ u16 *tl2_tl1_map;
+ u16 *sq_smq_map;
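+/* tl2_link_map[] packs, per TL2: the link id in bits [6:0], an express
+ * link flag in bit 7 (see EXPR_LINK) and the transmit channel from
+ * bit 8 upwards.
+ */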
+#define LINK_TYPE_SHIFT 7
+#define EXPR_LINK(map) (map & (1 << LINK_TYPE_SHIFT))
+#define LINK_CHAN_SHIFT 8
+#define LINK_CHAN(map) (map >> LINK_CHAN_SHIFT)
+ u16 *tl2_link_map;
+ u8 *nixlf_tl2_count;
+ u64 *nixlf_poll_count;
+ u64 *nixlf_stall_count;
+ u64 *nlink_credits; /* Normal link credits */
+ u64 poll_cntr;
+ u64 stalled_cntr;
+ int pse_link_bp_level;
+ bool txsch_config_changed;
+ struct mutex txsch_lock; /* To sync Tx SCHQ config update and poll */
+ struct task_struct *poll_thread; /* Tx stall condition polling thread */
+};
+
+/* The transmit stall HW issue workaround reads a lot of registers
+ * at frequent intervals; having a barrier for every register access
+ * would increase the cycles spent in stall detection. Hence use the
+ * relaxed counterparts.
+ */
+static inline void rvu_wr64(struct rvu *rvu, u64 block, u64 offset, u64 val)
+{
+ writeq_relaxed(val, rvu->afreg_base + ((block << 28) | offset));
+}
+
+static inline u64 rvu_rd64(struct rvu *rvu, u64 block, u64 offset)
+{
+ return readq_relaxed(rvu->afreg_base + ((block << 28) | offset));
+}
+
+/**
+ * rvu_usleep_interruptible - sleep waiting for signals
+ * @usecs: Time in microseconds to sleep for
+ *
+ * A replica of msleep_interruptible with microsecond granularity,
+ * used to shorten the tx stall poll interval.
+ */
+static unsigned long rvu_usleep_interruptible(unsigned int usecs)
+{
+ unsigned long timeout = usecs_to_jiffies(usecs) + 1;
+
+ while (timeout && !signal_pending(current))
+ timeout = schedule_timeout_interruptible(timeout);
+ return jiffies_to_usecs(timeout);
+}
+
+void rvu_nix_txsch_lock(struct nix_hw *nix_hw)
+{
+ struct nix_tx_stall *tx_stall = nix_hw->tx_stall;
+
+ if (tx_stall)
+ mutex_lock(&tx_stall->txsch_lock);
+}
+
+void rvu_nix_txsch_unlock(struct nix_hw *nix_hw)
+{
+ struct nix_tx_stall *tx_stall = nix_hw->tx_stall;
+
+ if (tx_stall)
+ mutex_unlock(&tx_stall->txsch_lock);
+}
+
+void rvu_nix_txsch_config_changed(struct nix_hw *nix_hw)
+{
+ struct nix_tx_stall *tx_stall = nix_hw->tx_stall;
+
+ if (tx_stall)
+ tx_stall->txsch_config_changed = true;
+}
+
+static inline struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
+{
+ if (blkaddr == BLKADDR_NIX0 && hw->nix0)
+ return hw->nix0;
+
+ return NULL;
+}
+
+void rvu_nix_update_link_credits(struct rvu *rvu, int blkaddr,
+ int link, u64 ncredits)
+{
+ struct nix_tx_stall *tx_stall;
+ struct nix_hw *nix_hw;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return;
+
+ tx_stall = nix_hw->tx_stall;
+ if (!tx_stall)
+ return;
+
+ rvu_nix_txsch_lock(nix_hw);
+ tx_stall->nlink_credits[link] = ncredits;
+ rvu_nix_txsch_unlock(nix_hw);
+}
+
+void rvu_nix_update_sq_smq_mapping(struct rvu *rvu, int blkaddr, int nixlf,
+ u16 sq, u16 smq)
+{
+ struct nix_tx_stall *tx_stall;
+ struct nix_hw *nix_hw;
+ int sq_count;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return;
+
+ tx_stall = nix_hw->tx_stall;
+ if (!tx_stall)
+ return;
+
+ sq_count = tx_stall->sq_count;
+
+ rvu_nix_txsch_lock(nix_hw);
+ tx_stall->sq_smq_map[nixlf * sq_count + sq] = smq;
+ rvu_nix_txsch_unlock(nix_hw);
+}
+
+static void rvu_nix_scan_link_credits(struct rvu *rvu, int blkaddr,
+ struct nix_tx_stall *tx_stall)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u64 credits;
+ int link;
+
+ for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++) {
+ credits = rvu_rd64(rvu, blkaddr,
+ NIX_AF_TX_LINKX_NORM_CREDIT(link));
+ tx_stall->nlink_credits[link] = credits;
+ }
+}
+
+static void rvu_nix_scan_tl2_link_mapping(struct rvu *rvu,
+ struct nix_tx_stall *tx_stall,
+ int blkaddr, int tl2, int smq)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int link, chan;
+ u64 link_cfg;
+
+ for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++) {
+ link_cfg = rvu_rd64(rvu, blkaddr,
+ NIX_AF_TL3_TL2X_LINKX_CFG(tl2, link));
+ if (!(link_cfg & BIT_ULL(12)))
+ continue;
+
+ /* Get channel of the LINK to which this TL2 is transmitting */
+ chan = link_cfg & 0x3F;
+ tx_stall->tl2_link_map[tl2] = chan << LINK_CHAN_SHIFT;
+
+ /* Save link info */
+ tx_stall->tl2_link_map[tl2] |= (link & 0x7F);
+
+		/* The workaround assumes a TL2 transmits to only one link,
+		 * so treat the first enabled link as the only one.
+		 */
+ break;
+ }
+}
+
+static bool is_sq_allocated(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ int blkaddr, int sq)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ struct admin_queue *aq;
+
+ block = &hw->block[blkaddr];
+ aq = block->aq;
+ spin_lock(&aq->lock);
+ if (test_bit(sq, pfvf->sq_bmap)) {
+ spin_unlock(&aq->lock);
+ return true;
+ }
+ spin_unlock(&aq->lock);
+ return false;
+}
+
+static bool is_schq_allocated(struct rvu *rvu, struct nix_hw *nix_hw,
+ int lvl, int schq)
+{
+ struct nix_txsch *txsch = &nix_hw->txsch[lvl];
+
+ mutex_lock(&rvu->rsrc_lock);
+ if (test_bit(schq, txsch->schq.bmap)) {
+ mutex_unlock(&rvu->rsrc_lock);
+ return true;
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+ return false;
+}
+
+static bool is_sw_xoff_set(struct rvu *rvu, int blkaddr, int lvl, int schq)
+{
+ u64 cfg, swxoff_reg = 0x00;
+
+ switch (lvl) {
+ case NIX_TXSCH_LVL_MDQ:
+ swxoff_reg = NIX_AF_MDQX_SW_XOFF(schq);
+ break;
+ case NIX_TXSCH_LVL_TL4:
+ swxoff_reg = NIX_AF_TL4X_SW_XOFF(schq);
+ break;
+ case NIX_TXSCH_LVL_TL3:
+ swxoff_reg = NIX_AF_TL3X_SW_XOFF(schq);
+ break;
+ case NIX_TXSCH_LVL_TL2:
+ swxoff_reg = NIX_AF_TL2X_SW_XOFF(schq);
+ break;
+ case NIX_TXSCH_LVL_TL1:
+ swxoff_reg = NIX_AF_TL1X_SW_XOFF(schq);
+ break;
+ }
+ if (!swxoff_reg)
+ return false;
+
+ cfg = rvu_rd64(rvu, blkaddr, swxoff_reg);
+ if (cfg & BIT_ULL(0))
+ return true;
+
+ return false;
+}
+
+static void rvu_nix_scan_txsch_hierarchy(struct rvu *rvu,
+ struct nix_hw *nix_hw, int blkaddr)
+{
+ struct nix_tx_stall *tx_stall = nix_hw->tx_stall;
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct nix_txsch *tl2_txsch;
+ struct rvu_block *block;
+ int tl4, tl3, tl2, tl1;
+ int lf, smq, size;
+ u16 pcifunc;
+ u64 cfg;
+
+ /* Clear previous mappings */
+ size = sizeof(u16);
+ memset(tx_stall->smq_tl2_map, U16_MAX, tx_stall->smq_count * size);
+ memset(tx_stall->tl4_tl2_map, U16_MAX, tx_stall->tl4_count * size);
+ memset(tx_stall->tl3_tl2_map, U16_MAX, tx_stall->tl3_count * size);
+ memset(tx_stall->tl2_tl1_map, U16_MAX, tx_stall->tl2_count * size);
+ memset(tx_stall->tl2_link_map, U16_MAX, tx_stall->tl2_count * size);
+
+ for (smq = 0; smq < tx_stall->smq_count; smq++) {
+ /* Skip SMQ if it's not assigned to any */
+ if (!is_schq_allocated(rvu, nix_hw, NIX_TXSCH_LVL_SMQ, smq))
+ continue;
+
+ /* If SW_XOFF is set, ignore the scheduler queue */
+ cfg = rvu_rd64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
+ if (cfg & BIT_ULL(50))
+ continue;
+ if (is_sw_xoff_set(rvu, blkaddr, NIX_TXSCH_LVL_MDQ, smq))
+ continue;
+
+ cfg = rvu_rd64(rvu, blkaddr, NIX_AF_MDQX_PARENT(smq));
+ tl4 = (cfg >> 16) & 0x1FF;
+ if (is_sw_xoff_set(rvu, blkaddr, NIX_TXSCH_LVL_TL4, tl4))
+ continue;
+
+ cfg = rvu_rd64(rvu, blkaddr, NIX_AF_TL4X_PARENT(tl4));
+ tl3 = (cfg >> 16) & 0x1FF;
+ if (is_sw_xoff_set(rvu, blkaddr, NIX_TXSCH_LVL_TL3, tl3))
+ continue;
+
+ cfg = rvu_rd64(rvu, blkaddr, NIX_AF_TL3X_PARENT(tl3));
+ tl2 = (cfg >> 16) & 0x1FF;
+ if (is_sw_xoff_set(rvu, blkaddr, NIX_TXSCH_LVL_TL2, tl2))
+ continue;
+
+ cfg = rvu_rd64(rvu, blkaddr, NIX_AF_TL2X_PARENT(tl2));
+ tl1 = (cfg >> 16) & 0x1FF;
+ if (is_sw_xoff_set(rvu, blkaddr, NIX_TXSCH_LVL_TL1, tl1))
+ continue;
+
+ tx_stall->smq_tl2_map[smq] = tl2;
+ tx_stall->tl4_tl2_map[tl4] = tl2;
+ tx_stall->tl3_tl2_map[tl3] = tl2;
+ tx_stall->tl2_tl1_map[tl2] = tl1;
+ rvu_nix_scan_tl2_link_mapping(rvu, tx_stall, blkaddr, tl2, smq);
+ }
+
+ /* Get count of TL2s attached to each NIXLF */
+ tl2_txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
+ block = &hw->block[blkaddr];
+ memset(tx_stall->nixlf_tl2_count, 0, block->lf.max * sizeof(u8));
+ for (lf = 0; lf < block->lf.max; lf++) {
+ mutex_lock(&rvu->rsrc_lock);
+ if (!test_bit(lf, block->lf.bmap)) {
+ mutex_unlock(&rvu->rsrc_lock);
+ continue;
+ }
+ pcifunc = block->fn_map[lf];
+ mutex_unlock(&rvu->rsrc_lock);
+
+ for (tl2 = 0; tl2 < tx_stall->tl2_count; tl2++) {
+ if (!is_schq_allocated(rvu, nix_hw,
+ NIX_TXSCH_LVL_TL2, tl2))
+ continue;
+ if (pcifunc == TXSCH_MAP_FUNC(tl2_txsch->pfvf_map[tl2]))
+ tx_stall->nixlf_tl2_count[lf]++;
+ }
+ }
+}
+
+#define TX_OCTS 4
+#define RVU_AF_BAR2_SEL (0x9000000ull)
+#define RVU_AF_BAR2_ALIASX(a, b) (0x9100000ull | (a) << 12 | (b))
+#define NIX_LF_SQ_OP_OCTS (0xa10)
+
+static bool is_sq_stalled(struct rvu *rvu, struct nix_hw *nix_hw, int smq)
+{
+ struct nix_tx_stall *tx_stall = nix_hw->tx_stall;
+ u64 btx_octs, atx_octs, cfg, incr;
+ int sq_count = tx_stall->sq_count;
+ struct rvu_hwinfo *hw = rvu->hw;
+ int blkaddr = tx_stall->blkaddr;
+ struct nix_txsch *smq_txsch;
+ struct rvu_pfvf *pfvf;
+ atomic64_t *ptr;
+ int nixlf, sq;
+ u16 pcifunc;
+
+ smq_txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
+ pcifunc = TXSCH_MAP_FUNC(smq_txsch->pfvf_map[smq]);
+ nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+ if (nixlf < 0)
+ return false;
+
+ /* If a NIXLF is transmitting pkts via only one TL2, then checking
+ * global NIXLF TX stats is sufficient.
+ */
+ if (tx_stall->nixlf_tl2_count[nixlf] != 1)
+ goto poll_sq_stats;
+
+ tx_stall->nixlf_poll_count[nixlf]++;
+ btx_octs = rvu_rd64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, TX_OCTS));
+ usleep_range(50, 60);
+ atx_octs = rvu_rd64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, TX_OCTS));
+ if (btx_octs == atx_octs) {
+ tx_stall->nixlf_stall_count[nixlf]++;
+ return true;
+ }
+ return false;
+
+poll_sq_stats:
+ if (!tx_stall->nixlf_tl2_count[nixlf])
+ return false;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+
+	/* Enable BAR2 register access from AF BAR2 alias registers */
+ cfg = BIT_ULL(16) | pcifunc;
+ rvu_wr64(rvu, blkaddr, RVU_AF_BAR2_SEL, cfg);
+
+ for (sq = 0; sq < pfvf->sq_ctx->qsize; sq++) {
+ if (!is_sq_allocated(rvu, pfvf, blkaddr, sq))
+ continue;
+
+ rvu_nix_txsch_lock(nix_hw);
+ if (tx_stall->sq_smq_map[nixlf * sq_count + sq] != smq) {
+ rvu_nix_txsch_unlock(nix_hw);
+ continue;
+ }
+ rvu_nix_txsch_unlock(nix_hw);
+
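+		/* Fetch this SQ's transmitted octet count through the AF
+		 * BAR2 alias of NIX_LF_SQ_OP_OCTS: an atomic read-op with
+		 * the SQ index in the upper 32 bits returns the counter.
+		 */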
+ incr = (u64)sq << 32;
+ ptr = (__force atomic64_t *)(rvu->afreg_base + ((blkaddr << 28)
+ | RVU_AF_BAR2_ALIASX(nixlf, NIX_LF_SQ_OP_OCTS)));
+
+ btx_octs = atomic64_fetch_add_relaxed(incr, ptr);
+ usleep_range(50, 60);
+ atx_octs = atomic64_fetch_add_relaxed(incr, ptr);
+		/* If at least one SQ is transmitting pkts then the SMQ
+		 * is not stalled.
+		 */
+ if (btx_octs != atx_octs)
+ return false;
+ }
+ tx_stall->nixlf_stall_count[nixlf]++;
+
+ return true;
+}
+
+static bool rvu_nix_check_smq_stall(struct rvu *rvu, struct nix_hw *nix_hw,
+ int tl2)
+{
+ struct nix_tx_stall *tx_stall = nix_hw->tx_stall;
+ int blkaddr = tx_stall->blkaddr;
+ u64 mdesc_cnt;
+ int smq;
+
+ for (smq = 0; smq < tx_stall->smq_count; smq++) {
+ if (tx_stall->smq_tl2_map[smq] != tl2)
+ continue;
+
+ mdesc_cnt = rvu_rd64(rvu, blkaddr, NIX_AF_SMQX_STATUS(smq));
+ if (!(mdesc_cnt & 0x7F))
+ continue;
+ if (is_sq_stalled(rvu, nix_hw, smq))
+ return true;
+ }
+ return false;
+}
+
+static bool is_cgx_idle(u64 status, u8 link_map)
+{
+ if (EXPR_LINK(link_map))
+ return status & CGXX_CMRX_TX_LMAC_E_IDLE;
+ return status & CGXX_CMRX_TX_LMAC_IDLE;
+}
+
+static bool rvu_cgx_tx_idle(struct rvu *rvu, struct nix_hw *nix_hw,
+ struct nix_txsch *tl2_txsch, int tl2)
+{
+ unsigned long timeout = jiffies + usecs_to_jiffies(20);
+ struct nix_tx_stall *tx_stall = nix_hw->tx_stall;
+ u16 pcifunc, link_map;
+ u8 cgx_id, lmac_id;
+ u64 status;
+ void *cgxd;
+ int pf;
+
+ pcifunc = TXSCH_MAP_FUNC(tl2_txsch->pfvf_map[tl2]);
+ pf = rvu_get_pf(pcifunc);
+ if (!is_pf_cgxmapped(rvu, pf))
+ return false;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ if (!cgxd)
+ return false;
+
+ link_map = tx_stall->tl2_link_map[tl2];
+
+ /* Wait for LMAC TX_IDLE */
+ while (time_before(jiffies, timeout)) {
+ status = cgx_get_lmac_tx_fifo_status(cgxd, lmac_id);
+ if (is_cgx_idle(status, link_map))
+ return true;
+ usleep_range(1, 2);
+ }
+ return false;
+}
+
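+/* Recover a stalled TL2: SW_XOFF every TL2/TL3 feeding the affected link,
+ * wait for the LMAC TX FIFO to go idle, restore the link's transmit
+ * credits and then toggle SW_XOFF down the scheduler hierarchy to restart
+ * transmission.
+ */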
+static void rvu_nix_restore_tx(struct rvu *rvu, struct nix_hw *nix_hw,
+ int blkaddr, int tl2)
+{
+ struct nix_tx_stall *tx_stall = nix_hw->tx_stall;
+ struct nix_txsch *tl2_txsch;
+ int tl, link;
+
+ link = tx_stall->tl2_link_map[tl2] & 0x7F;
+
+ tx_stall->stalled_cntr++;
+
+ tl2_txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
+ rvu_nix_txsch_lock(nix_hw);
+
+ /* Set SW_XOFF for every TL2 queue which transmits to
+ * the associated link.
+ */
+ for (tl = 0; tl < tx_stall->tl2_count; tl++) {
+ if ((tx_stall->tl2_link_map[tl] & 0x7F) != link)
+ continue;
+		/* The full workaround assumes a fixed 1:1 TL3:TL2 mapping,
+		 * i.e. TL3 and TL2 indices can be used interchangeably.
+		 * Hence, except in this function, nowhere else do we check
+		 * the PSE backpressure level configured in the
+		 * NIX_AF_PSE_CHANNEL_LEVEL register.
+		 */
+ if (tx_stall->pse_link_bp_level == NIX_TXSCH_LVL_TL2)
+ rvu_wr64(rvu, blkaddr,
+ NIX_AF_TL2X_SW_XOFF(tl), BIT_ULL(0));
+ else
+ rvu_wr64(rvu, blkaddr,
+ NIX_AF_TL3X_SW_XOFF(tl), BIT_ULL(0));
+ }
+ usleep_range(20, 25);
+
+ /* Wait for LMAC TX_IDLE */
+ if (link < rvu->hw->cgx_links) {
+ if (!rvu_cgx_tx_idle(rvu, nix_hw, tl2_txsch, tl2))
+ goto clear_sw_xoff;
+ }
+
+ /* Restore link credits */
+ rvu_wr64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link),
+ tx_stall->nlink_credits[link]);
+
+ /* Toggle SW_XOFF of every scheduler queue at every level
+ * which points to this TL2.
+ */
+ for (tl = 0; tl < tx_stall->smq_count; tl++) {
+ if (tx_stall->smq_tl2_map[tl] != tl2)
+ continue;
+ rvu_wr64(rvu, blkaddr, NIX_AF_MDQX_SW_XOFF(tl), BIT_ULL(0));
+ rvu_wr64(rvu, blkaddr, NIX_AF_MDQX_SW_XOFF(tl), 0x00);
+ }
+
+ for (tl = 0; tl < tx_stall->tl4_count; tl++) {
+ if (tx_stall->tl4_tl2_map[tl] != tl2)
+ continue;
+ rvu_wr64(rvu, blkaddr, NIX_AF_TL4X_SW_XOFF(tl), BIT_ULL(0));
+ rvu_wr64(rvu, blkaddr, NIX_AF_TL4X_SW_XOFF(tl), 0x00);
+ }
+
+ for (tl = 0; tl < tx_stall->tl3_count; tl++) {
+ if (tx_stall->tl3_tl2_map[tl] != tl2)
+ continue;
+ if (tx_stall->pse_link_bp_level == NIX_TXSCH_LVL_TL2) {
+ rvu_wr64(rvu, blkaddr,
+ NIX_AF_TL3X_SW_XOFF(tl), BIT_ULL(0));
+ rvu_wr64(rvu, blkaddr, NIX_AF_TL3X_SW_XOFF(tl), 0x00);
+ } else {
+ /* TL3 and TL2 indices used by this NIXLF are same */
+ rvu_wr64(rvu, blkaddr,
+ NIX_AF_TL2X_SW_XOFF(tl), BIT_ULL(0));
+ rvu_wr64(rvu, blkaddr, NIX_AF_TL2X_SW_XOFF(tl), 0x00);
+ }
+ }
+
+clear_sw_xoff:
+ /* Clear SW_XOFF of all TL2 queues, which are set above */
+ for (tl = 0; tl < tx_stall->tl2_count; tl++) {
+ if ((tx_stall->tl2_link_map[tl] & 0x7F) != link)
+ continue;
+ if (tx_stall->pse_link_bp_level == NIX_TXSCH_LVL_TL2)
+ rvu_wr64(rvu, blkaddr, NIX_AF_TL2X_SW_XOFF(tl), 0x00);
+ else
+ rvu_wr64(rvu, blkaddr, NIX_AF_TL3X_SW_XOFF(tl), 0x00);
+ }
+ rvu_nix_txsch_unlock(nix_hw);
+}
+
+static bool is_link_backpressured(struct nix_tx_stall *tx_stall,
+ struct nix_hw *nix_hw,
+ int blkaddr, int tl2)
+{
+ struct rvu *rvu = tx_stall->rvu;
+ struct nix_txsch *tl2_txsch;
+ int pkt_cnt, unit_cnt;
+ int link, chan;
+ u64 cfg;
+
+ /* Skip uninitialized ones */
+ if (tx_stall->tl2_link_map[tl2] == U16_MAX)
+ return true;
+
+ link = tx_stall->tl2_link_map[tl2] & 0x7F;
+ chan = LINK_CHAN(tx_stall->tl2_link_map[tl2]);
+
+ cfg = rvu_rd64(rvu, blkaddr, NIX_AF_TX_LINKX_HW_XOFF(link));
+ if (cfg & BIT_ULL(chan))
+ return true;
+
+ /* Skip below checks for LBK links */
+ if (link >= rvu->hw->cgx_links)
+ return false;
+
+ cfg = rvu_rd64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link));
+
+	/* Check if the current credit or pkt count is negative or simply
+	 * more than what is configured.
+	 */
+ pkt_cnt = (cfg >> 2) & 0x3FF;
+ unit_cnt = (cfg >> 12) & 0xFFFFF;
+ if (pkt_cnt > ((tx_stall->nlink_credits[link] >> 2) & 0x3FF) ||
+ unit_cnt > ((tx_stall->nlink_credits[link] >> 12) & 0xFFFFF)) {
+ return false;
+ }
+
+ tl2_txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
+ if (rvu_cgx_tx_idle(rvu, nix_hw, tl2_txsch, tl2))
+ return false;
+
+ return true;
+}
+
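+/* Polling thread for the NIX transmit stall workaround: for every in-use
+ * TL2 that is not backpressured by its link, check whether its SMQs still
+ * hold descriptors while no octets leave the NIXLF; if so, run the
+ * recovery sequence above.
+ */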
+static int rvu_nix_poll_for_tx_stall(void *arg)
+{
+ struct nix_tx_stall *tx_stall = arg;
+ struct rvu *rvu = tx_stall->rvu;
+ int blkaddr = tx_stall->blkaddr;
+ struct nix_hw *nix_hw;
+ int tl2;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return -EINVAL;
+
+ while (!kthread_should_stop()) {
+ for (tl2 = 0; tl2 < tx_stall->tl2_count; tl2++) {
+ /* Skip TL2 if it's not assigned to any */
+ if (!is_schq_allocated(rvu, nix_hw,
+ NIX_TXSCH_LVL_TL2, tl2))
+ continue;
+
+ tx_stall->poll_cntr++;
+
+ if (tx_stall->txsch_config_changed) {
+ rvu_nix_txsch_lock(nix_hw);
+ rvu_nix_scan_txsch_hierarchy(rvu, nix_hw,
+ blkaddr);
+ tx_stall->txsch_config_changed = false;
+ rvu_nix_txsch_unlock(nix_hw);
+ }
+
+ rvu_nix_txsch_lock(nix_hw);
+ if (is_link_backpressured(tx_stall, nix_hw,
+ blkaddr, tl2)) {
+ rvu_nix_txsch_unlock(nix_hw);
+ continue;
+ }
+ rvu_nix_txsch_unlock(nix_hw);
+
+ if (!rvu_nix_check_smq_stall(rvu, nix_hw, tl2))
+ continue;
+
+ rvu_nix_restore_tx(rvu, nix_hw, blkaddr, tl2);
+ }
+ rvu_usleep_interruptible(250);
+ }
+
+ return 0;
+}
+
+static int rvu_nix_init_tl_map(struct rvu *rvu, struct nix_hw *nix_hw, int lvl)
+{
+ struct nix_tx_stall *tx_stall = nix_hw->tx_stall;
+ struct nix_txsch *txsch;
+ u16 *tl_map;
+
+ txsch = &nix_hw->txsch[lvl];
+ tl_map = devm_kcalloc(rvu->dev, txsch->schq.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!tl_map)
+ return -ENOMEM;
+
+ switch (lvl) {
+ case NIX_TXSCH_LVL_SMQ:
+ tx_stall->smq_count = txsch->schq.max;
+ tx_stall->smq_tl2_map = tl_map;
+ break;
+ case NIX_TXSCH_LVL_TL4:
+ tx_stall->tl4_count = txsch->schq.max;
+ tx_stall->tl4_tl2_map = tl_map;
+ break;
+ case NIX_TXSCH_LVL_TL3:
+ tx_stall->tl3_count = txsch->schq.max;
+ tx_stall->tl3_tl2_map = tl_map;
+ break;
+ case NIX_TXSCH_LVL_TL2:
+ tx_stall->tl2_count = txsch->schq.max;
+ tx_stall->tl2_tl1_map = tl_map;
+ break;
+ }
+ memset(tl_map, U16_MAX, txsch->schq.max * sizeof(u16));
+ return 0;
+}
+
+static int rvu_nix_tx_stall_workaround_init(struct rvu *rvu,
+ struct nix_hw *nix_hw, int blkaddr)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct nix_tx_stall *tx_stall;
+ struct rvu_block *block;
+ int links, err;
+
+ if (!hw->cap.nix_fixed_txschq_mapping)
+ return 0;
+
+ tx_stall = devm_kzalloc(rvu->dev,
+ sizeof(struct nix_tx_stall), GFP_KERNEL);
+ if (!tx_stall)
+ return -ENOMEM;
+
+ tx_stall->blkaddr = blkaddr;
+ tx_stall->rvu = rvu;
+ nix_hw->tx_stall = tx_stall;
+
+ /* Get the level at which link/chan will assert backpressure */
+ if (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL))
+ tx_stall->pse_link_bp_level = NIX_TXSCH_LVL_TL3;
+ else
+ tx_stall->pse_link_bp_level = NIX_TXSCH_LVL_TL2;
+
+ mutex_init(&tx_stall->txsch_lock);
+
+ /* Alloc memory for saving SMQ/TL4/TL3/TL1 to TL2 mapping */
+ err = rvu_nix_init_tl_map(rvu, nix_hw, NIX_TXSCH_LVL_SMQ);
+ if (err)
+ return err;
+ err = rvu_nix_init_tl_map(rvu, nix_hw, NIX_TXSCH_LVL_TL4);
+ if (err)
+ return err;
+ err = rvu_nix_init_tl_map(rvu, nix_hw, NIX_TXSCH_LVL_TL3);
+ if (err)
+ return err;
+ err = rvu_nix_init_tl_map(rvu, nix_hw, NIX_TXSCH_LVL_TL2);
+ if (err)
+ return err;
+
+ block = &hw->block[blkaddr];
+ tx_stall->sq_count = min_t(int, num_online_cpus(), OTX2_MAX_CQ_CNT);
+
+	/* Per-NIXLF SQ to SMQ mapping info */
+ tx_stall->sq_smq_map = devm_kcalloc(rvu->dev,
+ block->lf.max * tx_stall->sq_count,
+ sizeof(u16), GFP_KERNEL);
+ if (!tx_stall->sq_smq_map)
+ return -ENOMEM;
+ memset(tx_stall->sq_smq_map, U16_MAX,
+ block->lf.max * tx_stall->sq_count * sizeof(u16));
+
+ /* TL2 to transmit link mapping info */
+ tx_stall->tl2_link_map = devm_kcalloc(rvu->dev, tx_stall->tl2_count,
+ sizeof(u16), GFP_KERNEL);
+ if (!tx_stall->tl2_link_map)
+ return -ENOMEM;
+ memset(tx_stall->tl2_link_map, U16_MAX,
+ tx_stall->tl2_count * sizeof(u16));
+
+	/* Number of TL2s attached to each NIXLF */
+ tx_stall->nixlf_tl2_count = devm_kcalloc(rvu->dev, block->lf.max,
+ sizeof(u8), GFP_KERNEL);
+ if (!tx_stall->nixlf_tl2_count)
+ return -ENOMEM;
+ memset(tx_stall->nixlf_tl2_count, 0, block->lf.max * sizeof(u8));
+
+ /* Per NIXLF poll and stall counters */
+ tx_stall->nixlf_poll_count = devm_kcalloc(rvu->dev, block->lf.max,
+ sizeof(u64), GFP_KERNEL);
+ if (!tx_stall->nixlf_poll_count)
+ return -ENOMEM;
+ memset(tx_stall->nixlf_poll_count, 0, block->lf.max * sizeof(u64));
+
+ tx_stall->nixlf_stall_count = devm_kcalloc(rvu->dev, block->lf.max,
+ sizeof(u64), GFP_KERNEL);
+ if (!tx_stall->nixlf_stall_count)
+ return -ENOMEM;
+ memset(tx_stall->nixlf_stall_count, 0, block->lf.max * sizeof(u64));
+
+ /* For saving HW link's transmit credits config */
+ links = rvu->hw->cgx_links + rvu->hw->lbk_links;
+ tx_stall->nlink_credits = devm_kcalloc(rvu->dev, links,
+ sizeof(u64), GFP_KERNEL);
+ if (!tx_stall->nlink_credits)
+ return -ENOMEM;
+ rvu_nix_scan_link_credits(rvu, blkaddr, tx_stall);
+
+ tx_stall->poll_thread = kthread_create(rvu_nix_poll_for_tx_stall,
+ (void *)tx_stall,
+ "nix_tx_stall_polling_kthread");
+ if (IS_ERR(tx_stall->poll_thread))
+ return PTR_ERR(tx_stall->poll_thread);
+
+ kthread_bind(tx_stall->poll_thread, cpumask_first(cpu_online_mask));
+ wake_up_process(tx_stall->poll_thread);
+ return 0;
+}
+
+static void rvu_nix_tx_stall_workaround_exit(struct rvu *rvu,
+ struct nix_hw *nix_hw)
+{
+ struct nix_tx_stall *tx_stall = nix_hw->tx_stall;
+
+ if (!tx_stall)
+ return;
+
+ if (tx_stall->poll_thread)
+ kthread_stop(tx_stall->poll_thread);
+ mutex_destroy(&tx_stall->txsch_lock);
+}
+
+ssize_t rvu_nix_get_tx_stall_counters(struct rvu *rvu,
+ char __user *buffer, loff_t *ppos)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct nix_tx_stall *tx_stall;
+ struct rvu_block *block;
+ struct nix_hw *nix_hw;
+ int blkaddr, len, lf;
+ char *kbuf;
+ int size = 2000;
+
+ if (*ppos)
+ return 0;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return -EFAULT;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return -EFAULT;
+
+ tx_stall = nix_hw->tx_stall;
+ if (!tx_stall)
+ return -EFAULT;
+
+ kbuf = kmalloc(size, GFP_KERNEL);
+ if (!kbuf)
+ return -ENOMEM;
+
+	len = snprintf(kbuf, size, "\n NIX transmit stall stats\n");
+	len += snprintf(kbuf + len, size - len,
+			"\t\tPolled: \t\t%lld\n", tx_stall->poll_cntr);
+	len += snprintf(kbuf + len, size - len,
+			"\t\tTx stall detected: \t%lld\n\n",
+			tx_stall->stalled_cntr);
+
+ block = &hw->block[blkaddr];
+ mutex_lock(&rvu->rsrc_lock);
+ for (lf = 0; lf < block->lf.max; lf++) {
+ if (!test_bit(lf, block->lf.bmap))
+ continue;
+		len += snprintf(kbuf + len, size - len,
+ "\t\tNIXLF%d Polled: %lld \tStalled: %lld\n",
+ lf, tx_stall->nixlf_poll_count[lf],
+ tx_stall->nixlf_stall_count[lf]);
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+
+ if (len > 0) {
+ if (copy_to_user(buffer, kbuf, len)) {
+ kfree(kbuf);
+ return -EFAULT;
+ }
+ }
+
+ *ppos += len;
+ kfree(kbuf);
+ return len;
+}
+
+static void rvu_nix_enable_internal_bp(struct rvu *rvu, int blkaddr)
+{
+	/* An issue exists in A0 silicon whereby NIX CQ may reach a CQ-full
+	 * state, followed by a CQ hang on a CQM query response from a stale
+	 * CQ context. To avoid such a condition, enable internal backpressure
+	 * using the BP_TEST registers.
+	 */
+ if (is_rvu_96xx_A0(rvu)) {
+ /* Enable internal backpressure on pipe_stg0 */
+ rvu_write64(rvu, blkaddr, NIX_AF_RQM_BP_TEST,
+ BIT_ULL(51) | BIT_ULL(23) | BIT_ULL(22) | 0x100ULL);
+ /* Enable internal backpressure on cqm query request */
+ rvu_write64(rvu, blkaddr, NIX_AF_CQM_BP_TEST,
+ BIT_ULL(43) | BIT_ULL(23) | BIT_ULL(22) | 0x100ULL);
+ }
+}
+
+int rvu_nix_fixes_init(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
+{
+ int err;
+ u64 cfg;
+
+	/* As per a HW erratum in 96xx A0 silicon, NIX may corrupt its
+	 * internal state when conditional clocks are turned off.
+	 * Hence enable them.
+	 */
+ if (is_rvu_96xx_A0(rvu))
+ rvu_write64(rvu, blkaddr, NIX_AF_CFG,
+ rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x5EULL);
+ if (!is_rvu_post_96xx_C0(rvu))
+ rvu_write64(rvu, blkaddr, NIX_AF_CFG,
+ rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);
+
+ /* Set chan/link to backpressure TL3 instead of TL2 */
+ rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
+
+	/* Disable SQ manager's sticky mode operation (set TM6 = 0).
+	 * This sticky mode is known to cause SQ stalls when multiple
+	 * SQs are mapped to the same SMQ and are transmitting pkts
+	 * simultaneously.
+	 */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
+ cfg &= ~BIT_ULL(15);
+ rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
+
+ rvu_nix_enable_internal_bp(rvu, blkaddr);
+
+ if (!is_rvu_96xx_A0(rvu))
+ return 0;
+
+ err = rvu_nix_tx_stall_workaround_init(rvu, nix_hw, blkaddr);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+void rvu_nix_fixes_exit(struct rvu *rvu, struct nix_hw *nix_hw)
+{
+ if (!is_rvu_96xx_A0(rvu))
+ return;
+
+ rvu_nix_tx_stall_workaround_exit(rvu, nix_hw);
+}
+
+int rvu_tim_lookup_rsrc(struct rvu *rvu, struct rvu_block *block,
+ u16 pcifunc, int slot)
+{
+ int lf, blkaddr;
+ u64 val;
+
+	/* Due to a HW issue, the LF_CFG_DEBUG register cannot be used to
+	 * find the PF_FUNC <=> LF mapping; hence scan through the LFX_CFG
+	 * registers to find the LF mapped to a given PF_FUNC.
+	 */
+ if (is_rvu_96xx_B0(rvu)) {
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_TIM, pcifunc);
+ if (blkaddr < 0)
+ return TIM_AF_LF_INVALID;
+
+ for (lf = 0; lf < block->lf.max; lf++) {
+ val = rvu_read64(rvu, block->addr, block->lfcfg_reg |
+ (lf << block->lfshift));
+ if ((((val >> 8) & 0xffff) == pcifunc) &&
+ (val & 0xff) == slot)
+ return lf;
+ }
+ return -1;
+ }
+
+ val = ((u64)pcifunc << 24) | (slot << 16) | (1ULL << 13);
+ rvu_write64(rvu, block->addr, block->lookup_reg, val);
+
+ /* Wait for the lookup to finish */
+ while (rvu_read64(rvu, block->addr, block->lookup_reg) & (1ULL << 13))
+ ;
+
+ val = rvu_read64(rvu, block->addr, block->lookup_reg);
+
+ /* Check LF valid bit */
+ if (!(val & (1ULL << 12)))
+ return -1;
+
+ return (val & 0xFFF);
+}
+
+int rvu_npc_get_tx_nibble_cfg(struct rvu *rvu, u64 nibble_ena)
+{
+ /* Due to a HW issue in these silicon versions, parse nibble enable
+ * configuration has to be identical for both Rx and Tx interfaces.
+ */
+ if (is_rvu_96xx_B0(rvu))
+ return nibble_ena;
+ return 0;
+}
+
+bool is_parse_nibble_config_valid(struct rvu *rvu,
+ struct npc_mcam_kex *mcam_kex)
+{
+ if (!is_rvu_96xx_B0(rvu))
+ return true;
+
+ /* Due to a HW issue in above silicon versions, parse nibble enable
+ * configuration has to be identical for both Rx and Tx interfaces.
+ */
+ if (mcam_kex->keyx_cfg[NIX_INTF_RX] != mcam_kex->keyx_cfg[NIX_INTF_TX])
+ return false;
+ return true;
+}
+
+void __weak otx2smqvf_xmit(void)
+{
+ /* Nothing to do */
+}
+
+void rvu_smqvf_xmit(struct rvu *rvu)
+{
+ if (is_rvu_95xx_A0(rvu) || is_rvu_96xx_A0(rvu)) {
+ usleep_range(50, 60);
+ otx2smqvf_xmit();
+ }
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_fixes.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_fixes.h
new file mode 100644
index 000000000000..04ba74b92cca
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_fixes.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2019 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef RVU_FIXES_H
+#define RVU_FIXES_H
+
+#define RVU_SMQVF_PCIFUNC 17
+
+struct rvu;
+
+void otx2smqvf_xmit(void);
+void rvu_smqvf_xmit(struct rvu *rvu);
+
+#endif /* RVU_FIXES_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 36953d4f51c7..176de5de8a22 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -16,7 +16,9 @@
#include "rvu.h"
#include "npc.h"
#include "cgx.h"
+#include "rvu_fixes.h"
+static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc);
static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add);
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
int type, int chan_id);
@@ -193,7 +195,8 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
int err;
pf = rvu_get_pf(pcifunc);
- if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
+ if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
+ type != NIX_INTF_TYPE_SDP)
return 0;
switch (type) {
@@ -216,8 +219,8 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
/* By default we enable pause frames */
if ((pcifunc & RVU_PFVF_FUNC_MASK) == 0)
- cgx_lmac_set_pause_frm(rvu_cgx_pdata(cgx_id, rvu),
- lmac_id, true, true);
+ cgx_lmac_enadis_pause_frm(rvu_cgx_pdata(cgx_id, rvu),
+ lmac_id, true, true);
break;
case NIX_INTF_TYPE_LBK:
vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
@@ -234,6 +237,15 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
pfvf->rx_chan_base, false);
break;
+ case NIX_INTF_TYPE_SDP:
+ /* Only a single interface and a single channel are supported for now */
+ pfvf->rx_chan_base = NIX_CHAN_SDP_CHX(0);
+ pfvf->tx_chan_base = pfvf->rx_chan_base;
+ pfvf->rx_chan_cnt = 1;
+ pfvf->tx_chan_cnt = 1;
+ rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
+ pfvf->rx_chan_base, false);
+ break;
}
/* Add a UCAST forwarding rule in MCAM with this NIXLF attached
@@ -266,7 +278,6 @@ static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
pfvf->maxlen = 0;
pfvf->minlen = 0;
- pfvf->rxvlan = false;
/* Remove this PF_FUNC from bcast pkt replication list */
err = nix_update_bcast_mce_list(rvu, pcifunc, false);
@@ -276,8 +287,8 @@ static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
pcifunc);
}
- /* Free and disable any MCAM entries used by this NIX LF */
- rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
+ /* Disable DMAC filters used */
+ rvu_cgx_disable_dmac_entries(rvu, pcifunc);
}
int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
@@ -547,9 +558,10 @@ static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
struct rvu_pfvf *pfvf, int nixlf,
int rss_sz, int rss_grps, int hwctx_size,
- u64 way_mask)
+ u64 way_mask, bool tag_lsb_as_adder)
{
int err, grp, num_indices;
+ u64 val;
/* RSS is not requested for this NIXLF */
if (!rss_sz)
@@ -565,10 +577,13 @@ static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
(u64)pfvf->rss_ctx->iova);
/* Config full RSS table size, enable RSS and caching */
- rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf),
- BIT_ULL(36) | BIT_ULL(4) |
- ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE) |
- way_mask << 20);
+ val = BIT_ULL(36) | BIT_ULL(4) | way_mask << 20 |
+ ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE);
+
+ if (tag_lsb_as_adder)
+ val |= BIT_ULL(5);
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf), val);
/* Config RSS group offset and sizes */
for (grp = 0; grp < rss_grps; grp++)
rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
@@ -696,6 +711,8 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
pcifunc, req->sq.smq))
return NIX_AF_ERR_AQ_ENQUEUE;
+ rvu_nix_update_sq_smq_mapping(rvu, blkaddr, nixlf, req->qidx,
+ req->sq.smq);
}
memset(&inst, 0, sizeof(struct nix_aq_inst_s));
@@ -961,10 +978,10 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
struct nix_lf_alloc_rsp *rsp)
{
int nixlf, qints, hwctx_size, intf, err, rc = 0;
+ struct rvu_pfvf *pfvf, *parent_pf;
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
struct rvu_block *block;
- struct rvu_pfvf *pfvf;
u64 cfg, ctx_cfg;
int blkaddr;
@@ -974,6 +991,7 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
if (req->way_mask)
req->way_mask &= 0xFFFF;
+ parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
pfvf = rvu_get_pfvf(rvu, pcifunc);
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (!pfvf->nixlf || blkaddr < 0)
@@ -1077,7 +1095,8 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
/* Initialize receive side scaling (RSS) */
hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz,
- req->rss_grps, hwctx_size, req->way_mask);
+ req->rss_grps, hwctx_size, req->way_mask,
+ !!(req->flags & NIX_LF_RSS_TAG_LSB_AS_ADDER));
if (err)
goto free_mem;
@@ -1130,7 +1149,14 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
/* Config Rx pkt length, csum checks and apad enable / disable */
rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);
+ /* Configure pkind for TX parse config; NPC_TX_DEF_PKIND (63) from npc_profile */
+ cfg = NPC_TX_DEF_PKIND;
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);
+
intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
+ if (is_sdp_pf(pcifunc))
+ intf = NIX_INTF_TYPE_SDP;
+
err = nix_interface_init(rvu, pcifunc, intf, nixlf);
if (err)
goto free_mem;
@@ -1138,6 +1164,11 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
/* Disable NPC entries as NIXLF's contexts are not initialized yet */
rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
+ /* Configure RX VTAG Type 7 (strip) for vf vlan */
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, NIX_AF_LFX_RX_VTAG_TYPE7),
+ VTAGSIZE_T4 | BIT_ULL(4));
+
goto exit;
free_mem:
@@ -1165,10 +1196,11 @@ exit:
cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
rsp->qints = ((cfg >> 12) & 0xFFF);
rsp->cints = ((cfg >> 24) & 0xFFF);
+ rsp->hw_rx_tstamp_en = parent_pf->hw_rx_tstamp_en;
return rc;
}
-int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req,
struct msg_rsp *rsp)
{
struct rvu_hwinfo *hw = rvu->hw;
@@ -1187,6 +1219,15 @@ int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct msg_req *req,
if (nixlf < 0)
return NIX_AF_ERR_AF_LF_INVALID;
+ if (req->flags & NIX_LF_DISABLE_FLOWS)
+ rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
+ else
+ rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
+
+ /* Free any tx vtag def entries used by this NIX LF */
+ if (!(req->flags & NIX_LF_DONT_FREE_TX_VTAG))
+ nix_free_tx_vtag_entries(rvu, pcifunc);
+
nix_interface_deinit(rvu, pcifunc, nixlf);
/* Reset this NIX LF */
@@ -1491,6 +1532,14 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
mutex_lock(&rvu->rsrc_lock);
+ /* Check if request can be accommodated as per limits set by admin */
+ if (!hw->cap.nix_fixed_txschq_mapping &&
+ rvu_check_txsch_policy(rvu, req, pcifunc)) {
+ dev_err(rvu->dev, "Func 0x%x: TXSCH policy check failed\n",
+ pcifunc);
+ goto err;
+ }
+
/* Check if request is valid as per HW capabilities
* and can be accommodated.
*/
@@ -1583,6 +1632,8 @@ static void nix_smq_flush(struct rvu *rvu, int blkaddr,
*/
rvu_cgx_enadis_rx_bp(rvu, pf, false);
+ rvu_smqvf_xmit(rvu);
+
/* Wait for flush to complete */
err = rvu_poll_reg(rvu, blkaddr,
NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
@@ -1655,9 +1706,7 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
}
mutex_unlock(&rvu->rsrc_lock);
- /* Sync cached info for this LF in NDC-TX to LLC/DRAM */
- rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
- err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true);
+ err = rvu_ndc_sync(rvu, blkaddr, nixlf, NIX_AF_NDC_TX_SYNC);
if (err)
dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);
@@ -1856,14 +1905,17 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
return 0;
}
+ rvu_nix_txsch_lock(nix_hw);
for (idx = 0; idx < req->num_regs; idx++) {
reg = req->reg[idx];
regval = req->regval[idx];
schq_regbase = reg & 0xFFFF;
if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr,
- txsch->lvl, reg, regval))
+ txsch->lvl, reg, regval)) {
+ rvu_nix_txsch_unlock(nix_hw);
return NIX_AF_INVAL_TXSCHQ_CFG;
+ }
/* Check if shaping and coloring is supported */
if (!is_txschq_shaping_valid(hw, req->lvl, reg))
@@ -1907,6 +1959,8 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
rvu_write64(rvu, blkaddr, reg, regval);
}
+ rvu_nix_txsch_config_changed(nix_hw);
+ rvu_nix_txsch_unlock(nix_hw);
return 0;
}
@@ -1915,9 +1969,14 @@ static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
{
u64 regval = req->vtag_size;
- if (req->rx.vtag_type > 7 || req->vtag_size > VTAGSIZE_T8)
+ if (req->rx.vtag_type > NIX_AF_LFX_RX_VTAG_TYPE7 ||
+ req->vtag_size > VTAGSIZE_T8)
return -EINVAL;
+ /* RX VTAG Type 7 reserved for vf vlan */
+ if (req->rx.vtag_type == NIX_AF_LFX_RX_VTAG_TYPE7)
+ return NIX_AF_ERR_RX_VTAG_INUSE;
+
if (req->rx.capture_vtag)
regval |= BIT_ULL(5);
if (req->rx.strip_vtag)
@@ -1928,9 +1987,149 @@ static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
return 0;
}
+static int nix_tx_vtag_free(struct rvu *rvu, int blkaddr,
+ u16 pcifunc, int index)
+{
+ struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ struct nix_txvlan *vlan = &nix_hw->txvlan;
+
+ if (vlan->entry2pfvf_map[index] != pcifunc)
+ return NIX_AF_ERR_PARAM;
+
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TX_VTAG_DEFX_DATA(index), 0x0ull);
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TX_VTAG_DEFX_CTL(index), 0x0ull);
+
+ vlan->entry2pfvf_map[index] = 0;
+ rvu_free_rsrc(&vlan->rsrc, index);
+
+ return 0;
+}
+
+static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc)
+{
+ struct nix_txvlan *vlan;
+ struct nix_hw *nix_hw;
+ int index, blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ vlan = &nix_hw->txvlan;
+
+ mutex_lock(&vlan->rsrc_lock);
+ /* Scan all the entries and free the ones mapped to 'pcifunc' */
+ for (index = 0; index < vlan->rsrc.max; index++) {
+ if (vlan->entry2pfvf_map[index] == pcifunc)
+ nix_tx_vtag_free(rvu, blkaddr, pcifunc, index);
+ }
+ mutex_unlock(&vlan->rsrc_lock);
+}
+
+static int nix_tx_vtag_alloc(struct rvu *rvu, int blkaddr,
+ u64 vtag, u8 size)
+{
+ struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ struct nix_txvlan *vlan = &nix_hw->txvlan;
+ u64 regval;
+ int index;
+
+ mutex_lock(&vlan->rsrc_lock);
+
+ index = rvu_alloc_rsrc(&vlan->rsrc);
+ if (index < 0) {
+ mutex_unlock(&vlan->rsrc_lock);
+ return index;
+ }
+
+ mutex_unlock(&vlan->rsrc_lock);
+
+ regval = size ? vtag : vtag << 32;
+
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TX_VTAG_DEFX_DATA(index), regval);
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TX_VTAG_DEFX_CTL(index), size);
+
+ return index;
+}
+
+static int nix_tx_vtag_decfg(struct rvu *rvu, int blkaddr,
+ struct nix_vtag_config *req)
+{
+ struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ struct nix_txvlan *vlan = &nix_hw->txvlan;
+ u16 pcifunc = req->hdr.pcifunc;
+ int idx0 = req->tx.vtag0_idx;
+ int idx1 = req->tx.vtag1_idx;
+ int err = 0;
+
+ if (req->tx.free_vtag0 && req->tx.free_vtag1)
+ if (vlan->entry2pfvf_map[idx0] != pcifunc ||
+ vlan->entry2pfvf_map[idx1] != pcifunc)
+ return NIX_AF_ERR_PARAM;
+
+ mutex_lock(&vlan->rsrc_lock);
+
+ if (req->tx.free_vtag0) {
+ err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx0);
+ if (err)
+ goto exit;
+ }
+
+ if (req->tx.free_vtag1)
+ err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx1);
+
+exit:
+ mutex_unlock(&vlan->rsrc_lock);
+ return err;
+}
+
+static int nix_tx_vtag_cfg(struct rvu *rvu, int blkaddr,
+ struct nix_vtag_config *req,
+ struct nix_vtag_config_rsp *rsp)
+{
+ struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ struct nix_txvlan *vlan = &nix_hw->txvlan;
+ u16 pcifunc = req->hdr.pcifunc;
+
+ if (req->tx.cfg_vtag0) {
+ rsp->vtag0_idx =
+ nix_tx_vtag_alloc(rvu, blkaddr,
+ req->tx.vtag0, req->vtag_size);
+
+ if (rsp->vtag0_idx < 0)
+ return NIX_AF_ERR_TX_VTAG_NOSPC;
+
+ vlan->entry2pfvf_map[rsp->vtag0_idx] = pcifunc;
+ }
+
+ if (req->tx.cfg_vtag1) {
+ rsp->vtag1_idx =
+ nix_tx_vtag_alloc(rvu, blkaddr,
+ req->tx.vtag1, req->vtag_size);
+
+ if (rsp->vtag1_idx < 0)
+ goto err_free;
+
+ vlan->entry2pfvf_map[rsp->vtag1_idx] = pcifunc;
+ }
+
+ return 0;
+
+err_free:
+ if (req->tx.cfg_vtag0)
+ nix_tx_vtag_free(rvu, blkaddr, pcifunc, rsp->vtag0_idx);
+
+ return NIX_AF_ERR_TX_VTAG_NOSPC;
+}
+
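+/* Illustrative usage sketch, not part of this change: a PF/VF driver could
+ * fill a TX default-vtag request roughly as below before sending the vtag
+ * config mbox message handled here (cfg_type == 0 selects the TX path).
+ * The helper name is hypothetical; the field names match the mbox
+ * structures used above.
+ */
+static inline void nix_fill_tx_vtag0_req(struct nix_vtag_config *req, u64 vtag0)
+{
+ req->cfg_type = 0; /* 0 => TX vtag configuration */
+ req->vtag_size = VTAGSIZE_T4; /* single 4-byte VLAN tag */
+ req->tx.cfg_vtag0 = 1; /* allocate a VTAG0 default entry */
+ req->tx.vtag0 = vtag0; /* TPID + TCI to be inserted */
+}
+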
int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
struct nix_vtag_config *req,
- struct msg_rsp *rsp)
+ struct nix_vtag_config_rsp *rsp)
{
u16 pcifunc = req->hdr.pcifunc;
int blkaddr, nixlf, err;
@@ -1940,12 +2139,21 @@ int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
return err;
if (req->cfg_type) {
+ /* rx vtag configuration */
err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
if (err)
return NIX_AF_ERR_PARAM;
} else {
- /* TODO: handle tx vtag configuration */
- return 0;
+ /* tx vtag configuration */
+ if ((req->tx.cfg_vtag0 || req->tx.cfg_vtag1) &&
+ (req->tx.free_vtag0 || req->tx.free_vtag1))
+ return NIX_AF_ERR_PARAM;
+
+ if (req->tx.cfg_vtag0 || req->tx.cfg_vtag1)
+ return nix_tx_vtag_cfg(rvu, blkaddr, req, rsp);
+
+ if (req->tx.free_vtag0 || req->tx.free_vtag1)
+ return nix_tx_vtag_decfg(rvu, blkaddr, req);
}
return 0;
@@ -2177,6 +2385,31 @@ static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
return nix_setup_bcast_tables(rvu, nix_hw);
}
+static int nix_setup_txvlan(struct rvu *rvu, struct nix_hw *nix_hw)
+{
+ struct nix_txvlan *vlan = &nix_hw->txvlan;
+ int err;
+
+ /* Allocate resource bitmap for tx vtag def registers */
+ vlan->rsrc.max = NIX_TX_VTAG_DEF_MAX;
+ err = rvu_alloc_bitmap(&vlan->rsrc);
+ if (err)
+ return -ENOMEM;
+
+ /* Allocate memory to save the entry-to-RVU-PF_FUNC allocation mapping */
+ vlan->entry2pfvf_map = devm_kcalloc(rvu->dev, vlan->rsrc.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!vlan->entry2pfvf_map)
+ goto free_mem;
+
+ mutex_init(&vlan->rsrc_lock);
+ return 0;
+
+free_mem:
+ kfree(vlan->rsrc.bmap);
+ return -ENOMEM;
+}
+
static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
{
struct nix_txsch *txsch;
@@ -2321,6 +2554,7 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
struct nix_rx_flowkey_alg *field;
struct nix_rx_flowkey_alg tmp;
u32 key_type, valid_key;
+ u32 l3_l4_src_dst;
if (!alg)
return -EINVAL;
@@ -2347,6 +2581,15 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
* group_member - Enabled when protocol is part of a group.
*/
+ /* Last 4 bits (31:28) are reserved to specify SRC, DST
+ * selection for L3, L4 i.e. IPV[4,6]_SRC, IPV[4,6]_DST,
+ * [TCP,UDP,SCTP]_SRC, [TCP,UDP,SCTP]_DST
+ * 31 => L3_SRC, 30 => L3_DST, 29 => L4_SRC, 28 => L4_DST
+ */
+ l3_l4_src_dst = flow_cfg;
+ /* Reset these 4 bits, so that these won't be part of key */
+ flow_cfg &= NIX_FLOW_KEY_TYPE_L3_L4_MASK;
+
keyoff_marker = 0; max_key_off = 0; group_member = 0;
nr_field = 0; key_off = 0; field_marker = 1;
field = &tmp; max_bit_pos = fls(flow_cfg);
@@ -2377,6 +2620,22 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
}
field->hdr_offset = 12; /* SIP offset */
field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
+
+ /* Only SIP */
+ if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_SRC_ONLY)
+ field->bytesm1 = 3; /* SIP, 4 bytes */
+
+ if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_DST_ONLY) {
+ /* Both SIP + DIP */
+ if (field->bytesm1 == 3) {
+ field->bytesm1 = 7; /* SIP + DIP, 8B */
+ } else {
+ /* Only DIP */
+ field->hdr_offset = 16; /* DIP off */
+ field->bytesm1 = 3; /* DIP, 4 bytes */
+ }
+ }
+
field->ltype_mask = 0xF; /* Match only IPv4 */
keyoff_marker = false;
break;
@@ -2390,6 +2649,22 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
}
field->hdr_offset = 8; /* SIP offset */
field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
+
+ /* Only SIP */
+ if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_SRC_ONLY)
+ field->bytesm1 = 15; /* SIP, 16 bytes */
+
+ if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_DST_ONLY) {
+ /* Both SIP + DIP */
+ if (field->bytesm1 == 15) {
+ /* SIP + DIP, 32 bytes */
+ field->bytesm1 = 31;
+ } else {
+ /* Only DIP */
+ field->hdr_offset = 24; /* DIP off */
+ field->bytesm1 = 15; /* DIP,16 bytes */
+ }
+ }
field->ltype_mask = 0xF; /* Match only IPv6 */
break;
case NIX_FLOW_KEY_TYPE_TCP:
@@ -2405,6 +2680,21 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
field->lid = NPC_LID_LH;
field->bytesm1 = 3; /* Sport + Dport, 4 bytes */
+ if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L4_SRC_ONLY)
+ field->bytesm1 = 1; /* SRC, 2 bytes */
+
+ if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L4_DST_ONLY) {
+ /* Both SRC + DST */
+ if (field->bytesm1 == 1) {
+ /* SRC + DST, 4 bytes */
+ field->bytesm1 = 3;
+ } else {
+ /* Only DST port */
+ field->hdr_offset = 2; /* DST off */
+ field->bytesm1 = 1; /* DST, 2 bytes */
+ }
+ }
+
/* Enum values for NPC_LID_LD and NPC_LID_LG are same,
* so no need to change the ltype_match, just change
* the lid for inner protocols
@@ -2673,6 +2963,7 @@ int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
struct nix_set_mac_addr *req,
struct msg_rsp *rsp)
{
+ bool from_vf = !!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK);
u16 pcifunc = req->hdr.pcifunc;
int blkaddr, nixlf, err;
struct rvu_pfvf *pfvf;
@@ -2683,13 +2974,15 @@ int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
pfvf = rvu_get_pfvf(rvu, pcifunc);
+ /* VF can't overwrite admin (PF) changes */
+ if (from_vf && pfvf->pf_set_vf_cfg)
+ return -EPERM;
+
ether_addr_copy(pfvf->mac_addr, req->mac_addr);
rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
pfvf->rx_chan_base, req->mac_addr);
- rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
-
return 0;
}
@@ -2736,9 +3029,6 @@ int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
else
rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
pfvf->rx_chan_base, allmulti);
-
- rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
-
return 0;
}
@@ -2872,66 +3162,9 @@ linkcfg:
cfg &= ~(0xFFFFFULL << 12);
cfg |= ((lmac_fifo_len - req->maxlen) / 16) << 12;
rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
- return 0;
-}
-
-int rvu_mbox_handler_nix_rxvlan_alloc(struct rvu *rvu, struct msg_req *req,
- struct msg_rsp *rsp)
-{
- struct npc_mcam_alloc_entry_req alloc_req = { };
- struct npc_mcam_alloc_entry_rsp alloc_rsp = { };
- struct npc_mcam_free_entry_req free_req = { };
- u16 pcifunc = req->hdr.pcifunc;
- int blkaddr, nixlf, err;
- struct rvu_pfvf *pfvf;
-
- /* LBK VFs do not have separate MCAM UCAST entry hence
- * skip allocating rxvlan for them
- */
- if (is_afvf(pcifunc))
- return 0;
-
- pfvf = rvu_get_pfvf(rvu, pcifunc);
- if (pfvf->rxvlan)
- return 0;
-
- /* alloc new mcam entry */
- alloc_req.hdr.pcifunc = pcifunc;
- alloc_req.count = 1;
-
- err = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &alloc_req,
- &alloc_rsp);
- if (err)
- return err;
-
- /* update entry to enable rxvlan offload */
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
- if (blkaddr < 0) {
- err = NIX_AF_ERR_AF_LF_INVALID;
- goto free_entry;
- }
-
- nixlf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr], pcifunc, 0);
- if (nixlf < 0) {
- err = NIX_AF_ERR_AF_LF_INVALID;
- goto free_entry;
- }
-
- pfvf->rxvlan_index = alloc_rsp.entry_list[0];
- /* all it means is that rxvlan_index is valid */
- pfvf->rxvlan = true;
-
- err = rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
- if (err)
- goto free_entry;
+ rvu_nix_update_link_credits(rvu, blkaddr, link, cfg);
return 0;
-free_entry:
- free_req.hdr.pcifunc = pcifunc;
- free_req.entry = alloc_rsp.entry_list[0];
- rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, rsp);
- pfvf->rxvlan = false;
- return err;
}
int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
@@ -2994,6 +3227,9 @@ static void nix_link_config(struct rvu *rvu, int blkaddr)
*/
for (cgx = 0; cgx < hw->cgx; cgx++) {
lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
+ /* Skip when cgx is not available or lmac cnt is zero */
+ if (lmac_cnt <= 0)
+ continue;
tx_credits = ((CGX_FIFO_LEN / lmac_cnt) - NIC_HW_MAX_FRS) / 16;
/* Enable credits and set credit pkt count to max allowed */
tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
@@ -3105,6 +3341,7 @@ static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
int rvu_nix_init(struct rvu *rvu)
{
struct rvu_hwinfo *hw = rvu->hw;
+ struct npc_lt_def_cfg *ltdefs;
struct rvu_block *block;
int blkaddr, err;
u64 cfg;
@@ -3134,6 +3371,7 @@ int rvu_nix_init(struct rvu *rvu)
rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
}
+ ltdefs = rvu->kpu.lt_def;
/* Calibrate X2P bus to check if CGX/LBK links are fine */
err = nix_calibrate_x2p(rvu, blkaddr);
if (err)
@@ -3155,6 +3393,9 @@ int rvu_nix_init(struct rvu *rvu)
/* Restore CINT timer delay to HW reset values */
rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
+ /* For better performance, use NDC TX instead of NDC RX for SQ's SQEs */
+ rvu_write64(rvu, blkaddr, NIX_AF_SEB_CFG, 0x1ULL);
+
if (blkaddr == BLKADDR_NIX0) {
hw->nix0 = devm_kzalloc(rvu->dev,
sizeof(struct nix_hw), GFP_KERNEL);
@@ -3173,6 +3414,10 @@ int rvu_nix_init(struct rvu *rvu)
if (err)
return err;
+ err = nix_setup_txvlan(rvu, hw->nix0);
+ if (err)
+ return err;
+
/* Configure segmentation offload formats */
nix_setup_lso(rvu, hw->nix0, blkaddr);
@@ -3181,28 +3426,49 @@ int rvu_nix_init(struct rvu *rvu)
* and validate length and checksums.
*/
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
- (NPC_LID_LA << 8) | (NPC_LT_LA_ETHER << 4) | 0x0F);
+ (ltdefs->rx_ol2.lid << 8) |
+ (ltdefs->rx_ol2.ltype_match << 4) |
+ ltdefs->rx_ol2.ltype_mask);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
- (NPC_LID_LC << 8) | (NPC_LT_LC_IP << 4) | 0x0F);
+ (ltdefs->rx_oip4.lid << 8) |
+ (ltdefs->rx_oip4.ltype_match << 4) |
+ ltdefs->rx_oip4.ltype_mask);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4,
- (NPC_LID_LG << 8) | (NPC_LT_LG_TU_IP << 4) | 0x0F);
+ (ltdefs->rx_iip4.lid << 8) |
+ (ltdefs->rx_iip4.ltype_match << 4) |
+ ltdefs->rx_iip4.ltype_mask);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6,
- (NPC_LID_LC << 8) | (NPC_LT_LC_IP6 << 4) | 0x0F);
+ (ltdefs->rx_oip6.lid << 8) |
+ (ltdefs->rx_oip6.ltype_match << 4) |
+ ltdefs->rx_oip6.ltype_mask);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6,
- (NPC_LID_LG << 8) | (NPC_LT_LG_TU_IP6 << 4) | 0x0F);
+ (ltdefs->rx_iip6.lid << 8) |
+ (ltdefs->rx_iip6.ltype_match << 4) |
+ ltdefs->rx_iip6.ltype_mask);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
- (NPC_LID_LD << 8) | (NPC_LT_LD_TCP << 4) | 0x0F);
+ (ltdefs->rx_otcp.lid << 8) |
+ (ltdefs->rx_otcp.ltype_match << 4) |
+ ltdefs->rx_otcp.ltype_mask);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP,
- (NPC_LID_LH << 8) | (NPC_LT_LH_TU_TCP << 4) | 0x0F);
+ (ltdefs->rx_itcp.lid << 8) |
+ (ltdefs->rx_itcp.ltype_match << 4) |
+ ltdefs->rx_itcp.ltype_mask);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
- (NPC_LID_LD << 8) | (NPC_LT_LD_UDP << 4) | 0x0F);
+ (ltdefs->rx_oudp.lid << 8) |
+ (ltdefs->rx_oudp.ltype_match << 4) |
+ ltdefs->rx_oudp.ltype_mask);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP,
- (NPC_LID_LH << 8) | (NPC_LT_LH_TU_UDP << 4) | 0x0F);
+ (ltdefs->rx_iudp.lid << 8) |
+ (ltdefs->rx_iudp.ltype_match << 4) |
+ ltdefs->rx_iudp.ltype_mask);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP,
- (NPC_LID_LD << 8) | (NPC_LT_LD_SCTP << 4) | 0x0F);
+ (ltdefs->rx_osctp.lid << 8) |
+ (ltdefs->rx_osctp.ltype_match << 4) |
+ ltdefs->rx_osctp.ltype_mask);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP,
- (NPC_LID_LH << 8) | (NPC_LT_LH_TU_SCTP << 4) |
- 0x0F);
+ (ltdefs->rx_isctp.lid << 8) |
+ (ltdefs->rx_isctp.ltype_match << 4) |
+ ltdefs->rx_isctp.ltype_mask);
err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
if (err)
@@ -3213,6 +3479,24 @@ int rvu_nix_init(struct rvu *rvu)
/* Enable Channel backpressure */
rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0));
+
+ err = rvu_nix_fixes_init(rvu, hw->nix0, blkaddr);
+ if (err)
+ return err;
+
+ if (is_block_implemented(rvu->hw, BLKADDR_CPT0)) {
+ /* Config IPSec headers identification */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IPSECX(0),
+ (ltdefs->rx_ipsec[0].lid << 8) |
+ (ltdefs->rx_ipsec[0].ltype_match << 4) |
+ ltdefs->rx_ipsec[0].ltype_mask);
+
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IPSECX(1),
+ (ltdefs->rx_ipsec[1].spi_offset << 12) |
+ (ltdefs->rx_ipsec[1].lid << 8) |
+ (ltdefs->rx_ipsec[1].ltype_match << 4) |
+ ltdefs->rx_ipsec[1].ltype_mask);
+ }
}
return 0;
}
@@ -3223,6 +3507,7 @@ void rvu_nix_freemem(struct rvu *rvu)
struct rvu_block *block;
struct nix_txsch *txsch;
struct nix_mcast *mcast;
+ struct nix_txvlan *vlan;
struct nix_hw *nix_hw;
int blkaddr, lvl;
@@ -3243,10 +3528,16 @@ void rvu_nix_freemem(struct rvu *rvu)
kfree(txsch->schq.bmap);
}
+ vlan = &nix_hw->txvlan;
+ kfree(vlan->rsrc.bmap);
+ mutex_destroy(&vlan->rsrc_lock);
+ devm_kfree(rvu->dev, vlan->entry2pfvf_map);
+
mcast = &nix_hw->mcast;
qmem_free(rvu->dev, mcast->mce_ctx);
qmem_free(rvu->dev, mcast->mcast_buf);
mutex_destroy(&mcast->mce_lock);
+ rvu_nix_fixes_exit(rvu, nix_hw);
}
}
@@ -3262,6 +3553,8 @@ int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);
+ npc_mcam_enable_flows(rvu, pcifunc);
+
return rvu_cgx_start_stop_io(rvu, pcifunc, true);
}
@@ -3284,11 +3577,16 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
struct hwctx_disable_req ctx_req;
+ int pf = rvu_get_pf(pcifunc);
+ u8 cgx_id, lmac_id;
+ void *cgxd;
int err;
ctx_req.hdr.pcifunc = pcifunc;
/* Cleanup NPC MCAM entries, free Tx scheduler queues being used */
+ rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
+ rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
nix_interface_deinit(rvu, pcifunc, nixlf);
nix_rx_sync(rvu, blkaddr);
nix_txschq_free(rvu, pcifunc);
@@ -3316,7 +3614,70 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
dev_err(rvu->dev, "CQ ctx disable failed\n");
}
+ /* Disable CGX and NPC config done for PTP */
+ if (pfvf->hw_rx_tstamp_en) {
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ cgx_lmac_ptp_config(cgxd, lmac_id, false);
+ /* Undo NPC config done for PTP */
+ if (npc_config_ts_kpuaction(rvu, pf, pcifunc, false))
+ dev_err(rvu->dev, "NPC config for PTP failed\n");
+ pfvf->hw_rx_tstamp_en = false;
+ }
+
nix_ctx_free(rvu, pfvf);
+
+ if (is_block_implemented(rvu->hw, BLKADDR_CPT0)) {
+ /* reset the configuration related to inline ipsec */
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(nixlf),
+ 0x0);
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(nixlf),
+ 0x0);
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(nixlf),
+ 0x0);
+ }
+}
+
+int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr, nixlf, err;
+ u64 cfg;
+
+ /* Silicon does not support enabling timestamp in HiGig mode */
+ if (rvu_cgx_is_higig2_enabled(rvu, rvu_get_pf(pcifunc)))
+ return NIX_AF_ERR_PTP_CONFIG_FAIL;
+
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
+ if (err)
+ return err;
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf));
+ cfg |= BIT_ULL(32);
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
+ return 0;
+}
+
+int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr, nixlf, err;
+ u64 cfg;
+
+ /* Silicon does not support enabling timestamp in HiGig mode */
+ if (rvu_cgx_is_higig2_enabled(rvu, rvu_get_pf(pcifunc)))
+ return NIX_AF_ERR_PTP_CONFIG_FAIL;
+
+ err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr);
+ if (err)
+ return err;
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf));
+ cfg &= ~BIT_ULL(32);
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
+ return 0;
}
int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
@@ -3369,3 +3730,330 @@ int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
return 0;
}
+
+int rvu_mbox_handler_nix_set_vlan_tpid(struct rvu *rvu,
+ struct nix_set_vlan_tpid *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ int nixlf, err, blkaddr;
+ u64 cfg;
+
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
+ if (err)
+ return err;
+
+ if (req->vlan_type != NIX_VLAN_TYPE_OUTER &&
+ req->vlan_type != NIX_VLAN_TYPE_INNER)
+ return NIX_AF_ERR_PARAM;
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf));
+
+ if (req->vlan_type == NIX_VLAN_TYPE_OUTER)
+ cfg = (cfg & ~GENMASK_ULL(15, 0)) | req->tpid;
+ else
+ cfg = (cfg & ~GENMASK_ULL(31, 16)) | ((u64)req->tpid << 16);
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
+ return 0;
+}
+
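+/* Illustrative sketch, not part of this change: the handler above packs the
+ * outer and inner TPIDs into NIX_AF_LFX_TX_CFG as below. The macro names are
+ * hypothetical and only document the field layout.
+ */
+#define NIX_TX_CFG_OUTER_TPID(cfg) ((cfg) & GENMASK_ULL(15, 0))
+#define NIX_TX_CFG_INNER_TPID(cfg) (((cfg) & GENMASK_ULL(31, 16)) >> 16)
+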
+static irqreturn_t rvu_nix_af_rvu_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu *rvu = (struct rvu *)rvu_irq;
+ int blkaddr;
+ u64 intr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ intr = rvu_read64(rvu, blkaddr, NIX_AF_RVU_INT);
+
+ if (intr & BIT_ULL(0))
+ dev_err(rvu->dev, "NIX: Unmapped slot error\n");
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT, intr);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rvu_nix_af_err_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu *rvu = (struct rvu *)rvu_irq;
+ int blkaddr;
+ u64 intr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ intr = rvu_read64(rvu, blkaddr, NIX_AF_ERR_INT);
+
+ if (intr & BIT_ULL(14))
+ dev_err(rvu->dev, "NIX: Memory fault on NIX_AQ_INST_S read\n");
+
+ if (intr & BIT_ULL(13))
+ dev_err(rvu->dev, "NIX: Memory fault on NIX_AQ_RES_S write\n");
+
+ if (intr & BIT_ULL(12))
+ dev_err(rvu->dev, "NIX: AQ doorbell error\n");
+
+ if (intr & BIT_ULL(6))
+ dev_err(rvu->dev, "NIX: Rx on unmapped PF_FUNC\n");
+
+ if (intr & BIT_ULL(5))
+ dev_err(rvu->dev, "NIX: Rx multicast replication error\n");
+
+ if (intr & BIT_ULL(4))
+ dev_err(rvu->dev, "NIX: Memory fault on NIX_RX_MCE_S read\n");
+
+ if (intr & BIT_ULL(3))
+ dev_err(rvu->dev, "NIX: Memory fault on multicast WQE read\n");
+
+ if (intr & BIT_ULL(2))
+ dev_err(rvu->dev, "NIX: Memory fault on mirror WQE read\n");
+
+ if (intr & BIT_ULL(1))
+ dev_err(rvu->dev, "NIX: Memory fault on mirror pkt write\n");
+
+ if (intr & BIT_ULL(0))
+ dev_err(rvu->dev, "NIX: Memory fault on multicast pkt write\n");
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT, intr);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rvu_nix_af_ras_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu *rvu = (struct rvu *)rvu_irq;
+ int blkaddr;
+ u64 intr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ intr = rvu_read64(rvu, blkaddr, NIX_AF_RAS);
+
+ if (intr & BIT_ULL(34))
+ dev_err(rvu->dev, "NIX: Poisoned data on NIX_AQ_INST_S read\n");
+
+ if (intr & BIT_ULL(33))
+ dev_err(rvu->dev, "NIX: Poisoned data on NIX_AQ_RES_S write\n");
+
+ if (intr & BIT_ULL(32))
+ dev_err(rvu->dev, "NIX: Poisoned data on HW context read\n");
+
+ if (intr & BIT_ULL(4))
+ dev_err(rvu->dev, "NIX: Poisoned data on packet read from mirror buffer\n");
+
+ if (intr & BIT_ULL(3))
+ dev_err(rvu->dev, "NIX: Poisoned data on packet read from multicast buffer\n");
+
+ if (intr & BIT_ULL(2))
+ dev_err(rvu->dev, "NIX: Poisoned data on WQE read from mirror buffer\n");
+
+ if (intr & BIT_ULL(1))
+ dev_err(rvu->dev, "NIX: Poisoned data on WQE read from multicast buffer\n");
+
+ if (intr & BIT_ULL(0))
+ dev_err(rvu->dev, "NIX: Poisoned data on NIX_RX_MCE_S read\n");
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, NIX_AF_RAS, intr);
+ return IRQ_HANDLED;
+}
+
+static bool rvu_nix_af_request_irq(struct rvu *rvu, int blkaddr, int offset,
+ const char *name, irq_handler_t fn)
+{
+ int rc;
+
+ WARN_ON(rvu->irq_allocated[offset]);
+ rvu->irq_allocated[offset] = false;
+ sprintf(&rvu->irq_name[offset * NAME_SIZE], name);
+ rc = request_irq(pci_irq_vector(rvu->pdev, offset), fn, 0,
+ &rvu->irq_name[offset * NAME_SIZE], rvu);
+ if (rc)
+ dev_warn(rvu->dev, "Failed to register %s irq\n", name);
+ else
+ rvu->irq_allocated[offset] = true;
+
+ return rvu->irq_allocated[offset];
+}
+
+int rvu_nix_register_interrupts(struct rvu *rvu)
+{
+ int blkaddr, base;
+ bool rc;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ /* Get NIX AF MSIX vectors offset. */
+ base = rvu_read64(rvu, blkaddr, NIX_PRIV_AF_INT_CFG) & 0x3ff;
+ if (!base) {
+ dev_warn(rvu->dev,
+ "Failed to get NIX_AF_INT vector offsets\n");
+ return 0;
+ }
+
+ /* Register and enable NIX_AF_RVU_INT interrupt */
+ rc = rvu_nix_af_request_irq(rvu, blkaddr, base + NIX_AF_INT_VEC_RVU,
+ "NIX_AF_RVU_INT",
+ rvu_nix_af_rvu_intr_handler);
+ if (!rc)
+ goto err;
+ rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1S, ~0ULL);
+
+ /* Register and enable NIX_AF_ERR_INT interrupt */
+ rc = rvu_nix_af_request_irq(rvu, blkaddr, base + NIX_AF_INT_VEC_AF_ERR,
+ "NIX_AF_ERR_INT",
+ rvu_nix_af_err_intr_handler);
+ if (!rc)
+ goto err;
+ rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1S, ~0ULL);
+
+ /* Register and enable NIX_AF_RAS interrupt */
+ rc = rvu_nix_af_request_irq(rvu, blkaddr, base + NIX_AF_INT_VEC_POISON,
+ "NIX_AF_RAS",
+ rvu_nix_af_ras_intr_handler);
+ if (!rc)
+ goto err;
+ rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1S, ~0ULL);
+
+ return 0;
+err:
+ rvu_nix_unregister_interrupts(rvu);
+ return rc;
+}
+
+void rvu_nix_unregister_interrupts(struct rvu *rvu)
+{
+ int blkaddr, offs, i;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return;
+
+ offs = rvu_read64(rvu, blkaddr, NIX_PRIV_AF_INT_CFG) & 0x3ff;
+ if (!offs)
+ return;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1C, ~0ULL);
+ rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1C, ~0ULL);
+ rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1C, ~0ULL);
+
+ if (rvu->irq_allocated[offs + NIX_AF_INT_VEC_RVU]) {
+ free_irq(pci_irq_vector(rvu->pdev, offs + NIX_AF_INT_VEC_RVU),
+ rvu);
+ rvu->irq_allocated[offs + NIX_AF_INT_VEC_RVU] = false;
+ }
+
+ for (i = NIX_AF_INT_VEC_AF_ERR; i < NIX_AF_INT_VEC_CNT; i++)
+ if (rvu->irq_allocated[offs + i]) {
+ free_irq(pci_irq_vector(rvu->pdev, offs + i), rvu);
+ rvu->irq_allocated[offs + i] = false;
+ }
+}
+
+int rvu_mbox_handler_nix_inline_ipsec_cfg(struct rvu *rvu,
+ struct nix_inline_ipsec_cfg *req,
+ struct msg_rsp *rsp)
+{
+ int blkaddr;
+ u64 val;
+
+ if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
+ return 0;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
+ if (blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ if (req->enable) {
+ /* Set OPCODE and EGRP */
+ val = (u64)req->gen_cfg.egrp << 48 |
+ (u64)req->gen_cfg.opcode << 32;
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, val);
+
+ /* Set CPT queue for inline IPSec */
+ val = (u64)req->inst_qsel.cpt_pf_func << 8 |
+ req->inst_qsel.cpt_slot;
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(0), val);
+
+ /* Set CPT credit */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(0),
+ req->cpt_credit);
+ } else {
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, 0x0);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(0), 0x0);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(0), 0x3FFFFF);
+ }
+
+ return 0;
+}
+
+int rvu_mbox_handler_nix_inline_ipsec_lf_cfg(struct rvu *rvu,
+ struct nix_inline_ipsec_lf_cfg *req, struct msg_rsp *rsp)
+{
+ int lf, blkaddr, err;
+ u64 val;
+
+ if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
+ return 0;
+
+ err = nix_get_nixlf(rvu, req->hdr.pcifunc, &lf, &blkaddr);
+ if (err)
+ return err;
+
+ if (req->enable) {
+ /* Set TT, TAG_CONST, SA_POW2_SIZE and LENM1_MAX */
+ val = (u64)req->ipsec_cfg0.tt << 44 |
+ (u64)req->ipsec_cfg0.tag_const << 20 |
+ (u64)req->ipsec_cfg0.sa_pow2_size << 16 |
+ req->ipsec_cfg0.lenm1_max;
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), val);
+
+ /* Set SA_IDX_W and SA_IDX_MAX */
+ val = (u64)req->ipsec_cfg1.sa_idx_w << 32 |
+ req->ipsec_cfg1.sa_idx_max;
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), val);
+
+ /* Set SA base address */
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf),
+ req->sa_base_addr);
+ } else {
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), 0x0);
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), 0x0);
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf),
+ 0x0);
+ }
+
+ return 0;
+}
+
+void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc)
+{
+ bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
+
+ /* overwrite vf mac address with default_mac */
+ if (from_vf)
+ ether_addr_copy(pfvf->mac_addr, pfvf->default_mac);
+}
+
+bool rvu_nix_is_ptp_tx_enabled(struct rvu *rvu, u16 pcifunc)
+{
+ int blkaddr, nixlf, err;
+ u64 cfg;
+
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
+ if (err)
+ return false;
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf));
+ return (cfg & BIT_ULL(32));
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
index 67471cb2b129..2476d20280cb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
@@ -8,8 +8,10 @@
* published by the Free Software Foundation.
*/
+#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/stringify.h>
#include "rvu_struct.h"
#include "rvu_reg.h"
@@ -541,3 +543,231 @@ void rvu_npa_lf_teardown(struct rvu *rvu, u16 pcifunc, int npalf)
npa_ctx_free(rvu, pfvf);
}
+
+static irqreturn_t rvu_npa_af_rvu_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu *rvu = (struct rvu *)rvu_irq;
+ int blkaddr;
+ u64 intr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ intr = rvu_read64(rvu, blkaddr, NPA_AF_RVU_INT);
+
+ if (intr & BIT_ULL(0))
+ dev_err(rvu->dev, "NPA: Unmapped slot error\n");
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT, intr);
+ return IRQ_HANDLED;
+}
+
+static const char *rvu_npa_inpq_to_str(u16 in)
+{
+ switch (in) {
+ case 0:
+ return NULL;
+ case BIT(NPA_INPQ_NIX0_RX):
+ return __stringify(NPA_INPQ_NIX0_RX);
+ case BIT(NPA_INPQ_NIX0_TX):
+ return __stringify(NPA_INPQ_NIX0_TX);
+ case BIT(NPA_INPQ_NIX1_RX):
+ return __stringify(NPA_INPQ_NIX1_RX);
+ case BIT(NPA_INPQ_NIX1_TX):
+ return __stringify(NPA_INPQ_NIX1_TX);
+ case BIT(NPA_INPQ_SSO):
+ return __stringify(NPA_INPQ_SSO);
+ case BIT(NPA_INPQ_TIM):
+ return __stringify(NPA_INPQ_TIM);
+ case BIT(NPA_INPQ_DPI):
+ return __stringify(NPA_INPQ_DPI);
+ case BIT(NPA_INPQ_AURA_OP):
+ return __stringify(NPA_INPQ_AURA_OP);
+ case BIT(NPA_INPQ_INTERNAL_RSV):
+ return __stringify(NPA_INPQ_INTERNAL_RSV);
+ }
+
+ return "Reserved";
+}
+
+static irqreturn_t rvu_npa_af_gen_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu *rvu = (struct rvu *)rvu_irq;
+ const char *err_msg;
+ int blkaddr, val;
+ u64 intr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ intr = rvu_read64(rvu, blkaddr, NPA_AF_GEN_INT);
+
+ if (intr & BIT_ULL(32))
+ dev_err(rvu->dev, "NPA: Unmapped PF func error\n");
+
+ val = FIELD_GET(GENMASK(31, 16), intr);
+ err_msg = rvu_npa_inpq_to_str(val);
+ if (err_msg)
+ dev_err(rvu->dev, "NPA: Alloc disabled for %s\n", err_msg);
+
+ val = FIELD_GET(GENMASK(15, 0), intr);
+ err_msg = rvu_npa_inpq_to_str(val);
+ if (err_msg)
+ dev_err(rvu->dev, "NPA: Free disabled for %s\n", err_msg);
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT, intr);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rvu_npa_af_err_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu *rvu = (struct rvu *)rvu_irq;
+ int blkaddr;
+ u64 intr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ intr = rvu_read64(rvu, blkaddr, NPA_AF_ERR_INT);
+
+ if (intr & BIT_ULL(14))
+ dev_err(rvu->dev, "NPA: Memory fault on NPA_AQ_INST_S read\n");
+
+ if (intr & BIT_ULL(13))
+ dev_err(rvu->dev, "NPA: Memory fault on NPA_AQ_RES_S write\n");
+
+ if (intr & BIT_ULL(12))
+ dev_err(rvu->dev, "NPA: AQ doorbell error\n");
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT, intr);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rvu_npa_af_ras_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu *rvu = (struct rvu *)rvu_irq;
+ int blkaddr;
+ u64 intr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ intr = rvu_read64(rvu, blkaddr, NPA_AF_RAS);
+
+ if (intr & BIT_ULL(34))
+ dev_err(rvu->dev, "NPA: Poisoned data on NPA_AQ_INST_S read\n");
+
+ if (intr & BIT_ULL(33))
+ dev_err(rvu->dev, "NPA: Poisoned data on NPA_AQ_RES_S write\n");
+
+ if (intr & BIT_ULL(32))
+ dev_err(rvu->dev, "NPA: Poisoned data on HW context read\n");
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, NPA_AF_RAS, intr);
+ return IRQ_HANDLED;
+}
+
+static bool rvu_npa_af_request_irq(struct rvu *rvu, int blkaddr, int offset,
+ const char *name, irq_handler_t fn)
+{
+ int rc;
+
+ WARN_ON(rvu->irq_allocated[offset]);
+ rvu->irq_allocated[offset] = false;
+ sprintf(&rvu->irq_name[offset * NAME_SIZE], name);
+ rc = request_irq(pci_irq_vector(rvu->pdev, offset), fn, 0,
+ &rvu->irq_name[offset * NAME_SIZE], rvu);
+ if (rc)
+ dev_warn(rvu->dev, "Failed to register %s irq\n", name);
+ else
+ rvu->irq_allocated[offset] = true;
+
+ return rvu->irq_allocated[offset];
+}
+
+int rvu_npa_register_interrupts(struct rvu *rvu)
+{
+ int blkaddr, base;
+ bool rc;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ /* Get NPA AF MSIX vectors offset. */
+ base = rvu_read64(rvu, blkaddr, NPA_PRIV_AF_INT_CFG) & 0x3ff;
+ if (!base) {
+ dev_warn(rvu->dev,
+ "Failed to get NPA_AF_INT vector offsets\n");
+ return 0;
+ }
+
+ /* Register and enable NPA_AF_RVU_INT interrupt */
+ rc = rvu_npa_af_request_irq(rvu, blkaddr, base + NPA_AF_INT_VEC_RVU,
+ "NPA_AF_RVU_INT",
+ rvu_npa_af_rvu_intr_handler);
+ if (!rc)
+ goto err;
+ rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1S, ~0ULL);
+
+ /* Register and enable NPA_AF_GEN_INT interrupt */
+ rc = rvu_npa_af_request_irq(rvu, blkaddr, base + NPA_AF_INT_VEC_GEN,
+ "NPA_AF_RVU_GEN",
+ rvu_npa_af_gen_intr_handler);
+ if (!rc)
+ goto err;
+ rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1S, ~0ULL);
+
+ /* Register and enable NPA_AF_ERR_INT interrupt */
+ rc = rvu_npa_af_request_irq(rvu, blkaddr, base + NPA_AF_INT_VEC_AF_ERR,
+ "NPA_AF_ERR_INT",
+ rvu_npa_af_err_intr_handler);
+ if (!rc)
+ goto err;
+ rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1S, ~0ULL);
+
+ /* Register and enable NPA_AF_RAS interrupt */
+ rc = rvu_npa_af_request_irq(rvu, blkaddr, base + NPA_AF_INT_VEC_POISON,
+ "NPA_AF_RAS",
+ rvu_npa_af_ras_intr_handler);
+ if (!rc)
+ goto err;
+ rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1S, ~0ULL);
+
+ return 0;
+err:
+ rvu_npa_unregister_interrupts(rvu);
+ return rc;
+}
+
+void rvu_npa_unregister_interrupts(struct rvu *rvu)
+{
+ int i, offs, blkaddr;
+ u64 reg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return;
+
+ reg = rvu_read64(rvu, blkaddr, NPA_PRIV_AF_INT_CFG);
+ offs = reg & 0x3FF;
+
+ rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1C, ~0ULL);
+ rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1C, ~0ULL);
+ rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1C, ~0ULL);
+ rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1C, ~0ULL);
+
+ for (i = 0; i < NPA_AF_INT_VEC_CNT; i++)
+ if (rvu->irq_allocated[offs + i]) {
+ free_irq(pci_irq_vector(rvu->pdev, offs + i), rvu);
+ rvu->irq_allocated[offs + i] = false;
+ }
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index 0a214084406a..2b48a470ad5d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -11,6 +11,8 @@
#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/firmware.h>
+#include <linux/stddef.h>
#include "rvu_struct.h"
#include "rvu_reg.h"
@@ -26,13 +28,66 @@
#define NIXLF_BCAST_ENTRY 1
#define NIXLF_PROMISC_ENTRY 2
-#define NPC_PARSE_RESULT_DMAC_OFFSET 8
+#define NPC_KEX_CHAN_MASK 0xFFFULL
+#define NPC_KEX_PF_FUNC_MASK 0xFFFFULL
+
+#define NPC_HW_TSTAMP_OFFSET 8
+
+static const char def_pfl_name[] = "default";
static void npc_mcam_free_all_entries(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, u16 pcifunc);
static void npc_mcam_free_all_counters(struct rvu *rvu, struct npc_mcam *mcam,
u16 pcifunc);
+static int npc_mcam_verify_pf_func(struct rvu *rvu,
+ struct mcam_entry *entry_data,
+ u8 intf, u16 pcifunc)
+{
+ u16 pf_func, pf_func_mask;
+
+ if (intf == NIX_INTF_RX)
+ return 0;
+
+ pf_func_mask = (entry_data->kw_mask[0] >> 32) &
+ NPC_KEX_PF_FUNC_MASK;
+ pf_func = (entry_data->kw[0] >> 32) & NPC_KEX_PF_FUNC_MASK;
+
+ pf_func = htons(pf_func);
+ if (pf_func_mask != NPC_KEX_PF_FUNC_MASK ||
+ ((pf_func & ~RVU_PFVF_FUNC_MASK) !=
+ (pcifunc & ~RVU_PFVF_FUNC_MASK)))
+ return -EINVAL;
+
+ return 0;
+}
+
+int npc_mcam_verify_channel(struct rvu *rvu, u16 pcifunc, u8 intf, u16 channel)
+{
+ int pf = rvu_get_pf(pcifunc);
+ u8 cgx_id, lmac_id;
+ int base = 0, end;
+
+ if (intf == NIX_INTF_TX)
+ return 0;
+
+ if (is_afvf(pcifunc)) {
+ end = rvu_get_num_lbk_chans();
+ if (end < 0)
+ return -EINVAL;
+ } else {
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ base = NIX_CHAN_CGX_LMAC_CHX(cgx_id, lmac_id, 0x0);
+ /* CGX mapped functions have a maximum of 16 channels */
+ end = NIX_CHAN_CGX_LMAC_CHX(cgx_id, lmac_id, 0xF);
+ }
+
+ if (channel < base || channel > end)
+ return -EINVAL;
+
+ return 0;
+}
+
void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf)
{
int blkaddr;
@@ -61,6 +116,32 @@ int rvu_npc_get_pkind(struct rvu *rvu, u16 pf)
return -1;
}
+int npc_config_ts_kpuaction(struct rvu *rvu, int pf, u16 pcifunc, bool en)
+{
+ int pkind, blkaddr;
+ u64 val;
+
+ pkind = rvu_npc_get_pkind(rvu, pf);
+ if (pkind < 0) {
+ dev_err(rvu->dev, "%s: pkind not mapped\n", __func__);
+ return -EINVAL;
+ }
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, pcifunc);
+ if (blkaddr < 0) {
+ dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
+ return -EINVAL;
+ }
+ val = rvu_read64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind));
+ val &= ~0xff00000ULL; /* Zero ptr advance field */
+ if (en)
+ /* Set to timestamp offset */
+ val |= (NPC_HW_TSTAMP_OFFSET << 20);
+ rvu_write64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind), val);
+
+ return 0;
+}
+
static int npc_get_nixlf_mcam_index(struct npc_mcam *mcam,
u16 pcifunc, int nixlf, int type)
{
@@ -84,7 +165,7 @@ static int npc_get_nixlf_mcam_index(struct npc_mcam *mcam,
return (mcam->nixlf_offset + (nixlf * RSVD_MCAM_ENTRIES_PER_NIXLF));
}
-static int npc_get_bank(struct npc_mcam *mcam, int index)
+int npc_get_bank(struct npc_mcam *mcam, int index)
{
int bank = index / mcam->banksize;
@@ -106,8 +187,8 @@ static bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam,
return (cfg & 1);
}
-static void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
- int blkaddr, int index, bool enable)
+void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index, bool enable)
{
int bank = npc_get_bank(mcam, index);
int actbank = bank;
@@ -224,6 +305,58 @@ static void npc_get_keyword(struct mcam_entry *entry, int idx,
*cam0 = ~*cam1 & kw_mask;
}
+static void npc_fill_entryword(struct mcam_entry *entry, int idx,
+ u64 cam0, u64 cam1)
+{
+ /* Similar to npc_get_keyword, but fills mcam_entry structure from
+ * CAM registers.
+ */
+ switch (idx) {
+ case 0:
+ entry->kw[0] = cam1;
+ entry->kw_mask[0] = cam1 ^ cam0;
+ break;
+ case 1:
+ entry->kw[1] = cam1;
+ entry->kw_mask[1] = cam1 ^ cam0;
+ break;
+ case 2:
+ entry->kw[1] |= (cam1 & CAM_MASK(16)) << 48;
+ entry->kw[2] = (cam1 >> 16) & CAM_MASK(48);
+ entry->kw_mask[1] |= ((cam1 ^ cam0) & CAM_MASK(16)) << 48;
+ entry->kw_mask[2] = ((cam1 ^ cam0) >> 16) & CAM_MASK(48);
+ break;
+ case 3:
+ entry->kw[2] |= (cam1 & CAM_MASK(16)) << 48;
+ entry->kw[3] = (cam1 >> 16) & CAM_MASK(32);
+ entry->kw_mask[2] |= ((cam1 ^ cam0) & CAM_MASK(16)) << 48;
+ entry->kw_mask[3] = ((cam1 ^ cam0) >> 16) & CAM_MASK(32);
+ break;
+ case 4:
+ entry->kw[3] |= (cam1 & CAM_MASK(32)) << 32;
+ entry->kw[4] = (cam1 >> 32) & CAM_MASK(32);
+ entry->kw_mask[3] |= ((cam1 ^ cam0) & CAM_MASK(32)) << 32;
+ entry->kw_mask[4] = ((cam1 ^ cam0) >> 32) & CAM_MASK(32);
+ break;
+ case 5:
+ entry->kw[4] |= (cam1 & CAM_MASK(32)) << 32;
+ entry->kw[5] = (cam1 >> 32) & CAM_MASK(16);
+ entry->kw_mask[4] |= ((cam1 ^ cam0) & CAM_MASK(32)) << 32;
+ entry->kw_mask[5] = ((cam1 ^ cam0) >> 32) & CAM_MASK(16);
+ break;
+ case 6:
+ entry->kw[5] |= (cam1 & CAM_MASK(48)) << 16;
+ entry->kw[6] = (cam1 >> 48) & CAM_MASK(16);
+ entry->kw_mask[5] |= ((cam1 ^ cam0) & CAM_MASK(48)) << 16;
+ entry->kw_mask[6] = ((cam1 ^ cam0) >> 48) & CAM_MASK(16);
+ break;
+ case 7:
+ entry->kw[6] |= (cam1 & CAM_MASK(48)) << 16;
+ entry->kw_mask[6] |= ((cam1 ^ cam0) & CAM_MASK(48)) << 16;
+ break;
+ }
+}
+
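+/* Illustrative sketch, not part of this change: npc_fill_entryword() is the
+ * inverse of npc_get_keyword(); for each 64-bit chunk the CAM register pair
+ * and the (value, mask) pair relate roughly as below. Hypothetical macro
+ * names, shown only to document the encoding.
+ */
+#define NPC_CAM1_FROM_KW(kw, kw_mask) ((kw) & (kw_mask))
+#define NPC_CAM0_FROM_KW(kw, kw_mask) (~(kw) & (kw_mask))
+#define NPC_KW_FROM_CAM(cam0, cam1) (cam1)
+#define NPC_KWMASK_FROM_CAM(cam0, cam1) ((cam1) ^ (cam0))
+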
static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, int index, u8 intf,
struct mcam_entry *entry, bool enable)
@@ -284,6 +417,42 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, true);
}
+static void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, u16 src,
+ struct mcam_entry *entry, u8 *intf, u8 *ena)
+{
+ int sbank = npc_get_bank(mcam, src);
+ int bank, kw = 0;
+ u64 cam0, cam1;
+
+ src &= (mcam->banksize - 1);
+ bank = sbank;
+
+ for (; bank < (sbank + mcam->banks_per_entry); bank++, kw = kw + 2) {
+ cam1 = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W0(src, bank, 1));
+ cam0 = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W0(src, bank, 0));
+ npc_fill_entryword(entry, kw, cam0, cam1);
+
+ cam1 = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W1(src, bank, 1));
+ cam0 = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W1(src, bank, 0));
+ npc_fill_entryword(entry, kw + 1, cam0, cam1);
+ }
+
+ entry->action = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(src, sbank));
+ entry->vtag_action =
+ rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_TAG_ACT(src, sbank));
+ *intf = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_INTF(src, sbank, 1)) & 3;
+ *ena = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CFG(src, sbank)) & 1;
+}
+
static void npc_copy_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, u16 src, u16 dest)
{
@@ -337,12 +506,12 @@ static u64 npc_get_mcam_action(struct rvu *rvu, struct npc_mcam *mcam,
void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, u64 chan, u8 *mac_addr)
{
- struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ u8 mac_mask[] = { [0 ... ETH_ALEN] = 0xFF };
+ struct npc_install_flow_req req = { 0 };
+ struct npc_install_flow_rsp rsp = { 0 };
struct npc_mcam *mcam = &rvu->hw->mcam;
- struct mcam_entry entry = { {0} };
struct nix_rx_action action;
- int blkaddr, index, kwi;
- u64 mac = 0;
+ int blkaddr, index;
/* AF's VFs work in promiscuous mode */
if (is_afvf(pcifunc))
@@ -352,20 +521,9 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
if (blkaddr < 0)
return;
- for (index = ETH_ALEN - 1; index >= 0; index--)
- mac |= ((u64)*mac_addr++) << (8 * index);
-
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_UCAST_ENTRY);
- /* Match ingress channel and DMAC */
- entry.kw[0] = chan;
- entry.kw_mask[0] = 0xFFFULL;
-
- kwi = NPC_PARSE_RESULT_DMAC_OFFSET / sizeof(u64);
- entry.kw[kwi] = mac;
- entry.kw_mask[kwi] = BIT_ULL(48) - 1;
-
/* Don't change the action if entry is already enabled
* Otherwise RSS action may get overwritten.
*/
@@ -378,20 +536,20 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
action.pf_func = pcifunc;
}
- entry.action = *(u64 *)&action;
- npc_config_mcam_entry(rvu, mcam, blkaddr, index,
- NIX_INTF_RX, &entry, true);
-
- /* add VLAN matching, setup action and save entry back for later */
- entry.kw[0] |= (NPC_LT_LB_STAG_QINQ | NPC_LT_LB_CTAG) << 20;
- entry.kw_mask[0] |= (NPC_LT_LB_STAG_QINQ & NPC_LT_LB_CTAG) << 20;
-
- entry.vtag_action = VTAG0_VALID_BIT |
- FIELD_PREP(VTAG0_TYPE_MASK, 0) |
- FIELD_PREP(VTAG0_LID_MASK, NPC_LID_LA) |
- FIELD_PREP(VTAG0_RELPTR_MASK, 12);
-
- memcpy(&pfvf->entry, &entry, sizeof(entry));
+ req.default_rule = 1;
+ ether_addr_copy(req.packet.dmac, mac_addr);
+ ether_addr_copy(req.mask.dmac, mac_mask);
+ req.features = BIT_ULL(NPC_DMAC);
+ req.channel = chan;
+ req.intf = NIX_INTF_RX;
+ req.op = action.op;
+ req.hdr.pcifunc = 0; /* AF is requester */
+ req.vf = action.pf_func;
+ req.index = action.index;
+ req.match_id = action.match_id;
+ req.flow_key_alg = action.flow_key_alg;
+
+ rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
}
void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
@@ -417,7 +575,7 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
entry.kw_mask[0] = 0xFFFULL;
if (allmulti) {
- kwi = NPC_PARSE_RESULT_DMAC_OFFSET / sizeof(u64);
+ kwi = NPC_KEXOF_DMAC / sizeof(u64);
entry.kw[kwi] = BIT_ULL(40); /* LSB bit of 1st byte in DMAC */
entry.kw_mask[kwi] = BIT_ULL(40);
}
@@ -552,6 +710,7 @@ void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
struct npc_mcam *mcam = &rvu->hw->mcam;
struct nix_rx_action action;
int blkaddr, index, bank;
+ struct rvu_pfvf *pfvf;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr < 0)
@@ -588,6 +747,11 @@ void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
rvu_write64(rvu, blkaddr,
NPC_AF_MCAMEX_BANKX_ACTION(index, bank), *(u64 *)&action);
+ /* update the action change in default rule */
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ if (pfvf->def_rule)
+ pfvf->def_rule->rx_action = action;
+
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_PROMISC_ENTRY);
@@ -602,8 +766,6 @@ void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
NPC_AF_MCAMEX_BANKX_ACTION(index, bank),
*(u64 *)&action);
}
-
- rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
}
static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc,
@@ -643,8 +805,6 @@ static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc,
rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf);
else
rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf);
-
- rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
}
void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
@@ -659,7 +819,9 @@ void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_npc_mcam_rule *rule;
int blkaddr;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
@@ -668,105 +830,63 @@ void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
mutex_lock(&mcam->lock);
- /* Disable and free all MCAM entries mapped to this 'pcifunc' */
- npc_mcam_free_all_entries(rvu, mcam, blkaddr, pcifunc);
-
- /* Free all MCAM counters mapped to this 'pcifunc' */
- npc_mcam_free_all_counters(rvu, mcam, pcifunc);
+ /* Disable MCAM entries directing traffic to this 'pcifunc' */
+ list_for_each_entry(rule, &mcam->mcam_rules, list) {
+ if (rule->intf == NIX_INTF_RX &&
+ rule->rx_action.pf_func == pcifunc) {
+ npc_enable_mcam_entry(rvu, mcam, blkaddr,
+ rule->entry, false);
+ rule->enable = false;
+ /* Indicate that default rule is disabled */
+ if (rule->default_rule)
+ pfvf->def_rule = NULL;
+ }
+ }
mutex_unlock(&mcam->lock);
rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
}
-#define SET_KEX_LD(intf, lid, ltype, ld, cfg) \
- rvu_write64(rvu, blkaddr, \
- NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, ltype, ld), cfg)
+void rvu_npc_free_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_npc_mcam_rule *rule, *tmp;
+ int blkaddr;
-#define SET_KEX_LDFLAGS(intf, ld, flags, cfg) \
- rvu_write64(rvu, blkaddr, \
- NPC_AF_INTFX_LDATAX_FLAGSX_CFG(intf, ld, flags), cfg)
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
-#define KEX_LD_CFG(bytesm1, hdr_ofs, ena, flags_ena, key_ofs) \
- (((bytesm1) << 16) | ((hdr_ofs) << 8) | ((ena) << 7) | \
- ((flags_ena) << 6) | ((key_ofs) & 0x3F))
+ mutex_lock(&mcam->lock);
-static void npc_config_ldata_extract(struct rvu *rvu, int blkaddr)
-{
- struct npc_mcam *mcam = &rvu->hw->mcam;
- int lid, ltype;
- int lid_count;
- u64 cfg;
+ /* Disable and free all MCAM entries owned by this 'pcifunc' */
+ npc_mcam_free_all_entries(rvu, mcam, blkaddr, pcifunc);
- cfg = rvu_read64(rvu, blkaddr, NPC_AF_CONST);
- lid_count = (cfg >> 4) & 0xF;
+ /* Free all MCAM counters owned by this 'pcifunc' */
+ npc_mcam_free_all_counters(rvu, mcam, pcifunc);
- /* First clear any existing config i.e
- * disable LDATA and FLAGS extraction.
- */
- for (lid = 0; lid < lid_count; lid++) {
- for (ltype = 0; ltype < 16; ltype++) {
- SET_KEX_LD(NIX_INTF_RX, lid, ltype, 0, 0ULL);
- SET_KEX_LD(NIX_INTF_RX, lid, ltype, 1, 0ULL);
- SET_KEX_LD(NIX_INTF_TX, lid, ltype, 0, 0ULL);
- SET_KEX_LD(NIX_INTF_TX, lid, ltype, 1, 0ULL);
-
- SET_KEX_LDFLAGS(NIX_INTF_RX, 0, ltype, 0ULL);
- SET_KEX_LDFLAGS(NIX_INTF_RX, 1, ltype, 0ULL);
- SET_KEX_LDFLAGS(NIX_INTF_TX, 0, ltype, 0ULL);
- SET_KEX_LDFLAGS(NIX_INTF_TX, 1, ltype, 0ULL);
+ /* Delete MCAM entries owned by this 'pcifunc' from list */
+ list_for_each_entry_safe(rule, tmp, &mcam->mcam_rules, list) {
+ if (rule->owner == pcifunc && !rule->default_rule) {
+ list_del(&rule->list);
+ kfree(rule);
}
}
- if (mcam->keysize != NPC_MCAM_KEY_X2)
- return;
+ mutex_unlock(&mcam->lock);
- /* Default MCAM KEX profile */
- /* Layer A: Ethernet: */
-
- /* DMAC: 6 bytes, KW1[47:0] */
- cfg = KEX_LD_CFG(0x05, 0x0, 0x1, 0x0, NPC_PARSE_RESULT_DMAC_OFFSET);
- SET_KEX_LD(NIX_INTF_RX, NPC_LID_LA, NPC_LT_LA_ETHER, 0, cfg);
-
- /* Ethertype: 2 bytes, KW0[47:32] */
- cfg = KEX_LD_CFG(0x01, 0xc, 0x1, 0x0, 0x4);
- SET_KEX_LD(NIX_INTF_RX, NPC_LID_LA, NPC_LT_LA_ETHER, 1, cfg);
-
- /* Layer B: Single VLAN (CTAG) */
- /* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */
- cfg = KEX_LD_CFG(0x03, 0x0, 0x1, 0x0, 0x4);
- SET_KEX_LD(NIX_INTF_RX, NPC_LID_LB, NPC_LT_LB_CTAG, 0, cfg);
-
- /* Layer B: Stacked VLAN (STAG|QinQ) */
- /* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */
- cfg = KEX_LD_CFG(0x03, 0x4, 0x1, 0x0, 0x4);
- SET_KEX_LD(NIX_INTF_RX, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 0, cfg);
-
- /* Layer C: IPv4 */
- /* SIP+DIP: 8 bytes, KW2[63:0] */
- cfg = KEX_LD_CFG(0x07, 0xc, 0x1, 0x0, 0x10);
- SET_KEX_LD(NIX_INTF_RX, NPC_LID_LC, NPC_LT_LC_IP, 0, cfg);
- /* TOS: 1 byte, KW1[63:56] */
- cfg = KEX_LD_CFG(0x0, 0x1, 0x1, 0x0, 0xf);
- SET_KEX_LD(NIX_INTF_RX, NPC_LID_LC, NPC_LT_LC_IP, 1, cfg);
-
- /* Layer D:UDP */
- /* SPORT: 2 bytes, KW3[15:0] */
- cfg = KEX_LD_CFG(0x1, 0x0, 0x1, 0x0, 0x18);
- SET_KEX_LD(NIX_INTF_RX, NPC_LID_LD, NPC_LT_LD_UDP, 0, cfg);
- /* DPORT: 2 bytes, KW3[31:16] */
- cfg = KEX_LD_CFG(0x1, 0x2, 0x1, 0x0, 0x1a);
- SET_KEX_LD(NIX_INTF_RX, NPC_LID_LD, NPC_LT_LD_UDP, 1, cfg);
-
- /* Layer D:TCP */
- /* SPORT: 2 bytes, KW3[15:0] */
- cfg = KEX_LD_CFG(0x1, 0x0, 0x1, 0x0, 0x18);
- SET_KEX_LD(NIX_INTF_RX, NPC_LID_LD, NPC_LT_LD_TCP, 0, cfg);
- /* DPORT: 2 bytes, KW3[31:16] */
- cfg = KEX_LD_CFG(0x1, 0x2, 0x1, 0x0, 0x1a);
- SET_KEX_LD(NIX_INTF_RX, NPC_LID_LD, NPC_LT_LD_TCP, 1, cfg);
+ rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
}
+#define SET_KEX_LD(intf, lid, ltype, ld, cfg) \
+ rvu_write64(rvu, blkaddr, \
+ NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, ltype, ld), cfg)
+
+#define SET_KEX_LDFLAGS(intf, ld, flags, cfg) \
+ rvu_write64(rvu, blkaddr, \
+ NPC_AF_INTFX_LDATAX_FLAGSX_CFG(intf, ld, flags), cfg)
+
static void npc_program_mkex_profile(struct rvu *rvu, int blkaddr,
struct npc_mcam_kex *mkex)
{
@@ -809,33 +929,38 @@ static void npc_program_mkex_profile(struct rvu *rvu, int blkaddr,
}
/* strtoull of "mkexprof" with base:36 */
-#define MKEX_SIGN 0x19bbfdbd15f
#define MKEX_END_SIGN 0xdeadbeef
-static void npc_load_mkex_profile(struct rvu *rvu, int blkaddr)
+static void npc_load_mkex_profile(struct rvu *rvu, int blkaddr,
+ const char *mkex_profile)
{
- const char *mkex_profile = rvu->mkex_pfl_name;
struct device *dev = &rvu->pdev->dev;
void __iomem *mkex_prfl_addr = NULL;
struct npc_mcam_kex *mcam_kex;
u64 prfl_addr;
u64 prfl_sz;
+ /* Order of precedence (high to low):
+ * 1. Embedded in custom KPU profile firmware via kpu_profile param.
+ * 2. Via mkex_profile, loaded from ATF if KPU profile wasn't loaded.
+ * 3. Built-in KEX profile from npc_mkex_default.
+ */
/* If user not selected mkex profile */
- if (!strncmp(mkex_profile, "default", MKEX_NAME_LEN))
- goto load_default;
+ if (rvu->kpu_fwdata_sz ||
+ !strncmp(mkex_profile, def_pfl_name, MKEX_NAME_LEN))
+ goto program_mkex;
if (!rvu->fwdata)
- goto load_default;
+ goto program_mkex;
prfl_addr = rvu->fwdata->mcam_addr;
prfl_sz = rvu->fwdata->mcam_sz;
if (!prfl_addr || !prfl_sz)
- goto load_default;
+ goto program_mkex;
mkex_prfl_addr = ioremap_wc(prfl_addr, prfl_sz);
if (!mkex_prfl_addr)
- goto load_default;
+ goto program_mkex;
mcam_kex = (struct npc_mcam_kex *)mkex_prfl_addr;
@@ -843,33 +968,21 @@ static void npc_load_mkex_profile(struct rvu *rvu, int blkaddr)
/* Compare with mkex mod_param name string */
if (mcam_kex->mkex_sign == MKEX_SIGN &&
!strncmp(mcam_kex->name, mkex_profile, MKEX_NAME_LEN)) {
- /* Due to an errata (35786) in A0/B0 pass silicon,
- * parse nibble enable configuration has to be
- * identical for both Rx and Tx interfaces.
- */
- if (is_rvu_96xx_B0(rvu) &&
- mcam_kex->keyx_cfg[NIX_INTF_RX] !=
- mcam_kex->keyx_cfg[NIX_INTF_TX])
- goto load_default;
-
- /* Program selected mkex profile */
- npc_program_mkex_profile(rvu, blkaddr, mcam_kex);
-
- goto unmap;
+ /* If profile is valid, switch to it. */
+ if (!is_parse_nibble_config_valid(rvu, mcam_kex))
+ rvu->kpu.mkex = mcam_kex;
+ goto program_mkex;
}
mcam_kex++;
prfl_sz -= sizeof(struct npc_mcam_kex);
}
- dev_warn(dev, "Failed to load requested profile: %s\n",
- rvu->mkex_pfl_name);
-
-load_default:
- dev_info(rvu->dev, "Using default mkex profile\n");
- /* Config packet data and flags extraction into PARSE result */
- npc_config_ldata_extract(rvu, blkaddr);
+ dev_warn(dev, "Failed to load requested profile: %s\n", mkex_profile);
-unmap:
+program_mkex:
+ dev_info(rvu->dev, "Using %s mkex profile\n", rvu->kpu.mkex->name);
+ /* Program selected mkex profile */
+ npc_program_mkex_profile(rvu, blkaddr, rvu->kpu.mkex);
if (mkex_prfl_addr)
iounmap(mkex_prfl_addr);
}
@@ -948,6 +1061,7 @@ static void npc_program_kpu_profile(struct rvu *rvu, int blkaddr, int kpu,
struct npc_kpu_profile *profile)
{
int entry, num_entries, max_entries;
+ u64 entry_mask;
if (profile->cam_entries != profile->action_entries) {
dev_err(rvu->dev,
@@ -971,8 +1085,12 @@ static void npc_program_kpu_profile(struct rvu *rvu, int blkaddr, int kpu,
/* Enable all programmed entries */
num_entries = min_t(int, profile->action_entries, profile->cam_entries);
+ entry_mask = enable_mask(num_entries);
+ /* Disable first KPU_MAX_CST_ENT entries for built-in profile */
+ if (!rvu->kpu.custom)
+ entry_mask |= GENMASK_ULL(KPU_MAX_CST_ENT - 1, 0);
rvu_write64(rvu, blkaddr,
- NPC_AF_KPUX_ENTRY_DISX(kpu, 0), enable_mask(num_entries));
+ NPC_AF_KPUX_ENTRY_DISX(kpu, 0), entry_mask);
if (num_entries > 64) {
rvu_write64(rvu, blkaddr,
NPC_AF_KPUX_ENTRY_DISX(kpu, 1),
@@ -983,6 +1101,140 @@ static void npc_program_kpu_profile(struct rvu *rvu, int blkaddr, int kpu,
rvu_write64(rvu, blkaddr, NPC_AF_KPUX_CFG(kpu), 0x01);
}
+static int npc_prepare_default_kpu(struct npc_kpu_profile_adapter *profile)
+{
+ profile->custom = 0;
+ profile->name = def_pfl_name;
+ profile->version = NPC_KPU_PROFILE_VER;
+ profile->ikpu = ikpu_action_entries;
+ profile->pkinds = ARRAY_SIZE(ikpu_action_entries);
+ profile->kpu = npc_kpu_profiles;
+ profile->kpus = ARRAY_SIZE(npc_kpu_profiles);
+ profile->lt_def = &npc_lt_defaults;
+ profile->mkex = &npc_mkex_default;
+
+ return 0;
+}
+
+static int npc_apply_custom_kpu(struct rvu *rvu,
+ struct npc_kpu_profile_adapter *profile)
+{
+ size_t hdr_sz = sizeof(struct npc_kpu_profile_fwdata), offset = 0;
+ struct npc_kpu_profile_fwdata *fw = rvu->kpu_fwdata;
+ struct npc_kpu_profile_action *action;
+ struct npc_kpu_profile_cam *cam;
+ struct npc_kpu_fwdata *fw_kpu;
+ int entries;
+ u16 kpu, entry;
+
+ if (rvu->kpu_fwdata_sz < hdr_sz) {
+ dev_warn(rvu->dev, "Invalid KPU profile size\n");
+ return -EINVAL;
+ }
+ if (le64_to_cpu(fw->signature) != KPU_SIGN) {
+ dev_warn(rvu->dev, "Invalid KPU profile signature %llx\n",
+ fw->signature);
+ return -EINVAL;
+ }
+ profile->custom = 1;
+ profile->name = fw->name;
+ profile->version = le64_to_cpu(fw->version);
+ profile->mkex = &fw->mkex;
+
+ /* Verify that the firmware uses a known profile structure version */
+ if (NPC_KPU_VER_MAJ(profile->version) >
+ NPC_KPU_VER_MAJ(NPC_KPU_PROFILE_VER)) {
+ dev_warn(rvu->dev, "Not supported Major version: %d > %d\n",
+ NPC_KPU_VER_MAJ(profile->version),
+ NPC_KPU_VER_MAJ(NPC_KPU_PROFILE_VER));
+ return -EINVAL;
+ }
+ /* Verify if profile fits the HW */
+ if (fw->kpus > profile->kpus) {
+ dev_warn(rvu->dev, "Not enough KPUs: %d > %ld\n", fw->kpus,
+ profile->kpus);
+ return -EINVAL;
+ }
+ /* Update adapter structure and ensure endianness where needed. */
+ profile->lt_def = &fw->lt_def;
+
+ for (kpu = 0; kpu < fw->kpus; kpu++) {
+ fw_kpu = (struct npc_kpu_fwdata *)(fw->data + offset);
+ if (fw_kpu->entries > KPU_MAX_CST_ENT)
+ dev_warn(rvu->dev,
+ "Too many custom entries on KPU%d: %d > %d\n",
+ kpu, fw_kpu->entries, KPU_MAX_CST_ENT);
+ entries = min(fw_kpu->entries, KPU_MAX_CST_ENT);
+ cam = (struct npc_kpu_profile_cam *)fw_kpu->data;
+ offset += sizeof(*fw_kpu) + fw_kpu->entries * sizeof(*cam);
+ action = (struct npc_kpu_profile_action *)(fw->data + offset);
+ offset += fw_kpu->entries * sizeof(*action);
+ if (rvu->kpu_fwdata_sz < hdr_sz + offset) {
+ dev_warn(rvu->dev,
+ "Profile size mismatch on KPU%i parsing.\n",
+ kpu + 1);
+ return -EINVAL;
+ }
+ /* Fix endianness and update */
+ for (entry = 0; entry < entries; entry++) {
+ cam[entry].dp0 = le16_to_cpu(cam[entry].dp0);
+ cam[entry].dp0_mask = le16_to_cpu(cam[entry].dp0_mask);
+ cam[entry].dp1 = le16_to_cpu(cam[entry].dp1);
+ cam[entry].dp1_mask = le16_to_cpu(cam[entry].dp1_mask);
+ cam[entry].dp2 = le16_to_cpu(cam[entry].dp2);
+ cam[entry].dp2_mask = le16_to_cpu(cam[entry].dp2_mask);
+ profile->kpu[kpu].cam[entry] = cam[entry];
+ profile->kpu[kpu].action[entry] = action[entry];
+ }
+ }
+
+ return 0;
+}
+
+static void npc_load_kpu_profile(struct rvu *rvu)
+{
+ struct npc_kpu_profile_adapter *profile = &rvu->kpu;
+ const char *kpu_profile = rvu->kpu_pfl_name;
+ const struct firmware *fw = NULL;
+
+ /* If the user did not specify a profile customization */
+ if (!strncmp(kpu_profile, def_pfl_name, KPU_NAME_LEN))
+ goto revert_to_default;
+ /* First prepare default KPU, then we'll customize top entries. */
+ npc_prepare_default_kpu(profile);
+
+ dev_info(rvu->dev, "Loading KPU profile from firmware: %s\n",
+ kpu_profile);
+ if (!request_firmware(&fw, kpu_profile, rvu->dev)) {
+ rvu->kpu_fwdata = kzalloc(fw->size, GFP_KERNEL);
+ if (rvu->kpu_fwdata) {
+ memcpy(rvu->kpu_fwdata, fw->data, fw->size);
+ rvu->kpu_fwdata_sz = fw->size;
+ }
+ }
+ release_firmware(fw);
+
+ /* Apply profile customization if firmware was loaded. */
+ if (!rvu->kpu_fwdata_sz || npc_apply_custom_kpu(rvu, profile)) {
+ dev_warn(rvu->dev,
+ "Can't load KPU profile %s. Using default.\n",
+ kpu_profile);
+ kfree(rvu->kpu_fwdata);
+ rvu->kpu_fwdata = NULL;
+ goto revert_to_default;
+ }
+
+ dev_info(rvu->dev, "Using custom profile '%s', version %d.%d.%d\n",
+ profile->name, NPC_KPU_VER_MAJ(profile->version),
+ NPC_KPU_VER_MIN(profile->version),
+ NPC_KPU_VER_PATCH(profile->version));
+
+ return;
+
+revert_to_default:
+ npc_prepare_default_kpu(profile);
+}
+
static void npc_parser_profile_init(struct rvu *rvu, int blkaddr)
{
struct rvu_hwinfo *hw = rvu->hw;
@@ -1001,25 +1253,28 @@ static void npc_parser_profile_init(struct rvu *rvu, int blkaddr)
rvu_write64(rvu, blkaddr, NPC_AF_KPUX_CFG(idx), 0x00);
}
+ /* Load and customize KPU profile. */
+ npc_load_kpu_profile(rvu);
+
/* First program IKPU profile i.e PKIND configs.
* Check HW max count to avoid configuring junk or
* writing to unsupported CSR addresses.
*/
pkind = &hw->pkind;
- num_pkinds = ARRAY_SIZE(ikpu_action_entries);
+ num_pkinds = rvu->kpu.pkinds;
num_pkinds = min_t(int, pkind->rsrc.max, num_pkinds);
for (idx = 0; idx < num_pkinds; idx++)
npc_config_kpuaction(rvu, blkaddr,
- &ikpu_action_entries[idx], 0, idx, true);
+ &rvu->kpu.ikpu[idx], 0, idx, true);
/* Program KPU CAM and Action profiles */
- num_kpus = ARRAY_SIZE(npc_kpu_profiles);
+ num_kpus = rvu->kpu.kpus;
num_kpus = min_t(int, hw->npc_kpus, num_kpus);
for (idx = 0; idx < num_kpus; idx++)
npc_program_kpu_profile(rvu, blkaddr,
- idx, &npc_kpu_profiles[idx]);
+ idx, &rvu->kpu.kpu[idx]);
}
static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
@@ -1144,11 +1399,11 @@ free_mem:
int rvu_npc_init(struct rvu *rvu)
{
+ struct npc_kpu_profile_adapter *kpu = &rvu->kpu;
struct npc_pkind *pkind = &rvu->hw->pkind;
struct npc_mcam *mcam = &rvu->hw->mcam;
- u64 keyz = NPC_MCAM_KEY_X2;
+ u64 cfg, nibble_ena, rx_kex, tx_kex;
int blkaddr, entry, bank, err;
- u64 cfg, nibble_ena;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr < 0) {
@@ -1182,45 +1437,62 @@ int rvu_npc_init(struct rvu *rvu)
/* Config Outer L2, IPv4's NPC layer info */
rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_OL2,
- (NPC_LID_LA << 8) | (NPC_LT_LA_ETHER << 4) | 0x0F);
+ (kpu->lt_def->pck_ol2.lid << 8) |
+ (kpu->lt_def->pck_ol2.ltype_match << 4) |
+ kpu->lt_def->pck_ol2.ltype_mask);
rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_OIP4,
- (NPC_LID_LC << 8) | (NPC_LT_LC_IP << 4) | 0x0F);
+ (kpu->lt_def->pck_oip4.lid << 8) |
+ (kpu->lt_def->pck_oip4.ltype_match << 4) |
+ kpu->lt_def->pck_oip4.ltype_mask);
/* Config Inner IPV4 NPC layer info */
rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_IIP4,
- (NPC_LID_LG << 8) | (NPC_LT_LG_TU_IP << 4) | 0x0F);
+ (kpu->lt_def->pck_iip4.lid << 8) |
+ (kpu->lt_def->pck_iip4.ltype_match << 4) |
+ kpu->lt_def->pck_iip4.ltype_mask);
/* Enable below for Rx pkts.
* - Outer IPv4 header checksum validation.
- * - Detect outer L2 broadcast address and set NPC_RESULT_S[L2M].
+ * - Detect outer L2 broadcast address and set NPC_RESULT_S[L2B].
+ * - Detect outer L2 multicast address and set NPC_RESULT_S[L2M].
* - Inner IPv4 header checksum validation.
* - Set non zero checksum error code value
*/
rvu_write64(rvu, blkaddr, NPC_AF_PCK_CFG,
rvu_read64(rvu, blkaddr, NPC_AF_PCK_CFG) |
- BIT_ULL(32) | BIT_ULL(24) | BIT_ULL(6) |
- BIT_ULL(2) | BIT_ULL(1));
+ ((u64)NPC_EC_OIP4_CSUM << 32) | (NPC_EC_IIP4_CSUM << 24) |
+ BIT_ULL(7) | BIT_ULL(6) | BIT_ULL(2) | BIT_ULL(1));
/* Set RX and TX side MCAM search key size.
* LA..LD (ltype only) + Channel
*/
- nibble_ena = 0x49247;
- rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX),
- ((keyz & 0x3) << 32) | nibble_ena);
- /* Due to an errata (35786) in A0 pass silicon, parse nibble enable
- * configuration has to be identical for both Rx and Tx interfaces.
- */
- if (!is_rvu_96xx_B0(rvu))
- nibble_ena = (1ULL << 19) - 1;
- rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX),
- ((keyz & 0x3) << 32) | nibble_ena);
+ rx_kex = npc_mkex_default.keyx_cfg[NIX_INTF_RX];
+ tx_kex = npc_mkex_default.keyx_cfg[NIX_INTF_TX];
+ nibble_ena = FIELD_GET(NPC_PARSE_NIBBLE, rx_kex);
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX), rx_kex);
+
+ /* Extract Ltypes LID_LA to LID_LE */
+ nibble_ena = rvu_npc_get_tx_nibble_cfg(rvu, nibble_ena);
+ if (nibble_ena) {
+ tx_kex &= ~NPC_PARSE_NIBBLE;
+ tx_kex |= FIELD_PREP(NPC_PARSE_NIBBLE, nibble_ena);
+ npc_mkex_default.keyx_cfg[NIX_INTF_TX] = tx_kex;
+ }
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX), tx_kex);
+
+ /* Configure MKEX profile */
+ npc_load_mkex_profile(rvu, blkaddr, rvu->mkex_pfl_name);
err = npc_mcam_rsrcs_init(rvu, blkaddr);
if (err)
return err;
- /* Configure MKEX profile */
- npc_load_mkex_profile(rvu, blkaddr);
+ err = npc_flow_steering_init(rvu, blkaddr);
+ if (err) {
+ dev_err(rvu->dev,
+ "Incorrect mkex profile loaded using default mkex\n");
+ npc_load_mkex_profile(rvu, blkaddr, def_pfl_name);
+ }
/* Set TX miss action to UCAST_DEFAULT i.e
* transmit the packet on NIX LF SQ's default channel.
@@ -1246,6 +1518,7 @@ void rvu_npc_freemem(struct rvu *rvu)
kfree(pkind->rsrc.bmap);
kfree(mcam->counters.bmap);
+ kfree(rvu->kpu_fwdata);
mutex_destroy(&mcam->lock);
}
@@ -1804,18 +2077,47 @@ exit:
return rc;
}
+int rvu_mbox_handler_npc_mcam_read_entry(struct rvu *rvu,
+ struct npc_mcam_read_entry_req *req,
+ struct npc_mcam_read_entry_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr, rc;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+ rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
+ if (!rc) {
+ npc_read_mcam_entry(rvu, mcam, blkaddr, req->entry,
+ &rsp->entry_data,
+ &rsp->intf, &rsp->enable);
+ }
+
+ mutex_unlock(&mcam->lock);
+ return rc;
+}
+
int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu,
struct npc_mcam_write_entry_req *req,
struct msg_rsp *rsp)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
u16 pcifunc = req->hdr.pcifunc;
+ u16 channel, chan_mask;
int blkaddr, rc;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr < 0)
return NPC_MCAM_INVALID_REQ;
+ chan_mask = req->entry_data.kw_mask[0] & NPC_KEX_CHAN_MASK;
+ channel = req->entry_data.kw[0] & NPC_KEX_CHAN_MASK;
+ channel &= chan_mask;
+
mutex_lock(&mcam->lock);
rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
if (rc)
@@ -1832,6 +2134,17 @@ int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu,
goto exit;
}
+ if (npc_mcam_verify_channel(rvu, pcifunc, req->intf, channel)) {
+ rc = NPC_MCAM_INVALID_REQ;
+ goto exit;
+ }
+
+ if (npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf,
+ pcifunc)) {
+ rc = NPC_MCAM_INVALID_REQ;
+ goto exit;
+ }
+
npc_config_mcam_entry(rvu, mcam, blkaddr, req->entry, req->intf,
&req->entry_data, req->enable_entry);
@@ -2054,10 +2367,11 @@ int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu,
index = find_next_bit(mcam->bmap, mcam->bmap_entries, entry);
if (index >= mcam->bmap_entries)
break;
+ entry = index + 1;
+
if (mcam->entry2cntr_map[index] != req->cntr)
continue;
- entry = index + 1;
npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
index, req->cntr);
}
@@ -2100,10 +2414,11 @@ int rvu_mbox_handler_npc_mcam_unmap_counter(struct rvu *rvu,
index = find_next_bit(mcam->bmap, mcam->bmap_entries, entry);
if (index >= mcam->bmap_entries)
break;
+ entry = index + 1;
+
if (mcam->entry2cntr_map[index] != req->cntr)
continue;
- entry = index + 1;
npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
index, req->cntr);
}
@@ -2167,6 +2482,7 @@ int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu,
struct npc_mcam *mcam = &rvu->hw->mcam;
u16 entry = NPC_MCAM_ENTRY_INVALID;
u16 cntr = NPC_MCAM_ENTRY_INVALID;
+ u16 channel, chan_mask;
int blkaddr, rc;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
@@ -2176,6 +2492,17 @@ int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu,
if (req->intf != NIX_INTF_RX && req->intf != NIX_INTF_TX)
return NPC_MCAM_INVALID_REQ;
+ chan_mask = req->entry_data.kw_mask[0] & NPC_KEX_CHAN_MASK;
+ channel = req->entry_data.kw[0] & NPC_KEX_CHAN_MASK;
+ channel &= chan_mask;
+
+ if (npc_mcam_verify_channel(rvu, req->hdr.pcifunc, req->intf, channel))
+ return NPC_MCAM_INVALID_REQ;
+
+ if (npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf,
+ req->hdr.pcifunc))
+ return NPC_MCAM_INVALID_REQ;
+
/* Try to allocate a MCAM entry */
entry_req.hdr.pcifunc = req->hdr.pcifunc;
entry_req.contig = true;
@@ -2274,26 +2601,78 @@ int rvu_mbox_handler_npc_get_kex_cfg(struct rvu *rvu, struct msg_req *req,
return 0;
}
-int rvu_npc_update_rxvlan(struct rvu *rvu, u16 pcifunc, int nixlf)
+bool rvu_npc_write_default_rule(struct rvu *rvu, int blkaddr, int nixlf,
+ u16 pcifunc, u8 intf, struct mcam_entry *entry,
+ int *index)
{
- struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
struct npc_mcam *mcam = &rvu->hw->mcam;
- int blkaddr, index;
bool enable;
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
- if (blkaddr < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
+ *index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_UCAST_ENTRY);
+ /* don't force-enable the unicast entry */
+ enable = is_mcam_entry_enabled(rvu, mcam, blkaddr, *index);
+ npc_config_mcam_entry(rvu, mcam, blkaddr, *index, intf, entry, enable);
- if (!pfvf->rxvlan)
- return 0;
+ return enable;
+}
+
+int rvu_mbox_handler_npc_set_pkind(struct rvu *rvu,
+ struct npc_set_pkind *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+ u16 pcifunc = req->hdr.pcifunc;
+ int pf = rvu_get_pf(pcifunc);
+ bool enable_higig2 = false;
+ int blkaddr, nixlf, rc;
+ u64 rxpkind, txpkind;
+ u8 cgx_id, lmac_id;
+
+ /* use default pkind to disable edsa/higig */
+ rxpkind = rvu_npc_get_pkind(rvu, pf);
+ txpkind = NPC_TX_DEF_PKIND;
+
+ if (req->mode & OTX2_PRIV_FLAGS_EDSA) {
+ rxpkind = NPC_RX_EDSA_PKIND;
+ } else if (req->mode & OTX2_PRIV_FLAGS_HIGIG) {
+ /* Silicon does not support enabling higig in time stamp mode */
+ if (pfvf->hw_rx_tstamp_en ||
+ rvu_nix_is_ptp_tx_enabled(rvu, pcifunc))
+ return NPC_AF_ERR_HIGIG_CONFIG_FAIL;
+
+ rxpkind = NPC_RX_HIGIG_PKIND;
+ txpkind = NPC_TX_HIGIG_PKIND;
+ enable_higig2 = true;
+ } else if (req->mode & OTX2_PRIV_FLAGS_CUSTOM) {
+ rxpkind = req->pkind;
+ txpkind = req->pkind;
+ }
+
+ if (req->dir & PKIND_RX) {
+ /* rx pkind set req valid only for cgx mapped PFs */
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return 0;
+ rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
+
+ rc = cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu),
+ lmac_id, rxpkind);
+ if (rc)
+ return rc;
+ }
+
+ if (req->dir & PKIND_TX) {
+ /* Tx pkind set request valid if PCIFUNC has NIXLF attached */
+ rc = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr);
+ if (rc)
+ return rc;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf),
+ txpkind);
+ }
- index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf,
- NIXLF_UCAST_ENTRY);
- pfvf->entry.action = npc_get_mcam_action(rvu, mcam, blkaddr, index);
- enable = is_mcam_entry_enabled(rvu, mcam, blkaddr, index);
- npc_config_mcam_entry(rvu, mcam, blkaddr, pfvf->rxvlan_index,
- NIX_INTF_RX, &pfvf->entry, enable);
+ if (enable_higig2 ^ rvu_cgx_is_higig2_enabled(rvu, pf))
+ rvu_cgx_enadis_higig2(rvu, pf, enable_higig2);
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
new file mode 100644
index 000000000000..784a13729eaa
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -0,0 +1,1185 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2019 Marvell International Ltd.
+ */
+
+#include <linux/bitfield.h>
+
+#include "rvu_struct.h"
+#include "rvu_reg.h"
+#include "rvu.h"
+#include "npc.h"
+
+#define NPC_BYTESM GENMASK_ULL(19, 16)
+#define NPC_HDR_OFFSET GENMASK_ULL(15, 8)
+#define NPC_KEY_OFFSET GENMASK_ULL(5, 0)
+#define NPC_LDATA_EN BIT_ULL(7)
+
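+/* A worked decode of one KEX LD config word using the bitfields above;
+ * the cfg value is assumed for illustration and not taken from any profile:
+ *   cfg = 0x30c90
+ *   FIELD_GET(NPC_BYTESM, cfg)     = 0x3  -> 3 + 1 = 4 bytes extracted
+ *   FIELD_GET(NPC_HDR_OFFSET, cfg) = 0xc  -> from byte 12 of the header
+ *   FIELD_GET(NPC_LDATA_EN, cfg)   = 0x1  -> extraction enabled
+ *   FIELD_GET(NPC_KEY_OFFSET, cfg) = 0x10 -> placed at key byte 16
+ */
+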
+static const char * const npc_flow_names[] = {
+ [NPC_DMAC] = "dmac",
+ [NPC_SMAC] = "smac",
+ [NPC_ETYPE] = "ether type",
+ [NPC_OUTER_VID] = "outer vlan id",
+ [NPC_TOS] = "tos",
+ [NPC_SIP_IPV4] = "ipv4 source ip",
+ [NPC_DIP_IPV4] = "ipv4 destination ip",
+ [NPC_SIP_IPV6] = "ipv6 source ip",
+ [NPC_DIP_IPV6] = "ipv6 destination ip",
+ [NPC_SPORT_TCP] = "tcp source port",
+ [NPC_DPORT_TCP] = "tcp destination port",
+ [NPC_SPORT_UDP] = "udp source port",
+ [NPC_DPORT_UDP] = "udp destination port",
+ [NPC_UNKNOWN] = "unknown",
+};
+
+const char *npc_get_field_name(u8 hdr)
+{
+ if (hdr >= ARRAY_SIZE(npc_flow_names))
+ return npc_flow_names[NPC_UNKNOWN];
+
+ return npc_flow_names[hdr];
+}
+
+/* Compute keyword masks and figure out the number of keywords a field
+ * spans in the key.
+ */
+static void npc_set_kw_masks(struct npc_mcam *mcam, enum key_fields type,
+ u8 nr_bits, int start_kwi, int offset, u8 intf)
+{
+ struct npc_key_field *field = &mcam->rx_key_fields[type];
+ u8 bits_in_kw;
+ int max_kwi;
+
+ if (mcam->banks_per_entry == 1)
+ max_kwi = 1; /* NPC_MCAM_KEY_X1 */
+ else if (mcam->banks_per_entry == 2)
+ max_kwi = 3; /* NPC_MCAM_KEY_X2 */
+ else
+ max_kwi = 6; /* NPC_MCAM_KEY_X4 */
+
+ if (intf == NIX_INTF_TX)
+ field = &mcam->tx_key_fields[type];
+
+ if (offset + nr_bits <= 64) {
+ /* one KW only */
+ if (start_kwi > max_kwi)
+ return;
+ field->kw_mask[start_kwi] |= (BIT_ULL(nr_bits) - 1) << offset;
+ field->nr_kws = 1;
+ } else if (offset + nr_bits > 64 &&
+ offset + nr_bits <= 128) {
+ /* two KWs */
+ if (start_kwi + 1 > max_kwi)
+ return;
+ /* first KW mask */
+ bits_in_kw = 64 - offset;
+ field->kw_mask[start_kwi] |= (BIT_ULL(bits_in_kw) - 1)
+ << offset;
+ /* second KW mask i.e. mask for rest of bits */
+ bits_in_kw = nr_bits + offset - 64;
+ field->kw_mask[start_kwi + 1] |= BIT_ULL(bits_in_kw) - 1;
+ field->nr_kws = 2;
+ } else {
+ /* three KWs */
+ if (start_kwi + 2 > max_kwi)
+ return;
+ /* first KW mask */
+ bits_in_kw = 64 - offset;
+ field->kw_mask[start_kwi] |= (BIT_ULL(bits_in_kw) - 1)
+ << offset;
+ /* second KW mask */
+ field->kw_mask[start_kwi + 1] = ~0ULL;
+ /* third KW mask i.e. mask for rest of bits */
+ bits_in_kw = nr_bits + offset - 128;
+ field->kw_mask[start_kwi + 2] |= BIT_ULL(bits_in_kw) - 1;
+ field->nr_kws = 3;
+ }
+}
+
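+/* A worked example of the two-keyword case in npc_set_kw_masks() above;
+ * the numbers are assumed for illustration only. A 32-bit field starting
+ * at bit 48 of kw[1] (nr_bits = 32, start_kwi = 1, offset = 48) gives
+ * offset + nr_bits = 80, so it straddles two keywords:
+ *   kw_mask[1] |= (BIT_ULL(16) - 1) << 48  -> 0xffff000000000000
+ *   kw_mask[2] |= (BIT_ULL(16) - 1)        -> 0x000000000000ffff
+ *   nr_kws = 2
+ */
+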
+/* Helper function to figure out whether field exists in the key */
+static bool npc_is_field_present(struct rvu *rvu, enum key_fields type, u8 intf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct npc_key_field *input;
+
+ input = &mcam->rx_key_fields[type];
+ if (intf == NIX_INTF_TX)
+ input = &mcam->tx_key_fields[type];
+
+ return input->nr_kws > 0;
+}
+
+static bool npc_is_same(struct npc_key_field *input,
+ struct npc_key_field *field)
+{
+ int ret;
+
+ ret = memcmp(&input->layer_mdata, &field->layer_mdata,
+ sizeof(struct npc_layer_mdata));
+ return ret == 0;
+}
+
+static void npc_set_layer_mdata(struct npc_mcam *mcam, enum key_fields type,
+ u64 cfg, u8 lid, u8 lt, u8 intf)
+{
+ struct npc_key_field *input = &mcam->rx_key_fields[type];
+
+ if (intf == NIX_INTF_TX)
+ input = &mcam->tx_key_fields[type];
+
+ input->layer_mdata.hdr = FIELD_GET(NPC_HDR_OFFSET, cfg);
+ input->layer_mdata.key = FIELD_GET(NPC_KEY_OFFSET, cfg);
+ input->layer_mdata.len = FIELD_GET(NPC_BYTESM, cfg) + 1;
+ input->layer_mdata.ltype = lt;
+ input->layer_mdata.lid = lid;
+}
+
+static bool npc_check_overlap_fields(struct npc_key_field *input1,
+ struct npc_key_field *input2)
+{
+ int kwi;
+
+ /* Fields with same layer id and different ltypes are mutually
+ * exclusive hence they can be overlapped
+ */
+ if (input1->layer_mdata.lid == input2->layer_mdata.lid &&
+ input1->layer_mdata.ltype != input2->layer_mdata.ltype)
+ return false;
+
+ for (kwi = 0; kwi < NPC_MAX_KWS_IN_KEY; kwi++) {
+ if (input1->kw_mask[kwi] & input2->kw_mask[kwi])
+ return true;
+ }
+
+ return false;
+}
+
+/* Helper function to check whether given field overlaps with any other fields
+ * in the key. Due to limitations on key size and the key extraction profile in
+ * use higher layers can overwrite lower layer's header fields. Hence overlap
+ * needs to be checked.
+ */
+static bool npc_check_overlap(struct rvu *rvu, int blkaddr,
+ enum key_fields type, u8 start_lid, u8 intf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct npc_key_field *dummy, *input;
+ int start_kwi, offset;
+ u8 nr_bits, lid, lt, ld;
+ u64 cfg;
+
+ dummy = &mcam->rx_key_fields[NPC_UNKNOWN];
+ input = &mcam->rx_key_fields[type];
+
+ if (intf == NIX_INTF_TX) {
+ dummy = &mcam->tx_key_fields[NPC_UNKNOWN];
+ input = &mcam->tx_key_fields[type];
+ }
+
+ for (lid = start_lid; lid < NPC_MAX_LID; lid++) {
+ for (lt = 0; lt < NPC_MAX_LT; lt++) {
+ for (ld = 0; ld < NPC_MAX_LD; ld++) {
+ cfg = rvu_read64(rvu, blkaddr,
+ NPC_AF_INTFX_LIDX_LTX_LDX_CFG
+ (intf, lid, lt, ld));
+ if (!FIELD_GET(NPC_LDATA_EN, cfg))
+ continue;
+ memset(dummy, 0, sizeof(struct npc_key_field));
+ npc_set_layer_mdata(mcam, NPC_UNKNOWN, cfg,
+ lid, lt, intf);
+ /* exclude input */
+ if (npc_is_same(input, dummy))
+ continue;
+ start_kwi = dummy->layer_mdata.key / 8;
+ offset = (dummy->layer_mdata.key * 8) % 64;
+ nr_bits = dummy->layer_mdata.len * 8;
+ /* form KW masks */
+ npc_set_kw_masks(mcam, NPC_UNKNOWN, nr_bits,
+ start_kwi, offset, intf);
+ /* check any input field bits falls in any
+ * other field bits.
+ */
+ if (npc_check_overlap_fields(dummy, input))
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+static int npc_check_field(struct rvu *rvu, int blkaddr, enum key_fields type,
+ u8 intf)
+{
+ if (!npc_is_field_present(rvu, type, intf) ||
+ npc_check_overlap(rvu, blkaddr, type, 0, intf))
+ return -ENOTSUPP;
+ return 0;
+}
+
+static void npc_scan_parse_result(struct npc_mcam *mcam, u8 bit_number,
+ u8 key_nibble, u8 intf)
+{
+ u8 offset = (key_nibble * 4) % 64; /* offset within key word */
+ u8 kwi = (key_nibble * 4) / 64; /* which word in key */
+ u8 nr_bits = 4; /* bits in a nibble */
+ u8 type;
+
+ switch (bit_number) {
+ case 0 ... 2:
+ type = NPC_CHAN;
+ break;
+ case 3:
+ type = NPC_ERRLEV;
+ break;
+ case 4 ... 5:
+ type = NPC_ERRCODE;
+ break;
+ case 6:
+ type = NPC_LXMB;
+ break;
+ /* check for LTYPE only as of now */
+ case 9:
+ type = NPC_LA;
+ break;
+ case 12:
+ type = NPC_LB;
+ break;
+ case 15:
+ type = NPC_LC;
+ break;
+ case 18:
+ type = NPC_LD;
+ break;
+ case 21:
+ type = NPC_LE;
+ break;
+ case 24:
+ type = NPC_LF;
+ break;
+ case 27:
+ type = NPC_LG;
+ break;
+ case 30:
+ type = NPC_LH;
+ break;
+ default:
+ return;
+ }
+ npc_set_kw_masks(mcam, type, nr_bits, kwi, offset, intf);
+}
+
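+/* A worked example of the nibble placement in npc_scan_parse_result()
+ * above; the key_nibble value is assumed for illustration. The 18th
+ * enabled nibble (key_nibble = 17) starts at bit 17 * 4 = 68 of the key,
+ * so kwi = 68 / 64 = 1 and offset = 68 % 64 = 4, and its 4-bit mask is
+ * recorded in kw_mask[1] at bits [7:4].
+ */
+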
+static void npc_handle_multi_layer_fields(struct rvu *rvu, int blkaddr, u8 intf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct npc_key_field *key_fields;
+ /* Ether type can come from three layers
+ * (ethernet, single tagged, double tagged)
+ */
+ struct npc_key_field *etype_ether;
+ struct npc_key_field *etype_tag1;
+ struct npc_key_field *etype_tag2;
+ /* Outer VLAN TCI can come from two layers
+ * (single tagged, double tagged)
+ */
+ struct npc_key_field *vlan_tag1;
+ struct npc_key_field *vlan_tag2;
+ u64 *features;
+ u8 start_lid;
+ int i;
+
+ key_fields = mcam->rx_key_fields;
+ features = &mcam->rx_features;
+
+ if (intf == NIX_INTF_TX) {
+ key_fields = mcam->tx_key_fields;
+ features = &mcam->tx_features;
+ }
+
+ /* Handle header fields which can come from multiple layers, like
+ * etype and outer VLAN TCI. These fields should have the same position
+ * in the key; otherwise, installing one MCAM rule would need more than
+ * one entry, which complicates MCAM space management.
+ */
+ etype_ether = &key_fields[NPC_ETYPE_ETHER];
+ etype_tag1 = &key_fields[NPC_ETYPE_TAG1];
+ etype_tag2 = &key_fields[NPC_ETYPE_TAG2];
+ vlan_tag1 = &key_fields[NPC_VLAN_TAG1];
+ vlan_tag2 = &key_fields[NPC_VLAN_TAG2];
+
+ /* if the programmed key profile does not extract ether type at all */
+ if (!etype_ether->nr_kws && !etype_tag1->nr_kws && !etype_tag2->nr_kws)
+ goto vlan_tci;
+
+ /* if the programmed key profile extracts ether type from only one layer */
+ if (etype_ether->nr_kws && !etype_tag1->nr_kws && !etype_tag2->nr_kws)
+ key_fields[NPC_ETYPE] = *etype_ether;
+ if (!etype_ether->nr_kws && etype_tag1->nr_kws && !etype_tag2->nr_kws)
+ key_fields[NPC_ETYPE] = *etype_tag1;
+ if (!etype_ether->nr_kws && !etype_tag1->nr_kws && etype_tag2->nr_kws)
+ key_fields[NPC_ETYPE] = *etype_tag2;
+
+ /* if the programmed key profile extracts ether type from multiple layers */
+ if (etype_ether->nr_kws && etype_tag1->nr_kws) {
+ for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
+ if (etype_ether->kw_mask[i] != etype_tag1->kw_mask[i])
+ goto vlan_tci;
+ }
+ key_fields[NPC_ETYPE] = *etype_tag1;
+ }
+ if (etype_ether->nr_kws && etype_tag2->nr_kws) {
+ for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
+ if (etype_ether->kw_mask[i] != etype_tag2->kw_mask[i])
+ goto vlan_tci;
+ }
+ key_fields[NPC_ETYPE] = *etype_tag2;
+ }
+ if (etype_tag1->nr_kws && etype_tag2->nr_kws) {
+ for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
+ if (etype_tag1->kw_mask[i] != etype_tag2->kw_mask[i])
+ goto vlan_tci;
+ }
+ key_fields[NPC_ETYPE] = *etype_tag2;
+ }
+
+ /* check none of higher layers overwrite ether type */
+ start_lid = key_fields[NPC_ETYPE].layer_mdata.lid + 1;
+ if (npc_check_overlap(rvu, blkaddr, NPC_ETYPE, start_lid, intf))
+ goto vlan_tci;
+ *features |= BIT_ULL(NPC_ETYPE);
+vlan_tci:
+ /* if key profile does not extract outer vlan tci at all */
+ if (!vlan_tag1->nr_kws && !vlan_tag2->nr_kws)
+ goto done;
+
+ /* if key profile extracts outer vlan tci from one layer */
+ if (vlan_tag1->nr_kws && !vlan_tag2->nr_kws)
+ key_fields[NPC_OUTER_VID] = *vlan_tag1;
+ if (!vlan_tag1->nr_kws && vlan_tag2->nr_kws)
+ key_fields[NPC_OUTER_VID] = *vlan_tag2;
+
+ /* if key profile extracts outer vlan tci from multiple layers */
+ if (vlan_tag1->nr_kws && vlan_tag2->nr_kws) {
+ for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
+ if (vlan_tag1->kw_mask[i] != vlan_tag2->kw_mask[i])
+ goto done;
+ }
+ key_fields[NPC_OUTER_VID] = *vlan_tag2;
+ }
+ /* check none of higher layers overwrite outer vlan tci */
+ start_lid = key_fields[NPC_OUTER_VID].layer_mdata.lid + 1;
+ if (npc_check_overlap(rvu, blkaddr, NPC_OUTER_VID, start_lid, intf))
+ goto done;
+ *features |= BIT_ULL(NPC_OUTER_VID);
+done:
+ return;
+}
+
+static void npc_scan_ldata(struct rvu *rvu, int blkaddr, u8 lid,
+ u8 lt, u64 cfg, u8 intf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u8 hdr, key, nr_bytes, bit_offset;
+ u8 la_ltype, la_start;
+ /* starting KW index and starting bit position */
+ int start_kwi, offset;
+
+ nr_bytes = FIELD_GET(NPC_BYTESM, cfg) + 1;
+ hdr = FIELD_GET(NPC_HDR_OFFSET, cfg);
+ key = FIELD_GET(NPC_KEY_OFFSET, cfg);
+ start_kwi = key / 8;
+ offset = (key * 8) % 64;
+
+ /* For Tx, Layer A has NIX_INST_HDR_S(64 bytes) preceding
+ * ethernet header.
+ */
+ if (intf == NIX_INTF_TX) {
+ la_ltype = NPC_LT_LA_IH_NIX_ETHER;
+ la_start = 8;
+ } else {
+ la_ltype = NPC_LT_LA_ETHER;
+ la_start = 0;
+ }
+
+#define NPC_SCAN_HDR(name, hlid, hlt, hstart, hlen) \
+do { \
+ if (lid == (hlid) && lt == (hlt)) { \
+ if ((hstart) >= hdr && \
+ ((hstart) + (hlen)) <= (hdr + nr_bytes)) { \
+ bit_offset = (hdr + nr_bytes - (hstart) - (hlen)) * 8; \
+ npc_set_layer_mdata(mcam, name, cfg, lid, lt, intf); \
+ npc_set_kw_masks(mcam, name, (hlen) * 8, \
+ start_kwi, offset + bit_offset, intf);\
+ } \
+ } \
+} while (0)
+
+ /* List LID, LTYPE, start offset from layer and length(in bytes) of
+ * packet header fields below.
+ * Example: Source IP is 4 bytes and starts at 12th byte of IP header
+ */
+ NPC_SCAN_HDR(NPC_SIP_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 12, 4);
+ NPC_SCAN_HDR(NPC_DIP_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 16, 4);
+ NPC_SCAN_HDR(NPC_SPORT_UDP, NPC_LID_LD, NPC_LT_LD_UDP, 0, 2);
+ NPC_SCAN_HDR(NPC_DPORT_UDP, NPC_LID_LD, NPC_LT_LD_UDP, 2, 2);
+ NPC_SCAN_HDR(NPC_SPORT_TCP, NPC_LID_LD, NPC_LT_LD_TCP, 0, 2);
+ NPC_SCAN_HDR(NPC_DPORT_TCP, NPC_LID_LD, NPC_LT_LD_TCP, 2, 2);
+ NPC_SCAN_HDR(NPC_ETYPE_ETHER, NPC_LID_LA, NPC_LT_LA_ETHER, 12, 2);
+ NPC_SCAN_HDR(NPC_ETYPE_TAG1, NPC_LID_LB, NPC_LT_LB_CTAG, 4, 2);
+ NPC_SCAN_HDR(NPC_ETYPE_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 8, 2);
+ NPC_SCAN_HDR(NPC_VLAN_TAG1, NPC_LID_LB, NPC_LT_LB_CTAG, 2, 2);
+ NPC_SCAN_HDR(NPC_VLAN_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 2, 2);
+ NPC_SCAN_HDR(NPC_DMAC, NPC_LID_LA, la_ltype, la_start, 6);
+ NPC_SCAN_HDR(NPC_SMAC, NPC_LID_LA, la_ltype, la_start, 6);
+ /* PF_FUNC is 2 bytes at 0th byte of NPC_LT_LA_IH_NIX_ETHER */
+ NPC_SCAN_HDR(NPC_PF_FUNC, NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, 0, 2);
+}
+
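+/* A worked example of the NPC_SCAN_HDR() checks in npc_scan_ldata() above;
+ * the LD config values are assumed for illustration. If one LD extracts
+ * 8 bytes starting at byte 12 of the IP header (hdr = 12, nr_bytes = 8,
+ * i.e. SIP + DIP), then:
+ *   NPC_SIP_IPV4 (hstart = 12, hlen = 4): bit_offset = (12 + 8 - 12 - 4) * 8 = 32
+ *   NPC_DIP_IPV4 (hstart = 16, hlen = 4): bit_offset = (12 + 8 - 16 - 4) * 8 = 0
+ * so DIP lands at the low end of the extracted chunk and SIP 32 bits above,
+ * relative to 'offset' within the key word.
+ */
+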
+static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u64 *features = &mcam->rx_features;
+ u64 tcp_udp;
+ int err, hdr;
+
+ if (intf == NIX_INTF_TX)
+ features = &mcam->tx_features;
+
+ for (hdr = NPC_DMAC; hdr < NPC_HEADER_FIELDS_MAX; hdr++) {
+ err = npc_check_field(rvu, blkaddr, hdr, intf);
+ if (!err)
+ *features |= BIT_ULL(hdr);
+ }
+
+ tcp_udp = BIT_ULL(NPC_SPORT_TCP) | BIT_ULL(NPC_SPORT_UDP) |
+ BIT_ULL(NPC_DPORT_TCP) | BIT_ULL(NPC_DPORT_UDP);
+
+ /* for tcp/udp corresponding layer type should be in the key */
+ if (*features & tcp_udp)
+ if (npc_check_field(rvu, blkaddr, NPC_LD, intf))
+ *features &= ~tcp_udp;
+
+ /* for vlan corresponding layer type should be in the key */
+ if (*features & BIT_ULL(NPC_OUTER_VID))
+ if (npc_check_field(rvu, blkaddr, NPC_LB, intf))
+ *features &= ~BIT_ULL(NPC_OUTER_VID);
+}
+
+/* Scan key extraction profile and record how fields of our interest
+ * fill the key structure. Also verify that Channel and DMAC exist in
+ * the key and are not overwritten by other header fields.
+ */
+static int npc_scan_kex(struct rvu *rvu, int blkaddr, u8 intf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u8 lid, lt, ld, bitnr;
+ u8 key_nibble = 0;
+ u64 cfg;
+
+ /* Scan and note how the parse result is placed in the key.
+ * A bit set in PARSE_NIBBLE_ENA corresponds to a nibble of the
+ * parse result appearing in the key; the enabled nibbles are
+ * concatenated into the key.
+ */
+ cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf));
+ cfg &= NPC_PARSE_NIBBLE;
+ for_each_set_bit(bitnr, (unsigned long *)&cfg, 31) {
+ npc_scan_parse_result(mcam, bitnr, key_nibble, intf);
+ key_nibble++;
+ }
+
+ /* Scan and note how layer data is going to be in key */
+ for (lid = 0; lid < NPC_MAX_LID; lid++) {
+ for (lt = 0; lt < NPC_MAX_LT; lt++) {
+ for (ld = 0; ld < NPC_MAX_LD; ld++) {
+ cfg = rvu_read64(rvu, blkaddr,
+ NPC_AF_INTFX_LIDX_LTX_LDX_CFG
+ (intf, lid, lt, ld));
+ if (!FIELD_GET(NPC_LDATA_EN, cfg))
+ continue;
+ npc_scan_ldata(rvu, blkaddr, lid, lt, cfg,
+ intf);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int npc_scan_verify_kex(struct rvu *rvu, int blkaddr)
+{
+ int err;
+
+ err = npc_scan_kex(rvu, blkaddr, NIX_INTF_RX);
+ if (err)
+ return err;
+
+ err = npc_scan_kex(rvu, blkaddr, NIX_INTF_TX);
+ if (err)
+ return err;
+
+ /* Channel is mandatory */
+ if (!npc_is_field_present(rvu, NPC_CHAN, NIX_INTF_RX)) {
+ dev_err(rvu->dev, "Channel not present in Key\n");
+ return -EINVAL;
+ }
+ /* check that none of the fields overwrite channel */
+ if (npc_check_overlap(rvu, blkaddr, NPC_CHAN, 0, NIX_INTF_RX)) {
+ dev_err(rvu->dev, "Channel cannot be overwritten\n");
+ return -EINVAL;
+ }
+ /* DMAC should be present in key for unicast filter to work */
+ if (!npc_is_field_present(rvu, NPC_DMAC, NIX_INTF_RX)) {
+ dev_err(rvu->dev, "DMAC not present in Key\n");
+ return -EINVAL;
+ }
+ /* check that none of the fields overwrite DMAC */
+ if (npc_check_overlap(rvu, blkaddr, NPC_DMAC, 0, NIX_INTF_RX)) {
+ dev_err(rvu->dev, "DMAC cannot be overwritten\n");
+ return -EINVAL;
+ }
+
+ npc_set_features(rvu, blkaddr, NIX_INTF_TX);
+ npc_set_features(rvu, blkaddr, NIX_INTF_RX);
+ npc_handle_multi_layer_fields(rvu, blkaddr, NIX_INTF_TX);
+ npc_handle_multi_layer_fields(rvu, blkaddr, NIX_INTF_RX);
+
+ return 0;
+}
+
+int npc_flow_steering_init(struct rvu *rvu, int blkaddr)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+
+ INIT_LIST_HEAD(&mcam->mcam_rules);
+
+ return npc_scan_verify_kex(rvu, blkaddr);
+}
+
+static int npc_check_unsupported_flows(struct rvu *rvu, u64 features, u8 intf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u64 *mcam_features = &mcam->rx_features;
+ u64 unsupported;
+ u8 bit;
+
+ if (intf == NIX_INTF_TX)
+ mcam_features = &mcam->tx_features;
+
+ unsupported = (*mcam_features ^ features) & ~(*mcam_features);
+ if (unsupported) {
+ dev_info(rvu->dev, "Unsupported flow(s):\n");
+ for_each_set_bit(bit, (unsigned long *)&unsupported, 64)
+ dev_info(rvu->dev, "%s ", npc_get_field_name(bit));
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
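+/* The bitwise check in npc_check_unsupported_flows() above reduces to
+ * "requested features the key profile does not offer". Worked example
+ * with assumed bit values:
+ *   *mcam_features = 0x2f, features = 0x31
+ *   (0x2f ^ 0x31) & ~0x2f = 0x1e & ~0x2f = 0x10
+ * so only the flow type at bit 4 is flagged as unsupported.
+ */
+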
+/* npc_update_entry - Based on the masks generated during key scanning,
+ * update the given entry with the value and mask for the field of
+ * interest. The HW can extract at most 16 bytes of a packet header,
+ * hence lo and hi are sufficient. When the field is 8 bytes or less,
+ * hi should be 0 for both value and mask.
+ *
+ * For an exact match on the value the mask should be all 1's.
+ * Any bits that are 0 in the mask make the corresponding value bits
+ * don't care.
+ */
+static void npc_update_entry(struct rvu *rvu, enum key_fields type,
+ struct mcam_entry *entry, u64 val_lo,
+ u64 val_hi, u64 mask_lo, u64 mask_hi, u8 intf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct mcam_entry dummy = { {0} };
+ struct npc_key_field *field;
+ u64 kw1, kw2, kw3;
+ u8 shift;
+ int i;
+
+ field = &mcam->rx_key_fields[type];
+ if (intf == NIX_INTF_TX)
+ field = &mcam->tx_key_fields[type];
+
+ if (!field->nr_kws)
+ return;
+
+ for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
+ if (!field->kw_mask[i])
+ continue;
+ /* place key value in kw[x] */
+ shift = __ffs64(field->kw_mask[i]);
+ /* update entry value */
+ kw1 = (val_lo << shift) & field->kw_mask[i];
+ dummy.kw[i] = kw1;
+ /* update entry mask */
+ kw1 = (mask_lo << shift) & field->kw_mask[i];
+ dummy.kw_mask[i] = kw1;
+
+ if (field->nr_kws == 1)
+ break;
+ /* place remaining bits of key value in kw[x + 1] */
+ if (field->nr_kws == 2) {
+ /* update entry value */
+ kw2 = (val_lo >> (64 - shift)) | (val_hi << shift);
+ kw2 &= field->kw_mask[i + 1];
+ dummy.kw[i + 1] = kw2;
+ /* update entry mask */
+ kw2 = (mask_lo >> (64 - shift)) | (mask_hi << shift);
+ kw2 &= field->kw_mask[i + 1];
+ dummy.kw_mask[i + 1] = kw2;
+ break;
+ }
+ /* place remaining bits of key value in kw[x + 1], kw[x + 2] */
+ if (field->nr_kws == 3) {
+ /* update entry value */
+ kw2 = (val_lo >> (64 - shift)) | (val_hi << shift);
+ kw2 &= field->kw_mask[i + 1];
+ kw3 = (val_hi >> (64 - shift));
+ kw3 &= field->kw_mask[i + 2];
+ dummy.kw[i + 1] = kw2;
+ dummy.kw[i + 2] = kw3;
+ /* update entry mask */
+ kw2 = (mask_lo >> (64 - shift)) | (mask_hi << shift);
+ kw2 &= field->kw_mask[i + 1];
+ kw3 = (mask_hi >> (64 - shift));
+ kw3 &= field->kw_mask[i + 2];
+ dummy.kw_mask[i + 1] = kw2;
+ dummy.kw_mask[i + 2] = kw3;
+ break;
+ }
+ }
+ /* dummy is ready with values and masks for given key
+ * field now update input entry with those
+ */
+ for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
+ entry->kw[i] |= dummy.kw[i];
+ entry->kw_mask[i] |= dummy.kw_mask[i];
+ }
+}
+
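+/* A worked example of the single-keyword case in npc_update_entry() above;
+ * the kw_mask value is assumed for illustration. If the ether type field
+ * has kw_mask[0] = 0x0000ffff00000000, then shift = __ffs64(kw_mask[0]) = 32,
+ * and an exact match on 0x0800 (val_lo = 0x0800, mask_lo = 0xffff) becomes:
+ *   entry->kw[0]      |= (0x0800ULL << 32) & kw_mask[0] = 0x0000080000000000
+ *   entry->kw_mask[0] |= (0xffffULL << 32) & kw_mask[0] = 0x0000ffff00000000
+ */
+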
+static void npc_update_flow(struct rvu *rvu, struct mcam_entry *entry,
+ u64 features, struct flow_msg *pkt,
+ struct flow_msg *mask,
+ struct rvu_npc_mcam_rule *output, u8 intf)
+{
+ u64 dmac_mask = ether_addr_to_u64(mask->dmac);
+ u64 smac_mask = ether_addr_to_u64(mask->smac);
+ u64 dmac_val = ether_addr_to_u64(pkt->dmac);
+ u64 smac_val = ether_addr_to_u64(pkt->smac);
+ struct flow_msg *opkt = &output->packet;
+ struct flow_msg *omask = &output->mask;
+
+ if (!features)
+ return;
+
+#define NPC_WRITE_FLOW(field, member, val_lo, val_hi, mask_lo, mask_hi) \
+do { \
+ if (features & BIT_ULL(field)) { \
+ npc_update_entry(rvu, field, entry, val_lo, val_hi, \
+ mask_lo, mask_hi, intf); \
+ memcpy(&opkt->member, &pkt->member, sizeof(pkt->member)); \
+ memcpy(&omask->member, &mask->member, sizeof(mask->member)); \
+ } \
+} while (0)
+
+ /* For tcp/udp LTYPE should be present in entry */
+ if (features & (BIT_ULL(NPC_SPORT_TCP) | BIT_ULL(NPC_DPORT_TCP)))
+ npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_TCP,
+ 0, ~0ULL, 0, intf);
+ if (features & (BIT_ULL(NPC_SPORT_UDP) | BIT_ULL(NPC_DPORT_UDP)))
+ npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_UDP,
+ 0, ~0ULL, 0, intf);
+ if (features & BIT_ULL(NPC_OUTER_VID))
+ npc_update_entry(rvu, NPC_LB, entry,
+ NPC_LT_LB_STAG_QINQ | NPC_LT_LB_CTAG, 0,
+ NPC_LT_LB_STAG_QINQ & NPC_LT_LB_CTAG, 0, intf);
+
+ NPC_WRITE_FLOW(NPC_DMAC, dmac, dmac_val, 0, dmac_mask, 0);
+ NPC_WRITE_FLOW(NPC_SMAC, smac, smac_val, 0, smac_mask, 0);
+ NPC_WRITE_FLOW(NPC_ETYPE, etype, ntohs(pkt->etype), 0,
+ ntohs(mask->etype), 0);
+ NPC_WRITE_FLOW(NPC_SIP_IPV4, ip4src, ntohl(pkt->ip4src), 0,
+ ntohl(mask->ip4src), 0);
+ NPC_WRITE_FLOW(NPC_DIP_IPV4, ip4dst, ntohl(pkt->ip4dst), 0,
+ ntohl(mask->ip4dst), 0);
+ NPC_WRITE_FLOW(NPC_SPORT_TCP, sport, ntohs(pkt->sport), 0,
+ ntohs(mask->sport), 0);
+ NPC_WRITE_FLOW(NPC_SPORT_UDP, sport, ntohs(pkt->sport), 0,
+ ntohs(mask->sport), 0);
+ NPC_WRITE_FLOW(NPC_DPORT_TCP, dport, ntohs(pkt->dport), 0,
+ ntohs(mask->dport), 0);
+ NPC_WRITE_FLOW(NPC_DPORT_UDP, dport, ntohs(pkt->dport), 0,
+ ntohs(mask->dport), 0);
+ NPC_WRITE_FLOW(NPC_OUTER_VID, vlan_tci, ntohs(pkt->vlan_tci), 0,
+ ntohs(mask->vlan_tci), 0);
+}
+
+static struct rvu_npc_mcam_rule *rvu_mcam_find_rule(struct npc_mcam *mcam,
+ u16 entry)
+{
+ struct rvu_npc_mcam_rule *iter;
+
+ list_for_each_entry(iter, &mcam->mcam_rules, list) {
+ if (iter->entry == entry)
+ return iter;
+ }
+
+ return NULL;
+}
+
+static void rvu_mcam_add_rule(struct npc_mcam *mcam,
+ struct rvu_npc_mcam_rule *rule)
+{
+ struct list_head *head = &mcam->mcam_rules;
+ struct rvu_npc_mcam_rule *iter;
+
+ list_for_each_entry(iter, &mcam->mcam_rules, list) {
+ if (iter->entry > rule->entry)
+ break;
+ head = &iter->list;
+ }
+
+ list_add(&rule->list, head);
+}
+
+static void rvu_mcam_remove_counter_from_rule(struct rvu *rvu, u16 pcifunc,
+ struct rvu_npc_mcam_rule *rule)
+{
+ struct npc_mcam_oper_counter_req free_req = { 0 };
+ struct msg_rsp free_rsp;
+
+ if (!rule->has_cntr)
+ return;
+
+ free_req.hdr.pcifunc = pcifunc;
+ free_req.cntr = rule->cntr;
+
+ rvu_mbox_handler_npc_mcam_free_counter(rvu, &free_req, &free_rsp);
+ rule->has_cntr = false;
+}
+
+static void rvu_mcam_add_counter_to_rule(struct rvu *rvu, u16 pcifunc,
+ struct rvu_npc_mcam_rule *rule,
+ struct npc_install_flow_rsp *rsp)
+{
+ struct npc_mcam_alloc_counter_req cntr_req = { 0 };
+ struct npc_mcam_alloc_counter_rsp cntr_rsp = { 0 };
+ int err;
+
+ cntr_req.hdr.pcifunc = pcifunc;
+ cntr_req.contig = true;
+ cntr_req.count = 1;
+
+ /* Try to allocate a counter to track this rule's stats. If a
+ * counter cannot be allocated, proceed without one, since counters
+ * are scarcer than entries.
+ */
+ err = rvu_mbox_handler_npc_mcam_alloc_counter(rvu, &cntr_req,
+ &cntr_rsp);
+ if (!err && cntr_rsp.count) {
+ rule->cntr = cntr_rsp.cntr;
+ rule->has_cntr = true;
+ rsp->counter = rule->cntr;
+ } else {
+ rsp->counter = err;
+ }
+}
+
+static void npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ struct mcam_entry *entry,
+ struct npc_install_flow_req *req, u16 target)
+{
+ struct nix_rx_action action;
+
+ npc_update_entry(rvu, NPC_CHAN, entry, req->channel, 0,
+ ~0ULL, 0, NIX_INTF_RX);
+
+ *(u64 *)&action = 0x00;
+ action.pf_func = target;
+ action.op = req->op;
+ action.index = req->index;
+ action.match_id = req->match_id;
+ action.flow_key_alg = req->flow_key_alg;
+
+ if (req->op == NIX_RX_ACTION_DEFAULT && pfvf->def_rule)
+ action = pfvf->def_rule->rx_action;
+
+ entry->action = *(u64 *)&action;
+
+ /* VTAG0 starts at 0th byte of LID_B.
+ * VTAG1 starts at 4th byte of LID_B.
+ */
+ entry->vtag_action = FIELD_PREP(RX_VTAG0_VALID_BIT, req->vtag0_valid) |
+ FIELD_PREP(RX_VTAG0_TYPE_MASK, req->vtag0_type) |
+ FIELD_PREP(RX_VTAG0_LID_MASK, NPC_LID_LB) |
+ FIELD_PREP(RX_VTAG0_RELPTR_MASK, 0) |
+ FIELD_PREP(RX_VTAG1_VALID_BIT, req->vtag1_valid) |
+ FIELD_PREP(RX_VTAG1_TYPE_MASK, req->vtag1_type) |
+ FIELD_PREP(RX_VTAG1_LID_MASK, NPC_LID_LB) |
+ FIELD_PREP(RX_VTAG1_RELPTR_MASK, 4);
+}
+
+static void npc_update_tx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ struct mcam_entry *entry,
+ struct npc_install_flow_req *req, u16 target)
+{
+ struct nix_tx_action action;
+
+ npc_update_entry(rvu, NPC_PF_FUNC, entry, htons(target),
+ 0, ~0ULL, 0, NIX_INTF_TX);
+
+ *(u64 *)&action = 0x00;
+ action.op = req->op;
+ action.index = req->index;
+ action.match_id = req->match_id;
+
+ entry->action = *(u64 *)&action;
+
+ /* VTAG0 starts at 0th byte of LID_B.
+ * VTAG1 starts at 4th byte of LID_B.
+ */
+ entry->vtag_action = FIELD_PREP(TX_VTAG0_DEF_MASK, req->vtag0_def) |
+ FIELD_PREP(TX_VTAG0_OP_MASK, req->vtag0_op) |
+ FIELD_PREP(TX_VTAG0_LID_MASK, NPC_LID_LA) |
+ FIELD_PREP(TX_VTAG0_RELPTR_MASK, 20) |
+ FIELD_PREP(TX_VTAG1_DEF_MASK, req->vtag1_def) |
+ FIELD_PREP(TX_VTAG1_OP_MASK, req->vtag1_op) |
+ FIELD_PREP(TX_VTAG1_LID_MASK, NPC_LID_LA) |
+ FIELD_PREP(TX_VTAG1_RELPTR_MASK, 24);
+}
+
+static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target,
+ int nixlf, struct rvu_pfvf *pfvf,
+ struct npc_install_flow_req *req,
+ struct npc_install_flow_rsp *rsp, bool enable,
+ bool pf_set_vfs_mac)
+{
+ u64 features, installed_features, missing_features = 0;
+ struct rvu_npc_mcam_rule *def_rule = pfvf->def_rule;
+ struct npc_mcam_write_entry_req write_req = { 0 };
+ bool new = false, msg_from_vf;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_npc_mcam_rule dummy = { 0 };
+ struct rvu_npc_mcam_rule *rule;
+ u16 owner = req->hdr.pcifunc;
+ struct msg_rsp write_rsp;
+ struct mcam_entry *entry;
+ int entry_index, err;
+
+ msg_from_vf = !!(owner & RVU_PFVF_FUNC_MASK);
+
+ installed_features = req->features;
+ features = req->features;
+ entry = &write_req.entry_data;
+ entry_index = req->entry;
+
+ npc_update_flow(rvu, entry, features, &req->packet, &req->mask, &dummy,
+ req->intf);
+
+ if (req->intf == NIX_INTF_RX)
+ npc_update_rx_entry(rvu, pfvf, entry, req, target);
+ else
+ npc_update_tx_entry(rvu, pfvf, entry, req, target);
+
+ /* Default unicast rules do not exist for TX */
+ if (req->intf == NIX_INTF_TX)
+ goto find_rule;
+
+ if (def_rule)
+ missing_features = (def_rule->features ^ features) &
+ def_rule->features;
+
+ if (req->default_rule && req->append) {
+ /* add to default rule */
+ if (missing_features)
+ npc_update_flow(rvu, entry, missing_features,
+ &def_rule->packet, &def_rule->mask,
+ &dummy, req->intf);
+ enable = rvu_npc_write_default_rule(rvu, blkaddr,
+ nixlf, target,
+ NIX_INTF_RX, entry,
+ &entry_index);
+ installed_features = req->features | missing_features;
+ } else if (req->default_rule && !req->append) {
+ /* overwrite default rule */
+ enable = rvu_npc_write_default_rule(rvu, blkaddr,
+ nixlf, target,
+ NIX_INTF_RX, entry,
+ &entry_index);
+ } else if (msg_from_vf) {
+ /* normal rule - for a VF, also fold the default rule into it */
+ npc_update_flow(rvu, entry, missing_features, &def_rule->packet,
+ &def_rule->mask, &dummy, req->intf);
+ installed_features = req->features | missing_features;
+ }
+find_rule:
+ rule = rvu_mcam_find_rule(mcam, entry_index);
+ if (!rule) {
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+ new = true;
+ }
+ /* no counter for default rule */
+ if (req->default_rule)
+ goto update_rule;
+
+ /* allocate new counter if rule has no counter */
+ if (req->set_cntr && !rule->has_cntr)
+ rvu_mcam_add_counter_to_rule(rvu, owner, rule, rsp);
+
+ /* if user wants to delete an existing counter for a rule then
+ * free the counter
+ */
+ if (!req->set_cntr && rule->has_cntr)
+ rvu_mcam_remove_counter_from_rule(rvu, owner, rule);
+
+ write_req.hdr.pcifunc = owner;
+ write_req.entry = req->entry;
+ write_req.intf = req->intf;
+ write_req.enable_entry = (u8)enable;
+ /* if counter is available then clear and use it */
+ if (req->set_cntr && rule->has_cntr) {
+ rvu_write64(rvu, blkaddr, NPC_AF_MATCH_STATX(rule->cntr), 0x00);
+ write_req.set_cntr = 1;
+ write_req.cntr = rule->cntr;
+ }
+
+ err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &write_req,
+ &write_rsp);
+ if (err) {
+ rvu_mcam_remove_counter_from_rule(rvu, owner, rule);
+ if (new)
+ kfree(rule);
+ return err;
+ }
+update_rule:
+ memcpy(&rule->packet, &dummy.packet, sizeof(rule->packet));
+ memcpy(&rule->mask, &dummy.mask, sizeof(rule->mask));
+ rule->entry = entry_index;
+ memcpy(&rule->rx_action, &entry->action, sizeof(struct nix_rx_action));
+ if (req->intf == NIX_INTF_TX)
+ memcpy(&rule->tx_action, &entry->action,
+ sizeof(struct nix_tx_action));
+ rule->vtag_action = entry->vtag_action;
+ rule->features = installed_features;
+ rule->default_rule = req->default_rule;
+ rule->owner = owner;
+ rule->enable = enable;
+ rule->intf = req->intf;
+
+ if (new)
+ rvu_mcam_add_rule(mcam, rule);
+ if (req->default_rule)
+ pfvf->def_rule = rule;
+
+ /* VF's MAC address is being changed via PF */
+ if (pf_set_vfs_mac)
+ ether_addr_copy(pfvf->default_mac, req->packet.dmac);
+
+ return 0;
+}
+
+int rvu_mbox_handler_npc_install_flow(struct rvu *rvu,
+ struct npc_install_flow_req *req,
+ struct npc_install_flow_rsp *rsp)
+{
+ bool from_vf = !!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK);
+ int blkaddr, nixlf, err;
+ struct rvu_pfvf *pfvf;
+ bool pf_set_vfs_mac = false;
+ bool enable = true;
+ u16 target;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0) {
+ dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
+ return -ENODEV;
+ }
+
+ if (from_vf && req->default_rule)
+ return NPC_MCAM_PERM_DENIED;
+
+ /* Each PF/VF's info is maintained in struct rvu_pfvf.
+ * The rvu_pfvf of the target PF/VF needs to be retrieved,
+ * hence derive the target pcifunc accordingly.
+ */
+
+ /* AF installing for a PF/VF */
+ if (!req->hdr.pcifunc)
+ target = req->vf;
+ /* PF installing for its VF */
+ else if (!from_vf && req->vf) {
+ target = (req->hdr.pcifunc & ~RVU_PFVF_FUNC_MASK) | req->vf;
+ pf_set_vfs_mac = req->default_rule &&
+ (req->features & BIT_ULL(NPC_DMAC));
+ }
+ /* msg received from PF/VF */
+ else
+ target = req->hdr.pcifunc;
+
+ if (npc_check_unsupported_flows(rvu, req->features, req->intf))
+ return -ENOTSUPP;
+
+ if (npc_mcam_verify_channel(rvu, target, req->intf, req->channel))
+ return -EINVAL;
+
+ pfvf = rvu_get_pfvf(rvu, target);
+
+ if (!from_vf && req->vf)
+ pfvf->pf_set_vf_cfg = 1;
+
+ /* update req destination mac addr */
+ if ((req->features & BIT_ULL(NPC_DMAC)) &&
+ req->intf == NIX_INTF_RX &&
+ is_zero_ether_addr(req->packet.dmac)) {
+ ether_addr_copy(req->packet.dmac, pfvf->default_mac);
+ u64_to_ether_addr(0xffffffffffffull, req->mask.dmac);
+ }
+
+ err = nix_get_nixlf(rvu, target, &nixlf, NULL);
+
+ /* If interface is uninitialized then do not enable entry */
+ if (err || (!req->default_rule && !pfvf->def_rule))
+ enable = false;
+
+ /* Packets reaching NPC in the Tx path imply that a
+ * NIXLF is properly set up and transmitting,
+ * hence Tx rules can be enabled.
+ */
+ if (req->intf == NIX_INTF_TX)
+ enable = true;
+
+ /* Do not allow requests from uninitialized VFs */
+ if (from_vf && !enable)
+ return -EINVAL;
+
+ /* If message is from VF then its flow should not overlap with
+ * reserved unicast flow.
+ */
+ if (from_vf && pfvf->def_rule && req->intf == NIX_INTF_RX &&
+ pfvf->def_rule->features & req->features)
+ return -EINVAL;
+
+ return npc_install_flow(rvu, blkaddr, target, nixlf, pfvf,
+ req, rsp, enable, pf_set_vfs_mac);
+}
+
+static int npc_delete_flow(struct rvu *rvu, u16 entry, u16 pcifunc)
+{
+ struct npc_mcam_ena_dis_entry_req dis_req = { 0 };
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_npc_mcam_rule *rule;
+ struct msg_rsp dis_rsp;
+ int err;
+
+ rule = rvu_mcam_find_rule(mcam, entry);
+ if (!rule)
+ return -ENOENT;
+
+ if (rule->default_rule)
+ return 0;
+
+ if (rule->has_cntr)
+ rvu_mcam_remove_counter_from_rule(rvu, pcifunc, rule);
+
+ dis_req.hdr.pcifunc = pcifunc;
+ dis_req.entry = entry;
+ err = rvu_mbox_handler_npc_mcam_dis_entry(rvu, &dis_req, &dis_rsp);
+ if (err)
+ return err;
+
+ list_del(&rule->list);
+ kfree(rule);
+
+ return 0;
+}
+
+int rvu_mbox_handler_npc_delete_flow(struct rvu *rvu,
+ struct npc_delete_flow_req *req,
+ struct msg_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_npc_mcam_rule *iter, *tmp;
+ u16 pcifunc = req->hdr.pcifunc;
+ int err;
+
+ if (req->end) {
+ list_for_each_entry_safe(iter, tmp, &mcam->mcam_rules, list) {
+ if (iter->owner == pcifunc &&
+ iter->entry >= req->start &&
+ iter->entry <= req->end) {
+ err = npc_delete_flow(rvu, iter->entry,
+ pcifunc);
+ if (err)
+ return err;
+ }
+ }
+ return 0;
+ }
+
+ if (!req->all)
+ return npc_delete_flow(rvu, req->entry, pcifunc);
+
+ list_for_each_entry_safe(iter, tmp, &mcam->mcam_rules, list) {
+ if (iter->owner == pcifunc) {
+ err = npc_delete_flow(rvu, iter->entry, pcifunc);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+void npc_mcam_enable_flows(struct rvu *rvu, u16 target)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, target);
+ struct npc_mcam_ena_dis_entry_req ena_req = { 0 };
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_npc_mcam_rule *rule;
+ int blkaddr, bank, err;
+ struct msg_rsp rsp;
+ u64 def_action;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0) {
+ dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
+ return;
+ }
+
+ list_for_each_entry(rule, &mcam->mcam_rules, list) {
+ if (rule->intf == NIX_INTF_RX &&
+ rule->rx_action.pf_func == target && !rule->enable) {
+ if (rule->default_rule) {
+ npc_enable_mcam_entry(rvu, mcam, blkaddr,
+ rule->entry, true);
+ rule->enable = true;
+ continue;
+ }
+
+ if (rule->rx_action.op == NIX_RX_ACTION_DEFAULT) {
+ if (!pfvf->def_rule)
+ continue;
+ /* Use default unicast entry action */
+ rule->rx_action = pfvf->def_rule->rx_action;
+ def_action = *(u64 *)&pfvf->def_rule->rx_action;
+ bank = npc_get_bank(mcam, rule->entry);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION
+ (rule->entry, bank), def_action);
+ }
+ ena_req.hdr.pcifunc = rule->owner;
+ ena_req.entry = rule->entry;
+ err = rvu_mbox_handler_npc_mcam_ena_entry(rvu, &ena_req,
+ &rsp);
+ if (err)
+ continue;
+ rule->enable = true;
+ }
+ }
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_ptp.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_ptp.c
new file mode 100644
index 000000000000..39cc31bcedb4
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_ptp.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "ptp.h"
+#include "mbox.h"
+#include "rvu.h"
+
+int rvu_mbox_handler_ptp_op(struct rvu *rvu, struct ptp_req *req,
+ struct ptp_rsp *rsp)
+{
+ int err = 0;
+
+ if (!rvu->ptp)
+ return -ENODEV;
+
+ switch (req->op) {
+ case PTP_OP_ADJFINE:
+ err = ptp_adjfine(rvu->ptp, req->scaled_ppm);
+ break;
+ case PTP_OP_GET_CLOCK:
+ err = ptp_get_clock(rvu->ptp, req->is_pmu, &rsp->clk,
+ &rsp->tsc);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
index 7ca599b973c0..05a5fac544ef 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -112,6 +112,7 @@
#define NPA_AF_LF_RST (0x0020)
#define NPA_AF_GEN_CFG (0x0030)
#define NPA_AF_NDC_CFG (0x0040)
+#define NPA_AF_NDC_SYNC (0x0050)
#define NPA_AF_INP_CTL (0x00D0)
#define NPA_AF_ACTIVE_CYCLES_PC (0x00F0)
#define NPA_AF_AVG_DELAY (0x0100)
@@ -213,9 +214,11 @@
#define NIX_AF_RX_DEF_IUDP (0x0280)
#define NIX_AF_RX_DEF_OSCTP (0x0290)
#define NIX_AF_RX_DEF_ISCTP (0x02A0)
-#define NIX_AF_RX_DEF_IPSECX (0x02B0)
+#define NIX_AF_RX_DEF_IPSECX(a) (0x02B0ull | (uint64_t)(a) << 3)
#define NIX_AF_RX_IPSEC_GEN_CFG (0x0300)
-#define NIX_AF_RX_CPTX_INST_ADDR (0x0310)
+#define NIX_AF_RX_CPTX_INST_QSEL(a) (0x0320ull | (uint64_t)(a) << 3)
+#define NIX_AF_RX_CPTX_CREDIT(a) (0x0360ull | (uint64_t)(a) << 3)
+#define NIX_AF_NDC_RX_SYNC (0x03E0)
#define NIX_AF_NDC_TX_SYNC (0x03F0)
#define NIX_AF_AQ_CFG (0x0400)
#define NIX_AF_AQ_BASE (0x0410)
@@ -239,20 +242,19 @@
#define NIX_AF_SEB_ECO (0x0600)
#define NIX_AF_SEB_TEST_BP (0x0610)
#define NIX_AF_NORM_TX_FIFO_STATUS (0x0620)
-#define NIX_AF_EXPR_TX_FIFO_STATUS (0x0630)
#define NIX_AF_SDP_TX_FIFO_STATUS (0x0640)
#define NIX_AF_TX_NPC_CAPTURE_CONFIG (0x0660)
#define NIX_AF_TX_NPC_CAPTURE_INFO (0x0670)
+#define NIX_AF_SEB_CFG (0x05F0)
#define NIX_AF_DEBUG_NPC_RESP_DATAX(a) (0x680 | (a) << 3)
#define NIX_AF_SMQX_CFG(a) (0x700 | (a) << 16)
+#define NIX_AF_SMQX_STATUS(a) (0x730 | (a) << 16)
#define NIX_AF_SQM_DBG_CTL_STATUS (0x750)
#define NIX_AF_PSE_CHANNEL_LEVEL (0x800)
#define NIX_AF_PSE_SHAPER_CFG (0x810)
-#define NIX_AF_TX_EXPR_CREDIT (0x830)
#define NIX_AF_MARK_FORMATX_CTL(a) (0x900 | (a) << 18)
#define NIX_AF_TX_LINKX_NORM_CREDIT(a) (0xA00 | (a) << 16)
-#define NIX_AF_TX_LINKX_EXPR_CREDIT(a) (0xA10 | (a) << 16)
#define NIX_AF_TX_LINKX_SW_XOFF(a) (0xA20 | (a) << 16)
#define NIX_AF_TX_LINKX_HW_XOFF(a) (0xA30 | (a) << 16)
#define NIX_AF_SDP_LINK_CREDIT (0xa40)
@@ -386,7 +388,7 @@
#define NIX_AF_LFX_RX_IPSEC_CFG0(a) (0x4140 | (a) << 17)
#define NIX_AF_LFX_RX_IPSEC_CFG1(a) (0x4148 | (a) << 17)
#define NIX_AF_LFX_RX_IPSEC_DYNO_CFG(a) (0x4150 | (a) << 17)
-#define NIX_AF_LFX_RX_IPSEC_DYNO_BASE(a) (0x4158 | (a) << 17)
+#define NIX_AF_LFX_RX_IPSEC_DYNO_BASE(a)(0x4158 | (a) << 17)
#define NIX_AF_LFX_RX_IPSEC_SA_BASE(a) (0x4170 | (a) << 17)
#define NIX_AF_LFX_TX_STATUS(a) (0x4180 | (a) << 17)
#define NIX_AF_LFX_RX_VTAG_TYPEX(a, b) (0x4200 | (a) << 17 | (b) << 3)
@@ -399,20 +401,175 @@
#define NIX_AF_RX_NPC_MIRROR_RCV (0x4720)
#define NIX_AF_RX_NPC_MIRROR_DROP (0x4730)
#define NIX_AF_RX_ACTIVE_CYCLES_PCX(a) (0x4800 | (a) << 16)
+#define NIX_AF_RQM_BP_TEST (0x4880)
+#define NIX_AF_CQM_BP_TEST (0x48c0)
#define NIX_PRIV_AF_INT_CFG (0x8000000)
#define NIX_PRIV_LFX_CFG (0x8000010)
#define NIX_PRIV_LFX_INT_CFG (0x8000020)
#define NIX_AF_RVU_LF_CFG_DEBUG (0x8000030)
+#define NIX_AF_LF_CFG_SHIFT 17
+#define NIX_AF_LF_SSO_PF_FUNC_SHIFT 16
+
/* SSO */
#define SSO_AF_CONST (0x1000)
#define SSO_AF_CONST1 (0x1008)
-#define SSO_AF_BLK_RST (0x10f8)
+#define SSO_AF_NOS_CNT (0x1050)
+#define SSO_AF_AW_WE (0x1080)
#define SSO_AF_LF_HWGRP_RST (0x10e0)
+#define SSO_AF_AW_CFG (0x10f0)
+#define SSO_AF_BLK_RST (0x10f8)
+#define SSO_AF_ACTIVE_CYCLES0 (0x1100)
+#define SSO_AF_ACTIVE_CYCLES1 (0x1108)
+#define SSO_AF_ACTIVE_CYCLES2 (0x1110)
+#define SSO_AF_ERR0 (0x1220)
+#define SSO_AF_ERR0_W1S (0x1228)
+#define SSO_AF_ERR0_ENA_W1C (0x1230)
+#define SSO_AF_ERR0_ENA_W1S (0x1238)
+#define SSO_AF_ERR2 (0x1260)
+#define SSO_AF_ERR2_W1S (0x1268)
+#define SSO_AF_ERR2_ENA_W1C (0x1270)
+#define SSO_AF_ERR2_ENA_W1S (0x1278)
+#define SSO_AF_UNMAP_INFO (0x12f0)
+#define SSO_AF_UNMAP_INFO2 (0x1300)
+#define SSO_AF_UNMAP_INFO3 (0x1310)
+#define SSO_AF_RAS (0x1420)
+#define SSO_AF_RAS_W1S (0x1430)
+#define SSO_AF_RAS_ENA_W1C (0x1460)
+#define SSO_AF_RAS_ENA_W1S (0x1470)
+#define SSO_PRIV_AF_INT_CFG (0x3000)
+#define SSO_AF_AW_ADD (0x2080)
+#define SSO_AF_AW_READ_ARB (0x2090)
+#define SSO_AF_XAQ_REQ_PC (0x20B0)
+#define SSO_AF_XAQ_LATENCY_PC (0x20B8)
+#define SSO_AF_TAQ_CNT (0x20c0)
+#define SSO_AF_TAQ_ADD (0x20e0)
+#define SSO_AF_POISONX(a) (0x2100 | (a) << 3)
+#define SSO_AF_POISONX_W1S(a) (0x2200 | (a) << 3)
#define SSO_AF_RVU_LF_CFG_DEBUG (0x3800)
#define SSO_PRIV_LFX_HWGRP_CFG (0x10000)
#define SSO_PRIV_LFX_HWGRP_INT_CFG (0x20000)
+#define SSO_AF_XAQX_GMCTL(a) (0xe0000 | (a) << 3)
+#define SSO_AF_XAQX_HEAD_PTR(a) (0x80000 | (a) << 3)
+#define SSO_AF_XAQX_TAIL_PTR(a) (0x90000 | (a) << 3)
+#define SSO_AF_XAQX_HEAD_NEXT(a) (0xa0000 | (a) << 3)
+#define SSO_AF_XAQX_TAIL_NEXT(a) (0xb0000 | (a) << 3)
+#define SSO_AF_TOAQX_STATUS(a) (0xd0000 | (a) << 3)
+#define SSO_AF_TIAQX_STATUS(a) (0xc0000 | (a) << 3)
+#define SSO_AF_HWGRPX_IAQ_THR(a) (0x200000 | (a) << 12)
+#define SSO_AF_HWGRPX_TAQ_THR(a) (0x200010 | (a) << 12)
+#define SSO_AF_HWGRPX_PRI(a) (0x200020 | (a) << 12)
+#define SSO_AF_HWGRPX_WS_PC(a) (0x200050 | (a) << 12)
+#define SSO_AF_HWGRPX_EXT_PC(a) (0x200060 | (a) << 12)
+#define SSO_AF_HWGRPX_WA_PC(a) (0x200070 | (a) << 12)
+#define SSO_AF_HWGRPX_TS_PC(a) (0x200080 | (a) << 12)
+#define SSO_AF_HWGRPX_DS_PC(a) (0x200090 | (a) << 12)
+#define SSO_AF_HWGRPX_DQ_PC(a) (0x2000A0 | (a) << 12)
+#define SSO_AF_HWGRPX_PAGE_CNT(a) (0x200100 | (a) << 12)
+#define SSO_AF_IU_ACCNTX_CFG(a) (0x50000 | (a) << 3)
+#define SSO_AF_IU_ACCNTX_RST(a) (0x60000 | (a) << 3)
+#define SSO_AF_HWGRPX_AW_STATUS(a) (0x200110 | (a) << 12)
+#define SSO_AF_HWGRPX_AW_CFG(a) (0x200120 | (a) << 12)
+#define SSO_AF_HWGRPX_AW_TAGSPACE(a) (0x200130 | (a) << 12)
+#define SSO_AF_HWGRPX_XAQ_AURA(a) (0x200140 | (a) << 12)
+#define SSO_AF_HWGRPX_XAQ_LIMIT(a) (0x200220 | (a) << 12)
+#define SSO_AF_HWGRPX_IU_ACCNT(a) (0x200230 | (a) << 12)
+#define SSO_AF_HWSX_ARB(a) (0x400100 | (a) << 12)
+#define SSO_AF_HWSX_INV(a) (0x400180 | (a) << 12)
+#define SSO_AF_HWSX_GMCTL(a) (0x400200 | (a) << 12)
+#define SSO_AF_HWSX_SX_GRPMSKX(a, b, c) \
+ (0x400400 | (a) << 12 | (b) << 5 | (c) << 3)
+#define SSO_AF_TAQX_LINK(a) (0xc00000 | (a) << 3)
+#define SSO_AF_TAQX_WAEY_TAG(a, b) (0xe00000 | (a) << 8 | (b) << 4)
+#define SSO_AF_TAQX_WAEY_WQP(a, b) (0xe00008 | (a) << 8 | (b) << 4)
+#define SSO_AF_IPL_FREEX(a) (0x800000 | (a) << 3)
+#define SSO_AF_IPL_IAQX(a) (0x840000 | (a) << 3)
+#define SSO_AF_IPL_DESCHEDX(a) (0x860000 | (a) << 3)
+#define SSO_AF_IPL_CONFX(a) (0x880000 | (a) << 3)
+#define SSO_AF_IENTX_TAG(a) (0xa00000 | (a) << 3)
+#define SSO_AF_IENTX_GRP(a) (0xa20000 | (a) << 3)
+#define SSO_AF_IENTX_PENDTAG(a) (0xa40000 | (a) << 3)
+#define SSO_AF_IENTX_LINKS(a) (0xa60000 | (a) << 3)
+#define SSO_AF_IENTX_QLINKS(a) (0xa80000 | (a) << 3)
+#define SSO_AF_IENTX_WQP(a) (0xaa0000 | (a) << 3)
+#define SSO_AF_XAQDIS_DIGESTX(a) (0x901000 | (a) << 3)
+#define SSO_AF_FLR_AQ_DIGESTX(a) (0x901200 | (a) << 3)
+#define SSO_AF_QCTLDIS_DIGESTX(a) (0x900E00 | (a) << 3)
+#define SSO_AF_WQP0_DIGESTX(a) (0x900A00 | (a) << 3)
+#define SSO_AF_NPA_DIGESTX(a) (0x900000 | (a) << 3)
+#define SSO_AF_BFP_DIGESTX(a) (0x900200 | (a) << 3)
+#define SSO_AF_BFPN_DIGESTX(a) (0x900400 | (a) << 3)
+#define SSO_AF_GRPDIS_DIGESTX(a) (0x900600 | (a) << 3)
+
+#define SSO_AF_IAQ_FREE_CNT_MASK 0x3FFFull
+#define SSO_AF_IAQ_RSVD_FREE_MASK 0x3FFFull
+#define SSO_AF_IAQ_RSVD_FREE_SHIFT 16
+#define SSO_AF_IAQ_FREE_CNT_MAX SSO_AF_IAQ_FREE_CNT_MASK
+#define SSO_AF_AW_ADD_RSVD_FREE_MASK 0x3FFFull
+#define SSO_AF_AW_ADD_RSVD_FREE_SHIFT 16
+#define SSO_HWGRP_IAQ_MAX_THR_MASK 0x3FFFull
+#define SSO_HWGRP_IAQ_RSVD_THR_MASK 0x3FFFull
+#define SSO_HWGRP_IAQ_MAX_THR_SHIFT 32
+#define SSO_HWGRP_IAQ_RSVD_THR 0x2
+#define SSO_HWGRP_IAQ_GRP_CNT_SHIFT 48
+#define SSO_HWGRP_IAQ_GRP_CNT_MASK 0x3FFFull
+#define SSO_AF_HWGRPX_IUEX_NOSCHED(a, b)\
+ ((((b >> 48) & 0x3FF) == a) && (b & BIT_ULL(60)))
+#define SSO_AF_HWGRP_PAGE_CNT_MASK (BIT_ULL(32) - 1)
+#define SSO_HWGRP_IAQ_MAX_THR_STRM_PERF 0xD0
+#define SSO_AF_HWGRP_IU_ACCNT_MAX_THR 0x7FFFull
+
+#define SSO_AF_TAQ_FREE_CNT_MASK 0x7FFull
+#define SSO_AF_TAQ_RSVD_FREE_MASK 0x7FFull
+#define SSO_AF_TAQ_RSVD_FREE_SHIFT 16
+#define SSO_AF_TAQ_FREE_CNT_MAX SSO_AF_TAQ_FREE_CNT_MASK
+#define SSO_AF_TAQ_ADD_RSVD_FREE_MASK 0x1FFFull
+#define SSO_AF_TAQ_ADD_RSVD_FREE_SHIFT 16
+#define SSO_HWGRP_TAQ_MAX_THR_MASK 0x7FFull
+#define SSO_HWGRP_TAQ_RSVD_THR_MASK 0x7FFull
+#define SSO_HWGRP_TAQ_MAX_THR_SHIFT 32
+#define SSO_HWGRP_TAQ_RSVD_THR 0x3
+#define SSO_AF_ERR0_MASK 0xFFEull
+#define SSO_AF_ERR2_MASK 0xF001F000ull
+#define SSO_HWGRP_TAQ_MAX_THR_STRM_PERF 0x10
+
+#define SSO_HWGRP_PRI_MASK 0x7ull
+#define SSO_HWGRP_PRI_AFF_MASK 0xFull
+#define SSO_HWGRP_PRI_AFF_SHIFT 8
+#define SSO_HWGRP_PRI_WGT_MASK 0x3Full
+#define SSO_HWGRP_PRI_WGT_SHIFT 16
+#define SSO_HWGRP_PRI_WGT_LEFT_MASK 0x3Full
+#define SSO_HWGRP_PRI_WGT_LEFT_SHIFT 24
+
+#define SSO_HWGRP_AW_CFG_RWEN BIT_ULL(0)
+#define SSO_HWGRP_AW_CFG_LDWB BIT_ULL(1)
+#define SSO_HWGRP_AW_CFG_LDT BIT_ULL(2)
+#define SSO_HWGRP_AW_CFG_STT BIT_ULL(3)
+#define SSO_HWGRP_AW_CFG_XAQ_BYP_DIS BIT_ULL(4)
+
+#define SSO_HWGRP_AW_STS_TPTR_VLD BIT_ULL(8)
+#define SSO_HWGRP_AW_STS_NPA_FETCH BIT_ULL(9)
+#define SSO_HWGRP_AW_STS_XAQ_BUFSC_MASK 0x7ull
+#define SSO_HWGRP_AW_STS_INIT_STS 0x18ull
+
+#define SSO_LF_GGRP_OP_ADD_WORK1 (0x8ull)
+#define SSO_LF_GGRP_QCTL (0x20ull)
+#define SSO_LF_GGRP_INT (0x100ull)
+#define SSO_LF_GGRP_INT_ENA_W1S (0x110ull)
+#define SSO_LF_GGRP_INT_ENA_W1C (0x118ull)
+#define SSO_LF_GGRP_INT_THR (0x140ull)
+#define SSO_LF_GGRP_INT_CNT (0x180ull)
+#define SSO_LF_GGRP_XAQ_CNT (0x1b0ull)
+#define SSO_LF_GGRP_AQ_CNT (0x1c0ull)
+#define SSO_LF_GGRP_AQ_THR (0x1e0ull)
+#define SSO_LF_GGRP_MISC_CNT (0x200ull)
+
+#define SSO_LF_GGRP_INT_MASK (0x7)
+#define SSO_LF_GGRP_AQ_THR_MASK (BIT_ULL(33) - 1)
+#define SSO_LF_GGRP_XAQ_CNT_MASK (BIT_ULL(33) - 1)
+#define SSO_LF_GGRP_INT_CNT_MASK (0x3FFF3FFF0000ull)
/* SSOW */
#define SSOW_AF_RVU_LF_HWS_CFG_DEBUG (0x0010)
@@ -420,6 +577,22 @@
#define SSOW_PRIV_LFX_HWS_CFG (0x1000)
#define SSOW_PRIV_LFX_HWS_INT_CFG (0x2000)
+#define SSOW_LF_GWS_PENDSTATE (0x50ull)
+#define SSOW_LF_GWS_NW_TIM (0x70ull)
+#define SSOW_LF_GWS_INT (0x100ull)
+#define SSOW_LF_GWS_INT_ENA_W1C (0x118ull)
+#define SSOW_LF_GWS_TAG (0x200ull)
+#define SSOW_LF_GWS_WQP (0x210ull)
+#define SSOW_LF_GWS_OP_GET_WORK (0x600ull)
+#define SSOW_LF_GWS_OP_SWTAG_FLUSH (0x800ull)
+#define SSOW_LF_GWS_OP_DESCHED (0x880ull)
+#define SSOW_LF_GWS_OP_CLR_NSCHED0 (0xA00ull)
+#define SSOW_LF_GWS_OP_GWC_INVAL (0xe00ull)
+
+#define SSO_TT_EMPTY (0x3)
+#define SSOW_LF_GWS_INT_MASK (0x7FF)
+#define SSOW_LF_GWS_MAX_NW_TIM (BIT_ULL(10) - 1)
+
/* TIM */
#define TIM_AF_CONST (0x90)
#define TIM_PRIV_LFX_CFG (0x20000)
@@ -427,17 +600,80 @@
#define TIM_AF_RVU_LF_CFG_DEBUG (0x30000)
#define TIM_AF_BLK_RST (0x10)
#define TIM_AF_LF_RST (0x20)
+#define TIM_AF_RINGX_GMCTL(a) (0x2000 | (a) << 3)
+#define TIM_AF_RINGX_CTL0(a) (0x4000 | (a) << 3)
+#define TIM_AF_RINGX_CTL1(a) (0x6000 | (a) << 3)
+#define TIM_AF_RINGX_CTL2(a) (0x8000 | (a) << 3)
+#define TIM_AF_FLAGS_REG (0x80)
+#define TIM_AF_FLAGS_REG_ENA_TIM BIT_ULL(0)
+#define TIM_AF_RINGX_CTL1_ENA BIT_ULL(47)
+#define TIM_AF_RINGX_CTL1_RCF_BUSY BIT_ULL(50)
+
+#define TIM_AF_RING_GMCTL_SHIFT 3
+#define TIM_AF_RING_SSO_PF_FUNC_SHIFT 0
/* CPT */
-#define CPT_AF_CONSTANTS0 (0x0000)
-#define CPT_PRIV_LFX_CFG (0x41000)
-#define CPT_PRIV_LFX_INT_CFG (0x43000)
-#define CPT_AF_RVU_LF_CFG_DEBUG (0x45000)
-#define CPT_AF_LF_RST (0x44000)
-#define CPT_AF_BLK_RST (0x46000)
+#define CPT_AF_CONSTANTS0 (0x0ull)
+#define CPT_AF_CONSTANTS1 (0x1000ull)
+#define CPT_AF_DIAG (0x3000ull)
+#define CPT_AF_ECO (0x4000ull)
+#define CPT_AF_FLTX_INT(a) (0xa000ull | (u64)(a) << 3)
+#define CPT_AF_FLTX_INT_W1S(a) (0xb000ull | (u64)(a) << 3)
+#define CPT_AF_FLTX_INT_ENA_W1C(a) (0xc000ull | (u64)(a) << 3)
+#define CPT_AF_FLTX_INT_ENA_W1S(a) (0xd000ull | (u64)(a) << 3)
+#define CPT_AF_PSNX_EXE(a) (0xe000ull | (u64)(a) << 3)
+#define CPT_AF_PSNX_EXE_W1S(a) (0xf000ull | (u64)(a) << 3)
+#define CPT_AF_PSNX_LF(a) (0x10000ull | (u64)(a) << 3)
+#define CPT_AF_PSNX_LF_W1S(a) (0x11000ull | (u64)(a) << 3)
+#define CPT_AF_EXEX_CTL2(a) (0x12000ull | (u64)(a) << 3)
+#define CPT_AF_EXEX_STS(a) (0x13000ull | (u64)(a) << 3)
+#define CPT_AF_EXE_ERR_INFO (0x14000ull)
+#define CPT_AF_EXEX_ACTIVE(a) (0x16000ull | (u64)(a) << 3)
+#define CPT_AF_INST_REQ_PC (0x17000ull)
+#define CPT_AF_INST_LATENCY_PC (0x18000ull)
+#define CPT_AF_RD_REQ_PC (0x19000ull)
+#define CPT_AF_RD_LATENCY_PC (0x1a000ull)
+#define CPT_AF_RD_UC_PC (0x1b000ull)
+#define CPT_AF_ACTIVE_CYCLES_PC (0x1c000ull)
+#define CPT_AF_EXE_DBG_CTL (0x1d000ull)
+#define CPT_AF_EXE_DBG_DATA (0x1e000ull)
+#define CPT_AF_EXE_REQ_TIMER (0x1f000ull)
+#define CPT_AF_EXEX_CTL(a) (0x20000ull | (u64)(a) << 3)
+#define CPT_AF_EXE_PERF_CTL (0x21000ull)
+#define CPT_AF_EXE_DBG_CNTX(a) (0x22000ull | (u64)(a) << 3)
+#define CPT_AF_EXE_PERF_EVENT_CNT (0x23000ull)
+#define CPT_AF_EXE_EPCI_INBX_CNT(a) (0x24000ull | (u64)(a) << 3)
+#define CPT_AF_EXE_EPCI_OUTBX_CNT(a) (0x25000ull | (u64)(a) << 3)
+#define CPT_AF_EXEX_UCODE_BASE(a) (0x26000ull | (u64)(a) << 3)
+#define CPT_AF_LFX_CTL(a) (0x27000ull | (u64)(a) << 3)
+#define CPT_AF_LFX_CTL2(a) (0x29000ull | (u64)(a) << 3)
+#define CPT_AF_CPTCLK_CNT (0x2a000ull)
+#define CPT_AF_PF_FUNC (0x2b000ull)
+#define CPT_AF_LFX_PTR_CTL(a) (0x2c000ull | (u64)(a) << 3)
+#define CPT_AF_GRPX_THR(a) (0x2d000ull | (u64)(a) << 3)
+#define CPT_AF_CTL (0x2e000ull)
+#define CPT_AF_XEX_THR(a) (0x2f000ull | (u64)(a) << 3)
+#define CPT_PRIV_LFX_CFG (0x41000ull)
+#define CPT_PRIV_AF_INT_CFG (0x42000ull)
+#define CPT_PRIV_LFX_INT_CFG (0x43000ull)
+#define CPT_AF_LF_RST (0x44000ull)
+#define CPT_AF_RVU_LF_CFG_DEBUG (0x45000ull)
+#define CPT_AF_BLK_RST (0x46000ull)
+#define CPT_AF_RVU_INT (0x47000ull)
+#define CPT_AF_RVU_INT_W1S (0x47008ull)
+#define CPT_AF_RVU_INT_ENA_W1S (0x47010ull)
+#define CPT_AF_RVU_INT_ENA_W1C (0x47018ull)
+#define CPT_AF_RAS_INT (0x47020ull)
+#define CPT_AF_RAS_INT_W1S (0x47028ull)
+#define CPT_AF_RAS_INT_ENA_W1S (0x47030ull)
+#define CPT_AF_RAS_INT_ENA_W1C (0x47038ull)
#define NPC_AF_BLK_RST (0x00040)
+#define CPT_AF_LF_CTL2_SHIFT 3
+#define CPT_AF_LF_SSO_PF_FUNC_SHIFT 32
+
/* NPC */
#define NPC_AF_CFG (0x00000)
#define NPC_AF_ACTIVE_PC (0x00010)
@@ -525,4 +761,12 @@
(0x00F00 | (a) << 5 | (b) << 4)
#define NDC_AF_BANKX_HIT_PC(a) (0x01000 | (a) << 3)
#define NDC_AF_BANKX_MISS_PC(a) (0x01100 | (a) << 3)
+#define AF_BAR2_ALIASX_SIZE (0x100000ull)
+#define SSOW_AF_BAR2_SEL (0x9000000ull)
+#define SSO_AF_BAR2_SEL (0x9000000ull)
+
+#define AF_BAR2_ALIASX(a, b) (0x9100000ull | (a) << 12 | (b))
+#define SSOW_AF_BAR2_ALIASX(a, b) AF_BAR2_ALIASX(a, b)
+#define SSO_AF_BAR2_ALIASX(a, b) AF_BAR2_ALIASX(a, b)
+
#endif /* RVU_REG_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c
new file mode 100644
index 000000000000..071e69d7da15
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2019 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/pci.h>
+#include "rvu.h"
+
+/* SDP PF device id */
+#define PCI_DEVID_OTX2_SDP_PF 0xA0F6
+
+/* SDP PF number */
+static int sdp_pf_num = -1;
+
+bool is_sdp_pf(u16 pcifunc)
+{
+ if (rvu_get_pf(pcifunc) != sdp_pf_num)
+ return false;
+ if (pcifunc & RVU_PFVF_FUNC_MASK)
+ return false;
+
+ return true;
+}
+
+int rvu_sdp_init(struct rvu *rvu)
+{
+ struct pci_dev *pdev;
+ int i;
+
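+ /* Find the PF whose PCI device ID matches the SDP PF. */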
+ for (i = 0; i < rvu->hw->total_pfs; i++) {
+ pdev = pci_get_domain_bus_and_slot(
+ pci_domain_nr(rvu->pdev->bus), i + 1, 0);
+ if (!pdev)
+ continue;
+
+ if (pdev->device == PCI_DEVID_OTX2_SDP_PF) {
+ sdp_pf_num = i;
+ pci_dev_put(pdev);
+ break;
+ }
+
+ pci_dev_put(pdev);
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_sso.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sso.c
new file mode 100644
index 000000000000..663fb2ce2865
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sso.c
@@ -0,0 +1,1322 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/types.h>
+
+#include "rvu_struct.h"
+
+#include "rvu_reg.h"
+#include "rvu.h"
+
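+/* Store val0 and val1 to addr and addr + 8 (a single STP on arm64). */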
+#if defined(CONFIG_ARM64)
+#define rvu_sso_store_pair(val0, val1, addr) ({ \
+ __asm__ volatile("stp %x[x0], %x[x1], [%x[p1]]" \
+ : \
+ : \
+ [x0]"r"(val0), [x1]"r"(val1), [p1]"r"(addr)); \
+ })
+#else
+#define rvu_sso_store_pair(val0, val1, addr) \
+ do { \
+ *(uint64_t *)addr = val0; \
+ *(uint64_t *)(((uint8_t *)addr) + 8) = val1; \
+ } while (0)
+#endif
+
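+/* Dump each interrupt digest register and write it back to acknowledge it. */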
+#define SSO_AF_INT_DIGEST_PRNT(reg) \
+ for (i = 0; i < block->lf.max / 64; i++) { \
+ reg0 = rvu_read64(rvu, blkaddr, reg##X(i)); \
+ dev_err(rvu->dev, #reg "(%d) : 0x%llx", i, reg0); \
+ rvu_write64(rvu, blkaddr, reg##X(i), reg0); \
+ }
+
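+/* Program this HWGRP's IAQ and TAQ thresholds and account any newly
+ * reserved entries via SSO_AF_AW_ADD / SSO_AF_TAQ_ADD.
+ */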
+void rvu_sso_hwgrp_config_thresh(struct rvu *rvu, int blkaddr, int lf)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u64 add, grp_thr, grp_rsvd;
+ u64 reg;
+
+ /* Configure IAQ Thresholds */
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_IAQ_THR(lf));
+ grp_rsvd = reg & SSO_HWGRP_IAQ_RSVD_THR_MASK;
+ add = hw->sso.iaq_rsvd - grp_rsvd;
+
+ grp_thr = hw->sso.iaq_rsvd & SSO_HWGRP_IAQ_RSVD_THR_MASK;
+ grp_thr |= ((hw->sso.iaq_max & SSO_HWGRP_IAQ_MAX_THR_MASK) <<
+ SSO_HWGRP_IAQ_MAX_THR_SHIFT);
+
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_IAQ_THR(lf), grp_thr);
+
+ if (add)
+ rvu_write64(rvu, blkaddr, SSO_AF_AW_ADD,
+ (add & SSO_AF_AW_ADD_RSVD_FREE_MASK) <<
+ SSO_AF_AW_ADD_RSVD_FREE_SHIFT);
+
+ /* Configure TAQ Thresholds */
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_TAQ_THR(lf));
+ grp_rsvd = reg & SSO_HWGRP_TAQ_RSVD_THR_MASK;
+ add = hw->sso.taq_rsvd - grp_rsvd;
+
+ grp_thr = hw->sso.taq_rsvd & SSO_HWGRP_TAQ_RSVD_THR_MASK;
+ grp_thr |= ((hw->sso.taq_max & SSO_HWGRP_TAQ_MAX_THR_MASK) <<
+ SSO_HWGRP_TAQ_MAX_THR_SHIFT);
+
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_TAQ_THR(lf), grp_thr);
+
+ if (add)
+ rvu_write64(rvu, blkaddr, SSO_AF_TAQ_ADD,
+ (add & SSO_AF_TAQ_RSVD_FREE_MASK) <<
+ SSO_AF_TAQ_ADD_RSVD_FREE_SHIFT);
+}
+
+static void rvu_sso_enable_aw_src(struct rvu *rvu, int lf_cnt, int sub_blkaddr,
+ u64 addr, int *lf_arr, u16 pcifunc, u8 shift,
+ u8 addr_off)
+{
+ u64 reg;
+ int lf;
+
+ for (lf = 0; lf < lf_cnt; lf++) {
+ reg = rvu_read64(rvu, sub_blkaddr, addr |
+ lf_arr[lf] << addr_off);
+
+ reg |= ((u64)pcifunc << shift);
+ rvu_write64(rvu, sub_blkaddr, addr |
+ lf_arr[lf] << addr_off, reg);
+ }
+}
+
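+/* Clear this PF_FUNC from every LF of the given block that still points
+ * at it and return the detached LFs in lf_arr so they can be restored.
+ */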
+static int rvu_sso_disable_aw_src(struct rvu *rvu, int **lf_arr,
+ int sub_blkaddr, u8 shift, u8 addr_off,
+ u16 pcifunc, u64 addr)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int lf_cnt = 0, lf;
+ u64 reg;
+
+ if (sub_blkaddr >= 0) {
+ block = &hw->block[sub_blkaddr];
+ *lf_arr = kmalloc_array(block->lf.max, sizeof(int), GFP_KERNEL);
+ if (!*lf_arr)
+ return 0;
+
+ for (lf = 0; lf < block->lf.max; lf++) {
+ reg = rvu_read64(rvu, sub_blkaddr,
+ addr | lf << addr_off);
+ if (((reg >> shift) & 0xFFFFul) != pcifunc)
+ continue;
+
+ reg &= ~(0xFFFFul << shift);
+ rvu_write64(rvu, sub_blkaddr, addr | lf << addr_off,
+ reg);
+ (*lf_arr)[lf_cnt] = lf;
+ lf_cnt++;
+ }
+ }
+
+ return lf_cnt;
+}
+
+static void rvu_sso_ggrp_taq_flush(struct rvu *rvu, u16 pcifunc, int lf,
+ int slot, int ssow_lf, u64 blkaddr,
+ u64 ssow_blkaddr)
+{
+ int nix_lf_cnt, cpt_lf_cnt, tim_lf_cnt;
+ int *nix_lf, *cpt_lf, *tim_lf;
+ u64 reg, val;
+
+ /* Disable add work. */
+ rvu_write64(rvu, blkaddr, SSO_AF_BAR2_ALIASX(slot, SSO_LF_GGRP_QCTL),
+ 0);
+
+ /* Disable all sources of work. */
+ nix_lf = NULL;
+ nix_lf_cnt = rvu_sso_disable_aw_src(rvu, &nix_lf,
+ rvu_get_blkaddr(rvu, BLKTYPE_NIX,
+ 0),
+ NIX_AF_LF_SSO_PF_FUNC_SHIFT,
+ NIX_AF_LF_CFG_SHIFT, pcifunc,
+ NIX_AF_LFX_CFG(0));
+
+ cpt_lf = NULL;
+ cpt_lf_cnt = rvu_sso_disable_aw_src(rvu, &cpt_lf,
+ rvu_get_blkaddr(rvu, BLKTYPE_CPT,
+ 0),
+ CPT_AF_LF_SSO_PF_FUNC_SHIFT,
+ CPT_AF_LF_CTL2_SHIFT, pcifunc,
+ CPT_AF_LFX_CTL2(0));
+
+ tim_lf = NULL;
+ tim_lf_cnt = rvu_sso_disable_aw_src(rvu, &tim_lf,
+ rvu_get_blkaddr(rvu, BLKTYPE_TIM,
+ 0),
+ TIM_AF_RING_SSO_PF_FUNC_SHIFT,
+ TIM_AF_RING_GMCTL_SHIFT, pcifunc,
+ TIM_AF_RINGX_GMCTL(0));
+
+ /* ZIP and DPI blocks not yet implemented. */
+
+ /* Enable add work. */
+ rvu_write64(rvu, blkaddr, SSO_AF_BAR2_ALIASX(slot, SSO_LF_GGRP_QCTL),
+ 0x1);
+
+ /* Prepare WS for GW operations. */
+ do {
+ reg = rvu_read64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_TAG));
+ } while (reg & BIT_ULL(63));
+
+ if (reg & BIT_ULL(62))
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_OP_DESCHED),
+ 0x0);
+ else if (((reg >> 32) & SSO_TT_EMPTY) != SSO_TT_EMPTY)
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_OP_SWTAG_FLUSH),
+ 0x0);
+
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_OP_GWC_INVAL), 0x0);
+ /* Drain TAQ. */
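+ /* Issue grouped, waiting GET_WORK requests until the group's in-flight
+ * TAQ entry count (TAQ_THR bits 58:48) drains to zero.
+ */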
+ val = slot;
+ val |= BIT_ULL(18);
+ val |= BIT_ULL(16);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_TAQ_THR(lf));
+ while ((reg >> 48) & 0x7FF) {
+ rvu_write64(rvu, blkaddr,
+ SSO_AF_BAR2_ALIASX(slot, SSO_LF_GGRP_OP_ADD_WORK1),
+ 0x1 << 3);
+get_work:
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_OP_GET_WORK),
+ val);
+ do {
+ reg = rvu_read64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0,
+ SSOW_LF_GWS_TAG));
+ } while (reg & BIT_ULL(63));
+
+ if (!rvu_read64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_WQP)))
+ goto get_work;
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_TAQ_THR(lf));
+ }
+
+ reg = rvu_read64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_TAG));
+ if (((reg >> 32) & SSO_TT_EMPTY) != SSO_TT_EMPTY)
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_OP_SWTAG_FLUSH),
+ 0x0);
+
+ /* Disable add work. */
+ rvu_write64(rvu, blkaddr, SSO_AF_BAR2_ALIASX(slot, SSO_LF_GGRP_QCTL),
+ 0x0);
+
+ /* Restore all sources of work. */
+ rvu_sso_enable_aw_src(rvu, nix_lf_cnt, rvu_get_blkaddr(rvu, BLKTYPE_NIX,
+ 0),
+ NIX_AF_LFX_CFG(0), nix_lf, pcifunc,
+ NIX_AF_LF_SSO_PF_FUNC_SHIFT,
+ NIX_AF_LF_CFG_SHIFT);
+ rvu_sso_enable_aw_src(rvu, cpt_lf_cnt, rvu_get_blkaddr(rvu, BLKTYPE_CPT,
+ 0),
+ CPT_AF_LFX_CTL2(0), cpt_lf, pcifunc,
+ CPT_AF_LF_SSO_PF_FUNC_SHIFT,
+ CPT_AF_LF_CTL2_SHIFT);
+ rvu_sso_enable_aw_src(rvu, tim_lf_cnt, rvu_get_blkaddr(rvu, BLKTYPE_TIM,
+ 0),
+ TIM_AF_RINGX_GMCTL(0), tim_lf, pcifunc,
+ TIM_AF_RING_SSO_PF_FUNC_SHIFT,
+ TIM_AF_RING_GMCTL_SHIFT);
+
+ kfree(nix_lf);
+ kfree(cpt_lf);
+ kfree(tim_lf);
+}
+
+int rvu_sso_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot)
+{
+ int ssow_lf, iue, blkaddr, ssow_blkaddr, err;
+ struct sso_rsrc *sso = &rvu->hw->sso;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u64 aq_cnt, ds_cnt, cq_ds_cnt;
+ u64 reg, add, wqp, val;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ /* Enable BAR2 ALIAS for this pcifunc. */
+ reg = BIT_ULL(16) | pcifunc;
+ rvu_write64(rvu, blkaddr, SSO_AF_BAR2_SEL, reg);
+
+ rvu_write64(rvu, blkaddr,
+ SSO_AF_BAR2_ALIASX(slot, SSO_LF_GGRP_INT_THR), 0x0);
+ rvu_write64(rvu, blkaddr,
+ SSO_AF_BAR2_ALIASX(slot, SSO_LF_GGRP_AQ_THR),
+ SSO_LF_GGRP_AQ_THR_MASK);
+
+ rvu_write64(rvu, blkaddr,
+ SSO_AF_BAR2_ALIASX(slot, SSO_LF_GGRP_INT),
+ SSO_LF_GGRP_INT_MASK);
+ rvu_write64(rvu, blkaddr,
+ SSO_AF_BAR2_ALIASX(slot, SSO_LF_GGRP_INT_ENA_W1C),
+ SSO_LF_GGRP_INT_MASK);
+
+ ssow_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSOW, 0);
+ if (ssow_blkaddr < 0)
+ goto af_cleanup;
+ /* Check if an SSOW LF is attached in slot 0; if not, no HWS are attached. */
+ ssow_lf = rvu_get_lf(rvu, &hw->block[ssow_blkaddr], pcifunc, 0);
+ if (ssow_lf < 0)
+ goto af_cleanup;
+
+ rvu_write64(rvu, ssow_blkaddr, SSOW_AF_BAR2_SEL, reg);
+
+ /* Ignore all interrupts */
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_INT_ENA_W1C),
+ SSOW_LF_GWS_INT_MASK);
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_INT),
+ SSOW_LF_GWS_INT_MASK);
+
+ /* Prepare WS for GW operations. */
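+ /* Wait for the GWS to go idle (GWS_TAG[63] clear), then deschedule if
+ * a tag switch is pending (bit 62) or flush a non-EMPTY tag.
+ */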
+ do {
+ reg = rvu_read64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_TAG));
+ } while (reg & BIT_ULL(63));
+
+ if (reg & BIT_ULL(62))
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_OP_DESCHED), 0);
+ else if (((reg >> 32) & SSO_TT_EMPTY) != SSO_TT_EMPTY)
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_OP_SWTAG_FLUSH),
+ 0);
+
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_OP_GWC_INVAL), 0);
+
+ /* Disable add work. */
+ rvu_write64(rvu, blkaddr, SSO_AF_BAR2_ALIASX(slot, SSO_LF_GGRP_QCTL),
+ 0x0);
+
+ /* HRM 14.13.4 (4) */
+ /* Clean up no-scheduled IENT entries and let the work flow. */
+ for (iue = 0; iue < sso->sso_iue; iue++) {
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_IENTX_GRP(iue));
+ if (SSO_AF_HWGRPX_IUEX_NOSCHED(lf, reg)) {
+ wqp = rvu_read64(rvu, blkaddr, SSO_AF_IENTX_WQP(iue));
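+ /* CLR_NSCHED0 takes the entry's WQP and IENT index as a pair. */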
+ rvu_sso_store_pair(wqp, iue, rvu->afreg_base +
+ ((ssow_blkaddr << 28) |
+ SSOW_AF_BAR2_ALIASX(0,
+ SSOW_LF_GWS_OP_CLR_NSCHED0)));
+ }
+ }
+
+ /* HRM 14.13.4 (6) */
+ /* Drain all the work using grouped gw. */
+ aq_cnt = rvu_read64(rvu, blkaddr,
+ SSO_AF_BAR2_ALIASX(slot, SSO_LF_GGRP_AQ_CNT));
+ ds_cnt = rvu_read64(rvu, blkaddr,
+ SSO_AF_BAR2_ALIASX(slot, SSO_LF_GGRP_MISC_CNT));
+ cq_ds_cnt = rvu_read64(rvu, blkaddr,
+ SSO_AF_BAR2_ALIASX(slot, SSO_LF_GGRP_INT_CNT));
+ cq_ds_cnt &= SSO_LF_GGRP_INT_CNT_MASK;
+
+ val = slot; /* GGRP ID */
+ val |= BIT_ULL(18); /* Grouped */
+ val |= BIT_ULL(16); /* WAIT */
+
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_NW_TIM),
+ SSOW_LF_GWS_MAX_NW_TIM);
+
+ while (aq_cnt || cq_ds_cnt || ds_cnt) {
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_OP_GET_WORK),
+ val);
+ do {
+ reg = rvu_read64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0,
+ SSOW_LF_GWS_TAG));
+ } while (reg & BIT_ULL(63));
+ if (((reg >> 32) & SSO_TT_EMPTY) != SSO_TT_EMPTY)
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0,
+ SSOW_LF_GWS_OP_SWTAG_FLUSH),
+ 0x0);
+ aq_cnt = rvu_read64(rvu, blkaddr,
+ SSO_AF_BAR2_ALIASX(slot, SSO_LF_GGRP_AQ_CNT)
+ );
+ ds_cnt = rvu_read64(rvu, blkaddr,
+ SSO_AF_BAR2_ALIASX(slot,
+ SSO_LF_GGRP_MISC_CNT));
+ cq_ds_cnt = rvu_read64(rvu, blkaddr,
+ SSO_AF_BAR2_ALIASX(slot,
+ SSO_LF_GGRP_INT_CNT));
+ /* Extract cq and ds count */
+ cq_ds_cnt &= SSO_LF_GGRP_INT_CNT_MASK;
+ }
+
+ /* Due to the Errata 35432, SSO doesn't release the partially consumed
+ * TAQ buffer used by HWGRP when HWGRP is reset. Use SW routine to
+ * drain it manually.
+ */
+ if (is_rvu_96xx_B0(rvu))
+ rvu_sso_ggrp_taq_flush(rvu, pcifunc, lf, slot, ssow_lf, blkaddr,
+ ssow_blkaddr);
+
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_NW_TIM), 0x0);
+
+ /* HRM 14.13.4 (7) */
+ reg = rvu_read64(rvu, blkaddr,
+ SSO_AF_BAR2_ALIASX(slot, SSO_LF_GGRP_XAQ_CNT))
+ & SSO_LF_GGRP_XAQ_CNT_MASK;
+ if (reg != 0)
+ dev_warn(rvu->dev,
+ "SSO_LF[%d]_GGRP_XAQ_CNT is %lld expected 0", lf, reg);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_PAGE_CNT(lf))
+ & SSO_AF_HWGRP_PAGE_CNT_MASK;
+ if (reg != 0)
+ dev_warn(rvu->dev,
+ "SSO_AF_HWGRP[%d]_PAGE_CNT is %lld expected 0", lf,
+ reg);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_IAQ_THR(lf))
+ >> SSO_HWGRP_IAQ_GRP_CNT_SHIFT;
+ reg &= SSO_HWGRP_IAQ_GRP_CNT_MASK;
+ if (reg != 0)
+ dev_warn(rvu->dev,
+ "SSO_AF_HWGRP[%d]_IAQ_THR is %lld expected 0", lf,
+ reg);
+ rvu_write64(rvu, ssow_blkaddr, SSOW_AF_BAR2_SEL, 0);
+ rvu_write64(rvu, blkaddr, SSO_AF_HWSX_INV(ssow_lf), 0x1);
+
+af_cleanup:
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_UNMAP_INFO);
+ if ((reg & 0xFFF) == pcifunc)
+ rvu_write64(rvu, blkaddr, SSO_AF_ERR0, SSO_AF_ERR0_MASK);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_UNMAP_INFO2);
+ if ((reg & 0xFFF) == pcifunc)
+ rvu_write64(rvu, blkaddr, SSO_AF_ERR2, SSO_AF_ERR2_MASK);
+
+ rvu_write64(rvu, blkaddr, SSO_AF_POISONX(lf / 64), lf % 64);
+ rvu_write64(rvu, blkaddr, SSO_AF_IU_ACCNTX_RST(lf), 0x1);
+
+ err = rvu_poll_reg(rvu, blkaddr, SSO_AF_HWGRPX_AW_STATUS(lf),
+ SSO_HWGRP_AW_STS_NPA_FETCH, true);
+ if (err)
+ dev_warn(rvu->dev,
+ "SSO_HWGRP(%d)_AW_STATUS[NPA_FETCH] not cleared", lf);
+
+ /* Remove all pointers from XAQ, HRM 14.13.6 */
+ rvu_write64(rvu, blkaddr, SSO_AF_ERR0_ENA_W1C, ~0ULL);
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_AW_CFG(lf));
+ reg = (reg & ~SSO_HWGRP_AW_CFG_RWEN) | SSO_HWGRP_AW_CFG_XAQ_BYP_DIS;
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_AW_CFG(lf), reg);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_AW_STATUS(lf));
+ if (reg & SSO_HWGRP_AW_STS_TPTR_VLD) {
+ /* aura will be torn down, no need to free the pointer. */
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_AW_STATUS(lf),
+ SSO_HWGRP_AW_STS_TPTR_VLD);
+ }
+
+ err = rvu_poll_reg(rvu, blkaddr, SSO_AF_HWGRPX_AW_STATUS(lf),
+ SSO_HWGRP_AW_STS_XAQ_BUFSC_MASK, true);
+ if (err) {
+ dev_warn(rvu->dev,
+ "SSO_HWGRP(%d)_AW_STATUS[XAQ_BUF_CACHED] not cleared",
+ lf);
+ return err;
+ }
+
+ rvu_write64(rvu, blkaddr, SSO_AF_ERR0, ~0ULL);
+ /* Re-enable error reporting once we're finished */
+ rvu_write64(rvu, blkaddr, SSO_AF_ERR0_ENA_W1S, ~0ULL);
+
+ /* HRM 14.13.4 (13) */
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_AW_STATUS(lf), 0x0);
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_AW_CFG(lf),
+ SSO_HWGRP_AW_CFG_LDWB | SSO_HWGRP_AW_CFG_LDT |
+ SSO_HWGRP_AW_CFG_STT);
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_XAQ_AURA(lf), 0x0);
+ rvu_write64(rvu, blkaddr, SSO_AF_XAQX_GMCTL(lf), 0x0);
+ reg = (SSO_HWGRP_PRI_AFF_MASK << SSO_HWGRP_PRI_AFF_SHIFT) |
+ (SSO_HWGRP_PRI_WGT_MASK << SSO_HWGRP_PRI_WGT_SHIFT) |
+ (0x1 << SSO_HWGRP_PRI_WGT_SHIFT);
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_PRI(lf), reg);
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_WS_PC(lf), 0x0);
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_EXT_PC(lf), 0x0);
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_WA_PC(lf), 0x0);
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_TS_PC(lf), 0x0);
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_DS_PC(lf), 0x0);
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_XAQ_LIMIT(lf), 0x0);
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_IU_ACCNT(lf), 0x0);
+
+ /* The delta between the current and default thresholds
+ * needs to be returned to the SSO.
+ */
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_IAQ_THR(lf)) &
+ SSO_HWGRP_IAQ_RSVD_THR_MASK;
+ add = SSO_HWGRP_IAQ_RSVD_THR - reg;
+ reg = (SSO_HWGRP_IAQ_MAX_THR_MASK << SSO_HWGRP_IAQ_MAX_THR_SHIFT) |
+ SSO_HWGRP_IAQ_RSVD_THR;
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_IAQ_THR(lf), reg);
+
+ if (add)
+ rvu_write64(rvu, blkaddr, SSO_AF_AW_ADD,
+ (add & SSO_AF_AW_ADD_RSVD_FREE_MASK) <<
+ SSO_AF_AW_ADD_RSVD_FREE_SHIFT);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_TAQ_THR(lf)) &
+ SSO_HWGRP_TAQ_RSVD_THR_MASK;
+ add = SSO_HWGRP_TAQ_RSVD_THR - reg;
+ reg = (SSO_HWGRP_TAQ_MAX_THR_MASK << SSO_HWGRP_TAQ_MAX_THR_SHIFT) |
+ SSO_HWGRP_TAQ_RSVD_THR;
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_TAQ_THR(lf), reg);
+ if (add)
+ rvu_write64(rvu, blkaddr, SSO_AF_TAQ_ADD,
+ (add & SSO_AF_TAQ_RSVD_FREE_MASK) <<
+ SSO_AF_TAQ_ADD_RSVD_FREE_SHIFT);
+
+ rvu_write64(rvu, blkaddr, SSO_AF_XAQX_HEAD_PTR(lf), 0x0);
+ rvu_write64(rvu, blkaddr, SSO_AF_XAQX_TAIL_PTR(lf), 0x0);
+ rvu_write64(rvu, blkaddr, SSO_AF_XAQX_HEAD_NEXT(lf), 0x0);
+ rvu_write64(rvu, blkaddr, SSO_AF_XAQX_TAIL_NEXT(lf), 0x0);
+
+ rvu_write64(rvu, blkaddr, SSO_AF_BAR2_SEL, 0);
+
+ return 0;
+}
+
+int rvu_ssow_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot)
+{
+ int blkaddr, ssow_blkaddr;
+ u64 reg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return SSOW_AF_ERR_LF_INVALID;
+
+ ssow_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSOW, 0);
+ if (ssow_blkaddr < 0)
+ return SSOW_AF_ERR_LF_INVALID;
+
+ /* Enable BAR2 alias access. */
+ reg = BIT_ULL(16) | pcifunc;
+ rvu_write64(rvu, ssow_blkaddr, SSOW_AF_BAR2_SEL, reg);
+
+ /* Ignore all interrupts */
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_INT_ENA_W1C),
+ SSOW_LF_GWS_INT_MASK);
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_INT),
+ SSOW_LF_GWS_INT_MASK);
+
+ /* HRM 14.13.4 (3) */
+ /* Wait till waitw/desched completes. */
+ do {
+ reg = rvu_read64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(slot,
+ SSOW_LF_GWS_PENDSTATE));
+ } while (reg & (BIT_ULL(63) | BIT_ULL(58)));
+
+ reg = rvu_read64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(slot, SSOW_LF_GWS_TAG));
+ /* Switch Tag Pending */
+ if (reg & BIT_ULL(62))
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(slot, SSOW_LF_GWS_OP_DESCHED),
+ 0x0);
+ /* Tag Type != EMPTY use swtag_flush to release tag-chain. */
+ else if (((reg >> 32) & SSO_TT_EMPTY) != SSO_TT_EMPTY)
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(slot,
+ SSOW_LF_GWS_OP_SWTAG_FLUSH),
+ 0x0);
+
+ /* Wait for desched to complete. */
+ do {
+ reg = rvu_read64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(slot,
+ SSOW_LF_GWS_PENDSTATE));
+ } while (reg & BIT_ULL(58));
+
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_NW_TIM), 0x0);
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_OP_GWC_INVAL), 0x0);
+
+ /* set SAI_INVAL bit */
+ rvu_write64(rvu, blkaddr, SSO_AF_HWSX_INV(lf), 0x1);
+ rvu_write64(rvu, blkaddr, SSO_AF_HWSX_ARB(lf), 0x0);
+ rvu_write64(rvu, blkaddr, SSO_AF_HWSX_GMCTL(lf), 0x0);
+
+ rvu_write64(rvu, ssow_blkaddr, SSOW_AF_BAR2_SEL, 0x0);
+
+ return 0;
+}
+
+int rvu_mbox_handler_sso_hw_setconfig(struct rvu *rvu,
+ struct sso_hw_setconfig *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int hwgrp, lf, err, blkaddr;
+ u32 npa_aura_id;
+ u64 reg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, pcifunc);
+ if (blkaddr < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ npa_aura_id = req->npa_aura_id;
+
+ /* Check if requested 'SSOLF <=> NPALF' mapping is valid */
+ if (req->npa_pf_func) {
+ /* If default, use 'this' SSOLF's PFFUNC */
+ if (req->npa_pf_func == RVU_DEFAULT_PF_FUNC)
+ req->npa_pf_func = pcifunc;
+ if (!is_pffunc_map_valid(rvu, req->npa_pf_func, BLKTYPE_NPA))
+ return SSO_AF_INVAL_NPA_PF_FUNC;
+ }
+
+ /* Initialize XAQ ring */
+ for (hwgrp = 0; hwgrp < req->hwgrps; hwgrp++) {
+ lf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, hwgrp);
+ if (lf < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_AW_STATUS(lf));
+ if (reg & SSO_HWGRP_AW_STS_XAQ_BUFSC_MASK || reg & BIT_ULL(3)) {
+ reg = rvu_read64(rvu, blkaddr,
+ SSO_AF_HWGRPX_AW_CFG(lf));
+ reg = (reg & ~SSO_HWGRP_AW_CFG_RWEN) |
+ SSO_HWGRP_AW_CFG_XAQ_BYP_DIS;
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_AW_CFG(lf),
+ reg);
+
+ reg = rvu_read64(rvu, blkaddr,
+ SSO_AF_HWGRPX_AW_STATUS(lf));
+ if (reg & SSO_HWGRP_AW_STS_TPTR_VLD) {
+ rvu_poll_reg(rvu, blkaddr,
+ SSO_AF_HWGRPX_AW_STATUS(lf),
+ SSO_HWGRP_AW_STS_NPA_FETCH, true);
+
+ rvu_write64(rvu, blkaddr,
+ SSO_AF_HWGRPX_AW_STATUS(lf),
+ SSO_HWGRP_AW_STS_TPTR_VLD);
+ }
+
+ if (rvu_poll_reg(rvu, blkaddr,
+ SSO_AF_HWGRPX_AW_STATUS(lf),
+ SSO_HWGRP_AW_STS_XAQ_BUFSC_MASK, true))
+ dev_warn(rvu->dev,
+ "SSO_HWGRP(%d)_AW_STATUS[XAQ_BUF_CACHED] not cleared",
+ lf);
+ }
+
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_XAQ_AURA(lf),
+ npa_aura_id);
+ rvu_write64(rvu, blkaddr, SSO_AF_XAQX_GMCTL(lf),
+ req->npa_pf_func);
+
+ /* enable XAQ */
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_AW_CFG(lf), 0xF);
+
+ /* Wait for ggrp to ack. */
+ err = rvu_poll_reg(rvu, blkaddr,
+ SSO_AF_HWGRPX_AW_STATUS(lf),
+ SSO_HWGRP_AW_STS_INIT_STS, false);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_AW_STATUS(lf));
+ if (err || (reg & BIT_ULL(4)) || !(reg & BIT_ULL(8))) {
+ dev_warn(rvu->dev, "SSO_HWGRP(%d) XAQ NPA pointer initialization failed",
+ lf);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+int rvu_mbox_handler_sso_grp_set_priority(struct rvu *rvu,
+ struct sso_grp_priority *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int lf, blkaddr;
+ u64 regval;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, pcifunc);
+ if (blkaddr < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ regval = (((u64)(req->weight & SSO_HWGRP_PRI_WGT_MASK)
+ << SSO_HWGRP_PRI_WGT_SHIFT) |
+ ((u64)(req->affinity & SSO_HWGRP_PRI_AFF_MASK)
+ << SSO_HWGRP_PRI_AFF_SHIFT) |
+ (req->priority & SSO_HWGRP_PRI_MASK));
+
+ lf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, req->grp);
+ if (lf < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_PRI(lf), regval);
+
+ return 0;
+}
+
+int rvu_mbox_handler_sso_grp_get_priority(struct rvu *rvu,
+ struct sso_info_req *req,
+ struct sso_grp_priority *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int lf, blkaddr;
+ u64 regval;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, pcifunc);
+ if (blkaddr < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ lf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, req->grp);
+ if (lf < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ regval = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_PRI(lf));
+
+ rsp->weight = (regval >> SSO_HWGRP_PRI_WGT_SHIFT)
+ & SSO_HWGRP_PRI_WGT_MASK;
+ rsp->affinity = (regval >> SSO_HWGRP_PRI_AFF_SHIFT)
+ & SSO_HWGRP_PRI_AFF_MASK;
+ rsp->priority = regval & SSO_HWGRP_PRI_MASK;
+
+ return 0;
+}
+
+int rvu_mbox_handler_sso_grp_qos_config(struct rvu *rvu,
+ struct sso_grp_qos_cfg *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ u64 regval, grp_rsvd;
+ int lf, blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, pcifunc);
+ if (blkaddr < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ lf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, req->grp);
+ if (lf < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ /* Check if GGRP has been active. */
+ regval = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_WA_PC(lf));
+ if (regval)
+ return SSO_AF_ERR_GRP_EBUSY;
+
+ /* Configure XAQ threshold */
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_XAQ_LIMIT(lf), req->xaq_limit);
+
+ /* Configure TAQ threshold */
+ regval = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_TAQ_THR(lf));
+ grp_rsvd = regval & SSO_HWGRP_TAQ_RSVD_THR_MASK;
+ if (req->taq_thr < grp_rsvd)
+ req->taq_thr = grp_rsvd;
+
+ regval = req->taq_thr & SSO_HWGRP_TAQ_MAX_THR_MASK;
+ regval = (regval << SSO_HWGRP_TAQ_MAX_THR_SHIFT) | grp_rsvd;
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_TAQ_THR(lf), regval);
+
+ /* Configure IAQ threshold */
+ regval = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_IAQ_THR(lf));
+ grp_rsvd = regval & SSO_HWGRP_IAQ_RSVD_THR_MASK;
+ if (req->iaq_thr < grp_rsvd + 4)
+ req->iaq_thr = grp_rsvd + 4;
+
+ regval = req->iaq_thr & SSO_HWGRP_IAQ_MAX_THR_MASK;
+ regval = (regval << SSO_HWGRP_IAQ_MAX_THR_SHIFT) | grp_rsvd;
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_IAQ_THR(lf), regval);
+
+ return 0;
+}
+
+int rvu_mbox_handler_sso_grp_get_stats(struct rvu *rvu,
+ struct sso_info_req *req,
+ struct sso_grp_stats *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int lf, blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, pcifunc);
+ if (blkaddr < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ lf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, req->grp);
+ if (lf < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ rsp->ws_pc = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_WS_PC(lf));
+ rsp->ext_pc = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_EXT_PC(lf));
+ rsp->wa_pc = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_WA_PC(lf));
+ rsp->ts_pc = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_TS_PC(lf));
+ rsp->ds_pc = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_DS_PC(lf));
+ rsp->dq_pc = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_DQ_PC(lf));
+ rsp->aw_status = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_AW_STATUS(lf));
+ rsp->page_cnt = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_PAGE_CNT(lf));
+
+ return 0;
+}
+
+int rvu_mbox_handler_sso_hws_get_stats(struct rvu *rvu,
+ struct sso_info_req *req,
+ struct sso_hws_stats *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int lf, blkaddr, ssow_blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, pcifunc);
+ if (blkaddr < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ ssow_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSOW, pcifunc);
+ if (ssow_blkaddr < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ lf = rvu_get_lf(rvu, &hw->block[ssow_blkaddr], pcifunc, req->hws);
+ if (lf < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ rsp->arbitration = rvu_read64(rvu, blkaddr, SSO_AF_HWSX_ARB(lf));
+
+ return 0;
+}
+
+int rvu_mbox_handler_sso_lf_alloc(struct rvu *rvu, struct sso_lf_alloc_req *req,
+ struct sso_lf_alloc_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int ssolf, uniq_ident, rc = 0;
+ struct rvu_pfvf *pfvf;
+ int hwgrp, blkaddr;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, pcifunc);
+ if (pfvf->sso <= 0 || blkaddr < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ if (!pfvf->sso_uniq_ident) {
+ uniq_ident = rvu_alloc_rsrc(&hw->sso.pfvf_ident);
+ if (uniq_ident < 0) {
+ rc = SSO_AF_ERR_AF_LF_ALLOC;
+ goto exit;
+ }
+ pfvf->sso_uniq_ident = uniq_ident;
+ } else {
+ uniq_ident = pfvf->sso_uniq_ident;
+ }
+
+ /* Set threshold for the In-Unit Accounting Index */
+ rvu_write64(rvu, blkaddr, SSO_AF_IU_ACCNTX_CFG(uniq_ident),
+ SSO_AF_HWGRP_IU_ACCNT_MAX_THR << 16);
+
+ for (hwgrp = 0; hwgrp < req->hwgrps; hwgrp++) {
+ ssolf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, hwgrp);
+ if (ssolf < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ /* All groups assigned to a single SR-IOV function must be
+ * assigned the same unique in-unit accounting index.
+ */
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_IU_ACCNT(ssolf),
+ 0x10000 | uniq_ident);
+
+ /* Assign unique tagspace */
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_AW_TAGSPACE(ssolf),
+ uniq_ident);
+ }
+
+exit:
+ rsp->xaq_buf_size = hw->sso.sso_xaq_buf_size;
+ rsp->xaq_wq_entries = hw->sso.sso_xaq_num_works;
+ rsp->in_unit_entries = hw->sso.sso_iue;
+ rsp->hwgrps = hw->sso.sso_hwgrps;
+ return rc;
+}
+
+int rvu_mbox_handler_sso_lf_free(struct rvu *rvu, struct sso_lf_free_req *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int hwgrp, lf, err, blkaddr;
+ struct rvu_pfvf *pfvf;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, pcifunc);
+ if (blkaddr < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ /* Perform reset of SSO HW GRPs */
+ for (hwgrp = 0; hwgrp < req->hwgrps; hwgrp++) {
+ lf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, hwgrp);
+ if (lf < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ err = rvu_sso_lf_teardown(rvu, pcifunc, lf, hwgrp);
+ if (err)
+ return err;
+
+ /* Reset this SSO LF */
+ err = rvu_lf_reset(rvu, &hw->block[blkaddr], lf);
+ if (err)
+ dev_err(rvu->dev, "SSO%d free: failed to reset\n", lf);
+ /* Reset the IAQ and TAQ thresholds */
+ rvu_sso_hwgrp_config_thresh(rvu, blkaddr, lf);
+ }
+
+ if (pfvf->sso_uniq_ident) {
+ rvu_free_rsrc(&hw->sso.pfvf_ident, pfvf->sso_uniq_ident);
+ pfvf->sso_uniq_ident = 0;
+ }
+
+ return 0;
+}
+
+int rvu_mbox_handler_sso_ws_cache_inv(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ int num_lfs, ssowlf, hws, blkaddr;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_block *block;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSOW, pcifunc);
+ if (blkaddr < 0)
+ return SSOW_AF_ERR_LF_INVALID;
+
+ block = &hw->block[blkaddr];
+
+ num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
+ block->type);
+ if (!num_lfs)
+ return SSOW_AF_ERR_LF_INVALID;
+
+ /* SSO HWS invalidate registers are part of SSO AF */
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, pcifunc);
+ if (blkaddr < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ for (hws = 0; hws < num_lfs; hws++) {
+ ssowlf = rvu_get_lf(rvu, block, pcifunc, hws);
+ if (ssowlf < 0)
+ return SSOW_AF_ERR_LF_INVALID;
+
+ /* Reset this SSO LF GWS cache */
+ rvu_write64(rvu, blkaddr, SSO_AF_HWSX_INV(ssowlf), 1);
+ }
+
+ return 0;
+}
+
+int rvu_mbox_handler_ssow_lf_alloc(struct rvu *rvu,
+ struct ssow_lf_alloc_req *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ if (pfvf->ssow <= 0)
+ return SSOW_AF_ERR_LF_INVALID;
+
+ return 0;
+}
+
+int rvu_mbox_handler_ssow_lf_free(struct rvu *rvu,
+ struct ssow_lf_free_req *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int ssowlf, hws, err, blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSOW, pcifunc);
+ if (blkaddr < 0)
+ return SSOW_AF_ERR_LF_INVALID;
+
+ for (hws = 0; hws < req->hws; hws++) {
+ ssowlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, hws);
+ if (ssowlf < 0)
+ return SSOW_AF_ERR_LF_INVALID;
+
+ err = rvu_ssow_lf_teardown(rvu, pcifunc, ssowlf, hws);
+ if (err)
+ return err;
+
+ /* Reset this SSO LF */
+ err = rvu_lf_reset(rvu, &hw->block[blkaddr], ssowlf);
+ if (err)
+ dev_err(rvu->dev, "SSOW%d free: failed to reset\n",
+ ssowlf);
+ }
+
+ return 0;
+}
+
+static int rvu_sso_do_register_interrupt(struct rvu *rvu, int irq_offs,
+ irq_handler_t handler,
+ const char *name)
+{
+ int ret = 0;
+
+ ret = request_irq(pci_irq_vector(rvu->pdev, irq_offs), handler, 0,
+ name, rvu);
+ if (ret) {
+ dev_err(rvu->dev, "SSOAF: %s irq registration failed", name);
+ goto err;
+ }
+
+ WARN_ON(rvu->irq_allocated[irq_offs]);
+ rvu->irq_allocated[irq_offs] = true;
+err:
+ return ret;
+}
+
+static irqreturn_t rvu_sso_af_err0_intr_handler(int irq, void *ptr)
+{
+ struct rvu *rvu = (struct rvu *)ptr;
+ struct rvu_block *block;
+ int i, blkaddr;
+ u64 reg, reg0;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ block = &rvu->hw->block[blkaddr];
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_ERR0);
+ dev_err(rvu->dev, "Received SSO_AF_ERR0 irq : 0x%llx", reg);
+
+ if (reg & BIT_ULL(15)) {
+ dev_err(rvu->dev, "Received Bad-fill-packet NCB error");
+ SSO_AF_INT_DIGEST_PRNT(SSO_AF_POISON)
+ }
+
+ if (reg & BIT_ULL(14)) {
+ dev_err(rvu->dev, "An FLR was initiated, but SSO_LF_GGRP_AQ_CNT[AQ_CNT] != 0");
+ SSO_AF_INT_DIGEST_PRNT(SSO_AF_FLR_AQ_DIGEST)
+ }
+
+ if (reg & BIT_ULL(13)) {
+ dev_err(rvu->dev, "Add work dropped due to XAQ pointers not yet initialized.");
+ SSO_AF_INT_DIGEST_PRNT(SSO_AF_XAQDIS_DIGEST)
+ }
+
+ if (reg & (0xF << 9)) {
+ dev_err(rvu->dev, "PF_FUNC mapping error.");
+ dev_err(rvu->dev, "SSO_AF_UNMAP_INFO : 0x%llx",
+ rvu_read64(rvu, blkaddr, SSO_AF_UNMAP_INFO));
+ }
+
+ if (reg & BIT_ULL(8)) {
+ dev_err(rvu->dev, "Add work dropped due to QTL being disabled, 0x0");
+ SSO_AF_INT_DIGEST_PRNT(SSO_AF_QCTLDIS_DIGEST)
+ }
+
+ if (reg & BIT_ULL(7)) {
+ dev_err(rvu->dev, "Add work dropped due to WQP being 0x0");
+ SSO_AF_INT_DIGEST_PRNT(SSO_AF_WQP0_DIGEST)
+ }
+
+ if (reg & BIT_ULL(6))
+ dev_err(rvu->dev, "Add work dropped due to 64 bit write");
+
+ if (reg & BIT_ULL(5))
+ dev_err(rvu->dev, "Set when received add work with tag type is specified as EMPTY");
+
+ if (reg & BIT_ULL(4)) {
+ dev_err(rvu->dev, "Add work to disabled hardware group. An ADDWQ was received and dropped to a hardware group with SSO_AF_HWGRP(0..255)_IAQ_THR[RSVD_THR] = 0.");
+ SSO_AF_INT_DIGEST_PRNT(SSO_AF_GRPDIS_DIGEST)
+ }
+
+ if (reg & BIT_ULL(3)) {
+ dev_err(rvu->dev, "Bad-fill-packet NCB error");
+ SSO_AF_INT_DIGEST_PRNT(SSO_AF_BFPN_DIGEST)
+ }
+
+ if (reg & BIT_ULL(2)) {
+ dev_err(rvu->dev, "Bad-fill-packet error.");
+ SSO_AF_INT_DIGEST_PRNT(SSO_AF_BFP_DIGEST)
+ }
+
+ if (reg & BIT_ULL(1)) {
+ dev_err(rvu->dev, "The NPA returned an error indication");
+ SSO_AF_INT_DIGEST_PRNT(SSO_AF_NPA_DIGEST)
+ }
+
+ rvu_write64(rvu, blkaddr, SSO_AF_ERR0, reg);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rvu_sso_af_err2_intr_handler(int irq, void *ptr)
+{
+ struct rvu *rvu = (struct rvu *)ptr;
+ int blkaddr;
+ u64 reg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_ERR2);
+ dev_err(rvu->dev, "received SSO_AF_ERR2 irq : 0x%llx", reg);
+ rvu_write64(rvu, blkaddr, SSO_AF_ERR2, reg);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rvu_sso_af_ras_intr_handler(int irq, void *ptr)
+{
+ struct rvu *rvu = (struct rvu *)ptr;
+ struct rvu_block *block;
+ int i, blkaddr;
+ u64 reg, reg0;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ block = &rvu->hw->block[blkaddr];
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_RAS);
+ dev_err(rvu->dev, "received SSO_AF_RAS irq : 0x%llx", reg);
+ rvu_write64(rvu, blkaddr, SSO_AF_RAS, reg);
+ SSO_AF_INT_DIGEST_PRNT(SSO_AF_POISON)
+
+ return IRQ_HANDLED;
+}
+
+void rvu_sso_unregister_interrupts(struct rvu *rvu)
+{
+ int i, blkaddr, offs;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return;
+
+ offs = rvu_read64(rvu, blkaddr, SSO_PRIV_AF_INT_CFG) & 0x7FF;
+ if (!offs)
+ return;
+
+ rvu_write64(rvu, blkaddr, SSO_AF_RAS_ENA_W1C, ~0ULL);
+ rvu_write64(rvu, blkaddr, SSO_AF_ERR2_ENA_W1C, ~0ULL);
+ rvu_write64(rvu, blkaddr, SSO_AF_ERR0_ENA_W1C, ~0ULL);
+
+ for (i = 0; i < SSO_AF_INT_VEC_CNT; i++)
+ if (rvu->irq_allocated[offs + i]) {
+ free_irq(pci_irq_vector(rvu->pdev, offs + i), rvu);
+ rvu->irq_allocated[offs + i] = false;
+ }
+}
+
+int rvu_sso_register_interrupts(struct rvu *rvu)
+{
+ int blkaddr, offs, ret = 0;
+
+ if (!is_block_implemented(rvu->hw, BLKADDR_SSO))
+ return 0;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ offs = rvu_read64(rvu, blkaddr, SSO_PRIV_AF_INT_CFG) & 0x7FF;
+ if (!offs) {
+ dev_warn(rvu->dev,
+ "Failed to get SSO_AF_INT vector offsets\n");
+ return 0;
+ }
+
+ ret = rvu_sso_do_register_interrupt(rvu, offs + SSO_AF_INT_VEC_ERR0,
+ rvu_sso_af_err0_intr_handler,
+ "SSO_AF_ERR0");
+ if (ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, SSO_AF_ERR0_ENA_W1S, ~0ULL);
+
+ ret = rvu_sso_do_register_interrupt(rvu, offs + SSO_AF_INT_VEC_ERR2,
+ rvu_sso_af_err2_intr_handler,
+ "SSO_AF_ERR2");
+ if (ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, SSO_AF_ERR2_ENA_W1S, ~0ULL);
+
+ ret = rvu_sso_do_register_interrupt(rvu, offs + SSO_AF_INT_VEC_RAS,
+ rvu_sso_af_ras_intr_handler,
+ "SSO_AF_RAS");
+ if (ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, SSO_AF_RAS_ENA_W1S, ~0ULL);
+
+ return 0;
+err:
+ rvu_sso_unregister_interrupts(rvu);
+ return ret;
+}
+
+int rvu_sso_init(struct rvu *rvu)
+{
+ u64 iaq_free_cnt, iaq_rsvd, iaq_max, iaq_rsvd_cnt = 0;
+ u64 taq_free_cnt, taq_rsvd, taq_max, taq_rsvd_cnt = 0;
+ struct sso_rsrc *sso = &rvu->hw->sso;
+ int blkaddr, hwgrp, grpmsk, hws, err;
+ u64 reg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return 0;
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_CONST);
+ /* number of SSO hardware work slots */
+ sso->sso_hws = (reg >> 56) & 0xFF;
+ /* number of SSO hardware groups */
+ sso->sso_hwgrps = (reg & 0xFFFF);
+ /* number of SSO In-Unit entries */
+ sso->sso_iue = (reg >> 16) & 0xFFFF;
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_CONST1);
+ /* number of work entries in external admission queue (XAQ) */
+ sso->sso_xaq_num_works = (reg >> 16) & 0xFFFF;
+ /* number of bytes in a XAQ buffer */
+ sso->sso_xaq_buf_size = (reg & 0xFFFF);
+
+ /* Configure IAQ entries */
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_AW_WE);
+ iaq_free_cnt = reg & SSO_AF_IAQ_FREE_CNT_MASK;
+
+ /* Give out half of buffers fairly, rest left floating */
+ iaq_rsvd = iaq_free_cnt / sso->sso_hwgrps / 2;
+
+ /* Enforce minimum per hardware requirements */
+ if (iaq_rsvd < SSO_HWGRP_IAQ_RSVD_THR)
+ iaq_rsvd = SSO_HWGRP_IAQ_RSVD_THR;
+ /* To ensure full streaming performance, this should be at least 208. */
+ iaq_max = iaq_rsvd + SSO_HWGRP_IAQ_MAX_THR_STRM_PERF;
+
+ if (iaq_max >= (SSO_AF_IAQ_FREE_CNT_MAX + 1))
+ iaq_max = SSO_AF_IAQ_FREE_CNT_MAX;
+
+ /* Configure TAQ entries */
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_TAQ_CNT);
+ taq_free_cnt = reg & SSO_AF_TAQ_FREE_CNT_MASK;
+
+ /* Give out half of buffers fairly, rest left floating */
+ taq_rsvd = taq_free_cnt / sso->sso_hwgrps / 2;
+
+ /* Enforce minimum per hardware requirements */
+ if (taq_rsvd < SSO_HWGRP_TAQ_RSVD_THR)
+ taq_rsvd = SSO_HWGRP_TAQ_RSVD_THR;
+ /* To ensure full streaming performance, this should be at least 16. */
+ taq_max = taq_rsvd + SSO_HWGRP_TAQ_MAX_THR_STRM_PERF;
+
+ if (taq_max >= (SSO_AF_TAQ_FREE_CNT_MAX + 1))
+ taq_max = SSO_AF_TAQ_FREE_CNT_MAX;
+
+ /* Save thresholds to reprogram HWGRPs on reset */
+ sso->iaq_rsvd = iaq_rsvd;
+ sso->iaq_max = iaq_max;
+ sso->taq_rsvd = taq_rsvd;
+ sso->taq_max = taq_max;
+
+ for (hwgrp = 0; hwgrp < sso->sso_hwgrps; hwgrp++) {
+ rvu_sso_hwgrp_config_thresh(rvu, blkaddr, hwgrp);
+ iaq_rsvd_cnt += iaq_rsvd;
+ taq_rsvd_cnt += taq_rsvd;
+ }
+
+ /* Verify SSO_AW_WE[RSVD_FREE], TAQ_CNT[RSVD_FREE] are greater than
+ * or equal to the sum of the IAQ[RSVD_THR], TAQ[RSVD_THR] fields.
+ */
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_AW_WE);
+ reg = (reg >> SSO_AF_IAQ_RSVD_FREE_SHIFT) & SSO_AF_IAQ_RSVD_FREE_MASK;
+ if (reg < iaq_rsvd_cnt) {
+ dev_warn(rvu->dev, "WARN: Wrong IAQ resource calculations %llx vs %llx\n",
+ reg, iaq_rsvd_cnt);
+ rvu_write64(rvu, blkaddr, SSO_AF_AW_WE,
+ (iaq_rsvd_cnt & SSO_AF_IAQ_RSVD_FREE_MASK) <<
+ SSO_AF_IAQ_RSVD_FREE_SHIFT);
+ }
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_TAQ_CNT);
+ reg = (reg >> SSO_AF_TAQ_RSVD_FREE_SHIFT) & SSO_AF_TAQ_RSVD_FREE_MASK;
+ if (reg < taq_rsvd_cnt) {
+ dev_warn(rvu->dev, "WARN: Wrong TAQ resource calculations %llx vs %llx\n",
+ reg, taq_rsvd_cnt);
+ rvu_write64(rvu, blkaddr, SSO_AF_TAQ_CNT,
+ (taq_rsvd_cnt & SSO_AF_TAQ_RSVD_FREE_MASK) <<
+ SSO_AF_TAQ_RSVD_FREE_SHIFT);
+ }
+
+ /* Unset the HWS Hardware Group Mask.
+ * The hardware group mask should be set by PF/VF
+ * using SSOW_LF_GWS_GRPMSK_CHG based on the LF allocations.
+ */
+ for (grpmsk = 0; grpmsk < (sso->sso_hwgrps / 64); grpmsk++) {
+ for (hws = 0; hws < sso->sso_hws; hws++) {
+ rvu_write64(rvu, blkaddr,
+ SSO_AF_HWSX_SX_GRPMSKX(hws, 0, grpmsk),
+ 0x0);
+ rvu_write64(rvu, blkaddr,
+ SSO_AF_HWSX_SX_GRPMSKX(hws, 1, grpmsk),
+ 0x0);
+ }
+ }
+
+ /* Allocate SSO_AF_CONST::HWS + 1, as the total number of PFs/VFs is
+ * limited by the number of HWS available.
+ */
+ sso->pfvf_ident.max = sso->sso_hws + 1;
+ err = rvu_alloc_bitmap(&sso->pfvf_ident);
+ if (err)
+ return err;
+
+ /* Reserve one bit so that identifier starts from 1 */
+ rvu_alloc_rsrc(&sso->pfvf_ident);
+
+ return 0;
+}
+
+void rvu_sso_freemem(struct rvu *rvu)
+{
+ struct sso_rsrc *sso = &rvu->hw->sso;
+
+ kfree(sso->pfvf_ident.bmap);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
index a3ecb5de9000..67d4abd6ed25 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
@@ -60,6 +60,43 @@ enum rvu_af_int_vec_e {
RVU_AF_INT_VEC_CNT = 0x5,
};
+/* NPA Admin function Interrupt Vector Enumeration */
+enum npa_af_int_vec_e {
+ NPA_AF_INT_VEC_RVU = 0x0,
+ NPA_AF_INT_VEC_GEN = 0x1,
+ NPA_AF_INT_VEC_AQ_DONE = 0x2,
+ NPA_AF_INT_VEC_AF_ERR = 0x3,
+ NPA_AF_INT_VEC_POISON = 0x4,
+ NPA_AF_INT_VEC_CNT = 0x5,
+};
+
+/* NIX Admin function Interrupt Vector Enumeration */
+enum nix_af_int_vec_e {
+ NIX_AF_INT_VEC_RVU = 0x0,
+ NIX_AF_INT_VEC_GEN = 0x1,
+ NIX_AF_INT_VEC_AQ_DONE = 0x2,
+ NIX_AF_INT_VEC_AF_ERR = 0x3,
+ NIX_AF_INT_VEC_POISON = 0x4,
+ NIX_AF_INT_VEC_CNT = 0x5,
+};
+
+/* SSO Admin function Interrupt Vector Enumeration */
+enum sso_af_int_vec_e {
+ SSO_AF_INT_VEC_ERR0 = 0x0,
+ SSO_AF_INT_VEC_ERR2 = 0x1,
+ SSO_AF_INT_VEC_RAS = 0x2,
+ SSO_AF_INT_VEC_CNT = 0x3,
+};
+
+/* CPT Admin function Interrupt Vector Enumeration */
+enum cpt_af_int_vec_e {
+ CPT_AF_INT_VEC_FLT0 = 0x0,
+ CPT_AF_INT_VEC_FLT1 = 0x1,
+ CPT_AF_INT_VEC_RVU = 0x2,
+ CPT_AF_INT_VEC_RAS = 0x3,
+ CPT_AF_INT_VEC_CNT = 0x4,
+};
+
/**
* RVU PF Interrupt Vector Enumeration
*/
@@ -100,6 +137,19 @@ enum npa_aq_instop {
NPA_AQ_INSTOP_UNLOCK = 0x5,
};
+/* ALLOC/FREE input queues Enumeration from coprocessors */
+enum npa_inpq {
+ NPA_INPQ_NIX0_RX = 0x0,
+ NPA_INPQ_NIX0_TX = 0x1,
+ NPA_INPQ_NIX1_RX = 0x2,
+ NPA_INPQ_NIX1_TX = 0x3,
+ NPA_INPQ_SSO = 0x4,
+ NPA_INPQ_TIM = 0x5,
+ NPA_INPQ_DPI = 0x6,
+ NPA_INPQ_AURA_OP = 0xe,
+ NPA_INPQ_INTERNAL_RSV = 0xf,
+};
+
/* NPA admin queue instruction structure */
struct npa_aq_inst_s {
#if defined(__BIG_ENDIAN_BITFIELD)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_tim.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_tim.c
new file mode 100644
index 000000000000..5c7e219c6656
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_tim.c
@@ -0,0 +1,340 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/types.h>
+
+#include "rvu_struct.h"
+#include "rvu_reg.h"
+#include "rvu.h"
+
+#define TIM_CHUNKSIZE_MULTIPLE (16)
+#define TIM_CHUNKSIZE_MIN (TIM_CHUNKSIZE_MULTIPLE * 0x2)
+#define TIM_CHUNKSIZE_MAX (TIM_CHUNKSIZE_MULTIPLE * 0x1FFF)
+
+static inline u64 get_tenns_tsc(void)
+{
+ u64 tsc;
+
+ asm volatile("mrs %0, cntvct_el0" : "=r" (tsc));
+ return tsc;
+}
+
+static inline u64 get_tenns_clk(void)
+{
+ u64 tsc;
+
+ asm volatile("mrs %0, cntfrq_el0" : "=r" (tsc));
+ return tsc;
+}
+
+static int rvu_tim_disable_lf(struct rvu *rvu, int lf, int blkaddr)
+{
+ u64 regval;
+
+ regval = rvu_read64(rvu, blkaddr, TIM_AF_RINGX_CTL1(lf));
+ if ((regval & TIM_AF_RINGX_CTL1_ENA) == 0)
+ return TIM_AF_RING_ALREADY_DISABLED;
+
+ /* Clear TIM_AF_RING(0..255)_CTL1[ENA]. */
+ regval = rvu_read64(rvu, blkaddr, TIM_AF_RINGX_CTL1(lf));
+ regval &= ~TIM_AF_RINGX_CTL1_ENA;
+ rvu_write64(rvu, blkaddr, TIM_AF_RINGX_CTL1(lf), regval);
+
+ /*
+ * Poll until the corresponding ring's
+ * TIM_AF_RING(0..255)_CTL1[RCF_BUSY] is clear.
+ */
+ rvu_poll_reg(rvu, blkaddr, TIM_AF_RINGX_CTL1(lf),
+ TIM_AF_RINGX_CTL1_RCF_BUSY, true);
+ return 0;
+}
+
+int rvu_mbox_handler_tim_lf_alloc(struct rvu *rvu,
+ struct tim_lf_alloc_req *req,
+ struct tim_lf_alloc_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ int lf, blkaddr;
+ u64 regval;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_TIM, pcifunc);
+ if (blkaddr < 0)
+ return TIM_AF_LF_INVALID;
+
+ lf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr], pcifunc, req->ring);
+ if (lf < 0)
+ return TIM_AF_LF_INVALID;
+
+ /* Check if requested 'TIMLF <=> NPALF' mapping is valid */
+ if (req->npa_pf_func) {
+ /* If default, use 'this' TIMLF's PFFUNC */
+ if (req->npa_pf_func == RVU_DEFAULT_PF_FUNC)
+ req->npa_pf_func = pcifunc;
+ if (!is_pffunc_map_valid(rvu, req->npa_pf_func, BLKTYPE_NPA))
+ return TIM_AF_INVAL_NPA_PF_FUNC;
+ }
+
+ /* Check if requested 'TIMLF <=> SSOLF' mapping is valid */
+ if (req->sso_pf_func) {
+ /* If default, use 'this' TIMLF's PFFUNC */
+ if (req->sso_pf_func == RVU_DEFAULT_PF_FUNC)
+ req->sso_pf_func = pcifunc;
+ if (!is_pffunc_map_valid(rvu, req->sso_pf_func, BLKTYPE_SSO))
+ return TIM_AF_INVAL_SSO_PF_FUNC;
+ }
+
+ regval = (((u64)req->npa_pf_func) << 16) |
+ ((u64)req->sso_pf_func);
+ rvu_write64(rvu, blkaddr, TIM_AF_RINGX_GMCTL(lf), regval);
+
+ rsp->tenns_clk = get_tenns_clk();
+
+ return 0;
+}
+
+int rvu_mbox_handler_tim_lf_free(struct rvu *rvu,
+ struct tim_ring_req *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ int lf, blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_TIM, pcifunc);
+ if (blkaddr < 0)
+ return TIM_AF_LF_INVALID;
+
+ lf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr], pcifunc, req->ring);
+ if (lf < 0)
+ return TIM_AF_LF_INVALID;
+
+ rvu_tim_lf_teardown(rvu, pcifunc, lf, req->ring);
+
+ return 0;
+}
+
+int rvu_mbox_handler_tim_config_ring(struct rvu *rvu,
+ struct tim_config_req *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ int lf, blkaddr;
+ u32 intervalmin;
+ u64 regval;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_TIM, pcifunc);
+ if (blkaddr < 0)
+ return TIM_AF_LF_INVALID;
+
+ lf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr], pcifunc, req->ring);
+ if (lf < 0)
+ return TIM_AF_LF_INVALID;
+
+ /* Check the inputs. */
+ /* bigendian can only be 1 or 0. */
+ if (req->bigendian & ~1)
+ return TIM_AF_INVALID_BIG_ENDIAN_VALUE;
+
+ /* Check GPIO clock source has the GPIO edge set. */
+ if (req->clocksource == TIM_CLK_SRCS_GPIO) {
+ regval = rvu_read64(rvu, blkaddr, TIM_AF_FLAGS_REG);
+ if (((regval >> 5) & 0x3) == 0)
+ return TIM_AF_GPIO_CLK_SRC_NOT_ENABLED;
+ }
+
+ /* enableperiodic can only be 1 or 0. */
+ if (req->enableperiodic & ~1)
+ return TIM_AF_INVALID_ENABLE_PERIODIC;
+
+ /* enabledontfreebuffer can only be 1 or 0. */
+ if (req->enabledontfreebuffer & ~1)
+ return TIM_AF_INVALID_ENABLE_DONTFREE;
+
+ /*
+ * enabledontfreebuffer needs to be true if enableperiodic
+ * is enabled.
+ */
+ if (req->enableperiodic && !req->enabledontfreebuffer)
+ return TIM_AF_ENA_DONTFRE_NSET_PERIODIC;
+
+
+ /* bucketsize needs to be between 2 and 1M (1 << 20). */
+ if (req->bucketsize < 2 || req->bucketsize > 1<<20)
+ return TIM_AF_INVALID_BSIZE;
+
+ if (req->chunksize % TIM_CHUNKSIZE_MULTIPLE)
+ return TIM_AF_CSIZE_NOT_ALIGNED;
+
+ if (req->chunksize < TIM_CHUNKSIZE_MIN)
+ return TIM_AF_CSIZE_TOO_SMALL;
+
+ if (req->chunksize > TIM_CHUNKSIZE_MAX)
+ return TIM_AF_CSIZE_TOO_BIG;
+
+ switch (req->clocksource) {
+ case TIM_CLK_SRCS_TENNS:
+ intervalmin = 256;
+ break;
+ case TIM_CLK_SRCS_GPIO:
+ intervalmin = 256;
+ break;
+ case TIM_CLK_SRCS_GTI:
+ case TIM_CLK_SRCS_PTP:
+ intervalmin = 300;
+ break;
+ default:
+ return TIM_AF_INVALID_CLOCK_SOURCE;
+ }
+
+ if (req->interval < intervalmin)
+ return TIM_AF_INTERVAL_TOO_SMALL;
+
+ /* CTL0 */
+ /* EXPIRE_OFFSET = 0 and is set correctly when enabling. */
+ regval = req->interval;
+ rvu_write64(rvu, blkaddr, TIM_AF_RINGX_CTL0(lf), regval);
+
+ /* CTL1 */
+ regval = (((u64)req->bigendian) << 53) |
+ (((u64)req->clocksource) << 51) |
+ (1ull << 48) | /* LOCK_EN */
+ (((u64)req->enableperiodic) << 45) |
+ (((u64)(req->enableperiodic ^ 1)) << 44) | /* ENA_LDWB */
+ (((u64)req->enabledontfreebuffer) << 43) |
+ (u64)(req->bucketsize - 1);
+ rvu_write64(rvu, blkaddr, TIM_AF_RINGX_CTL1(lf), regval);
+
+ /* CTL2 */
+ regval = ((u64)req->chunksize / TIM_CHUNKSIZE_MULTIPLE) << 40;
+ rvu_write64(rvu, blkaddr, TIM_AF_RINGX_CTL2(lf), regval);
+
+ return 0;
+}
+
+int rvu_mbox_handler_tim_enable_ring(struct rvu *rvu,
+ struct tim_ring_req *req,
+ struct tim_enable_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ int lf, blkaddr;
+ u64 regval;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_TIM, pcifunc);
+ if (blkaddr < 0)
+ return TIM_AF_LF_INVALID;
+
+ lf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr], pcifunc, req->ring);
+ if (lf < 0)
+ return TIM_AF_LF_INVALID;
+
+ /* Error out if the ring is already running. */
+ regval = rvu_read64(rvu, blkaddr, TIM_AF_RINGX_CTL1(lf));
+ if (regval & TIM_AF_RINGX_CTL1_ENA)
+ return TIM_AF_RING_STILL_RUNNING;
+
+ /* Enable the ring. */
+ regval = rvu_read64(rvu, blkaddr, TIM_AF_RINGX_CTL1(lf));
+ regval |= TIM_AF_RINGX_CTL1_ENA;
+ rvu_write64(rvu, blkaddr, TIM_AF_RINGX_CTL1(lf), regval);
+
+ rsp->timestarted = get_tenns_tsc();
+ rsp->currentbucket = (regval >> 20) & 0xfffff;
+
+ return 0;
+}
+
+int rvu_mbox_handler_tim_disable_ring(struct rvu *rvu,
+ struct tim_ring_req *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ int lf, blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_TIM, pcifunc);
+ if (blkaddr < 0)
+ return TIM_AF_LF_INVALID;
+
+ lf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr], pcifunc, req->ring);
+ if (lf < 0)
+ return TIM_AF_LF_INVALID;
+
+ return rvu_tim_disable_lf(rvu, lf, blkaddr);
+}
+
+int rvu_tim_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot)
+{
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_TIM, pcifunc);
+ if (blkaddr < 0)
+ return TIM_AF_LF_INVALID;
+
+ /* Ensure TIM ring is disabled prior to clearing the mapping */
+ rvu_tim_disable_lf(rvu, lf, blkaddr);
+
+ rvu_write64(rvu, blkaddr, TIM_AF_RINGX_GMCTL(lf), 0);
+
+ return 0;
+}
+
+#define FOR_EACH_TIM_LF(lf) \
+for (lf = 0; lf < hw->block[blkaddr].lf.max; lf++)
+
+int rvu_tim_init(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int lf, blkaddr;
+ u8 gpio_edge;
+ u64 regval;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_TIM, 0);
+ if (blkaddr < 0)
+ return 0;
+
+ regval = rvu_read64(rvu, blkaddr, TIM_AF_FLAGS_REG);
+
+ /* Disable the TIM block, if not already disabled. */
+ if (regval & TIM_AF_FLAGS_REG_ENA_TIM) {
+ /* Disable each ring(lf). */
+ FOR_EACH_TIM_LF(lf) {
+ regval = rvu_read64(rvu, blkaddr,
+ TIM_AF_RINGX_CTL1(lf));
+ if (!(regval & TIM_AF_RINGX_CTL1_ENA))
+ continue;
+
+ rvu_tim_disable_lf(rvu, lf, blkaddr);
+ }
+
+ /* Disable the TIM block. */
+ regval = rvu_read64(rvu, blkaddr, TIM_AF_FLAGS_REG);
+ regval &= ~TIM_AF_FLAGS_REG_ENA_TIM;
+ rvu_write64(rvu, blkaddr, TIM_AF_FLAGS_REG, regval);
+ }
+
+ /* Reset each LF. */
+ FOR_EACH_TIM_LF(lf) {
+ rvu_lf_reset(rvu, &hw->block[blkaddr], lf);
+ }
+
+ /* Reset the TIM block to get a clean slate. */
+ rvu_write64(rvu, blkaddr, TIM_AF_BLK_RST, 0x1);
+ rvu_poll_reg(rvu, blkaddr, TIM_AF_BLK_RST, BIT_ULL(63), true);
+
+ gpio_edge = TIM_GPIO_NO_EDGE;
+
+ /* Enable TIM block. */
+ regval = (((u64)gpio_edge) << 6) |
+ BIT_ULL(2) | /* RESET */
+ BIT_ULL(0); /* ENA_TIM */
+ rvu_write64(rvu, blkaddr, TIM_AF_FLAGS_REG, regval);
+
+ return 0;
+}
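The CTL1 write in rvu_mbox_handler_tim_config_ring() packs all of the request fields into one 64-bit value. As a minimal sketch (example_tim_ctl1() is a hypothetical helper; only the shifts actually used by the handler above are assumed, this is not a full register description), the value for a little-endian, non-periodic ring with 1024 buckets on the ten-nanosecond clock source would be assembled like this:

static u64 example_tim_ctl1(void)
{
	u32 bucketsize = 1024;

	return (((u64)0) << 53) |			/* bigendian = 0 */
	       (((u64)TIM_CLK_SRCS_TENNS) << 51) |	/* clocksource */
	       (1ull << 48) |				/* LOCK_EN */
	       (((u64)0) << 45) |			/* enableperiodic = 0 */
	       (((u64)1) << 44) |			/* ENA_LDWB, set when periodic is off */
	       (((u64)0) << 43) |			/* enabledontfreebuffer = 0 */
	       (u64)(bucketsize - 1);			/* bucketsize - 1 */
}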
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_validation.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_validation.c
new file mode 100644
index 000000000000..2ea5d07e71dc
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_validation.c
@@ -0,0 +1,826 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include "rvu.h"
+
+#define PCI_DEVID_OCTEONTX2_RVU_PF 0xA063
+#define PCI_DEVID_OCTEONTX2_SSO_RVU_PF 0xA0F9
+#define PCI_DEVID_OCTEONTX2_NPA_RVU_PF 0xA0FB
+#define PCI_DEVID_OCTEONTX2_CPT_RVU_PF 0xA0FD
+#define PCI_DEVID_OCTEONTX2_SDP_RVU_PF 0xA0F6
+
+static u64 quotas_get_sum(struct rvu_quotas *quotas)
+{
+ u64 lf_sum = 0;
+ int i;
+
+ for (i = 0; i < quotas->cnt; i++)
+ lf_sum += quotas->a[i].val;
+
+ return lf_sum;
+}
+
+static ssize_t quota_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ struct rvu_quota *quota;
+ int val;
+
+ quota = container_of(attr, struct rvu_quota, sysfs);
+
+ if (quota->base->lock)
+ mutex_lock(quota->base->lock);
+ val = quota->val;
+ if (quota->base->lock)
+ mutex_unlock(quota->base->lock);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+
+static ssize_t quota_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int old_val, new_val, res = 0;
+ struct rvu_quota *quota;
+ struct rvu_quotas *base;
+ struct device *dev;
+ u64 lf_sum;
+
+ quota = container_of(attr, struct rvu_quota, sysfs);
+ dev = quota->dev;
+ base = quota->base;
+
+ if (kstrtoint(buf, 0, &new_val)) {
+ dev_err(dev, "Invalid %s quota: %s\n", attr->attr.name, buf);
+ return -EIO;
+ }
+ if (new_val < 0) {
+ dev_err(dev, "Invalid %s quota: %d < 0\n", attr->attr.name,
+ new_val);
+ return -EIO;
+ }
+
+ if (new_val > base->max) {
+ dev_err(dev, "Invalid %s quota: %d > %d\n", attr->attr.name,
+ new_val, base->max);
+ return -EIO;
+ }
+
+ if (base->lock)
+ mutex_lock(base->lock);
+ old_val = quota->val;
+
+ if (base->ops.pre_store)
+ res = base->ops.pre_store(quota->ops_arg, quota, new_val);
+
+ if (res != 0) {
+ res = -EIO;
+ goto unlock;
+ }
+
+ lf_sum = quotas_get_sum(quota->base);
+
+ if (lf_sum + new_val - quota->val > base->max_sum) {
+ dev_err(dev,
+ "Not enough resources for %s quota. Used: %lld, Max: %lld\n",
+ attr->attr.name, lf_sum, base->max_sum);
+ res = -EIO;
+ goto unlock;
+ }
+ quota->val = new_val;
+
+ if (base->ops.post_store)
+ base->ops.post_store(quota->ops_arg, quota, old_val);
+
+ res = count;
+
+unlock:
+ if (base->lock)
+ mutex_unlock(base->lock);
+ return res;
+}
+
+static int quota_sysfs_destroy(struct rvu_quota *quota)
+{
+ if (quota == NULL)
+ return -EINVAL;
+ if (quota->sysfs.attr.mode != 0) {
+ sysfs_remove_file(quota->parent, &quota->sysfs.attr);
+ quota->sysfs.attr.mode = 0;
+ }
+ return 0;
+}
+
+static struct rvu_quotas *quotas_alloc(u32 cnt, u32 max, u64 max_sum,
+ int init_val, struct mutex *lock,
+ struct rvu_quota_ops *ops)
+{
+ struct rvu_quotas *quotas;
+ u64 i;
+
+ if (cnt == 0)
+ return NULL;
+
+ quotas = kzalloc(sizeof(struct rvu_quotas) +
+ cnt * sizeof(struct rvu_quota), GFP_KERNEL);
+ if (quotas == NULL)
+ return NULL;
+
+ for (i = 0; i < cnt; i++) {
+ quotas->a[i].base = quotas;
+ quotas->a[i].val = init_val;
+ }
+
+ quotas->cnt = cnt;
+ quotas->max = max;
+ quotas->max_sum = max_sum;
+ if (ops) {
+ quotas->ops.pre_store = ops->pre_store;
+ quotas->ops.post_store = ops->post_store;
+ }
+ quotas->lock = lock;
+
+ return quotas;
+}
+
+static void quotas_free(struct rvu_quotas *quotas)
+{
+ u64 i;
+
+ if (quotas == NULL)
+ return;
+ WARN_ON(quotas->cnt == 0);
+
+ for (i = 0; i < quotas->cnt; i++)
+ quota_sysfs_destroy(&quotas->a[i]);
+
+ kfree(quotas);
+}
+
+static int quota_sysfs_create(const char *name, struct kobject *parent,
+ struct device *log_dev, struct rvu_quota *quota,
+ void *ops_arg)
+{
+ int err;
+
+ if (name == NULL || quota == NULL || log_dev == NULL)
+ return -EINVAL;
+
+ quota->sysfs.show = quota_show;
+ quota->sysfs.store = quota_store;
+ quota->sysfs.attr.name = name;
+ quota->sysfs.attr.mode = 0644;
+ quota->parent = parent;
+ quota->dev = log_dev;
+ quota->ops_arg = ops_arg;
+
+ sysfs_attr_init(&quota->sysfs.attr);
+ err = sysfs_create_file(quota->parent, &quota->sysfs.attr);
+ if (err) {
+ dev_err(quota->dev,
+ "Failed to create '%s' quota sysfs for '%s'\n",
+ name, kobject_name(quota->parent));
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int rvu_blk_count_rsrc(struct rvu_block *block, u16 pcifunc, u8 rshift)
+{
+ int count = 0, lf;
+
+ for (lf = 0; lf < block->lf.max; lf++)
+ if ((block->fn_map[lf] >> rshift) == (pcifunc >> rshift))
+ count++;
+
+ return count;
+}
+
+static int rvu_txsch_count_rsrc(struct rvu *rvu, int lvl, u16 pcifunc,
+ u8 rshift)
+{
+ struct nix_txsch *txsch = &rvu->hw->nix0->txsch[lvl];
+ int count = 0, schq;
+
+ if (lvl == NIX_TXSCH_LVL_TL1)
+ return 0;
+
+ for (schq = 0; schq < txsch->schq.max; schq++) {
+ if ((txsch->pfvf_map[schq] >> rshift) == (pcifunc >> rshift))
+ count++;
+ }
+
+ return count;
+}
+
+int rvu_mbox_handler_free_rsrc_cnt(struct rvu *rvu, struct msg_req *req,
+ struct free_rsrcs_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_block *block;
+ int pf, curlfs;
+
+ mutex_lock(&rvu->rsrc_lock);
+ pf = rvu_get_pf(pcifunc);
+
+ block = &hw->block[BLKADDR_NPA];
+ curlfs = rvu_blk_count_rsrc(block, pcifunc, RVU_PFVF_PF_SHIFT);
+ rsp->npa = rvu->pf_limits.npa->a[pf].val - curlfs;
+
+ block = &hw->block[BLKADDR_NIX0];
+ curlfs = rvu_blk_count_rsrc(block, pcifunc, RVU_PFVF_PF_SHIFT);
+ rsp->nix = rvu->pf_limits.nix->a[pf].val - curlfs;
+
+ block = &hw->block[BLKADDR_SSO];
+ curlfs = rvu_blk_count_rsrc(block, pcifunc, RVU_PFVF_PF_SHIFT);
+ rsp->sso = rvu->pf_limits.sso->a[pf].val - curlfs;
+
+ block = &hw->block[BLKADDR_SSOW];
+ curlfs = rvu_blk_count_rsrc(block, pcifunc, RVU_PFVF_PF_SHIFT);
+ rsp->ssow = rvu->pf_limits.ssow->a[pf].val - curlfs;
+
+ block = &hw->block[BLKADDR_TIM];
+ curlfs = rvu_blk_count_rsrc(block, pcifunc, RVU_PFVF_PF_SHIFT);
+ rsp->tim = rvu->pf_limits.tim->a[pf].val - curlfs;
+
+ block = &hw->block[BLKADDR_CPT0];
+ curlfs = rvu_blk_count_rsrc(block, pcifunc, RVU_PFVF_PF_SHIFT);
+ rsp->cpt = rvu->pf_limits.cpt->a[pf].val - curlfs;
+
+ if (rvu->hw->cap.nix_fixed_txschq_mapping) {
+ rsp->schq[NIX_TXSCH_LVL_SMQ] = 1;
+ rsp->schq[NIX_TXSCH_LVL_TL4] = 1;
+ rsp->schq[NIX_TXSCH_LVL_TL3] = 1;
+ rsp->schq[NIX_TXSCH_LVL_TL2] = 1;
+ } else {
+ curlfs = rvu_txsch_count_rsrc(rvu, NIX_TXSCH_LVL_SMQ, pcifunc,
+ RVU_PFVF_PF_SHIFT);
+ rsp->schq[NIX_TXSCH_LVL_SMQ] =
+ rvu->pf_limits.smq->a[pf].val - curlfs;
+
+ curlfs = rvu_txsch_count_rsrc(rvu, NIX_TXSCH_LVL_TL4, pcifunc,
+ RVU_PFVF_PF_SHIFT);
+ rsp->schq[NIX_TXSCH_LVL_TL4] =
+ rvu->pf_limits.tl4->a[pf].val - curlfs;
+
+ curlfs = rvu_txsch_count_rsrc(rvu, NIX_TXSCH_LVL_TL3, pcifunc,
+ RVU_PFVF_PF_SHIFT);
+ rsp->schq[NIX_TXSCH_LVL_TL3] =
+ rvu->pf_limits.tl3->a[pf].val - curlfs;
+
+ curlfs = rvu_txsch_count_rsrc(rvu, NIX_TXSCH_LVL_TL2, pcifunc,
+ RVU_PFVF_PF_SHIFT);
+ rsp->schq[NIX_TXSCH_LVL_TL2] =
+ rvu->pf_limits.tl2->a[pf].val - curlfs;
+ }
+
+ rsp->schq[NIX_TXSCH_LVL_TL1] = 1;
+
+ mutex_unlock(&rvu->rsrc_lock);
+
+ return 0;
+}
+
+int rvu_check_txsch_policy(struct rvu *rvu, struct nix_txsch_alloc_req *req,
+ u16 pcifunc)
+{
+ struct nix_txsch *txsch;
+ int lvl, req_schq, pf = rvu_get_pf(pcifunc);
+ int limit, familylfs, delta;
+
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ txsch = &rvu->hw->nix0->txsch[lvl];
+ req_schq = req->schq_contig[lvl] + req->schq[lvl];
+
+ switch (lvl) {
+ case NIX_TXSCH_LVL_SMQ:
+ limit = rvu->pf_limits.smq->a[pf].val;
+ break;
+ case NIX_TXSCH_LVL_TL4:
+ limit = rvu->pf_limits.tl4->a[pf].val;
+ break;
+ case NIX_TXSCH_LVL_TL3:
+ limit = rvu->pf_limits.tl3->a[pf].val;
+ break;
+ case NIX_TXSCH_LVL_TL2:
+ limit = rvu->pf_limits.tl2->a[pf].val;
+ break;
+ case NIX_TXSCH_LVL_TL1:
+ if (req_schq > 2)
+ return -ENOSPC;
+ continue;
+ }
+
+ familylfs = rvu_txsch_count_rsrc(rvu, lvl, pcifunc,
+ RVU_PFVF_PF_SHIFT);
+ delta = req_schq - rvu_txsch_count_rsrc(rvu, lvl, pcifunc, 0);
+
+ if ((delta > 0) && /* always allow usage decrease */
+ ((limit < familylfs + delta) ||
+ (delta > rvu_rsrc_free_count(&txsch->schq))))
+ return -ENOSPC;
+ }
+
+ return 0;
+}
+
+int rvu_check_rsrc_policy(struct rvu *rvu, struct rsrc_attach *req,
+ u16 pcifunc)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ int free_lfs, mappedlfs, familylfs, limit, delta;
+ struct rvu_hwinfo *hw = rvu->hw;
+ int pf = rvu_get_pf(pcifunc);
+ struct rvu_block *block;
+
+ /* Only one NPA LF can be attached */
+ if (req->npalf) {
+ block = &hw->block[BLKADDR_NPA];
+ free_lfs = rvu_rsrc_free_count(&block->lf);
+ limit = rvu->pf_limits.npa->a[pf].val;
+ familylfs = rvu_blk_count_rsrc(block, pcifunc,
+ RVU_PFVF_PF_SHIFT);
+ if (!free_lfs || (limit == familylfs))
+ goto fail;
+ }
+
+ /* Only one NIX LF can be attached */
+ if (req->nixlf) {
+ block = &hw->block[BLKADDR_NIX0];
+ free_lfs = rvu_rsrc_free_count(&block->lf);
+ limit = rvu->pf_limits.nix->a[pf].val;
+ familylfs = rvu_blk_count_rsrc(block, pcifunc,
+ RVU_PFVF_PF_SHIFT);
+ if (!free_lfs || (limit == familylfs))
+ goto fail;
+ }
+
+ if (req->sso) {
+ block = &hw->block[BLKADDR_SSO];
+ mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
+ free_lfs = rvu_rsrc_free_count(&block->lf);
+ limit = rvu->pf_limits.sso->a[pf].val;
+ familylfs = rvu_blk_count_rsrc(block, pcifunc,
+ RVU_PFVF_PF_SHIFT);
+ /* Check if additional resources are available */
+ delta = req->sso - mappedlfs;
+ if ((delta > 0) && /* always allow usage decrease */
+ ((limit < familylfs + delta) ||
+ (delta > free_lfs)))
+ goto fail;
+ }
+
+ if (req->ssow) {
+ block = &hw->block[BLKADDR_SSOW];
+ mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
+ free_lfs = rvu_rsrc_free_count(&block->lf);
+ limit = rvu->pf_limits.ssow->a[pf].val;
+ familylfs = rvu_blk_count_rsrc(block, pcifunc,
+ RVU_PFVF_PF_SHIFT);
+ /* Check if additional resources are available */
+ delta = req->ssow - mappedlfs;
+ if ((delta > 0) && /* always allow usage decrease */
+ ((limit < familylfs + delta) ||
+ (delta > free_lfs)))
+ goto fail;
+ }
+
+ if (req->timlfs) {
+ block = &hw->block[BLKADDR_TIM];
+ mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
+ free_lfs = rvu_rsrc_free_count(&block->lf);
+ limit = rvu->pf_limits.tim->a[pf].val;
+ familylfs = rvu_blk_count_rsrc(block, pcifunc,
+ RVU_PFVF_PF_SHIFT);
+ /* Check if additional resources are available */
+ delta = req->timlfs - mappedlfs;
+ if ((delta > 0) && /* always allow usage decrease */
+ ((limit < familylfs + delta) ||
+ (delta > free_lfs)))
+ goto fail;
+ }
+
+ if (req->cptlfs) {
+ block = &hw->block[BLKADDR_CPT0];
+ mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
+ free_lfs = rvu_rsrc_free_count(&block->lf);
+ limit = rvu->pf_limits.cpt->a[pf].val;
+ familylfs = rvu_blk_count_rsrc(block, pcifunc,
+ RVU_PFVF_PF_SHIFT);
+ /* Check if additional resources are available */
+ delta = req->cptlfs - mappedlfs;
+ if ((delta > 0) && /* always allow usage decrease */
+ ((limit < familylfs + delta) ||
+ (delta > free_lfs)))
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ dev_info(rvu->dev, "Request for %s failed\n", block->name);
+ return -ENOSPC;
+}
+
+static int check_mapped_rsrcs(void *arg, struct rvu_quota *quota, int new_val)
+{
+ struct rvu_pfvf *pf = arg;
+ int type;
+
+ for (type = 0; type < BLKTYPE_MAX; type++) {
+ if (rvu_get_rsrc_mapcount(pf, type) > 0)
+ return 1;
+ }
+ return 0;
+}
+
+static struct rvu_quota_ops pf_limit_ops = {
+ .pre_store = check_mapped_rsrcs,
+};
+
+static void rvu_set_default_limits(struct rvu *rvu)
+{
+ int i, nvfs, cpt_rvus, npa_rvus, sso_rvus, nix_rvus, nsso, nssow, ntim;
+ int ncpt, nnpa, nnix, nsmq = 0, ntl4 = 0, ntl3 = 0, ntl2 = 0;
+ unsigned short devid;
+
+ /* First pass, count number of SSO/TIM PFs. */
+ sso_rvus = 0;
+ nix_rvus = 0;
+ cpt_rvus = 0;
+ npa_rvus = 0;
+ for (i = 0; i < rvu->hw->total_pfs; i++) {
+ if (rvu->pf[i].pdev == NULL)
+ continue;
+ devid = rvu->pf[i].pdev->device;
+ if (devid == PCI_DEVID_OCTEONTX2_SSO_RVU_PF)
+ sso_rvus++;
+ else if (devid == PCI_DEVID_OCTEONTX2_RVU_PF ||
+ devid == PCI_DEVID_OCTEONTX2_RVU_AF ||
+ devid == PCI_DEVID_OCTEONTX2_SDP_RVU_PF)
+ nix_rvus++;
+ else if (devid == PCI_DEVID_OCTEONTX2_CPT_RVU_PF)
+ cpt_rvus++;
+ else if (devid == PCI_DEVID_OCTEONTX2_NPA_RVU_PF)
+ npa_rvus++;
+ }
+ /* Calculate default partitioning. */
+ nsso = rvu->pf_limits.sso->max_sum / sso_rvus;
+ nssow = rvu->pf_limits.ssow->max_sum / sso_rvus;
+ ntim = rvu->pf_limits.tim->max_sum / sso_rvus;
+ /* Divide CPT among SSO and CPT PFs since cores shouldn't be shared. */
+ ncpt = rvu->pf_limits.cpt->max_sum / (sso_rvus + cpt_rvus);
+ /* NPA/NIX count depends on DTS VF config. Allocate until run out. */
+ nnpa = rvu->pf_limits.npa->max_sum;
+ nnix = rvu->pf_limits.nix->max_sum;
+ if (!rvu->hw->cap.nix_fixed_txschq_mapping) {
+ nsmq = rvu->pf_limits.smq->max_sum / nix_rvus;
+ ntl4 = rvu->pf_limits.tl4->max_sum / nix_rvus;
+ ntl3 = rvu->pf_limits.tl3->max_sum / nix_rvus;
+ ntl2 = rvu->pf_limits.tl2->max_sum / nix_rvus;
+ }
+
+ /* Second pass, set the default limit values. */
+ for (i = 0; i < rvu->hw->total_pfs; i++) {
+ if (rvu->pf[i].pdev == NULL)
+ continue;
+ nvfs = pci_sriov_get_totalvfs(rvu->pf[i].pdev);
+ switch (rvu->pf[i].pdev->device) {
+ case PCI_DEVID_OCTEONTX2_RVU_AF:
+ nnix -= nvfs;
+ nnpa -= nvfs;
+ rvu->pf_limits.nix->a[i].val = nnix > 0 ? nvfs : 0;
+ rvu->pf_limits.npa->a[i].val = nnpa > 0 ? nvfs : 0;
+ if (rvu->hw->cap.nix_fixed_txschq_mapping)
+ break;
+ rvu->pf_limits.smq->a[i].val = nsmq;
+ rvu->pf_limits.tl4->a[i].val = ntl4;
+ rvu->pf_limits.tl3->a[i].val = ntl3;
+ rvu->pf_limits.tl2->a[i].val = ntl2;
+ break;
+ case PCI_DEVID_OCTEONTX2_RVU_PF:
+ nnix -= 1 + nvfs;
+ nnpa -= 1 + nvfs;
+ rvu->pf_limits.nix->a[i].val = nnix > 0 ? 1 + nvfs : 0;
+ rvu->pf_limits.npa->a[i].val = nnpa > 0 ? 1 + nvfs : 0;
+ if (rvu->hw->cap.nix_fixed_txschq_mapping)
+ break;
+ rvu->pf_limits.smq->a[i].val = nsmq;
+ rvu->pf_limits.tl4->a[i].val = ntl4;
+ rvu->pf_limits.tl3->a[i].val = ntl3;
+ rvu->pf_limits.tl2->a[i].val = ntl2;
+ break;
+ case PCI_DEVID_OCTEONTX2_SSO_RVU_PF:
+ nnpa -= 1 + nvfs;
+ rvu->pf_limits.npa->a[i].val = nnpa > 0 ? 1 + nvfs : 0;
+ rvu->pf_limits.sso->a[i].val = nsso;
+ rvu->pf_limits.ssow->a[i].val = nssow;
+ rvu->pf_limits.tim->a[i].val = ntim;
+ rvu->pf_limits.cpt->a[i].val = ncpt;
+ break;
+ case PCI_DEVID_OCTEONTX2_NPA_RVU_PF:
+ nnpa -= 1 + nvfs;
+ rvu->pf_limits.npa->a[i].val = nnpa > 0 ? 1 + nvfs : 0;
+ break;
+ case PCI_DEVID_OCTEONTX2_CPT_RVU_PF:
+ nnpa -= 1;
+ rvu->pf_limits.npa->a[i].val = nnpa > 0 ? 1 : 0;
+ rvu->pf_limits.cpt->a[i].val = ncpt;
+ break;
+ case PCI_DEVID_OCTEONTX2_SDP_RVU_PF:
+ nnix -= 1 + nvfs;
+ nnpa -= 1 + nvfs;
+ rvu->pf_limits.nix->a[i].val = nnix > 0 ? 1 + nvfs : 0;
+ rvu->pf_limits.npa->a[i].val = nnpa > 0 ? 1 + nvfs : 0;
+ if (rvu->hw->cap.nix_fixed_txschq_mapping)
+ break;
+ rvu->pf_limits.smq->a[i].val = nsmq;
+ rvu->pf_limits.tl4->a[i].val = ntl4;
+ rvu->pf_limits.tl3->a[i].val = ntl3;
+ rvu->pf_limits.tl2->a[i].val = ntl2;
+ break;
+ }
+ }
+}
+
+static int rvu_create_limits_sysfs(struct rvu *rvu)
+{
+ struct pci_dev *pdev;
+ struct rvu_pfvf *pf;
+ int i, err = 0;
+
+ for (i = 0; i < rvu->hw->total_pfs; i++) {
+ pf = &rvu->pf[i];
+ if (!pf->pdev)
+ continue;
+ pdev = pf->pdev;
+
+ pf->limits_kobj = kobject_create_and_add("limits",
+ &pdev->dev.kobj);
+
+ if (quota_sysfs_create("sso", pf->limits_kobj, rvu->dev,
+ &rvu->pf_limits.sso->a[i], pf)) {
+ dev_err(rvu->dev,
+ "Failed to allocate quota for sso on %s\n",
+ pci_name(pdev));
+ err = -EFAULT;
+ break;
+ }
+
+ if (quota_sysfs_create("ssow", pf->limits_kobj, rvu->dev,
+ &rvu->pf_limits.ssow->a[i], pf)) {
+ dev_err(rvu->dev,
+ "Failed to allocate quota for ssow, on %s\n",
+ pci_name(pdev));
+ err = -EFAULT;
+ break;
+ }
+
+ if (quota_sysfs_create("tim", pf->limits_kobj, rvu->dev,
+ &rvu->pf_limits.tim->a[i], pf)) {
+ dev_err(rvu->dev,
+ "Failed to allocate quota for tim, on %s\n",
+ pci_name(pdev));
+ err = -EFAULT;
+ break;
+ }
+
+ if (quota_sysfs_create("cpt", pf->limits_kobj, rvu->dev,
+ &rvu->pf_limits.cpt->a[i], pf)) {
+ dev_err(rvu->dev,
+ "Failed to allocate quota for cpt, on %s\n",
+ pci_name(pdev));
+ err = -EFAULT;
+ break;
+ }
+
+ if (quota_sysfs_create("npa", pf->limits_kobj, rvu->dev,
+ &rvu->pf_limits.npa->a[i], pf)) {
+ dev_err(rvu->dev,
+ "Failed to allocate quota for npa, on %s\n",
+ pci_name(pdev));
+ err = -EFAULT;
+ break;
+ }
+
+ if (quota_sysfs_create("nix", pf->limits_kobj, rvu->dev,
+ &rvu->pf_limits.nix->a[i], pf)) {
+ dev_err(rvu->dev,
+ "Failed to allocate quota for nix, on %s\n",
+ pci_name(pdev));
+ err = -EFAULT;
+ break;
+ }
+
+ /* In fixed TXSCHQ case each LF is assigned only 1 queue. */
+ if (rvu->hw->cap.nix_fixed_txschq_mapping)
+ continue;
+
+ if (quota_sysfs_create("smq", pf->limits_kobj, rvu->dev,
+ &rvu->pf_limits.smq->a[i], pf)) {
+ dev_err(rvu->dev, "Failed to allocate quota for smq on %s\n",
+ pci_name(pf->pdev));
+ err = -EFAULT;
+ break;
+ }
+
+ if (quota_sysfs_create("tl4", pf->limits_kobj, rvu->dev,
+ &rvu->pf_limits.tl4->a[i], pf)) {
+ dev_err(rvu->dev, "Failed to allocate quota for tl4 on %s\n",
+ pci_name(pf->pdev));
+ err = -EFAULT;
+ break;
+ }
+
+ if (quota_sysfs_create("tl3", pf->limits_kobj, rvu->dev,
+ &rvu->pf_limits.tl3->a[i], pf)) {
+ dev_err(rvu->dev, "Failed to allocate quota for tl3 on %s\n",
+ pci_name(pf->pdev));
+ err = -EFAULT;
+ break;
+ }
+
+ if (quota_sysfs_create("tl2", pf->limits_kobj, rvu->dev,
+ &rvu->pf_limits.tl2->a[i], pf)) {
+ dev_err(rvu->dev, "Failed to allocate quota for tl2 on %s\n",
+ pci_name(pf->pdev));
+ err = -EFAULT;
+ break;
+ }
+ }
+
+ return err;
+}
+
+void rvu_policy_destroy(struct rvu *rvu)
+{
+ struct rvu_pfvf *pf = NULL;
+ int i;
+
+ quotas_free(rvu->pf_limits.sso);
+ quotas_free(rvu->pf_limits.ssow);
+ quotas_free(rvu->pf_limits.npa);
+ quotas_free(rvu->pf_limits.cpt);
+ quotas_free(rvu->pf_limits.tim);
+ quotas_free(rvu->pf_limits.nix);
+
+ rvu->pf_limits.sso = NULL;
+ rvu->pf_limits.ssow = NULL;
+ rvu->pf_limits.npa = NULL;
+ rvu->pf_limits.cpt = NULL;
+ rvu->pf_limits.tim = NULL;
+ rvu->pf_limits.nix = NULL;
+
+ if (!rvu->hw->cap.nix_fixed_txschq_mapping) {
+ quotas_free(rvu->pf_limits.smq);
+ quotas_free(rvu->pf_limits.tl4);
+ quotas_free(rvu->pf_limits.tl3);
+ quotas_free(rvu->pf_limits.tl2);
+
+ rvu->pf_limits.smq = NULL;
+ rvu->pf_limits.tl4 = NULL;
+ rvu->pf_limits.tl3 = NULL;
+ rvu->pf_limits.tl2 = NULL;
+ }
+
+ for (i = 0; i < rvu->hw->total_pfs; i++) {
+ pf = &rvu->pf[i];
+ kobject_del(pf->limits_kobj);
+ }
+}
+
+int rvu_policy_init(struct rvu *rvu)
+{
+ struct pci_dev *pdev = rvu->pdev;
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct nix_hw *nix_hw = rvu->hw->nix0;
+ int err, i = 0;
+ u32 max = 0;
+
+ max = hw->block[BLKADDR_SSO].lf.max;
+ rvu->pf_limits.sso = quotas_alloc(rvu->hw->total_pfs, max, max,
+ 0, &rvu->rsrc_lock, &pf_limit_ops);
+ if (!rvu->pf_limits.sso) {
+ dev_err(rvu->dev, "Failed to allocate sso limits\n");
+ err = -EFAULT;
+ goto error;
+ }
+
+ max = hw->block[BLKADDR_SSOW].lf.max;
+ rvu->pf_limits.ssow = quotas_alloc(rvu->hw->total_pfs, max, max,
+ 0, &rvu->rsrc_lock, &pf_limit_ops);
+ if (!rvu->pf_limits.ssow) {
+ dev_err(rvu->dev, "Failed to allocate ssow limits\n");
+ err = -EFAULT;
+ goto error;
+ }
+
+ max = hw->block[BLKADDR_TIM].lf.max;
+ rvu->pf_limits.tim = quotas_alloc(rvu->hw->total_pfs, max, max,
+ 0, &rvu->rsrc_lock, &pf_limit_ops);
+ if (!rvu->pf_limits.tim) {
+ dev_err(rvu->dev, "Failed to allocate tim limits\n");
+ err = -EFAULT;
+ goto error;
+ }
+
+ max = hw->block[BLKADDR_CPT0].lf.max;
+ rvu->pf_limits.cpt = quotas_alloc(rvu->hw->total_pfs, max, max,
+ 0, &rvu->rsrc_lock, &pf_limit_ops);
+ if (!rvu->pf_limits.cpt) {
+ dev_err(rvu->dev, "Failed to allocate cpt limits\n");
+ err = -EFAULT;
+ goto error;
+ }
+
+ /* Because the limits also track VFs under a PF, the maximum NPA LF limit
+ * for a single PF has to be max, not 1. Same for NIX below.
+ */
+ max = hw->block[BLKADDR_NPA].lf.max;
+ rvu->pf_limits.npa = quotas_alloc(rvu->hw->total_pfs, max, max,
+ 0, &rvu->rsrc_lock, &pf_limit_ops);
+ if (!rvu->pf_limits.npa) {
+ dev_err(rvu->dev, "Failed to allocate npa limits\n");
+ err = -EFAULT;
+ goto error;
+ }
+
+ max = hw->block[BLKADDR_NIX0].lf.max;
+ rvu->pf_limits.nix = quotas_alloc(rvu->hw->total_pfs, max, max,
+ 0, &rvu->rsrc_lock, &pf_limit_ops);
+ if (!rvu->pf_limits.nix) {
+ dev_err(rvu->dev, "Failed to allocate nix limits\n");
+ err = -EFAULT;
+ goto error;
+ }
+
+ if (rvu->hw->cap.nix_fixed_txschq_mapping)
+ goto skip_txschq_limits;
+
+ max = nix_hw->txsch[NIX_TXSCH_LVL_SMQ].schq.max;
+ rvu->pf_limits.smq = quotas_alloc(hw->total_pfs, max, max, 0,
+ &rvu->rsrc_lock, &pf_limit_ops);
+ if (!rvu->pf_limits.smq) {
+ dev_err(rvu->dev, "Failed to allocate SQM txschq limits\n");
+ err = -EFAULT;
+ goto error;
+ }
+
+ max = nix_hw->txsch[NIX_TXSCH_LVL_TL4].schq.max;
+ rvu->pf_limits.tl4 = quotas_alloc(hw->total_pfs, max, max, 0,
+ &rvu->rsrc_lock, &pf_limit_ops);
+ if (!rvu->pf_limits.tl4) {
+ dev_err(rvu->dev, "Failed to allocate TL4 txschq limits\n");
+ err = -EFAULT;
+ goto error;
+ }
+
+ max = nix_hw->txsch[NIX_TXSCH_LVL_TL3].schq.max;
+ rvu->pf_limits.tl3 = quotas_alloc(hw->total_pfs, max, max, 0,
+ &rvu->rsrc_lock, &pf_limit_ops);
+ if (!rvu->pf_limits.tl3) {
+ dev_err(rvu->dev, "Failed to allocate TL3 txschq limits\n");
+ err = -EFAULT;
+ goto error;
+ }
+
+ max = nix_hw->txsch[NIX_TXSCH_LVL_TL2].schq.max;
+ rvu->pf_limits.tl2 = quotas_alloc(hw->total_pfs, max, max, 0,
+ &rvu->rsrc_lock, &pf_limit_ops);
+ if (!rvu->pf_limits.tl2) {
+ dev_err(rvu->dev, "Failed to allocate TL2 txschq limits\n");
+ err = -EFAULT;
+ goto error;
+ }
+
+skip_txschq_limits:
+ for (i = 0; i < hw->total_pfs; i++)
+ rvu->pf[i].pdev =
+ pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
+ i + 1, 0);
+
+ rvu_set_default_limits(rvu);
+
+ err = rvu_create_limits_sysfs(rvu);
+ if (err) {
+ dev_err(rvu->dev, "Failed to create limits sysfs\n");
+ goto error;
+ }
+
+ return 0;
+
+error:
+ rvu_policy_destroy(rvu);
+ return err;
+}
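Since each PF's "limits" kobject is created directly under its PCI device (kobject_create_and_add("limits", &pdev->dev.kobj) above), the quotas are reachable from user space at /sys/bus/pci/devices/<PF BDF>/limits/{sso,ssow,tim,cpt,npa,nix,smq,tl4,tl3,tl2}. For example, with a hypothetical PF at 0002:01:00.0, reading /sys/bus/pci/devices/0002:01:00.0/limits/sso returns the current SSO LF quota, and writing 8 to the same file raises it; the write fails with EIO if the PF still has resources mapped (the check_mapped_rsrcs() pre_store hook) or if the new sum of all PF quotas would exceed max_sum.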
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_validation.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_validation.h
new file mode 100644
index 000000000000..9dc8252d9986
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_validation.h
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef RVU_VALIDATION_H
+#define RVU_VALIDATION_H
+
+struct rvu;
+struct rvu_quotas;
+
+struct rvu_quota {
+ struct kobj_attribute sysfs;
+ /* Device to scope logs to */
+ struct device *dev;
+ /* Kobject of the sysfs file */
+ struct kobject *parent;
+ /* Pointer to base structure */
+ struct rvu_quotas *base;
+ /* Argument passed to the quota_ops when this quota is modified */
+ void *ops_arg;
+ /* Value of the quota */
+ int val;
+};
+
+struct rvu_quota_ops {
+ /*
+ * Called before sysfs store(). store() will proceed only if it returns 0.
+ * It is called with struct rvu_quotas::lock taken.
+ */
+ int (*pre_store)(void *arg, struct rvu_quota *quota, int new_val);
+ /* Called after sysfs store(). */
+ void (*post_store)(void *arg, struct rvu_quota *quota, int old_val);
+};
+
+struct rvu_quotas {
+ struct rvu_quota_ops ops;
+ struct mutex *lock; /* lock taken for each sysfs operation */
+ u32 cnt; /* number of elements in arr */
+ u32 max; /* maximum value for a single quota */
+ u64 max_sum; /* maximum sum of all quotas */
+ struct rvu_quota a[0]; /* array of quota assignments */
+};
+
+struct rvu_limits {
+ struct rvu_quotas *sso;
+ struct rvu_quotas *ssow;
+ struct rvu_quotas *tim;
+ struct rvu_quotas *cpt;
+ struct rvu_quotas *npa;
+ struct rvu_quotas *nix;
+ struct rvu_quotas *smq;
+ struct rvu_quotas *tl4;
+ struct rvu_quotas *tl3;
+ struct rvu_quotas *tl2;
+};
+
+int rvu_policy_init(struct rvu *rvu);
+void rvu_policy_destroy(struct rvu *rvu);
+int rvu_check_rsrc_policy(struct rvu *rvu,
+ struct rsrc_attach *req, u16 pcifunc);
+int rvu_check_txsch_policy(struct rvu *rvu, struct nix_txsch_alloc_req *req,
+ u16 pcifunc);
+
+int rvu_mbox_handler_free_rsrc_cnt(struct rvu *rvu, struct msg_req *req,
+ struct free_rsrcs_rsp *rsp);
+#endif /* RVU_VALIDATION_H */
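The pre_store hook declared above is the single policy point a quota owner implements; any non-zero return makes the sysfs store() fail with EIO. A minimal sketch of a hook under this contract (example_pre_store() and its policy are hypothetical, shown only to illustrate the signature; the hook actually wired up in rvu_validation.c instead rejects any change while the PF/VF still has resources mapped):

static int example_pre_store(void *arg, struct rvu_quota *quota, int new_val)
{
	/* Refuse to shrink the quota below its currently configured value. */
	if (new_val < quota->val)
		return 1;
	return 0;
}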
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
index 778df331c8ac..48846096f4c7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -6,7 +6,8 @@
obj-$(CONFIG_OCTEONTX2_PF) += octeontx2_nicpf.o
obj-$(CONFIG_OCTEONTX2_VF) += octeontx2_nicvf.o
-octeontx2_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o
-octeontx2_nicvf-y := otx2_vf.o
+octeontx2_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
+ otx2_ptp.o otx2_flows.o
+octeontx2_nicvf-y := otx2_vf.o otx2_smqvf.o
ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index f1d2dea90a8c..18b0059c1f90 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -60,6 +60,22 @@ void otx2_update_lmac_stats(struct otx2_nic *pfvf)
mutex_unlock(&pfvf->mbox.lock);
}
+void otx2_update_lmac_fec_stats(struct otx2_nic *pfvf)
+{
+ struct msg_req *req;
+
+ if (!netif_running(pfvf->netdev))
+ return;
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_fec_stats(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return;
+ }
+ otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+}
+
int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx)
{
struct otx2_rcv_queue *rq = &pfvf->qset.rq[qidx];
@@ -191,9 +207,12 @@ int otx2_set_mac_address(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- if (!otx2_hw_set_mac_addr(pfvf, addr->sa_data))
+ if (!otx2_hw_set_mac_addr(pfvf, addr->sa_data)) {
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
- else
+ /* update dmac field in vlan offload rule */
+ if (pfvf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
+ otx2_install_rxvlan_offload_flow(pfvf);
+ } else
return -EPERM;
return 0;
@@ -212,7 +231,8 @@ int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu)
return -ENOMEM;
}
- pfvf->max_frs = mtu + OTX2_ETH_HLEN;
+ /* Add EDSA/HIGIG2 header len to maxlen */
+ pfvf->max_frs = mtu + OTX2_ETH_HLEN + pfvf->addl_mtu;
req->maxlen = pfvf->max_frs;
err = otx2_sync_mbox_msg(&pfvf->mbox);
@@ -676,6 +696,13 @@ static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
if (!sq->sg)
return -ENOMEM;
+ if (pfvf->ptp) {
+ err = qmem_alloc(pfvf->dev, &sq->timestamps, qset->sqe_cnt,
+ sizeof(*sq->timestamps));
+ if (err)
+ return err;
+ }
+
sq->head = 0;
sq->sqe_per_sqb = (pfvf->hw.sqb_size / sq->sqe_size) - 1;
sq->num_sqbs = (qset->sqe_cnt + sq->sqe_per_sqb) / sq->sqe_per_sqb;
@@ -1386,6 +1413,13 @@ void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
pfvf->hw.cgx_tx_stats[id] = rsp->tx_stats[id];
}
+void mbox_handler_cgx_fec_stats(struct otx2_nic *pfvf,
+ struct cgx_fec_stats_rsp *rsp)
+{
+ pfvf->hw.cgx_fec_corr_blks += rsp->fec_corr_blks;
+ pfvf->hw.cgx_fec_uncorr_blks += rsp->fec_uncorr_blks;
+}
+
void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
struct nix_txsch_alloc_rsp *rsp)
{
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 018c283a0ac4..0612e6926d0a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -13,6 +13,7 @@
#include <linux/pci.h>
#include <linux/iommu.h>
+#include <linux/ptp_clock_kernel.h>
#include <mbox.h>
#include "otx2_reg.h"
@@ -191,12 +192,18 @@ struct otx2_hw {
struct otx2_drv_stats drv_stats;
u64 cgx_rx_stats[CGX_RX_STATS_COUNT];
u64 cgx_tx_stats[CGX_TX_STATS_COUNT];
+ u64 cgx_fec_corr_blks;
+ u64 cgx_fec_uncorr_blks;
};
struct otx2_vf_config {
struct otx2_nic *pf;
struct delayed_work link_event_work;
+ struct delayed_work ptp_info_work;
bool intf_down; /* interface was either configured or not */
+ u8 mac[ETH_ALEN];
+ u16 vlan;
+ int tx_vtag_idx;
};
struct flr_work {
@@ -209,14 +216,54 @@ struct refill_work {
struct otx2_nic *pf;
};
+struct otx2_mac_table {
+ u8 addr[ETH_ALEN];
+ u16 mcam_entry;
+ bool inuse;
+};
+
+struct otx2_ptp {
+ struct ptp_clock_info ptp_info;
+ struct ptp_clock *ptp_clock;
+ struct otx2_nic *nic;
+
+ struct cyclecounter cycle_counter;
+ struct timecounter time_counter;
+ bool ptp_en;
+};
+
+struct otx2_flow_config {
+ u16 entry[NPC_MAX_NONCONTIG_ENTRIES];
+ u32 nr_flows;
+ u32 vf_vlan_offset;
+ u32 ntuple_offset;
+ u32 unicast_offset;
+ u32 rx_vlan_offset;
+#define OTX2_PER_VF_VLAN_FLOWS 2 /* rx+tx per VF */
+#define OTX2_VF_VLAN_RX_INDEX 0
+#define OTX2_VF_VLAN_TX_INDEX 1
+ u32 ntuple_max_flows;
+ struct list_head flow_list;
+};
+
struct otx2_nic {
void __iomem *reg_base;
struct net_device *netdev;
void *iommu_domain;
+ u16 iommu_domain_type;
u16 max_frs;
+ u16 xtra_hdr;
u16 rbsize; /* Receive buffer size */
+#define OTX2_FLAG_RX_TSTAMP_ENABLED BIT_ULL(0)
+#define OTX2_FLAG_TX_TSTAMP_ENABLED BIT_ULL(1)
#define OTX2_FLAG_INTF_DOWN BIT_ULL(2)
+#define OTX2_FLAG_MCAM_ENTRIES_ALLOC BIT_ULL(3)
+#define OTX2_FLAG_NTUPLE_SUPPORT BIT_ULL(4)
+#define OTX2_FLAG_UCAST_FLTR_SUPPORT BIT_ULL(5)
+#define OTX2_FLAG_RX_VLAN_SUPPORT BIT_ULL(6)
+#define OTX2_FLAG_VF_VLAN_SUPPORT BIT_ULL(7)
+#define OTX2_FLAG_PF_SHUTDOWN BIT_ULL(8)
#define OTX2_FLAG_RX_PAUSE_ENABLED BIT_ULL(9)
#define OTX2_FLAG_TX_PAUSE_ENABLED BIT_ULL(10)
u64 flags;
@@ -251,6 +298,33 @@ struct otx2_nic {
/* Block address of NIX either BLKADDR_NIX0 or BLKADDR_NIX1 */
int nix_blkaddr;
+
+ bool entries_alloc;
+ struct otx2_flow_config *flow_cfg;
+
+ u8 hw_rx_tstamp;
+ u8 hw_tx_tstamp;
+ struct otx2_ptp *ptp;
+ struct otx2_mac_table *mac_table;
+ struct workqueue_struct *otx2_ndo_wq;
+ struct work_struct otx2_rx_mode_work;
+
+#define OTX2_PRIV_FLAG_PAM4 BIT(0)
+#define OTX2_PRIV_FLAG_EDSA_HDR BIT(1)
+#define OTX2_PRIV_FLAG_HIGIG2_HDR BIT(2)
+#define OTX2_IS_EDSA_ENABLED(flags) ((flags) & \
+ OTX2_PRIV_FLAG_EDSA_HDR)
+#define OTX2_IS_HIGIG2_ENABLED(flags) ((flags) & \
+ OTX2_PRIV_FLAG_HIGIG2_HDR)
+ u32 ethtool_flags;
+
+ /* extended DSA and EDSA header lengths are 8/16 bytes
+ * so take max length 16 bytes here
+ */
+#define OTX2_EDSA_HDR_LEN 16
+#define OTX2_HIGIG2_HDR_LEN 16
+#define OTX2_HW_TIMESTAMP_LEN 8
+ u32 addl_mtu;
};
static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
@@ -258,10 +332,14 @@ static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
return pdev->device == PCI_DEVID_OCTEONTX2_RVU_AFVF;
}
+static inline bool is_dev_post_96xx_C0(struct pci_dev *pdev)
+{
+ return (pdev->revision == 0x08) || (pdev->revision == 0x30);
+}
+
static inline bool is_96xx_A0(struct pci_dev *pdev)
{
- return (pdev->revision == 0x00) &&
- (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF);
+ return (pdev->revision == 0x00);
}
static inline bool is_96xx_B0(struct pci_dev *pdev)
@@ -549,6 +627,9 @@ static inline dma_addr_t otx2_dma_map_page(struct otx2_nic *pfvf,
{
dma_addr_t iova;
+ if (pfvf->iommu_domain_type == IOMMU_DOMAIN_IDENTITY)
+ return page_to_phys(page) + offset;
+
iova = dma_map_page_attrs(pfvf->dev, page,
offset, size, dir, DMA_ATTR_SKIP_CPU_SYNC);
if (unlikely(dma_mapping_error(pfvf->dev, iova)))
@@ -560,6 +641,9 @@ static inline void otx2_dma_unmap_page(struct otx2_nic *pfvf,
dma_addr_t addr, size_t size,
enum dma_data_direction dir)
{
+ if (pfvf->iommu_domain_type == IOMMU_DOMAIN_IDENTITY)
+ return;
+
dma_unmap_page_attrs(pfvf->dev, addr, size,
dir, DMA_ATTR_SKIP_CPU_SYNC);
}
@@ -614,6 +698,9 @@ void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
struct nix_txsch_alloc_rsp *rsp);
void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
struct cgx_stats_rsp *rsp);
+void mbox_handler_cgx_fec_stats(struct otx2_nic *pfvf,
+ struct cgx_fec_stats_rsp *rsp);
+void otx2_set_fec_stats_count(struct otx2_nic *pfvf);
void mbox_handler_nix_bp_enable(struct otx2_nic *pfvf,
struct nix_bp_cfg_rsp *rsp);
@@ -622,6 +709,7 @@ void otx2_get_dev_stats(struct otx2_nic *pfvf);
void otx2_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats);
void otx2_update_lmac_stats(struct otx2_nic *pfvf);
+void otx2_update_lmac_fec_stats(struct otx2_nic *pfvf);
int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx);
int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx);
void otx2_set_ethtool_ops(struct net_device *netdev);
@@ -631,4 +719,30 @@ int otx2_open(struct net_device *netdev);
int otx2_stop(struct net_device *netdev);
int otx2_set_real_num_queues(struct net_device *netdev,
int tx_queues, int rx_queues);
+int otx2_set_npc_parse_mode(struct otx2_nic *pfvf, bool unbind);
+
+/* MCAM filter related APIs */
+void otx2_do_set_rx_mode(struct work_struct *work);
+int otx2_add_macfilter(struct net_device *netdev, const u8 *mac);
+int otx2_mcam_flow_init(struct otx2_nic *pf);
+int otx2_alloc_mcam_entries(struct otx2_nic *pfvf);
+int otx2_del_macfilter(struct net_device *netdev, const u8 *mac);
+void otx2_mcam_flow_del(struct otx2_nic *pf);
+int otx2_destroy_ntuple_flows(struct otx2_nic *pf);
+int otx2_destroy_mcam_flows(struct otx2_nic *pfvf);
+int otx2_get_flow(struct otx2_nic *pfvf,
+ struct ethtool_rxnfc *nfc, u32 location);
+int otx2_get_all_flows(struct otx2_nic *pfvf,
+ struct ethtool_rxnfc *nfc, u32 *rule_locs);
+int otx2_add_flow(struct otx2_nic *pfvf,
+ struct ethtool_rx_flow_spec *fsp);
+int otx2_remove_flow(struct otx2_nic *pfvf, u32 location);
+int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
+ struct npc_install_flow_req *req);
+int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable);
+int otx2_enable_vf_vlan(struct otx2_nic *pf);
+int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf);
+int otx2smqvf_probe(struct otx2_nic *vf);
+int otx2smqvf_remove(struct otx2_nic *vf);
+
#endif /* OTX2_COMMON_H */
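One consequence of the new addl_mtu field: otx2_hw_set_mtu() now programs maxlen as mtu + OTX2_ETH_HLEN + addl_mtu, so with the EDSA or HiGig2 private flag enabled (addl_mtu presumably taken from OTX2_EDSA_HDR_LEN or OTX2_HIGIG2_HDR_LEN, both 16 here) an MTU of 1500 would translate into a MAC maxlen of 1500 + OTX2_ETH_HLEN + 16.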
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index d59f5a9c7273..c43e68a15f6b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -13,11 +13,24 @@
#include <linux/stddef.h>
#include <linux/etherdevice.h>
#include <linux/log2.h>
+#include <uapi/linux/net_tstamp.h>
#include "otx2_common.h"
+#include "otx2_ptp.h"
#define DRV_NAME "octeontx2-nicpf"
#define DRV_VF_NAME "octeontx2-nicvf"
+#define DRV_VF_VERSION "1.0"
+
+#define OTX2_DEFAULT_ACTION 0x1
+
+static struct cgx_fw_data *otx2_get_fwdata(struct otx2_nic *pfvf);
+
+static const char otx2_priv_flags_strings[][ETH_GSTRING_LEN] = {
+ "pam4",
+ "edsa",
+ "higig2",
+};
struct otx2_stat {
char name[ETH_GSTRING_LEN];
@@ -30,6 +43,9 @@ struct otx2_stat {
.index = offsetof(struct otx2_dev_stats, stat) / sizeof(u64), \
}
+#define OTX2_ETHTOOL_SUPPORTED_MODES 0x638CE23 /* 110001110001100111000100011 */
+#define OTX2_ETHTOOL_ALL_MODES (ULLONG_MAX)
+
static const struct otx2_stat otx2_dev_stats[] = {
OTX2_DEV_STAT(rx_ucast_frames),
OTX2_DEV_STAT(rx_bcast_frames),
@@ -99,6 +115,12 @@ static void otx2_get_strings(struct net_device *netdev, u32 sset, u8 *data)
struct otx2_nic *pfvf = netdev_priv(netdev);
int stats;
+ if (sset == ETH_SS_PRIV_FLAGS) {
+ memcpy(data, otx2_priv_flags_strings,
+ ARRAY_SIZE(otx2_priv_flags_strings) * ETH_GSTRING_LEN);
+ return;
+ }
+
if (sset != ETH_SS_STATS)
return;
@@ -126,6 +148,12 @@ static void otx2_get_strings(struct net_device *netdev, u32 sset, u8 *data)
strcpy(data, "reset_count");
data += ETH_GSTRING_LEN;
+ if (pfvf->linfo.fec) {
+ sprintf(data, "Fec Corrected Errors: ");
+ data += ETH_GSTRING_LEN;
+ sprintf(data, "Fec Uncorrected Errors: ");
+ data += ETH_GSTRING_LEN;
+ }
}
static void otx2_get_qset_stats(struct otx2_nic *pfvf,
@@ -158,11 +186,30 @@ static void otx2_get_qset_stats(struct otx2_nic *pfvf,
}
}
+static int otx2_get_phy_fec_stats(struct otx2_nic *pfvf)
+{
+ struct msg_req *req;
+ int rc = -EAGAIN;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_get_phy_fec_stats(&pfvf->mbox);
+ if (!req)
+ goto end;
+
+ if (!otx2_sync_mbox_msg(&pfvf->mbox))
+ rc = 0;
+end:
+ mutex_unlock(&pfvf->mbox.lock);
+ return rc;
+}
+
/* Get device and per queue statistics */
static void otx2_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
+ u64 fec_corr_blks, fec_uncorr_blks;
+ struct cgx_fw_data *rsp;
int stat;
otx2_get_dev_stats(pfvf);
@@ -181,12 +228,45 @@ static void otx2_get_ethtool_stats(struct net_device *netdev,
for (stat = 0; stat < CGX_TX_STATS_COUNT; stat++)
*(data++) = pfvf->hw.cgx_tx_stats[stat];
*(data++) = pfvf->reset_count;
+
+ if (pfvf->linfo.fec == OTX2_FEC_NONE)
+ return;
+
+ fec_corr_blks = pfvf->hw.cgx_fec_corr_blks;
+ fec_uncorr_blks = pfvf->hw.cgx_fec_uncorr_blks;
+
+ rsp = otx2_get_fwdata(pfvf);
+ if (!IS_ERR(rsp) && rsp->fwdata.phy.misc.has_fec_stats &&
+ !otx2_get_phy_fec_stats(pfvf)) {
+ /* Fetch fwdata again because it's been recently populated with
+ * latest PHY FEC stats.
+ */
+ rsp = otx2_get_fwdata(pfvf);
+ if (!IS_ERR(rsp)) {
+ struct fec_stats_s *p = &rsp->fwdata.phy.fec_stats;
+
+ if (pfvf->linfo.fec == OTX2_FEC_BASER) {
+ fec_corr_blks = p->brfec_corr_blks;
+ fec_uncorr_blks = p->brfec_uncorr_blks;
+ } else {
+ fec_corr_blks = p->rsfec_corr_cws;
+ fec_uncorr_blks = p->rsfec_uncorr_cws;
+ }
+ }
+ }
+
+ *(data++) = fec_corr_blks;
+ *(data++) = fec_uncorr_blks;
}
static int otx2_get_sset_count(struct net_device *netdev, int sset)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
- int qstats_count;
+ int qstats_count, fec_stats_count = 0;
+ bool if_up = netif_running(netdev);
+
+ if (sset == ETH_SS_PRIV_FLAGS)
+ return ARRAY_SIZE(otx2_priv_flags_strings);
if (sset != ETH_SS_STATS)
return -EINVAL;
@@ -194,8 +274,15 @@ static int otx2_get_sset_count(struct net_device *netdev, int sset)
qstats_count = otx2_n_queue_stats *
(pfvf->hw.rx_queues + pfvf->hw.tx_queues);
+ if (!if_up || !pfvf->linfo.fec) {
+ return otx2_n_dev_stats + otx2_n_drv_stats + qstats_count +
+ CGX_RX_STATS_COUNT + CGX_TX_STATS_COUNT + 1;
+ }
+ fec_stats_count = 2;
+ otx2_update_lmac_fec_stats(pfvf);
return otx2_n_dev_stats + otx2_n_drv_stats + qstats_count +
- CGX_RX_STATS_COUNT + CGX_TX_STATS_COUNT + 1;
+ CGX_RX_STATS_COUNT + CGX_TX_STATS_COUNT + 1 +
+ fec_stats_count;
}
/* Get no of queues device supports and current queue count */
@@ -542,6 +629,16 @@ static int otx2_get_rxnfc(struct net_device *dev,
nfc->data = pfvf->hw.rx_queues;
ret = 0;
break;
+ case ETHTOOL_GRXCLSRLCNT:
+ nfc->rule_cnt = pfvf->flow_cfg->nr_flows;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ ret = otx2_get_flow(pfvf, nfc, nfc->fs.location);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ ret = otx2_get_all_flows(pfvf, nfc, rules);
+ break;
case ETHTOOL_GRXFH:
return otx2_get_rss_hash_opts(pfvf, nfc);
default:
@@ -550,8 +647,155 @@ static int otx2_get_rxnfc(struct net_device *dev,
return ret;
}
+int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
+ struct npc_install_flow_req *req)
+{
+ struct ethtool_tcpip4_spec *l4_mask = &fsp->m_u.tcp_ip4_spec;
+ struct ethtool_tcpip4_spec *l4_hdr = &fsp->h_u.tcp_ip4_spec;
+ struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
+ struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
+ struct flow_msg *pmask = &req->mask;
+ struct flow_msg *pkt = &req->packet;
+ u32 flow_type;
+
+ flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
+ switch (flow_type) {
+ /* bits not set in mask are don't care */
+ case ETHER_FLOW:
+ if (!is_zero_ether_addr(eth_mask->h_source)) {
+ ether_addr_copy(pkt->smac, eth_hdr->h_source);
+ ether_addr_copy(pmask->smac, eth_mask->h_source);
+ req->features |= BIT_ULL(NPC_SMAC);
+ }
+ if (!is_zero_ether_addr(eth_mask->h_dest)) {
+ ether_addr_copy(pkt->dmac, eth_hdr->h_dest);
+ ether_addr_copy(pmask->dmac, eth_mask->h_dest);
+ req->features |= BIT_ULL(NPC_DMAC);
+ }
+ if (eth_mask->h_proto) {
+ memcpy(&pkt->etype, &eth_hdr->h_proto,
+ sizeof(pkt->etype));
+ memcpy(&pmask->etype, &eth_mask->h_proto,
+ sizeof(pmask->etype));
+ req->features |= BIT_ULL(NPC_ETYPE);
+ }
+ break;
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ if (l4_mask->ip4src) {
+ memcpy(&pkt->ip4src, &l4_hdr->ip4src,
+ sizeof(pkt->ip4src));
+ memcpy(&pmask->ip4src, &l4_mask->ip4src,
+ sizeof(pmask->ip4src));
+ req->features |= BIT_ULL(NPC_SIP_IPV4);
+ }
+ if (l4_mask->ip4dst) {
+ memcpy(&pkt->ip4dst, &l4_hdr->ip4dst,
+ sizeof(pkt->ip4dst));
+ memcpy(&pmask->ip4dst, &l4_mask->ip4dst,
+ sizeof(pmask->ip4dst));
+ req->features |= BIT_ULL(NPC_DIP_IPV4);
+ }
+ if (l4_mask->psrc) {
+ memcpy(&pkt->sport, &l4_hdr->psrc, sizeof(pkt->sport));
+ memcpy(&pmask->sport, &l4_mask->psrc,
+ sizeof(pmask->sport));
+ if (flow_type == UDP_V4_FLOW)
+ req->features |= BIT_ULL(NPC_SPORT_UDP);
+ else
+ req->features |= BIT_ULL(NPC_SPORT_TCP);
+ }
+ if (l4_mask->pdst) {
+ memcpy(&pkt->dport, &l4_hdr->pdst, sizeof(pkt->dport));
+ memcpy(&pmask->dport, &l4_mask->pdst,
+ sizeof(pmask->dport));
+ if (flow_type == UDP_V4_FLOW)
+ req->features |= BIT_ULL(NPC_DPORT_UDP);
+ else
+ req->features |= BIT_ULL(NPC_DPORT_TCP);
+ }
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+ if (fsp->flow_type & FLOW_EXT) {
+ if (fsp->m_ext.vlan_etype)
+ return -EINVAL;
+ if (fsp->m_ext.vlan_tci) {
+ if (fsp->m_ext.vlan_tci != cpu_to_be16(VLAN_VID_MASK))
+ return -EINVAL;
+ if (be16_to_cpu(fsp->h_ext.vlan_tci) >= VLAN_N_VID)
+ return -EINVAL;
+ memcpy(&pkt->vlan_tci, &fsp->h_ext.vlan_tci,
+ sizeof(pkt->vlan_tci));
+ memcpy(&pmask->vlan_tci, &fsp->m_ext.vlan_tci,
+ sizeof(pmask->vlan_tci));
+ req->features |= BIT_ULL(NPC_OUTER_VID);
+ }
+ /* Not Drop/Direct to queue but use action in default entry */
+ if (fsp->m_ext.data[1] &&
+ fsp->h_ext.data[1] == cpu_to_be32(OTX2_DEFAULT_ACTION))
+ req->op = NIX_RX_ACTION_DEFAULT;
+ }
+ if (fsp->flow_type & FLOW_MAC_EXT &&
+ !is_zero_ether_addr(fsp->m_ext.h_dest)) {
+ ether_addr_copy(pkt->dmac, fsp->h_ext.h_dest);
+ ether_addr_copy(pmask->dmac, fsp->m_ext.h_dest);
+ req->features |= BIT_ULL(NPC_DMAC);
+ }
+
+ if (!req->features)
+ return -ENOTSUPP;
+
+ return 0;
+}
+
static int otx2_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc)
{
+ bool ntuple = !!(dev->features & NETIF_F_NTUPLE);
+ struct otx2_nic *pfvf = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (nfc->cmd) {
+ case ETHTOOL_SRXFH:
+ ret = otx2_set_rss_hash_opts(pfvf, nfc);
+ break;
+ case ETHTOOL_SRXCLSRLINS:
+ if (netif_running(dev) && ntuple)
+ ret = otx2_add_flow(pfvf, &nfc->fs);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ if (netif_running(dev) && ntuple)
+ ret = otx2_remove_flow(pfvf, nfc->fs.location);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int otx2vf_get_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *nfc, u32 *rules)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (nfc->cmd) {
+ case ETHTOOL_GRXRINGS:
+ nfc->data = pfvf->hw.rx_queues;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXFH:
+ return otx2_get_rss_hash_opts(pfvf, nfc);
+ default:
+ break;
+ }
+ return ret;
+}
+
+static int otx2vf_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc)
+{
struct otx2_nic *pfvf = netdev_priv(dev);
int ret = -EOPNOTSUPP;
@@ -635,7 +879,8 @@ static int otx2_set_rxfh(struct net_device *dev, const u32 *indir,
otx2_set_rss_key(pfvf);
}
- otx2_set_rss_table(pfvf);
+ if (netif_running(dev))
+ otx2_set_rss_table(pfvf);
return 0;
}
@@ -663,7 +908,510 @@ static u32 otx2_get_link(struct net_device *netdev)
return pfvf->linfo.link_up;
}
-static const struct ethtool_ops otx2_ethtool_ops = {
+static int otx2_get_ts_info(struct net_device *netdev,
+ struct ethtool_ts_info *info)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+
+ if (!pfvf->ptp)
+ return ethtool_op_get_ts_info(netdev, info);
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->phc_index = otx2_ptp_clock_index(pfvf);
+
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
+static void otx2_get_fec_info(u64 index, int mode, struct ethtool_link_ksettings
+ *link_ksettings)
+{
+ switch (index) {
+ case OTX2_FEC_NONE:
+ if (mode)
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ advertising,
+ FEC_NONE);
+ else
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ supported,
+ FEC_NONE);
+ break;
+ case OTX2_FEC_BASER:
+ if (mode)
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ advertising,
+ FEC_BASER);
+ else
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ supported,
+ FEC_BASER);
+ break;
+ case OTX2_FEC_RS:
+ if (mode)
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ advertising,
+ FEC_RS);
+ else
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ supported,
+ FEC_RS);
+ break;
+ case OTX2_FEC_BASER | OTX2_FEC_RS:
+ if (mode) {
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ advertising,
+ FEC_BASER);
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ advertising,
+ FEC_RS);
+ } else {
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ supported,
+ FEC_BASER);
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ supported,
+ FEC_RS);
+ }
+
+ break;
+ }
+}
+
+static void otx2_get_link_mode_info(u64 index, int mode,
+ struct ethtool_link_ksettings
+ *link_ksettings)
+{
+ u64 ethtool_link_mode = 0;
+ int bit_position = 0;
+ u64 link_modes = 0;
+
+ int cgx_link_mode[29] = {0,
+ ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
+ ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
+ ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+ ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT
+ };
+ link_modes = index & OTX2_ETHTOOL_SUPPORTED_MODES;
+
+ for (bit_position = 0; link_modes; bit_position++, link_modes >>= 1) {
+ if (!(link_modes & 1))
+ continue;
+
+ if (bit_position == 0)
+ ethtool_link_mode = 0x3F;
+
+ ethtool_link_mode |= 1ULL << cgx_link_mode[bit_position];
+ if (mode)
+ *link_ksettings->link_modes.advertising |=
+ ethtool_link_mode;
+ else
+ *link_ksettings->link_modes.supported |=
+ ethtool_link_mode;
+ }
+}
+
+static struct cgx_fw_data *otx2_get_fwdata(struct otx2_nic *pfvf)
+{
+ struct cgx_fw_data *rsp = NULL;
+ struct msg_req *req;
+ int err = 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_get_aux_link_info(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (!err) {
+ rsp = (struct cgx_fw_data *)
+ otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+ } else {
+ rsp = ERR_PTR(err);
+ }
+
+ mutex_unlock(&pfvf->mbox.lock);
+ return rsp;
+}
+
+static int otx2_get_module_info(struct net_device *netdev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct cgx_fw_data *rsp;
+
+ rsp = otx2_get_fwdata(pfvf);
+ if (IS_ERR(rsp))
+ return PTR_ERR(rsp);
+
+ modinfo->type = rsp->fwdata.sfp_eeprom.sff_id;
+ modinfo->eeprom_len = SFP_EEPROM_SIZE;
+ return 0;
+}
+
+static int otx2_get_module_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *ee,
+ u8 *data)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct cgx_fw_data *rsp;
+
+ rsp = otx2_get_fwdata(pfvf);
+ if (IS_ERR(rsp))
+ return PTR_ERR(rsp);
+
+ memcpy(data, &rsp->fwdata.sfp_eeprom.buf, ee->len);
+
+ return 0;
+}
+
+static int otx2_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct cgx_fw_data *rsp = NULL;
+ u32 supported = 0;
+
+ cmd->base.duplex = pfvf->linfo.full_duplex;
+ cmd->base.speed = pfvf->linfo.speed;
+ cmd->base.autoneg = pfvf->linfo.an;
+ cmd->base.port = pfvf->linfo.port;
+
+ rsp = otx2_get_fwdata(pfvf);
+ if (IS_ERR(rsp))
+ return PTR_ERR(rsp);
+
+ if (rsp->fwdata.supported_an)
+ supported |= SUPPORTED_Autoneg;
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ otx2_get_link_mode_info(rsp->fwdata.advertised_link_modes, 1, cmd);
+ otx2_get_fec_info(rsp->fwdata.advertised_fec, 1, cmd);
+
+ otx2_get_link_mode_info(rsp->fwdata.supported_link_modes, 0, cmd);
+ otx2_get_fec_info(rsp->fwdata.supported_fec, 0, cmd);
+
+ return 0;
+}
+
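+/* Only a single advertised link mode may be requested at a time; the
+ * selection is forwarded to AF via a cgx_set_link_mode mailbox request.
+ */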
+static int otx2_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ unsigned long advertising = 0;
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct cgx_set_link_mode_req *req;
+ struct cgx_set_link_mode_rsp *rsp;
+ int err = 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_set_link_mode(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -EAGAIN;
+ }
+
+ advertising = (*cmd->link_modes.advertising) & (OTX2_ETHTOOL_ALL_MODES);
+ if (!(advertising & (advertising - 1)) &&
+ (advertising <= BIT_ULL(ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT))) {
+ req->args.mode = advertising;
+ } else {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -EINVAL;
+ }
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (!err) {
+ rsp = (struct cgx_set_link_mode_rsp *)
+ otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+ if (rsp->status)
+ err = rsp->status;
+ }
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+}
+
+static int otx2_get_fecparam(struct net_device *netdev,
+ struct ethtool_fecparam *fecparam)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct cgx_fw_data *rsp;
+ int fec[] = {
+ ETHTOOL_FEC_OFF,
+ ETHTOOL_FEC_BASER,
+ ETHTOOL_FEC_RS,
+ ETHTOOL_FEC_BASER | ETHTOOL_FEC_RS};
+#define FEC_MAX_INDEX 3
+ if (pfvf->linfo.fec < FEC_MAX_INDEX)
+ fecparam->active_fec = fec[pfvf->linfo.fec];
+
+ rsp = otx2_get_fwdata(pfvf);
+ if (IS_ERR(rsp))
+ return PTR_ERR(rsp);
+
+ if (rsp->fwdata.supported_fec <= FEC_MAX_INDEX) {
+ if (!rsp->fwdata.supported_fec)
+ fecparam->fec = ETHTOOL_FEC_NONE;
+ else
+ fecparam->fec = fec[rsp->fwdata.supported_fec];
+ }
+ return 0;
+}
+
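+/* Configure the FEC mode via the AF mailbox; the FEC block counters are
+ * reset whenever the mode actually changes.
+ */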
+static int otx2_set_fecparam(struct net_device *netdev,
+ struct ethtool_fecparam *fecparam)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct fec_mode *req, *rsp;
+ int err = 0, fec = 0;
+
+ switch (fecparam->fec) {
+ case ETHTOOL_FEC_OFF:
+ fec = OTX2_FEC_NONE;
+ break;
+ case ETHTOOL_FEC_RS:
+ fec = OTX2_FEC_RS;
+ break;
+ case ETHTOOL_FEC_BASER:
+ fec = OTX2_FEC_BASER;
+ break;
+ default:
+ fec = OTX2_FEC_NONE;
+ break;
+ }
+
+ if (fec == pfvf->linfo.fec)
+ return 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_set_fec_param(&pfvf->mbox);
+ if (!req) {
+ err = -EAGAIN;
+ goto end;
+ }
+ req->fec = fec;
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err)
+ goto end;
+
+ rsp = (struct fec_mode *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
+ 0, &req->hdr);
+ if (rsp->fec >= 0) {
+ pfvf->linfo.fec = rsp->fec;
+ pfvf->hw.cgx_fec_corr_blks = 0;
+ pfvf->hw.cgx_fec_uncorr_blks = 0;
+
+ } else {
+ err = rsp->fec;
+ }
+
+end:
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+}
+
+static u32 otx2_get_priv_flags(struct net_device *netdev)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct cgx_fw_data *rsp;
+
+ rsp = otx2_get_fwdata(pfvf);
+
+ if (IS_ERR(rsp)) {
+ pfvf->ethtool_flags &= ~OTX2_PRIV_FLAG_PAM4;
+ } else {
+ if (rsp->fwdata.phy.misc.mod_type)
+ pfvf->ethtool_flags |= OTX2_PRIV_FLAG_PAM4;
+ else
+ pfvf->ethtool_flags &= ~OTX2_PRIV_FLAG_PAM4;
+ }
+
+ return pfvf->ethtool_flags;
+}
+
+static int otx2_set_phy_mod_type(struct net_device *netdev, bool enable)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct cgx_phy_mod_type *req;
+ struct cgx_fw_data *fwd;
+ int rc = -EAGAIN;
+
+ fwd = otx2_get_fwdata(pfvf);
+ if (IS_ERR(fwd))
+ return -EAGAIN;
+
+ /* return here if the PHY does not support this feature */
+ if (!fwd->fwdata.phy.misc.can_change_mod_type)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_set_phy_mod_type(&pfvf->mbox);
+ if (!req)
+ goto end;
+
+ req->mod = enable;
+
+ if (!otx2_sync_mbox_msg(&pfvf->mbox))
+ rc = 0;
+end:
+ mutex_unlock(&pfvf->mbox.lock);
+ return rc;
+}
+
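+/* Ask AF to switch the NPC packet parsing mode (default, EDSA or HIGIG2)
+ * based on the currently enabled ethtool private flags.
+ */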
+int otx2_set_npc_parse_mode(struct otx2_nic *pfvf, bool unbind)
+{
+ struct npc_set_pkind *req;
+ int rc = -EAGAIN;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_set_pkind(&pfvf->mbox);
+ if (!req)
+ goto end;
+
+ if (unbind)
+ req->mode = OTX2_PRIV_FLAGS_DEFAULT;
+ else if (OTX2_IS_HIGIG2_ENABLED(pfvf->ethtool_flags))
+ req->mode = OTX2_PRIV_FLAGS_HIGIG;
+ else if (OTX2_IS_EDSA_ENABLED(pfvf->ethtool_flags))
+ req->mode = OTX2_PRIV_FLAGS_EDSA;
+ else
+ req->mode = OTX2_PRIV_FLAGS_DEFAULT;
+
+ req->dir = PKIND_RX;
+
+ /* request AF to change the pkind in both directions */
+ if (req->mode == OTX2_PRIV_FLAGS_HIGIG || unbind)
+ req->dir |= PKIND_TX;
+
+ if (!otx2_sync_mbox_msg(&pfvf->mbox))
+ rc = 0;
+ else
+ pfvf->ethtool_flags &= ~req->mode;
+end:
+ mutex_unlock(&pfvf->mbox.lock);
+ return rc;
+}
+
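+/* Enable/disable an additional header (EDSA or HIGIG2): adjust the extra
+ * MTU headroom and bounce the interface so the NPC parse mode is applied.
+ */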
+static int otx2_enable_addl_header(struct net_device *netdev, int bitpos,
+ u32 len, bool enable)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ bool if_up = netif_running(netdev);
+
+ if (enable) {
+ pfvf->ethtool_flags |= BIT(bitpos);
+ } else {
+ pfvf->ethtool_flags &= ~BIT(bitpos);
+ len = 0;
+ }
+
+ if (if_up)
+ otx2_stop(netdev);
+
+ /* Update max FRS so that additional hdrs are considered */
+ pfvf->addl_mtu = len;
+
+ /* If HIGIG2 mode is set, packets carry 16 bytes of extra header
+ * at the start which the stack does not need.
+ */
+ if (OTX2_IS_HIGIG2_ENABLED(pfvf->ethtool_flags))
+ pfvf->xtra_hdr = 16;
+ else
+ pfvf->xtra_hdr = 0;
+
+ /* NPC parse mode will be updated here */
+ if (if_up)
+ otx2_open(netdev);
+
+ return 0;
+}
+
+static int otx2_set_priv_flags(struct net_device *netdev, u32 new_flags)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ bool enable = false;
+ int bitnr, rc = 0;
+ u32 chg_flags;
+
+ /* Get latest PAM4 settings */
+ otx2_get_priv_flags(netdev);
+
+ chg_flags = new_flags ^ pfvf->ethtool_flags;
+ if (!chg_flags)
+ return 0;
+
+ /* Some flags are mutually exclusive, so allow only one change at a time */
+ if (hweight32(chg_flags) != 1)
+ return -EINVAL;
+
+ bitnr = ffs(chg_flags) - 1;
+ if (new_flags & BIT(bitnr))
+ enable = true;
+
+ switch (BIT(bitnr)) {
+ case OTX2_PRIV_FLAG_PAM4:
+ rc = otx2_set_phy_mod_type(netdev, enable);
+ break;
+ case OTX2_PRIV_FLAG_EDSA_HDR:
+ /* HIGIG & EDSA are mutual exclusive */
+ if (enable && OTX2_IS_HIGIG2_ENABLED(pfvf->ethtool_flags))
+ return -EINVAL;
+ return otx2_enable_addl_header(netdev, bitnr,
+ OTX2_EDSA_HDR_LEN, enable);
+ case OTX2_PRIV_FLAG_HIGIG2_HDR:
+ if (enable && OTX2_IS_EDSA_ENABLED(pfvf->ethtool_flags))
+ return -EINVAL;
+ return otx2_enable_addl_header(netdev, bitnr,
+ OTX2_HIGIG2_HDR_LEN, enable);
+ default:
+ break;
+ }
+
+ /* save the change */
+ if (!rc) {
+ if (enable)
+ pfvf->ethtool_flags |= BIT(bitnr);
+ else
+ pfvf->ethtool_flags &= ~BIT(bitnr);
+ }
+
+ return rc;
+}
+
+static struct ethtool_ops otx2_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES,
.get_link = otx2_get_link,
@@ -685,8 +1433,17 @@ static const struct ethtool_ops otx2_ethtool_ops = {
.set_rxfh = otx2_set_rxfh,
.get_msglevel = otx2_get_msglevel,
.set_msglevel = otx2_set_msglevel,
+ .get_ts_info = otx2_get_ts_info,
+ .get_link_ksettings = otx2_get_link_ksettings,
+ .set_link_ksettings = otx2_set_link_ksettings,
.get_pauseparam = otx2_get_pauseparam,
.set_pauseparam = otx2_set_pauseparam,
+ .get_fecparam = otx2_get_fecparam,
+ .set_fecparam = otx2_set_fecparam,
+ .get_module_info = otx2_get_module_info,
+ .get_module_eeprom = otx2_get_module_eeprom,
+ .get_priv_flags = otx2_get_priv_flags,
+ .set_priv_flags = otx2_set_priv_flags,
};
void otx2_set_ethtool_ops(struct net_device *netdev)
@@ -761,6 +1518,21 @@ static int otx2vf_get_sset_count(struct net_device *netdev, int sset)
return otx2_n_dev_stats + otx2_n_drv_stats + qstats_count + 1;
}
+static int otx2vf_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+
+ if (is_otx2_lbkvf(pfvf->pdev)) {
+ cmd->base.port = PORT_OTHER;
+ cmd->base.duplex = DUPLEX_FULL;
+ cmd->base.speed = SPEED_100000;
+ } else {
+ return otx2_get_link_ksettings(netdev, cmd);
+ }
+ return 0;
+}
+
static const struct ethtool_ops otx2vf_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES,
@@ -771,8 +1543,8 @@ static const struct ethtool_ops otx2vf_ethtool_ops = {
.get_sset_count = otx2vf_get_sset_count,
.set_channels = otx2_set_channels,
.get_channels = otx2_get_channels,
- .get_rxnfc = otx2_get_rxnfc,
- .set_rxnfc = otx2_set_rxnfc,
+ .get_rxnfc = otx2vf_get_rxnfc,
+ .set_rxnfc = otx2vf_set_rxnfc,
.get_rxfh_key_size = otx2_get_rxfh_key_size,
.get_rxfh_indir_size = otx2_get_rxfh_indir_size,
.get_rxfh = otx2_get_rxfh,
@@ -781,10 +1553,9 @@ static const struct ethtool_ops otx2vf_ethtool_ops = {
.set_ringparam = otx2_set_ringparam,
.get_coalesce = otx2_get_coalesce,
.set_coalesce = otx2_set_coalesce,
- .get_msglevel = otx2_get_msglevel,
- .set_msglevel = otx2_set_msglevel,
.get_pauseparam = otx2_get_pauseparam,
.set_pauseparam = otx2_set_pauseparam,
+ .get_link_ksettings = otx2vf_get_link_ksettings,
};
void otx2vf_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
new file mode 100644
index 000000000000..a05b7ba38b17
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
@@ -0,0 +1,631 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Physical Function ethernet driver
+ *
+ * Copyright (C) 2019 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "otx2_common.h"
+
+/* helper macros to support mcam flows */
+#define OTX2_MAX_NTUPLE_FLOWS 32
+#define OTX2_MAX_UNICAST_FLOWS 8
+#define OTX2_MAX_VLAN_FLOWS 1
+#define OTX2_MCAM_COUNT (OTX2_MAX_NTUPLE_FLOWS + \
+ OTX2_MAX_UNICAST_FLOWS + \
+ OTX2_MAX_VLAN_FLOWS)
+
+struct otx2_flow {
+ struct ethtool_rx_flow_spec flow_spec;
+ struct list_head list;
+ u32 location;
+ u16 entry;
+ bool is_vf;
+ int vf;
+};
+
+int otx2_mcam_flow_init(struct otx2_nic *pf)
+{
+ pf->flow_cfg = devm_kzalloc(pf->dev, sizeof(struct otx2_flow_config),
+ GFP_KERNEL);
+ if (!pf->flow_cfg)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&pf->flow_cfg->flow_list);
+
+ pf->flow_cfg->ntuple_max_flows = OTX2_MAX_NTUPLE_FLOWS;
+
+ pf->flags |= (OTX2_FLAG_NTUPLE_SUPPORT |
+ OTX2_FLAG_UCAST_FLTR_SUPPORT |
+ OTX2_FLAG_RX_VLAN_SUPPORT |
+ OTX2_FLAG_VF_VLAN_SUPPORT);
+
+ pf->mac_table = devm_kzalloc(pf->dev, sizeof(struct otx2_mac_table)
+ * OTX2_MAX_UNICAST_FLOWS, GFP_KERNEL);
+
+ if (!pf->mac_table)
+ return -ENOMEM;
+
+ /* register work queue for ndo callbacks */
+ pf->otx2_ndo_wq = create_singlethread_workqueue("otx2_ndo_work_queue");
+ if (!pf->otx2_ndo_wq)
+ return -ENOMEM;
+ INIT_WORK(&pf->otx2_rx_mode_work, otx2_do_set_rx_mode);
+ return 0;
+}
+
+void otx2_mcam_flow_del(struct otx2_nic *pf)
+{
+ otx2_destroy_mcam_flows(pf);
+ if (pf->otx2_ndo_wq) {
+ flush_workqueue(pf->otx2_ndo_wq);
+ destroy_workqueue(pf->otx2_ndo_wq);
+ }
+}
+
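+/* Allocate MCAM entries from AF for VF VLAN, ntuple, unicast and RX VLAN
+ * filters; if AF returns fewer entries, only ntuple filters are retained.
+ */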
+int otx2_alloc_mcam_entries(struct otx2_nic *pfvf)
+{
+ struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
+ netdev_features_t wanted = NETIF_F_HW_VLAN_STAG_RX |
+ NETIF_F_HW_VLAN_CTAG_RX;
+ struct npc_mcam_alloc_entry_req *req;
+ struct npc_mcam_alloc_entry_rsp *rsp;
+ int vf_vlan_max_flows;
+ int i;
+
+ mutex_lock(&pfvf->mbox.lock);
+ if (pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return 0;
+ }
+
+ req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ vf_vlan_max_flows = pfvf->total_vfs * OTX2_PER_VF_VLAN_FLOWS;
+ req->contig = false;
+ req->count = OTX2_MCAM_COUNT + vf_vlan_max_flows;
+
+ /* Send message to AF */
+ if (otx2_sync_mbox_msg(&pfvf->mbox)) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -EINVAL;
+ }
+
+ rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
+ (&pfvf->mbox.mbox, 0, &req->hdr);
+
+ if (rsp->count != req->count) {
+ netdev_info(pfvf->netdev, "number of rules truncated to %d\n",
+ rsp->count);
+ netdev_info(pfvf->netdev,
+ "Disabling RX VLAN offload due to non-availability of MCAM space\n");
+ /* support only ntuples here */
+ flow_cfg->ntuple_max_flows = rsp->count;
+ flow_cfg->ntuple_offset = 0;
+ pfvf->netdev->priv_flags &= ~IFF_UNICAST_FLT;
+ pfvf->flags &= ~OTX2_FLAG_UCAST_FLTR_SUPPORT;
+ pfvf->flags &= ~OTX2_FLAG_RX_VLAN_SUPPORT;
+ pfvf->flags &= ~OTX2_FLAG_VF_VLAN_SUPPORT;
+ pfvf->netdev->features &= ~wanted;
+ pfvf->netdev->hw_features &= ~wanted;
+ } else {
+ flow_cfg->vf_vlan_offset = 0;
+ flow_cfg->ntuple_offset = flow_cfg->vf_vlan_offset +
+ vf_vlan_max_flows;
+ flow_cfg->unicast_offset = flow_cfg->ntuple_offset +
+ OTX2_MAX_NTUPLE_FLOWS;
+ flow_cfg->rx_vlan_offset = flow_cfg->unicast_offset +
+ OTX2_MAX_UNICAST_FLOWS;
+ }
+
+ for (i = 0; i < rsp->count; i++)
+ flow_cfg->entry[i] = rsp->entry_list[i];
+
+ pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
+ mutex_unlock(&pfvf->mbox.lock);
+
+ return 0;
+}
+
+/* On success adds an MCAM entry;
+ * on failure the caller enables promiscuous mode.
+ */
+static int otx2_do_add_macfilter(struct otx2_nic *pf, const u8 *mac)
+{
+ struct otx2_flow_config *flow_cfg = pf->flow_cfg;
+ struct npc_install_flow_req *req;
+ int err, i;
+
+ if (!(pf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC)) {
+ err = otx2_alloc_mcam_entries(pf);
+ if (err)
+ return err;
+ }
+
+ if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
+ return -ENOMEM;
+
+ /* don't have free MCAM entries or the UC list exceeds what was allotted */
+ if (netdev_uc_count(pf->netdev) > OTX2_MAX_UNICAST_FLOWS)
+ return -ENOMEM;
+
+ mutex_lock(&pf->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
+ if (!req) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ /* unicast offset starts at 32; entries 0..31 are reserved for ntuple */
+ for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
+ if (pf->mac_table[i].inuse)
+ continue;
+ ether_addr_copy(pf->mac_table[i].addr, mac);
+ pf->mac_table[i].inuse = true;
+ pf->mac_table[i].mcam_entry =
+ flow_cfg->entry[i + flow_cfg->unicast_offset];
+ req->entry = pf->mac_table[i].mcam_entry;
+ break;
+ }
+
+ ether_addr_copy(req->packet.dmac, mac);
+ u64_to_ether_addr(0xffffffffffffull, req->mask.dmac);
+ req->features = BIT_ULL(NPC_DMAC);
+ req->channel = pf->hw.rx_chan_base;
+ req->intf = NIX_INTF_RX;
+ req->op = NIX_RX_ACTION_DEFAULT;
+ req->set_cntr = 1;
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ mutex_unlock(&pf->mbox.lock);
+
+ return err;
+}
+
+int otx2_add_macfilter(struct net_device *netdev, const u8 *mac)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ int err;
+
+ err = otx2_do_add_macfilter(pf, mac);
+ if (err) {
+ netdev->flags |= IFF_PROMISC;
+ return err;
+ }
+ return 0;
+}
+
+static bool otx2_get_mcamentry_for_mac(struct otx2_nic *pf, const u8 *mac,
+ int *mcam_entry)
+{
+ int i;
+
+ for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
+ if (!pf->mac_table[i].inuse)
+ continue;
+
+ if (ether_addr_equal(pf->mac_table[i].addr, mac)) {
+ *mcam_entry = pf->mac_table[i].mcam_entry;
+ pf->mac_table[i].inuse = false;
+ return true;
+ }
+ }
+ return false;
+}
+
+int otx2_del_macfilter(struct net_device *netdev, const u8 *mac)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ struct npc_delete_flow_req *req;
+ int err, mcam_entry;
+
+ /* check if an MCAM entry exists for the given MAC */
+ if (!otx2_get_mcamentry_for_mac(pf, mac, &mcam_entry))
+ return 0;
+
+ mutex_lock(&pf->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
+ if (!req) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+ req->entry = mcam_entry;
+ /* Send message to AF */
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ mutex_unlock(&pf->mbox.lock);
+
+ return err;
+}
+
+static struct otx2_flow *otx2_find_flow(struct otx2_nic *pfvf, u32 location)
+{
+ struct otx2_flow *iter;
+
+ list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
+ if (iter->location == location)
+ return iter;
+ }
+
+ return NULL;
+}
+
+static void otx2_add_flow_to_list(struct otx2_nic *pfvf, struct otx2_flow *flow)
+{
+ struct list_head *head = &pfvf->flow_cfg->flow_list;
+ struct otx2_flow *iter;
+
+ list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
+ if (iter->location > flow->location)
+ break;
+ head = &iter->list;
+ }
+
+ list_add(&flow->list, head);
+}
+
+int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
+ u32 location)
+{
+ struct otx2_flow *iter;
+
+ if (location >= pfvf->flow_cfg->ntuple_max_flows)
+ return -EINVAL;
+
+ list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
+ if (iter->location == location) {
+ nfc->fs = iter->flow_spec;
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+
+int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
+ u32 *rule_locs)
+{
+ u32 location = 0;
+ int idx = 0;
+ int err = 0;
+
+ nfc->data = pfvf->flow_cfg->ntuple_max_flows;
+ while ((!err || err == -ENOENT) && idx < nfc->rule_cnt) {
+ err = otx2_get_flow(pfvf, nfc, location);
+ if (!err)
+ rule_locs[idx++] = location;
+ location++;
+ }
+
+ return err;
+}
+
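+/* Build and send an npc_install_flow request for the given flow spec;
+ * RX_CLS_FLOW_DISC maps to a drop action, otherwise the target queue and
+ * VF are taken from the ring cookie.
+ */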
+static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
+{
+ u64 ring_cookie = flow->flow_spec.ring_cookie;
+ struct npc_install_flow_req *req;
+ int err, vf = 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ err = otx2_prepare_flow_request(&flow->flow_spec, req);
+ if (err) {
+ /* free the allocated msg above */
+ otx2_mbox_reset(&pfvf->mbox.mbox, 0);
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+ }
+
+ req->entry = flow->entry;
+ req->intf = NIX_INTF_RX;
+ req->set_cntr = 1;
+ req->channel = pfvf->hw.rx_chan_base;
+ if (ring_cookie == RX_CLS_FLOW_DISC) {
+ req->op = NIX_RX_ACTIONOP_DROP;
+ } else {
+ /* change to unicast only if the default entry's action was not
+ * requested by the user
+ */
+ if (req->op != NIX_RX_ACTION_DEFAULT)
+ req->op = NIX_RX_ACTIONOP_UCAST;
+ req->index = ethtool_get_flow_spec_ring(ring_cookie);
+ vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
+ if (vf > pci_num_vf(pfvf->pdev)) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -EINVAL;
+ }
+ }
+
+ /* ethtool ring_cookie encodes the target VF as (VF + 1) */
+ if (vf) {
+ req->vf = vf;
+ flow->is_vf = true;
+ flow->vf = vf;
+ }
+
+ /* Send message to AF */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+}
+
+int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rx_flow_spec *fsp)
+{
+ struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
+ u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
+ struct otx2_flow *flow;
+ bool new = false;
+ int err;
+
+ if (ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
+ return -EINVAL;
+
+ if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC)) {
+ err = otx2_alloc_mcam_entries(pfvf);
+ if (err)
+ return err;
+ }
+
+ if (fsp->location >= flow_cfg->ntuple_max_flows)
+ return -EINVAL;
+
+ flow = otx2_find_flow(pfvf, fsp->location);
+ if (!flow) {
+ flow = kzalloc(sizeof(*flow), GFP_ATOMIC);
+ if (!flow)
+ return -ENOMEM;
+ flow->location = fsp->location;
+ flow->entry = flow_cfg->entry[flow_cfg->ntuple_offset +
+ flow->location];
+ new = true;
+ }
+ /* struct copy */
+ flow->flow_spec = *fsp;
+
+ err = otx2_add_flow_msg(pfvf, flow);
+ if (err) {
+ if (new)
+ kfree(flow);
+ return err;
+ }
+
+ /* add the newly installed flow to the list */
+ if (new) {
+ otx2_add_flow_to_list(pfvf, flow);
+ flow_cfg->nr_flows++;
+ }
+
+ return 0;
+}
+
+static int otx2_remove_flow_msg(struct otx2_nic *pfvf, u16 entry, bool all)
+{
+ struct npc_delete_flow_req *req;
+ int err;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->entry = entry;
+ if (all)
+ req->all = 1;
+
+ /* Send message to AF */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+}
+
+int otx2_remove_flow(struct otx2_nic *pfvf, u32 location)
+{
+ struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
+ struct otx2_flow *flow;
+ int err;
+
+ if (location >= flow_cfg->ntuple_max_flows)
+ return -EINVAL;
+
+ flow = otx2_find_flow(pfvf, location);
+ if (!flow)
+ return -ENOENT;
+
+ err = otx2_remove_flow_msg(pfvf, flow->entry, false);
+ if (err)
+ return err;
+
+ list_del(&flow->list);
+ kfree(flow);
+ flow_cfg->nr_flows--;
+
+ return 0;
+}
+
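+/* Delete all installed ntuple MCAM rules and free the corresponding
+ * entries from the flow list.
+ */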
+int otx2_destroy_ntuple_flows(struct otx2_nic *pfvf)
+{
+ struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
+ struct npc_delete_flow_req *req;
+ struct otx2_flow *iter, *tmp;
+ int err;
+
+ if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC))
+ return 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->start = flow_cfg->entry[flow_cfg->ntuple_offset];
+ req->end = flow_cfg->entry[flow_cfg->ntuple_offset +
+ flow_cfg->ntuple_max_flows - 1];
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+
+ list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
+ list_del(&iter->list);
+ kfree(iter);
+ flow_cfg->nr_flows--;
+ }
+ return err;
+}
+
+int otx2_destroy_mcam_flows(struct otx2_nic *pfvf)
+{
+ struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
+ struct npc_mcam_free_entry_req *req;
+ struct otx2_flow *iter, *tmp;
+ int err;
+
+ if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC))
+ return 0;
+
+ /* remove all flows */
+ err = otx2_remove_flow_msg(pfvf, 0, true);
+ if (err)
+ return err;
+
+ list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
+ list_del(&iter->list);
+ kfree(iter);
+ flow_cfg->nr_flows--;
+ }
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->all = 1;
+ /* Send message to AF to free MCAM entries */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+ }
+
+ pfvf->flags &= ~OTX2_FLAG_MCAM_ENTRIES_ALLOC;
+ mutex_unlock(&pfvf->mbox.lock);
+
+ return 0;
+}
+
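+/* Install the MCAM rule matching outer VLAN + DMAC so that RX VLAN
+ * strip/capture can be applied to received packets.
+ */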
+int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf)
+{
+ struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
+ struct npc_install_flow_req *req;
+ int err;
+
+ if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC))
+ return -ENOMEM;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->entry = flow_cfg->entry[flow_cfg->rx_vlan_offset];
+ req->intf = NIX_INTF_RX;
+ ether_addr_copy(req->packet.dmac, pfvf->netdev->dev_addr);
+ u64_to_ether_addr(0xffffffffffffull, req->mask.dmac);
+ req->channel = pfvf->hw.rx_chan_base;
+ req->op = NIX_RX_ACTION_DEFAULT;
+ req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC);
+ req->vtag0_valid = true;
+ req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE0;
+
+ /* Send message to AF */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+}
+
+static int otx2_delete_rxvlan_offload_flow(struct otx2_nic *pfvf)
+{
+ struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
+ struct npc_delete_flow_req *req;
+ int err;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->entry = flow_cfg->entry[flow_cfg->rx_vlan_offset];
+ /* Send message to AF */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+}
+
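+/* Enable or disable RX VLAN offload: install/delete the VLAN MCAM rule
+ * and then configure VTAG strip/capture via the NIX mailbox.
+ */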
+int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable)
+{
+ struct nix_vtag_config *req;
+ struct mbox_msghdr *rsp_hdr;
+ int err;
+
+ /* Don't have enough MCAM entries */
+ if (!(pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT))
+ return -ENOMEM;
+
+ if (enable) {
+ err = otx2_install_rxvlan_offload_flow(pf);
+ if (err)
+ return err;
+ } else {
+ err = otx2_delete_rxvlan_offload_flow(pf);
+ if (err)
+ return err;
+ }
+
+ mutex_lock(&pf->mbox.lock);
+ req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
+ if (!req) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ /* config strip, capture and size */
+ req->vtag_size = VTAGSIZE_T4;
+ req->cfg_type = 1; /* rx vlan cfg */
+ req->rx.vtag_type = NIX_AF_LFX_RX_VTAG_TYPE0;
+ req->rx.strip_vtag = enable;
+ req->rx.capture_vtag = enable;
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ if (err) {
+ mutex_unlock(&pf->mbox.lock);
+ return err;
+ }
+
+ rsp_hdr = otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
+ if (IS_ERR(rsp_hdr)) {
+ mutex_unlock(&pf->mbox.lock);
+ return PTR_ERR(rsp_hdr);
+ }
+
+ mutex_unlock(&pf->mbox.lock);
+ return rsp_hdr->rc;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 75a8c407e815..9a499e449b7a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -21,6 +21,7 @@
#include "otx2_common.h"
#include "otx2_txrx.h"
#include "otx2_struct.h"
+#include "otx2_ptp.h"
#define DRV_NAME "octeontx2-nicpf"
#define DRV_STRING "Marvell OcteonTX2 NIC Physical Function Driver"
@@ -331,6 +332,9 @@ static void otx2_queue_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
}
}
+static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable);
+static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable);
+
static void otx2_forward_msg_pfvf(struct otx2_mbox_dev *mdev,
struct otx2_mbox *pfvf_mbox, void *bbuf_base,
int devid)
@@ -749,6 +753,26 @@ static void otx2_process_pfaf_mbox_msg(struct otx2_nic *pf,
return;
}
+ /* message response headed to a VF */
+ devid = msg->pcifunc & RVU_PFVF_FUNC_MASK;
+ if (devid) {
+ struct otx2_vf_config *config = &pf->vf_configs[devid - 1];
+ struct delayed_work *dwork;
+
+ switch (msg->id) {
+ case MBOX_MSG_NIX_LF_START_RX:
+ config->intf_down = false;
+ dwork = &config->link_event_work;
+ schedule_delayed_work(dwork, msecs_to_jiffies(100));
+ break;
+ case MBOX_MSG_NIX_LF_STOP_RX:
+ config->intf_down = true;
+ break;
+ }
+
+ return;
+ }
+
switch (msg->id) {
case MBOX_MSG_READY:
pf->pcifunc = msg->pcifunc;
@@ -772,6 +796,9 @@ static void otx2_process_pfaf_mbox_msg(struct otx2_nic *pf,
case MBOX_MSG_CGX_STATS:
mbox_handler_cgx_stats(pf, (struct cgx_stats_rsp *)msg);
break;
+ case MBOX_MSG_CGX_FEC_STATS:
+ mbox_handler_cgx_fec_stats(pf, (struct cgx_fec_stats_rsp *)msg);
+ break;
default:
if (msg->rc)
dev_err(pf->dev,
@@ -854,6 +881,27 @@ int otx2_mbox_up_handler_cgx_link_event(struct otx2_nic *pf,
return 0;
}
+int otx2_mbox_up_handler_cgx_ptp_rx_info(struct otx2_nic *pf,
+ struct cgx_ptp_rx_info_msg *msg,
+ struct msg_rsp *rsp)
+{
+ int i;
+
+ pf->ptp->ptp_en = msg->ptp_en;
+
+ /* notify VFs about ptp event */
+ for (i = 0; i < pci_num_vf(pf->pdev); i++) {
+ struct otx2_vf_config *config = &pf->vf_configs[i];
+ struct delayed_work *dwork = &config->ptp_info_work;
+
+ if (config->intf_down)
+ continue;
+
+ schedule_delayed_work(dwork, msecs_to_jiffies(100));
+ }
+ return 0;
+}
+
static int otx2_process_mbox_msg_up(struct otx2_nic *pf,
struct mbox_msghdr *req)
{
@@ -1262,6 +1310,7 @@ static void otx2_free_sq_res(struct otx2_nic *pf)
qmem_free(pf->dev, sq->tso_hdrs);
kfree(sq->sg);
kfree(sq->sqb_ptrs);
+ qmem_free(pf->dev, sq->timestamps);
}
}
@@ -1271,6 +1320,7 @@ static int otx2_init_hw_resources(struct otx2_nic *pf)
struct otx2_hw *hw = &pf->hw;
struct msg_req *req;
int err = 0, lvl;
+ struct nix_lf_free_req *free_req;
/* Set required NPA LF's pool counts
* Auras and Pools are used in a 1:1 mapping,
@@ -1281,7 +1331,9 @@ static int otx2_init_hw_resources(struct otx2_nic *pf)
hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt;
/* Get the size of receive buffers to allocate */
- pf->rbsize = RCV_FRAG_LEN(pf->netdev->mtu + OTX2_ETH_HLEN);
+ pf->rbsize = RCV_FRAG_LEN(pf->netdev->mtu + OTX2_ETH_HLEN +
+ OTX2_HW_TIMESTAMP_LEN + pf->addl_mtu +
+ pf->xtra_hdr);
mutex_lock(&mbox->lock);
/* NPA init */
@@ -1347,8 +1399,8 @@ err_free_rq_ptrs:
otx2_aura_pool_free(pf);
err_free_nix_lf:
mutex_lock(&mbox->lock);
- req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
- if (req) {
+ free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
+ if (free_req) {
if (otx2_sync_mbox_msg(mbox))
dev_err(pf->dev, "%s failed to free nixlf\n", __func__);
}
@@ -1367,6 +1419,7 @@ exit:
static void otx2_free_hw_resources(struct otx2_nic *pf)
{
struct otx2_qset *qset = &pf->qset;
+ struct nix_lf_free_req *free_req;
struct mbox *mbox = &pf->mbox;
struct otx2_cq_queue *cq;
struct msg_req *req;
@@ -1407,8 +1460,11 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
mutex_lock(&mbox->lock);
/* Reset NIX LF */
- req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
- if (req) {
+ free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
+ if (free_req) {
+ free_req->flags = NIX_LF_DISABLE_FLOWS;
+ if (!(pf->flags & OTX2_FLAG_PF_SHUTDOWN))
+ free_req->flags |= NIX_LF_DONT_FREE_TX_VTAG;
if (otx2_sync_mbox_msg(mbox))
dev_err(pf->dev, "%s failed to free nixlf\n", __func__);
}
@@ -1555,9 +1611,39 @@ int otx2_open(struct net_device *netdev)
if (pf->linfo.link_up && !(pf->pcifunc & RVU_PFVF_FUNC_MASK))
otx2_handle_link_event(pf);
+ if ((pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT) ||
+ (pf->flags & OTX2_FLAG_VF_VLAN_SUPPORT)) {
+ if (!(pf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC)) {
+ err = otx2_alloc_mcam_entries(pf);
+ if (err)
+ goto err_free_cints;
+ }
+ }
+
/* Restore pause frame settings */
otx2_config_pause_frm(pf);
+ if (pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
+ otx2_enable_rxvlan(pf, true);
+
+ /* When reinitializing, re-enable timestamping if it was enabled before */
+ if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED) {
+ pf->flags &= ~OTX2_FLAG_TX_TSTAMP_ENABLED;
+ otx2_config_hw_tx_tstamp(pf, true);
+ }
+ if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED) {
+ pf->flags &= ~OTX2_FLAG_RX_TSTAMP_ENABLED;
+ otx2_config_hw_rx_tstamp(pf, true);
+ }
+
+ /* Set NPC parsing mode, skip LBKs */
+ if (!is_otx2_lbkvf(pf->pdev))
+ otx2_set_npc_parse_mode(pf, false);
+
err = otx2_rxtx_enable(pf, true);
if (err)
goto err_free_cints;
@@ -1676,6 +1762,17 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
}
+static netdev_features_t otx2_fix_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ features |= NETIF_F_HW_VLAN_STAG_RX;
+ else
+ features &= ~NETIF_F_HW_VLAN_STAG_RX;
+
+ return features;
+}
+
static void otx2_set_rx_mode(struct net_device *netdev)
{
struct otx2_nic *pf = netdev_priv(netdev);
@@ -1683,7 +1780,7 @@ static void otx2_set_rx_mode(struct net_device *netdev)
queue_work(pf->otx2_wq, &pf->rx_mode_work);
}
-static void otx2_do_set_rx_mode(struct work_struct *work)
+void otx2_do_set_rx_mode(struct work_struct *work)
{
struct otx2_nic *pf = container_of(work, struct otx2_nic, rx_mode_work);
struct net_device *netdev = pf->netdev;
@@ -1692,6 +1789,10 @@ static void otx2_do_set_rx_mode(struct work_struct *work)
if (!(netdev->flags & IFF_UP))
return;
+ /* Write unicast addresses to MCAM entries or delete them from the MCAM */
+ if (netdev->priv_flags & IFF_UNICAST_FLT)
+ __dev_uc_sync(netdev, otx2_add_macfilter, otx2_del_macfilter);
+
mutex_lock(&pf->mbox.lock);
req = otx2_mbox_alloc_msg_nix_set_rx_mode(&pf->mbox);
if (!req) {
@@ -1701,7 +1802,6 @@ static void otx2_do_set_rx_mode(struct work_struct *work)
req->mode = NIX_RX_MODE_UCAST;
- /* We don't support MAC address filtering yet */
if (netdev->flags & IFF_PROMISC)
req->mode |= NIX_RX_MODE_PROMISC;
else if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST))
@@ -1716,10 +1816,19 @@ static int otx2_set_features(struct net_device *netdev,
{
netdev_features_t changed = features ^ netdev->features;
struct otx2_nic *pf = netdev_priv(netdev);
+ bool ntuple = !!(features & NETIF_F_NTUPLE);
if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev))
return otx2_cgx_config_loopback(pf,
features & NETIF_F_LOOPBACK);
+
+ if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(netdev))
+ return otx2_enable_rxvlan(pf,
+ features & NETIF_F_HW_VLAN_CTAG_RX);
+
+ if ((changed & NETIF_F_NTUPLE) && !ntuple)
+ otx2_destroy_ntuple_flows(pf);
+
return 0;
}
@@ -1738,16 +1847,425 @@ static void otx2_reset_task(struct work_struct *work)
rtnl_unlock();
}
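+/* Enable/disable CGX RX timestamping via the AF mailbox and track the
+ * state in pfvf->flags.
+ */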
+static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable)
+{
+ struct msg_req *req;
+ int err;
+
+ if (pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED && enable)
+ return 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+ if (enable)
+ req = otx2_mbox_alloc_msg_cgx_ptp_rx_enable(&pfvf->mbox);
+ else
+ req = otx2_mbox_alloc_msg_cgx_ptp_rx_disable(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+ }
+
+ mutex_unlock(&pfvf->mbox.lock);
+ if (enable)
+ pfvf->flags |= OTX2_FLAG_RX_TSTAMP_ENABLED;
+ else
+ pfvf->flags &= ~OTX2_FLAG_RX_TSTAMP_ENABLED;
+ return 0;
+}
+
+static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable)
+{
+ struct msg_req *req;
+ int err;
+
+ if (pfvf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED && enable)
+ return 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+ if (enable)
+ req = otx2_mbox_alloc_msg_nix_lf_ptp_tx_enable(&pfvf->mbox);
+ else
+ req = otx2_mbox_alloc_msg_nix_lf_ptp_tx_disable(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+ }
+
+ mutex_unlock(&pfvf->mbox.lock);
+ if (enable)
+ pfvf->flags |= OTX2_FLAG_TX_TSTAMP_ENABLED;
+ else
+ pfvf->flags &= ~OTX2_FLAG_TX_TSTAMP_ENABLED;
+ return 0;
+}
+
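+/* SIOCSHWTSTAMP handler: only HWTSTAMP_FILTER_NONE and FILTER_ALL are
+ * supported on RX; any PTP filter request is upgraded to FILTER_ALL.
+ */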
+static int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct hwtstamp_config config;
+
+ if (!pfvf->ptp)
+ return -ENODEV;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ /* reserved for future extensions */
+ if (config.flags)
+ return -EINVAL;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ otx2_config_hw_tx_tstamp(pfvf, false);
+ break;
+ case HWTSTAMP_TX_ON:
+ otx2_config_hw_tx_tstamp(pfvf, true);
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ otx2_config_hw_rx_tstamp(pfvf, false);
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ otx2_config_hw_rx_tstamp(pfvf, true);
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
+{
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return otx2_config_hwtstamp(netdev, req);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int otx2_do_set_vf_mac(struct otx2_nic *pf, int vf, const u8 *mac)
+{
+ struct otx2_flow_config *flow_cfg = pf->flow_cfg;
+ struct npc_install_flow_req *req;
+ struct otx2_vf_config *config;
+ u32 idx;
+ int err;
+
+ mutex_lock(&pf->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
+ if (!req) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ ether_addr_copy(req->packet.dmac, mac);
+ u64_to_ether_addr(0xffffffffffffull, req->mask.dmac);
+ req->features = BIT_ULL(NPC_DMAC);
+ req->channel = pf->hw.rx_chan_base;
+ req->intf = NIX_INTF_RX;
+ req->default_rule = 1;
+ req->append = 1;
+ req->vf = vf + 1;
+ req->op = NIX_RX_ACTION_DEFAULT;
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ if (err)
+ goto out;
+
+ /* update vf vlan rx flow entry */
+ req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
+ if (!req) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_RX_INDEX);
+ req->entry = flow_cfg->entry[flow_cfg->vf_vlan_offset + idx];
+ config = &pf->vf_configs[vf];
+ req->packet.vlan_tci = htons(config->vlan);
+ req->mask.vlan_tci = htons(VLAN_VID_MASK);
+ /* af fills the destination mac addr */
+ u64_to_ether_addr(0xffffffffffffull, req->mask.dmac);
+ req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC);
+ req->channel = pf->hw.rx_chan_base;
+ req->intf = NIX_INTF_RX;
+ req->vf = vf + 1;
+ req->op = NIX_RX_ACTION_DEFAULT;
+ req->vtag0_valid = true;
+ req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7;
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+out:
+ mutex_unlock(&pf->mbox.lock);
+ return err;
+}
+
+static int otx2_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ struct pci_dev *pdev = pf->pdev;
+ struct otx2_vf_config *config;
+ int ret = 0;
+
+ if (!netif_running(netdev))
+ return -EAGAIN;
+
+ if (vf >= pci_num_vf(pdev))
+ return -EINVAL;
+
+ if (!is_valid_ether_addr(mac))
+ return -EINVAL;
+
+ config = &pf->vf_configs[vf];
+ ether_addr_copy(config->mac, mac);
+
+ ret = otx2_do_set_vf_mac(pf, vf, mac);
+ if (ret == 0)
+ dev_info(&pdev->dev, "Reload VF driver to apply the changes\n");
+
+ return ret;
+}
+
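+/* Program a VF VLAN: free any previous TX VTAG entry, then install the
+ * RX/TX MCAM rules and a fresh TX VTAG insertion entry for the new VLAN.
+ */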
+static int otx2_do_set_vf_vlan(struct otx2_nic *pf, int vf, u16 vlan, u8 qos,
+ u16 proto)
+{
+ struct otx2_flow_config *flow_cfg = pf->flow_cfg;
+ struct nix_vtag_config_rsp *vtag_rsp;
+ struct npc_delete_flow_req *del_req;
+ struct nix_vtag_config *vtag_req;
+ struct npc_install_flow_req *req;
+ struct otx2_vf_config *config;
+ int err = 0;
+ u32 idx;
+
+ config = &pf->vf_configs[vf];
+
+ if (!vlan && !config->vlan)
+ goto out;
+
+ mutex_lock(&pf->mbox.lock);
+
+ /* free old tx vtag entry */
+ if (config->vlan) {
+ vtag_req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
+ if (!vtag_req) {
+ err = -ENOMEM;
+ goto out;
+ }
+ vtag_req->cfg_type = 0;
+ vtag_req->tx.free_vtag0 = 1;
+ vtag_req->tx.vtag0_idx = config->tx_vtag_idx;
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ if (err)
+ goto out;
+ }
+
+ if (!vlan && config->vlan) {
+ /* rx */
+ del_req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
+ if (!del_req) {
+ err = -ENOMEM;
+ goto out;
+ }
+ idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_RX_INDEX);
+ del_req->entry =
+ flow_cfg->entry[flow_cfg->vf_vlan_offset + idx];
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ if (err)
+ goto out;
+
+ /* tx */
+ del_req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
+ if (!del_req) {
+ err = -ENOMEM;
+ goto out;
+ }
+ idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_TX_INDEX);
+ del_req->entry =
+ flow_cfg->entry[flow_cfg->vf_vlan_offset + idx];
+ err = otx2_sync_mbox_msg(&pf->mbox);
+
+ goto out;
+ }
+
+ /* rx */
+ req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
+ if (!req) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_RX_INDEX);
+ req->entry = flow_cfg->entry[flow_cfg->vf_vlan_offset + idx];
+ req->packet.vlan_tci = htons(vlan);
+ req->mask.vlan_tci = htons(VLAN_VID_MASK);
+ /* af fills the destination mac addr */
+ u64_to_ether_addr(0xffffffffffffull, req->mask.dmac);
+ req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC);
+ req->channel = pf->hw.rx_chan_base;
+ req->intf = NIX_INTF_RX;
+ req->vf = vf + 1;
+ req->op = NIX_RX_ACTION_DEFAULT;
+ req->vtag0_valid = true;
+ req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7;
+ req->set_cntr = 1;
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ if (err)
+ goto out;
+
+ /* tx */
+ vtag_req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
+ if (!vtag_req) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /* configure tx vtag params */
+ vtag_req->vtag_size = VTAGSIZE_T4;
+ vtag_req->cfg_type = 0; /* tx vlan cfg */
+ vtag_req->tx.cfg_vtag0 = 1;
+ vtag_req->tx.vtag0 = (ntohs(proto) << 16) | vlan;
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ if (err)
+ goto out;
+
+ vtag_rsp = (struct nix_vtag_config_rsp *)otx2_mbox_get_rsp
+ (&pf->mbox.mbox, 0, &vtag_req->hdr);
+ if (IS_ERR(vtag_rsp)) {
+ err = PTR_ERR(vtag_rsp);
+ goto out;
+ }
+ config->tx_vtag_idx = vtag_rsp->vtag0_idx;
+
+ req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
+ if (!req) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ u64_to_ether_addr(0x0ull, req->mask.dmac);
+ idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_TX_INDEX);
+ req->entry = flow_cfg->entry[flow_cfg->vf_vlan_offset + idx];
+ req->features = BIT_ULL(NPC_DMAC);
+ req->channel = pf->hw.tx_chan_base;
+ req->intf = NIX_INTF_TX;
+ req->vf = vf + 1;
+ req->op = NIX_TX_ACTIONOP_UCAST_DEFAULT;
+ req->vtag0_def = vtag_rsp->vtag0_idx;
+ req->vtag0_op = 0x1;
+ req->set_cntr = 1;
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+out:
+ config->vlan = vlan;
+ mutex_unlock(&pf->mbox.lock);
+ return err;
+}
+
+static int otx2_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
+ __be16 proto)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ struct pci_dev *pdev = pf->pdev;
+
+ if (!netif_running(netdev))
+ return -EAGAIN;
+
+ if (vf >= pci_num_vf(pdev))
+ return -EINVAL;
+
+ /* qos is currently unsupported */
+ if (vlan >= VLAN_N_VID || qos)
+ return -EINVAL;
+
+ if (proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
+ if (!(pf->flags & OTX2_FLAG_VF_VLAN_SUPPORT))
+ return -EOPNOTSUPP;
+
+ return otx2_do_set_vf_vlan(pf, vf, vlan, qos, proto);
+}
+
+static int otx2_get_vf_config(struct net_device *netdev, int vf,
+ struct ifla_vf_info *ivi)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ struct pci_dev *pdev = pf->pdev;
+ struct otx2_vf_config *config;
+
+ if (vf >= pci_num_vf(pdev))
+ return -EINVAL;
+
+ config = &pf->vf_configs[vf];
+ ivi->vf = vf;
+ ether_addr_copy(ivi->mac, config->mac);
+ ivi->vlan = config->vlan;
+
+ return 0;
+}
+
+static netdev_features_t
+otx2_features_check(struct sk_buff *skb, struct net_device *dev,
+ netdev_features_t features)
+{
+ return features;
+}
+
static const struct net_device_ops otx2_netdev_ops = {
.ndo_open = otx2_open,
.ndo_stop = otx2_stop,
.ndo_start_xmit = otx2_xmit,
+ .ndo_fix_features = otx2_fix_features,
.ndo_set_mac_address = otx2_set_mac_address,
.ndo_change_mtu = otx2_change_mtu,
.ndo_set_rx_mode = otx2_set_rx_mode,
.ndo_set_features = otx2_set_features,
.ndo_tx_timeout = otx2_tx_timeout,
.ndo_get_stats64 = otx2_get_stats64,
+ .ndo_do_ioctl = otx2_ioctl,
+ .ndo_set_vf_mac = otx2_set_vf_mac,
+ .ndo_set_vf_vlan = otx2_set_vf_vlan,
+ .ndo_get_vf_config = otx2_get_vf_config,
+ .ndo_features_check = otx2_features_check,
};
static int otx2_wq_init(struct otx2_nic *pf)
@@ -1920,6 +2438,9 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Assign default mac address */
otx2_get_mac_from_af(netdev);
+ /* Don't check for error. Proceed without ptp */
+ otx2_ptp_init(pf);
+
/* NPA's pool is a stack to which SW frees buffer pointers via Aura.
* HW allocates buffer pointer from stack and uses it for DMA'ing
* ingress packet. In some scenarios HW can free back allocated buffer
@@ -1932,16 +2453,30 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
* walking through the translation tables.
*/
pf->iommu_domain = iommu_get_domain_for_dev(dev);
+ if (pf->iommu_domain)
+ pf->iommu_domain_type =
+ ((struct iommu_domain *)pf->iommu_domain)->type;
netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
NETIF_F_IPV6_CSUM | NETIF_F_RXHASH |
NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6);
netdev->features |= netdev->hw_features;
+ /* Support TSO on tag interface */
+ netdev->vlan_features |= netdev->features;
- netdev->hw_features |= NETIF_F_LOOPBACK | NETIF_F_RXALL;
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_STAG_RX;
+
+ netdev->features |= netdev->hw_features;
+
+ netdev->hw_features |= NETIF_F_LOOPBACK | NETIF_F_RXALL | NETIF_F_NTUPLE;
+ netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->gso_max_segs = OTX2_MAX_GSO_SEGS;
- netdev->watchdog_timeo = OTX2_TX_TIMEOUT;
+ netdev->watchdog_timeo = netdev->watchdog_timeo ?
+ netdev->watchdog_timeo : OTX2_TX_TIMEOUT;
netdev->netdev_ops = &otx2_netdev_ops;
@@ -1952,13 +2487,17 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = register_netdev(netdev);
if (err) {
dev_err(dev, "Failed to register netdevice\n");
- goto err_detach_rsrc;
+ goto err_ptp_destroy;
}
err = otx2_wq_init(pf);
if (err)
goto err_unreg_netdev;
+ err = otx2_mcam_flow_init(pf);
+ if (err)
+ goto err_unreg_netdev;
+
otx2_set_ethtool_ops(netdev);
/* Enable link notifications */
@@ -1972,6 +2511,8 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err_unreg_netdev:
unregister_netdev(netdev);
+err_ptp_destroy:
+ otx2_ptp_destroy(pf);
err_detach_rsrc:
otx2_detach_resources(&pf->mbox);
err_disable_mbox_intr:
@@ -2016,6 +2557,34 @@ static void otx2_vf_link_event_task(struct work_struct *work)
otx2_sync_mbox_up_msg(&pf->mbox_pfvf[0], vf_idx);
}
+static void otx2_vf_ptp_info_task(struct work_struct *work)
+{
+ struct cgx_ptp_rx_info_msg *req;
+ struct otx2_vf_config *config;
+ struct mbox_msghdr *msghdr;
+ struct otx2_nic *pf;
+ int vf_idx;
+
+ config = container_of(work, struct otx2_vf_config,
+ ptp_info_work.work);
+ vf_idx = config - config->pf->vf_configs;
+ pf = config->pf;
+
+ msghdr = otx2_mbox_alloc_msg_rsp(&pf->mbox_pfvf[0].mbox_up, vf_idx,
+ sizeof(*req), sizeof(struct msg_rsp));
+ if (!msghdr) {
+ dev_err(pf->dev, "Failed to create VF%d link event\n", vf_idx);
+ return;
+ }
+
+ req = (struct cgx_ptp_rx_info_msg *)msghdr;
+ req->hdr.id = MBOX_MSG_CGX_PTP_RX_INFO;
+ req->hdr.sig = OTX2_MBOX_REQ_SIG;
+ req->ptp_en = pf->ptp->ptp_en;
+
+ otx2_sync_mbox_up_msg(&pf->mbox_pfvf[0], vf_idx);
+}
+
static int otx2_sriov_enable(struct pci_dev *pdev, int numvfs)
{
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -2043,6 +2612,8 @@ static int otx2_sriov_enable(struct pci_dev *pdev, int numvfs)
pf->vf_configs[i].intf_down = true;
INIT_DELAYED_WORK(&pf->vf_configs[i].link_event_work,
otx2_vf_link_event_task);
+ INIT_DELAYED_WORK(&pf->vf_configs[i].ptp_info_work,
+ otx2_vf_ptp_info_task);
}
ret = otx2_pf_flr_init(pf, numvfs);
@@ -2083,8 +2654,10 @@ static int otx2_sriov_disable(struct pci_dev *pdev)
pci_disable_sriov(pdev);
- for (i = 0; i < pci_num_vf(pdev); i++)
+ for (i = 0; i < pci_num_vf(pdev); i++) {
cancel_delayed_work_sync(&pf->vf_configs[i].link_event_work);
+ cancel_delayed_work_sync(&pf->vf_configs[i].ptp_info_work);
+ }
kfree(pf->vf_configs);
otx2_disable_flr_me_intr(pf);
@@ -2113,6 +2686,15 @@ static void otx2_remove(struct pci_dev *pdev)
pf = netdev_priv(netdev);
+ pf->flags |= OTX2_FLAG_PF_SHUTDOWN;
+
+ if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED)
+ otx2_config_hw_tx_tstamp(pf, false);
+ if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED)
+ otx2_config_hw_rx_tstamp(pf, false);
+
+ otx2_set_npc_parse_mode(pf, true);
+
cancel_work_sync(&pf->reset_task);
/* Disable link notifications */
otx2_cgx_config_linkevents(pf, false);
@@ -2122,6 +2704,8 @@ static void otx2_remove(struct pci_dev *pdev)
if (pf->otx2_wq)
destroy_workqueue(pf->otx2_wq);
+ otx2_ptp_destroy(pf);
+ otx2_mcam_flow_del(pf);
otx2_detach_resources(&pf->mbox);
otx2_disable_mbox_intr(pf);
otx2_pfaf_mbox_destroy(pf);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
new file mode 100644
index 000000000000..5b82e84897e5
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 PTP support for ethernet driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "otx2_common.h"
+#include "otx2_ptp.h"
+
+static int otx2_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm)
+{
+ struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
+ ptp_info);
+ struct ptp_req *req;
+ int err;
+
+ if (!ptp->nic)
+ return -ENODEV;
+
+ req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox);
+ if (!req)
+ return -ENOMEM;
+
+ req->op = PTP_OP_ADJFINE;
+ req->scaled_ppm = scaled_ppm;
+
+ err = otx2_sync_mbox_msg(&ptp->nic->mbox);
+ if (err)
+ return err;
+
+ return 0;
+}
+
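+/* Cyclecounter read callback: fetch the current PTP clock value from AF;
+ * returns 0 on any mailbox failure.
+ */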
+static u64 ptp_cc_read(const struct cyclecounter *cc)
+{
+ struct otx2_ptp *ptp = container_of(cc, struct otx2_ptp, cycle_counter);
+ struct ptp_req *req;
+ struct ptp_rsp *rsp;
+ int err;
+
+ if (!ptp->nic)
+ return 0;
+
+ req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox);
+ if (!req)
+ return 0;
+
+ req->op = PTP_OP_GET_CLOCK;
+
+ err = otx2_sync_mbox_msg(&ptp->nic->mbox);
+ if (err)
+ return 0;
+
+ rsp = (struct ptp_rsp *)otx2_mbox_get_rsp(&ptp->nic->mbox.mbox, 0,
+ &req->hdr);
+ if (IS_ERR(rsp))
+ return 0;
+
+ return rsp->clk;
+}
+
+static int otx2_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
+{
+ struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
+ ptp_info);
+
+ mutex_lock(&ptp->nic->mbox.lock);
+ timecounter_adjtime(&ptp->time_counter, delta);
+ mutex_unlock(&ptp->nic->mbox.lock);
+
+ return 0;
+}
+
+static int otx2_ptp_gettime(struct ptp_clock_info *ptp_info,
+ struct timespec64 *ts)
+{
+ struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
+ ptp_info);
+ u64 nsec;
+
+ mutex_lock(&ptp->nic->mbox.lock);
+ nsec = timecounter_read(&ptp->time_counter);
+ mutex_unlock(&ptp->nic->mbox.lock);
+
+ *ts = ns_to_timespec64(nsec);
+
+ return 0;
+}
+
+static int otx2_ptp_settime(struct ptp_clock_info *ptp_info,
+ const struct timespec64 *ts)
+{
+ struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
+ ptp_info);
+ u64 nsec;
+
+ nsec = timespec64_to_ns(ts);
+
+ mutex_lock(&ptp->nic->mbox.lock);
+ timecounter_init(&ptp->time_counter, &ptp->cycle_counter, nsec);
+ mutex_unlock(&ptp->nic->mbox.lock);
+
+ return 0;
+}
+
+static int otx2_ptp_enable(struct ptp_clock_info *ptp_info,
+ struct ptp_clock_request *rq, int on)
+{
+ return -EOPNOTSUPP;
+}
+
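+/* Probe the PTP block via AF, set up the cyclecounter/timecounter pair
+ * and register the PTP clock device.
+ */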
+int otx2_ptp_init(struct otx2_nic *pfvf)
+{
+ struct otx2_ptp *ptp_ptr;
+ struct cyclecounter *cc;
+ struct ptp_req *req;
+ int err;
+
+ mutex_lock(&pfvf->mbox.lock);
+ /* check if PTP block is available */
+ req = otx2_mbox_alloc_msg_ptp_op(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->op = PTP_OP_GET_CLOCK;
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+ }
+ mutex_unlock(&pfvf->mbox.lock);
+
+ ptp_ptr = kzalloc(sizeof(*ptp_ptr), GFP_KERNEL);
+ if (!ptp_ptr) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ ptp_ptr->nic = pfvf;
+
+ cc = &ptp_ptr->cycle_counter;
+ cc->read = ptp_cc_read;
+ cc->mask = CYCLECOUNTER_MASK(64);
+ cc->mult = 1;
+ cc->shift = 0;
+
+ timecounter_init(&ptp_ptr->time_counter, &ptp_ptr->cycle_counter,
+ ktime_to_ns(ktime_get_real()));
+
+ ptp_ptr->ptp_info = (struct ptp_clock_info) {
+ .owner = THIS_MODULE,
+ .name = "OcteonTX2 PTP",
+ .max_adj = 1000000000ull,
+ .n_ext_ts = 0,
+ .n_pins = 0,
+ .pps = 0,
+ .adjfine = otx2_ptp_adjfine,
+ .adjtime = otx2_ptp_adjtime,
+ .gettime64 = otx2_ptp_gettime,
+ .settime64 = otx2_ptp_settime,
+ .enable = otx2_ptp_enable,
+ };
+
+ ptp_ptr->ptp_clock = ptp_clock_register(&ptp_ptr->ptp_info, pfvf->dev);
+ if (IS_ERR(ptp_ptr->ptp_clock)) {
+ err = PTR_ERR(ptp_ptr->ptp_clock);
+ kfree(ptp_ptr);
+ goto error;
+ }
+
+ pfvf->ptp = ptp_ptr;
+
+error:
+ return err;
+}
+
+void otx2_ptp_destroy(struct otx2_nic *pfvf)
+{
+ struct otx2_ptp *ptp = pfvf->ptp;
+
+ if (!ptp)
+ return;
+
+ ptp_clock_unregister(ptp->ptp_clock);
+ kfree(ptp);
+ pfvf->ptp = NULL;
+}
+
+int otx2_ptp_clock_index(struct otx2_nic *pfvf)
+{
+ if (!pfvf->ptp)
+ return -ENODEV;
+
+ return ptp_clock_index(pfvf->ptp->ptp_clock);
+}
+
+int otx2_ptp_tstamp2time(struct otx2_nic *pfvf, u64 tstamp, u64 *tsns)
+{
+ if (!pfvf->ptp)
+ return -ENODEV;
+
+ *tsns = timecounter_cyc2time(&pfvf->ptp->time_counter, tstamp);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h
new file mode 100644
index 000000000000..9ddb82cec195
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTx2 PTP support for ethernet driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef OTX2_PTP_H
+#define OTX2_PTP_H
+
+int otx2_ptp_init(struct otx2_nic *pfvf);
+void otx2_ptp_destroy(struct otx2_nic *pfvf);
+
+int otx2_ptp_clock_index(struct otx2_nic *pfvf);
+int otx2_ptp_tstamp2time(struct otx2_nic *pfvf, u64 tstamp, u64 *tsns);
+
+#endif
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
index 867f646e0802..ff9ee9debb94 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
@@ -157,4 +157,17 @@
#define LMT_LF_LMTLINEX(a) (LMT_LFBASE | 0x000 | (a) << 12)
#define LMT_LF_LMTCANCEL (LMT_LFBASE | 0x400)
+/* RVU VF registers */
+#define RVU_VF_VFPF_MBOX0 (0x00000)
+#define RVU_VF_VFPF_MBOX1 (0x00008)
+#define RVU_VF_VFPF_MBOXX(a) (0x00 | (a) << 3)
+#define RVU_VF_INT (0x20)
+#define RVU_VF_INT_W1S (0x28)
+#define RVU_VF_INT_ENA_W1S (0x30)
+#define RVU_VF_INT_ENA_W1C (0x38)
+#define RVU_VF_BLOCK_ADDRX_DISC(a) (0x200 | (a) << 3)
+#define RVU_VF_MSIX_VECX_ADDR(a) (0x000 | (a) << 4)
+#define RVU_VF_MSIX_VECX_CTL(a) (0x008 | (a) << 4)
+#define RVU_VF_MSIX_PBAX(a) (0xF0000 | (a) << 3)
+
#endif /* OTX2_REG_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_smqvf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_smqvf.c
new file mode 100644
index 000000000000..8199a3141f0a
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_smqvf.c
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Virtual Function ethernet driver
+ *
+ * Copyright (C) 2019 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/etherdevice.h>
+#include <linux/pci.h>
+
+#include "otx2_common.h"
+#include "otx2_reg.h"
+#include "otx2_struct.h"
+#include "rvu_fixes.h"
+
+/* serialize device removal and xmit */
+DEFINE_MUTEX(remove_lock);
+
+static char pkt_data[64] = { 0x00, 0x0f, 0xb7, 0x11, 0xa6, 0x87, 0x02, 0xe0,
+ 0x28, 0xa5, 0xf6, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x32, 0x00, 0x00, 0x00, 0x00, 0x04, 0x11,
+ 0xee, 0x53, 0x50, 0x50, 0x50, 0x02, 0x14, 0x14,
+ 0x14, 0x02, 0x10, 0x00, 0x10, 0x01, 0x00, 0x1e,
+ 0x00, 0x00, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66,
+ 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e,
+ 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76 };
+
+static struct sk_buff *the_skb;
+static struct otx2_nic *the_smqvf;
+static u16 drop_entry = 0xFFFF;
+
+static bool is_otx2_smqvf(struct otx2_nic *vf)
+{
+ if (vf->pcifunc == RVU_SMQVF_PCIFUNC &&
+ (is_96xx_A0(vf->pdev)))
+ return true;
+
+ return false;
+}
+
+static void otx2_sqe_flush(struct otx2_snd_queue *sq, int size)
+{
+ u64 status;
+
+ /* Packet data stores should finish before SQE is flushed to HW */
+ dma_wmb();
+
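+	/* Copy the SQE to the LMT line and issue the LMTST; retry until
+	 * the flush reports a nonzero status.
+	 */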
+ do {
+ memcpy(sq->lmt_addr, sq->sqe_base, size);
+ status = otx2_lmt_flush(sq->io_addr);
+ } while (status == 0);
+
+ sq->head++;
+ sq->head &= (sq->sqe_cnt - 1);
+}
+
+static int otx2_ctx_update(struct otx2_nic *vf, u16 qidx)
+{
+ struct nix_aq_enq_req *sq_aq, *rq_aq, *cq_aq;
+
+	/* Do not link a CQ to the SQ, and disable the RQ and CQ */
+ sq_aq = otx2_mbox_alloc_msg_nix_aq_enq(&vf->mbox);
+ if (!sq_aq)
+ return -ENOMEM;
+
+ sq_aq->sq.cq_ena = 0;
+ sq_aq->sq_mask.cq_ena = 1;
+ sq_aq->qidx = qidx;
+ sq_aq->ctype = NIX_AQ_CTYPE_SQ;
+ sq_aq->op = NIX_AQ_INSTOP_WRITE;
+
+ rq_aq = otx2_mbox_alloc_msg_nix_aq_enq(&vf->mbox);
+ if (!rq_aq)
+ return -ENOMEM;
+
+ rq_aq->rq.ena = 0;
+ rq_aq->rq_mask.ena = 1;
+ rq_aq->qidx = qidx;
+ rq_aq->ctype = NIX_AQ_CTYPE_RQ;
+ rq_aq->op = NIX_AQ_INSTOP_WRITE;
+
+ cq_aq = otx2_mbox_alloc_msg_nix_aq_enq(&vf->mbox);
+ if (!cq_aq)
+ return -ENOMEM;
+
+ cq_aq->cq.ena = 0;
+ cq_aq->cq_mask.ena = 1;
+ cq_aq->qidx = qidx;
+ cq_aq->ctype = NIX_AQ_CTYPE_CQ;
+ cq_aq->op = NIX_AQ_INSTOP_WRITE;
+
+ return otx2_sync_mbox_msg(&vf->mbox);
+}
+
+void otx2smqvf_xmit(void)
+{
+ struct otx2_snd_queue *sq;
+ int i, size;
+
+ mutex_lock(&remove_lock);
+
+ if (!the_smqvf) {
+ mutex_unlock(&remove_lock);
+ return;
+ }
+
+ sq = &the_smqvf->qset.sq[0];
+ /* Min. set of send descriptors required to send packets */
+ size = sizeof(struct nix_sqe_hdr_s) + sizeof(struct nix_sqe_sg_s) +
+ sizeof(struct nix_sqe_ext_s) + sizeof(u64);
+
+ for (i = 0; i < 256; i++)
+ otx2_sqe_flush(sq, size);
+
+ mutex_unlock(&remove_lock);
+}
+EXPORT_SYMBOL(otx2smqvf_xmit);
+
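+/* Build a dummy skb from pkt_data, allocate an MCAM entry and install a
+ * TX rule that drops all packets in NPC, ask the AF to stop RX on this
+ * LF, then prime SQ0 with the first SQE.
+ */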
+static int otx2smqvf_install_flow(struct otx2_nic *vf)
+{
+ struct npc_mcam_alloc_entry_req *alloc_req;
+ struct npc_mcam_free_entry_req *free_req;
+ struct npc_install_flow_req *install_req;
+ struct npc_mcam_alloc_entry_rsp *rsp;
+ struct msg_req *msg;
+ int err, qid;
+ size_t size;
+ void *data;
+
+ size = SKB_DATA_ALIGN(64 + OTX2_ALIGN) + NET_SKB_PAD +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+ err = -ENOMEM;
+
+ data = kzalloc(size, GFP_KERNEL);
+ if (!data)
+ return err;
+
+ memcpy(data, &pkt_data, 64);
+
+ the_skb = build_skb(data, 0);
+ the_skb->len = 64;
+
+ for (qid = 0; qid < vf->hw.tx_queues; qid++) {
+ err = otx2_ctx_update(vf, qid);
+		/* If something is wrong with Q0, treat it as an error */
+ if (err && !qid)
+ goto err_free_mem;
+ }
+
+ mutex_lock(&vf->mbox.lock);
+
+ alloc_req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&vf->mbox);
+ if (!alloc_req) {
+ mutex_unlock(&vf->mbox.lock);
+ goto err_free_mem;
+ }
+ alloc_req->count = 1;
+ alloc_req->contig = true;
+
+ /* Send message to AF */
+ if (otx2_sync_mbox_msg(&vf->mbox)) {
+ err = -EINVAL;
+ mutex_unlock(&vf->mbox.lock);
+ goto err_free_mem;
+ }
+ mutex_unlock(&vf->mbox.lock);
+
+ rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
+ (&vf->mbox.mbox, 0, &alloc_req->hdr);
+ drop_entry = rsp->entry;
+
+ mutex_lock(&vf->mbox.lock);
+
+ /* Send messages to drop Tx packets at NPC and stop Rx traffic */
+ install_req = otx2_mbox_alloc_msg_npc_install_flow(&vf->mbox);
+ if (!install_req) {
+ err = -ENOMEM;
+ mutex_unlock(&vf->mbox.lock);
+ goto err_free_entry;
+ }
+
+ u64_to_ether_addr(0x0ull, install_req->mask.dmac);
+ install_req->entry = drop_entry;
+ install_req->features = BIT_ULL(NPC_DMAC);
+ install_req->intf = NIX_INTF_TX;
+ install_req->op = NIX_TX_ACTIONOP_DROP;
+ install_req->set_cntr = 1;
+
+ msg = otx2_mbox_alloc_msg_nix_lf_stop_rx(&vf->mbox);
+ if (!msg) {
+ mutex_unlock(&vf->mbox.lock);
+ goto err_free_entry;
+ }
+
+ /* Send message to AF */
+ if (otx2_sync_mbox_msg(&vf->mbox)) {
+ err = -EINVAL;
+ mutex_unlock(&vf->mbox.lock);
+ goto err_free_entry;
+ }
+ mutex_unlock(&vf->mbox.lock);
+
+ otx2_sq_append_skb(vf->netdev, &vf->qset.sq[0], the_skb, 0);
+
+ return 0;
+
+err_free_entry:
+ mutex_lock(&vf->mbox.lock);
+ free_req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&vf->mbox);
+ if (!free_req) {
+ dev_err(vf->dev, "Could not allocate msg for freeing entry\n");
+ } else {
+ free_req->entry = drop_entry;
+ WARN_ON(otx2_sync_mbox_msg(&vf->mbox));
+ }
+ mutex_unlock(&vf->mbox.lock);
+err_free_mem:
+ kfree_skb(the_skb);
+ drop_entry = 0xFFFF;
+ return err;
+}
+
+int otx2smqvf_probe(struct otx2_nic *vf)
+{
+ int err;
+
+ if (!is_otx2_smqvf(vf))
+ return -EPERM;
+
+ err = otx2_open(vf->netdev);
+ if (err)
+ return -EINVAL;
+
+ /* Disable QINT interrupts because we do not use a CQ for SQ and
+ * drop TX packets intentionally
+ */
+ otx2_write64(vf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0));
+
+ err = otx2smqvf_install_flow(vf);
+ if (err) {
+ otx2_stop(vf->netdev);
+ return -EINVAL;
+ }
+
+ the_smqvf = vf;
+
+ return 0;
+}
+
+int otx2smqvf_remove(struct otx2_nic *vf)
+{
+ struct npc_mcam_free_entry_req *free_req;
+ struct npc_delete_flow_req *del_req;
+
+ if (!is_otx2_smqvf(vf))
+ return -EPERM;
+
+ mutex_lock(&remove_lock);
+ kfree_skb(the_skb);
+ the_smqvf = NULL;
+ the_skb = NULL;
+ mutex_unlock(&remove_lock);
+
+ mutex_lock(&vf->mbox.lock);
+ del_req = otx2_mbox_alloc_msg_npc_delete_flow(&vf->mbox);
+ free_req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&vf->mbox);
+ if (!del_req || !free_req) {
+ dev_err(vf->dev, "Could not allocate msg for freeing entry\n");
+ } else {
+ del_req->entry = drop_entry;
+ free_req->entry = drop_entry;
+ WARN_ON(otx2_sync_mbox_msg(&vf->mbox));
+ }
+ mutex_unlock(&vf->mbox.lock);
+
+ otx2_stop(vf->netdev);
+ drop_entry = 0xFFFF;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index 45abe0cd0e7b..360fbf682ca9 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -16,6 +16,7 @@
#include "otx2_common.h"
#include "otx2_struct.h"
#include "otx2_txrx.h"
+#include "otx2_ptp.h"
#define CQE_ADDR(CQ, idx) ((CQ)->cqe_base + ((CQ)->cqe_size * (idx)))
@@ -83,17 +84,38 @@ static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
struct nix_send_comp_s *snd_comp = &cqe->comp;
struct sk_buff *skb = NULL;
struct sg_list *sg;
+ int sqe_id;
if (unlikely(snd_comp->status) && netif_msg_tx_err(pfvf))
net_err_ratelimited("%s: TX%d: Error in send CQ status:%x\n",
pfvf->netdev->name, cq->cint_idx,
snd_comp->status);
- sg = &sq->sg[snd_comp->sqe_id];
+ sqe_id = snd_comp->sqe_id;
+ sg = &sq->sg[sqe_id];
+
skb = (struct sk_buff *)sg->skb;
if (unlikely(!skb))
return;
+ if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
+ u64 timestamp = ((u64 *)sq->timestamps->base)[sqe_id];
+
+ if (timestamp != 1) {
+ u64 tsns;
+ int err;
+
+ err = otx2_ptp_tstamp2time(pfvf, timestamp, &tsns);
+ if (!err) {
+ struct skb_shared_hwtstamps ts;
+
+ memset(&ts, 0, sizeof(ts));
+ ts.hwtstamp = ns_to_ktime(tsns);
+ skb_tstamp_tx(skb, &ts);
+ }
+ }
+ }
+
*tx_bytes += skb->len;
(*tx_pkts)++;
otx2_dma_unmap_skb_frags(pfvf, sg);
@@ -101,21 +123,71 @@ static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
sg->skb = (u64)NULL;
}
+static inline void otx2_set_rxtstamp(struct otx2_nic *pfvf,
+ struct sk_buff *skb, void *data)
+{
+ u64 tsns;
+ int err;
+
+ if (!(pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED))
+ return;
+
+	/* The first 8 bytes are the timestamp */
+ err = otx2_ptp_tstamp2time(pfvf, be64_to_cpu(*(u64 *)data), &tsns);
+ if (err)
+ return;
+
+ skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(tsns);
+}
+
static void otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb,
- u64 iova, int len)
+ u64 iova, int len, struct nix_rx_parse_s *parse)
{
struct page *page;
+ int off = 0;
void *va;
va = phys_to_virt(otx2_iova_to_phys(pfvf->iommu_domain, iova));
+
+ if (likely(!skb_shinfo(skb)->nr_frags)) {
+		/* Check whether the data starts at some nonzero offset
+		 * from the start of the buffer. For now the only possible
+		 * offset is 8 bytes, when the packet is prepended by a
+		 * timestamp.
+		 */
+ if (parse->laptr) {
+ otx2_set_rxtstamp(pfvf, skb, va);
+ off = OTX2_HW_TIMESTAMP_LEN;
+ }
+ off += pfvf->xtra_hdr;
+ }
+
page = virt_to_page(va);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
- va - page_address(page), len, pfvf->rbsize);
+ va - page_address(page) + off, len - off, pfvf->rbsize);
otx2_dma_unmap_page(pfvf, iova - OTX2_HEAD_ROOM,
pfvf->rbsize, DMA_FROM_DEVICE);
}
+static inline void otx2_set_taginfo(struct nix_rx_parse_s *parse,
+ struct sk_buff *skb)
+{
+ /* Check if VLAN is present, captured and stripped from packet */
+ if (parse->vtag0_valid && parse->vtag0_gone) {
+ skb_frag_t *frag0 = &skb_shinfo(skb)->frags[0];
+
+		/* Is the captured tag an STAG or a CTAG? */
+ if (((struct ethhdr *)skb_frag_address(frag0))->h_proto ==
+ htons(ETH_P_8021Q))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD),
+ parse->vtag0_tci);
+ else
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ parse->vtag0_tci);
+ }
+}
+
static void otx2_set_rxhash(struct otx2_nic *pfvf,
struct nix_cqe_rx_s *cqe, struct sk_buff *skb)
{
@@ -239,7 +311,7 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
if (unlikely(!skb))
return;
- otx2_skb_add_frag(pfvf, skb, cqe->sg.seg_addr, cqe->sg.seg_size);
+ otx2_skb_add_frag(pfvf, skb, cqe->sg.seg_addr, cqe->sg.seg_size, parse);
cq->pool_ptrs++;
otx2_set_rxhash(pfvf, cqe, skb);
@@ -248,6 +320,8 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
if (pfvf->netdev->features & NETIF_F_RXCSUM)
skb->ip_summed = CHECKSUM_UNNECESSARY;
+ otx2_set_taginfo(parse, skb);
+
napi_gro_frags(napi);
}
@@ -267,6 +341,7 @@ static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
return 0;
break;
}
+
cq->cq_head++;
cq->cq_head &= (cq->cqe_cnt - 1);
@@ -483,10 +558,40 @@ static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
ipv6_hdr(skb)->payload_len =
htons(ext->lso_sb - skb_network_offset(skb));
}
+ } else if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ ext->tstmp = 1;
}
+
+#define OTX2_VLAN_PTR_OFFSET (ETH_HLEN - ETH_TLEN)
+ if (skb_vlan_tag_present(skb)) {
+ if (skb->vlan_proto == htons(ETH_P_8021Q)) {
+ ext->vlan1_ins_ena = 1;
+ ext->vlan1_ins_ptr = OTX2_VLAN_PTR_OFFSET;
+ ext->vlan1_ins_tci = skb_vlan_tag_get(skb);
+ } else if (skb->vlan_proto == htons(ETH_P_8021AD)) {
+ ext->vlan0_ins_ena = 1;
+ ext->vlan0_ins_ptr = OTX2_VLAN_PTR_OFFSET;
+ ext->vlan0_ins_tci = skb_vlan_tag_get(skb);
+ }
+ }
+
*offset += sizeof(*ext);
}
+static void otx2_sqe_add_mem(struct otx2_snd_queue *sq, int *offset,
+ int alg, u64 iova)
+{
+ struct nix_sqe_mem_s *mem;
+
+ mem = (struct nix_sqe_mem_s *)(sq->sqe_base + *offset);
+ mem->subdc = NIX_SUBDC_MEM;
+ mem->alg = alg;
+ mem->wmem = 1; /* wait for the memory operation */
+ mem->addr = iova;
+
+ *offset += sizeof(*mem);
+}
+
/* Add SQE header subdescriptor structure */
static void otx2_sqe_add_hdr(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
struct nix_sqe_hdr_s *sqe_hdr,
@@ -709,6 +814,8 @@ static bool is_hw_tso_supported(struct otx2_nic *pfvf,
if (!pfvf->hw.hw_tso)
return false;
+ if (is_dev_post_96xx_C0(pfvf->pdev))
+ return true;
/* HW has an issue due to which when the payload of the last LSO
* segment is shorter than 16 bytes, some header fields may not
* be correctly modified, hence don't offload such TSO segments.
@@ -737,6 +844,21 @@ static int otx2_get_sqe_count(struct otx2_nic *pfvf, struct sk_buff *skb)
return skb_shinfo(skb)->gso_segs;
}
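+/* For non-TSO packets that request a hardware TX timestamp, append a
+ * SEND MEM subdescriptor so the timestamp is written into this SQE's
+ * slot in sq->timestamps; otherwise take a software timestamp.
+ */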
+static inline void otx2_set_txtstamp(struct otx2_nic *pfvf, struct sk_buff *skb,
+ struct otx2_snd_queue *sq, int *offset)
+{
+ u64 iova;
+
+ if (!skb_shinfo(skb)->gso_size &&
+ skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ iova = sq->timestamps->iova + (sq->head * sizeof(u64));
+ otx2_sqe_add_mem(sq, offset, NIX_SENDMEMALG_E_SETTSTMP, iova);
+ } else {
+ skb_tx_timestamp(skb);
+ }
+}
+
bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
struct sk_buff *skb, u16 qidx)
{
@@ -769,6 +891,9 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
}
if (skb_shinfo(skb)->gso_size && !is_hw_tso_supported(pfvf, skb)) {
+ /* Insert vlan tag before giving pkt to tso */
+ if (skb_vlan_tag_present(skb))
+ skb = __vlan_hwaccel_push_inside(skb);
otx2_sq_append_tso(pfvf, sq, skb, qidx);
return true;
}
@@ -790,6 +915,8 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
return false;
}
+ otx2_set_txtstamp(pfvf, skb, sq, &offset);
+
sqe_hdr->sizem1 = (offset / 16) - 1;
netdev_tx_sent_queue(txq, skb->len);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
index 4ab32d3adb78..9a70d7a52413 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -24,7 +24,7 @@
#define OTX2_ETH_HLEN (VLAN_ETH_HLEN + VLAN_HLEN)
#define OTX2_MIN_MTU 64
-#define OTX2_MAX_MTU (9212 - OTX2_ETH_HLEN)
+#define OTX2_MAX_MTU (9204 - OTX2_ETH_HLEN)
#define OTX2_MAX_GSO_SEGS 255
#define OTX2_MAX_FRAGS_IN_SQE 9
@@ -91,6 +91,7 @@ struct otx2_snd_queue {
struct qmem *sqe;
struct qmem *tso_hdrs;
struct sg_list *sg;
+ struct qmem *timestamps;
struct queue_stats stats;
u16 sqb_count;
u64 *sqb_ptrs;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 92a3db69a6cd..10b85a6d470a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -108,9 +108,6 @@ static void otx2vf_vfaf_mbox_handler(struct work_struct *work)
static int otx2vf_process_mbox_msg_up(struct otx2_nic *vf,
struct mbox_msghdr *req)
{
- struct msg_rsp *rsp;
- int err;
-
/* Check if valid, if not reply with a invalid msg */
if (req->sig != OTX2_MBOX_REQ_SIG) {
otx2_reply_invalid_msg(&vf->mbox.mbox_up, 0, 0, req->id);
@@ -118,20 +115,29 @@ static int otx2vf_process_mbox_msg_up(struct otx2_nic *vf,
}
switch (req->id) {
- case MBOX_MSG_CGX_LINK_EVENT:
- rsp = (struct msg_rsp *)otx2_mbox_alloc_msg(
- &vf->mbox.mbox_up, 0,
- sizeof(struct msg_rsp));
- if (!rsp)
- return -ENOMEM;
-
- rsp->hdr.id = MBOX_MSG_CGX_LINK_EVENT;
- rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
- rsp->hdr.pcifunc = 0;
- rsp->hdr.rc = 0;
- err = otx2_mbox_up_handler_cgx_link_event(
- vf, (struct cgx_link_info_msg *)req, rsp);
- return err;
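+	/* Expand one case per mailbox up-message listed in
+	 * MBOX_UP_CGX_MESSAGES: allocate the response, fill in its header
+	 * and dispatch to the matching otx2_mbox_up_handler_*() callback.
+	 */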
+#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
+ case _id: { \
+ struct _rsp_type *rsp; \
+ int err; \
+ \
+ rsp = (struct _rsp_type *)otx2_mbox_alloc_msg( \
+ &vf->mbox.mbox_up, 0, \
+ sizeof(struct _rsp_type)); \
+ if (!rsp) \
+ return -ENOMEM; \
+ \
+ rsp->hdr.id = _id; \
+ rsp->hdr.sig = OTX2_MBOX_RSP_SIG; \
+ rsp->hdr.pcifunc = 0; \
+ rsp->hdr.rc = 0; \
+ \
+ err = otx2_mbox_up_handler_ ## _fn_name( \
+ vf, (struct _req_type *)req, rsp); \
+ return err; \
+ }
+MBOX_UP_CGX_MESSAGES
+#undef M
+ break;
default:
otx2_reply_invalid_msg(&vf->mbox.mbox_up, 0, 0, req->id);
return -ENODEV;
@@ -414,6 +420,13 @@ static void otx2vf_reset_task(struct work_struct *work)
rtnl_unlock();
}
+static netdev_features_t
+otx2_features_check(struct sk_buff *skb, struct net_device *dev,
+ netdev_features_t features)
+{
+ return features;
+}
+
static const struct net_device_ops otx2vf_netdev_ops = {
.ndo_open = otx2vf_open,
.ndo_stop = otx2vf_stop,
@@ -422,6 +435,7 @@ static const struct net_device_ops otx2vf_netdev_ops = {
.ndo_change_mtu = otx2vf_change_mtu,
.ndo_get_stats64 = otx2_get_stats64,
.ndo_tx_timeout = otx2_tx_timeout,
+ .ndo_features_check = otx2_features_check,
};
static int otx2vf_realloc_msix_vectors(struct otx2_nic *vf)
@@ -487,6 +501,9 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
vf->pdev = pdev;
vf->dev = dev;
vf->iommu_domain = iommu_get_domain_for_dev(dev);
+ if (vf->iommu_domain)
+ vf->iommu_domain_type =
+ ((struct iommu_domain *)vf->iommu_domain)->type;
vf->flags |= OTX2_FLAG_INTF_DOWN;
hw = &vf->hw;
@@ -548,6 +565,12 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
otx2_setup_dev_hw_settings(vf);
+ err = otx2smqvf_probe(vf);
+ if (!err)
+ return 0;
+ else if (err == -EINVAL)
+ goto err_detach_rsrc;
+
/* Assign default mac address */
otx2_get_mac_from_af(netdev);
@@ -555,7 +578,11 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
NETIF_F_IPV6_CSUM | NETIF_F_RXHASH |
NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6;
netdev->features = netdev->hw_features;
-
+ /* Support TSO on tag interface */
+ netdev->vlan_features |= netdev->features;
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX;
+ netdev->features |= netdev->hw_features;
netdev->gso_max_segs = OTX2_MAX_GSO_SEGS;
netdev->watchdog_timeo = OTX2_TX_TIMEOUT;
@@ -567,6 +594,15 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
INIT_WORK(&vf->reset_task, otx2vf_reset_task);
+ if (is_otx2_lbkvf(vf->pdev)) {
+ int n;
+
+ n = (vf->pcifunc >> RVU_PFVF_FUNC_SHIFT) & RVU_PFVF_FUNC_MASK;
+ /* Need to subtract 1 to get proper VF number */
+ n -= 1;
+ snprintf(netdev->name, sizeof(netdev->name), "lbk%d", n);
+ }
+
/* To distinguish, for LBK VFs set netdev name explicitly */
if (is_otx2_lbkvf(vf->pdev)) {
int n;
@@ -618,7 +654,9 @@ static void otx2vf_remove(struct pci_dev *pdev)
vf = netdev_priv(netdev);
cancel_work_sync(&vf->reset_task);
- unregister_netdev(netdev);
+ if (otx2smqvf_remove(vf))
+ unregister_netdev(netdev);
+
otx2vf_disable_mbox_intr(vf);
otx2_detach_resources(&vf->mbox);
diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
index 08fd823edac9..7fee70c3394c 100644
--- a/drivers/of/dynamic.c
+++ b/drivers/of/dynamic.c
@@ -329,7 +329,7 @@ void of_node_release(struct kobject *kobj)
/* We should never be releasing nodes that haven't been detached. */
if (!of_node_check_flag(node, OF_DETACHED)) {
- pr_err("ERROR: Bad of_node_put() on %pOF\n", node);
+ pr_err("ERROR: Bad of_node_put() on %s\n", node->full_name);
dump_stack();
return;
}
@@ -340,8 +340,8 @@ void of_node_release(struct kobject *kobj)
if (!of_node_check_flag(node, OF_OVERLAY_FREE_CSET)) {
/* premature refcount of zero, do not free memory */
- pr_err("ERROR: memory leak before free overlay changeset, %pOF\n",
- node);
+ pr_err("ERROR: memory leak before free overlay changeset, %s\n",
+ node->full_name);
return;
}
@@ -351,8 +351,8 @@ void of_node_release(struct kobject *kobj)
* yet been removed, or by a non-overlay mechanism.
*/
if (node->properties)
- pr_err("ERROR: %s(), unexpected properties in %pOF\n",
- __func__, node);
+ pr_err("ERROR: %s(), unexpected properties in %s\n",
+ __func__, node->full_name);
}
property_list_free(node->properties);
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index 91bfdb784829..451ec6e674b6 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -168,6 +168,14 @@ config PCI_HOST_THUNDER_PEM
help
Say Y here if you want PCIe support for CN88XX Cavium Thunder SoCs.
+config PCI_HOST_OCTEONTX2_PEM
+ bool "Marvell OcteonTX2 PCIe controller to off-chip devices"
+ depends on ARM64
+ depends on OF
+ select PCI_HOST_COMMON
+ help
+ Say Y here if you want PCIe support for CN9XXX Marvell OcteonTX2 SoCs.
+
config PCI_HOST_THUNDER_ECAM
bool "Cavium Thunder ECAM controller to on-chip devices on pass-1.x silicon"
depends on ARM64 || COMPILE_TEST
diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile
index 158c59771824..53dcd9b952fa 100644
--- a/drivers/pci/controller/Makefile
+++ b/drivers/pci/controller/Makefile
@@ -46,5 +46,6 @@ obj-y += mobiveil/
ifdef CONFIG_PCI
obj-$(CONFIG_ARM64) += pci-thunder-ecam.o
obj-$(CONFIG_ARM64) += pci-thunder-pem.o
+obj-$(CONFIG_ARM64) += pci-octeontx2-pem.o
obj-$(CONFIG_ARM64) += pci-xgene.o
endif
diff --git a/drivers/pci/controller/pci-octeontx2-pem.c b/drivers/pci/controller/pci-octeontx2-pem.c
new file mode 100644
index 000000000000..7c2b8a17e287
--- /dev/null
+++ b/drivers/pci/controller/pci-octeontx2-pem.c
@@ -0,0 +1,490 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 PCIe host controller
+ *
+ * Copyright (C) 2019 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/pci-acpi.h>
+#include <linux/pci-ecam.h>
+#include <linux/platform_device.h>
+#include "../pci.h"
+
+#if defined(CONFIG_PCI_HOST_OCTEONTX2_PEM)
+
+/* Bridge config space reads/writes done using
+ * these registers.
+ */
+#define PEM_CFG_WR 0x018
+#define PEM_CFG_RD 0x020
+#define PEM_IB_MERGE_TIMER_CTL 0x1C0
+
+#define PCIERC_RAS_EINJ_EN 0x348
+#define PCIERC_RAS_EINJ_CTL6CMPP0 0x364
+#define PCIERC_RAS_EINJ_CTL6CMPV0 0x374
+#define PCIERC_RAS_EINJ_CTL6CHGP1 0x388
+#define PCIERC_RAS_EINJ_CTL6CHGV1 0x398
+#define PCIERC_RAS_EINJ_CTL6PE 0x3A4
+#define PCIERC_RASDP_EP_CTL 0x420
+#define PCIERC_RASDP_DE_ME 0x440
+
+struct octeontx2_pem_pci {
+ u32 ea_entry[3];
+ void __iomem *pem_reg_base;
+};
+
+static int octeontx2_pem_bridge_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+ struct octeontx2_pem_pci *pem_pci;
+ u64 read_val;
+
+ if (devfn != 0 || where >= 2048) {
+ *val = ~0;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+ pem_pci = (struct octeontx2_pem_pci *)cfg->priv;
+
+ /*
+ * 32-bit accesses only. Write the address to the low order
+ * bits of PEM_CFG_RD, then trigger the read by reading back.
+ * The config data lands in the upper 32-bits of PEM_CFG_RD.
+ */
+ read_val = where & ~3ull;
+ writeq(read_val, pem_pci->pem_reg_base + PEM_CFG_RD);
+ read_val = readq(pem_pci->pem_reg_base + PEM_CFG_RD);
+ read_val >>= 32;
+
+	/* The HW reset values at a few config space locations are
+	 * garbage; fix them up here.
+	 */
+ switch (where & ~3) {
+ case 0x00: /* DevID & VenID */
+ read_val = 0xA02D177D;
+ break;
+ case 0x04:
+ read_val = 0x00100006;
+ break;
+ case 0x08:
+ read_val = 0x06040100;
+ break;
+ case 0x0c:
+ read_val = 0x00010000;
+ break;
+ case 0x18:
+ read_val = 0x00010100;
+ break;
+ case 0x40:
+ read_val &= 0xffff00ff;
+ read_val |= 0x00005000; /* In RC mode, point to EA capability */
+ break;
+ case 0x5c: /* EA_ENTRY2 */
+ read_val = pem_pci->ea_entry[0];
+ break;
+ case 0x60: /* EA_ENTRY3 */
+ read_val = pem_pci->ea_entry[1];
+ break;
+ case 0x64: /* EA_ENTRY4 */
+ read_val = pem_pci->ea_entry[2];
+ break;
+ case 0x70: /* Express Cap */
+ /* HW reset value is '0', set PME interrupt vector to 1 */
+ if (!(read_val & (0x1f << 25)))
+ read_val |= (1u << 25);
+ break;
+ default:
+ break;
+ }
+ read_val >>= (8 * (where & 3));
+ switch (size) {
+ case 1:
+ read_val &= 0xff;
+ break;
+ case 2:
+ read_val &= 0xffff;
+ break;
+ default:
+ break;
+ }
+ *val = read_val;
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int octeontx2_pem_config_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+
+ if (bus->number < cfg->busr.start ||
+ bus->number > cfg->busr.end)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ /*
+ * The first device on the bus is the PEM PCIe bridge.
+ * Special case its config access.
+ */
+ if (bus->number == cfg->busr.start)
+ return octeontx2_pem_bridge_read(bus, devfn, where, size, val);
+
+ return pci_generic_config_read(bus, devfn, where, size, val);
+}
+
+/*
+ * Some of the w1c_bits below also include read-only or non-writable
+ * reserved bits; this makes the code simpler and is OK as the bits
+ * are not affected by writing zeros to them.
+ */
+static u32 octeontx2_pem_bridge_w1c_bits(u64 where_aligned)
+{
+ u32 w1c_bits = 0;
+
+ switch (where_aligned) {
+ case 0x04: /* Command/Status */
+ case 0x1c: /* Base and I/O Limit/Secondary Status */
+ w1c_bits = 0xff000000;
+ break;
+ case 0x44: /* Power Management Control and Status */
+ w1c_bits = 0xfffffe00;
+ break;
+ case 0x78: /* Device Control/Device Status */
+ case 0x80: /* Link Control/Link Status */
+ case 0x88: /* Slot Control/Slot Status */
+ case 0x90: /* Root Status */
+ case 0xa0: /* Link Control 2 Registers/Link Status 2 */
+ w1c_bits = 0xffff0000;
+ break;
+ case 0x104: /* Uncorrectable Error Status */
+ case 0x110: /* Correctable Error Status */
+ case 0x130: /* Error Status */
+ case 0x180: /* Lane error status */
+ w1c_bits = 0xffffffff;
+ break;
+ default:
+ break;
+ }
+ return w1c_bits;
+}
+
+/* Some bits must be written to one so they appear to be read-only. */
+static u32 octeontx2_pem_bridge_w1_bits(u64 where_aligned)
+{
+ u32 w1_bits;
+
+ switch (where_aligned) {
+ case 0x1c: /* I/O Base / I/O Limit, Secondary Status */
+ /* Force 32-bit I/O addressing. */
+ w1_bits = 0x0101;
+ break;
+ case 0x24: /* Prefetchable Memory Base / Prefetchable Memory Limit */
+ /* Force 64-bit addressing */
+ w1_bits = 0x00010001;
+ break;
+ default:
+ w1_bits = 0;
+ break;
+ }
+ return w1_bits;
+}
+
+static int octeontx2_pem_bridge_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+ struct octeontx2_pem_pci *pem_pci;
+ u64 where_aligned = where & ~3ull;
+ u64 write_val, read_val;
+ u32 mask = 0;
+
+ if (devfn != 0 || where >= 2048)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ pem_pci = (struct octeontx2_pem_pci *)cfg->priv;
+
+ /*
+ * 32-bit accesses only. If the write is for a size smaller
+ * than 32-bits, we must first read the 32-bit value and merge
+ * in the desired bits and then write the whole 32-bits back
+ * out.
+ */
+ switch (size) {
+ case 1:
+ writeq(where_aligned, pem_pci->pem_reg_base + PEM_CFG_RD);
+ read_val = readq(pem_pci->pem_reg_base + PEM_CFG_RD);
+ read_val >>= 32;
+ mask = ~(0xff << (8 * (where & 3)));
+ read_val &= mask;
+ val = (val & 0xff) << (8 * (where & 3));
+ val |= (u32)read_val;
+ break;
+ case 2:
+ writeq(where_aligned, pem_pci->pem_reg_base + PEM_CFG_RD);
+ read_val = readq(pem_pci->pem_reg_base + PEM_CFG_RD);
+ read_val >>= 32;
+ mask = ~(0xffff << (8 * (where & 3)));
+ read_val &= mask;
+ val = (val & 0xffff) << (8 * (where & 3));
+ val |= (u32)read_val;
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * By expanding the write width to 32 bits, we may
+ * inadvertently hit some W1C bits that were not intended to
+ * be written. Calculate the mask that must be applied to the
+ * data to be written to avoid these cases.
+ */
+ if (mask) {
+ u32 w1c_bits = octeontx2_pem_bridge_w1c_bits(where);
+
+ if (w1c_bits) {
+ mask &= w1c_bits;
+ val &= ~mask;
+ }
+ }
+
+ /*
+ * Some bits must be read-only with value of one. Since the
+ * access method allows these to be cleared if a zero is
+ * written, force them to one before writing.
+ */
+ val |= octeontx2_pem_bridge_w1_bits(where_aligned);
+
+ /*
+ * Low order bits are the config address, the high order 32
+ * bits are the data to be written.
+ */
+ write_val = (((u64)val) << 32) | where_aligned;
+ writeq(write_val, pem_pci->pem_reg_base + PEM_CFG_WR);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static void octeontx2_be_workaround_init(struct pci_bus *bus)
+{
+ u32 val;
+
+ /* Ensure that PCIERC_RASDP_DE_ME.ERR_MODE is set to 0 */
+ octeontx2_pem_bridge_read(bus, 0x00,
+ PCIERC_RASDP_DE_ME, 4, &val);
+ octeontx2_pem_bridge_write(bus, 0x00,
+ PCIERC_RASDP_DE_ME, 4, val & ~BIT(0));
+
+ /* Disable parity error correction */
+ octeontx2_pem_bridge_read(bus, 0x00,
+ PCIERC_RASDP_EP_CTL, 4, &val);
+ octeontx2_pem_bridge_write(bus, 0x00,
+ PCIERC_RASDP_EP_CTL, 4, val | BIT(0));
+
+ /* Enable RAS to change header
+ * PCIERC_RAS_EINJ_EN.EINJ0_EN.set(0);
+ * PCIERC_RAS_EINJ_EN.EINJ1_EN.set(0);
+ * PCIERC_RAS_EINJ_EN.EINJ2_EN.set(0);
+ * PCIERC_RAS_EINJ_EN.EINJ3_EN.set(0);
+ * PCIERC_RAS_EINJ_EN.EINJ4_EN.set(0);
+ * PCIERC_RAS_EINJ_EN.EINJ5_EN.set(0);
+ * PCIERC_RAS_EINJ_EN.EINJ6_EN.set(1);
+ */
+ octeontx2_pem_bridge_write(bus, 0x00,
+ PCIERC_RAS_EINJ_EN, 4, BIT(6));
+
+	/* Set the error injection count to 1 and set the type to TLP;
+	 * INV_CNTRL must remain 0.
+	 */
+ octeontx2_pem_bridge_write(bus, 0x00,
+ PCIERC_RAS_EINJ_CTL6PE, 4, 1);
+
+ /* Set up compare point to compare Fmt/Type field in TLP Header word 0
+	 * Where bits[31:0] = tlp_dw[7:0], tlp_dw[15:8],
+ * tlp_dw[23:16], tlp_dw[31:24].
+ *
+ * PCIERC_RAS_EINJ_CTL6CMPP0.EINJ6_COM_PT_H0.set(32'hfe00_0000);
+ */
+ octeontx2_pem_bridge_write(bus, 0x00,
+ PCIERC_RAS_EINJ_CTL6CMPP0, 4, 0xFE000000);
+
+ /* Set up the value to compare against,
+ * look for Fmt/Type to indicate CfgRd/CfWr - both type 0 or 1.
+	 * Where bits[31:0] = tlp_dw[7:0], tlp_dw[15:8],
+ * tlp_dw[23:16], tlp_dw[31:24]
+ */
+ octeontx2_pem_bridge_write(bus, 0x00,
+ PCIERC_RAS_EINJ_CTL6CMPV0, 4, 0x44000000);
+
+ /* Set up the bit position in TLP Header word 1 to replace
+ * (LBE is bits 7:4, FBE is bits 3:0).
+ *
+	 * Where bits[31:0] = tlp_dw[7:0], tlp_dw[15:8],
+ * tlp_dw[23:16], tlp_dw[31:24].
+ */
+ octeontx2_pem_bridge_write(bus, 0x00,
+ PCIERC_RAS_EINJ_CTL6CHGP1, 4, 0xFF);
+}
+
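+/* For sub-word config writes, arm a single RAS error injection that
+ * overwrites the first byte-enables of the outgoing CfgWr TLP with the
+ * byte-enables matching the requested access size and offset
+ * (presumably to work around incorrect byte-enables generated by the
+ * MAC for such accesses).
+ */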
+static void octeontx2_be_workaround(struct pci_bus *bus, int where,
+ int size, u32 val)
+{
+ struct pci_host_bridge *rc;
+ u32 reg, be = 0;
+
+ rc = pci_find_host_bridge(bus);
+
+ /* Setup RAS to inject one error */
+ octeontx2_be_workaround_init(rc->bus);
+
+ /* Get byte-enable to inject into TLP */
+ where &= 0x03;
+ switch (size) {
+ case 1:
+ be = 1 << where;
+ break;
+ case 2:
+ be = 3 << where;
+ break;
+ case 4:
+ be = 0xF;
+ }
+
+ /* Set up the value you'd like to use for FBE (Cfg ops must have LBE==0)
+	 * Where bits[31:0] = tlp_dw[7:0], tlp_dw[15:8],
+ * tlp_dw[23:16], tlp_dw[31:24].
+ */
+ octeontx2_pem_bridge_write(rc->bus, 0x00,
+ PCIERC_RAS_EINJ_CTL6CHGV1, 4, be);
+
+ /* To be absolutely sure that the ECAM access does not get to
+ * the MAC prior to the PCIERC register updates that are setting
+ * up for that ECAM access, SW should read back one of the
+ * registers it wrote before launching the ECAM access.
+ */
+ octeontx2_pem_bridge_read(rc->bus, 0x00,
+ PCIERC_RAS_EINJ_CTL6CHGV1, 4, &reg);
+}
+
+static int octeontx2_pem_config_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+
+ if (bus->number < cfg->busr.start ||
+ bus->number > cfg->busr.end)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ /*
+ * The first device on the bus is the PEM PCIe bridge.
+ * Special case its config access.
+ */
+ if (bus->number == cfg->busr.start)
+ return octeontx2_pem_bridge_write(bus, devfn, where, size, val);
+
+ octeontx2_be_workaround(bus, where, size, val);
+
+ return pci_generic_config_write(bus, devfn, where, size, val);
+}
+
+static int octeontx2_pem_init(struct device *dev, struct pci_config_window *cfg,
+ struct resource *res_pem)
+{
+ struct octeontx2_pem_pci *pem_pci;
+ resource_size_t bar4_start;
+ u64 val;
+
+ pem_pci = devm_kzalloc(dev, sizeof(*pem_pci), GFP_KERNEL);
+ if (!pem_pci)
+ return -ENOMEM;
+
+ pem_pci->pem_reg_base = devm_ioremap(dev, res_pem->start, 0x10000);
+ if (!pem_pci->pem_reg_base)
+ return -ENOMEM;
+
+ /* As per HW Errata 34726, an issue exists whereby inbound write
+ * merging may cause undefined operation. Hence disabling it.
+ *
+ * Need to revisit this for future silicon passes and versions.
+ */
+ val = readq(pem_pci->pem_reg_base + PEM_IB_MERGE_TIMER_CTL);
+ val |= BIT_ULL(10);
+ writeq(val, pem_pci->pem_reg_base + PEM_IB_MERGE_TIMER_CTL);
+
+ /*
+ * The MSI-X BAR for the PEM and AER interrupts is located at
+ * a fixed offset from the PEM register base. Generate a
+ * fragment of the synthesized Enhanced Allocation capability
+ * structure here for the BAR.
+ */
+ bar4_start = res_pem->start + 0xf00000000;
+ pem_pci->ea_entry[0] = (u32)bar4_start | 2;
+ pem_pci->ea_entry[1] = (u32)(res_pem->end - bar4_start) & ~3u;
+ pem_pci->ea_entry[2] = (u32)(bar4_start >> 32);
+
+ cfg->priv = pem_pci;
+ return 0;
+}
+
+static int octeontx2_pem_platform_init(struct pci_config_window *cfg)
+{
+ struct device *dev = cfg->parent;
+ struct platform_device *pdev;
+ struct resource *res_pem;
+
+ if (!dev->of_node)
+ return -EINVAL;
+
+ pdev = to_platform_device(dev);
+
+ /*
+ * The second register range is the PEM bridge to the PCIe
+ * bus. It has a different config access method than those
+ * devices behind the bridge.
+ */
+ res_pem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res_pem) {
+		dev_err(dev, "missing \"reg[1]\" property\n");
+ return -EINVAL;
+ }
+
+ return octeontx2_pem_init(dev, cfg, res_pem);
+}
+
+static struct pci_ecam_ops pci_octeontx2_pem_ops = {
+ .bus_shift = 20,
+ .init = octeontx2_pem_platform_init,
+ .pci_ops = {
+ .map_bus = pci_ecam_map_bus,
+ .read = octeontx2_pem_config_read,
+ .write = octeontx2_pem_config_write,
+ }
+};
+
+static const struct of_device_id octeontx2_pem_of_match[] = {
+ { .compatible = "marvell,pci-host-octeontx2-pem" },
+ { },
+};
+
+static int octeontx2_pem_probe(struct platform_device *pdev)
+{
+ return pci_host_common_probe(pdev, &pci_octeontx2_pem_ops);
+}
+
+static struct platform_driver octeontx2_pem_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = octeontx2_pem_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = octeontx2_pem_probe,
+};
+builtin_platform_driver(octeontx2_pem_driver);
+
+#endif
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 136d25acff56..2101ddd7f050 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -4435,7 +4435,7 @@ static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags)
static bool pci_quirk_cavium_acs_match(struct pci_dev *dev)
{
- if (!pci_is_pcie(dev) || pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
+ if (!pci_is_pcie(dev))
return false;
switch (dev->device) {
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index 09ae8a970880..e45f6f45f8cc 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -55,7 +55,7 @@ config ARM_PMU_ACPI
config ARM_SMMU_V3_PMU
tristate "ARM SMMUv3 Performance Monitors Extension"
- depends on ARM64 && ACPI && ARM_SMMU_V3
+ depends on ARM64 && ARM_SMMU_V3
help
Provides support for the ARM SMMUv3 Performance Monitor Counter
Groups (PMCG), which provide monitoring of transactions passing
diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c
index 4cdb35d166ac..b212a9725995 100644
--- a/drivers/perf/arm_smmuv3_pmu.c
+++ b/drivers/perf/arm_smmuv3_pmu.c
@@ -36,6 +36,7 @@
#include <linux/acpi.h>
#include <linux/acpi_iort.h>
+#include <linux/of.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/cpuhotplug.h>
@@ -293,7 +294,10 @@ static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu,
idx = find_first_bit(smmu_pmu->used_counters, num_ctrs);
if (idx == num_ctrs ||
smmu_pmu_check_global_filter(smmu_pmu->events[idx], event)) {
- smmu_pmu_set_event_filter(event, 0, span, sid);
+ if (idx == 0)
+ smmu_pmu_set_event_filter(event, 0, span, sid);
+ else
+ smmu_pmu_set_event_filter(event, idx, 0, 0);
return 0;
}
@@ -857,9 +861,16 @@ static void smmu_pmu_shutdown(struct platform_device *pdev)
smmu_pmu_disable(&smmu_pmu->pmu);
}
+static const struct of_device_id smmu_pmu_of_match[] = {
+ { .compatible = "arm,smmu-pmu-v3", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, smmu_pmu_of_match);
+
static struct platform_driver smmu_pmu_driver = {
.driver = {
.name = "arm-smmu-v3-pmcg",
+ .of_match_table = of_match_ptr(smmu_pmu_of_match),
.suppress_bind_attrs = true,
},
.probe = smmu_pmu_probe,
diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig
index 425ab6f7e375..acedbcab5d9f 100644
--- a/drivers/soc/Kconfig
+++ b/drivers/soc/Kconfig
@@ -9,6 +9,7 @@ source "drivers/soc/bcm/Kconfig"
source "drivers/soc/fsl/Kconfig"
source "drivers/soc/imx/Kconfig"
source "drivers/soc/ixp4xx/Kconfig"
+source "drivers/soc/marvell/Kconfig"
source "drivers/soc/mediatek/Kconfig"
source "drivers/soc/qcom/Kconfig"
source "drivers/soc/renesas/Kconfig"
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index 36452bed86ef..c60dc3de3fa0 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_ARCH_IXP4XX) += ixp4xx/
obj-$(CONFIG_SOC_XWAY) += lantiq/
obj-y += mediatek/
obj-y += amlogic/
+obj-y += marvell/
obj-y += qcom/
obj-y += renesas/
obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/
diff --git a/drivers/soc/marvell/Kconfig b/drivers/soc/marvell/Kconfig
new file mode 100644
index 000000000000..334c0e2137b0
--- /dev/null
+++ b/drivers/soc/marvell/Kconfig
@@ -0,0 +1,106 @@
+#
+# MARVELL SoC drivers
+#
+
+menu "Marvell SoC drivers"
+
+config OCTEONTX2_RM
+ tristate "OcteonTX2 RVU Resource Manager driver"
+ depends on OCTEONTX2_AF
+ ---help---
+ This driver offers resource management interfaces for Marvell's
+	  OcteonTX2 Resource Virtualization Unit SSO/TIM PFs, which are used
+ for interfacing with non-NIC hardware offload units.
+
+config OCTEONTX2_RM_DOM_SYSFS
+ bool "OcteonTX2 RVU Resource Manager domain sysfs"
+ depends on OCTEONTX2_RM
+ ---help---
+ Enable Application Domain sysfs which simplifies management of
+ SSO/TIM VFs and OcteonTX2 RVU based NIC devices by the system
+ administrator. This interface consists of the following files:
+
+ I. /sys/bus/pci/drivers/octeontx2-rm/0*/create_domain
+
+ Writing to this file will:
+ 1. Create a domain directory in /sys/bus/pci/drivers/octeontx2-rm/0*
+ with the domain name
+	  2. Reserve one of the SSO/TIM VFs for this domain and set its limits
+	     according to the specification passed in the write string
+ 3. Create symlinks to all devices that will be part of the domain
+ in the directory created in point 1
+ 4. Create domain_id file returning the ID assigned to this domain
+ (effectively the domain name)
+	  5. Create a domain_in_use file reporting the state of the domain's
+	     SSO/TIM device's in_use file, to indicate when the domain is
+	     in use by an application.
+
+ The syntax for writing into this file is:
+
+ name;param:val(;param:val)*
+
+ * name - domain name
+	  * param - parameter name; depending on the parameter, its value
+	    'val' takes a different format:
+ * sso, ssow, npa, tim, cpt - 'val' is an integer value of the
+ number of LFs to assign to the domain
+ * port - 'val' is in 'DDDD:BB:DD.F' format and specifies device
+ representing a port.
+
+ There are the following rules when creating a domain:
+
+ 1. Domain names must be unique
+ 2. Each domain must have at least 1 NPA and 1 SSOW LF
+	  3. One port may be assigned to only a single domain
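+
+	  For example, a domain named "dom0" with one NPA LF, one SSOW LF,
+	  two SSO LFs and one port (the values here are purely illustrative)
+	  could be created with:
+
+	    echo "dom0;npa:1;ssow:1;sso:2;port:0002:02:00.0" > \
+	        /sys/bus/pci/drivers/octeontx2-rm/0*/create_domain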
+
+ II. /sys/bus/pci/drivers/octeontx2-rm/0*/destroy_domain
+
+	  Writing a domain name to this file will cause the given domain to
+	  be removed from sysfs. This includes:
+ 1. Setting all limits of domain's SSO/TIM device to 0
+ 2. Removing all sysfs structures for this domain
+ 3. Removing all ports in this application domain from the list of
+ ports in use.
+
+	  Removal of the domain is disabled while the domain is in use, that
+	  is, while the 'in_use' flag of the domain's SSO/TIM device is set.
+ User/admin may query the status of this flag using the
+ 'domain_in_use' file in the domain's sysfs directory.
+
+config OCTEONTX2_DPI_PF
+ tristate "OcteonTX2 DPI-DMA PF driver"
+ depends on ARM64 && PCI
+ ---help---
+ Select this option to enable DPI PF driver support.
+	  DPI (DMA Packet Interface) provides DMA support for the MAC.
+	  This driver initializes the DPI PF device and enables its VFs to
+	  support different types of DMA transfers.
+
+config MDIO_DEBUGFS
+	tristate "Stub driver for debugfs support for MDIO commands"
+ depends on OCTEONTX2_AF
+ help
+	  Provides debugfs support to initiate MDIO commands via SMC calls
+	  to the ATF.
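+
+	  Commands are issued by writing to the mdio_cmd debugfs file, for
+	  example (illustrative values, assuming debugfs is mounted at
+	  /sys/kernel/debug):
+
+	    echo 0 1 5 0 2 > /sys/kernel/debug/mdio_cmd
+
+	  The fields are <cgxlmac> <mode> <addr> <devad> <reg>, with an
+	  optional trailing [value] to issue a write instead of a read.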
+
+config OCTEONTX2_SDEI_GHES
+ bool "OcteonTX2 Generic Hardware Error Source (GHES) support"
+ depends on ARM_SDE_INTERFACE
+ help
+ Select this option to enable support for RAS Generic Hardware Error
+ Source (GHES) reporting.
+ This will allow RAS errors that are detected by the OcteonTX2 to
+ be reported using kernel logging.
+
+config OCTEONTX2_SDEI_GHES_DEBUG
+ bool "OcteonTX2 Generic Hardware Error Source (GHES) verbose debug msgs"
+ depends on OCTEONTX2_SDEI_GHES
+ help
+ Say Y here if you want the OcteonTX2 GHES support to
+ write verbose debug messages to the system log. Select this
+ if you are having a problem with the OcteonTX2 GHES support
+ and want to see more details.
+
+ If you are unsure about this, say N here.
+
+endmenu
diff --git a/drivers/soc/marvell/Makefile b/drivers/soc/marvell/Makefile
new file mode 100644
index 000000000000..e8e1b5b63488
--- /dev/null
+++ b/drivers/soc/marvell/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-y += octeontx2-rm/
+obj-y += octeontx2-dpi/
+obj-$(CONFIG_MDIO_DEBUGFS) += mdio_debugfs.o
+obj-y += octeontx2-ghes/
diff --git a/drivers/soc/marvell/mdio_debugfs.c b/drivers/soc/marvell/mdio_debugfs.c
new file mode 100644
index 000000000000..652108960499
--- /dev/null
+++ b/drivers/soc/marvell/mdio_debugfs.c
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <linux/arm-smccc.h>
+
+#define OCTEONTX_MDIO_DBG_READ 0xc2000d01
+#define OCTEONTX_MDIO_DBG_WRITE 0xc2000d02
+
+struct dentry *pfile;
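+
+/* Parse up to six space-separated decimal fields from the user buffer:
+ * <cgxlmac> <mode> <addr> <devad> <reg> [value]. The first five are
+ * mandatory; the presence of the optional sixth field selects a write.
+ */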
+static int parse_cmd_buffer_ctx(char *cmd_buf, size_t *count,
+ const char __user *buffer,
+ int *a, bool *write)
+{
+ int bytes_not_copied;
+ char *subtoken;
+ int ret, i;
+
+ bytes_not_copied = copy_from_user(cmd_buf, buffer, *count);
+ if (bytes_not_copied)
+ return -EFAULT;
+
+ cmd_buf[*count] = '\0';
+ for (i = 0; i < 5; i++) {
+ subtoken = strsep(&cmd_buf, " ");
+ ret = subtoken ? kstrtoint(subtoken, 10, &a[i]) : -EINVAL;
+ if (ret < 0)
+ return ret;
+ }
+ if (cmd_buf) {
+ subtoken = strsep(&cmd_buf, " ");
+ ret = subtoken ? kstrtoint(subtoken, 10, &a[i]) : -EINVAL;
+ if (ret < 0)
+ return ret;
+ *write = true;
+ }
+ if (cmd_buf)
+ return -EINVAL;
+ return ret;
+}
+
+static ssize_t dbg_mdio_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct arm_smccc_res res;
+ bool write = false;
+ char *cmd_buf;
+ int ret, a[6];
+
+ if ((*ppos != 0) || !count)
+ return -EINVAL;
+
+ cmd_buf = kzalloc(count + 1, GFP_KERNEL);
+ if (!cmd_buf)
+ return count;
+
+ ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer, a, &write);
+ if (ret < 0) {
+ pr_info("Usage: echo <cgxlmac> <mode> <addr> <devad> <reg> [value] > mdio_cmd\n");
+ goto done;
+ } else {
+ if (write)
+ arm_smccc_smc(OCTEONTX_MDIO_DBG_WRITE, a[0], a[1], a[2],
+ a[3], a[4], a[5], 0, &res);
+ else
+ arm_smccc_smc(OCTEONTX_MDIO_DBG_READ, a[0], a[1], a[2],
+ a[3], a[4], 0, 0, &res);
+ pr_info("MDIO COMMAND RESULT\n");
+ pr_info("===================\n");
+ pr_info("res[0]:\t%ld\n", res.a0);
+ pr_info("res[1]:\t%ld\n", res.a1);
+ pr_info("res[2]:\t%ld\n", res.a2);
+ pr_info("res[3]:\t%ld\n", res.a3);
+ }
+done:
+ kfree(cmd_buf);
+ return ret ? ret : count;
+}
+
+static const struct file_operations dbg_mdio_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .write = dbg_mdio_write,
+};
+
+static int dbg_mdio_init(void)
+{
+ pfile = debugfs_create_file("mdio_cmd", 0644, NULL, NULL,
+ &dbg_mdio_fops);
+ if (!pfile)
+ goto create_failed;
+ return 0;
+create_failed:
+ pr_err("Failed to create debugfs dir/file for mdio_cmd\n");
+ debugfs_remove_recursive(pfile);
+ return 0;
+}
+
+static void dbg_mdio_exit(void)
+{
+ debugfs_remove_recursive(pfile);
+}
+module_init(dbg_mdio_init);
+module_exit(dbg_mdio_exit);
+#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/soc/marvell/octeontx2-dpi/Makefile b/drivers/soc/marvell/octeontx2-dpi/Makefile
new file mode 100644
index 000000000000..73640517593c
--- /dev/null
+++ b/drivers/soc/marvell/octeontx2-dpi/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for Marvell's OcteonTX2 DPI PF driver
+#
+
+obj-$(CONFIG_OCTEONTX2_DPI_PF) += octeontx2_dpi.o
+
+octeontx2_dpi-y := dpi.o
diff --git a/drivers/soc/marvell/octeontx2-dpi/dpi.c b/drivers/soc/marvell/octeontx2-dpi/dpi.c
new file mode 100644
index 000000000000..9896a4488b40
--- /dev/null
+++ b/drivers/soc/marvell/octeontx2-dpi/dpi.c
@@ -0,0 +1,526 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 DPI PF driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/sysfs.h>
+
+#include "dpi.h"
+
+#define DPI_DRV_NAME "octeontx2-dpi"
+#define DPI_DRV_STRING "Marvell OcteonTX2 DPI-DMA Driver"
+#define DPI_DRV_VERSION "1.0"
+
+/* Supported devices */
+static const struct pci_device_id dpi_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_DPI_PF) },
+ { 0, } /* end of table */
+};
+MODULE_DEVICE_TABLE(pci, dpi_id_table);
+MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_DESCRIPTION(DPI_DRV_STRING);
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DPI_DRV_VERSION);
+
+static void dpi_reg_write(struct dpipf *dpi, u64 offset, u64 val)
+{
+ writeq(val, dpi->reg_base + offset);
+}
+
+static u64 dpi_reg_read(struct dpipf *dpi, u64 offset)
+{
+ return readq(dpi->reg_base + offset);
+}
+
+static int dpi_dma_engine_get_num(void)
+{
+ return DPI_MAX_ENGINES;
+}
+
+static int dpi_queue_init(struct dpipf *dpi, struct dpipf_vf *dpivf, u8 vf)
+{
+ int engine = 0;
+ int queue = vf;
+ u64 reg = 0ULL;
+ u32 aura = dpivf->vf_config.aura;
+ u16 buf_size = dpivf->vf_config.csize;
+ u16 sso_pf_func = dpivf->vf_config.sso_pf_func;
+ u16 npa_pf_func = dpivf->vf_config.npa_pf_func;
+
+ dpi_reg_write(dpi, DPI_DMAX_IBUFF_CSIZE(queue),
+ DPI_DMA_IBUFF_CSIZE_CSIZE((u64)(buf_size / 8)));
+
+	/* IDs are already configured while creating the domains.
+ * No need to configure here.
+ */
+ for (engine = 0; engine < dpi_dma_engine_get_num(); engine++) {
+		/* Don't configure the queues for PKT engines */
+ if (engine >= 4)
+ break;
+
+ reg = 0;
+ reg = dpi_reg_read(dpi, DPI_DMA_ENGX_EN(engine));
+ reg |= DPI_DMA_ENG_EN_QEN(0x1 << queue);
+ dpi_reg_write(dpi, DPI_DMA_ENGX_EN(engine), reg);
+ }
+
+ reg = dpi_reg_read(dpi, DPI_DMAX_IDS2(queue));
+ reg |= DPI_DMA_IDS2_INST_AURA(aura);
+ dpi_reg_write(dpi, DPI_DMAX_IDS2(queue), reg);
+
+ reg = dpi_reg_read(dpi, DPI_DMAX_IDS(queue));
+ reg |= DPI_DMA_IDS_DMA_NPA_PF_FUNC(npa_pf_func);
+ reg |= DPI_DMA_IDS_DMA_SSO_PF_FUNC(sso_pf_func);
+ reg |= DPI_DMA_IDS_DMA_STRM(vf + 1);
+ reg |= DPI_DMA_IDS_INST_STRM(vf + 1);
+ dpi_reg_write(dpi, DPI_DMAX_IDS(queue), reg);
+
+ return 0;
+}
+
+static int dpi_queue_fini(struct dpipf *dpi, struct dpipf_vf *dpivf, u8 vf)
+{
+ u64 reg = 0ULL;
+ int engine = 0;
+ int queue = vf;
+ u16 buf_size = dpivf->vf_config.csize;
+
+ for (engine = 0; engine < dpi_dma_engine_get_num(); engine++) {
+		/* Don't configure the queues for PKT engines */
+ if (engine >= 4)
+ break;
+
+ reg = 0;
+ reg = dpi_reg_read(dpi, DPI_DMA_ENGX_EN(engine));
+ reg &= DPI_DMA_ENG_EN_QEN((~(1 << queue)));
+ dpi_reg_write(dpi, DPI_DMA_ENGX_EN(engine), reg);
+ }
+
+ dpi_reg_write(dpi, DPI_DMAX_QRST(queue), 0x1ULL);
+	/* TBD: is the code below required? */
+ dpi_reg_write(dpi, DPI_DMAX_IBUFF_CSIZE(queue),
+ DPI_DMA_IBUFF_CSIZE_CSIZE((u64)(buf_size)));
+
+ /* Reset IDS and IDS2 registers */
+ dpi_reg_write(dpi, DPI_DMAX_IDS2(queue), 0ULL);
+ dpi_reg_write(dpi, DPI_DMAX_IDS(queue), 0ULL);
+
+ return 0;
+}
+
+/**
+ * Global initialization of DPI
+ *
+ * @dpi: DPI device context structure
+ * @return Zero on success, negative on failure
+ */
+static int dpi_init(struct dpipf *dpi)
+{
+ int engine = 0;
+ u64 reg = 0ULL;
+
+ for (engine = 0; engine < dpi_dma_engine_get_num(); engine++) {
+ if (engine == 4 || engine == 5)
+ reg = DPI_ENG_BUF_BLKS(8);
+ else
+ reg = DPI_ENG_BUF_BLKS(4);
+
+ dpi_reg_write(dpi, DPI_ENGX_BUF(engine), reg);
+
+		/* The qmap for each engine is cleared here, so no DPI
+		 * queues are mapped to the engines. When a VF is
+		 * initialised, the corresponding bit in the qmap will be
+		 * set for all engines.
+		 */
+ dpi_reg_write(dpi, DPI_DMA_ENGX_EN(engine), 0x0ULL);
+ }
+
+ reg = 0ULL;
+ reg = (DPI_DMA_CONTROL_ZBWCSEN | DPI_DMA_CONTROL_PKT_EN |
+ DPI_DMA_CONTROL_LDWB | DPI_DMA_CONTROL_O_MODE |
+ DPI_DMA_CONTROL_DMA_ENB(0xfULL));
+
+ dpi_reg_write(dpi, DPI_DMA_CONTROL, reg);
+ dpi_reg_write(dpi, DPI_CTL, DPI_CTL_EN);
+
+ return 0;
+}
+
+static int dpi_fini(struct dpipf *dpi)
+{
+ int engine = 0;
+ u64 reg = 0ULL;
+
+ for (engine = 0; engine < dpi_dma_engine_get_num(); engine++) {
+
+ dpi_reg_write(dpi, DPI_ENGX_BUF(engine), reg);
+ dpi_reg_write(dpi, DPI_DMA_ENGX_EN(engine), 0x0ULL);
+ }
+
+ reg = 0ULL;
+ dpi_reg_write(dpi, DPI_DMA_CONTROL, reg);
+ dpi_reg_write(dpi, DPI_CTL, ~DPI_CTL_EN);
+
+ return 0;
+}
+
+static int dpi_queue_reset(struct dpipf *dpi, u16 queue)
+{
+ /* TODO: add support */
+ return 0;
+}
+
+static irqreturn_t dpi_pf_intr_handler (int irq, void *dpi_irq)
+{
+ u64 reg_val = 0;
+ int i = 0;
+ struct dpipf *dpi = (struct dpipf *)dpi_irq;
+
+ dev_err(&dpi->pdev->dev, "intr received: %d\n", irq);
+
+ /* extract MSIX vector number from irq number. */
+ while (irq != pci_irq_vector(dpi->pdev, i)) {
+ i++;
+ if (i > dpi->num_vec)
+ break;
+ }
+ if (i < DPI_REQQX_INT_IDX) {
+ reg_val = dpi_reg_read(dpi, DPI_DMA_CCX_INT(i));
+ dev_err(&dpi->pdev->dev, "DPI_CC%d_INT raised: 0x%016llx\n",
+ i, reg_val);
+ dpi_reg_write(dpi, DPI_DMA_CCX_INT(i), 0x1ULL);
+ } else if (i < DPI_SDP_FLR_RING_LINTX_IDX) {
+ reg_val = dpi_reg_read(
+ dpi, DPI_REQQX_INT(i - DPI_REQQX_INT_IDX));
+ dev_err(&dpi->pdev->dev,
+ "DPI_REQQ_INT raised for q:%d: 0x%016llx\n",
+ (i - 0x40), reg_val);
+
+ dpi_reg_write(
+ dpi, DPI_REQQX_INT(i - DPI_REQQX_INT_IDX), reg_val);
+
+ if (reg_val & (0x71ULL))
+ dpi_queue_reset(dpi, (i - DPI_REQQX_INT_IDX));
+ } else if (i < DPI_SDP_IRE_LINTX_IDX) {
+ /* TODO: handle interrupt */
+ dev_err(&dpi->pdev->dev, "DPI_SDP_FLR_RING_LINTX raised\n");
+
+ } else if (i < DPI_SDP_ORE_LINTX_IDX) {
+ /* TODO: handle interrupt */
+ dev_err(&dpi->pdev->dev, "DPI_SDP_IRE_LINTX raised\n");
+
+ } else if (i < DPI_SDP_ORD_LINTX_IDX) {
+ /* TODO: handle interrupt */
+ dev_err(&dpi->pdev->dev, "DPI_SDP_ORE_LINTX raised\n");
+
+ } else if (i < DPI_EPFX_PP_VF_LINTX_IDX) {
+ /* TODO: handle interrupt */
+ dev_err(&dpi->pdev->dev, "DPI_SDP_ORD_LINTX raised\n");
+
+ } else if (i < DPI_EPFX_DMA_VF_LINTX_IDX) {
+ /* TODO: handle interrupt */
+ dev_err(&dpi->pdev->dev, "DPI_EPFX_PP_VF_LINTX raised\n");
+
+ } else if (i < DPI_EPFX_MISC_LINTX_IDX) {
+ /* TODO: handle interrupt */
+ dev_err(&dpi->pdev->dev, "DPI_EPFX_DMA_VF_LINTX raised\n");
+
+ } else if (i < DPI_PF_RAS_IDX) {
+ /* TODO: handle interrupt */
+ dev_err(&dpi->pdev->dev, "DPI_EPFX_MISC_LINTX raised\n");
+
+ } else if (i == DPI_PF_RAS_IDX) {
+ reg_val = dpi_reg_read(dpi, DPI_PF_RAS);
+ dev_err(&dpi->pdev->dev, "DPI_PF_RAS raised: 0x%016llx\n",
+ reg_val);
+ dpi_reg_write(dpi, DPI_PF_RAS, reg_val);
+ }
+ return IRQ_HANDLED;
+}
+
+static int dpi_irq_init(struct dpipf *dpi)
+{
+ int i, irq = 0;
+ int ret = 0;
+
+ /* Clear All Interrupts */
+ dpi_reg_write(dpi, DPI_PF_RAS, DPI_PF_RAS_INT);
+
+ /* Clear All Enables */
+ dpi_reg_write(dpi, DPI_PF_RAS_ENA_W1C, DPI_PF_RAS_INT);
+
+ for (i = 0; i < DPI_MAX_REQQ_INT; i++) {
+ dpi_reg_write(dpi, DPI_REQQX_INT(i), DPI_REQQ_INT);
+ dpi_reg_write(dpi, DPI_REQQX_INT_ENA_W1C(i), DPI_REQQ_INT);
+ }
+
+ for (i = 0; i < DPI_MAX_CC_INT; i++) {
+ dpi_reg_write(dpi, DPI_DMA_CCX_INT(i), DPI_DMA_CC_INT);
+ dpi_reg_write(dpi, DPI_DMA_CCX_INT_ENA_W1C(i), DPI_DMA_CC_INT);
+ }
+
+ dpi->num_vec = pci_msix_vec_count(dpi->pdev);
+ /* Enable MSI-X */
+ ret = pci_alloc_irq_vectors(dpi->pdev, dpi->num_vec,
+ dpi->num_vec, PCI_IRQ_MSIX);
+ if (ret < 0) {
+ dev_err(&dpi->pdev->dev,
+ "DPIPF: Request for %d msix vectors failed, ret %d\n",
+ dpi->num_vec, ret);
+ goto alloc_fail;
+ }
+
+ for (irq = 0; irq < dpi->num_vec; irq++) {
+ ret = request_irq(pci_irq_vector(dpi->pdev, irq),
+ dpi_pf_intr_handler, 0, "DPIPF", dpi);
+ if (ret) {
+ dev_err(&dpi->pdev->dev,
+ "DPIPF: IRQ(%d) registration failed for DPIPF\n",
+ irq);
+ goto fail;
+ }
+ }
+
+#define ENABLE_DPI_INTERRUPTS 0
+#if ENABLE_DPI_INTERRUPTS
+	/* Enable all interrupts */
+ for (i = 0; i < DPI_MAX_REQQ_INT; i++)
+ dpi_reg_write(dpi, DPI_REQQX_INT_ENA_W1S(i), DPI_REQQ_INT);
+
+ dpi_reg_write(dpi, DPI_PF_RAS_ENA_W1S, DPI_PF_RAS_INT);
+#endif
+ return 0;
+fail:
+ if (irq) {
+ for (i = 0; i <= irq; i++)
+ free_irq(pci_irq_vector(dpi->pdev, i), dpi);
+ }
+ pci_free_irq_vectors(dpi->pdev);
+alloc_fail:
+ dpi->num_vec = 0;
+ return ret;
+}
+
+static void dpi_irq_free(struct dpipf *dpi)
+{
+ int i = 0;
+
+ /* Clear All Enables */
+ dpi_reg_write(dpi, DPI_PF_RAS_ENA_W1C, DPI_PF_RAS_INT);
+
+ for (i = 0; i < DPI_MAX_REQQ_INT; i++) {
+ dpi_reg_write(dpi, DPI_REQQX_INT(i), DPI_REQQ_INT);
+ dpi_reg_write(dpi, DPI_REQQX_INT_ENA_W1C(i), DPI_REQQ_INT);
+ }
+
+ for (i = 0; i < DPI_MAX_CC_INT; i++) {
+ dpi_reg_write(dpi, DPI_DMA_CCX_INT(i), DPI_DMA_CC_INT);
+ dpi_reg_write(dpi, DPI_DMA_CCX_INT_ENA_W1C(i), DPI_DMA_CC_INT);
+ }
+
+ for (i = 0; i < dpi->num_vec; i++)
+ free_irq(pci_irq_vector(dpi->pdev, i), dpi);
+
+ pci_free_irq_vectors(dpi->pdev);
+ dpi->num_vec = 0;
+}
+
+static int dpi_sriov_configure(struct pci_dev *pdev, int numvfs)
+{
+ struct dpipf *dpi = pci_get_drvdata(pdev);
+ int ret = 0;
+
+ if (numvfs == 0) {
+ pci_disable_sriov(pdev);
+ dpi->total_vfs = 0;
+ } else {
+ ret = pci_enable_sriov(pdev, numvfs);
+ if (ret == 0) {
+ dpi->total_vfs = numvfs;
+ ret = numvfs;
+ }
+ }
+
+ return ret;
+}
+
+static ssize_t dpi_show_config(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+ struct dpipf *dpi = pci_get_drvdata(pdev);
+ int vf_idx;
+
+ for (vf_idx = 0; vf_idx < DPI_MAX_VFS; vf_idx++) {
+ struct dpipf_vf *dpivf = &dpi->vf[vf_idx];
+
+ if (!dpivf->setup_done)
+ continue;
+ sprintf(buf + strlen(buf),
+			"VF:%d command buffer size:%d aura:%d ",
+ vf_idx, dpivf->vf_config.csize, dpivf->vf_config.aura);
+ sprintf(buf + strlen(buf),
+ "sso_pf_func:%x npa_pf_func:%x\n",
+ dpivf->vf_config.sso_pf_func,
+ dpivf->vf_config.npa_pf_func);
+ }
+ return strlen(buf);
+}
+
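+/* The dpi_device_config sysfs file expects a binary
+ * union dpi_mbox_message_t: DPI_QUEUE_OPEN programs the per-VF queue
+ * with the supplied aura, completion buffer size and SSO/NPA PF_FUNCs,
+ * while DPI_QUEUE_CLOSE tears the queue back down.
+ */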
+static ssize_t dpi_write_config(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+ union dpi_mbox_message_t mbox_msg = {.u[0] = 0ULL, .u[1] = 0ULL};
+ struct dpipf *dpi = pci_get_drvdata(pdev);
+ struct dpipf_vf *dpivf;
+
+ memcpy(&mbox_msg, buf, count);
+ if (mbox_msg.s.vfid > DPI_MAX_VFS) {
+ dev_err(dev, "Invalid vfid:%d\n", mbox_msg.s.vfid);
+ return -1;
+ }
+ dpivf = &dpi->vf[mbox_msg.s.vfid];
+
+ switch (mbox_msg.s.cmd) {
+ case DPI_QUEUE_OPEN:
+ dpivf->vf_config.aura = mbox_msg.s.aura;
+ dpivf->vf_config.csize = mbox_msg.s.csize;
+ dpivf->vf_config.sso_pf_func = mbox_msg.s.sso_pf_func;
+ dpivf->vf_config.npa_pf_func = mbox_msg.s.npa_pf_func;
+ dpi_queue_init(dpi, dpivf, mbox_msg.s.vfid);
+ dpivf->setup_done = true;
+ break;
+ case DPI_QUEUE_CLOSE:
+ dpivf->vf_config.aura = 0;
+ dpivf->vf_config.csize = 0;
+ dpivf->vf_config.sso_pf_func = 0;
+ dpivf->vf_config.npa_pf_func = 0;
+ dpi_queue_fini(dpi, dpivf, mbox_msg.s.vfid);
+ dpivf->setup_done = false;
+ break;
+ default:
+ return -1;
+ }
+
+ return sizeof(mbox_msg);
+}
+
+static DEVICE_ATTR(dpi_device_config, 0660,
+ dpi_show_config, dpi_write_config);
+
+static int dpi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ struct dpipf *dpi;
+ int err;
+
+ dpi = devm_kzalloc(dev, sizeof(*dpi), GFP_KERNEL);
+ if (!dpi)
+ return -ENOMEM;
+ dpi->pdev = pdev;
+
+ pci_set_drvdata(pdev, dpi);
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ pci_set_drvdata(pdev, NULL);
+ return err;
+ }
+
+ err = pci_request_regions(pdev, DPI_DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ goto err_disable_device;
+ }
+
+ /* MAP configuration registers */
+ dpi->reg_base = pcim_iomap(pdev, PCI_DPI_PF_CFG_BAR, 0);
+ if (!dpi->reg_base) {
+ dev_err(dev, "DPI: Cannot map CSR memory space, aborting\n");
+ err = -ENOMEM;
+ goto err_release_regions;
+ }
+
+ /* Initialize global PF registers */
+ err = dpi_init(dpi);
+ if (err) {
+ dev_err(dev, "DPI: Failed to initialize dpi\n");
+ goto err_release_regions;
+ }
+
+ /* Register interrupts */
+ err = dpi_irq_init(dpi);
+ if (err) {
+ dev_err(dev, "DPI: Failed to initialize irq vectors\n");
+ goto err_dpi_fini;
+ }
+
+ err = device_create_file(dev, &dev_attr_dpi_device_config);
+ if (err) {
+ dev_err(dev, "DPI: Failed to create sysfs entry for driver\n");
+ goto err_free_irq;
+ }
+
+ return 0;
+
+err_free_irq:
+ dpi_irq_free(dpi);
+err_dpi_fini:
+ dpi_fini(dpi);
+err_release_regions:
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ devm_kfree(dev, dpi);
+ return err;
+}
+
+static void dpi_remove(struct pci_dev *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dpipf *dpi = pci_get_drvdata(pdev);
+
+ device_remove_file(dev, &dev_attr_dpi_device_config);
+ dpi_irq_free(dpi);
+ dpi_fini(dpi);
+ dpi_sriov_configure(pdev, 0);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ devm_kfree(dev, dpi);
+}
+
+static struct pci_driver dpi_driver = {
+ .name = DPI_DRV_NAME,
+ .id_table = dpi_id_table,
+ .probe = dpi_probe,
+ .remove = dpi_remove,
+ .sriov_configure = dpi_sriov_configure,
+};
+
+static int __init dpi_init_module(void)
+{
+ pr_info("%s: %s\n", DPI_DRV_NAME, DPI_DRV_STRING);
+
+ return pci_register_driver(&dpi_driver);
+}
+
+static void __exit dpi_cleanup_module(void)
+{
+ pci_unregister_driver(&dpi_driver);
+}
+
+module_init(dpi_init_module);
+module_exit(dpi_cleanup_module);
diff --git a/drivers/soc/marvell/octeontx2-dpi/dpi.h b/drivers/soc/marvell/octeontx2-dpi/dpi.h
new file mode 100644
index 000000000000..2d0cf04524b7
--- /dev/null
+++ b/drivers/soc/marvell/octeontx2-dpi/dpi.h
@@ -0,0 +1,246 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 DPI PF driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __DPI_H__
+#define __DPI_H__
+
+ /* PCI device IDs */
+#define PCI_DEVID_OCTEONTX2_DPI_PF 0xA080
+#define PCI_DEVID_OCTEONTX2_DPI_VF 0xA081
+
+/* PCI BAR nos */
+#define PCI_DPI_PF_CFG_BAR 0
+#define PCI_DPI_PF_MSIX_BAR 4
+#define PCI_DPI_VF_CFG_BAR 0
+#define PCI_DPI_VF_MSIX_BAR 4
+#define DPI_VF_CFG_SIZE 0x100000
+#define DPI_VF_OFFSET(x) (0x20000000 | 0x100000 * (x))
+
+/* MSI-X interrupts */
+#define DPI_VF_MSIX_COUNT 1
+#define DPI_MAX_REQQ_INT 8
+#define DPI_MAX_CC_INT 64
+
+/* MSI-X interrupt vectors indexes */
+#define DPI_CCX_INT_IDX 0x0
+#define DPI_REQQX_INT_IDX 0x40
+#define DPI_SDP_FLR_RING_LINTX_IDX 0x48
+#define DPI_SDP_IRE_LINTX_IDX 0x4C
+#define DPI_SDP_ORE_LINTX_IDX 0x50
+#define DPI_SDP_ORD_LINTX_IDX 0x54
+#define DPI_EPFX_PP_VF_LINTX_IDX 0x58
+#define DPI_EPFX_DMA_VF_LINTX_IDX 0x78
+#define DPI_EPFX_MISC_LINTX_IDX 0x98
+#define DPI_PF_RAS_IDX 0xA8
+
+#define DPI_MAX_ENGINES 6
+#define DPI_MAX_VFS 8
+
+/**************** Macros for register modification ************/
+#define DPI_DMA_IBUFF_CSIZE_CSIZE(x) ((x) & 0x1fff)
+#define DPI_DMA_IBUFF_CSIZE_GET_CSIZE(x) ((x) & 0x1fff)
+
+#define DPI_DMA_IDS_INST_STRM(x) ((uint64_t)((x) & 0xff) << 40)
+#define DPI_DMA_IDS_GET_INST_STRM(x) (((x) >> 40) & 0xff)
+
+#define DPI_DMA_IDS_DMA_STRM(x) ((uint64_t)((x) & 0xff) << 32)
+#define DPI_DMA_IDS_GET_DMA_STRM(x) (((x) >> 32) & 0xff)
+
+#define DPI_DMA_IDS_DMA_NPA_PF_FUNC(x) ((uint64_t)((x) & 0xffff) << 16)
+#define DPI_DMA_IDS_GET_DMA_NPA_PF_FUNC(x) (((x) >> 16) & 0xffff)
+
+#define DPI_DMA_IDS_DMA_SSO_PF_FUNC(x) ((uint64_t)((x) & 0xffff))
+#define DPI_DMA_IDS_GET_DMA_SSO_PF_FUNC(x) ((x) & 0xffff)
+
+#define DPI_DMA_IDS2_INST_AURA(x) ((uint64_t)((x) & 0xfffff))
+#define DPI_DMA_IDS2_GET_INST_AURA(x) ((x) & 0xfffff)
+
+#define DPI_ENG_BUF_BLKS(x) ((x) & 0x1fULL)
+#define DPI_ENG_BUF_GET_BLKS(x) ((x) & 0x1fULL)
+
+#define DPI_ENG_BUF_BASE(x) (((x) & 0x3fULL) << 16)
+#define DPI_ENG_BUF_GET_BASE(x) (((x) >> 16) & 0x3fULL)
+
+#define DPI_DMA_ENG_EN_QEN(x) ((x) & 0xffULL)
+#define DPI_DMA_ENG_EN_GET_QEN(x) ((x) & 0xffULL)
+
+#define DPI_DMA_ENG_EN_MOLR(x) (((x) & 0x3ffULL) << 32)
+#define DPI_DMA_ENG_EN_GET_MOLR(x) (((x) >> 32) & 0x3ffULL)
+
+#define DPI_DMA_CONTROL_DMA_ENB(x) (((x) & 0x3fULL) << 48)
+#define DPI_DMA_CONTROL_GET_DMA_ENB(x) (((x) >> 48) & 0x3fULL)
+
+#define DPI_DMA_CONTROL_O_ES(x) (((x) & 0x3ULL) << 15)
+#define DPI_DMA_CONTROL_GET_O_ES(x) (((x) >> 15) & 0x3ULL)
+
+#define DPI_DMA_CONTROL_O_MODE (0x1ULL << 14)
+#define DPI_DMA_CONTROL_O_NS (0x1ULL << 17)
+#define DPI_DMA_CONTROL_O_RO (0x1ULL << 18)
+#define DPI_DMA_CONTROL_O_ADD1 (0x1ULL << 19)
+#define DPI_DMA_CONTROL_LDWB (0x1ULL << 32)
+#define DPI_DMA_CONTROL_NCB_TAG_DIS (0x1ULL << 34)
+#define DPI_DMA_CONTROL_ZBWCSEN (0x1ULL << 39)
+#define DPI_DMA_CONTROL_WQECSDIS (0x1ULL << 47)
+#define DPI_DMA_CONTROL_UIO_DIS (0x1ULL << 55)
+#define DPI_DMA_CONTROL_PKT_EN (0x1ULL << 56)
+#define DPI_DMA_CONTROL_FFP_DIS (0x1ULL << 59)
+
+#define DPI_CTL_EN (0x1ULL)
+
+/******************** macros for Interrupts ************************/
+#define DPI_DMA_CC_INT (0x1ULL)
+
+#define DPI_REQQ_INT_INSTRFLT (0x1ULL)
+#define DPI_REQQ_INT_RDFLT (0x1ULL << 1)
+#define DPI_REQQ_INT_WRFLT (0x1ULL << 2)
+#define DPI_REQQ_INT_CSFLT (0x1ULL << 3)
+#define DPI_REQQ_INT_INST_DBO (0x1ULL << 4)
+#define DPI_REQQ_INT_INST_ADDR_NULL (0x1ULL << 5)
+#define DPI_REQQ_INT_INST_FILL_INVAL (0x1ULL << 6)
+#define DPI_REQQ_INT_INSTR_PSN (0x1ULL << 7)
+
+#define DPI_REQQ_INT \
+ (DPI_REQQ_INT_INSTRFLT | \
+ DPI_REQQ_INT_RDFLT | \
+ DPI_REQQ_INT_WRFLT | \
+ DPI_REQQ_INT_CSFLT | \
+ DPI_REQQ_INT_INST_DBO | \
+ DPI_REQQ_INT_INST_ADDR_NULL | \
+ DPI_REQQ_INT_INST_FILL_INVAL | \
+ DPI_REQQ_INT_INSTR_PSN)
+
+#define DPI_PF_RAS_EBI_DAT_PSN (0x1ULL)
+#define DPI_PF_RAS_NCB_DAT_PSN (0x1ULL << 1)
+#define DPI_PF_RAS_NCB_CMD_PSN (0x1ULL << 2)
+#define DPI_PF_RAS_INT \
+ (DPI_PF_RAS_EBI_DAT_PSN | \
+ DPI_PF_RAS_NCB_DAT_PSN | \
+ DPI_PF_RAS_NCB_CMD_PSN)
+
+
+/***************** Registers ******************/
+#define DPI_DMAX_IBUFF_CSIZE(x) (0x0ULL | ((x) << 11))
+#define DPI_DMAX_REQBANK0(x) (0x8ULL | ((x) << 11))
+#define DPI_DMAX_REQBANK1(x) (0x10ULL | ((x) << 11))
+#define DPI_DMAX_IDS(x) (0x18ULL | ((x) << 11))
+#define DPI_DMAX_IDS2(x) (0x20ULL | ((x) << 11))
+#define DPI_DMAX_IFLIGHT(x) (0x28ULL | ((x) << 11))
+#define DPI_DMAX_QRST(x) (0x30ULL | ((x) << 11))
+#define DPI_DMAX_ERR_RSP_STATUS(x) (0x38ULL | ((x) << 11))
+
+#define DPI_CSCLK_ACTIVE_PC (0x4000ULL)
+#define DPI_CTL (0x4010ULL)
+#define DPI_DMA_CONTROL (0x4018ULL)
+#define DPI_DMA_ENGX_EN(x) (0x4040ULL | ((x) << 3))
+#define DPI_REQ_ERR_RSP (0x4078ULL)
+#define DPI_REQ_ERR_RSP_EN (0x4088ULL)
+#define DPI_PKT_ERR_RSP (0x4098ULL)
+#define DPI_NCB_CFG (0x40A0ULL)
+#define DPI_BP_TEST0 (0x40B0ULL)
+#define DPI_ENGX_BUF(x) (0x40C0ULL | ((x) << 3))
+#define DPI_EBUS_RECAL (0x40F0ULL)
+#define DPI_EBUS_PORTX_CFG(x) (0x4100ULL | ((x) << 3))
+#define DPI_EBUS_PORTX_SCFG(x) (0x4180ULL | ((x) << 3))
+#define DPI_EBUS_PORTX_ERR_INFO(x) (0x4200ULL | ((x) << 3))
+#define DPI_EBUS_PORTX_ERR(x) (0x4280ULL | ((x) << 3))
+#define DPI_INFO_REG (0x4300ULL)
+#define DPI_PF_RAS (0x4308ULL)
+#define DPI_PF_RAS_W1S (0x4310ULL)
+#define DPI_PF_RAS_ENA_W1C (0x4318ULL)
+#define DPI_PF_RAS_ENA_W1S (0x4320ULL)
+#define DPI_DMA_CCX_INT(x) (0x5000ULL | ((x) << 3))
+#define DPI_DMA_CCX_INT_W1S(x) (0x5400ULL | ((x) << 3))
+#define DPI_DMA_CCX_INT_ENA_W1C(x) (0x5800ULL | ((x) << 3))
+#define DPI_DMA_CCX_INT_ENA_W1S(x) (0x5C00ULL | ((x) << 3))
+#define DPI_DMA_CCX_CNT(x) (0x6000ULL | ((x) << 3))
+#define DPI_REQQX_INT(x) (0x6600ULL | ((x) << 3))
+#define DPI_REQQX_INT_W1S(x) (0x6640ULL | ((x) << 3))
+#define DPI_REQQX_INT_ENA_W1C(x) (0x6680ULL | ((x) << 3))
+#define DPI_REQQX_INT_ENA_W1S(x) (0x66C0ULL | ((x) << 3))
+#define DPI_EPFX_DMA_VF_LINTX(x, y) (0x6800ULL | ((x) << 5) |\
+ ((y) << 4))
+#define DPI_EPFX_DMA_VF_LINTX_W1S(x, y) (0x6A00ULL | ((x) << 5) |\
+ ((y) << 4))
+#define DPI_EPFX_DMA_VF_LINTX_ENA_W1C(x, y) (0x6C00ULL | ((x) << 5) |\
+ ((y) << 4))
+#define DPI_EPFX_DMA_VF_LINTX_ENA_W1S(x, y) (0x6E00ULL | ((x) << 5) |\
+ ((y) << 4))
+#define DPI_EPFX_MISC_LINT(x) (0x7000ULL | ((x) << 5))
+#define DPI_EPFX_MISC_LINT_W1S(x) (0x7008ULL | ((x) << 5))
+#define DPI_EPFX_MISC_LINT_ENA_W1C(x) (0x7010ULL | ((x) << 5))
+#define DPI_EPFX_MISC_LINT_ENA_W1S(x) (0x7018ULL | ((x) << 5))
+#define DPI_EPFX_PP_VF_LINTX(x, y) (0x7200ULL | ((x) << 5) |\
+ ((y) << 4))
+#define DPI_EPFX_PP_VF_LINTX_W1S(x, y) (0x7400ULL | ((x) << 5) |\
+ ((y) << 4))
+#define DPI_EPFX_PP_VF_LINTX_ENA_W1C(x, y) (0x7600ULL | ((x) << 5) |\
+ ((y) << 4))
+#define DPI_EPFX_PP_VF_LINTX_ENA_W1S(x, y) (0x7800ULL | ((x) << 5) |\
+ ((y) << 4))
+/* VF Registers: */
+#define DPI_VDMA_EN (0x0ULL)
+#define DPI_VDMA_REQQ_CTL (0x8ULL)
+#define DPI_VDMA_DBELL (0x10ULL)
+#define DPI_VDMA_SADDR (0x18ULL)
+#define DPI_VDMA_COUNTS (0x20ULL)
+#define DPI_VDMA_NADDR (0x28ULL)
+#define DPI_VDMA_IWBUSY (0x30ULL)
+#define DPI_VDMA_CNT (0x38ULL)
+#define DPI_VF_INT (0x100ULL)
+#define DPI_VF_INT_W1S (0x108ULL)
+#define DPI_VF_INT_ENA_W1C (0x110ULL)
+#define DPI_VF_INT_ENA_W1S (0x118ULL)
+
+struct dpivf_config {
+ uint16_t csize;
+ uint32_t aura;
+ uint16_t sso_pf_func;
+ uint16_t npa_pf_func;
+};
+
+struct dpipf_vf {
+ uint8_t this_vfid;
+ bool setup_done;
+ struct dpivf_config vf_config;
+};
+
+struct dpipf {
+ void __iomem *reg_base;
+ struct pci_dev *pdev;
+ int num_vec;
+ struct msix_entry *msix_entries;
+ int total_vfs;
+ int vfs_in_use;
+ struct dpipf_vf vf[DPI_MAX_VFS];
+};
+
+#define DPI_QUEUE_OPEN 0x1
+#define DPI_QUEUE_CLOSE 0x2
+#define DPI_REG_DUMP 0x3
+#define DPI_GET_REG_CFG 0x4
+
+union dpi_mbox_message_t {
+ uint64_t u[2];
+ struct dpi_mbox_message_s {
+ /* VF ID to configure */
+ uint64_t vfid :4;
+ /* Command code */
+ uint64_t cmd :4;
+ /* Command buffer size in 8-byte words */
+ uint64_t csize :14;
+ /* aura of the command buffer */
+ uint64_t aura :20;
+ /* SSO PF function */
+ uint64_t sso_pf_func :16;
+ /* NPA PF function */
+ uint64_t npa_pf_func :16;
+ } s;
+};
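+
+/*
+ * The bit-fields above total 74 bits (4 + 4 + 14 + 20 + 16 + 16), so a full
+ * mailbox message spans both 64-bit words of u[]; the dpi_device_config
+ * handler copies at most sizeof(union dpi_mbox_message_t) == 16 bytes from
+ * the caller's buffer.
+ */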
+#endif
diff --git a/drivers/soc/marvell/octeontx2-ghes/Makefile b/drivers/soc/marvell/octeontx2-ghes/Makefile
new file mode 100644
index 000000000000..29878ca771c0
--- /dev/null
+++ b/drivers/soc/marvell/octeontx2-ghes/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for Marvell's OcteonTX2 SDEI/GHES device driver
+#
+
+obj-$(CONFIG_OCTEONTX2_SDEI_GHES) += octeontx2_sdei_ghes.o
+
+octeontx2_sdei_ghes-y := otx2-sdei-ghes.o
diff --git a/drivers/soc/marvell/octeontx2-ghes/otx2-sdei-ghes.c b/drivers/soc/marvell/octeontx2-ghes/otx2-sdei-ghes.c
new file mode 100644
index 000000000000..eae0c4e0fca9
--- /dev/null
+++ b/drivers/soc/marvell/octeontx2-ghes/otx2-sdei-ghes.c
@@ -0,0 +1,605 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Supports OcteonTX2 Generic Hardware Error Source[s] (GHES).
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/arm_sdei.h>
+#include <linux/uuid.h>
+#include <linux/acpi.h>
+#include <acpi/apei.h>
+#include <linux/pci.h>
+#include "otx2-sdei-ghes.h"
+
+#define DRV_NAME "sdei-ghes"
+
+/* The initialization function does not have a device ptr; use 'pr_xxx' */
+#define initerrmsg(fmt, ...) pr_err(DRV_NAME ":" fmt, __VA_ARGS__)
+
+#ifdef CONFIG_OCTEONTX2_SDEI_GHES_DEBUG
+# define initdbgmsg(fmt, ...) pr_info(DRV_NAME ":" fmt, __VA_ARGS__)
+# define dbgmsg(dev, ...) dev_info((dev), __VA_ARGS__)
+#else
+# define initdbgmsg(fmt, ...) (void)(fmt)
+# define dbgmsg(dev, ...) (void)(dev)
+#endif // CONFIG_OCTEONTX2_SDEI_GHES_DEBUG
+
+static struct acpi_table_hest *hest;
+static struct otx2_ghes_event *event_list;
+
+#define PCI_VENDOR_ID_CAVIUM 0x177d
+#define PCI_DEVICE_ID_OCTEONTX2_LMC 0xa022
+#define PCI_DEVICE_ID_OCTEONTX2_MCC 0xa070
+#define PCI_DEVICE_ID_OCTEONTX2_MDC 0xa073
+static const struct pci_device_id sdei_ghes_otx2_pci_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_OCTEONTX2_LMC) },
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_OCTEONTX2_MCC) },
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_OCTEONTX2_MDC) },
+ { 0, },
+};
+
+/* SDEI event notification callback. */
+static int sdei_ghes_callback(u32 event_id, struct pt_regs *regs, void *arg)
+{
+ struct acpi_hest_generic_data *hest_gen_data;
+ struct acpi_hest_generic_status *estatus;
+ struct otx2_ghes_err_record *err_rec;
+ struct cper_sec_mem_err_old *mem_err;
+ struct otx2_sdei_ghes_drv *ghes_drv;
+ struct otx2_ghes_event *event;
+ u32 head, tail;
+ size_t idx;
+
+ ghes_drv = arg;
+
+ for (idx = 0; idx < ghes_drv->event_count; idx++) {
+ event = &ghes_drv->event_list[idx];
+ if (event->id != event_id)
+ continue;
+
+ head = event->ring->head;
+ tail = event->ring->tail;
+
+ if (head == tail) {
+ initerrmsg("event 0x%x err_rec ring is empty!\n",
+ event_id);
+ break;
+ }
+
+ err_rec = &event->ring->records[tail];
+
+ estatus = event->estatus;
+
+ estatus->raw_data_length = 0;
+ /* Implementation note: 'data_length' must equal
+ * 'hest_gen_entry->error_block_length' MINUS
+ * sizeof(struct acpi_hest_generic_status).
+ * See 'sdei_ghes_init()'.
+ */
+
+ /* Initialize 'data_length'; also see modifications below. */
+ estatus->data_length = sizeof(*hest_gen_data);
+ estatus->error_severity = err_rec->severity;
+
+ /* generic data follows header */
+ hest_gen_data = (struct acpi_hest_generic_data *)(estatus + 1);
+ memset(hest_gen_data, 0, sizeof(*hest_gen_data));
+
+ hest_gen_data->revision = 0x201; /* ACPI 4.x */
+ if (err_rec->fru_text[0]) {
+ hest_gen_data->validation_bits =
+ ACPI_HEST_GEN_VALID_FRU_STRING;
+ strncpy(hest_gen_data->fru_text, err_rec->fru_text,
+ sizeof(hest_gen_data->fru_text));
+ }
+ /* copy severity from generic status */
+ hest_gen_data->error_severity = estatus->error_severity;
+ guid_copy((guid_t *)hest_gen_data->section_type,
+ &CPER_SEC_PLATFORM_MEM);
+
+ hest_gen_data->error_data_length =
+ sizeof(struct cper_sec_mem_err_old);
+ estatus->data_length += sizeof(struct cper_sec_mem_err_old);
+ /* memory error follows generic data */
+ mem_err = (struct cper_sec_mem_err_old *)(hest_gen_data + 1);
+ /* copy error record from ring */
+ memcpy(mem_err, &err_rec->u.mcc, sizeof(*mem_err));
+
+ /* Ensure that estatus is committed to memory prior to
+ * setting block_status.
+ */
+ wmb();
+
+ estatus->block_status = ACPI_HEST_CORRECTABLE |
+ (1 << 4); /* i.e. one entry */
+
+ if (++tail >= event->ring->size)
+ tail = 0;
+ event->ring->tail = tail;
+ break;
+ }
+
+ return 0;
+}
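+
+/*
+ * The error status block filled in above is expected to be consumed by the
+ * generic APEI/GHES core: sdei_ghes_hest_init() below publishes one
+ * ACPI_HEST_TYPE_GENERIC_ERROR entry per event with an
+ * ACPI_HEST_NOTIFY_POLLED notification and a 1000 ms poll interval, so the
+ * GHES driver periodically checks block_status at error_status_address and
+ * reports the CPER memory-error record once it becomes non-zero.
+ */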
+
+/*
+ * Main initialization function for ghes_drv device instance.
+ *
+ * returns:
+ * 0 if no error
+ * negative errno (e.g. -ENODEV) if an error occurred initializing the device
+ */
+static int sdei_ghes_init(struct platform_device *pdev)
+{
+ struct otx2_sdei_ghes_drv *ghes_drv;
+ struct device *dev = &pdev->dev;
+ struct otx2_ghes_event *event;
+ size_t idx;
+ int ret;
+
+ dbgmsg(dev, "%s: entry\n", __func__);
+
+ ghes_drv = platform_get_drvdata(pdev);
+ ret = -ENODEV;
+
+ /* Allocated during initialization (see sdei_ghes_driver_init) */
+ ghes_drv->event_list = event_list;
+ ghes_drv->event_count = hest->error_source_count;
+
+ /* Register & enable each SDEI event */
+ for (idx = 0; idx < ghes_drv->event_count; idx++) {
+ event = &ghes_drv->event_list[idx];
+
+ /* register the event */
+ ret = sdei_event_register(event->id, sdei_ghes_callback,
+ ghes_drv);
+ if (ret < 0) {
+ dev_err(dev, "Error %d registering event 0x%x (%s)\n",
+ ret, event->id, event->name);
+ break;
+ }
+
+ /* enable the event */
+ ret = sdei_event_enable(event->id);
+ if (ret < 0) {
+ dev_err(dev, "Error %d enabling event 0x%x (%s)\n",
+ ret, event->id, event->name);
+ break;
+ }
+ }
+
+ if (idx != ghes_drv->event_count) {
+ ret = -ENODEV;
+ goto exit;
+ }
+
+ dbgmsg(dev, "Registered & enabled %ld events\n", ghes_drv->event_count);
+
+ ret = 0;
+
+exit:
+ return ret;
+}
+
+/* Main de-initialization function for ghes_drv device instance. */
+static int sdei_ghes_de_init(struct platform_device *pdev)
+{
+ struct otx2_sdei_ghes_drv *ghes_drv;
+ struct device *dev = &pdev->dev;
+ struct otx2_ghes_event *event;
+ int ret, idx;
+
+ dbgmsg(dev, "%s: entry\n", __func__);
+
+ ghes_drv = platform_get_drvdata(pdev);
+
+ for (idx = 0; idx < ghes_drv->event_count; idx++) {
+ event = &ghes_drv->event_list[idx];
+
+ ret = sdei_event_disable(event->id);
+ if (ret < 0)
+ dev_err(dev,
+ "Error %d disabling SDEI event 0x%x (%s)\n",
+ ret, event->id, event->name);
+
+ ret = sdei_event_unregister(event->id);
+ if (ret < 0)
+ dev_err(dev,
+ "Error %d unregistering SDEI event 0x%x (%s)\n",
+ ret, event->id, event->name);
+ }
+
+ return 0;
+}
+
+/* Linux driver framework probe function. */
+static int sdei_ghes_probe(struct platform_device *pdev)
+{
+ struct otx2_sdei_ghes_drv *ghes_drv;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ dbgmsg(dev, "%s: entry\n", __func__);
+
+ ghes_drv = NULL;
+
+ ret = -ENODEV;
+
+ /* allocate device structure */
+ ghes_drv = devm_kzalloc(dev, sizeof(*ghes_drv), GFP_KERNEL);
+
+ if (ghes_drv == NULL) {
+ ret = -ENOMEM;
+ dev_err(dev, "Unable to allocate drv context.\n");
+ goto exit;
+ }
+
+ platform_set_drvdata(pdev, ghes_drv);
+
+ ret = sdei_ghes_init(pdev);
+
+ /* a negative value indicates an error */
+ if (ret < 0)
+ dev_err(dev, "Error initializing SDEI GHES support.\n");
+
+exit:
+ if (ret && ghes_drv != NULL) {
+ sdei_ghes_de_init(pdev);
+ devm_kfree(dev, ghes_drv);
+ }
+
+ return ret ? -ENODEV : 0;
+}
+
+static void sdei_ghes_shutdown(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ dbgmsg(dev, "%s: entry\n", __func__);
+}
+
+static int sdei_ghes_remove(struct platform_device *pdev)
+{
+ struct otx2_sdei_ghes_drv *ghes_drv;
+ struct device *dev = &pdev->dev;
+
+ ghes_drv = platform_get_drvdata(pdev);
+
+ dbgmsg(dev, "%s: entry\n", __func__);
+
+ sdei_ghes_de_init(pdev);
+
+ devm_kfree(dev, ghes_drv);
+
+ return 0;
+}
+
+static const struct of_device_id sdei_ghes_of_match[] = {
+ { .compatible = "marvell,sdei-ghes", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sdei_ghes_of_match);
+
+static const struct platform_device_id sdei_ghes_pdev_match[] = {
+ { .name = DRV_NAME, },
+ {},
+};
+MODULE_DEVICE_TABLE(platform, sdei_ghes_pdev_match);
+
+static struct platform_driver sdei_ghes_drv = {
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = sdei_ghes_of_match,
+ },
+ .probe = sdei_ghes_probe,
+ .remove = sdei_ghes_remove,
+ .shutdown = sdei_ghes_shutdown,
+ .id_table = sdei_ghes_pdev_match,
+};
+
+/*
+ * Allocates and initializes Hardware Error Source Table (HEST), then
+ * registers it with kernel.
+ */
+static int __init sdei_ghes_hest_init(struct device_node *of_node)
+{
+ const __be32 *of_base0, *of_base1, *of_base2;
+ struct acpi_hest_generic *hest_gen_entry;
+ struct device_node *child_node;
+ struct otx2_ghes_event *event;
+ size_t event_cnt, size, idx;
+ const u32 *evt_id_prop;
+ int ret, prop_sz;
+ void *memblock;
+
+ initdbgmsg("%s: entry\n", __func__);
+
+ ret = -ENODEV;
+
+ /* enumerate events available for subscription */
+ event_cnt = 0;
+ for_each_available_child_of_node(of_node, child_node) {
+ of_base0 = of_get_address(child_node, 0, NULL, NULL);
+ if ((of_base0 == NULL) ||
+ (of_translate_address(child_node, of_base0) == OF_BAD_ADDR))
+ continue;
+ of_base1 = of_get_address(child_node, 1, NULL, NULL);
+ if ((of_base1 == NULL) ||
+ (of_translate_address(child_node, of_base1) == OF_BAD_ADDR))
+ continue;
+ of_base2 = of_get_address(child_node, 2, NULL, NULL);
+ if ((of_base2 == NULL) ||
+ (of_translate_address(child_node, of_base2) == OF_BAD_ADDR))
+ continue;
+ evt_id_prop = of_get_property(child_node, "event-id", &prop_sz);
+ if (!evt_id_prop || prop_sz != sizeof(*evt_id_prop))
+ continue;
+
+ event_cnt++;
+ initdbgmsg("Found child %s/%s 0x%llx/0x%llx/0x%llx, ID:0x%x)\n",
+ child_node->name, child_node->full_name,
+ (long long)of_translate_address(child_node, of_base0),
+ (long long)of_translate_address(child_node, of_base1),
+ (long long)of_translate_address(child_node, of_base2),
+ be32_to_cpu(*evt_id_prop));
+ }
+
+ /* allocate room for HEST */
+ size = sizeof(struct acpi_table_hest);
+ /* each error source is of type ACPI_HEST_TYPE_GENERIC_ERROR */
+ size += event_cnt * sizeof(struct acpi_hest_generic);
+ /* align event list on 8-byte boundary */
+ size = roundup(size, 8);
+
+ /* allocate room for list of available events */
+ size += event_cnt * sizeof(struct otx2_ghes_event);
+
+ /* allocate everything in one block, ordered as:
+ * HEST table
+ * event list
+ */
+ memblock = kzalloc(size, GFP_KERNEL);
+ if (memblock == NULL) {
+ initerrmsg("Unable to allocate HEST & event memory (0x%lx B)\n",
+ size);
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ /* HEST is at start of allocated block */
+ hest = memblock;
+
+ /* event table is after HEST */
+ size = sizeof(struct acpi_table_hest);
+ size += event_cnt * sizeof(struct acpi_hest_generic);
+ /* align event list on 8-byte boundary (see allocation above) */
+ size = roundup(size, 8);
+ event_list = memblock + size;
+
+ /* populate HEST header */
+ strncpy(hest->header.signature, ACPI_SIG_HEST,
+ sizeof(hest->header.signature));
+ hest->header.length =
+ sizeof(struct acpi_table_hest) +
+ (event_cnt * sizeof(struct acpi_hest_generic));
+ hest->header.revision = 1;
+#define OTX2_HEST_OEM_ID "MRVL "
+ strncpy(hest->header.oem_id, OTX2_HEST_OEM_ID,
+ sizeof(hest->header.oem_id));
+ strncpy(hest->header.oem_table_id, "OTX2 ",
+ sizeof(hest->header.oem_table_id));
+ hest->header.oem_revision = 1;
+ strncpy(hest->header.asl_compiler_id, OTX2_HEST_OEM_ID,
+ sizeof(hest->header.asl_compiler_id));
+ hest->header.asl_compiler_revision = 1;
+#pragma message "HEST checksum should be calculated"
+ hest->header.checksum = 0;
+
+ hest->error_source_count = event_cnt;
+
+ /* retrieve/init event IDs from DeviceTree & populate HEST entries */
+ idx = 0;
+ hest_gen_entry = (struct acpi_hest_generic *)(hest + 1);
+ for_each_available_child_of_node(of_node, child_node) {
+ of_base0 = of_get_address(child_node, 0, NULL, NULL);
+ if ((of_base0 == NULL) ||
+ (of_translate_address(child_node, of_base0) == OF_BAD_ADDR))
+ continue;
+ of_base1 = of_get_address(child_node, 1, NULL, NULL);
+ if ((of_base1 == NULL) ||
+ (of_translate_address(child_node, of_base1) == OF_BAD_ADDR))
+ continue;
+ of_base2 = of_get_address(child_node, 2, NULL, NULL);
+ if ((of_base2 == NULL) ||
+ (of_translate_address(child_node, of_base2) == OF_BAD_ADDR))
+ continue;
+ evt_id_prop = of_get_property(child_node, "event-id", &prop_sz);
+ if (!evt_id_prop || prop_sz != sizeof(*evt_id_prop))
+ continue;
+
+ event = &event_list[idx];
+
+ /* name is already terminated by 'kzalloc' */
+ strncpy(event->name, child_node->name,
+ sizeof(event->name) - 1);
+ event->id = be32_to_cpu(*evt_id_prop);
+
+ hest_gen_entry->header.type = ACPI_HEST_TYPE_GENERIC_ERROR;
+ hest_gen_entry->header.source_id = idx;
+ hest_gen_entry->related_source_id =
+ hest_gen_entry->header.source_id;
+ hest_gen_entry->reserved = 0;
+ hest_gen_entry->enabled = 1;
+ hest_gen_entry->records_to_preallocate = 1;
+ hest_gen_entry->max_sections_per_record = 1;
+ hest_gen_entry->max_raw_data_length = 0;
+
+ hest_gen_entry->error_status_address.space_id =
+ ACPI_ADR_SPACE_SYSTEM_MEMORY;
+ hest_gen_entry->error_status_address.bit_width = 64;
+ hest_gen_entry->error_status_address.bit_offset = 0;
+ hest_gen_entry->error_status_address.access_width = 4;
+ hest_gen_entry->error_status_address.address =
+ of_translate_address(child_node, of_base0);
+
+ hest_gen_entry->notify.type = ACPI_HEST_NOTIFY_POLLED;
+ hest_gen_entry->notify.length = sizeof(hest_gen_entry->notify);
+ hest_gen_entry->notify.config_write_enable = 0;
+ hest_gen_entry->notify.poll_interval = 1000; /* i.e. 1 sec */
+ hest_gen_entry->notify.vector = event->id;
+ hest_gen_entry->notify.error_threshold_value = 1;
+ hest_gen_entry->notify.error_threshold_window = 1;
+
+ hest_gen_entry->error_block_length =
+ sizeof(struct acpi_hest_generic_status) +
+ sizeof(struct acpi_hest_generic_data) +
+ sizeof(struct cper_sec_mem_err_old);
+
+ event->estatus_address = phys_to_virt(
+ hest_gen_entry->error_status_address.address);
+ if (event->estatus_address == NULL) {
+ initerrmsg("Unable to access estatus_address 0x%llx\n",
+ hest_gen_entry->error_status_address.address)
+ ;
+ goto exit;
+ }
+
+ event->estatus_pa = of_translate_address(child_node, of_base1);
+ event->estatus = phys_to_virt(event->estatus_pa);
+ if (event->estatus == NULL) {
+ initerrmsg("Unable to access estatus block 0x%llx\n",
+ of_translate_address(child_node, of_base1));
+ goto exit;
+ }
+
+ /* Event ring buffer in memory */
+ event->ring = phys_to_virt(of_translate_address(child_node,
+ of_base2));
+ if (event->ring == NULL) {
+ initerrmsg("Unable to access event 0x%x ring buffer\n",
+ event->id);
+ goto exit;
+ }
+
+ /* clear status */
+ event->estatus->block_status = 0;
+
+ /* set event status address */
+ *event->estatus_address = event->estatus_pa;
+
+ idx++;
+ hest_gen_entry++;
+ }
+
+ if (idx != event_cnt) {
+ ret = -ENODEV;
+ goto exit;
+ }
+
+ initdbgmsg("%s: registering HEST\n", __func__);
+ hest_table_set(hest);
+ acpi_hest_init();
+
+ ret = 0;
+
+exit:
+ return ret;
+}
+
+/*
+ * Enable MSIX at the device level (MSIX_CAPABILITIES Header).
+ *
+ * NOTE: We SHOULD be able to use PCCPV_XXX_VSEC_SCTL[MSIX_SEC_EN]
+ * to enable our SECURE IRQs, but erratum PCC-34263 prevents that.
+ */
+static void dev_enable_msix(struct pci_dev *pdev)
+{
+ u16 ctrl;
+
+ if ((pdev->msi_enabled) || (pdev->msix_enabled)) {
+ initerrmsg("MSI(%d) or MSIX(%d) already enabled\n",
+ pdev->msi_enabled, pdev->msix_enabled);
+ return;
+ }
+
+ /* enable MSIX delivery for this device; we handle [secure] MSIX ints */
+ pdev->msix_cap = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
+ if (pdev->msix_cap) {
+ pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS,
+ &ctrl);
+ ctrl |= PCI_MSIX_FLAGS_ENABLE;
+ pci_write_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS,
+ ctrl);
+ initdbgmsg("Set MSI-X Enable for PCI dev %04d:%02d.%d\n",
+ pdev->bus->number, PCI_SLOT(pdev->devfn),
+ PCI_FUNC(pdev->devfn));
+ } else {
+ initerrmsg("PCI dev %04d:%02d.%d missing MSIX capabilities\n",
+ pdev->bus->number, PCI_SLOT(pdev->devfn),
+ PCI_FUNC(pdev->devfn));
+ }
+}
+
+/* Driver entry point */
+static int __init sdei_ghes_driver_init(void)
+{
+ const struct pci_device_id *pdevid;
+ struct device_node *of_node;
+ struct pci_dev *pdev;
+ int i, rc;
+
+ initdbgmsg("%s: entry\n", __func__);
+
+ rc = -ENODEV;
+
+ of_node = of_find_matching_node_and_match(NULL, sdei_ghes_of_match,
+ NULL);
+ if (!of_node)
+ return rc;
+
+ /* Initialize Hardware Error Source Table (HEST) */
+ rc = sdei_ghes_hest_init(of_node);
+ if (rc) {
+ initerrmsg("HEST initialization error %d\n", rc);
+ return rc;
+ }
+
+ platform_driver_register(&sdei_ghes_drv);
+
+ /* Enable MSIX for devices whose [secure] IRQs we control.
+ * These IRQs have been initialized by ATF.
+ * This is required due to an erratum affecting
+ * PCCPV_XXX_VSEC_SCTL[MSIX_SEC_EN].
+ */
+ for (i = 0; i < ARRAY_SIZE(sdei_ghes_otx2_pci_tbl); i++) {
+ pdevid = &sdei_ghes_otx2_pci_tbl[i];
+ pdev = NULL;
+ while ((pdev = pci_get_device(pdevid->vendor, pdevid->device,
+ pdev))) {
+ dev_enable_msix(pdev);
+ }
+ }
+
+ return rc;
+}
+
+device_initcall(sdei_ghes_driver_init);
+
+MODULE_DESCRIPTION("OcteonTX2 SDEI GHES Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/soc/marvell/octeontx2-ghes/otx2-sdei-ghes.h b/drivers/soc/marvell/octeontx2-ghes/otx2-sdei-ghes.h
new file mode 100644
index 000000000000..f2f12f042722
--- /dev/null
+++ b/drivers/soc/marvell/octeontx2-ghes/otx2-sdei-ghes.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2019 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Supports OcteonTX2 Generic Hardware Error Source[s] (GHES).
+ *
+ */
+
+#ifndef __OTX2_SDEI_GHES_H__
+#define __OTX2_SDEI_GHES_H__
+
+#define SDEI_GHES_EVENT_NAME_MAX_CHARS 16
+/*
+ * @estatus_pa: physical address of error status information block
+ * @estatus_address: mapped pointer to error_status_address
+ * @estatus: mapped pointer to error status block
+ */
+struct otx2_ghes_event {
+ char name[SDEI_GHES_EVENT_NAME_MAX_CHARS];
+ u32 id;
+ phys_addr_t estatus_pa;
+ phys_addr_t *estatus_address;
+ struct acpi_hest_generic_status *estatus;
+ struct otx2_ghes_err_ring *ring;
+};
+
+/**
+ * struct otx2_sdei_ghes_drv: driver state
+ *
+ * @of_node: associated device tree node
+ * @event_list: list of [SDEI] events
+ * @event_count: count of [SDEI] events (size of @event_list)
+ */
+struct otx2_sdei_ghes_drv {
+ struct device_node *of_node;
+ struct otx2_ghes_event *event_list;
+ size_t event_count;
+};
+
+#define OTX2_GHES_ERR_REC_FRU_TEXT_LEN 32
+/* This is shared with ATF */
+struct otx2_ghes_err_record {
+ union {
+ struct cper_sec_mem_err_old mcc;
+ struct cper_sec_mem_err_old mdc;
+ struct cper_sec_mem_err_old lmc;
+ struct cper_arm_err_info ap; /* application processor */
+ } u;
+ uint32_t severity; /* CPER_SEV_xxx */
+ char fru_text[OTX2_GHES_ERR_REC_FRU_TEXT_LEN];
+};
+
+/* This is shared with ATF */
+struct otx2_ghes_err_ring {
+ /* The head resides in DRAM & can be updated by ATF (i.e. firmware).
+ * See Documentation/process/volatile-considered-harmful.rst, line 92.
+ */
+ uint32_t volatile head;
+ uint32_t tail;
+ uint32_t size; /* ring size */
+ /* ring of records */
+ struct otx2_ghes_err_record records[1] __aligned(8);
+};
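+
+/*
+ * Producer/consumer convention for the ring above (as used by
+ * sdei_ghes_callback()): firmware (ATF) appends a record at 'head' and
+ * advances it, while the kernel consumes the record at 'tail' and advances
+ * 'tail'; head == tail means the ring is empty. Both indexes wrap at 'size'.
+ */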
+
+#endif // __OTX2_SDEI_GHES_H__
+
diff --git a/drivers/soc/marvell/octeontx2-rm/Makefile b/drivers/soc/marvell/octeontx2-rm/Makefile
new file mode 100644
index 000000000000..bab787b56b43
--- /dev/null
+++ b/drivers/soc/marvell/octeontx2-rm/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for Marvell's OcteonTX2 SSO/TIM RVU device driver
+#
+
+obj-$(CONFIG_OCTEONTX2_RM) += octeontx2_rm.o
+
+octeontx2_rm-y := otx2_rm.o quota.o
+octeontx2_rm-$(CONFIG_OCTEONTX2_RM_DOM_SYSFS) += domain_sysfs.o
+ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
+ccflags-y += -I$(srctree)/drivers/soc/marvell/octeontx2-dpi/
diff --git a/drivers/soc/marvell/octeontx2-rm/domain_sysfs.c b/drivers/soc/marvell/octeontx2-rm/domain_sysfs.c
new file mode 100644
index 000000000000..6083db40d0ed
--- /dev/null
+++ b/drivers/soc/marvell/octeontx2-rm/domain_sysfs.c
@@ -0,0 +1,830 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OcteonTX2 RVU Resource Manager driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/sysfs.h>
+#include "domain_sysfs.h"
+#include "otx2_rm.h"
+#include "dpi.h"
+
+#define DOMAIN_NAME_LEN 32
+#define PCI_SCAN_FMT "%04x:%02x:%02x.%02x"
+
+/* The format of DP is: DP(_name, _param_type, _scanf_fmt) */
+#define DOM_PARAM_SPEC \
+DP(ssow, int, "%d") \
+DP(sso, int, "%d") \
+DP(npa, int, "%d") \
+DP(cpt, int, "%d") \
+DP(tim, int, "%d") \
+DP(dpi, int, "%d")
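+
+/*
+ * DOM_PARAM_SPEC is expanded twice: inside struct domain_params each
+ * DP(_name, _type, _fmt) entry becomes a "_type _name;" member (i.e.
+ * int ssow; int sso; int npa; int cpt; int tim; int dpi;), and in
+ * create_domain_store() each entry becomes an "else if" branch that parses
+ * a "<name>:<value>" token using the given scanf format.
+ */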
+
+struct domain_params {
+ const char *name;
+#define DP(_name, _type, _1) \
+ _type _name;
+DOM_PARAM_SPEC
+#undef DP
+ const char *ports[RM_MAX_PORTS];
+ u16 port_cnt;
+};
+
+struct domain {
+ char name[DOMAIN_NAME_LEN];
+ struct kobj_attribute domain_id;
+ struct kobj_attribute domain_in_use;
+ /* List of all ports attached to the domain */
+ struct rvu_port *ports;
+ struct kobject *kobj;
+ struct rvu_vf *rvf;
+ int port_count;
+ bool in_use;
+};
+
+struct rvu_port {
+ /* handle in global list of ports associated to all domains */
+ struct list_head list;
+ struct pci_dev *pdev;
+ struct domain *domain;
+};
+
+struct dpi_vf {
+ struct pci_dev *pdev;
+ /* pointer to the kobject which owns this vf */
+ struct kobject *domain_kobj;
+ int vf_id;
+ bool in_use;
+};
+
+struct dpi_info {
+ /* Total number of vfs available */
+ uint8_t num_vfs;
+ /* Free vfs */
+ uint8_t vfs_free;
+ /* Pointer to the vfs available */
+ struct dpi_vf *dpi_vf;
+};
+
+struct domain_sysfs {
+ struct list_head list;
+ struct kobj_attribute create_domain;
+ struct kobj_attribute destroy_domain;
+ struct kobj_attribute pmccntr_el0;
+ /* List of all ports added to all domains. Used for validating if new
+ * domain creation doesn't want to take an already taken port.
+ */
+ struct list_head ports;
+ struct rm_dev *rdev;
+ struct kobject *parent;
+ struct domain *domains;
+ size_t domains_len;
+ struct dpi_info dpi_info;
+};
+
+static DEFINE_MUTEX(domain_sysfs_lock);
+static LIST_HEAD(domain_sysfs_list);
+
+static ssize_t
+domain_id_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct domain *dom = container_of(attr, struct domain, domain_id);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", dom->name);
+}
+
+static ssize_t
+domain_in_use_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct domain *dom = container_of(attr, struct domain, domain_in_use);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", dom->rvf->in_use);
+}
+
+static int do_destroy_domain(struct domain_sysfs *lsfs, struct domain *domain)
+{
+ struct device *dev = &lsfs->rdev->pdev->dev;
+ int i;
+
+ if (domain->rvf->in_use) {
+ dev_err(dev, "Domain %s is in use.\n", domain->name);
+ return -EBUSY;
+ }
+
+ sysfs_remove_file(domain->kobj, &domain->domain_id.attr);
+ domain->domain_id.attr.mode = 0;
+ sysfs_remove_file(domain->kobj, &domain->domain_in_use.attr);
+ domain->domain_in_use.attr.mode = 0;
+ for (i = 0; i < domain->port_count; i++) {
+ sysfs_remove_link(domain->kobj,
+ pci_name(domain->ports[i].pdev));
+ }
+
+ for (i = 0; i < lsfs->dpi_info.num_vfs; i++) {
+ struct dpi_vf *dpivf_ptr = NULL;
+
+ dpivf_ptr = &lsfs->dpi_info.dpi_vf[i];
+ /* Identify the devices belongs to this domain */
+ if (dpivf_ptr->in_use &&
+ dpivf_ptr->domain_kobj == domain->kobj) {
+ sysfs_remove_link(domain->kobj,
+ pci_name(dpivf_ptr->pdev));
+ dpivf_ptr->in_use = false;
+ dpivf_ptr->domain_kobj = NULL;
+ lsfs->dpi_info.vfs_free++;
+ }
+ }
+
+ sysfs_remove_link(domain->kobj, pci_name(domain->rvf->pdev));
+ kobject_del(domain->kobj);
+ mutex_lock(&lsfs->rdev->lock);
+ // release this domain's LF reservations
+ lsfs->rdev->vf_limits.sso->a[domain->rvf->vf_id].val = 0;
+ lsfs->rdev->vf_limits.ssow->a[domain->rvf->vf_id].val = 0;
+ lsfs->rdev->vf_limits.npa->a[domain->rvf->vf_id].val = 0;
+ lsfs->rdev->vf_limits.cpt->a[domain->rvf->vf_id].val = 0;
+ lsfs->rdev->vf_limits.tim->a[domain->rvf->vf_id].val = 0;
+ mutex_unlock(&lsfs->rdev->lock);
+
+ mutex_lock(&domain_sysfs_lock);
+ // FREE ALL allocated ports
+ for (i = 0; i < domain->port_count; i++) {
+ list_del(&domain->ports[i].list);
+ pci_dev_put(domain->ports[i].pdev);
+ }
+ kfree(domain->ports);
+ domain->ports = NULL;
+ domain->port_count = 0;
+ domain->in_use = false;
+ domain->name[0] = '\0';
+ mutex_unlock(&domain_sysfs_lock);
+
+ return 0;
+}
+
+static int
+do_create_domain(struct domain_sysfs *lsfs, struct domain_params *dparams)
+{
+ struct device *dev = &lsfs->rdev->pdev->dev;
+ struct domain *domain = NULL;
+ struct rvu_port *ports = NULL, *cur;
+ u32 dom, bus, slot, fn;
+ int old_sso, old_ssow, old_npa, old_cpt, old_tim, device;
+ int res = 0, i;
+
+ /* Validate parameters */
+ if (dparams == NULL)
+ return -EINVAL;
+ if (strnlen(dparams->name, DOMAIN_NAME_LEN) >= DOMAIN_NAME_LEN) {
+ dev_err(dev, "Domain name too long, max %d characters.\n",
+ DOMAIN_NAME_LEN);
+ return -EINVAL;
+ }
+ if (dparams->npa != 1) {
+ dev_err(dev, "Exactly 1 NPA resource required.\n");
+ return -EINVAL;
+ }
+ if (dparams->ssow < 1) {
+ dev_err(dev, "At least 1 SSOW resource required.\n");
+ return -EINVAL;
+ }
+ mutex_lock(&domain_sysfs_lock);
+ /* Find a free domain device */
+ for (i = 0; i < lsfs->domains_len; i++) {
+ if (!strncmp(lsfs->domains[i].name, dparams->name,
+ DOMAIN_NAME_LEN)) {
+ dev_err(dev, "Domain %s exists already.\n",
+ dparams->name);
+ res = -EINVAL;
+ goto err_dom;
+ }
+ if (lsfs->domains[i].in_use == false &&
+ lsfs->domains[i].rvf->in_use == false) {
+ if (domain == NULL)
+ domain = &lsfs->domains[i];
+ }
+ }
+ if (domain == NULL) {
+ dev_err(dev, "No free device to create new domain.\n");
+ res = -ENODEV;
+ goto err_dom;
+ }
+ strncpy(domain->name, dparams->name, DOMAIN_NAME_LEN - 1);
+ domain->in_use = true;
+ /* Verify ports are valid and supported. */
+ if (dparams->port_cnt == 0)
+ goto skip_ports;
+ ports = kcalloc(dparams->port_cnt, sizeof(struct rvu_port), GFP_KERNEL);
+ if (ports == NULL) {
+ dev_err(dev, "Not enough memory.\n");
+ res = -ENOMEM;
+ goto err_ports;
+ }
+ for (i = 0; i < dparams->port_cnt; i++) {
+ if (sscanf(dparams->ports[i], PCI_SCAN_FMT, &dom, &bus, &slot,
+ &fn) != 4) {
+ dev_err(dev, "Invalid port: %s.\n", dparams->ports[i]);
+ res = -EINVAL;
+ goto err_ports;
+ }
+ ports[i].pdev =
+ pci_get_domain_bus_and_slot(dom, bus,
+ PCI_DEVFN(slot, fn));
+ if (ports[i].pdev == NULL) {
+ dev_err(dev, "Unknown port: %s.\n", dparams->ports[i]);
+ res = -ENODEV;
+ goto err_ports;
+ }
+ device = ports[i].pdev->device;
+ if (ports[i].pdev->vendor != PCI_VENDOR_ID_CAVIUM ||
+ (device != PCI_DEVID_OCTEONTX2_RVU_PF &&
+ device != PCI_DEVID_OCTEONTX2_PASS1_RVU_PF &&
+ device != PCI_DEVID_OCTEONTX2_RVU_AFVF &&
+ device != PCI_DEVID_OCTEONTX2_PASS1_RVU_AFVF &&
+ device != PCI_DEVID_OCTEONTX2_RVU_VF &&
+ device != PCI_DEVID_OCTEONTX2_PASS1_RVU_VF)) {
+ dev_err(dev, "Unsupported port: %s.\n",
+ dparams->ports[i]);
+ res = -EINVAL;
+ goto err_ports;
+ }
+ list_for_each_entry(cur, &lsfs->ports, list) {
+ if (cur->pdev != ports[i].pdev)
+ continue;
+ dev_err(dev,
+ "Port %s already assigned to domain %s.\n",
+ dparams->ports[i], cur->domain->name);
+ res = -EBUSY;
+ goto err_ports;
+ }
+ }
+ for (i = 0; i < dparams->port_cnt; i++) {
+ ports[i].domain = domain;
+ list_add(&ports[i].list, &lsfs->ports);
+ }
+ domain->ports = ports;
+ domain->port_count = dparams->port_cnt;
+skip_ports:
+ mutex_unlock(&domain_sysfs_lock);
+ /* Check domain spec against limits for the parent RVU. */
+ mutex_lock(&lsfs->rdev->lock);
+ old_sso = lsfs->rdev->vf_limits.sso->a[domain->rvf->vf_id].val;
+ old_ssow = lsfs->rdev->vf_limits.ssow->a[domain->rvf->vf_id].val;
+ old_npa = lsfs->rdev->vf_limits.npa->a[domain->rvf->vf_id].val;
+ old_cpt = lsfs->rdev->vf_limits.cpt->a[domain->rvf->vf_id].val;
+ old_tim = lsfs->rdev->vf_limits.tim->a[domain->rvf->vf_id].val;
+#define CHECK_LIMITS(_ls, _val, _n, _idx) do { \
+ if (quotas_get_sum(_ls) + _val - _ls->a[_idx].val > _ls->max_sum) { \
+ dev_err(dev, \
+ "Not enough "_n" LFs, currently used: %lld/%lld\n", \
+ quotas_get_sum(_ls), _ls->max_sum); \
+ res = -ENODEV; \
+ goto err_limits; \
+ } \
+} while (0)
+ CHECK_LIMITS(lsfs->rdev->vf_limits.sso, dparams->sso, "SSO",
+ domain->rvf->vf_id);
+ CHECK_LIMITS(lsfs->rdev->vf_limits.ssow, dparams->ssow, "SSOW",
+ domain->rvf->vf_id);
+ CHECK_LIMITS(lsfs->rdev->vf_limits.npa, dparams->npa, "NPA",
+ domain->rvf->vf_id);
+ CHECK_LIMITS(lsfs->rdev->vf_limits.cpt, dparams->cpt, "CPT",
+ domain->rvf->vf_id);
+ CHECK_LIMITS(lsfs->rdev->vf_limits.tim, dparams->tim, "TIM",
+ domain->rvf->vf_id);
+ if (dparams->dpi > lsfs->dpi_info.vfs_free) {
+ dev_err(dev,
+ "Not enough DPI VFS, currently used:%d/%d\n",
+ lsfs->dpi_info.num_vfs -
+ lsfs->dpi_info.vfs_free,
+ lsfs->dpi_info.num_vfs);
+ res = -ENODEV;
+ goto err_limits;
+ }
+
+ /* Now that checks are done, update the limits */
+ lsfs->rdev->vf_limits.sso->a[domain->rvf->vf_id].val = dparams->sso;
+ lsfs->rdev->vf_limits.ssow->a[domain->rvf->vf_id].val = dparams->ssow;
+ lsfs->rdev->vf_limits.npa->a[domain->rvf->vf_id].val = dparams->npa;
+ lsfs->rdev->vf_limits.cpt->a[domain->rvf->vf_id].val = dparams->cpt;
+ lsfs->rdev->vf_limits.tim->a[domain->rvf->vf_id].val = dparams->tim;
+ lsfs->dpi_info.vfs_free -= dparams->dpi;
+ mutex_unlock(&lsfs->rdev->lock);
+
+ /* Set it up according to user spec */
+ domain->kobj = kobject_create_and_add(dparams->name, lsfs->parent);
+ if (domain->kobj == NULL) {
+ dev_err(dev, "Failed to create domain directory.\n");
+ res = -ENOMEM;
+ goto err_kobject_create;
+ }
+ res = sysfs_create_link(domain->kobj, &domain->rvf->pdev->dev.kobj,
+ pci_name(domain->rvf->pdev));
+ if (res < 0) {
+ dev_err(dev, "Failed to create dev links for domain %s.\n",
+ domain->name);
+ res = -ENOMEM;
+ goto err_dom_dev_symlink;
+ }
+ for (i = 0; i < dparams->port_cnt; i++) {
+ res = sysfs_create_link(domain->kobj, &ports[i].pdev->dev.kobj,
+ pci_name(ports[i].pdev));
+ if (res < 0) {
+ dev_err(dev,
+ "Failed to create dev links for domain %s.\n",
+ domain->name);
+ res = -ENOMEM;
+ goto err_dom_port_symlink;
+ }
+ }
+ /* Create symlinks for dpi vfs in domain */
+ for (i = 0; i < dparams->dpi; i++) {
+ struct dpi_vf *dpivf_ptr = NULL;
+ int vf_idx;
+
+ for (vf_idx = 0; vf_idx < lsfs->dpi_info.num_vfs;
+ vf_idx++) {
+ /* Find available dpi vfs and create symlinks */
+ dpivf_ptr = &lsfs->dpi_info.dpi_vf[vf_idx];
+ if (dpivf_ptr->in_use)
+ continue;
+ else
+ break;
+ }
+ res = sysfs_create_link(domain->kobj,
+ &dpivf_ptr->pdev->dev.kobj,
+ pci_name(dpivf_ptr->pdev));
+ if (res < 0) {
+ dev_err(dev,
+ "Failed to create DPI dev links for domain %s\n",
+ domain->name);
+ res = -ENOMEM;
+ goto err_dpi_symlink;
+ }
+ dpivf_ptr->domain_kobj = domain->kobj;
+ dpivf_ptr->in_use = true;
+ }
+
+ domain->domain_in_use.attr.mode = 0444;
+ domain->domain_in_use.attr.name = "domain_in_use";
+ domain->domain_in_use.show = domain_in_use_show;
+ res = sysfs_create_file(domain->kobj, &domain->domain_in_use.attr);
+ if (res < 0) {
+ dev_err(dev,
+ "Failed to create domain_in_use file for domain %s.\n",
+ domain->name);
+ res = -ENOMEM;
+ goto err_dom_in_use;
+ }
+
+ domain->domain_id.attr.mode = 0444;
+ domain->domain_id.attr.name = "domain_id";
+ domain->domain_id.show = domain_id_show;
+ res = sysfs_create_file(domain->kobj, &domain->domain_id.attr);
+ if (res < 0) {
+ dev_err(dev, "Failed to create domain_id file for domain %s.\n",
+ domain->name);
+ res = -ENOMEM;
+ goto err_dom_id;
+ }
+
+ return res;
+
+err_dom_id:
+ domain->domain_id.attr.mode = 0;
+ sysfs_remove_file(domain->kobj, &domain->domain_in_use.attr);
+err_dom_in_use:
+ domain->domain_in_use.attr.mode = 0;
+err_dpi_symlink:
+ for (i = 0; i < lsfs->dpi_info.num_vfs; i++) {
+ struct dpi_vf *dpivf_ptr = NULL;
+
+ dpivf_ptr = &lsfs->dpi_info.dpi_vf[i];
+ /* Identify the devices belongs to this domain */
+ if (dpivf_ptr->in_use &&
+ dpivf_ptr->domain_kobj == domain->kobj) {
+ sysfs_remove_link(domain->kobj,
+ pci_name(dpivf_ptr->pdev));
+ dpivf_ptr->in_use = false;
+ dpivf_ptr->domain_kobj = NULL;
+ }
+ }
+err_dom_port_symlink:
+ for (i = 0; i < dparams->port_cnt; i++)
+ sysfs_remove_link(domain->kobj, pci_name(ports[i].pdev));
+ sysfs_remove_link(domain->kobj, pci_name(domain->rvf->pdev));
+err_dom_dev_symlink:
+ kobject_del(domain->kobj);
+err_kobject_create:
+ mutex_lock(&lsfs->rdev->lock);
+err_limits:
+ // restore limits
+ lsfs->rdev->vf_limits.sso->a[domain->rvf->vf_id].val = old_sso;
+ lsfs->rdev->vf_limits.ssow->a[domain->rvf->vf_id].val = old_ssow;
+ lsfs->rdev->vf_limits.npa->a[domain->rvf->vf_id].val = old_npa;
+ lsfs->rdev->vf_limits.cpt->a[domain->rvf->vf_id].val = old_cpt;
+ lsfs->rdev->vf_limits.tim->a[domain->rvf->vf_id].val = old_tim;
+ lsfs->dpi_info.vfs_free += dparams->dpi;
+ mutex_unlock(&lsfs->rdev->lock);
+ mutex_lock(&domain_sysfs_lock);
+err_ports:
+ // FREE ALL allocated ports
+ for (i = 0; ports != NULL && i < dparams->port_cnt; i++) {
+ if (ports[i].pdev == NULL)
+ break;
+ if (ports[i].domain != NULL)
+ list_del(&ports[i].list);
+ pci_dev_put(ports[i].pdev);
+ }
+ kfree(ports);
+ domain->ports = NULL;
+ domain->port_count = 0;
+ domain->in_use = false;
+ domain->name[0] = '\0';
+err_dom:
+ mutex_unlock(&domain_sysfs_lock);
+ return res;
+}
+
+static ssize_t
+destroy_domain_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct domain_sysfs *lsfs =
+ container_of(attr, struct domain_sysfs, destroy_domain);
+ struct device *dev = &lsfs->rdev->pdev->dev;
+ struct domain *domain = NULL;
+ char name[DOMAIN_NAME_LEN], *name_ptr;
+ int i, res;
+
+ strlcpy(name, buf, DOMAIN_NAME_LEN);
+ name_ptr = strim(name);
+ if (strlen(name_ptr) == 0) {
+ dev_err(dev, "Empty domain name.\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&domain_sysfs_lock);
+ /* Find a free domain device */
+ for (i = 0; i < lsfs->domains_len; i++) {
+ if (!strncmp(lsfs->domains[i].name, name_ptr,
+ DOMAIN_NAME_LEN)) {
+ domain = &lsfs->domains[i];
+ break;
+ }
+ }
+ if (domain == NULL) {
+ dev_err(dev, "Domain '%s' doesn't exist.\n", name);
+ res = -EINVAL;
+ goto err_dom;
+ }
+ mutex_unlock(&domain_sysfs_lock);
+
+ res = do_destroy_domain(lsfs, domain);
+ if (res == 0)
+ res = count;
+ return res;
+err_dom:
+ mutex_unlock(&domain_sysfs_lock);
+ return res;
+}
+
+static ssize_t
+create_domain_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct domain_params *dparams = NULL;
+ struct domain_sysfs *lsfs =
+ container_of(attr, struct domain_sysfs, create_domain);
+ struct device *dev = &lsfs->rdev->pdev->dev;
+ int res = 0;
+ char *start;
+ char *end;
+ char *ptr = NULL;
+ const char *name;
+ char *errmsg = "Invalid domain specification format.";
+
+ if (strlen(buf) == 0) {
+ dev_err(dev, "Empty domain spec.\n");
+ return -EINVAL;
+ }
+
+ dparams = kzalloc(sizeof(*dparams), GFP_KERNEL);
+ if (dparams == NULL) {
+ errmsg = "Not enough memory";
+ res = -ENOMEM;
+ goto error;
+ }
+
+ end = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (end == NULL) {
+ errmsg = "Not enough memory";
+ res = -ENOMEM;
+ goto error;
+ }
+
+ ptr = end;
+ memcpy(end, buf, count);
+
+ name = strsep(&end, ";");
+ if (end == NULL) {
+ res = -EINVAL;
+ goto error;
+ }
+
+ dparams->name = name;
+
+ for (;;) {
+ start = strsep(&end, ";");
+ if (start == NULL)
+ break;
+ start = strim(start);
+ if (!*start)
+ continue;
+
+ if (!strncmp(strim(start), "port", sizeof("port") - 1)) {
+ strsep(&start, ":");
+ if (dparams->port_cnt >= RM_MAX_PORTS) {
+ res = -EINVAL;
+ goto error;
+ }
+ dparams->ports[dparams->port_cnt++] = strim(start);
+ }
+ #define DP(_name, _1, _fmt) \
+ else if (!strncmp(strim(start), #_name, \
+ sizeof(#_name) - 1)) { \
+ strsep(&start, ":"); \
+ start = strim(start); \
+ res = sscanf(start, _fmt, &dparams->_name); \
+ if (res != 1) { \
+ res = -EINVAL; \
+ goto error; \
+ } \
+ continue; \
+ }
+ DOM_PARAM_SPEC
+ #undef DP
+ else {
+ res = -EINVAL;
+ goto error;
+ }
+ }
+ res = do_create_domain(lsfs, dparams);
+ if (res < 0) {
+ errmsg = "Failed to create application domain.";
+ goto error;
+ } else
+ res = count;
+error:
+ if (res < 0)
+ dev_err(dev, "%s\n", errmsg);
+ kfree(ptr);
+ kfree(dparams);
+ return res;
+}
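+
+/*
+ * Illustrative domain specification accepted by the create_domain attribute
+ * (the resource counts and port BDF below are placeholders):
+ *
+ *   echo "dom0;npa:1;ssow:2;sso:4;tim:1;cpt:0;dpi:1;port:0002:02:00.0" \
+ *        > create_domain
+ *
+ * Tokens are ';' separated: the first token is the domain name, "port:"
+ * tokens name RVU devices to link into the domain, and the remaining
+ * "<resource>:<count>" tokens set per-LF limits parsed via DOM_PARAM_SPEC.
+ * do_create_domain() additionally requires npa == 1 and ssow >= 1.
+ */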
+
+static int dpivf_sysfs_create(struct domain_sysfs *lsfs)
+{
+ struct dpi_info *dpi_info = &lsfs->dpi_info;
+ struct dpi_vf *dpivf_ptr = NULL;
+ struct pci_dev *pdev = lsfs->rdev->pdev;
+ struct pci_dev *vdev = NULL;
+ uint8_t vf_idx = 0;
+
+ dpi_info->dpi_vf = kcalloc(DPI_MAX_VFS,
+ sizeof(struct dpi_vf), GFP_KERNEL);
+ if (dpi_info->dpi_vf == NULL)
+ return -ENOMEM;
+
+ /* Get available DPI VFs; hold a reference on each device we keep */
+ while ((vdev = pci_get_device(pdev->vendor,
+ PCI_DEVID_OCTEONTX2_DPI_VF, vdev))) {
+ if (!vdev->is_virtfn)
+ continue;
+ if (vf_idx >= DPI_MAX_VFS) {
+ pci_dev_put(vdev);
+ break;
+ }
+ dpivf_ptr = &dpi_info->dpi_vf[vf_idx];
+ dpivf_ptr->pdev = pci_dev_get(vdev);
+ dpivf_ptr->vf_id = vf_idx;
+ dpivf_ptr->in_use = false;
+ vf_idx++;
+ }
+ dpi_info->num_vfs = vf_idx;
+ dpi_info->vfs_free = vf_idx;
+ return 0;
+}
+
+static void dpivf_sysfs_destroy(struct domain_sysfs *lsfs)
+{
+ struct dpi_info *dpi_info = &lsfs->dpi_info;
+ struct dpi_vf *dpivf_ptr = NULL;
+ uint8_t vf_idx = 0;
+
+ if (dpi_info->num_vfs == 0)
+ goto free_mem;
+ else {
+ for (vf_idx = 0; vf_idx < dpi_info->num_vfs; vf_idx++) {
+ dpivf_ptr = &dpi_info->dpi_vf[vf_idx];
+ pci_dev_put(dpivf_ptr->pdev);
+ dpivf_ptr->pdev = NULL;
+ }
+ }
+ dpi_info->num_vfs = 0;
+
+free_mem:
+ kfree(dpi_info->dpi_vf);
+ dpi_info->dpi_vf = NULL;
+}
+
+static void enable_pmccntr_el0(void *data)
+{
+ u64 val;
+
+ /* Disable the cycle counter overflow interrupt (write-1-to-clear) */
+ asm volatile("msr pmintenclr_el1, %0" : : "r" (BIT_ULL(31)));
+ /* Enable cycle counter */
+ asm volatile("mrs %0, pmcntenset_el0" : "=r" (val));
+ val |= BIT_ULL(31);
+ asm volatile("msr pmcntenset_el0, %0" :: "r" (val));
+ /* Enable user-mode access to cycle counters. */
+ asm volatile("mrs %0, pmuserenr_el0" : "=r" (val));
+ val |= BIT(2) | BIT(0);
+ asm volatile("msr pmuserenr_el0, %0" : : "r"(val));
+ /* Start cycle counter */
+ asm volatile("mrs %0, pmcr_el0" : "=r" (val));
+ val |= BIT(0);
+ asm volatile("msr pmcr_el0, %0" : : "r" (val));
+ isb();
+ asm volatile("mrs %0, pmccfiltr_el0" : "=r" (val));
+ val |= BIT(27);
+ asm volatile("msr pmccfiltr_el0, %0" : : "r" (val));
+}
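+
+/*
+ * With the bits above set, EL0 code can read the cycle counter directly,
+ * e.g. (illustrative user-space snippet):
+ *
+ *   uint64_t cycles;
+ *
+ *   asm volatile("mrs %0, pmccntr_el0" : "=r" (cycles));
+ */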
+
+static void disable_pmccntr_el0(void *data)
+{
+ u64 val;
+
+ /* Disable the cycle counter (write-1-to-clear) */
+ asm volatile("msr pmcntenclr_el0, %0" : : "r" (BIT_ULL(31)));
+ /* Disable user-mode access to counters. */
+ asm volatile("mrs %0, pmuserenr_el0" : "=r" (val));
+ val &= ~(BIT(2) | BIT(0));
+ asm volatile("msr pmuserenr_el0, %0" : : "r"(val));
+}
+
+static ssize_t
+enadis_pmccntr_el0_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct domain_sysfs *lsfs = container_of(attr, struct domain_sysfs,
+ pmccntr_el0);
+ struct device *dev = &lsfs->rdev->pdev->dev;
+ char tmp_buf[64];
+ long enable = 0;
+ char *tmp_ptr;
+
+ strlcpy(tmp_buf, buf, 64);
+ tmp_ptr = strim(tmp_buf);
+ if (kstrtol(tmp_ptr, 0, &enable)) {
+ dev_err(dev, "Invalid value, expected 1/0\n");
+ return -EIO;
+ }
+
+ if (enable)
+ on_each_cpu(enable_pmccntr_el0, NULL, 1);
+ else
+ on_each_cpu(disable_pmccntr_el0, NULL, 1);
+
+ return count;
+}
+
+static void check_pmccntr_el0(void *data)
+{
+ int *out = data;
+ u64 val;
+
+ asm volatile("mrs %0, pmuserenr_el0" : "=r" (val));
+ *out = *out & !!(val & (BIT(2) | BIT(0)));
+}
+
+static ssize_t
+enadis_pmccntr_el0_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ int out = 1;
+
+ on_each_cpu(check_pmccntr_el0, &out, 1);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", out);
+}
+
+int domain_sysfs_create(struct rm_dev *rm)
+{
+ struct domain_sysfs *lsfs;
+ int res = 0, i;
+
+ if (rm == NULL || rm->num_vfs == 0)
+ return -EINVAL;
+
+ lsfs = kzalloc(sizeof(*lsfs), GFP_KERNEL);
+ if (lsfs == NULL) {
+ res = -ENOMEM;
+ goto err_lsfs_alloc;
+ }
+
+ INIT_LIST_HEAD(&lsfs->ports);
+ lsfs->rdev = rm;
+ lsfs->domains_len = rm->num_vfs;
+ lsfs->domains =
+ kcalloc(lsfs->domains_len, sizeof(struct domain), GFP_KERNEL);
+ if (lsfs->domains == NULL) {
+ res = -ENOMEM;
+ goto err_domains_alloc;
+ }
+ for (i = 0; i < lsfs->domains_len; i++)
+ lsfs->domains[i].rvf = &rm->vf_info[i];
+
+ lsfs->create_domain.attr.name = "create_domain";
+ lsfs->create_domain.attr.mode = 0200;
+ lsfs->create_domain.store = create_domain_store;
+ res = sysfs_create_file(&rm->pdev->dev.kobj, &lsfs->create_domain.attr);
+ if (res)
+ goto err_create_domain;
+
+ lsfs->destroy_domain.attr.name = "destroy_domain";
+ lsfs->destroy_domain.attr.mode = 0200;
+ lsfs->destroy_domain.store = destroy_domain_store;
+ res = sysfs_create_file(&rm->pdev->dev.kobj,
+ &lsfs->destroy_domain.attr);
+ if (res)
+ goto err_destroy_domain;
+
+ lsfs->pmccntr_el0.attr.name = "pmccntr_el0";
+ lsfs->pmccntr_el0.attr.mode = 0644;
+ lsfs->pmccntr_el0.show = enadis_pmccntr_el0_show;
+ lsfs->pmccntr_el0.store = enadis_pmccntr_el0_store;
+ res = sysfs_create_file(&rm->pdev->dev.kobj, &lsfs->pmccntr_el0.attr);
+ if (res)
+ goto err_pmccntr_el0;
+
+ lsfs->parent = &rm->pdev->dev.kobj;
+
+ res = dpivf_sysfs_create(lsfs);
+ if (res)
+ goto err_dpivf_sysfs_create;
+
+ mutex_lock(&domain_sysfs_lock);
+ list_add_tail(&lsfs->list, &domain_sysfs_list);
+ mutex_unlock(&domain_sysfs_lock);
+
+ return 0;
+
+err_dpivf_sysfs_create:
+ sysfs_remove_file(&rm->pdev->dev.kobj, &lsfs->pmccntr_el0.attr);
+err_pmccntr_el0:
+ sysfs_remove_file(&rm->pdev->dev.kobj, &lsfs->destroy_domain.attr);
+err_destroy_domain:
+ sysfs_remove_file(&rm->pdev->dev.kobj, &lsfs->create_domain.attr);
+err_create_domain:
+ kfree(lsfs->domains);
+err_domains_alloc:
+ kfree(lsfs);
+err_lsfs_alloc:
+ return res;
+}
+
+void domain_sysfs_destroy(struct rm_dev *rm)
+{
+ struct list_head *pos, *n;
+ struct domain_sysfs *lsfs = NULL;
+
+ if (rm == NULL)
+ return;
+
+ mutex_lock(&domain_sysfs_lock);
+ list_for_each_safe(pos, n, &domain_sysfs_list) {
+ lsfs = container_of(pos, struct domain_sysfs, list);
+ if (lsfs->rdev == rm) {
+ list_del(pos);
+ break;
+ }
+ lsfs = NULL;
+ }
+ mutex_unlock(&domain_sysfs_lock);
+
+ if (lsfs == NULL)
+ return;
+
+ dpivf_sysfs_destroy(lsfs);
+
+ if (lsfs->pmccntr_el0.attr.mode != 0)
+ sysfs_remove_file(lsfs->parent, &lsfs->pmccntr_el0.attr);
+ if (lsfs->destroy_domain.attr.mode != 0)
+ sysfs_remove_file(lsfs->parent, &lsfs->destroy_domain.attr);
+ if (lsfs->create_domain.attr.mode != 0)
+ sysfs_remove_file(lsfs->parent, &lsfs->create_domain.attr);
+
+ kfree(lsfs->domains);
+ kfree(lsfs);
+}
diff --git a/drivers/soc/marvell/octeontx2-rm/domain_sysfs.h b/drivers/soc/marvell/octeontx2-rm/domain_sysfs.h
new file mode 100644
index 000000000000..d28d5b8e8f38
--- /dev/null
+++ b/drivers/soc/marvell/octeontx2-rm/domain_sysfs.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* OcteonTX2 RVU Resource Manager driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef DOMAIN_SYSFS_H_
+#define DOMAIN_SYSFS_H_
+
+#include "otx2_rm.h"
+
+int domain_sysfs_create(struct rm_dev *rm);
+void domain_sysfs_destroy(struct rm_dev *rm);
+
+#endif /* DOMAIN_SYSFS_H_ */
diff --git a/drivers/soc/marvell/octeontx2-rm/otx2_rm.c b/drivers/soc/marvell/octeontx2-rm/otx2_rm.c
new file mode 100644
index 000000000000..bf0e023abdda
--- /dev/null
+++ b/drivers/soc/marvell/octeontx2-rm/otx2_rm.c
@@ -0,0 +1,1581 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OcteonTX2 RVU Resource Manager driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/sysfs.h>
+
+#include "rvu_reg.h"
+#include "rvu_struct.h"
+#include "otx2_rm.h"
+
+#ifdef CONFIG_OCTEONTX2_RM_DOM_SYSFS
+#include "domain_sysfs.h"
+#endif
+
+#define DRV_NAME "octeontx2-rm"
+#define DRV_VERSION "1.0"
+
+#define PCI_DEVID_OCTEONTX2_SSO_PF 0xA0F9
+#define PCI_DEVID_OCTEONTX2_SSO_VF 0xA0FA
+
+/* PCI BAR nos */
+#define PCI_AF_REG_BAR_NUM 0
+#define PCI_CFG_REG_BAR_NUM 2
+#define PCI_MBOX_BAR_NUM 4
+
+/* Supported devices */
+static const struct pci_device_id rvu_rm_id_table[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_SSO_PF)},
+ {0} /* end of table */
+};
+
+MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_DESCRIPTION("Marvell OcteonTX2 SSO/SSOW/TIM/NPA PF Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, rvu_rm_id_table);
+
+/* All PF devices found are stored here */
+static spinlock_t rm_lst_lock;
+LIST_HEAD(rm_dev_lst_head);
+
+static void
+rm_write64(struct rm_dev *rvu, u64 b, u64 s, u64 o, u64 v)
+{
+ writeq_relaxed(v, rvu->bar2 + ((b << 20) | (s << 12) | o));
+}
+
+static u64 rm_read64(struct rm_dev *rvu, u64 b, u64 s, u64 o)
+{
+ return readq_relaxed(rvu->bar2 + ((b << 20) | (s << 12) | o));
+}
+
+static void enable_af_mbox_int(struct pci_dev *pdev)
+{
+ struct rm_dev *rm;
+
+ rm = pci_get_drvdata(pdev);
+ /* Clear interrupt if any */
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_INT, 0x1ULL);
+
+ /* Now Enable AF-PF interrupt */
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_INT_ENA_W1S, 0x1ULL);
+}
+
+static void disable_af_mbox_int(struct pci_dev *pdev)
+{
+ struct rm_dev *rm;
+
+ rm = pci_get_drvdata(pdev);
+ /* Clear interrupt if any */
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_INT, 0x1ULL);
+
+ /* Now Disable AF-PF interrupt */
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_INT_ENA_W1C, 0x1ULL);
+}
+
+static int
+forward_to_mbox(struct rm_dev *rm, struct otx2_mbox *mbox, int devid,
+ struct mbox_msghdr *req, int size, const char *mstr)
+{
+ struct mbox_msghdr *msg;
+ int res = 0;
+
+ msg = otx2_mbox_alloc_msg(mbox, devid, size);
+ if (msg == NULL)
+ return -ENOMEM;
+
+ memcpy((uint8_t *)msg + sizeof(struct mbox_msghdr),
+ (uint8_t *)req + sizeof(struct mbox_msghdr), size);
+ msg->id = req->id;
+ msg->pcifunc = req->pcifunc;
+ msg->sig = req->sig;
+ msg->ver = req->ver;
+
+ otx2_mbox_msg_send(mbox, devid);
+ res = otx2_mbox_wait_for_rsp(mbox, devid);
+ if (res == -EIO) {
+ dev_err(&rm->pdev->dev, "RVU %s MBOX timeout.\n", mstr);
+ goto err;
+ } else if (res) {
+ dev_err(&rm->pdev->dev,
+ "RVU %s MBOX error: %d.\n", mstr, res);
+ res = -EFAULT;
+ goto err;
+ }
+
+ return 0;
+err:
+ return res;
+}
+
+static int
+handle_af_req(struct rm_dev *rm, struct rvu_vf *vf, struct mbox_msghdr *req,
+ int size)
+{
+ /* We expect a request here */
+ if (req->sig != OTX2_MBOX_REQ_SIG) {
+ dev_err(&rm->pdev->dev,
+ "UP MBOX msg with wrong signature %x, ID 0x%x\n",
+ req->sig, req->id);
+ return -EINVAL;
+ }
+
+	/* Add a switch-case here if the PF needs to handle notifications. */
+ return forward_to_mbox(rm, &rm->pfvf_mbox_up, vf->vf_id, req, size,
+ "VF");
+}
+
+static void rm_afpf_mbox_handler_up(struct work_struct *work)
+{
+ struct rm_dev *rm = container_of(work, struct rm_dev, mbox_wrk_up);
+ struct otx2_mbox *mbox = &rm->afpf_mbox_up;
+ struct otx2_mbox_dev *mdev = mbox->dev;
+ struct rvu_vf *vf;
+ struct mbox_hdr *req_hdr;
+ struct mbox_msghdr *msg;
+ int offset, id, err;
+
+ /* sync with mbox memory region */
+ smp_rmb();
+
+ /* Process received mbox messages */
+ req_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ offset = ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
+ for (id = 0; id < req_hdr->num_msgs; id++) {
+ msg = (struct mbox_msghdr *)(mdev->mbase + mbox->rx_start +
+ offset);
+
+		if ((msg->pcifunc >> RVU_PFVF_PF_SHIFT) != rm->pf ||
+		    (msg->pcifunc & RVU_PFVF_FUNC_MASK) == 0 ||
+		    (msg->pcifunc & RVU_PFVF_FUNC_MASK) > rm->num_vfs) {
+			err = -EINVAL;
+		} else {
+			vf = &rm->vf_info[(msg->pcifunc &
+					   RVU_PFVF_FUNC_MASK) - 1];
+			err = handle_af_req(rm, vf, msg,
+					    msg->next_msgoff - offset);
+		}
+ if (err)
+ otx2_reply_invalid_msg(mbox, 0, msg->pcifunc, msg->id);
+ offset = msg->next_msgoff;
+ }
+
+ otx2_mbox_msg_send(mbox, 0);
+}
+
+static void rm_afpf_mbox_handler(struct work_struct *work)
+{
+ struct rm_dev *rm;
+ struct mbox_hdr *rsp_hdr;
+ struct mbox_msghdr *msg, *fwd;
+ struct otx2_mbox *af_mbx, *vf_mbx;
+ struct free_rsrcs_rsp *rsp;
+ int offset, i, vf_id, size;
+ struct rvu_vf *vf;
+
+ /* Read latest mbox data */
+ smp_rmb();
+
+ rm = container_of(work, struct rm_dev, mbox_wrk);
+ af_mbx = &rm->afpf_mbox;
+ vf_mbx = &rm->pfvf_mbox;
+ rsp_hdr = (struct mbox_hdr *)(af_mbx->dev->mbase + af_mbx->rx_start);
+ if (rsp_hdr->num_msgs == 0)
+ return;
+ offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+
+ for (i = 0; i < rsp_hdr->num_msgs; i++) {
+ msg = (struct mbox_msghdr *)(af_mbx->dev->mbase +
+ af_mbx->rx_start + offset);
+ size = msg->next_msgoff - offset;
+
+ if (msg->id >= MBOX_MSG_MAX) {
+ dev_err(&rm->pdev->dev,
+ "MBOX msg with unknown ID 0x%x\n", msg->id);
+ goto end;
+ }
+
+ if (msg->sig != OTX2_MBOX_RSP_SIG) {
+ dev_err(&rm->pdev->dev,
+ "MBOX msg with wrong signature %x, ID 0x%x\n",
+ msg->sig, msg->id);
+ goto end;
+ }
+
+ vf_id = (msg->pcifunc & RVU_PFVF_FUNC_MASK);
+ if (vf_id > 0) {
+ if (vf_id > rm->num_vfs) {
+ dev_err(&rm->pdev->dev,
+					"MBOX msg to unknown VF: %d > %d\n",
+ vf_id, rm->num_vfs);
+ goto end;
+ }
+ vf = &rm->vf_info[vf_id - 1];
+ /* Ignore stale responses and VFs in FLR. */
+ if (!vf->in_use || vf->got_flr)
+ goto end;
+ fwd = otx2_mbox_alloc_msg(vf_mbx, vf_id - 1, size);
+ if (!fwd) {
+ dev_err(&rm->pdev->dev,
+ "Forwarding to VF%d failed.\n", vf_id);
+ goto end;
+ }
+ memcpy((uint8_t *)fwd + sizeof(struct mbox_msghdr),
+ (uint8_t *)msg + sizeof(struct mbox_msghdr),
+ size);
+ fwd->id = msg->id;
+ fwd->pcifunc = msg->pcifunc;
+ fwd->sig = msg->sig;
+ fwd->ver = msg->ver;
+ fwd->rc = msg->rc;
+ } else {
+ if (msg->ver < OTX2_MBOX_VERSION) {
+ dev_err(&rm->pdev->dev,
+ "MBOX msg with version %04x != %04x\n",
+ msg->ver, OTX2_MBOX_VERSION);
+ goto end;
+ }
+
+ switch (msg->id) {
+ case MBOX_MSG_READY:
+ rm->pf = (msg->pcifunc >> RVU_PFVF_PF_SHIFT) &
+ RVU_PFVF_PF_MASK;
+ break;
+ case MBOX_MSG_FREE_RSRC_CNT:
+ rsp = (struct free_rsrcs_rsp *)msg;
+ memcpy(&rm->limits, msg, sizeof(*rsp));
+ break;
+ default:
+ dev_err(&rm->pdev->dev,
+ "Unsupported msg %d received.\n",
+ msg->id);
+ break;
+ }
+ }
+end:
+ offset = msg->next_msgoff;
+ af_mbx->dev->msgs_acked++;
+ }
+ otx2_mbox_reset(af_mbx, 0);
+}
+
+static int
+reply_free_rsrc_cnt(struct rm_dev *rm, struct rvu_vf *vf,
+ struct mbox_msghdr *req, int size)
+{
+ struct free_rsrcs_rsp *rsp;
+
+ rsp = (struct free_rsrcs_rsp *)otx2_mbox_alloc_msg(&rm->pfvf_mbox,
+ vf->vf_id,
+ sizeof(*rsp));
+ if (rsp == NULL)
+ return -ENOMEM;
+
+ rsp->hdr.id = MBOX_MSG_FREE_RSRC_CNT;
+ rsp->hdr.pcifunc = req->pcifunc;
+ rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
+ mutex_lock(&rm->lock);
+ rsp->sso = rm->vf_limits.sso->a[vf->vf_id].val;
+ rsp->ssow = rm->vf_limits.ssow->a[vf->vf_id].val;
+ rsp->npa = rm->vf_limits.npa->a[vf->vf_id].val;
+ rsp->cpt = rm->vf_limits.cpt->a[vf->vf_id].val;
+ rsp->tim = rm->vf_limits.tim->a[vf->vf_id].val;
+ rsp->nix = 0;
+ mutex_unlock(&rm->lock);
+ return 0;
+}
+
+static int
+check_attach_rsrcs_req(struct rm_dev *rm, struct rvu_vf *vf,
+ struct mbox_msghdr *req, int size)
+{
+ struct rsrc_attach *rsrc_req;
+
+ rsrc_req = (struct rsrc_attach *)req;
+ mutex_lock(&rm->lock);
+ if (rsrc_req->sso > rm->vf_limits.sso->a[vf->vf_id].val ||
+ rsrc_req->ssow > rm->vf_limits.ssow->a[vf->vf_id].val ||
+ rsrc_req->npalf > rm->vf_limits.npa->a[vf->vf_id].val ||
+ rsrc_req->timlfs > rm->vf_limits.tim->a[vf->vf_id].val ||
+ rsrc_req->cptlfs > rm->vf_limits.cpt->a[vf->vf_id].val ||
+ rsrc_req->nixlf > 0) {
+ dev_err(&rm->pdev->dev,
+ "Invalid ATTACH_RESOURCES request from %s\n",
+ dev_name(&vf->pdev->dev));
+ mutex_unlock(&rm->lock);
+ return -EINVAL;
+ }
+ mutex_unlock(&rm->lock);
+ return forward_to_mbox(rm, &rm->afpf_mbox, 0, req, size, "AF");
+}
+
+static int
+handle_vf_req(struct rm_dev *rm, struct rvu_vf *vf, struct mbox_msghdr *req,
+ int size)
+{
+ int err = 0;
+
+	/* Check if valid, if not reply with an invalid msg */
+ if (req->sig != OTX2_MBOX_REQ_SIG) {
+ dev_err(&rm->pdev->dev,
+ "VF MBOX msg with wrong signature %x, ID 0x%x\n",
+ req->sig, req->id);
+ return -EINVAL;
+ }
+
+ switch (req->id) {
+ case MBOX_MSG_READY:
+ if (req->ver < OTX2_MBOX_VERSION) {
+ dev_err(&rm->pdev->dev,
+ "VF MBOX msg with version %04x != %04x\n",
+ req->ver, OTX2_MBOX_VERSION);
+ return -EINVAL;
+ }
+ vf->in_use = true;
+ err = forward_to_mbox(rm, &rm->afpf_mbox, 0, req, size, "AF");
+ break;
+ case MBOX_MSG_FREE_RSRC_CNT:
+ if (req->ver < OTX2_MBOX_VERSION) {
+ dev_err(&rm->pdev->dev,
+ "VF MBOX msg with version %04x != %04x\n",
+ req->ver, OTX2_MBOX_VERSION);
+ return -EINVAL;
+ }
+ err = reply_free_rsrc_cnt(rm, vf, req, size);
+ break;
+ case MBOX_MSG_ATTACH_RESOURCES:
+ if (req->ver < OTX2_MBOX_VERSION) {
+ dev_err(&rm->pdev->dev,
+ "VF MBOX msg with version %04x != %04x\n",
+ req->ver, OTX2_MBOX_VERSION);
+ return -EINVAL;
+ }
+ err = check_attach_rsrcs_req(rm, vf, req, size);
+ break;
+ default:
+ err = forward_to_mbox(rm, &rm->afpf_mbox, 0, req, size, "AF");
+ break;
+ }
+
+ return err;
+}
+
+static int send_flr_msg(struct otx2_mbox *mbox, int dev_id, int pcifunc)
+{
+ struct msg_req *req;
+
+ req = (struct msg_req *)
+ otx2_mbox_alloc_msg(mbox, dev_id, sizeof(*req));
+ if (req == NULL)
+ return -ENOMEM;
+
+ req->hdr.pcifunc = pcifunc;
+ req->hdr.id = MBOX_MSG_VF_FLR;
+ req->hdr.sig = OTX2_MBOX_REQ_SIG;
+
+ otx2_mbox_msg_send(mbox, 0);
+
+ return 0;
+}
+
+static void rm_send_flr_msg(struct rm_dev *rm, struct rvu_vf *vf)
+{
+ int res, pcifunc;
+
+ pcifunc = (vf->rm->pf << RVU_PFVF_PF_SHIFT) |
+ ((vf->vf_id + 1) & RVU_PFVF_FUNC_MASK);
+
+ if (send_flr_msg(&rm->afpf_mbox, 0, pcifunc) != 0) {
+ dev_err(&rm->pdev->dev, "Sending FLR to AF failed\n");
+ return;
+ }
+
+ res = otx2_mbox_wait_for_rsp(&rm->afpf_mbox, 0);
+ if (res == -EIO) {
+ dev_err(&rm->pdev->dev, "RVU AF MBOX timeout.\n");
+ } else if (res) {
+ dev_err(&rm->pdev->dev,
+ "RVU MBOX error: %d.\n", res);
+ }
+}
+
+static void rm_send_flr_to_dpi(struct rm_dev *rm)
+{
+ /* TODO: DPI VF's needs to be handled */
+}
+
+static void rm_pfvf_flr_handler(struct work_struct *work)
+{
+ struct rvu_vf *vf = container_of(work, struct rvu_vf, pfvf_flr_work);
+ struct rm_dev *rm = vf->rm;
+ struct otx2_mbox *mbox = &rm->pfvf_mbox;
+
+ rm_send_flr_to_dpi(rm);
+ rm_send_flr_msg(rm, vf);
+
+ /* Disable interrupts from AF and wait for any pending
+ * responses to be handled for this VF and then reset the
+ * mailbox
+ */
+ disable_af_mbox_int(rm->pdev);
+ flush_workqueue(rm->afpf_mbox_wq);
+ otx2_mbox_reset(mbox, vf->vf_id);
+ vf->in_use = false;
+ vf->got_flr = false;
+ enable_af_mbox_int(rm->pdev);
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFTRPENDX(vf->vf_id / 64),
+ BIT_ULL(vf->intr_idx));
+}
+
+static void rm_pfvf_mbox_handler_up(struct work_struct *work)
+{
+ struct rm_dev *rm;
+ struct mbox_hdr *rsp_hdr;
+ struct mbox_msghdr *msg, *fwd;
+ struct otx2_mbox *af_mbx, *vf_mbx;
+ int offset, i, size;
+ struct rvu_vf *vf;
+
+ /* Read latest mbox data */
+ smp_rmb();
+
+ vf = container_of(work, struct rvu_vf, mbox_wrk_up);
+ rm = vf->rm;
+ af_mbx = &rm->afpf_mbox;
+ vf_mbx = &rm->pfvf_mbox;
+ rsp_hdr = (struct mbox_hdr *)(vf_mbx->dev[vf->vf_id].mbase +
+ vf_mbx->rx_start);
+ if (rsp_hdr->num_msgs == 0)
+ return;
+ offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+
+ for (i = 0; i < rsp_hdr->num_msgs; i++) {
+		msg = (struct mbox_msghdr *)(vf_mbx->dev[vf->vf_id].mbase +
+					     vf_mbx->rx_start + offset);
+ size = msg->next_msgoff - offset;
+
+ if (msg->sig != OTX2_MBOX_RSP_SIG) {
+ dev_err(&rm->pdev->dev,
+ "UP MBOX msg with wrong signature %x, ID 0x%x\n",
+ msg->sig, msg->id);
+ goto end;
+ }
+
+		/* Override pcifunc with the ID of the VF that sent this */
+		msg->pcifunc = (rm->pf << RVU_PFVF_PF_SHIFT) |
+			       ((vf->vf_id + 1) & RVU_PFVF_FUNC_MASK);
+
+ fwd = otx2_mbox_alloc_msg(af_mbx, 0, size);
+ if (!fwd) {
+ dev_err(&rm->pdev->dev,
+ "UP Forwarding from VF%d to AF failed.\n",
+ vf->vf_id);
+ goto end;
+ }
+ memcpy((uint8_t *)fwd + sizeof(struct mbox_msghdr),
+ (uint8_t *)msg + sizeof(struct mbox_msghdr),
+ size);
+ fwd->id = msg->id;
+ fwd->pcifunc = msg->pcifunc;
+ fwd->sig = msg->sig;
+ fwd->ver = msg->ver;
+ fwd->rc = msg->rc;
+end:
+ offset = msg->next_msgoff;
+		vf_mbx->dev[vf->vf_id].msgs_acked++;
+ }
+ otx2_mbox_reset(vf_mbx, vf->vf_id);
+}
+
+static void rm_pfvf_mbox_handler(struct work_struct *work)
+{
+ struct rvu_vf *vf = container_of(work, struct rvu_vf, mbox_wrk);
+ struct rm_dev *rm = vf->rm;
+ struct otx2_mbox *mbox = &rm->pfvf_mbox;
+ struct otx2_mbox_dev *mdev = &mbox->dev[vf->vf_id];
+ struct mbox_hdr *req_hdr;
+ struct mbox_msghdr *msg;
+ int offset, id, err;
+
+ /* sync with mbox memory region */
+ smp_rmb();
+
+ /* Process received mbox messages */
+ req_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ offset = ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
+ for (id = 0; id < req_hdr->num_msgs; id++) {
+ msg = (struct mbox_msghdr *)(mdev->mbase + mbox->rx_start +
+ offset);
+
+ /* Set which VF sent this message based on mbox IRQ */
+ msg->pcifunc = ((u16)rm->pf << RVU_PFVF_PF_SHIFT) |
+ ((vf->vf_id + 1) & RVU_PFVF_FUNC_MASK);
+ err = handle_vf_req(rm, vf, msg, msg->next_msgoff - offset);
+ if (err)
+ otx2_reply_invalid_msg(mbox, vf->vf_id, msg->pcifunc,
+ msg->id);
+ offset = msg->next_msgoff;
+ }
+ /* Send mbox responses to VF */
+ if (mdev->num_msgs)
+ otx2_mbox_msg_send(mbox, vf->vf_id);
+}
+
+static irqreturn_t rm_af_pf_mbox_intr(int irq, void *arg)
+{
+ struct rm_dev *rm = (struct rm_dev *)arg;
+ struct mbox_hdr *hdr;
+ struct otx2_mbox *mbox;
+ struct otx2_mbox_dev *mdev;
+
+ /* Read latest mbox data */
+ smp_rmb();
+
+ mbox = &rm->afpf_mbox;
+ mdev = &mbox->dev[0];
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ /* Handle PF => AF channel response */
+ if (hdr->num_msgs)
+ queue_work(rm->afpf_mbox_wq, &rm->mbox_wrk);
+
+ mbox = &rm->afpf_mbox_up;
+ mdev = &mbox->dev[0];
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ /* Handle AF => PF request */
+ if (hdr->num_msgs)
+ queue_work(rm->afpf_mbox_wq, &rm->mbox_wrk_up);
+
+ /* Clear and ack the interrupt */
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_INT, 0x1ULL);
+
+ return IRQ_HANDLED;
+}
+
+static void __handle_vf_flr(struct rm_dev *rm, struct rvu_vf *vf_ptr)
+{
+ if (vf_ptr->in_use) {
+ /* Using the same MBOX workqueue here, so that we can
+ * synchronize with other VF->PF messages being forwarded to
+ * AF
+ */
+ vf_ptr->got_flr = true;
+ queue_work(rm->pfvf_mbox_wq, &vf_ptr->pfvf_flr_work);
+ } else
+ rm_write64(rm, BLKADDR_RVUM, 0,
+ RVU_PF_VFTRPENDX(vf_ptr->vf_id / 64),
+ BIT_ULL(vf_ptr->intr_idx));
+}
+
+static irqreturn_t rm_pf_vf_flr_intr(int irq, void *arg)
+{
+ struct rm_dev *rm = (struct rm_dev *)arg;
+ u64 intr;
+ struct rvu_vf *vf_ptr;
+ int vf, i;
+
+ /* Check which VF FLR has been raised and process accordingly */
+ for (i = 0; i < 2; i++) {
+ /* Read the interrupt bits */
+ intr = rm_read64(rm, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(i));
+
+ for (vf = i * 64; vf < rm->num_vfs; vf++) {
+ vf_ptr = &rm->vf_info[vf];
+ if (intr & (1ULL << vf_ptr->intr_idx)) {
+ /* Clear the interrupts */
+ rm_write64(rm, BLKADDR_RVUM, 0,
+ RVU_PF_VFFLR_INTX(i),
+ BIT_ULL(vf_ptr->intr_idx));
+ __handle_vf_flr(rm, vf_ptr);
+ }
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rm_pf_vf_mbox_intr(int irq, void *arg)
+{
+ struct rm_dev *rm = (struct rm_dev *)arg;
+ struct mbox_hdr *hdr;
+ struct otx2_mbox *mbox;
+ struct otx2_mbox_dev *mdev;
+ u64 intr;
+ struct rvu_vf *vf;
+ int i, vfi;
+
+ /* Check which VF has raised an interrupt and schedule corresponding
+ * workq to process the MBOX
+ */
+ for (i = 0; i < 2; i++) {
+ /* Read the interrupt bits */
+ intr = rm_read64(rm, BLKADDR_RVUM, 0, RVU_PF_VFPF_MBOX_INTX(i));
+
+ for (vfi = i * 64; vfi < rm->num_vfs; vfi++) {
+ vf = &rm->vf_info[vfi];
+ if ((intr & (1ULL << vf->intr_idx)) == 0)
+ continue;
+ mbox = &rm->pfvf_mbox;
+ mdev = &mbox->dev[vf->vf_id];
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ /* Handle VF => PF channel request */
+ if (hdr->num_msgs)
+ queue_work(rm->pfvf_mbox_wq, &vf->mbox_wrk);
+
+ mbox = &rm->pfvf_mbox_up;
+ mdev = &mbox->dev[vf->vf_id];
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ /* Handle PF => VF channel response */
+ if (hdr->num_msgs)
+ queue_work(rm->pfvf_mbox_wq, &vf->mbox_wrk_up);
+ /* Clear the interrupt */
+ rm_write64(rm, BLKADDR_RVUM, 0,
+ RVU_PF_VFPF_MBOX_INTX(i),
+ BIT_ULL(vf->intr_idx));
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int rm_register_flr_irq(struct pci_dev *pdev)
+{
+ struct rm_dev *rm;
+ int err, vec, i;
+
+ rm = pci_get_drvdata(pdev);
+
+ /* Register for VF FLR interrupts
+ * There are 2 vectors starting at index 0x0
+ */
+ for (vec = RVU_PF_INT_VEC_VFFLR0, i = 0;
+ vec + i <= RVU_PF_INT_VEC_VFFLR1; i++) {
+ sprintf(&rm->irq_names[(vec + i) * NAME_SIZE],
+ "PF%02d_VF_FLR_IRQ%d", pdev->devfn, i);
+ err = request_irq(pci_irq_vector(pdev, vec + i),
+ rm_pf_vf_flr_intr, 0,
+ &rm->irq_names[(vec + i) * NAME_SIZE], rm);
+ if (err) {
+ dev_err(&pdev->dev,
+ "request_irq() failed for PFVF FLR intr %d\n",
+ vec);
+ goto reg_fail;
+ }
+ rm->irq_allocated[vec + i] = true;
+ }
+
+ return 0;
+
+reg_fail:
+
+ return err;
+}
+
+static void rm_free_flr_irq(struct pci_dev *pdev)
+{
+ (void) pdev;
+	/* Nothing to do here; the FLR IRQs are freed in rm_free_irqs() */
+}
+
+static int rm_alloc_irqs(struct pci_dev *pdev)
+{
+ struct rm_dev *rm;
+ int err;
+
+ rm = pci_get_drvdata(pdev);
+
+ /* Get number of MSIX vector count and allocate vectors first */
+ rm->msix_count = pci_msix_vec_count(pdev);
+
+ err = pci_alloc_irq_vectors(pdev, rm->msix_count, rm->msix_count,
+ PCI_IRQ_MSIX);
+
+ if (err < 0) {
+ dev_err(&pdev->dev, "pci_alloc_irq_vectors() failed %d\n", err);
+ return err;
+ }
+
+ rm->irq_names = kmalloc_array(rm->msix_count, NAME_SIZE, GFP_KERNEL);
+ if (!rm->irq_names) {
+ err = -ENOMEM;
+ goto err_irq_names;
+ }
+
+ rm->irq_allocated = kcalloc(rm->msix_count, sizeof(bool), GFP_KERNEL);
+ if (!rm->irq_allocated) {
+ err = -ENOMEM;
+ goto err_irq_allocated;
+ }
+
+ return 0;
+
+err_irq_allocated:
+ kfree(rm->irq_names);
+ rm->irq_names = NULL;
+err_irq_names:
+ pci_free_irq_vectors(pdev);
+ rm->msix_count = 0;
+
+ return err;
+}
+
+static void rm_free_irqs(struct pci_dev *pdev)
+{
+ struct rm_dev *rm;
+ int irq;
+
+ rm = pci_get_drvdata(pdev);
+ for (irq = 0; irq < rm->msix_count; irq++) {
+ if (rm->irq_allocated[irq])
+ free_irq(pci_irq_vector(rm->pdev, irq), rm);
+ }
+
+ pci_free_irq_vectors(pdev);
+
+ kfree(rm->irq_names);
+ kfree(rm->irq_allocated);
+}
+
+static int rm_register_mbox_irq(struct pci_dev *pdev)
+{
+ struct rm_dev *rm;
+ int err, vec = RVU_PF_INT_VEC_VFPF_MBOX0, i;
+
+ rm = pci_get_drvdata(pdev);
+
+ /* Register PF-AF interrupt handler */
+ sprintf(&rm->irq_names[RVU_PF_INT_VEC_AFPF_MBOX * NAME_SIZE],
+ "PF%02d_AF_MBOX_IRQ", pdev->devfn);
+ err = request_irq(pci_irq_vector(pdev, RVU_PF_INT_VEC_AFPF_MBOX),
+ rm_af_pf_mbox_intr, 0,
+ &rm->irq_names[RVU_PF_INT_VEC_AFPF_MBOX * NAME_SIZE],
+ rm);
+ if (err) {
+ dev_err(&pdev->dev,
+ "request_irq() failed for AF_PF MSIX vector\n");
+ return err;
+ }
+ rm->irq_allocated[RVU_PF_INT_VEC_AFPF_MBOX] = true;
+
+ err = otx2_mbox_init(&rm->afpf_mbox, rm->af_mbx_base, pdev, rm->bar2,
+ MBOX_DIR_PFAF, 1);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize PF/AF MBOX\n");
+ goto error;
+ }
+ err = otx2_mbox_init(&rm->afpf_mbox_up, rm->af_mbx_base, pdev, rm->bar2,
+ MBOX_DIR_PFAF_UP, 1);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize PF/AF UP MBOX\n");
+ goto error;
+ }
+
+ /* Register for PF-VF mailbox interrupts
+ * There are 2 vectors starting at index 0x4
+ */
+ for (vec = RVU_PF_INT_VEC_VFPF_MBOX0, i = 0;
+ vec + i <= RVU_PF_INT_VEC_VFPF_MBOX1; i++) {
+ sprintf(&rm->irq_names[(vec + i) * NAME_SIZE],
+ "PF%02d_VF_MBOX_IRQ%d", pdev->devfn, i);
+ err = request_irq(pci_irq_vector(pdev, vec + i),
+ rm_pf_vf_mbox_intr, 0,
+ &rm->irq_names[(vec + i) * NAME_SIZE], rm);
+ if (err) {
+ dev_err(&pdev->dev,
+ "request_irq() failed for PFVF Mbox intr %d\n",
+ vec + i);
+ goto error;
+ }
+ rm->irq_allocated[vec + i] = true;
+ }
+
+ rm->afpf_mbox_wq = alloc_workqueue(
+ "rm_pfaf_mailbox", WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM, 1);
+	if (!rm->afpf_mbox_wq) {
+		err = -ENOMEM;
+		goto error;
+	}
+
+ INIT_WORK(&rm->mbox_wrk, rm_afpf_mbox_handler);
+ INIT_WORK(&rm->mbox_wrk_up, rm_afpf_mbox_handler_up);
+
+ return err;
+
+error:
+ if (rm->afpf_mbox_up.dev != NULL)
+ otx2_mbox_destroy(&rm->afpf_mbox_up);
+ if (rm->afpf_mbox.dev != NULL)
+ otx2_mbox_destroy(&rm->afpf_mbox);
+
+ return err;
+}
+
+static int rm_get_pcifunc(struct rm_dev *rm)
+{
+ struct msg_req *ready_req;
+ int res = 0;
+
+ ready_req = (struct msg_req *)
+		otx2_mbox_alloc_msg_rsp(&rm->afpf_mbox, 0, sizeof(*ready_req),
+ sizeof(struct ready_msg_rsp));
+ if (ready_req == NULL) {
+ dev_err(&rm->pdev->dev, "RVU MBOX failed to get message.\n");
+ return -EFAULT;
+ }
+
+ ready_req->hdr.id = MBOX_MSG_READY;
+ ready_req->hdr.sig = OTX2_MBOX_REQ_SIG;
+ otx2_mbox_msg_send(&rm->afpf_mbox, 0);
+ res = otx2_mbox_wait_for_rsp(&rm->afpf_mbox, 0);
+ if (res == -EIO) {
+ dev_err(&rm->pdev->dev, "RVU AF MBOX timeout.\n");
+ } else if (res) {
+ dev_err(&rm->pdev->dev, "RVU MBOX error: %d.\n", res);
+ res = -EFAULT;
+ }
+ return res;
+}
+
+static int rm_get_available_rsrcs(struct rm_dev *rm)
+{
+ struct mbox_msghdr *rsrc_req;
+ int res = 0;
+
+ rsrc_req = otx2_mbox_alloc_msg(&rm->afpf_mbox, 0, sizeof(*rsrc_req));
+ if (rsrc_req == NULL) {
+ dev_err(&rm->pdev->dev, "RVU MBOX failed to get message.\n");
+ return -EFAULT;
+ }
+ rsrc_req->id = MBOX_MSG_FREE_RSRC_CNT;
+ rsrc_req->sig = OTX2_MBOX_REQ_SIG;
+ rsrc_req->pcifunc = RVU_PFFUNC(rm->pf, 0);
+ otx2_mbox_msg_send(&rm->afpf_mbox, 0);
+ res = otx2_mbox_wait_for_rsp(&rm->afpf_mbox, 0);
+ if (res == -EIO) {
+ dev_err(&rm->pdev->dev, "RVU AF MBOX timeout.\n");
+ } else if (res) {
+ dev_err(&rm->pdev->dev,
+ "RVU MBOX error: %d.\n", res);
+ res = -EFAULT;
+ }
+ return res;
+}
+
+static void rm_afpf_mbox_term(struct pci_dev *pdev)
+{
+ struct rm_dev *rm = pci_get_drvdata(pdev);
+
+ flush_workqueue(rm->afpf_mbox_wq);
+ destroy_workqueue(rm->afpf_mbox_wq);
+ otx2_mbox_destroy(&rm->afpf_mbox);
+ otx2_mbox_destroy(&rm->afpf_mbox_up);
+}
+
+static ssize_t vf_in_use_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct rvu_vf *vf = container_of(attr, struct rvu_vf, in_use_attr);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", vf->in_use);
+}
+
+static void vf_sysfs_destroy(struct pci_dev *pdev)
+{
+ struct rm_dev *rm;
+ struct rvu_vf *vf;
+ int i;
+
+ rm = pci_get_drvdata(pdev);
+
+ quotas_free(rm->vf_limits.sso);
+ quotas_free(rm->vf_limits.ssow);
+ quotas_free(rm->vf_limits.npa);
+ quotas_free(rm->vf_limits.cpt);
+ quotas_free(rm->vf_limits.tim);
+ rm->vf_limits.sso = NULL;
+ rm->vf_limits.ssow = NULL;
+ rm->vf_limits.npa = NULL;
+ rm->vf_limits.cpt = NULL;
+ rm->vf_limits.tim = NULL;
+
+ for (i = 0; i < rm->num_vfs; i++) {
+ vf = &rm->vf_info[i];
+ if (vf->limits_kobj == NULL)
+ continue;
+ if (vf->in_use_attr.attr.mode != 0) {
+ sysfs_remove_file(&vf->pdev->dev.kobj,
+ &vf->in_use_attr.attr);
+ vf->in_use_attr.attr.mode = 0;
+ }
+ kobject_del(vf->limits_kobj);
+ vf->limits_kobj = NULL;
+ pci_dev_put(vf->pdev);
+ vf->pdev = NULL;
+ }
+}
+
+static int check_vf_in_use(void *arg, struct quota *quota, int new_val)
+{
+ struct rvu_vf *vf = arg;
+
+ if (vf->in_use) {
+ dev_err(quota->dev, "Can't modify limits, device is in use.\n");
+ return 1;
+ }
+ return 0;
+}
+
+static struct quota_ops vf_limit_ops = {
+ .pre_store = check_vf_in_use,
+};
+
+static int vf_sysfs_create(struct pci_dev *pdev)
+{
+ struct rm_dev *rm;
+ struct pci_dev *vdev;
+ struct rvu_vf *vf;
+ int err, i;
+
+ vdev = NULL;
+ vf = NULL;
+ rm = pci_get_drvdata(pdev);
+ err = 0;
+ i = 0;
+
+ /* Create limit structures for all resource types */
+ rm->vf_limits.sso = quotas_alloc(rm->num_vfs, rm->limits.sso,
+ rm->limits.sso, 0, &rm->lock,
+ &vf_limit_ops);
+ if (rm->vf_limits.sso == NULL) {
+ dev_err(&pdev->dev,
+ "Failed to allocate sso limits structures.\n");
+ err = -EFAULT;
+ goto error;
+ }
+ rm->vf_limits.ssow = quotas_alloc(rm->num_vfs, rm->limits.ssow,
+ rm->limits.ssow, 0, &rm->lock,
+ &vf_limit_ops);
+ if (rm->vf_limits.ssow == NULL) {
+ dev_err(&pdev->dev,
+ "Failed to allocate ssow limits structures.\n");
+ err = -EFAULT;
+ goto error;
+ }
+	/* AF currently reports only 0 or 1 free NPA LF for this PF even
+	 * though more LFs are available. Until proper limits are implemented
+	 * in AF, allow up to num_vfs NPA LFs in total.
+ */
+ rm->vf_limits.npa = quotas_alloc(rm->num_vfs, 1, rm->num_vfs, 0,
+ &rm->lock, &vf_limit_ops);
+ if (rm->vf_limits.npa == NULL) {
+ dev_err(&pdev->dev,
+ "Failed to allocate npa limits structures.\n");
+ err = -EFAULT;
+ goto error;
+ }
+ rm->vf_limits.cpt = quotas_alloc(rm->num_vfs, rm->limits.cpt,
+ rm->limits.cpt, 0, &rm->lock,
+ &vf_limit_ops);
+ if (rm->vf_limits.cpt == NULL) {
+ dev_err(&pdev->dev,
+ "Failed to allocate cpt limits structures.\n");
+ err = -EFAULT;
+ goto error;
+ }
+ rm->vf_limits.tim = quotas_alloc(rm->num_vfs, rm->limits.tim,
+ rm->limits.tim, 0, &rm->lock,
+ &vf_limit_ops);
+ if (rm->vf_limits.tim == NULL) {
+ dev_err(&pdev->dev,
+ "Failed to allocate tim limits structures.\n");
+ err = -EFAULT;
+ goto error;
+ }
+
+ /* loop through all the VFs and create sysfs entries for them */
+ while ((vdev = pci_get_device(pdev->vendor, PCI_DEVID_OCTEONTX2_SSO_VF,
+ vdev))) {
+ if (!vdev->is_virtfn || (vdev->physfn != pdev))
+ continue;
+ vf = &rm->vf_info[i];
+ vf->pdev = pci_dev_get(vdev);
+ vf->limits_kobj = kobject_create_and_add("limits",
+ &vdev->dev.kobj);
+ if (vf->limits_kobj == NULL) {
+ err = -ENOMEM;
+ goto error;
+ }
+ if (quota_sysfs_create("sso", vf->limits_kobj, &vdev->dev,
+ &rm->vf_limits.sso->a[i], vf) != 0) {
+ dev_err(&pdev->dev,
+ "Failed to create sso limits sysfs for %s\n",
+ pci_name(vdev));
+ err = -EFAULT;
+ goto error;
+ }
+ if (quota_sysfs_create("ssow", vf->limits_kobj, &vdev->dev,
+ &rm->vf_limits.ssow->a[i], vf) != 0) {
+ dev_err(&pdev->dev,
+ "Failed to create ssow limits sysfs for %s\n",
+ pci_name(vdev));
+ err = -EFAULT;
+ goto error;
+ }
+ if (quota_sysfs_create("npa", vf->limits_kobj, &vdev->dev,
+ &rm->vf_limits.npa->a[i], vf) != 0) {
+ dev_err(&pdev->dev,
+ "Failed to create npa limits sysfs for %s\n",
+ pci_name(vdev));
+ err = -EFAULT;
+ goto error;
+ }
+ if (quota_sysfs_create("cpt", vf->limits_kobj, &vdev->dev,
+ &rm->vf_limits.cpt->a[i], vf) != 0) {
+ dev_err(&pdev->dev,
+ "Failed to create cpt limits sysfs for %s\n",
+ pci_name(vdev));
+ err = -EFAULT;
+ goto error;
+ }
+ if (quota_sysfs_create("tim", vf->limits_kobj, &vdev->dev,
+ &rm->vf_limits.tim->a[i], vf) != 0) {
+ dev_err(&pdev->dev,
+ "Failed to create tim limits sysfs for %s\n",
+ pci_name(vdev));
+ err = -EFAULT;
+ goto error;
+ }
+
+ vf->in_use_attr.show = vf_in_use_show;
+ vf->in_use_attr.attr.name = "in_use";
+ vf->in_use_attr.attr.mode = 0444;
+ sysfs_attr_init(&vf->in_use_attr.attr);
+ if (sysfs_create_file(&vdev->dev.kobj, &vf->in_use_attr.attr)) {
+ dev_err(&pdev->dev,
+ "Failed to create in_use sysfs entry for %s\n",
+ pci_name(vdev));
+ err = -EFAULT;
+ goto error;
+ }
+ i++;
+ }
+
+ return 0;
+error:
+ vf_sysfs_destroy(pdev);
+ return err;
+}
+
+static int rm_check_pf_usable(struct rm_dev *rm)
+{
+ u64 rev;
+
+ rev = rm_read64(rm, BLKADDR_RVUM, 0,
+ RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_RVUM));
+ rev = (rev >> 12) & 0xFF;
+ /* Check if AF has setup revision for RVUM block,
+ * otherwise this driver probe should be deferred
+ * until AF driver comes up.
+ */
+ if (!rev) {
+ dev_warn(&rm->pdev->dev,
+ "AF is not initialized, deferring probe\n");
+ return -EPROBE_DEFER;
+ }
+ return 0;
+}
+
+static int rm_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ struct rm_dev *rm;
+ int err;
+
+ rm = devm_kzalloc(dev, sizeof(struct rm_dev), GFP_KERNEL);
+ if (rm == NULL)
+ return -ENOMEM;
+
+ rm->pdev = pdev;
+ pci_set_drvdata(pdev, rm);
+
+ mutex_init(&rm->lock);
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ goto enable_failed;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ goto map_failed;
+ }
+
+ if (pci_sriov_get_totalvfs(pdev) <= 0) {
+ err = -ENODEV;
+ goto set_mask_failed;
+ }
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "Unable to set DMA mask\n");
+ goto set_mask_failed;
+ }
+
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
+ if (err) {
+		dev_err(dev, "Unable to set consistent DMA mask\n");
+ goto set_mask_failed;
+ }
+
+ pci_set_master(pdev);
+
+ /* CSR Space mapping */
+ rm->bar2 = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM,
+ pci_resource_len(pdev, PCI_CFG_REG_BAR_NUM));
+ if (!rm->bar2) {
+ dev_err(&pdev->dev, "Unable to map BAR2\n");
+ err = -ENODEV;
+ goto set_mask_failed;
+ }
+
+ err = rm_check_pf_usable(rm);
+ if (err)
+ goto pf_unusable;
+
+ /* Map PF-AF mailbox memory */
+ rm->af_mbx_base = ioremap_wc(pci_resource_start(pdev, PCI_MBOX_BAR_NUM),
+ pci_resource_len(pdev, PCI_MBOX_BAR_NUM));
+ if (!rm->af_mbx_base) {
+ dev_err(&pdev->dev, "Unable to map BAR4\n");
+ err = -ENODEV;
+ goto pf_unusable;
+ }
+
+ /* Request IRQ for PF-VF mailbox here - TBD: check if this can be moved
+ * to sriov enable function
+ */
+ if (rm_alloc_irqs(pdev)) {
+ dev_err(&pdev->dev,
+ "Unable to allocate MSIX Interrupt vectors\n");
+ err = -ENODEV;
+ goto alloc_irqs_failed;
+ }
+
+ if (rm_register_mbox_irq(pdev) != 0) {
+ dev_err(&pdev->dev,
+ "Unable to allocate MBOX Interrupt vectors\n");
+ err = -ENODEV;
+ goto reg_mbox_irq_failed;
+ }
+
+ if (rm_register_flr_irq(pdev) != 0) {
+ dev_err(&pdev->dev,
+ "Unable to allocate FLR Interrupt vectors\n");
+ err = -ENODEV;
+ goto reg_flr_irq_failed;
+ }
+
+ enable_af_mbox_int(pdev);
+
+ if (rm_get_pcifunc(rm)) {
+ dev_err(&pdev->dev,
+ "Failed to retrieve pcifunc from AF\n");
+ err = -ENODEV;
+ goto get_pcifunc_failed;
+ }
+
+ /* Add to global list of PFs found */
+ spin_lock(&rm_lst_lock);
+ list_add(&rm->list, &rm_dev_lst_head);
+ spin_unlock(&rm_lst_lock);
+
+ return 0;
+
+get_pcifunc_failed:
+ disable_af_mbox_int(pdev);
+ rm_free_flr_irq(pdev);
+reg_flr_irq_failed:
+ rm_afpf_mbox_term(pdev);
+reg_mbox_irq_failed:
+ rm_free_irqs(pdev);
+alloc_irqs_failed:
+ iounmap(rm->af_mbx_base);
+pf_unusable:
+ pcim_iounmap(pdev, rm->bar2);
+set_mask_failed:
+ pci_release_regions(pdev);
+map_failed:
+ pci_disable_device(pdev);
+enable_failed:
+ pci_set_drvdata(pdev, NULL);
+ devm_kfree(dev, rm);
+ return err;
+}
+
+static void enable_vf_flr_int(struct pci_dev *pdev)
+{
+ struct rm_dev *rm;
+ int ena_bits;
+
+ rm = pci_get_drvdata(pdev);
+ /* Clear any pending interrupts */
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFTRPENDX(0), ~0x0ULL);
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(0), ~0x0ULL);
+
+ if (rm->num_vfs > 64) { /* For VF 64 to 127(MAX) */
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFTRPENDX(1), ~0x0ULL);
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(1), ~0x0ULL);
+ }
+
+	/* Enable FLR interrupts for the first 64 VFs (up to num_vfs) */
+ ena_bits = ((rm->num_vfs - 1) % 64);
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INT_ENA_W1SX(0),
+ GENMASK_ULL(ena_bits, 0));
+
+ if (rm->num_vfs > 64) { /* For VF 64 to 127(MAX) */
+		/* Enable FLR interrupts for VFs 64 to 127 */
+ ena_bits = rm->num_vfs - 64 - 1;
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INT_ENA_W1SX(1),
+ GENMASK_ULL(ena_bits, 0));
+ }
+}
+
+static void disable_vf_flr_int(struct pci_dev *pdev)
+{
+ struct rm_dev *rm;
+ int ena_bits;
+ u64 intr;
+
+ rm = pci_get_drvdata(pdev);
+ /* clear any pending interrupt */
+
+ intr = rm_read64(rm, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(0));
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(0), intr);
+ intr = rm_read64(rm, BLKADDR_RVUM, 0, RVU_PF_VFTRPENDX(0));
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFTRPENDX(0), intr);
+
+ if (rm->num_vfs > 64) { /* For VF 64 to 127(MAX) */
+ intr = rm_read64(rm, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(1));
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(1), intr);
+ intr = rm_read64(rm, BLKADDR_RVUM, 0, RVU_PF_VFTRPENDX(1));
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFTRPENDX(1), intr);
+ }
+
+	/* Disable FLR interrupts for the first 64 VFs (up to num_vfs) */
+ ena_bits = ((rm->num_vfs - 1) % 64);
+
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INT_ENA_W1CX(0),
+ GENMASK_ULL(ena_bits, 0));
+
+ if (rm->num_vfs > 64) { /* For VF 64 to 127(MAX) */
+		/* Disable FLR interrupts for VFs 64 to 127 */
+ ena_bits = rm->num_vfs - 64 - 1;
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INT_ENA_W1CX(1),
+ GENMASK_ULL(ena_bits, 0));
+ }
+}
+
+static void enable_vf_mbox_int(struct pci_dev *pdev)
+{
+ struct rm_dev *rm;
+ int ena_bits;
+
+ rm = pci_get_drvdata(pdev);
+ /* Clear any pending interrupts */
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFPF_MBOX_INTX(0), ~0x0ULL);
+
+ if (rm->num_vfs > 64) { /* For VF 64 to 127(MAX) */
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFPF_MBOX_INTX(1),
+ ~0x0ULL);
+ }
+
+	/* Enable mailbox interrupts for the first 64 VFs (up to num_vfs) */
+ ena_bits = ((rm->num_vfs - 1) % 64);
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0),
+ GENMASK_ULL(ena_bits, 0));
+
+ if (rm->num_vfs > 64) { /* For VF 64 to 127(MAX) */
+		/* Enable mailbox interrupts for VFs 64 to 127 */
+ ena_bits = rm->num_vfs - 64 - 1;
+ rm_write64(rm, BLKADDR_RVUM, 0,
+ RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
+ GENMASK_ULL(ena_bits, 0));
+ }
+}
+
+static void disable_vf_mbox_int(struct pci_dev *pdev)
+{
+ struct rm_dev *rm;
+ int ena_bits;
+ u64 intr;
+
+ rm = pci_get_drvdata(pdev);
+ /* clear any pending interrupt */
+
+ intr = rm_read64(rm, BLKADDR_RVUM, 0, RVU_PF_VFPF_MBOX_INTX(0));
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFPF_MBOX_INTX(0), intr);
+
+ if (rm->num_vfs > 64) { /* For VF 64 to 127(MAX) */
+ intr = rm_read64(rm, BLKADDR_RVUM, 0, RVU_PF_VFPF_MBOX_INTX(1));
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFPF_MBOX_INTX(1), intr);
+ }
+
+	/* Disable mailbox interrupts for the first 64 VFs (up to num_vfs) */
+ ena_bits = ((rm->num_vfs - 1) % 64);
+ rm_write64(rm, BLKADDR_RVUM, 0, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0),
+ GENMASK_ULL(ena_bits, 0));
+
+ if (rm->num_vfs > 64) { /* For VF 64 to 127(MAX) */
+		/* Disable mailbox interrupts for VFs 64 to 127 */
+ ena_bits = rm->num_vfs - 64 - 1;
+ rm_write64(rm, BLKADDR_RVUM, 0,
+ RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1),
+ GENMASK_ULL(ena_bits, 0));
+ }
+}
+
+static int __sriov_disable(struct pci_dev *pdev)
+{
+ struct rm_dev *rm;
+
+ rm = pci_get_drvdata(pdev);
+ if (pci_vfs_assigned(pdev)) {
+		dev_err(&pdev->dev, "Disabling VFs while VFs are assigned\n");
+ dev_err(&pdev->dev, "VFs will not be freed\n");
+ return -EPERM;
+ }
+
+ disable_vf_flr_int(pdev);
+ disable_vf_mbox_int(pdev);
+
+#ifdef CONFIG_OCTEONTX2_RM_DOM_SYSFS
+ domain_sysfs_destroy(rm);
+#endif
+ vf_sysfs_destroy(pdev);
+
+ if (rm->pfvf_mbox_wq) {
+ flush_workqueue(rm->pfvf_mbox_wq);
+ destroy_workqueue(rm->pfvf_mbox_wq);
+ rm->pfvf_mbox_wq = NULL;
+ }
+ if (rm->pfvf_mbx_base) {
+ iounmap(rm->pfvf_mbx_base);
+ rm->pfvf_mbx_base = NULL;
+ }
+
+ otx2_mbox_destroy(&rm->pfvf_mbox);
+ otx2_mbox_destroy(&rm->pfvf_mbox_up);
+
+ pci_disable_sriov(pdev);
+
+ kfree(rm->vf_info);
+ rm->vf_info = NULL;
+
+ return 0;
+}
+
+static int __sriov_enable(struct pci_dev *pdev, int num_vfs)
+{
+ int curr_vfs, vf = 0;
+ int err;
+ struct rm_dev *rm;
+ struct rvu_vf *vf_ptr;
+ u64 pf_vf_mbox_base;
+
+ curr_vfs = pci_num_vf(pdev);
+ if (!curr_vfs && !num_vfs)
+ return -EINVAL;
+
+ if (curr_vfs) {
+		dev_err(&pdev->dev,
+			"Virtual Functions are already enabled on this device\n");
+ return -EINVAL;
+ }
+ if (num_vfs > RM_MAX_VFS)
+ num_vfs = RM_MAX_VFS;
+
+ rm = pci_get_drvdata(pdev);
+
+ if (rm_get_available_rsrcs(rm)) {
+ dev_err(&pdev->dev, "Failed to get resource limits.\n");
+ return -EFAULT;
+ }
+
+ rm->vf_info = kcalloc(num_vfs, sizeof(struct rvu_vf), GFP_KERNEL);
+ if (rm->vf_info == NULL)
+ return -ENOMEM;
+
+ err = pci_enable_sriov(pdev, num_vfs);
+ if (err) {
+		dev_err(&pdev->dev, "Failed to enable SRIOV VFs: %d\n", err);
+ goto err_enable_sriov;
+ }
+
+ rm->num_vfs = num_vfs;
+
+ /* Map PF-VF mailbox memory */
+	pf_vf_mbox_base = readq(rm->bar2 + RVU_PF_VF_BAR4_ADDR);
+ if (!pf_vf_mbox_base) {
+ dev_err(&pdev->dev, "PF-VF Mailbox address not configured\n");
+ err = -ENOMEM;
+ goto err_mbox_mem_map;
+ }
+ rm->pfvf_mbx_base = ioremap_wc(pf_vf_mbox_base, MBOX_SIZE * num_vfs);
+ if (!rm->pfvf_mbx_base) {
+ dev_err(&pdev->dev,
+ "Mapping of PF-VF mailbox address failed\n");
+ err = -ENOMEM;
+ goto err_mbox_mem_map;
+ }
+ err = otx2_mbox_init(&rm->pfvf_mbox, rm->pfvf_mbx_base, pdev, rm->bar2,
+ MBOX_DIR_PFVF, num_vfs);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Failed to initialize PF/VF MBOX for %d VFs\n",
+ num_vfs);
+ goto err_mbox_init;
+ }
+
+ err = otx2_mbox_init(&rm->pfvf_mbox_up, rm->pfvf_mbx_base, pdev,
+ rm->bar2, MBOX_DIR_PFVF_UP, num_vfs);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Failed to initialize PF/VF MBOX UP for %d VFs\n",
+ num_vfs);
+ goto err_mbox_up_init;
+ }
+
+ /* Allocate a single workqueue for VF/PF mailbox because access to
+ * AF/PF mailbox has to be synchronized.
+ */
+ rm->pfvf_mbox_wq =
+ alloc_workqueue("rm_pfvf_mailbox",
+ WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM, 1);
+ if (rm->pfvf_mbox_wq == NULL) {
+ dev_err(&pdev->dev,
+ "Workqueue allocation failed for PF-VF MBOX\n");
+ err = -ENOMEM;
+ goto err_workqueue_alloc;
+ }
+
+ for (vf = 0; vf < num_vfs; vf++) {
+ vf_ptr = &rm->vf_info[vf];
+ vf_ptr->vf_id = vf;
+ vf_ptr->rm = (void *)rm;
+ vf_ptr->intr_idx = vf % 64;
+ INIT_WORK(&vf_ptr->mbox_wrk, rm_pfvf_mbox_handler);
+ INIT_WORK(&vf_ptr->mbox_wrk_up, rm_pfvf_mbox_handler_up);
+ INIT_WORK(&vf_ptr->pfvf_flr_work, rm_pfvf_flr_handler);
+ }
+
+ err = vf_sysfs_create(pdev);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Failed to initialize VF sysfs entries. Err=%d\n",
+ err);
+ err = -EFAULT;
+ goto err_vf_sysfs_create;
+ }
+
+#ifdef CONFIG_OCTEONTX2_RM_DOM_SYSFS
+ err = domain_sysfs_create(rm);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to create RM domain sysfs\n");
+ err = -EFAULT;
+ goto err_domain_sysfs_create;
+ }
+#endif
+
+ enable_vf_mbox_int(pdev);
+ enable_vf_flr_int(pdev);
+ return num_vfs;
+
+#ifdef CONFIG_OCTEONTX2_RM_DOM_SYSFS
+err_domain_sysfs_create:
+ vf_sysfs_destroy(pdev);
+#endif
+err_vf_sysfs_create:
+	destroy_workqueue(rm->pfvf_mbox_wq);
+err_workqueue_alloc:
+ if (rm->pfvf_mbox_up.dev != NULL)
+ otx2_mbox_destroy(&rm->pfvf_mbox_up);
+err_mbox_up_init:
+ if (rm->pfvf_mbox.dev != NULL)
+ otx2_mbox_destroy(&rm->pfvf_mbox);
+err_mbox_init:
+ iounmap(rm->pfvf_mbx_base);
+err_mbox_mem_map:
+ pci_disable_sriov(pdev);
+err_enable_sriov:
+ kfree(rm->vf_info);
+
+ return err;
+}
+
+static int rm_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ if (num_vfs == 0)
+ return __sriov_disable(pdev);
+ else
+ return __sriov_enable(pdev, num_vfs);
+}
+
+static void rm_remove(struct pci_dev *pdev)
+{
+ struct rm_dev *rm = pci_get_drvdata(pdev);
+
+ spin_lock(&rm_lst_lock);
+ list_del(&rm->list);
+ spin_unlock(&rm_lst_lock);
+
+ if (rm->num_vfs)
+ __sriov_disable(pdev);
+
+ disable_af_mbox_int(pdev);
+ rm_free_flr_irq(pdev);
+ rm_afpf_mbox_term(pdev);
+ rm_free_irqs(pdev);
+
+ if (rm->af_mbx_base)
+ iounmap(rm->af_mbx_base);
+ if (rm->bar2)
+ pcim_iounmap(pdev, rm->bar2);
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ devm_kfree(&pdev->dev, rm);
+}
+
+static struct pci_driver rm_driver = {
+ .name = DRV_NAME,
+ .id_table = rvu_rm_id_table,
+ .probe = rm_probe,
+ .remove = rm_remove,
+ .sriov_configure = rm_sriov_configure,
+};
+
+static int __init otx2_rm_init_module(void)
+{
+ pr_info("%s\n", DRV_NAME);
+
+ spin_lock_init(&rm_lst_lock);
+ return pci_register_driver(&rm_driver);
+}
+
+static void __exit otx2_rm_exit_module(void)
+{
+ pci_unregister_driver(&rm_driver);
+}
+
+module_init(otx2_rm_init_module);
+module_exit(otx2_rm_exit_module);
diff --git a/drivers/soc/marvell/octeontx2-rm/otx2_rm.h b/drivers/soc/marvell/octeontx2-rm/otx2_rm.h
new file mode 100644
index 000000000000..2375e84c1198
--- /dev/null
+++ b/drivers/soc/marvell/octeontx2-rm/otx2_rm.h
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OcteonTX2 RVU Resource Manager driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef RM_H_
+#define RM_H_
+
+#include <linux/device.h>
+#include <linux/workqueue.h>
+#include <linux/pci.h>
+#include "mbox.h"
+#include "quota.h"
+
+#define MAX_DOM_VFS 8
+#define RM_MAX_VFS 128
+/* 12 CGX PFs + max HWVFs - VFs used for domains */
+#define RM_MAX_PORTS (12 + 256 - MAX_DOM_VFS)
+#define NAME_SIZE 32
+
+#define RVU_PFVF_PF_SHIFT 10
+#define RVU_PFVF_PF_MASK 0x3F
+#define RVU_PFVF_FUNC_SHIFT 0
+#define RVU_PFVF_FUNC_MASK 0x3FF
+
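+/*
+ * A pcifunc identifies an RVU PF/VF: bits [15:10] carry the PF number and
+ * bits [9:0] the function, where 0 addresses the PF itself and N addresses
+ * VF N-1 (hence the "vf_id + 1" used when composing pcifunc in otx2_rm.c).
+ */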
+#define RVU_PFFUNC(pf, func) \
+ ((((pf) & RVU_PFVF_PF_MASK) << RVU_PFVF_PF_SHIFT) | \
+ (((func) & RVU_PFVF_FUNC_MASK) << RVU_PFVF_FUNC_SHIFT))
+
+/* PCI device IDs */
+#define PCI_DEVID_OCTEONTX2_RVU_PF 0xA063
+#define PCI_DEVID_OCTEONTX2_PASS1_RVU_PF 0x0063 /* Errata */
+#define PCI_DEVID_OCTEONTX2_RVU_AFVF 0xA0F8
+#define PCI_DEVID_OCTEONTX2_PASS1_RVU_AFVF 0x00F8
+#define PCI_DEVID_OCTEONTX2_RVU_VF 0xA064
+#define PCI_DEVID_OCTEONTX2_PASS1_RVU_VF 0xA064
+
+struct rm_dev;
+
+struct rvu_vf {
+ struct work_struct mbox_wrk;
+ struct work_struct mbox_wrk_up;
+ struct work_struct pfvf_flr_work;
+ struct device_attribute in_use_attr;
+ struct pci_dev *pdev;
+ struct kobject *limits_kobj;
+	/* pointer to the rm_dev (PF) this VF belongs to */
+ struct rm_dev *rm;
+ int vf_id;
+	int intr_idx; /* vf_id % 64: bit index within the interrupt registers */
+ bool in_use;
+ bool got_flr;
+};
+
+struct rvu_limits {
+ struct quotas *sso;
+ struct quotas *ssow;
+ struct quotas *npa;
+ struct quotas *tim;
+ struct quotas *cpt;
+};
+
+struct rm_dev {
+ struct list_head list;
+ struct mutex lock;
+ struct pci_dev *pdev;
+ void __iomem *bar2;
+ void __iomem *af_mbx_base;
+ void __iomem *pfvf_mbx_base;
+#define RM_VF_ENABLED 0x1
+ u32 flags;
+ u32 num_vfs;
+ bool *irq_allocated;
+ char *irq_names;
+ int msix_count;
+ int pf;
+
+ struct otx2_mbox pfvf_mbox; /* MBOXes for VF => PF channel */
+ struct otx2_mbox pfvf_mbox_up; /* MBOXes for PF => VF channel */
+ struct otx2_mbox afpf_mbox; /* MBOX for PF => AF channel */
+ struct otx2_mbox afpf_mbox_up; /* MBOX for AF => PF channel */
+ struct work_struct mbox_wrk;
+ struct work_struct mbox_wrk_up;
+ struct workqueue_struct *afpf_mbox_wq; /* MBOX handler */
+ struct workqueue_struct *pfvf_mbox_wq; /* VF MBOX handler */
+ struct rvu_vf *vf_info;
+ struct free_rsrcs_rsp limits; /* Maximum limits for all VFs */
+ struct rvu_limits vf_limits; /* Limits for each VF */
+};
+
+#endif /* RM_H_ */
diff --git a/drivers/soc/marvell/octeontx2-rm/quota.c b/drivers/soc/marvell/octeontx2-rm/quota.c
new file mode 100644
index 000000000000..361b903cf86c
--- /dev/null
+++ b/drivers/soc/marvell/octeontx2-rm/quota.c
@@ -0,0 +1,192 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OcteonTX2 RVU Resource Manager driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/pci.h>
+#include <linux/sysfs.h>
+
+#include "quota.h"
+
+static ssize_t quota_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ struct quota *quota;
+ int val;
+
+ quota = container_of(attr, struct quota, sysfs);
+
+ if (quota->base->lock)
+ mutex_lock(quota->base->lock);
+ val = quota->val;
+ if (quota->base->lock)
+ mutex_unlock(quota->base->lock);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+
+static ssize_t quota_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct quota *quota;
+ struct quotas *base;
+ struct device *dev;
+ int old_val, new_val, res = 0;
+ u64 lf_sum;
+
+ quota = container_of(attr, struct quota, sysfs);
+ dev = quota->dev;
+ base = quota->base;
+
+ if (kstrtoint(buf, 0, &new_val)) {
+ dev_err(dev, "Invalid %s quota: %s\n", attr->attr.name, buf);
+ return -EIO;
+ }
+ if (new_val < 0) {
+ dev_err(dev, "Invalid %s quota: %d < 0\n", attr->attr.name,
+ new_val);
+ return -EIO;
+ }
+
+ if (new_val > base->max) {
+ dev_err(dev, "Invalid %s quota: %d > %d\n", attr->attr.name,
+ new_val, base->max);
+ return -EIO;
+ }
+
+ if (base->lock)
+ mutex_lock(base->lock);
+ old_val = quota->val;
+
+ if (base->ops.pre_store)
+ res = base->ops.pre_store(quota->ops_arg, quota, new_val);
+
+ if (res != 0) {
+ res = -EIO;
+ goto unlock;
+ }
+
+ lf_sum = quotas_get_sum(quota->base);
+
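+	/*
+	 * Enforce the global budget: the sum of all quotas after this update
+	 * (current sum minus the old value plus the new one) must stay
+	 * within max_sum.
+	 */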
+ if (lf_sum + new_val - quota->val > base->max_sum) {
+ dev_err(dev,
+ "Not enough resources for %s quota. Used: %lld, Max: %lld\n",
+ attr->attr.name, lf_sum, base->max_sum);
+ res = -EIO;
+ goto unlock;
+ }
+ quota->val = new_val;
+
+ if (base->ops.post_store)
+ base->ops.post_store(quota->ops_arg, quota, old_val);
+
+ res = count;
+
+unlock:
+ if (base->lock)
+ mutex_unlock(base->lock);
+ return res;
+}
+
+struct quotas *quotas_alloc(u32 cnt, u32 max, u64 max_sum,
+ int init_val, struct mutex *lock,
+ struct quota_ops *ops)
+{
+ struct quotas *quotas;
+ u64 i;
+
+ if (cnt == 0)
+ return NULL;
+
+ quotas = kzalloc(sizeof(struct quotas) + cnt * sizeof(struct quota),
+ GFP_KERNEL);
+ if (quotas == NULL)
+ return NULL;
+
+ for (i = 0; i < cnt; i++) {
+ quotas->a[i].base = quotas;
+ quotas->a[i].val = init_val;
+ }
+
+ quotas->cnt = cnt;
+ quotas->max = max;
+ quotas->max_sum = max_sum;
+ if (ops) {
+ quotas->ops.pre_store = ops->pre_store;
+ quotas->ops.post_store = ops->post_store;
+ }
+ quotas->lock = lock;
+
+ return quotas;
+}
+
+void quotas_free(struct quotas *quotas)
+{
+ u64 i;
+
+ if (quotas == NULL)
+ return;
+ WARN_ON(quotas->cnt == 0);
+
+ for (i = 0; i < quotas->cnt; i++)
+ quota_sysfs_destroy(&quotas->a[i]);
+
+ kfree(quotas);
+}
+
+int quota_sysfs_create(const char *name, struct kobject *parent,
+ struct device *log_dev, struct quota *quota,
+ void *ops_arg)
+{
+ int err;
+
+ if (name == NULL || quota == NULL || log_dev == NULL)
+ return -EINVAL;
+
+ quota->sysfs.show = quota_show;
+ quota->sysfs.store = quota_store;
+ quota->sysfs.attr.name = name;
+ quota->sysfs.attr.mode = 0644;
+ quota->parent = parent;
+ quota->dev = log_dev;
+ quota->ops_arg = ops_arg;
+
+ sysfs_attr_init(&quota->sysfs.attr);
+ err = sysfs_create_file(quota->parent, &quota->sysfs.attr);
+ if (err) {
+ dev_err(quota->dev,
+ "Failed to create '%s' quota sysfs for '%s'\n",
+ name, kobject_name(quota->parent));
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int quota_sysfs_destroy(struct quota *quota)
+{
+ if (quota == NULL)
+ return -EINVAL;
+ if (quota->sysfs.attr.mode != 0) {
+ sysfs_remove_file(quota->parent, &quota->sysfs.attr);
+ quota->sysfs.attr.mode = 0;
+ }
+ return 0;
+}
+
+u64 quotas_get_sum(struct quotas *quotas)
+{
+ u64 lf_sum = 0;
+ int i;
+
+ for (i = 0; i < quotas->cnt; i++)
+ lf_sum += quotas->a[i].val;
+
+ return lf_sum;
+}
+
diff --git a/drivers/soc/marvell/octeontx2-rm/quota.h b/drivers/soc/marvell/octeontx2-rm/quota.h
new file mode 100644
index 000000000000..84b12f952f4c
--- /dev/null
+++ b/drivers/soc/marvell/octeontx2-rm/quota.h
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OcteonTX2 RVU Resource Manager driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef QUOTA_H_
+#define QUOTA_H_
+
+#include <linux/kobject.h>
+#include <linux/mutex.h>
+
+struct quotas;
+
+struct quota {
+ struct kobj_attribute sysfs;
+ /* Device to scope logs to */
+ struct device *dev;
+	/* Parent kobject under which the sysfs file is created */
+ struct kobject *parent;
+ /* Pointer to base structure */
+ struct quotas *base;
+ /* Argument passed to the quota_ops when this quota is modified */
+ void *ops_arg;
+ /* Value of the quota */
+ int val;
+};
+
+struct quota_ops {
+ /**
+	 * Called before sysfs store(). store() will proceed only if this
+	 * returns 0. It is called with struct quotas::lock taken.
+ */
+ int (*pre_store)(void *arg, struct quota *quota, int new_val);
+ /** called after sysfs store(). */
+ void (*post_store)(void *arg, struct quota *quota, int old_val);
+};
+
+struct quotas {
+ struct quota_ops ops;
+ struct mutex *lock; /* lock taken for each sysfs operation */
+ u32 cnt; /* number of elements in arr */
+ u32 max; /* maximum value for a single quota */
+ u64 max_sum; /* maximum sum of all quotas */
+ struct quota a[0]; /* array of quota assignments */
+};
+
+/**
+ * Allocate and setup quotas structure.
+ *
+ * @p cnt number of quotas to allocate
+ * @p max maximum value of a single quota
+ * @p max_sum maximum sum of all quotas
+ * @p init_val initial value set to all quotas
+ * @p lock mutex taken around each sysfs read/write (may be NULL)
+ * @p ops callbacks for sysfs manipulation notifications
+ */
+struct quotas *quotas_alloc(u32 cnt, u32 max, u64 max_sum,
+ int init_val, struct mutex *lock,
+ struct quota_ops *ops);
+/**
+ * Frees quota array and any sysfs entries associated with it.
+ */
+void quotas_free(struct quotas *quotas);
+
+/**
+ * Create a sysfs entry controlling the given quota entry.
+ *
+ * The file created under parent reads back the current value of the quota;
+ * a write takes the quotas lock and checks that the new value does not
+ * exceed the configured maximums.
+ *
+ * @return 0 if succeeded, negative error code otherwise.
+ */
+int quota_sysfs_create(const char *name, struct kobject *parent,
+ struct device *log_dev, struct quota *quota,
+ void *ops_arg);
+/**
+ * Remove sysfs entry for a given quota if it was created.
+ */
+int quota_sysfs_destroy(struct quota *quota);
+
+/**
+ * Return the current sum of values across all quotas.
+ */
+u64 quotas_get_sum(struct quotas *quotas);
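+
+/*
+ * Typical usage, as a sketch (mirrors what vf_sysfs_create() in otx2_rm.c
+ * does; the variable names here are only illustrative):
+ *
+ *	struct quotas *q = quotas_alloc(num_vfs, max_per_vf, max_total, 0,
+ *					&rm->lock, &ops);
+ *	if (q)
+ *		quota_sysfs_create("sso", vf_limits_kobj, &vf_pdev->dev,
+ *				   &q->a[i], vf);
+ */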
+
+#endif /* QUOTA_H_ */
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 11279dcc4a3e..d651c48b8da7 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -506,6 +506,15 @@ config SPI_OCTEON
SPI host driver for the hardware found on some Cavium OCTEON
SOCs.
+config SPI_OCTEONTX2
+ tristate "Marvell OcteonTX2 SPI controller"
+ depends on PCI && 64BIT && (ARM64 || COMPILE_TEST)
+ help
+ This driver supports the OcteonTX2 SPI controller in master
+ mode. It supports single, dual and quad mode transfers.
+	  This controller hardware is found on some Marvell
+ OcteonTX2 SoCs.
+
config SPI_OMAP_UWIRE
tristate "OMAP1 MicroWire"
depends on ARCH_OMAP1
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 28f601327f8c..088724c794e6 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -74,6 +74,7 @@ obj-$(CONFIG_SPI_NXP_FLEXSPI) += spi-nxp-fspi.o
obj-$(CONFIG_SPI_OC_TINY) += spi-oc-tiny.o
spi-octeon-objs := spi-cavium.o spi-cavium-octeon.o
obj-$(CONFIG_SPI_OCTEON) += spi-octeon.o
+obj-$(CONFIG_SPI_OCTEONTX2) += spi-octeontx2.o
obj-$(CONFIG_SPI_OMAP_UWIRE) += spi-omap-uwire.o
obj-$(CONFIG_SPI_OMAP_100K) += spi-omap-100k.o
obj-$(CONFIG_SPI_OMAP24XX) += spi-omap2-mcspi.o
diff --git a/drivers/spi/spi-cavium-thunderx.c b/drivers/spi/spi-cavium-thunderx.c
index fd6b9caffaf0..ce588b47c0f0 100644
--- a/drivers/spi/spi-cavium-thunderx.c
+++ b/drivers/spi/spi-cavium-thunderx.c
@@ -16,6 +16,11 @@
#define SYS_FREQ_DEFAULT 700000000 /* 700 Mhz */
+#define PCI_DEVICE_ID_THUNDER_SPI 0xA00B
+#define PCI_SUBSYS_DEVID_88XX_SPI 0xA10B
+#define PCI_SUBSYS_DEVID_81XX_SPI 0xA20B
+#define PCI_SUBSYS_DEVID_83XX_SPI 0xA30B
+
static int thunderx_spi_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -103,7 +108,18 @@ static void thunderx_spi_remove(struct pci_dev *pdev)
}
static const struct pci_device_id thunderx_spi_pci_id_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xa00b) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_THUNDER_SPI,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_88XX_SPI) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_THUNDER_SPI,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_81XX_SPI) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_THUNDER_SPI,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_83XX_SPI) },
{ 0, }
};
diff --git a/drivers/spi/spi-octeontx2.c b/drivers/spi/spi-octeontx2.c
new file mode 100644
index 000000000000..ead6c234f9e2
--- /dev/null
+++ b/drivers/spi/spi-octeontx2.c
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Marvell OcteonTX2 SPI driver.
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ */
+
+#include <linux/of.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/spi/spi.h>
+
+#include "spi-octeontx2.h"
+
+#define DRV_NAME "spi-octeontx2"
+
+#define TBI_FREQ 100000000 /* 100 MHz */
+#define SYS_FREQ_DEFAULT 700000000 /* 700 MHz */
+
+static unsigned int tbi_clk_en = 1;
+module_param(tbi_clk_en, uint, 0644);
+MODULE_PARM_DESC(tbi_clk_en,
+ "Use Fixed Time Base 100MHz Reference Clock (0=Disable, 1=Enable [default])");
+
+static unsigned int cfg_mode_delay = 30;
+module_param(cfg_mode_delay, uint, 0644);
+MODULE_PARM_DESC(cfg_mode_delay,
+ "Delay in micro-seconds for mode change in MPI CFG register (30 [default])");
+
+static void octeontx2_spi_wait_ready(struct octeontx2_spi *p)
+{
+ union mpix_sts mpi_sts;
+ unsigned int loops = 0;
+
+ do {
+ if (loops++)
+ __delay(500);
+ mpi_sts.u64 = readq(p->register_base + OCTEONTX2_SPI_STS(p));
+ } while (mpi_sts.s.busy);
+}
+
+static int octeontx2_spi_do_transfer(struct octeontx2_spi *p,
+ struct spi_message *msg,
+ struct spi_transfer *xfer,
+ bool last_xfer,
+ int cs)
+{
+ struct spi_device *spi = msg->spi;
+ union mpix_cfg mpi_cfg;
+ union mpix_xmit mpi_xmit;
+ unsigned int clkdiv, calc_spd;
+ int mode;
+ bool cpha, cpol;
+ const u8 *tx_buf;
+ u8 *rx_buf;
+ int len, rem;
+ int i;
+ void __iomem *wbuf_ptr = p->register_base + OCTEONTX2_SPI_WBUF(p);
+
+ mode = spi->mode;
+ cpha = mode & SPI_CPHA;
+ cpol = mode & SPI_CPOL;
+
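+	/*
+	 * The SPI clock is the reference clock divided by (2 * clkdiv);
+	 * start from the integer divider and bump it until the resulting
+	 * rate does not exceed the requested transfer speed.
+	 */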
+ clkdiv = p->sys_freq / (2 * xfer->speed_hz);
+ /* Perform check to not exceed requested speed */
+ while (1) {
+ calc_spd = p->sys_freq / (2 * clkdiv);
+ if (calc_spd <= xfer->speed_hz)
+ break;
+ clkdiv += 1;
+ }
+
+ if ((clkdiv > 8191) || (!tbi_clk_en && (clkdiv == 1))) {
+ dev_err(&spi->dev,
+ "can't support xfer->speed_hz %d for reference clock %d\n",
+ xfer->speed_hz, p->sys_freq);
+ return -EINVAL;
+ }
+
+ mpi_cfg.u64 = 0;
+
+ mpi_cfg.s.clkdiv = clkdiv;
+ mpi_cfg.s.cshi = (mode & SPI_CS_HIGH) ? 1 : 0;
+ mpi_cfg.s.lsbfirst = (mode & SPI_LSB_FIRST) ? 1 : 0;
+ mpi_cfg.s.wireor = (mode & SPI_3WIRE) ? 1 : 0;
+ mpi_cfg.s.idlelo = cpha != cpol;
+ mpi_cfg.s.cslate = cpha ? 1 : 0;
+ mpi_cfg.s.tritx = 1;
+ mpi_cfg.s.enable = 1;
+ mpi_cfg.s.cs_sticky = 1;
+ mpi_cfg.s.legacy_dis = 1;
+ if (tbi_clk_en)
+ mpi_cfg.s.tb100_en = 1;
+
+ /* Set x1 mode as default */
+ mpi_cfg.s.iomode = 0;
+ /* Set x2 mode if either tx or rx request dual */
+ if ((xfer->tx_nbits == SPI_NBITS_DUAL) ||
+ (xfer->rx_nbits == SPI_NBITS_DUAL))
+ mpi_cfg.s.iomode = 2;
+ /* Set x4 mode if either tx or rx request quad */
+ if ((xfer->tx_nbits == SPI_NBITS_QUAD) ||
+ (xfer->rx_nbits == SPI_NBITS_QUAD))
+ mpi_cfg.s.iomode = 3;
+
+ p->cs_enax |= (0xFull << 12);
+ mpi_cfg.u64 |= p->cs_enax;
+
+ if (mpi_cfg.u64 != p->last_cfg) {
+ p->last_cfg = mpi_cfg.u64;
+ writeq(mpi_cfg.u64, p->register_base + OCTEONTX2_SPI_CFG(p));
+ mpi_cfg.u64 = readq(p->register_base + OCTEONTX2_SPI_CFG(p));
+ udelay(cfg_mode_delay); /* allow CS change to settle */
+ }
+ tx_buf = xfer->tx_buf;
+ rx_buf = xfer->rx_buf;
+ len = xfer->len;
+
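+	/*
+	 * The hardware data buffer holds at most OCTEONTX2_SPI_MAX_BYTES per
+	 * MPI_XMIT operation, so longer transfers are split into full-buffer
+	 * chunks with the chip select kept asserted (leavecs = 1) in between.
+	 */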
+ while (len > OCTEONTX2_SPI_MAX_BYTES) {
+ if (tx_buf) {
+ /* 8 bytes per iteration */
+ for (i = 0; i < OCTEONTX2_SPI_MAX_BYTES / 8; i++) {
+ u64 data = *(uint64_t *)tx_buf;
+
+ tx_buf += 8;
+ writeq(data, wbuf_ptr + (8 * i));
+ }
+ }
+ mpi_xmit.u64 = 0;
+ mpi_xmit.s.csid = cs;
+ mpi_xmit.s.leavecs = 1;
+ mpi_xmit.s.txnum = tx_buf ? OCTEONTX2_SPI_MAX_BYTES : 0;
+ mpi_xmit.s.totnum = OCTEONTX2_SPI_MAX_BYTES;
+ writeq(mpi_xmit.u64, p->register_base + OCTEONTX2_SPI_XMIT(p));
+
+ octeontx2_spi_wait_ready(p);
+ if (rx_buf) {
+ /* 8 bytes per iteration */
+ for (i = 0; i < OCTEONTX2_SPI_MAX_BYTES / 8; i++) {
+ u64 v = readq(wbuf_ptr + (8 * i));
+ *(uint64_t *)rx_buf = v;
+ rx_buf += 8;
+ }
+ }
+ len -= OCTEONTX2_SPI_MAX_BYTES;
+ }
+
+ rem = len % 8;
+
+ if (tx_buf) {
+ u64 data;
+ /* 8 bytes per iteration */
+ for (i = 0; i < len / 8; i++) {
+ data = *(uint64_t *)tx_buf;
+ tx_buf += 8;
+ writeq(data, wbuf_ptr + (8 * i));
+ }
+ /* remaining <8 bytes */
+ if (rem) {
+ data = 0;
+ memcpy(&data, tx_buf, rem);
+ writeq(data, wbuf_ptr + (8 * i));
+ }
+ }
+
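+	/*
+	 * For the last transfer of a message, cs_change set means keep the
+	 * chip select asserted afterwards; for any other transfer it means
+	 * the opposite, so the hardware leavecs bit mirrors that.
+	 */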
+ mpi_xmit.u64 = 0;
+ mpi_xmit.s.csid = cs;
+ if (last_xfer)
+ mpi_xmit.s.leavecs = xfer->cs_change;
+ else
+ mpi_xmit.s.leavecs = !xfer->cs_change;
+ mpi_xmit.s.txnum = tx_buf ? len : 0;
+ mpi_xmit.s.totnum = len;
+ writeq(mpi_xmit.u64, p->register_base + OCTEONTX2_SPI_XMIT(p));
+
+ octeontx2_spi_wait_ready(p);
+ if (rx_buf) {
+ u64 v;
+ /* 8 bytes per iteration */
+ for (i = 0; i < len / 8; i++) {
+ v = readq(wbuf_ptr + (8 * i));
+ *(uint64_t *)rx_buf = v;
+ rx_buf += 8;
+ }
+ /* remaining <8 bytes */
+ if (rem) {
+ v = readq(wbuf_ptr + (8 * i));
+ memcpy(rx_buf, &v, rem);
+ rx_buf += rem;
+ }
+ }
+
+ if (xfer->delay_usecs)
+ udelay(xfer->delay_usecs);
+
+ return xfer->len;
+}
+
+int octeontx2_spi_transfer_one_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct octeontx2_spi *p = spi_master_get_devdata(master);
+ unsigned int total_len = 0;
+ int status = 0;
+ struct spi_transfer *xfer;
+ int cs = msg->spi->chip_select;
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ bool last_xfer = list_is_last(&xfer->transfer_list,
+ &msg->transfers);
+ int r = octeontx2_spi_do_transfer(p, msg, xfer, last_xfer, cs);
+
+ if (r < 0) {
+ status = r;
+ goto err;
+ }
+ total_len += r;
+ }
+err:
+ msg->status = status;
+ msg->actual_length = total_len;
+ spi_finalize_current_message(master);
+ return status;
+}
+
+static int octeontx2_spi_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ struct spi_master *master;
+ struct octeontx2_spi *p;
+ int ret = -ENOENT;
+
+ /* may need to hunt for devtree entry */
+ if (!pdev->dev.of_node) {
+ struct device_node *np = of_find_node_by_name(NULL, "spi");
+
+		if (!np) {
+			ret = -ENOENT;
+			goto error;
+		}
+ pdev->dev.of_node = np;
+ of_node_put(np);
+ }
+
+ master = spi_alloc_master(dev, sizeof(struct octeontx2_spi));
+ if (!master)
+ return -ENOMEM;
+
+ p = spi_master_get_devdata(master);
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ goto error_put;
+
+ ret = pci_request_regions(pdev, DRV_NAME);
+ if (ret)
+ goto error_disable;
+
+ p->register_base = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0));
+ if (!p->register_base) {
+ ret = -EINVAL;
+ goto error_disable;
+ }
+
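+	/* MPI/SPI CSR offsets within PCI BAR0 */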
+ p->regs.config = 0x1000;
+ p->regs.status = 0x1008;
+ p->regs.xmit = 0x1018;
+ p->regs.wbuf = 0x1800;
+ p->last_cfg = 0x0;
+
+	/* FIXME: need a proper clocksource object for SCLK */
+	p->clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(p->clk))
+		p->clk = devm_clk_get(dev, "sclk");
+	if (IS_ERR(p->clk)) {
+		/* No usable clock; fall back to the default frequency below */
+		p->clk = NULL;
+		p->sys_freq = 0;
+	} else {
+		ret = clk_prepare_enable(p->clk);
+		if (!ret)
+			p->sys_freq = clk_get_rate(p->clk);
+	}
+
+ if (!p->sys_freq)
+ p->sys_freq = SYS_FREQ_DEFAULT;
+ if (tbi_clk_en)
+ p->sys_freq = TBI_FREQ;
+	dev_info(dev, "Reference clock is %d Hz\n", p->sys_freq);
+
+ master->num_chipselect = 4;
+ master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_CS_HIGH |
+ SPI_LSB_FIRST | SPI_3WIRE |
+ SPI_TX_DUAL | SPI_RX_DUAL |
+ SPI_TX_QUAD | SPI_RX_QUAD;
+ master->transfer_one_message = octeontx2_spi_transfer_one_message;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->max_speed_hz = OCTEONTX2_SPI_MAX_CLOCK_HZ;
+ master->dev.of_node = pdev->dev.of_node;
+
+ pci_set_drvdata(pdev, master);
+
+ ret = devm_spi_register_master(dev, master);
+ if (ret)
+ goto error_disable;
+
+ return 0;
+
+error_disable:
+ clk_disable_unprepare(p->clk);
+error_put:
+ spi_master_put(master);
+error:
+ return ret;
+}
+
+static void octeontx2_spi_remove(struct pci_dev *pdev)
+{
+ struct spi_master *master = pci_get_drvdata(pdev);
+ struct octeontx2_spi *p;
+
+	p = spi_master_get_devdata(master);
+	if (p) {
+		clk_disable_unprepare(p->clk);
+		/* Put everything in a known state. */
+		writeq(0, p->register_base + OCTEONTX2_SPI_CFG(p));
+	}
+
+ pci_disable_device(pdev);
+ spi_master_put(master);
+}
+
+static const struct pci_device_id octeontx2_spi_pci_id_table[] = {
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_SPI,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_OTX2_96XX) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_SPI,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_OTX2_95XX) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_SPI,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_OTX2_LOKI) },
+
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, octeontx2_spi_pci_id_table);
+
+static struct pci_driver octeontx2_spi_driver = {
+ .name = DRV_NAME,
+ .id_table = octeontx2_spi_pci_id_table,
+ .probe = octeontx2_spi_probe,
+ .remove = octeontx2_spi_remove,
+};
+
+module_pci_driver(octeontx2_spi_driver);
+
+MODULE_DESCRIPTION("OcteonTX2 SPI bus driver");
+MODULE_AUTHOR("Marvell Inc.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-octeontx2.h b/drivers/spi/spi-octeontx2.h
new file mode 100644
index 000000000000..84dfae89a8f8
--- /dev/null
+++ b/drivers/spi/spi-octeontx2.h
@@ -0,0 +1,148 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __SPI_OCTEONTX2_H
+#define __SPI_OCTEONTX2_H
+
+#include <linux/clk.h>
+
+#define PCI_DEVID_OCTEONTX2_SPI 0xA00B
+#define PCI_SUBSYS_DEVID_OTX2_96XX 0xB200
+#define PCI_SUBSYS_DEVID_OTX2_95XX 0xB300
+#define PCI_SUBSYS_DEVID_OTX2_LOKI 0xB400
+
+#define OCTEONTX2_SPI_MAX_BYTES 1024
+#define OCTEONTX2_SPI_MAX_CLOCK_HZ 25000000
+
+struct octeontx2_spi_regs {
+ int config;
+ int status;
+ int xmit;
+ int wbuf;
+};
+
+struct octeontx2_spi {
+ void __iomem *register_base;
+ u64 last_cfg;
+ u64 cs_enax;
+ int sys_freq;
+ struct octeontx2_spi_regs regs;
+ struct clk *clk;
+};
+
+#define OCTEONTX2_SPI_CFG(x) ((x)->regs.config)
+#define OCTEONTX2_SPI_STS(x) ((x)->regs.status)
+#define OCTEONTX2_SPI_XMIT(x) ((x)->regs.xmit)
+#define OCTEONTX2_SPI_WBUF(x) ((x)->regs.wbuf)
+
+int octeontx2_spi_transfer_one_message(struct spi_master *master,
+ struct spi_message *msg);
+
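+/* MPI(x)_CFG: clock divider, chip-select behaviour and I/O mode */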
+union mpix_cfg {
+ uint64_t u64;
+ struct mpix_cfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_50_63:14;
+ uint64_t tb100_en:1;
+ uint64_t reserved_48:1;
+ uint64_t cs_espi_en:4;
+ uint64_t reserved_36_43:8;
+ uint64_t iomode:2;
+ uint64_t reserved_32_33:2;
+ uint64_t legacy_dis:1;
+ uint64_t reserved_29_30:2;
+ uint64_t clkdiv:13;
+ uint64_t csena3:1;
+ uint64_t csena2:1;
+ uint64_t csena1:1;
+ uint64_t csena0:1;
+ uint64_t cslate:1;
+ uint64_t tritx:1;
+ uint64_t idleclks:2;
+ uint64_t cshi:1;
+ uint64_t reserved_6:1;
+ uint64_t cs_sticky:1;
+ uint64_t lsbfirst:1;
+ uint64_t wireor:1;
+ uint64_t clk_cont:1;
+ uint64_t idlelo:1;
+ uint64_t enable:1;
+#else
+ uint64_t enable:1;
+ uint64_t idlelo:1;
+ uint64_t clk_cont:1;
+ uint64_t wireor:1;
+ uint64_t lsbfirst:1;
+ uint64_t cs_sticky:1;
+ uint64_t reserved_6:1;
+ uint64_t cshi:1;
+ uint64_t idleclks:2;
+ uint64_t tritx:1;
+ uint64_t cslate:1;
+ uint64_t csena0:1;
+ uint64_t csena1:1;
+ uint64_t csena2:1;
+ uint64_t csena3:1;
+ uint64_t clkdiv:13;
+ uint64_t reserved_29_30:2;
+ uint64_t legacy_dis:1;
+ uint64_t reserved_32_33:2;
+ uint64_t iomode:2;
+ uint64_t reserved_36_43:8;
+ uint64_t cs_espi_en:4;
+ uint64_t reserved_48:1;
+ uint64_t tb100_en:1;
+ uint64_t reserved_50_63:14;
+#endif
+ } s;
+};
+
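+/* MPI(x)_STS: transfer status (BUSY flag, received byte count, CRC) */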
+union mpix_sts {
+ uint64_t u64;
+ struct mpix_sts_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_40_63:24;
+ uint64_t crc:8;
+ uint64_t reserved_27_31:5;
+ uint64_t crc_err:1;
+ uint64_t reserved_19_25:7;
+ uint64_t rxnum:11;
+ uint64_t reserved_2_7:6;
+ uint64_t mpi_intr:1;
+ uint64_t busy:1;
+#else
+ uint64_t busy:1;
+ uint64_t mpi_intr:1;
+ uint64_t reserved_2_7:6;
+ uint64_t rxnum:11;
+ uint64_t reserved_19_25:7;
+ uint64_t crc_err:1;
+ uint64_t reserved_27_31:5;
+ uint64_t crc:8;
+ uint64_t reserved_40_63:24;
+#endif
+ } s;
+};
+
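+/* MPI(x)_XMIT: kick off a transfer of TOTNUM bytes, TXNUM from the write buffer */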
+union mpix_xmit {
+ uint64_t u64;
+ struct mpix_xmit_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_63:1;
+ uint64_t csid:2;
+ uint64_t leavecs:1;
+ uint64_t reserved_31_59:29;
+ uint64_t txnum:11;
+ uint64_t reserved_11_19:9;
+ uint64_t totnum:11;
+#else
+ uint64_t totnum:11;
+ uint64_t reserved_11_19:9;
+ uint64_t txnum:11;
+ uint64_t reserved_31_59:29;
+ uint64_t leavecs:1;
+ uint64_t csid:2;
+ uint64_t reserved_63:1;
+#endif
+ } s;
+};
+#endif /* __SPI_OCTEONTX2_H */
diff --git a/include/acpi/apei.h b/include/acpi/apei.h
index 680f80960c3d..5b821a003d7c 100644
--- a/include/acpi/apei.h
+++ b/include/acpi/apei.h
@@ -33,8 +33,10 @@ extern bool ghes_disable;
#ifdef CONFIG_ACPI_APEI
void __init acpi_hest_init(void);
+void __init hest_table_set(struct acpi_table_hest *table);
#else
static inline void acpi_hest_init(void) { return; }
+static inline void hest_table_set(struct acpi_table_hest *table) { return; }
#endif
typedef int (*apei_hest_func_t)(struct acpi_hest_header *hest_hdr, void *data);
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index bd1ee9039558..5646f3cfa519 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -252,6 +252,7 @@ struct clk_ops {
int (*init)(struct clk_hw *hw);
void (*terminate)(struct clk_hw *hw);
void (*debug_init)(struct clk_hw *hw, struct dentry *dentry);
+ int (*get_available_rates)(struct clk_hw *hw, u64 *rate);
};
/**
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index 09f0565a5de3..8afc772878a3 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -150,6 +150,12 @@ struct coresight_connection {
struct coresight_device *child_dev;
};
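+/*
+ * Run state of the trace hardware: stopped or started on user request, or
+ * stopped by software.
+ */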
+enum hw_state {
+ USR_STOP,
+ SW_STOP,
+ USR_START,
+};
+
/**
* struct coresight_device - representation of a device as used by the framework
* @pdata: Platform data with device connections associated to this device.
@@ -161,6 +167,7 @@ struct coresight_connection {
* @refcnt: keep track of what is in use.
* @orphan: true if the component has connections that haven't been linked.
* @enable: 'true' if component is currently part of an active path.
+ * @hw_state: run state of the trace hardware (USR_STOP, SW_STOP or USR_START).
* @activated: 'true' only if a _sink_ has been activated. A sink can be
* activated but not yet enabled. Enabling for a _sink_
* appens when a source has been selected for that it.
@@ -178,6 +185,7 @@ struct coresight_device {
bool orphan;
bool enable; /* true only if configured as part of a path */
/* sink specific fields */
+ int hw_state;
bool activated; /* true only if a sink is part of a path */
struct dev_ext_attribute *ea;
/* cross trigger handling */
@@ -222,6 +230,8 @@ static struct coresight_dev_list (var) = { \
* @alloc_buffer: initialises perf's ring buffer for trace collection.
* @free_buffer: release memory allocated in @get_config.
* @update_buffer: update buffer pointers after a trace session.
+ * @register_source: Add reference to source
+ * @unregister_source: Remove reference to source
*/
struct coresight_ops_sink {
int (*enable)(struct coresight_device *csdev, u32 mode, void *data);
@@ -233,6 +243,9 @@ struct coresight_ops_sink {
unsigned long (*update_buffer)(struct coresight_device *csdev,
struct perf_output_handle *handle,
void *sink_config);
+ void (*register_source)(struct coresight_device *sink_dev,
+ void *source);
+ void (*unregister_source)(struct coresight_device *sink_dev);
};
/**
@@ -255,6 +268,8 @@ struct coresight_ops_link {
* to the HW.
* @enable: enables tracing for a source.
* @disable: disables tracing for a source.
+ * @enable_raw: Raw HW enable, without any other register configuration
+ * @disable_raw: Raw HW disable
*/
struct coresight_ops_source {
int (*cpu_id)(struct coresight_device *csdev);
@@ -263,6 +278,8 @@ struct coresight_ops_source {
struct perf_event *event, u32 mode);
void (*disable)(struct coresight_device *csdev,
struct perf_event *event);
+ void (*enable_raw)(struct coresight_device *csdev);
+ void (*disable_raw)(struct coresight_device *csdev);
};
/**
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 9b7a8d74a9d6..98df02dfafb8 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -326,6 +326,12 @@ extern int oops_may_print(void);
void do_exit(long error_code) __noreturn;
void complete_and_exit(struct completion *, long) __noreturn;
+#ifdef CONFIG_MRVL_OCTEONTX_EL0_INTR
+struct task_struct;
+int task_cleanup_handler_add(void (*handler)(struct task_struct *));
+int task_cleanup_handler_remove(void (*handler)(struct task_struct *));
+#endif
+
/* Internal, do not use. */
int __must_check _kstrtoul(const char *s, unsigned int base, unsigned long *res);
int __must_check _kstrtol(const char *s, unsigned int base, long *res);
diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h
index 5c873a59b387..3d7b29cecccc 100644
--- a/include/linux/scmi_protocol.h
+++ b/include/linux/scmi_protocol.h
@@ -74,6 +74,8 @@ struct scmi_clk_ops {
u64 rate);
int (*enable)(const struct scmi_handle *handle, u32 clk_id);
int (*disable)(const struct scmi_handle *handle, u32 clk_id);
+ int (*available_rates)(const struct scmi_handle *handle, u32 clk_id,
+ u64 *rates);
};
/**
diff --git a/kernel/exit.c b/kernel/exit.c
index d56fe51bdf07..0f690dd628c5 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -703,6 +703,68 @@ static void check_stack_usage(void)
static inline void check_stack_usage(void) {}
#endif
+#ifdef CONFIG_MRVL_OCTEONTX_EL0_INTR
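+/*
+ * Per-task cleanup callbacks, invoked from do_exit() just before exit_mm().
+ * Modules such as the OcteonTX EL0 interrupt support register a handler so
+ * that resources tied to the exiting task can be released.
+ */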
+struct task_cleanup_handler {
+ void (*handler)(struct task_struct *);
+ struct list_head list;
+};
+
+static DEFINE_MUTEX(task_cleanup_handlers_mutex);
+static LIST_HEAD(task_cleanup_handlers);
+
+int task_cleanup_handler_add(void (*handler)(struct task_struct *))
+{
+ struct task_cleanup_handler *newhandler;
+
+	newhandler = kmalloc(sizeof(*newhandler), GFP_KERNEL);
+	if (!newhandler)
+		return -1;
+ newhandler->handler = handler;
+ mutex_lock(&task_cleanup_handlers_mutex);
+ list_add(&newhandler->list, &task_cleanup_handlers);
+ mutex_unlock(&task_cleanup_handlers_mutex);
+ return 0;
+}
+EXPORT_SYMBOL(task_cleanup_handler_add);
+
+int task_cleanup_handler_remove(void (*handler)(struct task_struct *))
+{
+ struct list_head *pos, *tmppos;
+ struct task_cleanup_handler *curr_task_cleanup_handler;
+ int retval = -1;
+
+ mutex_lock(&task_cleanup_handlers_mutex);
+ list_for_each_safe(pos, tmppos, &task_cleanup_handlers) {
+ curr_task_cleanup_handler
+ = list_entry(pos, struct task_cleanup_handler, list);
+ if (curr_task_cleanup_handler->handler == handler) {
+ list_del(pos);
+ kfree(curr_task_cleanup_handler);
+ retval = 0;
+ }
+ }
+ mutex_unlock(&task_cleanup_handlers_mutex);
+ return retval;
+}
+EXPORT_SYMBOL(task_cleanup_handler_remove);
+
+static void task_cleanup_handlers_call(struct task_struct *task)
+{
+ struct list_head *pos;
+ struct task_cleanup_handler *curr_task_cleanup_handler;
+
+ mutex_lock(&task_cleanup_handlers_mutex);
+ list_for_each(pos, &task_cleanup_handlers) {
+ curr_task_cleanup_handler =
+ list_entry(pos, struct task_cleanup_handler, list);
+ if (curr_task_cleanup_handler->handler != NULL)
+ curr_task_cleanup_handler->handler(task);
+ }
+ mutex_unlock(&task_cleanup_handlers_mutex);
+}
+#endif
+
void __noreturn do_exit(long code)
{
struct task_struct *tsk = current;
@@ -787,6 +849,10 @@ void __noreturn do_exit(long code)
tsk->exit_code = code;
taskstats_exit(tsk, group_dead);
+#ifdef CONFIG_MRVL_OCTEONTX_EL0_INTR
+ task_cleanup_handlers_call(tsk);
+#endif
+
exit_mm();
if (group_dead)