Diffstat (limited to 'drivers/net/ethernet/marvell')
-rw-r--r--  drivers/net/ethernet/marvell/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/marvell/mvmdio.c | 22
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2.h | 586
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c | 1174
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h | 182
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c | 309
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 4219
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c | 368
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h | 10
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/Kconfig | 11
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/Makefile | 1
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/Makefile | 16
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx.c | 1479
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx.h | 90
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h | 105
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/common.h | 48
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h | 145
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mbox.c | 68
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mbox.h | 1223
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/npc.h | 265
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h | 3775
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/ptp.c | 346
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/ptp.h | 6
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rpm.c | 464
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rpm.h | 99
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.c | 1252
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.h | 499
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c | 727
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c | 568
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c | 1110
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c | 2226
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c | 1817
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h | 82
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_fixes.c | 1009
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_fixes.h | 18
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 2858
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c | 27
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c | 1655
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c | 1434
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_ree.c | 1242
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c | 13
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h | 561
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c | 108
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_sso.c | 1661
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h | 835
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c | 259
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_tim.c | 461
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c | 5
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h | 15
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_validation.c | 984
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_validation.h | 69
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/bphy/Makefile | 12
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/bphy/bphy_common.h | 39
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/bphy/bphy_netdev_comm_if.h | 45
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_bphy_hw.h | 482
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_bphy_netdev_comm_if.h | 296
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_rfoe.c | 1427
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_rfoe.h | 142
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_rfoe_ethtool.c | 149
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_rfoe_ptp.c | 79
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy.h | 74
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy_debugfs.c | 165
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy_debugfs.h | 31
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy_hw.h | 381
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy_main.c | 887
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/bphy/otx2_cpri.c | 755
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/bphy/otx2_cpri.h | 150
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/bphy/otx2_cpri_ethtool.c | 102
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/bphy/otx2_rfoe.c | 1697
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/bphy/otx2_rfoe.h | 227
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/bphy/otx2_rfoe_ethtool.c | 152
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/bphy/otx2_rfoe_ptp.c | 268
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/bphy/rfoe_bphy_netdev_comm_if.h | 190
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/bphy/rfoe_common.h | 132
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/Makefile | 16
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c | 482
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h | 42
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c | 511
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h | 385
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c | 170
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c | 300
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.h | 20
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c | 174
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c | 958
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c | 1478
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 826
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c | 218
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h | 21
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h | 13
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_smqvf.c | 282
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h | 26
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c | 1065
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c | 503
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h | 26
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c | 267
95 files changed, 46191 insertions, 5981 deletions
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index 41815b609569..a6a74d4227da 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -85,6 +85,7 @@ config MVNETA_BM
config MVPP2
tristate "Marvell Armada 375/7K/8K network interface support"
depends on ARCH_MVEBU || COMPILE_TEST
+ depends on NET_DSA
select MVMDIO
select PHYLINK
select PAGE_POOL
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index d14762d93640..52459a13e5c1 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -80,6 +80,18 @@ enum orion_mdio_bus_type {
BUS_TYPE_XSMI
};
+struct orion_mdio_data {
+ enum orion_mdio_bus_type bus_type;
+};
+
+const struct orion_mdio_data smi_bus = {
+ .bus_type = BUS_TYPE_SMI,
+};
+
+const struct orion_mdio_data xsmi_bus = {
+ .bus_type = BUS_TYPE_XSMI,
+};
+
struct orion_mdio_ops {
int (*is_done)(struct orion_mdio_dev *);
unsigned int poll_interval_min;
@@ -275,13 +287,13 @@ static irqreturn_t orion_mdio_err_irq(int irq, void *dev_id)
static int orion_mdio_probe(struct platform_device *pdev)
{
- enum orion_mdio_bus_type type;
+ struct orion_mdio_data *data;
struct resource *r;
struct mii_bus *bus;
struct orion_mdio_dev *dev;
int i, ret;
- type = (enum orion_mdio_bus_type)of_device_get_match_data(&pdev->dev);
+ data = (struct orion_mdio_data *)of_device_get_match_data(&pdev->dev);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r) {
@@ -294,7 +306,7 @@ static int orion_mdio_probe(struct platform_device *pdev)
if (!bus)
return -ENOMEM;
- switch (type) {
+ switch (data->bus_type) {
case BUS_TYPE_SMI:
bus->read = orion_mdio_smi_read;
bus->write = orion_mdio_smi_write;
@@ -415,8 +427,8 @@ static int orion_mdio_remove(struct platform_device *pdev)
}
static const struct of_device_id orion_mdio_match[] = {
- { .compatible = "marvell,orion-mdio", .data = (void *)BUS_TYPE_SMI },
- { .compatible = "marvell,xmdio", .data = (void *)BUS_TYPE_XSMI },
+ { .compatible = "marvell,orion-mdio", .data = &smi_bus },
+ { .compatible = "marvell,xmdio", .data = &xsmi_bus },
{ }
};
MODULE_DEVICE_TABLE(of, orion_mdio_match);
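
The mvmdio change above replaces the old trick of casting an enum through the of_device_id .data pointer with a pointer to a per-compatible data structure, which of_device_get_match_data() hands back in probe(). Below is a minimal, hypothetical sketch of that pattern; the driver and field names are invented for illustration and are not taken from mvmdio:

#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

/* Hypothetical per-compatible data -- illustration only. */
struct example_match_data {
	int bus_type;
	unsigned int poll_interval_us;	/* new fields can be added without touching casts */
};

static const struct example_match_data example_smi_data = {
	.bus_type		= 0,
	.poll_interval_us	= 45,
};

static const struct of_device_id example_mdio_of_match[] = {
	{ .compatible = "vendor,example-mdio", .data = &example_smi_data },
	{ }
};
MODULE_DEVICE_TABLE(of, example_mdio_of_match);

static int example_mdio_probe(struct platform_device *pdev)
{
	const struct example_match_data *data;

	/* Returns the .data pointer of the matching of_device_id entry. */
	data = of_device_get_match_data(&pdev->dev);
	if (!data)
		return -ENODEV;

	dev_info(&pdev->dev, "bus type %d\n", data->bus_type);
	return 0;
}

static struct platform_driver example_mdio_driver = {
	.probe	= example_mdio_probe,
	.driver	= {
		.name		= "example-mdio",
		.of_match_table	= example_mdio_of_match,
	},
};
module_platform_driver(example_mdio_driver);
MODULE_LICENSE("GPL");

Compared with casting an enum to and from void *, a pointed-to struct keeps the compiler's type checking and leaves room for additional per-variant fields later on.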
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
index d825eb021b22..2361879d7188 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
@@ -15,11 +15,14 @@
#include <linux/net_tstamp.h>
#include <linux/phy.h>
#include <linux/phylink.h>
-#include <net/flow_offload.h>
#include <net/page_pool.h>
#include <linux/bpf.h>
#include <net/xdp.h>
+#ifndef CACHE_LINE_MASK
+#define CACHE_LINE_MASK (~(L1_CACHE_BYTES - 1))
+#endif
+
/* The PacketOffset field is measured in units of 32 bytes and is 3 bits wide,
* so the maximum offset is 7 * 32 = 224
*/
@@ -56,10 +59,16 @@
#define MVPP2_RXQ_PACKET_OFFSET_OFFS 28
#define MVPP2_RXQ_PACKET_OFFSET_MASK 0x70000000
#define MVPP2_RXQ_DISABLE_MASK BIT(31)
+/* Total max number of hw RX queues */
+#define MVPP2_RXQ_MAX_NUM 128
/* Top Registers */
#define MVPP2_MH_REG(port) (0x5040 + 4 * (port))
+#define MVPP2_DSA_NON_EXTENDED BIT(4)
#define MVPP2_DSA_EXTENDED BIT(5)
+#define MVPP2_VER_ID_REG 0x50b0
+#define MVPP2_VER_PP22 0x10
+#define MVPP2_VER_PP23 0x11
/* Parser Registers */
#define MVPP2_PRS_INIT_LOOKUP_REG 0x1000
@@ -85,13 +94,23 @@
/* RSS Registers */
#define MVPP22_RSS_INDEX 0x1500
+#define MVPP22_RSS_IDX_ENTRY_NUM_OFF 0
+#define MVPP22_RSS_IDX_ENTRY_NUM_MASK 0x1F
+#define MVPP22_RSS_IDX_TBL_NUM_OFF 8
+#define MVPP22_RSS_IDX_TBL_NUM_MASK 0x700
+#define MVPP22_RSS_IDX_RXQ_NUM_OFF 16
+#define MVPP22_RSS_IDX_RXQ_NUM_MASK 0xFF0000
#define MVPP22_RSS_INDEX_TABLE_ENTRY(idx) (idx)
#define MVPP22_RSS_INDEX_TABLE(idx) ((idx) << 8)
#define MVPP22_RSS_INDEX_QUEUE(idx) ((idx) << 16)
#define MVPP22_RXQ2RSS_TABLE 0x1504
#define MVPP22_RSS_TABLE_POINTER(p) (p)
#define MVPP22_RSS_TABLE_ENTRY 0x1508
+#define MVPP22_RSS_TBL_ENTRY_OFF 0
+#define MVPP22_RSS_TBL_ENTRY_MASK 0xFF
#define MVPP22_RSS_WIDTH 0x150c
+#define MVPP22_RSS_WIDTH_OFF 0
+#define MVPP22_RSS_WIDTH_MASK 0xF
/* Classifier Registers */
#define MVPP2_CLS_MODE_REG 0x1800
@@ -116,7 +135,8 @@
#define MVPP2_CLS_FLOW_TBL1_REG 0x1828
#define MVPP2_CLS_FLOW_TBL1_N_FIELDS_MASK 0x7
#define MVPP2_CLS_FLOW_TBL1_N_FIELDS(x) (x)
-#define MVPP2_CLS_FLOW_TBL1_LU_TYPE(lu) (((lu) & 0x3f) << 3)
+#define MVPP2_CLS_FLOW_TBL1_LKP_TYPE_MASK 0x3f
+#define MVPP2_CLS_FLOW_TBL1_LKP_TYPE(x) ((x) << 3)
#define MVPP2_CLS_FLOW_TBL1_PRIO_MASK 0x3f
#define MVPP2_CLS_FLOW_TBL1_PRIO(x) ((x) << 9)
#define MVPP2_CLS_FLOW_TBL1_SEQ_MASK 0x7
@@ -139,18 +159,17 @@
#define MVPP22_CLS_C2_TCAM_DATA2 0x1b18
#define MVPP22_CLS_C2_TCAM_DATA3 0x1b1c
#define MVPP22_CLS_C2_TCAM_DATA4 0x1b20
-#define MVPP22_CLS_C2_LU_TYPE(lu) ((lu) & 0x3f)
#define MVPP22_CLS_C2_PORT_ID(port) ((port) << 8)
-#define MVPP22_CLS_C2_PORT_MASK (0xff << 8)
-#define MVPP22_CLS_C2_TCAM_INV 0x1b24
-#define MVPP22_CLS_C2_TCAM_INV_BIT BIT(31)
+#define MVPP2_CLS2_TCAM_INV_REG 0x1b24
+#define MVPP2_CLS2_TCAM_INV_INVALID 31
+#define MVPP22_CLS_C2_LKP_TYPE(type) (type)
+#define MVPP22_CLS_C2_LKP_TYPE_MASK (0x3f)
#define MVPP22_CLS_C2_HIT_CTR 0x1b50
#define MVPP22_CLS_C2_ACT 0x1b60
#define MVPP22_CLS_C2_ACT_RSS_EN(act) (((act) & 0x3) << 19)
#define MVPP22_CLS_C2_ACT_FWD(act) (((act) & 0x7) << 13)
#define MVPP22_CLS_C2_ACT_QHIGH(act) (((act) & 0x3) << 11)
#define MVPP22_CLS_C2_ACT_QLOW(act) (((act) & 0x3) << 9)
-#define MVPP22_CLS_C2_ACT_COLOR(act) ((act) & 0x7)
#define MVPP22_CLS_C2_ATTR0 0x1b64
#define MVPP22_CLS_C2_ATTR0_QHIGH(qh) (((qh) & 0x1f) << 24)
#define MVPP22_CLS_C2_ATTR0_QHIGH_MASK 0x1f
@@ -162,8 +181,8 @@
#define MVPP22_CLS_C2_ATTR2 0x1b6c
#define MVPP22_CLS_C2_ATTR2_RSS_EN BIT(30)
#define MVPP22_CLS_C2_ATTR3 0x1b70
-#define MVPP22_CLS_C2_TCAM_CTRL 0x1b90
-#define MVPP22_CLS_C2_TCAM_BYPASS_FIFO BIT(0)
+#define MVPP2_CLS2_TCAM_CTRL_REG 0x1b90
+#define MVPP2_CLS2_TCAM_CTRL_BYPASS_FIFO_STAGES BIT(0)
/* Descriptor Manager Top Registers */
#define MVPP2_RXQ_NUM_REG 0x2040
@@ -232,6 +251,7 @@
#define MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG 0x411c
#define MVPP22_AXI_RX_DATA_WR_ATTR_REG 0x4120
#define MVPP22_AXI_TX_DATA_RD_ATTR_REG 0x4130
+#define MVPP22_AXI_TX_DATA_RD_QOS_ATTRIBUTE (0x3 << 4)
#define MVPP22_AXI_RD_NORMAL_CODE_REG 0x4150
#define MVPP22_AXI_RD_SNOOP_CODE_REG 0x4154
#define MVPP22_AXI_WR_NORMAL_CODE_REG 0x4160
@@ -276,8 +296,8 @@
#define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff)
#define MVPP2_ISR_DISABLE_INTERRUPT(mask) (((mask) << 16) & 0xffff0000)
#define MVPP2_ISR_RX_TX_CAUSE_REG(port) (0x5480 + 4 * (port))
-#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(version) \
- ((version) == MVPP21 ? 0xffff : 0xff)
+#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(variant) \
+ (static_branch_unlikely(&variant) ? 0xffff : 0xff)
#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK 0xff0000
#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET 16
#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK BIT(24)
@@ -292,6 +312,8 @@
#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK 0x3fc00000
#define MVPP2_PON_CAUSE_MISC_SUM_MASK BIT(31)
#define MVPP2_ISR_MISC_CAUSE_REG 0x55b0
+#define MVPP2_ISR_RX_ERR_CAUSE_REG(port) (0x5520 + 4 * (port))
+#define MVPP2_ISR_RX_ERR_CAUSE_NONOCC_MASK 0x00ff
/* Buffer Manager registers */
#define MVPP2_BM_POOL_BASE_REG(pool) (0x6000 + ((pool) * 4))
@@ -319,6 +341,10 @@
#define MVPP2_BM_HIGH_THRESH_MASK 0x7f0000
#define MVPP2_BM_HIGH_THRESH_VALUE(val) ((val) << \
MVPP2_BM_HIGH_THRESH_OFFS)
+#define MVPP2_BM_BPPI_HIGH_THRESH 0x1E
+#define MVPP2_BM_BPPI_LOW_THRESH 0x1C
+#define MVPP23_BM_BPPI_HIGH_THRESH 0x34
+#define MVPP23_BM_BPPI_LOW_THRESH 0x28
#define MVPP2_BM_INTR_CAUSE_REG(pool) (0x6240 + ((pool) * 4))
#define MVPP2_BM_RELEASED_DELAY_MASK BIT(0)
#define MVPP2_BM_ALLOC_FAILED_MASK BIT(1)
@@ -343,26 +369,12 @@
#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK 0xff00
#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT 8
-/* Packet Processor per-port counters */
-#define MVPP2_OVERRUN_ETH_DROP 0x7000
-#define MVPP2_CLS_ETH_DROP 0x7020
+#define MVPP22_BM_POOL_BASE_ADDR_HIGH_REG 0x6310
+#define MVPP22_BM_POOL_BASE_ADDR_HIGH_MASK 0xff
+#define MVPP23_BM_8POOL_MODE BIT(8)
/* Hit counters registers */
#define MVPP2_CTRS_IDX 0x7040
-#define MVPP22_CTRS_TX_CTR(port, txq) ((txq) | ((port) << 3) | BIT(7))
-#define MVPP2_TX_DESC_ENQ_CTR 0x7100
-#define MVPP2_TX_DESC_ENQ_TO_DDR_CTR 0x7104
-#define MVPP2_TX_BUFF_ENQ_TO_DDR_CTR 0x7108
-#define MVPP2_TX_DESC_ENQ_HW_FWD_CTR 0x710c
-#define MVPP2_RX_DESC_ENQ_CTR 0x7120
-#define MVPP2_TX_PKTS_DEQ_CTR 0x7130
-#define MVPP2_TX_PKTS_FULL_QUEUE_DROP_CTR 0x7200
-#define MVPP2_TX_PKTS_EARLY_DROP_CTR 0x7204
-#define MVPP2_TX_PKTS_BM_DROP_CTR 0x7208
-#define MVPP2_TX_PKTS_BM_MC_DROP_CTR 0x720c
-#define MVPP2_RX_PKTS_FULL_QUEUE_DROP_CTR 0x7220
-#define MVPP2_RX_PKTS_EARLY_DROP_CTR 0x7224
-#define MVPP2_RX_PKTS_BM_DROP_CTR 0x7228
#define MVPP2_CLS_DEC_TBL_HIT_CTR 0x7700
#define MVPP2_CLS_FLOW_TBL_HIT_CTR 0x7704
@@ -443,12 +455,15 @@
#define MVPP2_GMAC_STATUS0_GMII_SPEED BIT(1)
#define MVPP2_GMAC_STATUS0_MII_SPEED BIT(2)
#define MVPP2_GMAC_STATUS0_FULL_DUPLEX BIT(3)
-#define MVPP2_GMAC_STATUS0_RX_PAUSE BIT(4)
-#define MVPP2_GMAC_STATUS0_TX_PAUSE BIT(5)
+#define MVPP2_GMAC_STATUS0_RX_PAUSE BIT(6)
+#define MVPP2_GMAC_STATUS0_TX_PAUSE BIT(7)
#define MVPP2_GMAC_STATUS0_AN_COMPLETE BIT(11)
+#define MVPP2_GMAC_PORT_FIFO_CFG_0_REG 0x18
+#define MVPP2_GMAC_TX_FIFO_WM_MASK 0xffff
+#define MVPP2_GMAC_TX_FIFO_WM_LOW_OFFSET 8
#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG 0x1c
#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS 6
-#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0
+#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x3fc0
#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \
MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
#define MVPP22_GMAC_INT_STAT 0x20
@@ -469,14 +484,12 @@
#define MVPP22_GMAC_INT_SUM_MASK_LINK_STAT BIT(1)
#define MVPP22_GMAC_INT_SUM_MASK_PTP BIT(2)
-/* Per-port XGMAC registers. PPv2.2 only, only for GOP port 0,
+/* Per-port XGMAC registers. PPv2.2 and PPv2.3, only for GOP port 0,
* relative to port->base.
*/
#define MVPP22_XLG_CTRL0_REG 0x100
#define MVPP22_XLG_CTRL0_PORT_EN BIT(0)
#define MVPP22_XLG_CTRL0_MAC_RESET_DIS BIT(1)
-#define MVPP22_XLG_CTRL0_FORCE_LINK_DOWN BIT(2)
-#define MVPP22_XLG_CTRL0_FORCE_LINK_PASS BIT(3)
#define MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN BIT(7)
#define MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN BIT(8)
#define MVPP22_XLG_CTRL0_MIB_CNT_DIS BIT(14)
@@ -503,10 +516,11 @@
#define MVPP22_XLG_CTRL4_REG 0x184
#define MVPP22_XLG_CTRL4_FWD_FC BIT(5)
#define MVPP22_XLG_CTRL4_FWD_PFC BIT(6)
+#define MVPP22_XLG_CTRL4_USE_XPCS BIT(8)
#define MVPP22_XLG_CTRL4_MACMODSELECT_GMAC BIT(12)
#define MVPP22_XLG_CTRL4_EN_IDLE_CHECK BIT(14)
-/* SMI registers. PPv2.2 only, relative to priv->iface_base. */
+/* SMI registers. PPv2.2 and PPv2.3, relative to priv->iface_base. */
#define MVPP22_SMI_MISC_CFG_REG 0x1204
#define MVPP22_SMI_POLLING_EN BIT(10)
@@ -582,7 +596,7 @@
#define MVPP2_QUEUE_NEXT_DESC(q, index) \
(((index) < (q)->last_desc) ? ((index) + 1) : 0)
-/* XPCS registers. PPv2.2 only */
+/* XPCS registers. PPv2.2 and PPv2.3 */
#define MVPP22_MPCS_BASE(port) (0x7000 + (port) * 0x1000)
#define MVPP22_MPCS_CTRL 0x14
#define MVPP22_MPCS_CTRL_FWD_ERR_CONN BIT(10)
@@ -593,7 +607,16 @@
#define MVPP22_MPCS_CLK_RESET_DIV_RATIO(n) ((n) << 4)
#define MVPP22_MPCS_CLK_RESET_DIV_SET BIT(11)
-/* XPCS registers. PPv2.2 only */
+/* FCA registers. PPv2.2 and PPv2.3 */
+#define MVPP22_FCA_BASE(port) (0x7600 + (port) * 0x1000)
+#define MVPP22_FCA_REG_SIZE 16
+#define MVPP22_FCA_REG_MASK 0xFFFF
+#define MVPP22_FCA_CONTROL_REG 0x0
+#define MVPP22_FCA_ENABLE_PERIODIC BIT(11)
+#define MVPP22_PERIODIC_COUNTER_LSB_REG (0x110)
+#define MVPP22_PERIODIC_COUNTER_MSB_REG (0x114)
+
+/* XPCS registers. PPv2.2 and PPv2.3 */
#define MVPP22_XPCS_BASE(port) (0x7400 + (port) * 0x1000)
#define MVPP22_XPCS_CFG0 0x0
#define MVPP22_XPCS_CFG0_RESET_DIS BIT(0)
@@ -658,11 +681,14 @@
/* Various constants */
/* Coalescing */
-#define MVPP2_TXDONE_COAL_PKTS_THRESH 64
+#define MVPP2_TXDONE_COAL_PKTS_THRESH 32
#define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL
+#define MVPP2_GUARD_TXDONE_HRTIMER_NS (10 * NSEC_PER_MSEC)
#define MVPP2_TXDONE_COAL_USEC 1000
#define MVPP2_RX_COAL_PKTS 32
#define MVPP2_RX_COAL_USEC 64
+#define MVPP2_TX_BULK_TIME (50 * NSEC_PER_USEC)
+#define MVPP2_TX_BULK_MAX_PACKETS (MVPP2_AGGR_TXQ_SIZE / MVPP2_MAX_PORTS)
/* The two bytes Marvell header. Either contains a special value used
* by Marvell switches when a specific hardware mode is enabled (not
@@ -677,6 +703,7 @@
#define MVPP2_PPPOE_HDR_SIZE 8
#define MVPP2_VLAN_TAG_LEN 4
#define MVPP2_VLAN_TAG_EDSA_LEN 8
+#define MVPP2_MPLS_HEADER_LEN 4
/* Lbtd 802.3 type */
#define MVPP2_IP_LBDT_TYPE 0xfffa
@@ -695,32 +722,47 @@
/* Maximum number of supported ports */
#define MVPP2_MAX_PORTS 4
+/* Loopback port index */
+#define MVPP2_LOOPBACK_PORT_INDEX 3
+
/* Maximum number of TXQs used by single port */
#define MVPP2_MAX_TXQ 8
-/* MVPP2_MAX_TSO_SEGS is the maximum number of fragments to allow in the GSO
- * skb. As we need a maxium of two descriptors per fragments (1 header, 1 data),
- * multiply this value by two to count the maximum number of skb descs needed.
+/* SKB/TSO/TX-ring-size/pause-wakeup constants depend upon the
+ * MAX_TSO_SEGS - the max number of fragments to allow in the GSO skb.
+ * Min-Min requirement for it = maxPacket(64kB)/stdMTU(1500)=44 fragments
+ * and MVPP2_MAX_TSO_SEGS=max(MVPP2_MAX_TSO_SEGS, MAX_SKB_FRAGS).
+ * MAX_SKB_DESCS: we need 2 descriptors per TSO fragment (1 header, 1 data)
+ * + per-cpu-reservation MVPP2_CPU_DESC_CHUNK*CPUs for optimization.
+ * TX stop activation threshold (e.g. Queue is full) is MAX_SKB_DESCS
+ * TX stop-to-wake hysteresis is MAX_TSO_SEGS
+ * The Tx ring size cannot be smaller than TSO_SEGS + HYSTERESIS + SKBs
+ * The numbers depend upon num cpus (online) used by the driver
*/
-#define MVPP2_MAX_TSO_SEGS 300
-#define MVPP2_MAX_SKB_DESCS (MVPP2_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
+#define MVPP2_MAX_TSO_SEGS 44
+#define MVPP2_MAX_SKB_DESCS(ncpus) (MVPP2_MAX_TSO_SEGS * 2 + \
+ MVPP2_CPU_DESC_CHUNK * ncpus)
+#define MVPP2_TX_PAUSE_HYSTERESIS (MVPP2_MAX_TSO_SEGS * 2)
/* Max number of RXQs per port */
#define MVPP2_PORT_MAX_RXQ 32
/* Max number of Rx descriptors */
-#define MVPP2_MAX_RXD_MAX 1024
-#define MVPP2_MAX_RXD_DFLT 128
+#define MVPP2_MAX_RXD_MAX 2048
+#define MVPP2_MAX_RXD_DFLT MVPP2_MAX_RXD_MAX
/* Max number of Tx descriptors */
#define MVPP2_MAX_TXD_MAX 2048
-#define MVPP2_MAX_TXD_DFLT 1024
+#define MVPP2_MAX_TXD_DFLT MVPP2_MAX_TXD_MAX
+#define MVPP2_MIN_TXD(ncpus) ALIGN(MVPP2_MAX_TSO_SEGS + \
+ MVPP2_MAX_SKB_DESCS(ncpus) + \
+ MVPP2_TX_PAUSE_HYSTERESIS, 32)
/* Amount of Tx descriptors that can be reserved at once by CPU */
#define MVPP2_CPU_DESC_CHUNK 64
/* Max number of Tx descriptors in each aggregated queue */
-#define MVPP2_AGGR_TXQ_SIZE 256
+#define MVPP2_AGGR_TXQ_SIZE 512
/* Descriptor aligned size */
#define MVPP2_DESC_ALIGNED_SIZE 32
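
A quick worked example of the ring-sizing arithmetic introduced in the hunk above. The snippet is plain user-space C, not driver code; it mirrors the macros from this hunk, re-implements the kernel's ALIGN() locally, and assumes 4 online CPUs purely for illustration:

#include <stdio.h>

/* Local stand-in for the kernel's ALIGN() macro. */
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))

/* Constants copied from the hunk above. */
#define MVPP2_MAX_TSO_SEGS	44
#define MVPP2_CPU_DESC_CHUNK	64
#define MVPP2_MAX_SKB_DESCS(n)	(MVPP2_MAX_TSO_SEGS * 2 + MVPP2_CPU_DESC_CHUNK * (n))
#define MVPP2_TX_PAUSE_HYSTERESIS	(MVPP2_MAX_TSO_SEGS * 2)
#define MVPP2_MIN_TXD(n)	ALIGN(MVPP2_MAX_TSO_SEGS + \
				      MVPP2_MAX_SKB_DESCS(n) + \
				      MVPP2_TX_PAUSE_HYSTERESIS, 32)

int main(void)
{
	int ncpus = 4;	/* assumption for the example */

	/* stop threshold: 44 * 2 + 64 * 4 = 344 descriptors */
	printf("MVPP2_MAX_SKB_DESCS(%d)   = %d\n", ncpus, MVPP2_MAX_SKB_DESCS(ncpus));
	/* stop-to-wake hysteresis: 88 descriptors */
	printf("MVPP2_TX_PAUSE_HYSTERESIS = %d\n", MVPP2_TX_PAUSE_HYSTERESIS);
	/* minimum ring: ALIGN(44 + 344 + 88, 32) = 480 */
	printf("MVPP2_MIN_TXD(%d)         = %d\n", ncpus, MVPP2_MIN_TXD(ncpus));
	return 0;
}

With four online CPUs the smallest permitted Tx ring works out to 480 descriptors, well under the default of 2048 (MVPP2_MAX_TXD_DFLT) chosen above.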
@@ -729,33 +771,57 @@
#define MVPP2_TX_DESC_ALIGN (MVPP2_DESC_ALIGNED_SIZE - 1)
/* RX FIFO constants */
+#define MVPP2_RX_FIFO_PORT_DATA_SIZE_44KB 0xb000
#define MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB 0x8000
#define MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB 0x2000
#define MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB 0x1000
-#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB 0x200
-#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB 0x80
+#define MVPP2_RX_FIFO_PORT_ATTR_SIZE(data_size) (data_size >> 6)
#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB 0x40
#define MVPP2_RX_FIFO_PORT_MIN_PKT 0x80
/* TX FIFO constants */
-#define MVPP22_TX_FIFO_DATA_SIZE_10KB 0xa
-#define MVPP22_TX_FIFO_DATA_SIZE_3KB 0x3
-#define MVPP2_TX_FIFO_THRESHOLD_MIN 256
-#define MVPP2_TX_FIFO_THRESHOLD_10KB \
- (MVPP22_TX_FIFO_DATA_SIZE_10KB * 1024 - MVPP2_TX_FIFO_THRESHOLD_MIN)
-#define MVPP2_TX_FIFO_THRESHOLD_3KB \
- (MVPP22_TX_FIFO_DATA_SIZE_3KB * 1024 - MVPP2_TX_FIFO_THRESHOLD_MIN)
+#define MVPP22_TX_FIFO_DATA_SIZE_18KB 18
+#define MVPP22_TX_FIFO_DATA_SIZE_10KB 10
+#define MVPP22_TX_FIFO_DATA_SIZE_1KB 1
+#define MVPP22_TX_FIFO_DATA_SIZE_MIN 3
+#define MVPP22_TX_FIFO_DATA_SIZE_MAX 15
+#define MVPP2_TX_FIFO_THRESHOLD_MIN 256 /* Bytes */
+#define MVPP2_TX_FIFO_THRESHOLD(kb) \
+ (kb * 1024 - MVPP2_TX_FIFO_THRESHOLD_MIN)
+#define MVPP22_TX_FIFO_EXTRA_PARAM_MASK 0xFF
+#define MVPP22_TX_FIFO_EXTRA_PARAM_OFFS(port) (8 * (port))
+#define MVPP22_TX_FIFO_EXTRA_PARAM_SIZE(port, val) \
+ (((val) >> MVPP22_TX_FIFO_EXTRA_PARAM_OFFS(port)) & \
+ MVPP22_TX_FIFO_EXTRA_PARAM_MASK)
+
+/* RX FIFO threshold in 1KB granularity */
+#define MVPP23_PORT0_FIFO_TRSH (9 * 1024)
+#define MVPP23_PORT1_FIFO_TRSH (4 * 1024)
+#define MVPP23_PORT2_FIFO_TRSH (2 * 1024)
+
+/* RX Flow Control Registers */
+#define MVPP2_RX_FC_REG(port) (0x150 + 4 * (port))
+#define MVPP2_RX_FC_EN BIT(24)
+#define MVPP2_RX_FC_TRSH_OFFS 16
+#define MVPP2_RX_FC_TRSH_MASK (0xFF << MVPP2_RX_FC_TRSH_OFFS)
+#define MVPP2_RX_FC_TRSH_UNIT 256
+
+/* GMAC TX FIFO configuration */
+#define MVPP2_GMAC_TX_FIFO_MIN_TH \
+ MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(50)
+#define MVPP2_GMAC_TX_FIFO_LOW_WM 75
+#define MVPP2_GMAC_TX_FIFO_HI_WM 77
/* RX buffer constants */
#define MVPP2_SKB_SHINFO_SIZE \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
+#define MVPP2_MTU_OVERHEAD_SIZE \
+ (MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + ETH_HLEN + ETH_FCS_LEN)
#define MVPP2_RX_PKT_SIZE(mtu) \
- ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
- ETH_HLEN + ETH_FCS_LEN, cache_line_size())
+ ALIGN((mtu) + MVPP2_MTU_OVERHEAD_SIZE, cache_line_size())
#define MVPP2_RX_BUF_SIZE(pkt_size) ((pkt_size) + MVPP2_SKB_HEADROOM)
-#define MVPP2_RX_TOTAL_SIZE(buf_size) ((buf_size) + MVPP2_SKB_SHINFO_SIZE)
#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
((total_size) - MVPP2_SKB_HEADROOM - MVPP2_SKB_SHINFO_SIZE)
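
To make the RX sizing above concrete, here is a small stand-alone computation of MVPP2_RX_PKT_SIZE(). It assumes a 64-byte cache line and the 2-byte Marvell header (MVPP2_MH_SIZE) described earlier in this header file; neither value appears in this hunk, so treat them as illustrative assumptions:

#include <stdio.h>

#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))

/* Assumed values for the example (not all are visible in this hunk). */
#define CACHE_LINE		64	/* assumed cache_line_size() */
#define MVPP2_MH_SIZE		2	/* the two-byte Marvell header */
#define MVPP2_VLAN_TAG_LEN	4
#define ETH_HLEN		14
#define ETH_FCS_LEN		4

#define MVPP2_MTU_OVERHEAD_SIZE \
	(MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + ETH_HLEN + ETH_FCS_LEN)
#define MVPP2_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVPP2_MTU_OVERHEAD_SIZE, CACHE_LINE)

int main(void)
{
	/* 1500 + (2 + 4 + 14 + 4) = 1524, rounded up to 1536 */
	printf("RX pkt size for MTU 1500: %d\n", MVPP2_RX_PKT_SIZE(1500));
	/* 9000 + 24 = 9024, already a multiple of 64 */
	printf("RX pkt size for MTU 9000: %d\n", MVPP2_RX_PKT_SIZE(9000));
	return 0;
}

For a standard 1500-byte MTU the packet area therefore rounds up to 1536 bytes, before MVPP2_SKB_HEADROOM and the shared-info tail are added on top of it.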
@@ -765,15 +831,10 @@
#define MVPP2_BIT_TO_WORD(bit) ((bit) / 32)
#define MVPP2_BIT_IN_WORD(bit) ((bit) % 32)
-#define MVPP2_N_PRS_FLOWS 52
-#define MVPP2_N_RFS_ENTRIES_PER_FLOW 4
-
-/* There are 7 supported high-level flows */
-#define MVPP2_N_RFS_RULES (MVPP2_N_RFS_ENTRIES_PER_FLOW * 7)
-
/* RSS constants */
-#define MVPP22_N_RSS_TABLES 8
#define MVPP22_RSS_TABLE_ENTRIES 32
+#define MVPP22_RSS_TBL_NUM 8
+#define MVPP22_RSS_WIDTH_MAX 8
/* IPv6 max L3 address size */
#define MVPP2_MAX_L3_ADDR_SIZE 16
@@ -781,6 +842,9 @@
/* Port flags */
#define MVPP2_F_LOOPBACK BIT(0)
#define MVPP2_F_DT_COMPAT BIT(1)
+#define MVPP22_F_IF_MUSDK BIT(2) /* musdk port */
+/* BIT(1 and 2) are reserved */
+#define MVPP2_F_IF_TX_ON BIT(3)
/* Marvell tag types */
enum mvpp2_tag_type {
@@ -843,18 +907,17 @@ enum mvpp22_ptp_packet_format {
#define MVPP22_PTP_TIMESTAMPQUEUESELECT BIT(18)
/* BM constants */
-#define MVPP2_BM_JUMBO_BUF_NUM 512
-#define MVPP2_BM_LONG_BUF_NUM 1024
+#define MVPP2_BM_JUMBO_BUF_NUM 2048
+#define MVPP2_BM_LONG_BUF_NUM 2048
#define MVPP2_BM_SHORT_BUF_NUM 2048
#define MVPP2_BM_POOL_SIZE_MAX (16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
#define MVPP2_BM_POOL_PTR_ALIGN 128
-#define MVPP2_BM_MAX_POOLS 8
/* BM cookie (32 bits) definition */
#define MVPP2_BM_COOKIE_POOL_OFFS 8
#define MVPP2_BM_COOKIE_CPU_OFFS 24
-#define MVPP2_BM_SHORT_FRAME_SIZE 736 /* frame size 128 */
+#define MVPP2_BM_SHORT_FRAME_SIZE 1024
#define MVPP2_BM_LONG_FRAME_SIZE 2240 /* frame size 1664 */
#define MVPP2_BM_JUMBO_FRAME_SIZE 10432 /* frame size 9856 */
/* BM short pool packet size
@@ -897,7 +960,7 @@ enum mvpp22_ptp_packet_format {
#define MVPP2_MIB_FC_RCVD 0x58
#define MVPP2_MIB_RX_FIFO_OVERRUN 0x5c
#define MVPP2_MIB_UNDERSIZE_RCVD 0x60
-#define MVPP2_MIB_FRAGMENTS_RCVD 0x64
+#define MVPP2_MIB_FRAGMENTS_ERR_RCVD 0x64
#define MVPP2_MIB_OVERSIZE_RCVD 0x68
#define MVPP2_MIB_JABBER_RCVD 0x6c
#define MVPP2_MIB_MAC_RCV_ERROR 0x70
@@ -907,6 +970,18 @@ enum mvpp22_ptp_packet_format {
#define MVPP2_MIB_COUNTERS_STATS_DELAY (1 * HZ)
+/* Other counters */
+#define MVPP2_OVERRUN_DROP_REG(port) (0x7000 + 4 * (port))
+#define MVPP2_CLS_DROP_REG(port) (0x7020 + 4 * (port))
+#define MVPP2_CNT_IDX_REG 0x7040
+#define MVPP2_TX_PKT_FULLQ_DROP_REG 0x7200
+#define MVPP2_TX_PKT_EARLY_DROP_REG 0x7204
+#define MVPP2_TX_PKT_BM_DROP_REG 0x7208
+#define MVPP2_TX_PKT_BM_MC_DROP_REG 0x720c
+#define MVPP2_RX_PKT_FULLQ_DROP_REG 0x7220
+#define MVPP2_RX_PKT_EARLY_DROP_REG 0x7224
+#define MVPP2_RX_PKT_BM_DROP_REG 0x7228
+
#define MVPP2_DESC_DMA_MASK DMA_BIT_MASK(40)
/* Buffer header info bits */
@@ -919,12 +994,67 @@ enum mvpp22_ptp_packet_format {
struct mvpp2_tai;
-/* Definitions */
-struct mvpp2_dbgfs_entries;
+/* MSS Flow control */
+#define MSS_SRAM_SIZE 0x800
+#define MSS_FC_COM_REG 0
+#define FLOW_CONTROL_ENABLE_BIT BIT(0)
+#define FLOW_CONTROL_UPDATE_COMMAND_BIT BIT(31)
+#define FC_QUANTA 0xFFFF
+#define FC_CLK_DIVIDER 0x140
+
+#define MSS_BUF_POOL_BASE 0x40
+#define MSS_BUF_POOL_OFFS 4
+#define MSS_BUF_POOL_REG(id) (MSS_BUF_POOL_BASE \
+ + (id) * MSS_BUF_POOL_OFFS)
+
+#define MSS_BUF_POOL_STOP_MASK 0xFFF
+#define MSS_BUF_POOL_START_MASK (0xFFF << MSS_BUF_POOL_START_OFFS)
+#define MSS_BUF_POOL_START_OFFS 12
+#define MSS_BUF_POOL_PORTS_MASK (0xF << MSS_BUF_POOL_PORTS_OFFS)
+#define MSS_BUF_POOL_PORTS_OFFS 24
+#define MSS_BUF_POOL_PORT_OFFS(id) (0x1 << \
+ ((id) + MSS_BUF_POOL_PORTS_OFFS))
+
+#define MSS_RXQ_TRESH_BASE 0x200
+#define MSS_RXQ_TRESH_OFFS 4
+#define MSS_RXQ_TRESH_REG(q, fq) (MSS_RXQ_TRESH_BASE + (((q) + (fq)) \
+ * MSS_RXQ_TRESH_OFFS))
+
+#define MSS_RXQ_TRESH_START_MASK 0xFFFF
+#define MSS_RXQ_TRESH_STOP_MASK (0xFFFF << MSS_RXQ_TRESH_STOP_OFFS)
+#define MSS_RXQ_TRESH_STOP_OFFS 16
+
+#define MSS_RXQ_ASS_BASE 0x80
+#define MSS_RXQ_ASS_OFFS 4
+#define MSS_RXQ_ASS_PER_REG 4
+#define MSS_RXQ_ASS_PER_OFFS 8
+#define MSS_RXQ_ASS_PORTID_OFFS 0
+#define MSS_RXQ_ASS_PORTID_MASK 0x3
+#define MSS_RXQ_ASS_HOSTID_OFFS 2
+#define MSS_RXQ_ASS_HOSTID_MASK 0x3F
+
+#define MSS_RXQ_ASS_Q_BASE(q, fq) ((((q) + (fq)) % MSS_RXQ_ASS_PER_REG) \
+ * MSS_RXQ_ASS_PER_OFFS)
+#define MSS_RXQ_ASS_PQ_BASE(q, fq) ((((q) + (fq)) / MSS_RXQ_ASS_PER_REG) \
+ * MSS_RXQ_ASS_OFFS)
+#define MSS_RXQ_ASS_REG(q, fq) (MSS_RXQ_ASS_BASE + MSS_RXQ_ASS_PQ_BASE(q, fq))
+
+#define MSS_THRESHOLD_STOP 768
+#define MSS_THRESHOLD_START 1024
+#define MSS_FC_MAX_TIMEOUT 5000
+
+#define MVPP2_PRS_TCAM_SRAM_SIZE 256
+#define MVPP2_N_FLOWS 52
-struct mvpp2_rss_table {
- u32 indir[MVPP22_RSS_TABLE_ENTRIES];
-};
+/* Buffer header info bits */
+#define MVPP2_B_HDR_INFO_MC_ID_MASK 0xfff
+#define MVPP2_B_HDR_INFO_MC_ID(info) ((info) & MVPP2_B_HDR_INFO_MC_ID_MASK)
+#define MVPP2_B_HDR_INFO_LAST_OFFS 12
+#define MVPP2_B_HDR_INFO_LAST_MASK BIT(12)
+#define MVPP2_B_HDR_INFO_IS_LAST(info) \
+ (((info) & MVPP2_B_HDR_INFO_LAST_MASK) >> MVPP2_B_HDR_INFO_LAST_OFFS)
+
+/* Definitions */
struct mvpp2_buff_hdr {
__le32 next_phys_addr;
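
The MSS flow-control macros added in the hunk above pack four RX queues into each 32-bit assignment register, eight bits per queue, while every queue gets its own threshold register. A stand-alone worked offset calculation (user-space C, arbitrary example queue numbers):

#include <stdio.h>

/* Macro definitions copied from the hunk above. */
#define MSS_RXQ_TRESH_BASE	0x200
#define MSS_RXQ_TRESH_OFFS	4
#define MSS_RXQ_TRESH_REG(q, fq)	(MSS_RXQ_TRESH_BASE + (((q) + (fq)) \
					* MSS_RXQ_TRESH_OFFS))

#define MSS_RXQ_ASS_BASE	0x80
#define MSS_RXQ_ASS_OFFS	4
#define MSS_RXQ_ASS_PER_REG	4
#define MSS_RXQ_ASS_PER_OFFS	8
#define MSS_RXQ_ASS_Q_BASE(q, fq)	((((q) + (fq)) % MSS_RXQ_ASS_PER_REG) \
					* MSS_RXQ_ASS_PER_OFFS)
#define MSS_RXQ_ASS_PQ_BASE(q, fq)	((((q) + (fq)) / MSS_RXQ_ASS_PER_REG) \
					* MSS_RXQ_ASS_OFFS)
#define MSS_RXQ_ASS_REG(q, fq)	(MSS_RXQ_ASS_BASE + MSS_RXQ_ASS_PQ_BASE(q, fq))

int main(void)
{
	int q = 5, fq = 0;	/* example: queue 5, first-queue offset 0 */

	/* Each queue has its own threshold register: 0x200 + 5 * 4 = 0x214 */
	printf("threshold reg:  0x%x\n", MSS_RXQ_TRESH_REG(q, fq));
	/* Assignments pack four queues per register, 8 bits each:
	 * register 0x80 + (5 / 4) * 4 = 0x84, bit offset (5 % 4) * 8 = 8.
	 */
	printf("assignment reg: 0x%x, bit offset %d\n",
	       MSS_RXQ_ASS_REG(q, fq), MSS_RXQ_ASS_Q_BASE(q, fq));
	return 0;
}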
@@ -945,16 +1075,18 @@ struct mvpp2 {
/* Shared registers' base addresses */
void __iomem *lms_base;
void __iomem *iface_base;
+ void __iomem *cm3_base;
- /* On PPv2.2, each "software thread" can access the base
+ /* On PPv2.2 and PPv2.3, each "software thread" can access the base
* register through a separate address space, each 64 KB apart
* from each other. Typically, such address spaces will be
* used per CPU.
*/
void __iomem *swth_base[MVPP2_MAX_THREADS];
- /* On PPv2.2, some port control registers are located into the system
- * controller space. These registers are accessible through a regmap.
+ /* On PPv2.2 and PPv2.3, some port control registers are located into
+ * the system controller space. These registers are accessible
+ * through a regmap.
*/
struct regmap *sysctrl_base;
@@ -968,7 +1100,8 @@ struct mvpp2 {
/* List of pointers to port structures */
int port_count;
struct mvpp2_port *port_list[MVPP2_MAX_PORTS];
- struct mvpp2_tai *tai;
+ /* Map of enabled ports */
+ unsigned long port_map;
/* Number of Tx threads used */
unsigned int nthreads;
@@ -978,11 +1111,9 @@ struct mvpp2 {
/* Aggregated TXQs */
struct mvpp2_tx_queue *aggr_txqs;
- /* Are we using page_pool with per-cpu pools? */
- int percpu_pools;
-
/* BM pools */
struct mvpp2_bm_pool *bm_pools;
+ struct mvpp2_bm_pool **pools_pcpu;
/* PRS shadow table */
struct mvpp2_prs_shadow *prs_shadow;
@@ -993,7 +1124,10 @@ struct mvpp2 {
u32 tclk;
/* HW version */
- enum { MVPP21, MVPP22 } hw_version;
+ enum { MVPP21, MVPP22, MVPP23 } hw_version;
+
+ /* Bitmap of the participating cpu's */
+ u16 cpu_map;
/* Maximum number of RXQs per port */
unsigned int max_port_rxqs;
@@ -1004,17 +1138,39 @@ struct mvpp2 {
/* Debugfs root entry */
struct dentry *dbgfs_dir;
+ struct mvpp2_dbgfs_prs_entry *dbgfs_prs_entry[MVPP2_PRS_TCAM_SRAM_SIZE];
+ struct mvpp2_dbgfs_flow_entry *dbgfs_flow_entry[MVPP2_N_FLOWS];
+
+ /* CM3 SRAM pool */
+ struct gen_pool *sram_pool;
- /* Debugfs entries private data */
- struct mvpp2_dbgfs_entries *dbgfs_entries;
+ /* Global TX Flow Control config */
+ bool global_tx_fc;
- /* RSS Indirection tables */
- struct mvpp2_rss_table *rss_tables[MVPP22_N_RSS_TABLES];
+ bool custom_dma_mask;
+
+ /* Spinlocks for CM3 shared memory configuration */
+ spinlock_t mss_spinlock;
/* page_pool allocator */
struct page_pool *page_pool[MVPP2_PORT_MAX_RXQ];
};
+struct mvpp2_dbgfs_prs_entry {
+ int tid;
+ struct mvpp2 *priv;
+};
+
+struct mvpp2_dbgfs_flow_entry {
+ int flow;
+ struct mvpp2 *priv;
+};
+
+struct mvpp2_dbgfs_port_flow_entry {
+ struct mvpp2_port *port;
+ struct mvpp2_dbgfs_flow_entry *dbg_fe;
+};
+
struct mvpp2_pcpu_stats {
struct u64_stats_sync syncp;
u64 rx_packets;
@@ -1033,9 +1189,24 @@ struct mvpp2_pcpu_stats {
/* Per-CPU port control */
struct mvpp2_port_pcpu {
+ /* Timer & Tasklet for bulk-tx optimization */
+ struct hrtimer bulk_timer;
+ bool bulk_timer_scheduled;
+ bool bulk_timer_restart_req;
+ struct tasklet_struct bulk_tasklet;
+
+ /* Timer & Tasklet for egress finalization */
struct hrtimer tx_done_timer;
- struct net_device *dev;
- bool timer_scheduled;
+ bool tx_done_timer_scheduled;
+ bool guard_timer_scheduled;
+ struct tasklet_struct tx_done_tasklet;
+
+ /* tx-done guard timer fields */
+ struct mvpp2_port *port; /* reference to get from tx_done_timer */
+ bool tx_done_passed; /* tx-done passed since last guard-check */
+ u8 txq_coal_is_zero_map; /* map tx queues (max=8) forced coal=Zero */
+ u8 txq_busy_suspect_map; /* map suspect txq to be forced */
+ u32 tx_guard_cntr; /* statistic */
};
struct mvpp2_queue_vector {
@@ -1051,37 +1222,6 @@ struct mvpp2_queue_vector {
struct cpumask *mask;
};
-/* Internal represention of a Flow Steering rule */
-struct mvpp2_rfs_rule {
- /* Rule location inside the flow*/
- int loc;
-
- /* Flow type, such as TCP_V4_FLOW, IP6_FLOW, etc. */
- int flow_type;
-
- /* Index of the C2 TCAM entry handling this rule */
- int c2_index;
-
- /* Header fields that needs to be extracted to match this flow */
- u16 hek_fields;
-
- /* CLS engine : only c2 is supported for now. */
- u8 engine;
-
- /* TCAM key and mask for C2-based steering. These fields should be
- * encapsulated in a union should we add more engines.
- */
- u64 c2_tcam;
- u64 c2_tcam_mask;
-
- struct flow_rule *flow;
-};
-
-struct mvpp2_ethtool_fs {
- struct mvpp2_rfs_rule rule;
- struct ethtool_rxnfc rxnfc;
-};
-
struct mvpp2_hwtstamp_queue {
struct sk_buff *skb[32];
u8 next;
@@ -1118,6 +1258,7 @@ struct mvpp2_port {
struct bpf_prog *xdp_prog;
int pkt_size;
+ u32 num_tc_queues;
/* Per-CPU port control */
struct mvpp2_port_pcpu __percpu *pcpu;
@@ -1145,11 +1286,14 @@ struct mvpp2_port {
struct device_node *of_node;
phy_interface_t phy_interface;
+ phy_interface_t of_phy_interface;
struct phylink *phylink;
struct phylink_config phylink_config;
struct phylink_pcs phylink_pcs;
struct phy *comphy;
+ bool phy_exist;
+
struct mvpp2_bm_pool *pool_long;
struct mvpp2_bm_pool *pool_short;
@@ -1162,19 +1306,32 @@ struct mvpp2_port {
u32 tx_time_coal;
- /* List of steering rules active on that port */
- struct mvpp2_ethtool_fs *rfs_rules[MVPP2_N_RFS_ENTRIES_PER_FLOW];
- int n_rfs_rules;
+ /* RSS indirection table */
+ u32 indir[MVPP22_RSS_TABLE_ENTRIES];
- /* Each port has its own view of the rss contexts, so that it can number
- * them from 0
- */
- int rss_ctx[MVPP22_N_RSS_TABLES];
+ /* us private storage, allocated/used by User/Kernel mode toggling */
+ void *us_cfg;
+
+ /* Coherency-update for TX-ON from link_status_irq */
+ struct tasklet_struct txqs_on_tasklet;
+
+ /* Firmware TX flow control */
+ bool tx_fc;
+
+ /* Indication, whether port is connected to XLG MAC */
+ bool has_xlg_mac;
+
+#if IS_ENABLED(CONFIG_NET_DSA)
+ /* Notifier required when the port is connected to the switch */
+ struct notifier_block dsa_notifier;
+#endif
bool hwtstamp;
bool rx_hwtstamp;
enum hwtstamp_tx_types tx_hwtstamp_type;
struct mvpp2_hwtstamp_queue tx_hwtstamp_queue[2];
+
+ struct mvpp2_dbgfs_port_flow_entry *dbgfs_port_flow_entry;
};
/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
@@ -1195,7 +1352,7 @@ struct mvpp2_port {
#define MVPP2_RXD_ERR_SUMMARY BIT(15)
#define MVPP2_RXD_ERR_CODE_MASK (BIT(13) | BIT(14))
-#define MVPP2_RXD_ERR_CRC 0x0
+#define MVPP2_RXD_ERR_MAC 0x0
#define MVPP2_RXD_ERR_OVERRUN BIT(13)
#define MVPP2_RXD_ERR_RESOURCE (BIT(13) | BIT(14))
#define MVPP2_RXD_BM_POOL_ID_OFFS 16
@@ -1209,6 +1366,20 @@ struct mvpp2_port {
#define MVPP2_RXD_L3_IP6 BIT(30)
#define MVPP2_RXD_BUF_HDR BIT(31)
+struct mvpp2_buff_hdr {
+ __le32 next_dma_addr;
+ __le32 next_cookie_addr;
+ __le16 byte_count;
+ __le16 info;
+ __le16 reserved1; /* bm_qset (for future use, BM) */
+ u8 next_dma_addr_high;
+ u8 next_cookie_addr_high;
+ __le16 reserved2;
+ __le16 reserved3;
+ __le16 reserved4;
+ __le16 reserved5;
+};
+
/* HW TX descriptor for PPv2.1 */
struct mvpp21_tx_desc {
__le32 command; /* Options used by HW for packet transmitting.*/
@@ -1237,7 +1408,7 @@ struct mvpp21_rx_desc {
__le32 reserved8;
};
-/* HW TX descriptor for PPv2.2 */
+/* HW TX descriptor for PPv2.2 and PPv2.3 */
struct mvpp22_tx_desc {
__le32 command;
u8 packet_offset;
@@ -1249,7 +1420,7 @@ struct mvpp22_tx_desc {
__le64 buf_cookie_misc;
};
-/* HW RX descriptor for PPv2.2 */
+/* HW RX descriptor for PPv2.2 and PPv2.3 */
struct mvpp22_rx_desc {
__le32 status;
__le16 reserved1;
@@ -1311,8 +1482,10 @@ struct mvpp2_txq_pcpu {
*/
int count;
- int wake_threshold;
- int stop_threshold;
+ u16 wake_threshold;
+ u16 stop_threshold;
+ /* TXQ-number above stop_threshold to be wake-up */
+ u16 stopped_on_txq_id;
/* Number of Tx DMA descriptors reserved for each CPU */
int reserved_num;
@@ -1343,6 +1516,7 @@ struct mvpp2_tx_queue {
/* Number of currently used Tx DMA descriptor in the descriptor ring */
int count;
+ int pending;
/* Per-CPU control of physical Tx queues */
struct mvpp2_txq_pcpu __percpu *pcpu;
@@ -1360,40 +1534,46 @@ struct mvpp2_tx_queue {
/* Index of the next Tx DMA descriptor to process */
int next_desc_to_proc;
-};
+} __aligned(L1_CACHE_BYTES);
struct mvpp2_rx_queue {
+ /* Virtual address of the RX DMA descriptors array */
+ struct mvpp2_rx_desc *descs;
+
+ /* Index of the next-to-process and last RX DMA descriptor */
+ int next_desc_to_proc;
+ int last_desc;
+
/* RX queue number, in the range 0-31 for physical RXQs */
u8 id;
+ /* Port's logic RXQ number to which physical RXQ is mapped */
+ u8 logic_rxq;
+
+ /* Num of RXed packets seen in HW but meanwhile not handled by SW */
+ u16 rx_pending;
+
/* Num of rx descriptors in the rx descriptor ring */
int size;
u32 pkts_coal;
u32 time_coal;
- /* Virtual address of the RX DMA descriptors array */
- struct mvpp2_rx_desc *descs;
-
/* DMA address of the RX DMA descriptors array */
dma_addr_t descs_dma;
- /* Index of the last RX DMA descriptor */
- int last_desc;
-
- /* Index of the next RX DMA descriptor to process */
- int next_desc_to_proc;
-
/* ID of port to which physical RXQ is mapped */
int port;
- /* Port's logic RXQ number to which physical RXQ is mapped */
- int logic_rxq;
-
/* XDP memory accounting */
struct xdp_rxq_info xdp_rxq_short;
struct xdp_rxq_info xdp_rxq_long;
-};
+} __aligned(L1_CACHE_BYTES);
+
+enum mvpp2_bm_pool_type {
+ MVPP2_BM_SHORT,
+ MVPP2_BM_JUMBO,
+ MVPP2_BM_LONG,
+};
struct mvpp2_bm_pool {
/* Pool number in the range 0-7 */
@@ -1411,6 +1591,9 @@ struct mvpp2_bm_pool {
int pkt_size;
int frag_size;
+ /* Pool type (short/long/jumbo) */
+ enum mvpp2_bm_pool_type type;
+
/* BPPE virtual base address */
u32 *virt_addr;
/* BPPE DMA base address */
@@ -1420,20 +1603,123 @@ struct mvpp2_bm_pool {
u32 port_map;
};
+#define MVPP2_BM_POOLS_NUM (recycle ? (2 + num_present_cpus()) : 3)
+#define MVPP2_BM_POOLS_NUM_MAX 8
+
#define IS_TSO_HEADER(txq_pcpu, addr) \
((addr) >= (txq_pcpu)->tso_headers_dma && \
(addr) < (txq_pcpu)->tso_headers_dma + \
(txq_pcpu)->size * TSO_HEADER_SIZE)
+#define TSO_HEADER_MARK ((void *)BIT(0))
#define MVPP2_DRIVER_NAME "mvpp2"
#define MVPP2_DRIVER_VERSION "1.0"
-void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data);
-u32 mvpp2_read(struct mvpp2 *priv, u32 offset);
+/* Run-time critical Utility/helper methods */
+static inline
+void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
+{
+ writel(data, priv->swth_base[0] + offset);
+}
-void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name);
+static inline
+u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
+{
+ return readl(priv->swth_base[0] + offset);
+}
+
+static inline
+u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset)
+{
+ return readl_relaxed(priv->swth_base[0] + offset);
+}
+
+static inline
+u32 mvpp2_cpu_to_thread(struct mvpp2 *priv, int cpu)
+{
+ return cpu % priv->nthreads;
+}
+
+static inline
+void mvpp2_cm3_write(struct mvpp2 *priv, u32 offset, u32 data)
+{
+ writel(data, priv->cm3_base + offset);
+}
+
+static inline
+u32 mvpp2_cm3_read(struct mvpp2 *priv, u32 offset)
+{
+ return readl(priv->cm3_base + offset);
+}
+/* These accessors should be used to access:
+ *
+ * - per-thread registers, where each thread has its own copy of the
+ * register.
+ *
+ * MVPP2_BM_VIRT_ALLOC_REG
+ * MVPP2_BM_ADDR_HIGH_ALLOC
+ * MVPP22_BM_ADDR_HIGH_RLS_REG
+ * MVPP2_BM_VIRT_RLS_REG
+ * MVPP2_ISR_RX_TX_CAUSE_REG
+ * MVPP2_ISR_RX_TX_MASK_REG
+ * MVPP2_TXQ_NUM_REG
+ * MVPP2_AGGR_TXQ_UPDATE_REG
+ * MVPP2_TXQ_RSVD_REQ_REG
+ * MVPP2_TXQ_RSVD_RSLT_REG
+ * MVPP2_TXQ_SENT_REG
+ * MVPP2_RXQ_NUM_REG
+ *
+ * - global registers that must be accessed through a specific thread
+ * window, because they are related to an access to a per-thread
+ * register
+ *
+ * MVPP2_BM_PHY_ALLOC_REG (related to MVPP2_BM_VIRT_ALLOC_REG)
+ * MVPP2_BM_PHY_RLS_REG (related to MVPP2_BM_VIRT_RLS_REG)
+ * MVPP2_RXQ_THRESH_REG (related to MVPP2_RXQ_NUM_REG)
+ * MVPP2_RXQ_DESC_ADDR_REG (related to MVPP2_RXQ_NUM_REG)
+ * MVPP2_RXQ_DESC_SIZE_REG (related to MVPP2_RXQ_NUM_REG)
+ * MVPP2_RXQ_INDEX_REG (related to MVPP2_RXQ_NUM_REG)
+ * MVPP2_TXQ_PENDING_REG (related to MVPP2_TXQ_NUM_REG)
+ * MVPP2_TXQ_DESC_ADDR_REG (related to MVPP2_TXQ_NUM_REG)
+ * MVPP2_TXQ_DESC_SIZE_REG (related to MVPP2_TXQ_NUM_REG)
+ * MVPP2_TXQ_INDEX_REG (related to MVPP2_TXQ_NUM_REG)
+ * MVPP2_TXQ_PENDING_REG (related to MVPP2_TXQ_NUM_REG)
+ * MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG)
+ * MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG)
+ */
+static inline
+void mvpp2_thread_write(struct mvpp2 *priv, unsigned int thread,
+ u32 offset, u32 data)
+{
+ writel(data, priv->swth_base[thread] + offset);
+}
+
+static inline
+u32 mvpp2_thread_read(struct mvpp2 *priv, unsigned int thread, u32 offset)
+{
+ return readl(priv->swth_base[thread] + offset);
+}
+
+static inline
+void mvpp2_thread_write_relaxed(struct mvpp2 *priv, unsigned int thread,
+ u32 offset, u32 data)
+{
+ writel_relaxed(data, priv->swth_base[thread] + offset);
+}
+
+static inline
+u32 mvpp2_thread_read_relaxed(struct mvpp2 *priv, unsigned int thread,
+ u32 offset)
+{
+ return readl_relaxed(priv->swth_base[thread] + offset);
+}
+
+void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name);
void mvpp2_dbgfs_cleanup(struct mvpp2 *priv);
+void mvpp23_rx_fifo_fc_en(struct mvpp2 *priv, int port, bool en);
+u32 mvpp2_get_tc_width(struct mvpp2_port *port);
+int mvpp22_rss_fill_table_per_tc(struct mvpp2_port *port);
#ifdef CONFIG_MVPP2_PTP
int mvpp22_tai_probe(struct device *dev, struct mvpp2 *priv);
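
The long register-group comment above spells out which registers are per-thread; the sketch below shows how the new inline accessors are meant to be combined, using the buffer-manager allocation registers named in that comment. This is a hypothetical illustration rather than driver code, and it assumes MVPP2_BM_PHY_ALLOC_REG()/MVPP2_BM_VIRT_ALLOC_REG are defined elsewhere in mvpp2.h:

/* Hypothetical illustration only -- not actual driver code. */
static inline u32 example_bm_buf_get(struct mvpp2 *priv, int pool,
				     dma_addr_t *dma_addr)
{
	/* Pin the CPU so both reads go through the same thread window. */
	unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu());
	u32 cookie;

	/* The two registers belong to the same per-thread group, so the
	 * VIRT read must use the same window as the PHY read before it.
	 */
	*dma_addr = mvpp2_thread_read(priv, thread,
				      MVPP2_BM_PHY_ALLOC_REG(pool));
	cookie = mvpp2_thread_read(priv, thread, MVPP2_BM_VIRT_ALLOC_REG);

	put_cpu();
	return cookie;
}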
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
index 41d935d1aaf6..8e7d4046cc4a 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
@@ -22,302 +22,302 @@
} \
}
-static const struct mvpp2_cls_flow cls_flows[MVPP2_N_PRS_FLOWS] = {
+static struct mvpp2_cls_flow cls_flows[MVPP2_N_FLOWS] = {
/* TCP over IPv4 flows, Not fragmented, no vlan tag */
- MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_UNTAG,
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG,
MVPP22_CLS_HEK_IP4_5T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_UNTAG,
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG,
MVPP22_CLS_HEK_IP4_5T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_UNTAG,
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG,
MVPP22_CLS_HEK_IP4_5T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
/* TCP over IPv4 flows, Not fragmented, with vlan tag */
- MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_TAG,
- MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG,
+ MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_TAG,
- MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG,
+ MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_TAG,
- MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG,
+ MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
/* TCP over IPv4 flows, fragmented, no vlan tag */
- MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
/* TCP over IPv4 flows, fragmented, with vlan tag */
- MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG,
- MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG,
- MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG,
- MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
/* UDP over IPv4 flows, Not fragmented, no vlan tag */
- MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_UNTAG,
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG,
MVPP22_CLS_HEK_IP4_5T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_UNTAG,
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG,
MVPP22_CLS_HEK_IP4_5T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_UNTAG,
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG,
MVPP22_CLS_HEK_IP4_5T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
/* UDP over IPv4 flows, Not fragmented, with vlan tag */
- MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_TAG,
- MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG,
+ MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_TAG,
- MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG,
+ MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_TAG,
- MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG,
+ MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
/* UDP over IPv4 flows, fragmented, no vlan tag */
- MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
/* UDP over IPv4 flows, fragmented, with vlan tag */
- MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG,
- MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG,
- MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG,
- MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
/* TCP over IPv6 flows, not fragmented, no vlan tag */
- MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_NF_UNTAG,
+ MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_UNTAG,
MVPP22_CLS_HEK_IP6_5T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_NF_UNTAG,
+ MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_UNTAG,
MVPP22_CLS_HEK_IP6_5T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
/* TCP over IPv6 flows, not fragmented, with vlan tag */
- MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_NF_TAG,
- MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_TAG,
+ MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_NF_TAG,
- MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_TAG,
+ MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
/* TCP over IPv6 flows, fragmented, no vlan tag */
- MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_FRAG_UNTAG,
+ MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP6_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_FRAG_UNTAG,
+ MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP6_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
/* TCP over IPv6 flows, fragmented, with vlan tag */
- MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_FRAG_TAG,
- MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE |
MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_FRAG_TAG,
- MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE |
MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
/* UDP over IPv6 flows, not fragmented, no vlan tag */
- MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_NF_UNTAG,
+ MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_UNTAG,
MVPP22_CLS_HEK_IP6_5T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_NF_UNTAG,
+ MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_UNTAG,
MVPP22_CLS_HEK_IP6_5T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
/* UDP over IPv6 flows, not fragmented, with vlan tag */
- MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_NF_TAG,
- MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_TAG,
+ MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_NF_TAG,
- MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_TAG,
+ MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
/* UDP over IPv6 flows, fragmented, no vlan tag */
- MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_FRAG_UNTAG,
+ MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP6_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_FRAG_UNTAG,
+ MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP6_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
/* UDP over IPv6 flows, fragmented, with vlan tag */
- MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_FRAG_TAG,
- MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE |
MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_FRAG_TAG,
- MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_TAG,
+ MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE |
MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
/* IPv4 flows, no vlan tag */
- MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_UNTAG,
+ MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4,
MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_UNTAG,
+ MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT,
MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_UNTAG,
+ MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER,
MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
/* IPv4 flows, with vlan tag */
- MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_TAG,
- MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4,
MVPP2_PRS_RI_L3_PROTO_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_TAG,
- MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4_OPT,
MVPP2_PRS_RI_L3_PROTO_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_TAG,
- MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG,
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4_OTHER,
MVPP2_PRS_RI_L3_PROTO_MASK),
/* IPv6 flows, no vlan tag */
- MVPP2_DEF_FLOW(MVPP22_FLOW_IP6, MVPP2_FL_IP6_UNTAG,
+ MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_UNTAG,
MVPP22_CLS_HEK_IP6_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6,
MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_IP6, MVPP2_FL_IP6_UNTAG,
+ MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_UNTAG,
MVPP22_CLS_HEK_IP6_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6,
MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
/* IPv6 flows, with vlan tag */
- MVPP2_DEF_FLOW(MVPP22_FLOW_IP6, MVPP2_FL_IP6_TAG,
- MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_TAG,
+ MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP6,
MVPP2_PRS_RI_L3_PROTO_MASK),
- MVPP2_DEF_FLOW(MVPP22_FLOW_IP6, MVPP2_FL_IP6_TAG,
- MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED,
+ MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_TAG,
+ MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP6,
MVPP2_PRS_RI_L3_PROTO_MASK),
/* Non IP flow, no vlan tag */
- MVPP2_DEF_FLOW(MVPP22_FLOW_ETHERNET, MVPP2_FL_NON_IP_UNTAG,
+ MVPP2_DEF_FLOW(ETHER_FLOW, MVPP2_FL_NON_IP_UNTAG,
0,
MVPP2_PRS_RI_VLAN_NONE,
MVPP2_PRS_RI_VLAN_MASK),
/* Non IP flow, with vlan tag */
- MVPP2_DEF_FLOW(MVPP22_FLOW_ETHERNET, MVPP2_FL_NON_IP_TAG,
+ MVPP2_DEF_FLOW(ETHER_FLOW, MVPP2_FL_NON_IP_TAG,
MVPP22_CLS_HEK_OPT_VLAN,
0, 0),
};
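
The table above now keys each entry directly on the ethtool flow-type constants (UDP_V6_FLOW, IPV4_FLOW, ETHER_FLOW, ...) rather than the driver-private MVPP22_FLOW_* values, so several cls_flows[] entries share one flow type. As a minimal sketch (not part of the patch), this is how a caller could walk the table for one ethtool flow type, mirroring the loop used later in mvpp2_port_rss_hash_opts_set(); example_count_entries_for_type() is a hypothetical helper name.

static int example_count_entries_for_type(int flow_type)
{
	struct mvpp2_cls_flow *flow;
	int i, n = 0;

	for (i = 0; i < MVPP2_N_FLOWS; i++) {
		/* Defensive: mvpp2_cls_flow_get() returns NULL out of range */
		flow = mvpp2_cls_flow_get(i);
		if (!flow)
			break;

		/* Tagged, untagged and fragmented variants share a type */
		if (flow->flow_type == flow_type)
			n++;
	}

	return n;
}

For instance, example_count_entries_for_type(UDP_V6_FLOW) would count the tagged, untagged and fragmented UDP-over-IPv6 entries listed above.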
@@ -344,9 +344,9 @@ static void mvpp2_cls_flow_write(struct mvpp2 *priv,
struct mvpp2_cls_flow_entry *fe)
{
mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
- mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
- mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
- mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
+ mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
+ mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
+ mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
}
u32 mvpp2_cls_lookup_hits(struct mvpp2 *priv, int index)
@@ -436,6 +436,19 @@ static void mvpp2_cls_flow_last_set(struct mvpp2_cls_flow_entry *fe,
fe->data[0] |= !!is_last;
}
+static bool mvpp2_cls_flow_last_get(struct mvpp2_cls_flow_entry *fe)
+{
+ return (fe->data[0] & MVPP2_CLS_FLOW_TBL0_LAST);
+}
+
+static void mvpp2_cls_flow_lkp_type_set(struct mvpp2_cls_flow_entry *fe,
+ int lkp_type)
+{
+ fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_LKP_TYPE(
+ MVPP2_CLS_FLOW_TBL1_LKP_TYPE_MASK);
+ fe->data[1] |= MVPP2_CLS_FLOW_TBL1_LKP_TYPE(lkp_type);
+}
+
static void mvpp2_cls_flow_pri_set(struct mvpp2_cls_flow_entry *fe, int prio)
{
fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_PRIO(MVPP2_CLS_FLOW_TBL1_PRIO_MASK);
@@ -448,22 +461,14 @@ static void mvpp2_cls_flow_port_add(struct mvpp2_cls_flow_entry *fe,
fe->data[0] |= MVPP2_CLS_FLOW_TBL0_PORT_ID(port);
}
-static void mvpp2_cls_flow_port_remove(struct mvpp2_cls_flow_entry *fe,
- u32 port)
-{
- fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_PORT_ID(port);
-}
-
-static void mvpp2_cls_flow_lu_type_set(struct mvpp2_cls_flow_entry *fe,
- u8 lu_type)
+static int mvpp2_cls_flow_port_get(struct mvpp2_cls_flow_entry *fe)
{
- fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_LU_TYPE(MVPP2_CLS_LU_TYPE_MASK);
- fe->data[1] |= MVPP2_CLS_FLOW_TBL1_LU_TYPE(lu_type);
+ return ((fe->data[0] >> 4) & MVPP2_CLS_FLOW_TBL0_PORT_ID_MASK);
}
/* Initialize the parser entry for the given flow */
static void mvpp2_cls_flow_prs_init(struct mvpp2 *priv,
- const struct mvpp2_cls_flow *flow)
+ struct mvpp2_cls_flow *flow)
{
mvpp2_prs_add_flow(priv, flow->flow_id, flow->prs_ri.ri,
flow->prs_ri.ri_mask);
@@ -471,7 +476,7 @@ static void mvpp2_cls_flow_prs_init(struct mvpp2 *priv,
/* Initialize the Lookup Id table entry for the given flow */
static void mvpp2_cls_flow_lkp_init(struct mvpp2 *priv,
- const struct mvpp2_cls_flow *flow)
+ struct mvpp2_cls_flow *flow)
{
struct mvpp2_cls_lookup_entry le;
@@ -484,7 +489,7 @@ static void mvpp2_cls_flow_lkp_init(struct mvpp2 *priv,
	/* We point to the first lookup in the sequence for the flow, that is
* the C2 lookup.
*/
- le.data |= MVPP2_CLS_LKP_FLOW_PTR(MVPP2_CLS_FLT_FIRST(flow->flow_id));
+ le.data |= MVPP2_CLS_LKP_FLOW_PTR(MVPP2_FLOW_C2_ENTRY(flow->flow_id));
/* CLS is always enabled, RSS is enabled/disabled in C2 lookup */
le.data |= MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
@@ -492,113 +497,21 @@ static void mvpp2_cls_flow_lkp_init(struct mvpp2 *priv,
mvpp2_cls_lookup_write(priv, &le);
}
-static void mvpp2_cls_c2_write(struct mvpp2 *priv,
- struct mvpp2_cls_c2_entry *c2)
-{
- u32 val;
- mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, c2->index);
-
- val = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_INV);
- if (c2->valid)
- val &= ~MVPP22_CLS_C2_TCAM_INV_BIT;
- else
- val |= MVPP22_CLS_C2_TCAM_INV_BIT;
- mvpp2_write(priv, MVPP22_CLS_C2_TCAM_INV, val);
-
- mvpp2_write(priv, MVPP22_CLS_C2_ACT, c2->act);
-
- mvpp2_write(priv, MVPP22_CLS_C2_ATTR0, c2->attr[0]);
- mvpp2_write(priv, MVPP22_CLS_C2_ATTR1, c2->attr[1]);
- mvpp2_write(priv, MVPP22_CLS_C2_ATTR2, c2->attr[2]);
- mvpp2_write(priv, MVPP22_CLS_C2_ATTR3, c2->attr[3]);
-
- mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA0, c2->tcam[0]);
- mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA1, c2->tcam[1]);
- mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA2, c2->tcam[2]);
- mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA3, c2->tcam[3]);
- /* Writing TCAM_DATA4 flushes writes to TCAM_DATA0-4 and INV to HW */
- mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA4, c2->tcam[4]);
-}
-
-void mvpp2_cls_c2_read(struct mvpp2 *priv, int index,
- struct mvpp2_cls_c2_entry *c2)
-{
- u32 val;
- mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, index);
-
- c2->index = index;
-
- c2->tcam[0] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA0);
- c2->tcam[1] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA1);
- c2->tcam[2] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA2);
- c2->tcam[3] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA3);
- c2->tcam[4] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA4);
-
- c2->act = mvpp2_read(priv, MVPP22_CLS_C2_ACT);
-
- c2->attr[0] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR0);
- c2->attr[1] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR1);
- c2->attr[2] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR2);
- c2->attr[3] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR3);
-
- val = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_INV);
- c2->valid = !(val & MVPP22_CLS_C2_TCAM_INV_BIT);
-}
-
-static int mvpp2_cls_ethtool_flow_to_type(int flow_type)
-{
- switch (flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS)) {
- case ETHER_FLOW:
- return MVPP22_FLOW_ETHERNET;
- case TCP_V4_FLOW:
- return MVPP22_FLOW_TCP4;
- case TCP_V6_FLOW:
- return MVPP22_FLOW_TCP6;
- case UDP_V4_FLOW:
- return MVPP22_FLOW_UDP4;
- case UDP_V6_FLOW:
- return MVPP22_FLOW_UDP6;
- case IPV4_FLOW:
- return MVPP22_FLOW_IP4;
- case IPV6_FLOW:
- return MVPP22_FLOW_IP6;
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static int mvpp2_cls_c2_port_flow_index(struct mvpp2_port *port, int loc)
-{
- return MVPP22_CLS_C2_RFS_LOC(port->id, loc);
-}
-
/* Initialize the flow table entries for the given flow */
-static void mvpp2_cls_flow_init(struct mvpp2 *priv,
- const struct mvpp2_cls_flow *flow)
+static void mvpp2_cls_flow_init(struct mvpp2 *priv, struct mvpp2_cls_flow *flow)
{
struct mvpp2_cls_flow_entry fe;
- int i, pri = 0;
-
- /* Assign default values to all entries in the flow */
- for (i = MVPP2_CLS_FLT_FIRST(flow->flow_id);
- i <= MVPP2_CLS_FLT_LAST(flow->flow_id); i++) {
- memset(&fe, 0, sizeof(fe));
- fe.index = i;
- mvpp2_cls_flow_pri_set(&fe, pri++);
-
- if (i == MVPP2_CLS_FLT_LAST(flow->flow_id))
- mvpp2_cls_flow_last_set(&fe, 1);
-
- mvpp2_cls_flow_write(priv, &fe);
- }
+ int i;
- /* RSS config C2 lookup */
- mvpp2_cls_flow_read(priv, MVPP2_CLS_FLT_C2_RSS_ENTRY(flow->flow_id),
- &fe);
+ /* C2 lookup */
+ memset(&fe, 0, sizeof(fe));
+ fe.index = MVPP2_FLOW_C2_ENTRY(flow->flow_id);
mvpp2_cls_flow_eng_set(&fe, MVPP22_CLS_ENGINE_C2);
mvpp2_cls_flow_port_id_sel(&fe, true);
- mvpp2_cls_flow_lu_type_set(&fe, MVPP22_CLS_LU_TYPE_ALL);
+ mvpp2_cls_flow_last_set(&fe, 0);
+ mvpp2_cls_flow_pri_set(&fe, 0);
+ mvpp2_cls_flow_lkp_type_set(&fe, MVPP2_CLS_LKP_DEFAULT);
/* Add all ports */
for (i = 0; i < MVPP2_MAX_PORTS; i++)
@@ -608,19 +521,22 @@ static void mvpp2_cls_flow_init(struct mvpp2 *priv,
/* C3Hx lookups */
for (i = 0; i < MVPP2_MAX_PORTS; i++) {
- mvpp2_cls_flow_read(priv,
- MVPP2_CLS_FLT_HASH_ENTRY(i, flow->flow_id),
- &fe);
+ memset(&fe, 0, sizeof(fe));
+ fe.index = MVPP2_PORT_FLOW_INDEX(i, flow->flow_id);
- /* Set a default engine. Will be overwritten when setting the
- * real HEK parameters
- */
mvpp2_cls_flow_eng_set(&fe, MVPP22_CLS_ENGINE_C3HA);
mvpp2_cls_flow_port_id_sel(&fe, true);
+ mvpp2_cls_flow_pri_set(&fe, i + 1);
mvpp2_cls_flow_port_add(&fe, BIT(i));
+ mvpp2_cls_flow_lkp_type_set(&fe, MVPP2_CLS_LKP_HASH);
mvpp2_cls_flow_write(priv, &fe);
}
+
+ /* Update the last entry */
+ mvpp2_cls_flow_last_set(&fe, 1);
+
+ mvpp2_cls_flow_write(priv, &fe);
}
 /* Adds a field to the Header Extracted Key generation parameters */
@@ -639,6 +555,20 @@ static int mvpp2_flow_add_hek_field(struct mvpp2_cls_flow_entry *fe,
return 0;
}
+static void mvpp2_cls_c2_inv_set(struct mvpp2 *priv,
+ int index)
+{
+ /* write index reg */
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, index);
+
+ /* set invalid bit */
+ mvpp2_write(priv, MVPP2_CLS2_TCAM_INV_REG,
+ (1 << MVPP2_CLS2_TCAM_INV_INVALID));
+
+	/* Trigger: writing TCAM_DATA4 flushes the invalidation to hardware */
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA4, 0);
+}
+
static int mvpp2_flow_set_hek_fields(struct mvpp2_cls_flow_entry *fe,
unsigned long hash_opts)
{
@@ -651,15 +581,9 @@ static int mvpp2_flow_set_hek_fields(struct mvpp2_cls_flow_entry *fe,
for_each_set_bit(i, &hash_opts, MVPP22_CLS_HEK_N_FIELDS) {
switch (BIT(i)) {
- case MVPP22_CLS_HEK_OPT_MAC_DA:
- field_id = MVPP22_CLS_FIELD_MAC_DA;
- break;
case MVPP22_CLS_HEK_OPT_VLAN:
field_id = MVPP22_CLS_FIELD_VLAN;
break;
- case MVPP22_CLS_HEK_OPT_VLAN_PRI:
- field_id = MVPP22_CLS_FIELD_VLAN_PRI;
- break;
case MVPP22_CLS_HEK_OPT_IP4SA:
field_id = MVPP22_CLS_FIELD_IP4SA;
break;
@@ -688,36 +612,42 @@ static int mvpp2_flow_set_hek_fields(struct mvpp2_cls_flow_entry *fe,
return 0;
}
-/* Returns the size, in bits, of the corresponding HEK field */
-static int mvpp2_cls_hek_field_size(u32 field)
+struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow)
{
- switch (field) {
- case MVPP22_CLS_HEK_OPT_MAC_DA:
- return 48;
- case MVPP22_CLS_HEK_OPT_VLAN:
- return 12;
- case MVPP22_CLS_HEK_OPT_VLAN_PRI:
- return 3;
- case MVPP22_CLS_HEK_OPT_IP4SA:
- case MVPP22_CLS_HEK_OPT_IP4DA:
- return 32;
- case MVPP22_CLS_HEK_OPT_IP6SA:
- case MVPP22_CLS_HEK_OPT_IP6DA:
- return 128;
- case MVPP22_CLS_HEK_OPT_L4SIP:
- case MVPP22_CLS_HEK_OPT_L4DIP:
- return 16;
- default:
- return -1;
- }
+ if (flow >= MVPP2_N_FLOWS)
+ return NULL;
+
+ return &cls_flows[flow];
}
-const struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow)
+int mvpp2_cls_flow_hash_find(struct mvpp2_port *port,
+ struct mvpp2_cls_flow *flow,
+ struct mvpp2_cls_flow_entry *fe,
+ int *flow_index)
{
- if (flow >= MVPP2_N_PRS_FLOWS)
- return NULL;
+ int engine, flow_offset, port_bm, idx = 0, is_last = 0;
- return &cls_flows[flow];
+ flow_offset = 0;
+ do {
+ idx = MVPP2_PORT_FLOW_INDEX(flow_offset, flow->flow_id);
+ if (idx >= MVPP2_CLS_FLOWS_TBL_SIZE)
+ break;
+ mvpp2_cls_flow_read(port->priv, idx, fe);
+ engine = mvpp2_cls_flow_eng_get(fe);
+ port_bm = mvpp2_cls_flow_port_get(fe);
+ is_last = mvpp2_cls_flow_last_get(fe);
+ if ((engine == MVPP22_CLS_ENGINE_C3HA ||
+ engine == MVPP22_CLS_ENGINE_C3HB) &&
+ (port_bm & BIT(port->id)))
+ break;
+ flow_offset++;
+ } while (!is_last);
+
+ *flow_index = idx;
+ if (is_last)
+ return -EINVAL;
+
+ return 0;
}
/* Set the hash generation options for the given traffic flow.
@@ -734,17 +664,21 @@ const struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow)
static int mvpp2_port_rss_hash_opts_set(struct mvpp2_port *port, int flow_type,
u16 requested_opts)
{
- const struct mvpp2_cls_flow *flow;
struct mvpp2_cls_flow_entry fe;
+ struct mvpp2_cls_flow *flow;
int i, engine, flow_index;
u16 hash_opts;
- for_each_cls_flow_id_with_type(i, flow_type) {
+ for (i = 0; i < MVPP2_N_FLOWS; i++) {
flow = mvpp2_cls_flow_get(i);
if (!flow)
return -EINVAL;
- flow_index = MVPP2_CLS_FLT_HASH_ENTRY(port->id, flow->flow_id);
+ if (flow->flow_type != flow_type)
+ continue;
+
+ if (mvpp2_cls_flow_hash_find(port, flow, &fe, &flow_index))
+ return -EINVAL;
mvpp2_cls_flow_read(port->priv, flow_index, &fe);
@@ -786,9 +720,6 @@ u16 mvpp2_flow_get_hek_fields(struct mvpp2_cls_flow_entry *fe)
case MVPP22_CLS_FIELD_VLAN:
hash_opts |= MVPP22_CLS_HEK_OPT_VLAN;
break;
- case MVPP22_CLS_FIELD_VLAN_PRI:
- hash_opts |= MVPP22_CLS_HEK_OPT_VLAN_PRI;
- break;
case MVPP22_CLS_FIELD_L3_PROTO:
hash_opts |= MVPP22_CLS_HEK_OPT_L3_PROTO;
break;
@@ -822,17 +753,21 @@ u16 mvpp2_flow_get_hek_fields(struct mvpp2_cls_flow_entry *fe)
*/
static u16 mvpp2_port_rss_hash_opts_get(struct mvpp2_port *port, int flow_type)
{
- const struct mvpp2_cls_flow *flow;
struct mvpp2_cls_flow_entry fe;
+ struct mvpp2_cls_flow *flow;
int i, flow_index;
u16 hash_opts = 0;
- for_each_cls_flow_id_with_type(i, flow_type) {
+ for (i = 0; i < MVPP2_N_FLOWS; i++) {
flow = mvpp2_cls_flow_get(i);
if (!flow)
return 0;
- flow_index = MVPP2_CLS_FLT_HASH_ENTRY(port->id, flow->flow_id);
+ if (flow->flow_type != flow_type)
+ continue;
+
+ if (mvpp2_cls_flow_hash_find(port, flow, &fe, &flow_index))
+ return 0;
mvpp2_cls_flow_read(port->priv, flow_index, &fe);
@@ -844,10 +779,10 @@ static u16 mvpp2_port_rss_hash_opts_get(struct mvpp2_port *port, int flow_type)
static void mvpp2_cls_port_init_flows(struct mvpp2 *priv)
{
- const struct mvpp2_cls_flow *flow;
+ struct mvpp2_cls_flow *flow;
int i;
- for (i = 0; i < MVPP2_N_PRS_FLOWS; i++) {
+ for (i = 0; i < MVPP2_N_FLOWS; i++) {
flow = mvpp2_cls_flow_get(i);
if (!flow)
break;
@@ -858,6 +793,51 @@ static void mvpp2_cls_port_init_flows(struct mvpp2 *priv)
}
}
+static void mvpp2_cls_c2_write(struct mvpp2 *priv,
+ struct mvpp2_cls_c2_entry *c2)
+{
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, c2->index);
+
+ mvpp2_write(priv, MVPP22_CLS_C2_ACT, c2->act);
+
+ mvpp2_write(priv, MVPP22_CLS_C2_ATTR0, c2->attr[0]);
+ mvpp2_write(priv, MVPP22_CLS_C2_ATTR1, c2->attr[1]);
+ mvpp2_write(priv, MVPP22_CLS_C2_ATTR2, c2->attr[2]);
+ mvpp2_write(priv, MVPP22_CLS_C2_ATTR3, c2->attr[3]);
+
+	/* Mark the entry as valid by clearing the invalid bit */
+ mvpp2_write(priv, MVPP2_CLS2_TCAM_INV_REG,
+ (0 << MVPP2_CLS2_TCAM_INV_INVALID));
+
+	/* Write TCAM; TCAM_DATA4 is written last to flush the entry to hardware */
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA0, c2->tcam[0]);
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA1, c2->tcam[1]);
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA2, c2->tcam[2]);
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA3, c2->tcam[3]);
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA4, c2->tcam[4]);
+}
+
+void mvpp2_cls_c2_read(struct mvpp2 *priv, int index,
+ struct mvpp2_cls_c2_entry *c2)
+{
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, index);
+
+ c2->index = index;
+
+ c2->tcam[0] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA0);
+ c2->tcam[1] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA1);
+ c2->tcam[2] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA2);
+ c2->tcam[3] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA3);
+ c2->tcam[4] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA4);
+
+ c2->act = mvpp2_read(priv, MVPP22_CLS_C2_ACT);
+
+ c2->attr[0] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR0);
+ c2->attr[1] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR1);
+ c2->attr[2] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR2);
+ c2->attr[3] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR3);
+}
+
static void mvpp2_port_c2_cls_init(struct mvpp2_port *port)
{
struct mvpp2_cls_c2_entry c2;
@@ -871,9 +851,9 @@ static void mvpp2_port_c2_cls_init(struct mvpp2_port *port)
c2.tcam[4] = MVPP22_CLS_C2_PORT_ID(pmap);
c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_PORT_ID(pmap));
- /* Match on Lookup Type */
- c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_LU_TYPE(MVPP2_CLS_LU_TYPE_MASK));
- c2.tcam[4] |= MVPP22_CLS_C2_LU_TYPE(MVPP22_CLS_LU_TYPE_ALL);
+ /* Set lkp_type */
+ c2.tcam[4] |= MVPP22_CLS_C2_LKP_TYPE(MVPP2_CLS_LKP_DEFAULT);
+ c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_LKP_TYPE_MASK);
/* Update RSS status after matching this entry */
c2.act = MVPP22_CLS_C2_ACT_RSS_EN(MVPP22_C2_UPD_LOCK);
@@ -893,17 +873,27 @@ static void mvpp2_port_c2_cls_init(struct mvpp2_port *port)
c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) |
MVPP22_CLS_C2_ATTR0_QLOW(ql);
- c2.valid = true;
-
mvpp2_cls_c2_write(port->priv, &c2);
}
+static void mvpp2_cls_c2_init(struct mvpp2 *priv)
+{
+ int index;
+
+ /* Toggle C2 from Built-In Self-Test mode to Functional mode */
+ mvpp2_write(priv, MVPP2_CLS2_TCAM_CTRL_REG,
+ MVPP2_CLS2_TCAM_CTRL_BYPASS_FIFO_STAGES);
+
+ /* Invalidate all C2 entries */
+ for (index = 0; index < MVPP22_CLS_C2_MAX_ENTRIES; index++)
+ mvpp2_cls_c2_inv_set(priv, index);
+}
+
/* Classifier default initialization */
void mvpp2_cls_init(struct mvpp2 *priv)
{
struct mvpp2_cls_lookup_entry le;
struct mvpp2_cls_flow_entry fe;
- struct mvpp2_cls_c2_entry c2;
int index;
/* Enable classifier */
@@ -927,21 +917,15 @@ void mvpp2_cls_init(struct mvpp2 *priv)
mvpp2_cls_lookup_write(priv, &le);
}
- /* Clear C2 TCAM engine table */
- memset(&c2, 0, sizeof(c2));
- c2.valid = false;
- for (index = 0; index < MVPP22_CLS_C2_N_ENTRIES; index++) {
- c2.index = index;
- mvpp2_cls_c2_write(priv, &c2);
- }
-
- /* Disable the FIFO stages in C2 engine, which are only used in BIST
- * mode
+ /* Clear CLS_SWFWD_PCTRL register - value of QueueHigh is defined by
+ * the Classifier
*/
- mvpp2_write(priv, MVPP22_CLS_C2_TCAM_CTRL,
- MVPP22_CLS_C2_TCAM_BYPASS_FIFO);
+ mvpp2_write(priv, MVPP2_CLS_SWFWD_PCTRL_REG, 0);
mvpp2_cls_port_init_flows(priv);
+
+ /* Initialize C2 */
+ mvpp2_cls_c2_init(priv);
}
void mvpp2_cls_port_config(struct mvpp2_port *port)
@@ -981,22 +965,12 @@ u32 mvpp2_cls_c2_hit_count(struct mvpp2 *priv, int c2_index)
return mvpp2_read(priv, MVPP22_CLS_C2_HIT_CTR);
}
-static void mvpp2_rss_port_c2_enable(struct mvpp2_port *port, u32 ctx)
+static void mvpp2_rss_port_c2_enable(struct mvpp2_port *port)
{
struct mvpp2_cls_c2_entry c2;
- u8 qh, ql;
mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);
- /* The RxQ number is used to select the RSS table. It that case, we set
- * it to be the ctx number.
- */
- qh = (ctx >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
- ql = ctx & MVPP22_CLS_C2_ATTR0_QLOW_MASK;
-
- c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) |
- MVPP22_CLS_C2_ATTR0_QLOW(ql);
-
c2.attr[2] |= MVPP22_CLS_C2_ATTR2_RSS_EN;
mvpp2_cls_c2_write(port->priv, &c2);
@@ -1005,446 +979,29 @@ static void mvpp2_rss_port_c2_enable(struct mvpp2_port *port, u32 ctx)
static void mvpp2_rss_port_c2_disable(struct mvpp2_port *port)
{
struct mvpp2_cls_c2_entry c2;
- u8 qh, ql;
mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);
- /* Reset the default destination RxQ to the port's first rx queue. */
- qh = (port->first_rxq >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
- ql = port->first_rxq & MVPP22_CLS_C2_ATTR0_QLOW_MASK;
-
- c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) |
- MVPP22_CLS_C2_ATTR0_QLOW(ql);
-
c2.attr[2] &= ~MVPP22_CLS_C2_ATTR2_RSS_EN;
mvpp2_cls_c2_write(port->priv, &c2);
}
-static inline int mvpp22_rss_ctx(struct mvpp2_port *port, int port_rss_ctx)
-{
- return port->rss_ctx[port_rss_ctx];
-}
-
-int mvpp22_port_rss_enable(struct mvpp2_port *port)
+void mvpp22_rss_enable(struct mvpp2_port *port)
{
- if (mvpp22_rss_ctx(port, 0) < 0)
- return -EINVAL;
-
- mvpp2_rss_port_c2_enable(port, mvpp22_rss_ctx(port, 0));
-
- return 0;
+ mvpp2_rss_port_c2_enable(port);
}
-int mvpp22_port_rss_disable(struct mvpp2_port *port)
+void mvpp22_rss_disable(struct mvpp2_port *port)
{
- if (mvpp22_rss_ctx(port, 0) < 0)
- return -EINVAL;
-
mvpp2_rss_port_c2_disable(port);
-
- return 0;
-}
-
-static void mvpp22_port_c2_lookup_disable(struct mvpp2_port *port, int entry)
-{
- struct mvpp2_cls_c2_entry c2;
-
- mvpp2_cls_c2_read(port->priv, entry, &c2);
-
- /* Clear the port map so that the entry doesn't match anymore */
- c2.tcam[4] &= ~(MVPP22_CLS_C2_PORT_ID(BIT(port->id)));
-
- mvpp2_cls_c2_write(port->priv, &c2);
}
/* Set CPU queue number for oversize packets */
void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
{
- u32 val;
-
mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);
-
- mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
- (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));
-
- val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
- val &= ~MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
- mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
-}
-
-static int mvpp2_port_c2_tcam_rule_add(struct mvpp2_port *port,
- struct mvpp2_rfs_rule *rule)
-{
- struct flow_action_entry *act;
- struct mvpp2_cls_c2_entry c2;
- u8 qh, ql, pmap;
- int index, ctx;
-
- if (!flow_action_basic_hw_stats_check(&rule->flow->action, NULL))
- return -EOPNOTSUPP;
-
- memset(&c2, 0, sizeof(c2));
-
- index = mvpp2_cls_c2_port_flow_index(port, rule->loc);
- if (index < 0)
- return -EINVAL;
- c2.index = index;
-
- act = &rule->flow->action.entries[0];
-
- rule->c2_index = c2.index;
-
- c2.tcam[3] = (rule->c2_tcam & 0xffff) |
- ((rule->c2_tcam_mask & 0xffff) << 16);
- c2.tcam[2] = ((rule->c2_tcam >> 16) & 0xffff) |
- (((rule->c2_tcam_mask >> 16) & 0xffff) << 16);
- c2.tcam[1] = ((rule->c2_tcam >> 32) & 0xffff) |
- (((rule->c2_tcam_mask >> 32) & 0xffff) << 16);
- c2.tcam[0] = ((rule->c2_tcam >> 48) & 0xffff) |
- (((rule->c2_tcam_mask >> 48) & 0xffff) << 16);
-
- pmap = BIT(port->id);
- c2.tcam[4] = MVPP22_CLS_C2_PORT_ID(pmap);
- c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_PORT_ID(pmap));
-
- /* Match on Lookup Type */
- c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_LU_TYPE(MVPP2_CLS_LU_TYPE_MASK));
- c2.tcam[4] |= MVPP22_CLS_C2_LU_TYPE(rule->loc);
-
- if (act->id == FLOW_ACTION_DROP) {
- c2.act = MVPP22_CLS_C2_ACT_COLOR(MVPP22_C2_COL_RED_LOCK);
- } else {
- /* We want to keep the default color derived from the Header
- * Parser drop entries, for VLAN and MAC filtering. This will
- * assign a default color of Green or Red, and we want matches
- * with a non-drop action to keep that color.
- */
- c2.act = MVPP22_CLS_C2_ACT_COLOR(MVPP22_C2_COL_NO_UPD_LOCK);
-
- /* Update RSS status after matching this entry */
- if (act->queue.ctx)
- c2.attr[2] |= MVPP22_CLS_C2_ATTR2_RSS_EN;
-
- /* Always lock the RSS_EN decision. We might have high prio
- * rules steering to an RXQ, and a lower one steering to RSS,
- * we don't want the low prio RSS rule overwriting this flag.
- */
- c2.act = MVPP22_CLS_C2_ACT_RSS_EN(MVPP22_C2_UPD_LOCK);
-
- /* Mark packet as "forwarded to software", needed for RSS */
- c2.act |= MVPP22_CLS_C2_ACT_FWD(MVPP22_C2_FWD_SW_LOCK);
-
- c2.act |= MVPP22_CLS_C2_ACT_QHIGH(MVPP22_C2_UPD_LOCK) |
- MVPP22_CLS_C2_ACT_QLOW(MVPP22_C2_UPD_LOCK);
-
- if (act->queue.ctx) {
- /* Get the global ctx number */
- ctx = mvpp22_rss_ctx(port, act->queue.ctx);
- if (ctx < 0)
- return -EINVAL;
-
- qh = (ctx >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
- ql = ctx & MVPP22_CLS_C2_ATTR0_QLOW_MASK;
- } else {
- qh = ((act->queue.index + port->first_rxq) >> 3) &
- MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
- ql = (act->queue.index + port->first_rxq) &
- MVPP22_CLS_C2_ATTR0_QLOW_MASK;
- }
-
- c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) |
- MVPP22_CLS_C2_ATTR0_QLOW(ql);
- }
-
- c2.valid = true;
-
- mvpp2_cls_c2_write(port->priv, &c2);
-
- return 0;
-}
-
-static int mvpp2_port_c2_rfs_rule_insert(struct mvpp2_port *port,
- struct mvpp2_rfs_rule *rule)
-{
- return mvpp2_port_c2_tcam_rule_add(port, rule);
-}
-
-static int mvpp2_port_cls_rfs_rule_remove(struct mvpp2_port *port,
- struct mvpp2_rfs_rule *rule)
-{
- const struct mvpp2_cls_flow *flow;
- struct mvpp2_cls_flow_entry fe;
- int index, i;
-
- for_each_cls_flow_id_containing_type(i, rule->flow_type) {
- flow = mvpp2_cls_flow_get(i);
- if (!flow)
- return 0;
-
- index = MVPP2_CLS_FLT_C2_RFS(port->id, flow->flow_id, rule->loc);
-
- mvpp2_cls_flow_read(port->priv, index, &fe);
- mvpp2_cls_flow_port_remove(&fe, BIT(port->id));
- mvpp2_cls_flow_write(port->priv, &fe);
- }
-
- if (rule->c2_index >= 0)
- mvpp22_port_c2_lookup_disable(port, rule->c2_index);
-
- return 0;
-}
-
-static int mvpp2_port_flt_rfs_rule_insert(struct mvpp2_port *port,
- struct mvpp2_rfs_rule *rule)
-{
- const struct mvpp2_cls_flow *flow;
- struct mvpp2 *priv = port->priv;
- struct mvpp2_cls_flow_entry fe;
- int index, ret, i;
-
- if (rule->engine != MVPP22_CLS_ENGINE_C2)
- return -EOPNOTSUPP;
-
- ret = mvpp2_port_c2_rfs_rule_insert(port, rule);
- if (ret)
- return ret;
-
- for_each_cls_flow_id_containing_type(i, rule->flow_type) {
- flow = mvpp2_cls_flow_get(i);
- if (!flow)
- return 0;
-
- if ((rule->hek_fields & flow->supported_hash_opts) != rule->hek_fields)
- continue;
-
- index = MVPP2_CLS_FLT_C2_RFS(port->id, flow->flow_id, rule->loc);
-
- mvpp2_cls_flow_read(priv, index, &fe);
- mvpp2_cls_flow_eng_set(&fe, rule->engine);
- mvpp2_cls_flow_port_id_sel(&fe, true);
- mvpp2_flow_set_hek_fields(&fe, rule->hek_fields);
- mvpp2_cls_flow_lu_type_set(&fe, rule->loc);
- mvpp2_cls_flow_port_add(&fe, 0xf);
-
- mvpp2_cls_flow_write(priv, &fe);
- }
-
- return 0;
-}
-
-static int mvpp2_cls_c2_build_match(struct mvpp2_rfs_rule *rule)
-{
- struct flow_rule *flow = rule->flow;
- int offs = 0;
-
- /* The order of insertion in C2 tcam must match the order in which
- * the fields are found in the header
- */
- if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
- struct flow_match_vlan match;
-
- flow_rule_match_vlan(flow, &match);
- if (match.mask->vlan_id) {
- rule->hek_fields |= MVPP22_CLS_HEK_OPT_VLAN;
-
- rule->c2_tcam |= ((u64)match.key->vlan_id) << offs;
- rule->c2_tcam_mask |= ((u64)match.mask->vlan_id) << offs;
-
- /* Don't update the offset yet */
- }
-
- if (match.mask->vlan_priority) {
- rule->hek_fields |= MVPP22_CLS_HEK_OPT_VLAN_PRI;
-
- /* VLAN pri is always at offset 13 relative to the
- * current offset
- */
- rule->c2_tcam |= ((u64)match.key->vlan_priority) <<
- (offs + 13);
- rule->c2_tcam_mask |= ((u64)match.mask->vlan_priority) <<
- (offs + 13);
- }
-
- if (match.mask->vlan_dei)
- return -EOPNOTSUPP;
-
- /* vlan id and prio always seem to take a full 16-bit slot in
- * the Header Extracted Key.
- */
- offs += 16;
- }
-
- if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
- struct flow_match_ports match;
-
- flow_rule_match_ports(flow, &match);
- if (match.mask->src) {
- rule->hek_fields |= MVPP22_CLS_HEK_OPT_L4SIP;
-
- rule->c2_tcam |= ((u64)ntohs(match.key->src)) << offs;
- rule->c2_tcam_mask |= ((u64)ntohs(match.mask->src)) << offs;
- offs += mvpp2_cls_hek_field_size(MVPP22_CLS_HEK_OPT_L4SIP);
- }
-
- if (match.mask->dst) {
- rule->hek_fields |= MVPP22_CLS_HEK_OPT_L4DIP;
-
- rule->c2_tcam |= ((u64)ntohs(match.key->dst)) << offs;
- rule->c2_tcam_mask |= ((u64)ntohs(match.mask->dst)) << offs;
- offs += mvpp2_cls_hek_field_size(MVPP22_CLS_HEK_OPT_L4DIP);
- }
- }
-
- if (hweight16(rule->hek_fields) > MVPP2_FLOW_N_FIELDS)
- return -EOPNOTSUPP;
-
- return 0;
-}
-
-static int mvpp2_cls_rfs_parse_rule(struct mvpp2_rfs_rule *rule)
-{
- struct flow_rule *flow = rule->flow;
- struct flow_action_entry *act;
-
- if (!flow_action_basic_hw_stats_check(&rule->flow->action, NULL))
- return -EOPNOTSUPP;
-
- act = &flow->action.entries[0];
- if (act->id != FLOW_ACTION_QUEUE && act->id != FLOW_ACTION_DROP)
- return -EOPNOTSUPP;
-
- /* When both an RSS context and an queue index are set, the index
- * is considered as an offset to be added to the indirection table
- * entries. We don't support this, so reject this rule.
- */
- if (act->queue.ctx && act->queue.index)
- return -EOPNOTSUPP;
-
- /* For now, only use the C2 engine which has a HEK size limited to 64
- * bits for TCAM matching.
- */
- rule->engine = MVPP22_CLS_ENGINE_C2;
-
- if (mvpp2_cls_c2_build_match(rule))
- return -EINVAL;
-
- return 0;
-}
-
-int mvpp2_ethtool_cls_rule_get(struct mvpp2_port *port,
- struct ethtool_rxnfc *rxnfc)
-{
- struct mvpp2_ethtool_fs *efs;
-
- if (rxnfc->fs.location >= MVPP2_N_RFS_ENTRIES_PER_FLOW)
- return -EINVAL;
-
- efs = port->rfs_rules[rxnfc->fs.location];
- if (!efs)
- return -ENOENT;
-
- memcpy(rxnfc, &efs->rxnfc, sizeof(efs->rxnfc));
-
- return 0;
-}
-
-int mvpp2_ethtool_cls_rule_ins(struct mvpp2_port *port,
- struct ethtool_rxnfc *info)
-{
- struct ethtool_rx_flow_spec_input input = {};
- struct ethtool_rx_flow_rule *ethtool_rule;
- struct mvpp2_ethtool_fs *efs, *old_efs;
- int ret = 0;
-
- if (info->fs.location >= MVPP2_N_RFS_ENTRIES_PER_FLOW)
- return -EINVAL;
-
- efs = kzalloc(sizeof(*efs), GFP_KERNEL);
- if (!efs)
- return -ENOMEM;
-
- input.fs = &info->fs;
-
- /* We need to manually set the rss_ctx, since this info isn't present
- * in info->fs
- */
- if (info->fs.flow_type & FLOW_RSS)
- input.rss_ctx = info->rss_context;
-
- ethtool_rule = ethtool_rx_flow_rule_create(&input);
- if (IS_ERR(ethtool_rule)) {
- ret = PTR_ERR(ethtool_rule);
- goto clean_rule;
- }
-
- efs->rule.flow = ethtool_rule->rule;
- efs->rule.flow_type = mvpp2_cls_ethtool_flow_to_type(info->fs.flow_type);
- if (efs->rule.flow_type < 0) {
- ret = efs->rule.flow_type;
- goto clean_rule;
- }
-
- ret = mvpp2_cls_rfs_parse_rule(&efs->rule);
- if (ret)
- goto clean_eth_rule;
-
- efs->rule.loc = info->fs.location;
-
- /* Replace an already existing rule */
- if (port->rfs_rules[efs->rule.loc]) {
- old_efs = port->rfs_rules[efs->rule.loc];
- ret = mvpp2_port_cls_rfs_rule_remove(port, &old_efs->rule);
- if (ret)
- goto clean_eth_rule;
- kfree(old_efs);
- port->n_rfs_rules--;
- }
-
- ret = mvpp2_port_flt_rfs_rule_insert(port, &efs->rule);
- if (ret)
- goto clean_eth_rule;
-
- ethtool_rx_flow_rule_destroy(ethtool_rule);
- efs->rule.flow = NULL;
-
- memcpy(&efs->rxnfc, info, sizeof(*info));
- port->rfs_rules[efs->rule.loc] = efs;
- port->n_rfs_rules++;
-
- return ret;
-
-clean_eth_rule:
- ethtool_rx_flow_rule_destroy(ethtool_rule);
-clean_rule:
- kfree(efs);
- return ret;
-}
-
-int mvpp2_ethtool_cls_rule_del(struct mvpp2_port *port,
- struct ethtool_rxnfc *info)
-{
- struct mvpp2_ethtool_fs *efs;
- int ret;
-
- if (info->fs.location >= MVPP2_N_RFS_ENTRIES_PER_FLOW)
- return -EINVAL;
-
- efs = port->rfs_rules[info->fs.location];
- if (!efs)
- return -EINVAL;
-
- /* Remove the rule from the engines. */
- ret = mvpp2_port_cls_rfs_rule_remove(port, &efs->rule);
- if (ret)
- return ret;
-
- port->n_rfs_rules--;
- port->rfs_rules[info->fs.location] = NULL;
- kfree(efs);
-
- return 0;
}
static inline u32 mvpp22_rxfh_indir(struct mvpp2_port *port, u32 rxq)
@@ -1466,181 +1023,37 @@ static inline u32 mvpp22_rxfh_indir(struct mvpp2_port *port, u32 rxq)
return port->first_rxq + ((rxq * nrxqs + rxq / cpus) % port->nrxqs);
}
-static void mvpp22_rss_fill_table(struct mvpp2_port *port,
- struct mvpp2_rss_table *table,
- u32 rss_ctx)
+void mvpp22_rss_fill_table(struct mvpp2_port *port, u32 table)
{
struct mvpp2 *priv = port->priv;
int i;
for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++) {
- u32 sel = MVPP22_RSS_INDEX_TABLE(rss_ctx) |
+ u32 sel = MVPP22_RSS_INDEX_TABLE(table) |
MVPP22_RSS_INDEX_TABLE_ENTRY(i);
mvpp2_write(priv, MVPP22_RSS_INDEX, sel);
mvpp2_write(priv, MVPP22_RSS_TABLE_ENTRY,
- mvpp22_rxfh_indir(port, table->indir[i]));
- }
-}
-
-static int mvpp22_rss_context_create(struct mvpp2_port *port, u32 *rss_ctx)
-{
- struct mvpp2 *priv = port->priv;
- u32 ctx;
-
- /* Find the first free RSS table */
- for (ctx = 0; ctx < MVPP22_N_RSS_TABLES; ctx++) {
- if (!priv->rss_tables[ctx])
- break;
- }
-
- if (ctx == MVPP22_N_RSS_TABLES)
- return -EINVAL;
-
- priv->rss_tables[ctx] = kzalloc(sizeof(*priv->rss_tables[ctx]),
- GFP_KERNEL);
- if (!priv->rss_tables[ctx])
- return -ENOMEM;
-
- *rss_ctx = ctx;
-
- /* Set the table width: replace the whole classifier Rx queue number
- * with the ones configured in RSS table entries.
- */
- mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(ctx));
- mvpp2_write(priv, MVPP22_RSS_WIDTH, 8);
-
- mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_QUEUE(ctx));
- mvpp2_write(priv, MVPP22_RXQ2RSS_TABLE, MVPP22_RSS_TABLE_POINTER(ctx));
-
- return 0;
-}
-
-int mvpp22_port_rss_ctx_create(struct mvpp2_port *port, u32 *port_ctx)
-{
- u32 rss_ctx;
- int ret, i;
-
- ret = mvpp22_rss_context_create(port, &rss_ctx);
- if (ret)
- return ret;
-
- /* Find the first available context number in the port, starting from 1.
- * Context 0 on each port is reserved for the default context.
- */
- for (i = 1; i < MVPP22_N_RSS_TABLES; i++) {
- if (port->rss_ctx[i] < 0)
- break;
- }
-
- if (i == MVPP22_N_RSS_TABLES)
- return -EINVAL;
-
- port->rss_ctx[i] = rss_ctx;
- *port_ctx = i;
-
- return 0;
-}
-
-static struct mvpp2_rss_table *mvpp22_rss_table_get(struct mvpp2 *priv,
- int rss_ctx)
-{
- if (rss_ctx < 0 || rss_ctx >= MVPP22_N_RSS_TABLES)
- return NULL;
-
- return priv->rss_tables[rss_ctx];
-}
-
-int mvpp22_port_rss_ctx_delete(struct mvpp2_port *port, u32 port_ctx)
-{
- struct mvpp2 *priv = port->priv;
- struct ethtool_rxnfc *rxnfc;
- int i, rss_ctx, ret;
-
- rss_ctx = mvpp22_rss_ctx(port, port_ctx);
-
- if (rss_ctx < 0 || rss_ctx >= MVPP22_N_RSS_TABLES)
- return -EINVAL;
-
- /* Invalidate any active classification rule that use this context */
- for (i = 0; i < MVPP2_N_RFS_ENTRIES_PER_FLOW; i++) {
- if (!port->rfs_rules[i])
- continue;
-
- rxnfc = &port->rfs_rules[i]->rxnfc;
- if (!(rxnfc->fs.flow_type & FLOW_RSS) ||
- rxnfc->rss_context != port_ctx)
- continue;
-
- ret = mvpp2_ethtool_cls_rule_del(port, rxnfc);
- if (ret) {
- netdev_warn(port->dev,
- "couldn't remove classification rule %d associated to this context",
- rxnfc->fs.location);
- }
+ mvpp22_rxfh_indir(port, port->indir[i]));
}
-
- kfree(priv->rss_tables[rss_ctx]);
-
- priv->rss_tables[rss_ctx] = NULL;
- port->rss_ctx[port_ctx] = -1;
-
- return 0;
-}
-
-int mvpp22_port_rss_ctx_indir_set(struct mvpp2_port *port, u32 port_ctx,
- const u32 *indir)
-{
- int rss_ctx = mvpp22_rss_ctx(port, port_ctx);
- struct mvpp2_rss_table *rss_table = mvpp22_rss_table_get(port->priv,
- rss_ctx);
-
- if (!rss_table)
- return -EINVAL;
-
- memcpy(rss_table->indir, indir,
- MVPP22_RSS_TABLE_ENTRIES * sizeof(rss_table->indir[0]));
-
- mvpp22_rss_fill_table(port, rss_table, rss_ctx);
-
- return 0;
-}
-
-int mvpp22_port_rss_ctx_indir_get(struct mvpp2_port *port, u32 port_ctx,
- u32 *indir)
-{
- int rss_ctx = mvpp22_rss_ctx(port, port_ctx);
- struct mvpp2_rss_table *rss_table = mvpp22_rss_table_get(port->priv,
- rss_ctx);
-
- if (!rss_table)
- return -EINVAL;
-
- memcpy(indir, rss_table->indir,
- MVPP22_RSS_TABLE_ENTRIES * sizeof(rss_table->indir[0]));
-
- return 0;
}
int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info)
{
u16 hash_opts = 0;
- u32 flow_type;
- flow_type = mvpp2_cls_ethtool_flow_to_type(info->flow_type);
-
- switch (flow_type) {
- case MVPP22_FLOW_TCP4:
- case MVPP22_FLOW_UDP4:
- case MVPP22_FLOW_TCP6:
- case MVPP22_FLOW_UDP6:
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
if (info->data & RXH_L4_B_0_1)
hash_opts |= MVPP22_CLS_HEK_OPT_L4SIP;
if (info->data & RXH_L4_B_2_3)
hash_opts |= MVPP22_CLS_HEK_OPT_L4DIP;
fallthrough;
- case MVPP22_FLOW_IP4:
- case MVPP22_FLOW_IP6:
+ case IPV4_FLOW:
+ case IPV6_FLOW:
if (info->data & RXH_L2DA)
hash_opts |= MVPP22_CLS_HEK_OPT_MAC_DA;
if (info->data & RXH_VLAN)
@@ -1657,18 +1070,15 @@ int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info)
default: return -EOPNOTSUPP;
}
- return mvpp2_port_rss_hash_opts_set(port, flow_type, hash_opts);
+ return mvpp2_port_rss_hash_opts_set(port, info->flow_type, hash_opts);
}
int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info)
{
unsigned long hash_opts;
- u32 flow_type;
int i;
- flow_type = mvpp2_cls_ethtool_flow_to_type(info->flow_type);
-
- hash_opts = mvpp2_port_rss_hash_opts_get(port, flow_type);
+ hash_opts = mvpp2_port_rss_hash_opts_get(port, info->flow_type);
info->data = 0;
for_each_set_bit(i, &hash_opts, MVPP22_CLS_HEK_N_FIELDS) {
@@ -1703,40 +1113,56 @@ int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info)
return 0;
}
-int mvpp22_port_rss_init(struct mvpp2_port *port)
+void mvpp22_rss_port_init(struct mvpp2_port *port)
{
- struct mvpp2_rss_table *table;
- u32 context = 0;
- int i, ret;
-
- for (i = 0; i < MVPP22_N_RSS_TABLES; i++)
- port->rss_ctx[i] = -1;
+ struct mvpp2 *priv = port->priv;
+ int i;
- ret = mvpp22_rss_context_create(port, &context);
- if (ret)
- return ret;
+ /* Set the table width: replace the whole classifier Rx queue number
+ * with the ones configured in RSS table entries.
+ */
+ mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(port->id));
+ mvpp2_write(priv, MVPP22_RSS_WIDTH, 8);
- table = mvpp22_rss_table_get(port->priv, context);
- if (!table)
- return -EINVAL;
+ if (port->num_tc_queues > 1) {
+ int rxq;
+ u32 tc_width;
+ int tc_mask;
- port->rss_ctx[0] = context;
+ tc_width = mvpp2_get_tc_width(port);
+ tc_mask = ((1 << tc_width) - 1);
+ for (rxq = 0; rxq < port->nrxqs; rxq++) {
+ mvpp2_write(priv, MVPP22_RSS_INDEX,
+ MVPP22_RSS_INDEX_QUEUE(port->rxqs[rxq]->id));
+ mvpp2_write(priv, MVPP22_RXQ2RSS_TABLE,
+ MVPP22_RSS_TABLE_POINTER(port->rxqs[rxq]->id & tc_mask));
+ }
+ } else {
+ /* The default RxQ is used as a key to select the RSS table to use.
+ * We use one RSS table per port.
+ */
+ mvpp2_write(priv, MVPP22_RSS_INDEX,
+ MVPP22_RSS_INDEX_QUEUE(port->first_rxq));
+ mvpp2_write(priv, MVPP22_RXQ2RSS_TABLE,
+ MVPP22_RSS_TABLE_POINTER(port->id));
+ }
/* Configure the first table to evenly distribute the packets across
* real Rx Queues. The table entries map a hash to a port Rx Queue.
*/
for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++)
- table->indir[i] = ethtool_rxfh_indir_default(i, port->nrxqs);
+ port->indir[i] = ethtool_rxfh_indir_default(i, port->nrxqs);
- mvpp22_rss_fill_table(port, table, mvpp22_rss_ctx(port, 0));
+ if (port->num_tc_queues > 1)
+ mvpp22_rss_fill_table_per_tc(port);
+ else
+ mvpp22_rss_fill_table(port, port->id);
/* Configure default flows */
- mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_IP4, MVPP22_CLS_HEK_IP4_2T);
- mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_IP6, MVPP22_CLS_HEK_IP6_2T);
- mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_TCP4, MVPP22_CLS_HEK_IP4_5T);
- mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_TCP6, MVPP22_CLS_HEK_IP6_5T);
- mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_UDP4, MVPP22_CLS_HEK_IP4_5T);
- mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_UDP6, MVPP22_CLS_HEK_IP6_5T);
-
- return 0;
+ mvpp2_port_rss_hash_opts_set(port, IPV4_FLOW, MVPP22_CLS_HEK_IP4_2T);
+ mvpp2_port_rss_hash_opts_set(port, IPV6_FLOW, MVPP22_CLS_HEK_IP6_2T);
+ mvpp2_port_rss_hash_opts_set(port, TCP_V4_FLOW, MVPP22_CLS_HEK_IP4_5T);
+ mvpp2_port_rss_hash_opts_set(port, TCP_V6_FLOW, MVPP22_CLS_HEK_IP6_5T);
+ mvpp2_port_rss_hash_opts_set(port, UDP_V4_FLOW, MVPP22_CLS_HEK_IP4_5T);
+ mvpp2_port_rss_hash_opts_set(port, UDP_V6_FLOW, MVPP22_CLS_HEK_IP6_5T);
}
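
Because a port's hash entry is no longer at a fixed offset in the flow table, callers now locate it at run time with mvpp2_cls_flow_hash_find() and then operate on the returned index. Below is a minimal sketch of that pattern, following the shape of mvpp2_port_rss_hash_opts_get() above; example_port_flow_hek() is a hypothetical wrapper, not part of the patch.

static u16 example_port_flow_hek(struct mvpp2_port *port,
				 struct mvpp2_cls_flow *flow)
{
	struct mvpp2_cls_flow_entry fe;
	int flow_index;

	/* Walk this flow's entries until a C3Hx lookup that includes the
	 * port in its port map is found; non-zero means no such entry.
	 */
	if (mvpp2_cls_flow_hash_find(port, flow, &fe, &flow_index))
		return 0;

	mvpp2_cls_flow_read(port->priv, flow_index, &fe);

	return mvpp2_flow_get_hek_fields(&fe);
}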
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
index 8867f25afab4..e5b7d28abc07 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
@@ -33,16 +33,15 @@ enum mvpp2_cls_engine {
};
#define MVPP22_CLS_HEK_OPT_MAC_DA BIT(0)
-#define MVPP22_CLS_HEK_OPT_VLAN_PRI BIT(1)
-#define MVPP22_CLS_HEK_OPT_VLAN BIT(2)
-#define MVPP22_CLS_HEK_OPT_L3_PROTO BIT(3)
-#define MVPP22_CLS_HEK_OPT_IP4SA BIT(4)
-#define MVPP22_CLS_HEK_OPT_IP4DA BIT(5)
-#define MVPP22_CLS_HEK_OPT_IP6SA BIT(6)
-#define MVPP22_CLS_HEK_OPT_IP6DA BIT(7)
-#define MVPP22_CLS_HEK_OPT_L4SIP BIT(8)
-#define MVPP22_CLS_HEK_OPT_L4DIP BIT(9)
-#define MVPP22_CLS_HEK_N_FIELDS 10
+#define MVPP22_CLS_HEK_OPT_VLAN BIT(1)
+#define MVPP22_CLS_HEK_OPT_L3_PROTO BIT(2)
+#define MVPP22_CLS_HEK_OPT_IP4SA BIT(3)
+#define MVPP22_CLS_HEK_OPT_IP4DA BIT(4)
+#define MVPP22_CLS_HEK_OPT_IP6SA BIT(5)
+#define MVPP22_CLS_HEK_OPT_IP6DA BIT(6)
+#define MVPP22_CLS_HEK_OPT_L4SIP BIT(7)
+#define MVPP22_CLS_HEK_OPT_L4DIP BIT(8)
+#define MVPP22_CLS_HEK_N_FIELDS 9
#define MVPP22_CLS_HEK_L4_OPTS (MVPP22_CLS_HEK_OPT_L4SIP | \
MVPP22_CLS_HEK_OPT_L4DIP)
@@ -60,12 +59,8 @@ enum mvpp2_cls_engine {
#define MVPP22_CLS_HEK_IP6_5T (MVPP22_CLS_HEK_IP6_2T | \
MVPP22_CLS_HEK_L4_OPTS)
-#define MVPP22_CLS_HEK_TAGGED (MVPP22_CLS_HEK_OPT_VLAN | \
- MVPP22_CLS_HEK_OPT_VLAN_PRI)
-
enum mvpp2_cls_field_id {
MVPP22_CLS_FIELD_MAC_DA = 0x03,
- MVPP22_CLS_FIELD_VLAN_PRI = 0x05,
MVPP22_CLS_FIELD_VLAN = 0x06,
MVPP22_CLS_FIELD_L3_PROTO = 0x0f,
MVPP22_CLS_FIELD_IP4SA = 0x10,
@@ -76,6 +71,19 @@ enum mvpp2_cls_field_id {
MVPP22_CLS_FIELD_L4DIP = 0x1e,
};
+enum mvpp2_cls_lkp_type {
+ MVPP2_CLS_LKP_HASH = 0,
+ MVPP2_CLS_LKP_DEFAULT = 3,
+ MVPP2_CLS_LKP_MAX,
+};
+
+enum mvpp2_cls_flow_seq {
+ MVPP2_CLS_FLOW_SEQ_NORMAL = 0,
+ MVPP2_CLS_FLOW_SEQ_FIRST1,
+ MVPP2_CLS_FLOW_SEQ_FIRST2,
+ MVPP2_CLS_FLOW_SEQ_LAST,
+ MVPP2_CLS_FLOW_SEQ_MIDDLE
+};
+
/* Classifier C2 engine constants */
#define MVPP22_CLS_C2_TCAM_EN(data) ((data) << 16)
@@ -97,62 +105,40 @@ enum mvpp22_cls_c2_fwd_action {
MVPP22_C2_FWD_HW_LOW_LAT_LOCK,
};
-enum mvpp22_cls_c2_color_action {
- MVPP22_C2_COL_NO_UPD = 0,
- MVPP22_C2_COL_NO_UPD_LOCK,
- MVPP22_C2_COL_GREEN,
- MVPP22_C2_COL_GREEN_LOCK,
- MVPP22_C2_COL_YELLOW,
- MVPP22_C2_COL_YELLOW_LOCK,
- MVPP22_C2_COL_RED, /* Drop */
- MVPP22_C2_COL_RED_LOCK, /* Drop */
-};
-
#define MVPP2_CLS_C2_TCAM_WORDS 5
#define MVPP2_CLS_C2_ATTR_WORDS 5
struct mvpp2_cls_c2_entry {
u32 index;
- /* TCAM lookup key */
u32 tcam[MVPP2_CLS_C2_TCAM_WORDS];
- /* Actions to perform upon TCAM match */
u32 act;
- /* Attributes relative to the actions to perform */
u32 attr[MVPP2_CLS_C2_ATTR_WORDS];
- /* Entry validity */
- u8 valid;
};
-#define MVPP22_FLOW_ETHER_BIT BIT(0)
-#define MVPP22_FLOW_IP4_BIT BIT(1)
-#define MVPP22_FLOW_IP6_BIT BIT(2)
-#define MVPP22_FLOW_TCP_BIT BIT(3)
-#define MVPP22_FLOW_UDP_BIT BIT(4)
-
-#define MVPP22_FLOW_TCP4 (MVPP22_FLOW_ETHER_BIT | MVPP22_FLOW_IP4_BIT | MVPP22_FLOW_TCP_BIT)
-#define MVPP22_FLOW_TCP6 (MVPP22_FLOW_ETHER_BIT | MVPP22_FLOW_IP6_BIT | MVPP22_FLOW_TCP_BIT)
-#define MVPP22_FLOW_UDP4 (MVPP22_FLOW_ETHER_BIT | MVPP22_FLOW_IP4_BIT | MVPP22_FLOW_UDP_BIT)
-#define MVPP22_FLOW_UDP6 (MVPP22_FLOW_ETHER_BIT | MVPP22_FLOW_IP6_BIT | MVPP22_FLOW_UDP_BIT)
-#define MVPP22_FLOW_IP4 (MVPP22_FLOW_ETHER_BIT | MVPP22_FLOW_IP4_BIT)
-#define MVPP22_FLOW_IP6 (MVPP22_FLOW_ETHER_BIT | MVPP22_FLOW_IP6_BIT)
-#define MVPP22_FLOW_ETHERNET (MVPP22_FLOW_ETHER_BIT)
-
/* Classifier C2 engine entries */
-#define MVPP22_CLS_C2_N_ENTRIES 256
-
-/* Number of per-port dedicated entries in the C2 TCAM */
-#define MVPP22_CLS_C2_PORT_N_FLOWS MVPP2_N_RFS_ENTRIES_PER_FLOW
+#define MVPP22_CLS_C2_MAX_ENTRIES 256
+#define MVPP22_CLS_C2_RSS_ENTRY(port) (port)
+#define MVPP22_CLS_C2_N_ENTRIES MVPP2_MAX_PORTS
-/* Each port has one range per flow type + one entry controlling the global RSS
- * setting and the default rx queue
+/* RSS flow entries in the flow table. We have 2 entries per port for RSS.
+ *
+ * The first performs a lookup using the C2 TCAM engine, to tag the
+ * packet for software forwarding (needed for RSS), enable or disable RSS, and
+ * assign the default rx queue.
+ *
+ * The second configures the hash generation, by specifying which fields of the
+ * packet header are used to generate the hash, and specifies the relevant hash
+ * engine to use.
*/
-#define MVPP22_CLS_C2_PORT_RANGE (MVPP22_CLS_C2_PORT_N_FLOWS + 1)
-#define MVPP22_CLS_C2_PORT_FIRST(p) ((p) * MVPP22_CLS_C2_PORT_RANGE)
-#define MVPP22_CLS_C2_RSS_ENTRY(p) (MVPP22_CLS_C2_PORT_FIRST((p) + 1) - 1)
-
-#define MVPP22_CLS_C2_PORT_FLOW_FIRST(p) (MVPP22_CLS_C2_PORT_FIRST(p))
+#define MVPP22_RSS_FLOW_C2_OFFS 0
+#define MVPP22_RSS_FLOW_HASH_OFFS 1
+#define MVPP22_RSS_FLOW_SIZE (MVPP22_RSS_FLOW_HASH_OFFS + 1)
-#define MVPP22_CLS_C2_RFS_LOC(p, loc) (MVPP22_CLS_C2_PORT_FLOW_FIRST(p) + (loc))
+#define MVPP22_RSS_FLOW_C2(port) ((port) * MVPP22_RSS_FLOW_SIZE + \
+ MVPP22_RSS_FLOW_C2_OFFS)
+#define MVPP22_RSS_FLOW_HASH(port) ((port) * MVPP22_RSS_FLOW_SIZE + \
+ MVPP22_RSS_FLOW_HASH_OFFS)
+#define MVPP22_RSS_FLOW_FIRST(port) MVPP22_RSS_FLOW_C2(port)
/* Packet flow ID */
enum mvpp2_prs_flow {
@@ -182,16 +168,6 @@ enum mvpp2_prs_flow {
MVPP2_FL_LAST,
};
-/* LU Type defined for all engines, and specified in the flow table */
-#define MVPP2_CLS_LU_TYPE_MASK 0x3f
-
-enum mvpp2_cls_lu_type {
- /* rule->loc is used as a lu-type for the entries 0 - 62. */
- MVPP22_CLS_LU_TYPE_ALL = 63,
-};
-
-#define MVPP2_N_FLOWS (MVPP2_FL_LAST - MVPP2_FL_START)
-
struct mvpp2_cls_flow {
/* The L2-L4 traffic flow type */
int flow_type;
@@ -206,47 +182,13 @@ struct mvpp2_cls_flow {
struct mvpp2_prs_result_info prs_ri;
};
-#define MVPP2_CLS_FLT_ENTRIES_PER_FLOW (MVPP2_MAX_PORTS + 1 + 16)
-#define MVPP2_CLS_FLT_FIRST(id) (((id) - MVPP2_FL_START) * \
- MVPP2_CLS_FLT_ENTRIES_PER_FLOW)
-
-#define MVPP2_CLS_FLT_C2_RFS(port, id, rfs_n) (MVPP2_CLS_FLT_FIRST(id) + \
- ((port) * MVPP2_MAX_PORTS) + \
- (rfs_n))
+#define MVPP2_N_FLOWS 52
-#define MVPP2_CLS_FLT_C2_RSS_ENTRY(id) (MVPP2_CLS_FLT_C2_RFS(MVPP2_MAX_PORTS, id, 0))
-#define MVPP2_CLS_FLT_HASH_ENTRY(port, id) (MVPP2_CLS_FLT_C2_RSS_ENTRY(id) + 1 + (port))
-#define MVPP2_CLS_FLT_LAST(id) (MVPP2_CLS_FLT_FIRST(id) + \
- MVPP2_CLS_FLT_ENTRIES_PER_FLOW - 1)
-
-/* Iterate on each classifier flow id. Sets 'i' to be the index of the first
- * entry in the cls_flows table for each different flow_id.
- * This relies on entries having the same flow_id in the cls_flows table being
- * contiguous.
- */
-#define for_each_cls_flow_id(i) \
- for ((i) = 0; (i) < MVPP2_N_PRS_FLOWS; (i)++) \
- if ((i) > 0 && \
- cls_flows[(i)].flow_id == cls_flows[(i) - 1].flow_id) \
- continue; \
- else
-
-/* Iterate on each classifier flow that has a given flow_type. Sets 'i' to be
- * the index of the first entry in the cls_flow table for each different flow_id
- * that has the given flow_type. This allows to operate on all flows that
- * matches a given ethtool flow type.
- */
-#define for_each_cls_flow_id_with_type(i, type) \
- for_each_cls_flow_id((i)) \
- if (cls_flows[(i)].flow_type != (type)) \
- continue; \
- else
-
-#define for_each_cls_flow_id_containing_type(i, type) \
- for_each_cls_flow_id((i)) \
- if ((cls_flows[(i)].flow_type & (type)) != (type)) \
- continue; \
- else
+#define MVPP2_ENTRIES_PER_FLOW (MVPP2_MAX_PORTS + 1)
+#define MVPP2_FLOW_C2_ENTRY(id) ((((id) - MVPP2_FL_START) * \
+ MVPP2_ENTRIES_PER_FLOW) + 1)
+#define MVPP2_PORT_FLOW_INDEX(offset, id) (MVPP2_FLOW_C2_ENTRY(id) + \
+ 1 + (offset))
struct mvpp2_cls_flow_entry {
u32 index;
@@ -259,18 +201,12 @@ struct mvpp2_cls_lookup_entry {
u32 data;
};
-int mvpp22_port_rss_init(struct mvpp2_port *port);
-
-int mvpp22_port_rss_enable(struct mvpp2_port *port);
-int mvpp22_port_rss_disable(struct mvpp2_port *port);
+void mvpp22_rss_fill_table(struct mvpp2_port *port, u32 table);
-int mvpp22_port_rss_ctx_create(struct mvpp2_port *port, u32 *rss_ctx);
-int mvpp22_port_rss_ctx_delete(struct mvpp2_port *port, u32 rss_ctx);
+void mvpp22_rss_port_init(struct mvpp2_port *port);
-int mvpp22_port_rss_ctx_indir_set(struct mvpp2_port *port, u32 rss_ctx,
- const u32 *indir);
-int mvpp22_port_rss_ctx_indir_get(struct mvpp2_port *port, u32 rss_ctx,
- u32 *indir);
+void mvpp22_rss_enable(struct mvpp2_port *port);
+void mvpp22_rss_disable(struct mvpp2_port *port);
int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info);
int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info);
@@ -285,7 +221,7 @@ int mvpp2_cls_flow_eng_get(struct mvpp2_cls_flow_entry *fe);
u16 mvpp2_flow_get_hek_fields(struct mvpp2_cls_flow_entry *fe);
-const struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow);
+struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow);
u32 mvpp2_cls_flow_hits(struct mvpp2 *priv, int index);
@@ -302,13 +238,9 @@ u32 mvpp2_cls_c2_hit_count(struct mvpp2 *priv, int c2_index);
void mvpp2_cls_c2_read(struct mvpp2 *priv, int index,
struct mvpp2_cls_c2_entry *c2);
-int mvpp2_ethtool_cls_rule_get(struct mvpp2_port *port,
- struct ethtool_rxnfc *rxnfc);
-
-int mvpp2_ethtool_cls_rule_ins(struct mvpp2_port *port,
- struct ethtool_rxnfc *info);
-
-int mvpp2_ethtool_cls_rule_del(struct mvpp2_port *port,
- struct ethtool_rxnfc *info);
+int mvpp2_cls_flow_hash_find(struct mvpp2_port *port,
+ struct mvpp2_cls_flow *flow,
+ struct mvpp2_cls_flow_entry *fe,
+ int *flow_index);
#endif
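
To make the new flow-table layout concrete, here is a worked expansion of the index macros above; it assumes MVPP2_MAX_PORTS is 4 (its value in mvpp2.h), so MVPP2_ENTRIES_PER_FLOW evaluates to 5.

/* Layout sketch, assuming MVPP2_MAX_PORTS == 4 (five entries per flow):
 *
 *   id = MVPP2_FL_START:
 *     MVPP2_FLOW_C2_ENTRY(id)      == 1    C2 lookup (lkp_type DEFAULT)
 *     MVPP2_PORT_FLOW_INDEX(0, id) == 2    port 0 C3Hx hash lookup
 *     MVPP2_PORT_FLOW_INDEX(1, id) == 3    port 1 C3Hx hash lookup
 *     MVPP2_PORT_FLOW_INDEX(2, id) == 4    port 2 C3Hx hash lookup
 *     MVPP2_PORT_FLOW_INDEX(3, id) == 5    port 3 C3Hx hash lookup
 *
 *   id = MVPP2_FL_START + 1:
 *     MVPP2_FLOW_C2_ENTRY(id)      == 6    the next flow starts here
 */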
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
index 4a3baa7e0142..1e614771f3a1 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
@@ -13,53 +13,12 @@
#include "mvpp2_prs.h"
#include "mvpp2_cls.h"
-struct mvpp2_dbgfs_prs_entry {
- int tid;
- struct mvpp2 *priv;
-};
-
-struct mvpp2_dbgfs_c2_entry {
- int id;
- struct mvpp2 *priv;
-};
-
-struct mvpp2_dbgfs_flow_entry {
- int flow;
- struct mvpp2 *priv;
-};
-
-struct mvpp2_dbgfs_flow_tbl_entry {
- int id;
- struct mvpp2 *priv;
-};
-
-struct mvpp2_dbgfs_port_flow_entry {
- struct mvpp2_port *port;
- struct mvpp2_dbgfs_flow_entry *dbg_fe;
-};
-
-struct mvpp2_dbgfs_entries {
- /* Entries for Header Parser debug info */
- struct mvpp2_dbgfs_prs_entry prs_entries[MVPP2_PRS_TCAM_SRAM_SIZE];
-
- /* Entries for Classifier C2 engine debug info */
- struct mvpp2_dbgfs_c2_entry c2_entries[MVPP22_CLS_C2_N_ENTRIES];
-
- /* Entries for Classifier Flow Table debug info */
- struct mvpp2_dbgfs_flow_tbl_entry flt_entries[MVPP2_CLS_FLOWS_TBL_SIZE];
-
- /* Entries for Classifier flows debug info */
- struct mvpp2_dbgfs_flow_entry flow_entries[MVPP2_N_PRS_FLOWS];
-
- /* Entries for per-port flows debug info */
- struct mvpp2_dbgfs_port_flow_entry port_flow_entries[MVPP2_MAX_PORTS];
-};
-
static int mvpp2_dbgfs_flow_flt_hits_show(struct seq_file *s, void *unused)
{
- struct mvpp2_dbgfs_flow_tbl_entry *entry = s->private;
+ struct mvpp2_dbgfs_flow_entry *entry = s->private;
+ int id = MVPP2_FLOW_C2_ENTRY(entry->flow);
- u32 hits = mvpp2_cls_flow_hits(entry->priv, entry->id);
+ u32 hits = mvpp2_cls_flow_hits(entry->priv, id);
seq_printf(s, "%u\n", hits);
@@ -84,7 +43,7 @@ DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_dec_hits);
static int mvpp2_dbgfs_flow_type_show(struct seq_file *s, void *unused)
{
struct mvpp2_dbgfs_flow_entry *entry = s->private;
- const struct mvpp2_cls_flow *f;
+ struct mvpp2_cls_flow *f;
const char *flow_name;
f = mvpp2_cls_flow_get(entry->flow);
@@ -119,12 +78,21 @@ static int mvpp2_dbgfs_flow_type_show(struct seq_file *s, void *unused)
return 0;
}
-DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_type);
+static int mvpp2_dbgfs_flow_type_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mvpp2_dbgfs_flow_type_show, inode->i_private);
+}
+
+static const struct file_operations mvpp2_dbgfs_flow_type_fops = {
+	.open = mvpp2_dbgfs_flow_type_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
static int mvpp2_dbgfs_flow_id_show(struct seq_file *s, void *unused)
{
- const struct mvpp2_dbgfs_flow_entry *entry = s->private;
- const struct mvpp2_cls_flow *f;
+ struct mvpp2_dbgfs_flow_entry *entry = s->private;
+ struct mvpp2_cls_flow *f;
f = mvpp2_cls_flow_get(entry->flow);
if (!f)
@@ -142,7 +110,7 @@ static int mvpp2_dbgfs_port_flow_hash_opt_show(struct seq_file *s, void *unused)
struct mvpp2_dbgfs_port_flow_entry *entry = s->private;
struct mvpp2_port *port = entry->port;
struct mvpp2_cls_flow_entry fe;
- const struct mvpp2_cls_flow *f;
+ struct mvpp2_cls_flow *f;
int flow_index;
u16 hash_opts;
@@ -150,7 +118,8 @@ static int mvpp2_dbgfs_port_flow_hash_opt_show(struct seq_file *s, void *unused)
if (!f)
return -EINVAL;
- flow_index = MVPP2_CLS_FLT_HASH_ENTRY(entry->port->id, f->flow_id);
+ if (mvpp2_cls_flow_hash_find(port, f, &fe, &flow_index))
+ return -EINVAL;
mvpp2_cls_flow_read(port->priv, flow_index, &fe);
@@ -161,21 +130,33 @@ static int mvpp2_dbgfs_port_flow_hash_opt_show(struct seq_file *s, void *unused)
return 0;
}
-DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_port_flow_hash_opt);
+static int mvpp2_dbgfs_port_flow_hash_opt_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, mvpp2_dbgfs_port_flow_hash_opt_show,
+ inode->i_private);
+}
+
+static const struct file_operations mvpp2_dbgfs_port_flow_hash_opt_fops = {
+	.open = mvpp2_dbgfs_port_flow_hash_opt_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
static int mvpp2_dbgfs_port_flow_engine_show(struct seq_file *s, void *unused)
{
struct mvpp2_dbgfs_port_flow_entry *entry = s->private;
struct mvpp2_port *port = entry->port;
struct mvpp2_cls_flow_entry fe;
- const struct mvpp2_cls_flow *f;
+ struct mvpp2_cls_flow *f;
int flow_index, engine;
f = mvpp2_cls_flow_get(entry->dbg_fe->flow);
if (!f)
return -EINVAL;
- flow_index = MVPP2_CLS_FLT_HASH_ENTRY(entry->port->id, f->flow_id);
+ if (mvpp2_cls_flow_hash_find(port, f, &fe, &flow_index))
+ return -EINVAL;
mvpp2_cls_flow_read(port->priv, flow_index, &fe);
@@ -190,10 +171,11 @@ DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_port_flow_engine);
static int mvpp2_dbgfs_flow_c2_hits_show(struct seq_file *s, void *unused)
{
- struct mvpp2_dbgfs_c2_entry *entry = s->private;
+ struct mvpp2_port *port = s->private;
u32 hits;
- hits = mvpp2_cls_c2_hit_count(entry->priv, entry->id);
+ hits = mvpp2_cls_c2_hit_count(port->priv,
+ MVPP22_CLS_C2_RSS_ENTRY(port->id));
seq_printf(s, "%u\n", hits);
@@ -204,11 +186,11 @@ DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_c2_hits);
static int mvpp2_dbgfs_flow_c2_rxq_show(struct seq_file *s, void *unused)
{
- struct mvpp2_dbgfs_c2_entry *entry = s->private;
+ struct mvpp2_port *port = s->private;
struct mvpp2_cls_c2_entry c2;
u8 qh, ql;
- mvpp2_cls_c2_read(entry->priv, entry->id, &c2);
+ mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);
qh = (c2.attr[0] >> MVPP22_CLS_C2_ATTR0_QHIGH_OFFS) &
MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
@@ -225,11 +207,11 @@ DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_c2_rxq);
static int mvpp2_dbgfs_flow_c2_enable_show(struct seq_file *s, void *unused)
{
- struct mvpp2_dbgfs_c2_entry *entry = s->private;
+ struct mvpp2_port *port = s->private;
struct mvpp2_cls_c2_entry c2;
int enabled;
- mvpp2_cls_c2_read(entry->priv, entry->id, &c2);
+ mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);
enabled = !!(c2.attr[2] & MVPP22_CLS_C2_ATTR2_RSS_EN);
@@ -275,6 +257,41 @@ static int mvpp2_dbgfs_port_vid_show(struct seq_file *s, void *unused)
DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_port_vid);
+static int mvpp2_prs_hw_hits_dump(struct seq_file *s,
+ struct mvpp2_prs_entry *pe)
+{
+ struct mvpp2 *priv = ((struct mvpp2_port *)s->private)->priv;
+ unsigned int cnt;
+
+ cnt = mvpp2_prs_hits(priv, pe->index);
+ if (cnt != 0)
+		seq_printf(s, "----- HITS: %u -----\n", cnt);
+ return 0;
+}
+
+static int mvpp2_dbgfs_port_parser_dump(struct seq_file *s,
+ struct mvpp2_prs_entry *pe)
+{
+ int i;
+
+ /* hw entry id */
+ seq_printf(s, " [%4d] ", pe->index);
+
+ i = MVPP2_PRS_TCAM_WORDS - 1;
+ seq_printf(s, "%1.1x ", pe->tcam[i--] & MVPP2_PRS_LU_MASK);
+
+ while (i >= 0)
+ seq_printf(s, "%4.4x ", (pe->tcam[i--]) & MVPP2_PRS_WORD_MASK);
+
+ seq_printf(s, "| %4.4x %8.8x %8.8x %8.8x\n",
+ pe->sram[3] & MVPP2_PRS_WORD_MASK,
+ pe->sram[2], pe->sram[1], pe->sram[0]);
+
+ mvpp2_prs_hw_hits_dump(s, pe);
+
+ return 0;
+}
+
static int mvpp2_dbgfs_port_parser_show(struct seq_file *s, void *unused)
{
struct mvpp2_port *port = s->private;
@@ -288,7 +305,7 @@ static int mvpp2_dbgfs_port_parser_show(struct seq_file *s, void *unused)
pmap = mvpp2_prs_tcam_port_map_get(&pe);
if (priv->prs_shadow[i].valid && test_bit(port->id, &pmap))
- seq_printf(s, "%03d\n", i);
+ mvpp2_dbgfs_port_parser_dump(s, &pe);
}
return 0;
@@ -442,7 +459,16 @@ static int mvpp2_dbgfs_prs_valid_show(struct seq_file *s, void *unused)
return 0;
}
-DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_valid);
+static int mvpp2_dbgfs_prs_valid_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mvpp2_dbgfs_prs_valid_show, inode->i_private);
+}
+
+static const struct file_operations mvpp2_dbgfs_prs_valid_fops = {
+	.open = mvpp2_dbgfs_prs_valid_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
static int mvpp2_dbgfs_flow_port_init(struct dentry *parent,
struct mvpp2_port *port,
@@ -452,11 +478,16 @@ static int mvpp2_dbgfs_flow_port_init(struct dentry *parent,
struct dentry *port_dir;
port_dir = debugfs_create_dir(port->dev->name, parent);
+ if (IS_ERR(port_dir))
+ return PTR_ERR(port_dir);
- port_entry = &port->priv->dbgfs_entries->port_flow_entries[port->id];
+ port_entry = kmalloc(sizeof(*port_entry), GFP_KERNEL);
+ if (!port_entry)
+ return -ENOMEM;
port_entry->port = port;
port_entry->dbg_fe = entry;
+ port->dbgfs_port_flow_entry = port_entry;
debugfs_create_file("hash_opts", 0444, port_dir, port_entry,
&mvpp2_dbgfs_port_flow_hash_opt_fops);
@@ -478,11 +509,19 @@ static int mvpp2_dbgfs_flow_entry_init(struct dentry *parent,
sprintf(flow_entry_name, "%02d", flow);
flow_entry_dir = debugfs_create_dir(flow_entry_name, parent);
+ if (!flow_entry_dir)
+ return -ENOMEM;
- entry = &priv->dbgfs_entries->flow_entries[flow];
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
entry->flow = flow;
entry->priv = priv;
+ priv->dbgfs_flow_entry[flow] = entry;
+
+ debugfs_create_file("flow_hits", 0444, flow_entry_dir, entry,
+ &mvpp2_dbgfs_flow_flt_hits_fops);
debugfs_create_file("dec_hits", 0444, flow_entry_dir, entry,
&mvpp2_dbgfs_flow_dec_hits_fops);
@@ -500,7 +539,6 @@ static int mvpp2_dbgfs_flow_entry_init(struct dentry *parent,
if (ret)
return ret;
}
-
return 0;
}
@@ -510,8 +548,10 @@ static int mvpp2_dbgfs_flow_init(struct dentry *parent, struct mvpp2 *priv)
int i, ret;
flow_dir = debugfs_create_dir("flows", parent);
+	if (IS_ERR(flow_dir))
+		return PTR_ERR(flow_dir);
- for (i = 0; i < MVPP2_N_PRS_FLOWS; i++) {
+ for (i = 0; i < MVPP2_N_FLOWS; i++) {
ret = mvpp2_dbgfs_flow_entry_init(flow_dir, priv, i);
if (ret)
return ret;
@@ -533,11 +573,16 @@ static int mvpp2_dbgfs_prs_entry_init(struct dentry *parent,
sprintf(prs_entry_name, "%03d", tid);
prs_entry_dir = debugfs_create_dir(prs_entry_name, parent);
+	if (IS_ERR(prs_entry_dir))
+		return PTR_ERR(prs_entry_dir);
- entry = &priv->dbgfs_entries->prs_entries[tid];
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
entry->tid = tid;
entry->priv = priv;
+ priv->dbgfs_prs_entry[tid] = entry;
/* Create each attr */
debugfs_create_file("sram", 0444, prs_entry_dir, entry,
@@ -558,9 +603,6 @@ static int mvpp2_dbgfs_prs_entry_init(struct dentry *parent,
debugfs_create_file("hits", 0444, prs_entry_dir, entry,
&mvpp2_dbgfs_prs_hits_fops);
- debugfs_create_file("pmap", 0444, prs_entry_dir, entry,
- &mvpp2_dbgfs_prs_pmap_fops);
-
return 0;
}
@@ -570,6 +612,8 @@ static int mvpp2_dbgfs_prs_init(struct dentry *parent, struct mvpp2 *priv)
int i, ret;
prs_dir = debugfs_create_dir("parser", parent);
+	if (IS_ERR(prs_dir))
+		return PTR_ERR(prs_dir);
for (i = 0; i < MVPP2_PRS_TCAM_SRAM_SIZE; i++) {
ret = mvpp2_dbgfs_prs_entry_init(prs_dir, priv, i);
@@ -580,104 +624,14 @@ static int mvpp2_dbgfs_prs_init(struct dentry *parent, struct mvpp2 *priv)
return 0;
}
-static int mvpp2_dbgfs_c2_entry_init(struct dentry *parent,
- struct mvpp2 *priv, int id)
-{
- struct mvpp2_dbgfs_c2_entry *entry;
- struct dentry *c2_entry_dir;
- char c2_entry_name[10];
-
- if (id >= MVPP22_CLS_C2_N_ENTRIES)
- return -EINVAL;
-
- sprintf(c2_entry_name, "%03d", id);
-
- c2_entry_dir = debugfs_create_dir(c2_entry_name, parent);
- if (!c2_entry_dir)
- return -ENOMEM;
-
- entry = &priv->dbgfs_entries->c2_entries[id];
-
- entry->id = id;
- entry->priv = priv;
-
- debugfs_create_file("hits", 0444, c2_entry_dir, entry,
- &mvpp2_dbgfs_flow_c2_hits_fops);
-
- debugfs_create_file("default_rxq", 0444, c2_entry_dir, entry,
- &mvpp2_dbgfs_flow_c2_rxq_fops);
-
- debugfs_create_file("rss_enable", 0444, c2_entry_dir, entry,
- &mvpp2_dbgfs_flow_c2_enable_fops);
-
- return 0;
-}
-
-static int mvpp2_dbgfs_flow_tbl_entry_init(struct dentry *parent,
- struct mvpp2 *priv, int id)
-{
- struct mvpp2_dbgfs_flow_tbl_entry *entry;
- struct dentry *flow_tbl_entry_dir;
- char flow_tbl_entry_name[10];
-
- if (id >= MVPP2_CLS_FLOWS_TBL_SIZE)
- return -EINVAL;
-
- sprintf(flow_tbl_entry_name, "%03d", id);
-
- flow_tbl_entry_dir = debugfs_create_dir(flow_tbl_entry_name, parent);
- if (!flow_tbl_entry_dir)
- return -ENOMEM;
-
- entry = &priv->dbgfs_entries->flt_entries[id];
-
- entry->id = id;
- entry->priv = priv;
-
- debugfs_create_file("hits", 0444, flow_tbl_entry_dir, entry,
- &mvpp2_dbgfs_flow_flt_hits_fops);
-
- return 0;
-}
-
-static int mvpp2_dbgfs_cls_init(struct dentry *parent, struct mvpp2 *priv)
-{
- struct dentry *cls_dir, *c2_dir, *flow_tbl_dir;
- int i, ret;
-
- cls_dir = debugfs_create_dir("classifier", parent);
- if (!cls_dir)
- return -ENOMEM;
-
- c2_dir = debugfs_create_dir("c2", cls_dir);
- if (!c2_dir)
- return -ENOMEM;
-
- for (i = 0; i < MVPP22_CLS_C2_N_ENTRIES; i++) {
- ret = mvpp2_dbgfs_c2_entry_init(c2_dir, priv, i);
- if (ret)
- return ret;
- }
-
- flow_tbl_dir = debugfs_create_dir("flow_table", cls_dir);
- if (!flow_tbl_dir)
- return -ENOMEM;
-
- for (i = 0; i < MVPP2_CLS_FLOWS_TBL_SIZE; i++) {
- ret = mvpp2_dbgfs_flow_tbl_entry_init(flow_tbl_dir, priv, i);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
static int mvpp2_dbgfs_port_init(struct dentry *parent,
struct mvpp2_port *port)
{
struct dentry *port_dir;
port_dir = debugfs_create_dir(port->dev->name, parent);
+ if (IS_ERR(port_dir))
+ return PTR_ERR(port_dir);
debugfs_create_file("parser_entries", 0444, port_dir, port,
&mvpp2_dbgfs_port_parser_fops);
@@ -688,14 +642,29 @@ static int mvpp2_dbgfs_port_init(struct dentry *parent,
debugfs_create_file("vid_filter", 0444, port_dir, port,
&mvpp2_dbgfs_port_vid_fops);
+ debugfs_create_file("c2_hits", 0444, port_dir, port,
+ &mvpp2_dbgfs_flow_c2_hits_fops);
+
+ debugfs_create_file("default_rxq", 0444, port_dir, port,
+ &mvpp2_dbgfs_flow_c2_rxq_fops);
+
+ debugfs_create_file("rss_enable", 0444, port_dir, port,
+ &mvpp2_dbgfs_flow_c2_enable_fops);
+
return 0;
}
void mvpp2_dbgfs_cleanup(struct mvpp2 *priv)
{
- debugfs_remove_recursive(priv->dbgfs_dir);
+ int i;
- kfree(priv->dbgfs_entries);
+ for (i = 0; i < MVPP2_PRS_TCAM_SRAM_SIZE; i++)
+ kfree(priv->dbgfs_prs_entry[i]);
+
+ for (i = 0; i < MVPP2_N_FLOWS; i++)
+ kfree(priv->dbgfs_flow_entry[i]);
+
+ debugfs_remove_recursive(priv->dbgfs_dir);
}
void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name)
@@ -704,24 +673,22 @@ void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name)
int ret, i;
mvpp2_root = debugfs_lookup(MVPP2_DRIVER_NAME, NULL);
- if (!mvpp2_root)
+ if (!mvpp2_root) {
mvpp2_root = debugfs_create_dir(MVPP2_DRIVER_NAME, NULL);
+ if (IS_ERR(mvpp2_root))
+ return;
+ }
mvpp2_dir = debugfs_create_dir(name, mvpp2_root);
+ if (IS_ERR(mvpp2_dir))
+ return;
priv->dbgfs_dir = mvpp2_dir;
- priv->dbgfs_entries = kzalloc(sizeof(*priv->dbgfs_entries), GFP_KERNEL);
- if (!priv->dbgfs_entries)
- goto err;
ret = mvpp2_dbgfs_prs_init(mvpp2_dir, priv);
if (ret)
goto err;
- ret = mvpp2_dbgfs_cls_init(mvpp2_dir, priv);
- if (ret)
- goto err;
-
for (i = 0; i < priv->port_count; i++) {
ret = mvpp2_dbgfs_port_init(mvpp2_dir, priv->port_list[i]);
if (ret)
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 542cd6f2c9bd..ad63ac5604f9 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -8,6 +8,7 @@
*/
#include <linux/acpi.h>
+#include <linux/dma-direct.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -17,6 +18,7 @@
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/mfd/syscon.h>
+#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/of.h>
@@ -25,6 +27,8 @@
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/genalloc.h>
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/phy/phy.h>
@@ -34,62 +38,117 @@
#include <linux/ktime.h>
#include <linux/regmap.h>
#include <uapi/linux/ppp_defs.h>
+#include <net/dsa.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tso.h>
#include <linux/bpf_trace.h>
+#include <net/busy_poll.h>
#include "mvpp2.h"
#include "mvpp2_prs.h"
#include "mvpp2_cls.h"
-enum mvpp2_bm_pool_log_num {
- MVPP2_BM_SHORT,
- MVPP2_BM_LONG,
- MVPP2_BM_JUMBO,
- MVPP2_BM_POOLS_NUM
+/* RX-TX fast-forwarding path optimization */
+#define MVPP2_RXTX_HASH 0xbac0
+#define MVPP2_RXTX_HASH_CONST_MASK 0xfff0
+#define MVPP2_RXTX_HASH_BMID_MASK 0xf
+/* HashBits[31..16] contain skb->head[22..7], the head is aligned and [6..0]=0,
+ * so skb->head is shifted left by (16-7) bits.
+ * This hash allows detecting 2 non-recyclable cases:
+ * - new skb with old hash inside
+ * - same skb but NET-stack has replaced the data-buffer with another one
+ */
+#define MVPP2_HEAD_HASH_SHIFT (16 - 7)
+#define MVPP2_RXTX_HASH_GENER(skb, bm_pool_id) \
+ (((u32)(phys_addr_t)skb->head << MVPP2_HEAD_HASH_SHIFT) | \
+ MVPP2_RXTX_HASH | bm_pool_id)
+#define MVPP2_RXTX_HASH_IS_OK(skb, hash) \
+ (MVPP2_RXTX_HASH_GENER(skb, 0) == (hash & ~MVPP2_RXTX_HASH_BMID_MASK))
+#define MVPP2_RXTX_HASH_IS_OK_TX(skb, hash) \
+ (((((u32)(phys_addr_t)skb->head << MVPP2_HEAD_HASH_SHIFT) | \
+ MVPP2_RXTX_HASH) ^ hash) <= MVPP2_RXTX_HASH_BMID_MASK)
+
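A minimal user-space sketch (not driver code) of the recycle-hash check above; the constants mirror the macros in this hunk, and the 128-byte-aligned head address is a made-up example:

#include <stdint.h>
#include <stdio.h>

#define RXTX_HASH       0xbac0u
#define RXTX_BMID_MASK  0xfu
#define HEAD_HASH_SHIFT (16 - 7)

static uint32_t hash_gen(uintptr_t head, uint32_t bm_pool_id)
{
	/* head bits [22..7] land in hash bits [31..16] */
	return ((uint32_t)head << HEAD_HASH_SHIFT) | RXTX_HASH | bm_pool_id;
}

static int hash_ok(uintptr_t head, uint32_t hash)
{
	/* same buffer, any pool id: compare with the pool bits masked off */
	return hash_gen(head, 0) == (hash & ~RXTX_BMID_MASK);
}

int main(void)
{
	uintptr_t head = 0x12345680;	/* 128-byte aligned example address */
	uint32_t h = hash_gen(head, 3);

	printf("ok=%d stale=%d\n",
	       hash_ok(head, h),	 /* 1: same data buffer           */
	       hash_ok(head + 0x80, h)); /* 0: data buffer was replaced   */
	return 0;
}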
+/* The recycle pool size should be effectively large but bounded (to avoid
+ * wasting memory on TX pick-up). It should be >8 (net-stack forwarding
+ * buffers) and larger than the packet-coalescing depth; to be effective it
+ * should be >=NAPI_POLL_WEIGHT.
+ * For 4 ports we need more buffers, but not x4; statistically x3 is enough.
+ * The SKB pool is shared for Small/Large/Jumbo buffers, so we need more
+ * SKBs; statistically x5 is enough.
+ */
+#define MVPP2_RECYCLE_FULL (NAPI_POLL_WEIGHT * 3)
+#define MVPP2_RECYCLE_FULL_SKB (NAPI_POLL_WEIGHT * 5)
+
+#define MVPP2_NUM_OF_TC 1
+
+struct mvpp2_recycle_pool {
+ void *pbuf[MVPP2_RECYCLE_FULL_SKB];
};
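A quick sizing check of the constants above, assuming the kernel's usual NAPI_POLL_WEIGHT of 64 (an assumption, not stated in this patch); per CPU this gives:

enum {
	EXAMPLE_NAPI_POLL_WEIGHT = 64,
	EXAMPLE_RECYCLE_FULL     = EXAMPLE_NAPI_POLL_WEIGHT * 3, /* 192 buffers per BM pool */
	EXAMPLE_RECYCLE_FULL_SKB = EXAMPLE_NAPI_POLL_WEIGHT * 5, /* 320 SKBs in the shared pool */
};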
-static struct {
- int pkt_size;
- int buf_num;
-} mvpp2_pools[MVPP2_BM_POOLS_NUM];
+struct mvpp2_recycle_pcpu {
+ /* All pool-indexes are in 1 cache-line */
+ short int idx[MVPP2_BM_POOLS_NUM_MAX];
+ /* BM/SKB-buffer pools */
+ struct mvpp2_recycle_pool pool[MVPP2_BM_POOLS_NUM_MAX];
+} __aligned(L1_CACHE_BYTES);
+
+struct mvpp2_share {
+ struct mvpp2_recycle_pcpu *recycle;
+ void *recycle_base;
+
+ /* Counters set by Probe/Init/Open */
+ int num_open_ports;
+} __aligned(L1_CACHE_BYTES);
+
+/* Normal RSS entry */
+struct mvpp2_rss_tbl_entry {
+ u8 tbl_id;
+ u8 tbl_line;
+ u8 width;
+ u8 rxq;
+};
+
+struct mvpp2_share mvpp2_share;
+
+#ifndef MODULE
+static inline void mvpp2_recycle_put(struct mvpp2_port *port,
+ struct mvpp2_txq_pcpu *txq_pcpu,
+ struct mvpp2_txq_pcpu_buf *tx_buf);
+#endif
+
+static void mvpp2_tx_done_guard_force_irq(struct mvpp2_port *port,
+ int sw_thread, u8 to_zero_map);
+static inline void mvpp2_tx_done_guard_timer_set(struct mvpp2_port *port,
+ int sw_thread);
+static u32 mvpp2_tx_done_guard_get_stats(struct mvpp2_port *port, int cpu);
/* The prototype is added here to be used in start_dev when using ACPI. This
* will be removed once phylink is used for all modes (dt+ACPI).
*/
static void mvpp2_acpi_start(struct mvpp2_port *port);
+/* Branch prediction switches */
+DEFINE_STATIC_KEY_FALSE(mvpp21_variant);
+DEFINE_STATIC_KEY_FALSE(mvpp2_recycle_ena);
+
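For reference, a generic sketch of the static-key pattern these declarations enable (the names below are placeholders, not part of the patch); while the key stays disabled the branch compiles down to a patched no-op:

#include <linux/jump_label.h>
#include <linux/types.h>

DEFINE_STATIC_KEY_FALSE(example_key);

static inline u32 example_desc_read(u32 pp21_val, u32 pp22_val)
{
	/* taken only after static_branch_enable(&example_key) at probe time */
	if (static_branch_unlikely(&example_key))
		return pp21_val;
	return pp22_val;
}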
/* Queue modes */
#define MVPP2_QDIST_SINGLE_MODE 0
#define MVPP2_QDIST_MULTI_MODE 1
static int queue_mode = MVPP2_QDIST_MULTI_MODE;
+static int tx_fifo_protection;
+static int bm_underrun_protect = 1;
+static int recycle;
+static u32 tx_fifo_map;
module_param(queue_mode, int, 0444);
MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)");
-/* Utility/helper methods */
-
-void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
-{
- writel(data, priv->swth_base[0] + offset);
-}
+module_param(tx_fifo_protection, int, 0444);
+MODULE_PARM_DESC(tx_fifo_protection, "Set tx_fifo_protection (off=0, on=1)");
-u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
-{
- return readl(priv->swth_base[0] + offset);
-}
-
-static u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset)
-{
- return readl_relaxed(priv->swth_base[0] + offset);
-}
-
-static inline u32 mvpp2_cpu_to_thread(struct mvpp2 *priv, int cpu)
-{
- return cpu % priv->nthreads;
-}
+module_param(bm_underrun_protect, int, 0444);
+MODULE_PARM_DESC(bm_underrun_protect, "Set BM underrun protect feature (0-1), def=1");
static struct page_pool *
mvpp2_create_page_pool(struct device *dev, int num, int len,
@@ -109,70 +168,16 @@ mvpp2_create_page_pool(struct device *dev, int num, int len,
return page_pool_create(&pp_params);
}
-/* These accessors should be used to access:
- *
- * - per-thread registers, where each thread has its own copy of the
- * register.
- *
- * MVPP2_BM_VIRT_ALLOC_REG
- * MVPP2_BM_ADDR_HIGH_ALLOC
- * MVPP22_BM_ADDR_HIGH_RLS_REG
- * MVPP2_BM_VIRT_RLS_REG
- * MVPP2_ISR_RX_TX_CAUSE_REG
- * MVPP2_ISR_RX_TX_MASK_REG
- * MVPP2_TXQ_NUM_REG
- * MVPP2_AGGR_TXQ_UPDATE_REG
- * MVPP2_TXQ_RSVD_REQ_REG
- * MVPP2_TXQ_RSVD_RSLT_REG
- * MVPP2_TXQ_SENT_REG
- * MVPP2_RXQ_NUM_REG
- *
- * - global registers that must be accessed through a specific thread
- * window, because they are related to an access to a per-thread
- * register
- *
- * MVPP2_BM_PHY_ALLOC_REG (related to MVPP2_BM_VIRT_ALLOC_REG)
- * MVPP2_BM_PHY_RLS_REG (related to MVPP2_BM_VIRT_RLS_REG)
- * MVPP2_RXQ_THRESH_REG (related to MVPP2_RXQ_NUM_REG)
- * MVPP2_RXQ_DESC_ADDR_REG (related to MVPP2_RXQ_NUM_REG)
- * MVPP2_RXQ_DESC_SIZE_REG (related to MVPP2_RXQ_NUM_REG)
- * MVPP2_RXQ_INDEX_REG (related to MVPP2_RXQ_NUM_REG)
- * MVPP2_TXQ_PENDING_REG (related to MVPP2_TXQ_NUM_REG)
- * MVPP2_TXQ_DESC_ADDR_REG (related to MVPP2_TXQ_NUM_REG)
- * MVPP2_TXQ_DESC_SIZE_REG (related to MVPP2_TXQ_NUM_REG)
- * MVPP2_TXQ_INDEX_REG (related to MVPP2_TXQ_NUM_REG)
- * MVPP2_TXQ_PENDING_REG (related to MVPP2_TXQ_NUM_REG)
- * MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG)
- * MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG)
- */
-static void mvpp2_thread_write(struct mvpp2 *priv, unsigned int thread,
- u32 offset, u32 data)
-{
- writel(data, priv->swth_base[thread] + offset);
-}
+module_param(recycle, int, 0444);
+MODULE_PARM_DESC(recycle, "Recycle: 0:disable(default), >=1:enable");
-static u32 mvpp2_thread_read(struct mvpp2 *priv, unsigned int thread,
- u32 offset)
-{
- return readl(priv->swth_base[thread] + offset);
-}
-
-static void mvpp2_thread_write_relaxed(struct mvpp2 *priv, unsigned int thread,
- u32 offset, u32 data)
-{
- writel_relaxed(data, priv->swth_base[thread] + offset);
-}
-
-static u32 mvpp2_thread_read_relaxed(struct mvpp2 *priv, unsigned int thread,
- u32 offset)
-{
- return readl_relaxed(priv->swth_base[thread] + offset);
-}
+module_param(tx_fifo_map, uint, 0444);
+MODULE_PARM_DESC(tx_fifo_map, "Set PPv2 TX FIFO ports map");
static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
struct mvpp2_tx_desc *tx_desc)
{
- if (port->priv->hw_version == MVPP21)
+ if (static_branch_unlikely(&mvpp21_variant))
return le32_to_cpu(tx_desc->pp21.buf_dma_addr);
else
return le64_to_cpu(tx_desc->pp22.buf_dma_addr_ptp) &
@@ -188,7 +193,7 @@ static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
addr = dma_addr & ~MVPP2_TX_DESC_ALIGN;
offset = dma_addr & MVPP2_TX_DESC_ALIGN;
- if (port->priv->hw_version == MVPP21) {
+ if (static_branch_unlikely(&mvpp21_variant)) {
tx_desc->pp21.buf_dma_addr = cpu_to_le32(addr);
tx_desc->pp21.packet_offset = offset;
} else {
@@ -203,7 +208,7 @@ static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
struct mvpp2_tx_desc *tx_desc)
{
- if (port->priv->hw_version == MVPP21)
+ if (static_branch_unlikely(&mvpp21_variant))
return le16_to_cpu(tx_desc->pp21.data_size);
else
return le16_to_cpu(tx_desc->pp22.data_size);
@@ -213,7 +218,7 @@ static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
struct mvpp2_tx_desc *tx_desc,
size_t size)
{
- if (port->priv->hw_version == MVPP21)
+ if (static_branch_unlikely(&mvpp21_variant))
tx_desc->pp21.data_size = cpu_to_le16(size);
else
tx_desc->pp22.data_size = cpu_to_le16(size);
@@ -223,7 +228,7 @@ static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
struct mvpp2_tx_desc *tx_desc,
unsigned int txq)
{
- if (port->priv->hw_version == MVPP21)
+ if (static_branch_unlikely(&mvpp21_variant))
tx_desc->pp21.phys_txq = txq;
else
tx_desc->pp22.phys_txq = txq;
@@ -233,7 +238,7 @@ static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
struct mvpp2_tx_desc *tx_desc,
unsigned int command)
{
- if (port->priv->hw_version == MVPP21)
+ if (static_branch_unlikely(&mvpp21_variant))
tx_desc->pp21.command = cpu_to_le32(command);
else
tx_desc->pp22.command = cpu_to_le32(command);
@@ -242,7 +247,7 @@ static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
struct mvpp2_tx_desc *tx_desc)
{
- if (port->priv->hw_version == MVPP21)
+ if (static_branch_unlikely(&mvpp21_variant))
return tx_desc->pp21.packet_offset;
else
return tx_desc->pp22.packet_offset;
@@ -251,27 +256,17 @@ static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
struct mvpp2_rx_desc *rx_desc)
{
- if (port->priv->hw_version == MVPP21)
+ if (static_branch_unlikely(&mvpp21_variant))
return le32_to_cpu(rx_desc->pp21.buf_dma_addr);
else
return le64_to_cpu(rx_desc->pp22.buf_dma_addr_key_hash) &
MVPP2_DESC_DMA_MASK;
}
-static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
- struct mvpp2_rx_desc *rx_desc)
-{
- if (port->priv->hw_version == MVPP21)
- return le32_to_cpu(rx_desc->pp21.buf_cookie);
- else
- return le64_to_cpu(rx_desc->pp22.buf_cookie_misc) &
- MVPP2_DESC_DMA_MASK;
-}
-
static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
struct mvpp2_rx_desc *rx_desc)
{
- if (port->priv->hw_version == MVPP21)
+ if (static_branch_unlikely(&mvpp21_variant))
return le16_to_cpu(rx_desc->pp21.data_size);
else
return le16_to_cpu(rx_desc->pp22.data_size);
@@ -280,7 +275,7 @@ static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
struct mvpp2_rx_desc *rx_desc)
{
- if (port->priv->hw_version == MVPP21)
+ if (static_branch_unlikely(&mvpp21_variant))
return le32_to_cpu(rx_desc->pp21.status);
else
return le32_to_cpu(rx_desc->pp22.status);
@@ -314,26 +309,6 @@ static void mvpp2_txq_inc_put(struct mvpp2_port *port,
txq_pcpu->txq_put_index = 0;
}
-/* Get number of maximum RXQ */
-static int mvpp2_get_nrxqs(struct mvpp2 *priv)
-{
- unsigned int nrxqs;
-
- if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_SINGLE_MODE)
- return 1;
-
- /* According to the PPv2.2 datasheet and our experiments on
- * PPv2.1, RX queues have an allocation granularity of 4 (when
- * more than a single one on PPv2.2).
- * Round up to nearest multiple of 4.
- */
- nrxqs = (num_possible_cpus() + 3) & ~0x3;
- if (nrxqs > MVPP2_PORT_MAX_RXQ)
- nrxqs = MVPP2_PORT_MAX_RXQ;
-
- return nrxqs;
-}
-
/* Get number of physical egress port */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
@@ -372,8 +347,85 @@ static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool,
/* Buffer Manager configuration routines */
+/* Get default packet size for given BM pool type */
+static int mvpp2_bm_pool_default_pkt_size(enum mvpp2_bm_pool_type bm_pool_type)
+{
+ switch (bm_pool_type) {
+ case MVPP2_BM_SHORT:
+ return MVPP2_BM_SHORT_PKT_SIZE;
+ case MVPP2_BM_JUMBO:
+ return MVPP2_BM_JUMBO_PKT_SIZE;
+ case MVPP2_BM_LONG:
+ return MVPP2_BM_LONG_PKT_SIZE;
+ default:
+ return -EINVAL;
+ }
+}
+
+/* Get default buffer count for given BM pool type */
+static int mvpp2_bm_pool_default_buf_num(enum mvpp2_bm_pool_type bm_pool_type)
+{
+ switch (bm_pool_type) {
+ case MVPP2_BM_SHORT:
+ return MVPP2_BM_SHORT_BUF_NUM;
+ case MVPP2_BM_JUMBO:
+ return MVPP2_BM_JUMBO_BUF_NUM;
+ case MVPP2_BM_LONG:
+ return MVPP2_BM_LONG_BUF_NUM;
+ default:
+ return -EINVAL;
+ }
+}
+
+/* Get BM pool type mapping - return the hardware Buffer Manager pool
+ * type corresponding to a pool ID:
+ * POOL#0 - short packets
+ * POOL#1 - jumbo packets
+ * POOL#2 - long packets
+ * When the kernel-space recycling feature is enabled, ID = 2 is
+ * the first (CPU#0) of the per-CPU pools for long packets.
+ */
+static enum mvpp2_bm_pool_type mvpp2_bm_pool_get_type(int id)
+{
+ switch (id) {
+ case 0:
+ return MVPP2_BM_SHORT;
+ case 1:
+ return MVPP2_BM_JUMBO;
+ case 2:
+ return MVPP2_BM_LONG;
+ default:
+ if (recycle)
+ return MVPP2_BM_LONG;
+ return -EINVAL;
+ }
+}
+
+/* Get BM pool ID mapping - return the hardware Buffer Manager pool
+ * ID corresponding to a pool type:
+ * Short packets - POOL#0
+ * Jumbo packets - POOL#1
+ * Long packets - POOL#2
+ * When the kernel-space recycling feature is enabled, ID = 2 is
+ * the first (CPU#0) of the per-CPU pools for long packets.
+ */
+static int mvpp2_bm_pool_get_id(enum mvpp2_bm_pool_type bm_pool_type)
+{
+ switch (bm_pool_type) {
+ case MVPP2_BM_SHORT:
+ return 0;
+ case MVPP2_BM_JUMBO:
+ return 1;
+ case MVPP2_BM_LONG:
+ return 2;
+ default:
+ return -EINVAL;
+ }
+}
+
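A small standalone sketch of how these mappings are used further down when choosing the HW long/short pools for a port; the 1518-byte threshold is MVPP2_BM_LONG_PKT_SIZE per the comments later in this patch, and the function name here is hypothetical:

enum pool_type { POOL_SHORT, POOL_JUMBO, POOL_LONG };	/* IDs 0, 1, 2 */

static void pick_hw_pools(int pkt_size, enum pool_type *hw_long,
			  enum pool_type *hw_short)
{
	if (pkt_size > 1518) {		/* MVPP2_BM_LONG_PKT_SIZE */
		*hw_long  = POOL_JUMBO;	/* HW long  <- SW jumbo */
		*hw_short = POOL_LONG;	/* HW short <- SW long  */
	} else {
		*hw_long  = POOL_LONG;	/* HW long  <- SW long  */
		*hw_short = POOL_SHORT;	/* HW short <- SW short */
	}
}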
/* Create pool */
-static int mvpp2_bm_pool_create(struct device *dev, struct mvpp2 *priv,
+static int mvpp2_bm_pool_create(struct platform_device *pdev,
+ struct mvpp2 *priv,
struct mvpp2_bm_pool *bm_pool, int size)
{
u32 val;
@@ -384,7 +436,7 @@ static int mvpp2_bm_pool_create(struct device *dev, struct mvpp2 *priv,
if (!IS_ALIGNED(size, 16))
return -EINVAL;
- /* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 needs 16
+	/* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 and PPv2.3 need 16
* bytes per buffer pointer
*/
if (priv->hw_version == MVPP21)
@@ -392,7 +444,7 @@ static int mvpp2_bm_pool_create(struct device *dev, struct mvpp2 *priv,
else
bm_pool->size_bytes = 2 * sizeof(u64) * size;
- bm_pool->virt_addr = dma_alloc_coherent(dev, bm_pool->size_bytes,
+ bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, bm_pool->size_bytes,
&bm_pool->dma_addr,
GFP_KERNEL);
if (!bm_pool->virt_addr)
@@ -400,9 +452,9 @@ static int mvpp2_bm_pool_create(struct device *dev, struct mvpp2 *priv,
if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
MVPP2_BM_POOL_PTR_ALIGN)) {
- dma_free_coherent(dev, bm_pool->size_bytes,
+ dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
bm_pool->virt_addr, bm_pool->dma_addr);
- dev_err(dev, "BM pool %d is not %d bytes aligned\n",
+ dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
return -ENOMEM;
}
@@ -413,11 +465,27 @@ static int mvpp2_bm_pool_create(struct device *dev, struct mvpp2 *priv,
val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
val |= MVPP2_BM_START_MASK;
+
+ val &= ~MVPP2_BM_LOW_THRESH_MASK;
+ val &= ~MVPP2_BM_HIGH_THRESH_MASK;
+
+	/* Set the 8-pool BPPI thresholds if the BM underrun protection
+	 * feature is enabled
+	 */
+ if (priv->hw_version == MVPP23 && bm_underrun_protect) {
+ val |= MVPP2_BM_LOW_THRESH_VALUE(MVPP23_BM_BPPI_LOW_THRESH);
+ val |= MVPP2_BM_HIGH_THRESH_VALUE(MVPP23_BM_BPPI_HIGH_THRESH);
+ } else {
+ val |= MVPP2_BM_LOW_THRESH_VALUE(MVPP2_BM_BPPI_LOW_THRESH);
+ val |= MVPP2_BM_HIGH_THRESH_VALUE(MVPP2_BM_BPPI_HIGH_THRESH);
+ }
+
mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
bm_pool->size = size;
bm_pool->pkt_size = 0;
bm_pool->buf_num = 0;
+ bm_pool->type = mvpp2_bm_pool_get_type(bm_pool->id);
return 0;
}
@@ -444,23 +512,16 @@ static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
*dma_addr = mvpp2_thread_read(priv, thread,
MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
- *phys_addr = mvpp2_thread_read(priv, thread, MVPP2_BM_VIRT_ALLOC_REG);
- if (priv->hw_version == MVPP22) {
+ if (priv->hw_version != MVPP21 && sizeof(dma_addr_t) == 8) {
u32 val;
- u32 dma_addr_highbits, phys_addr_highbits;
+ u32 dma_addr_highbits;
val = mvpp2_thread_read(priv, thread, MVPP22_BM_ADDR_HIGH_ALLOC);
dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK);
- phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >>
- MVPP22_BM_ADDR_HIGH_VIRT_SHIFT;
-
- if (sizeof(dma_addr_t) == 8)
- *dma_addr |= (u64)dma_addr_highbits << 32;
-
- if (sizeof(phys_addr_t) == 8)
- *phys_addr |= (u64)phys_addr_highbits << 32;
+ *dma_addr |= (u64)dma_addr_highbits << 32;
}
+ *phys_addr = dma_to_phys(dev, *dma_addr);
put_cpu();
}
@@ -522,14 +583,15 @@ static int mvpp2_check_hw_buf_num(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_p
}
/* Cleanup pool */
-static int mvpp2_bm_pool_destroy(struct device *dev, struct mvpp2 *priv,
+static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
+ struct mvpp2 *priv,
struct mvpp2_bm_pool *bm_pool)
{
int buf_num;
u32 val;
buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
- mvpp2_bm_bufs_free(dev, priv, bm_pool, buf_num);
+ mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool, buf_num);
/* Check buffer counters after free */
buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
@@ -548,26 +610,37 @@ static int mvpp2_bm_pool_destroy(struct device *dev, struct mvpp2 *priv,
priv->page_pool[bm_pool->id] = NULL;
}
- dma_free_coherent(dev, bm_pool->size_bytes,
+ dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
bm_pool->virt_addr,
bm_pool->dma_addr);
return 0;
}
-static int mvpp2_bm_pools_init(struct device *dev, struct mvpp2 *priv)
+static int mvpp2_bm_pools_init(struct platform_device *pdev,
+ struct mvpp2 *priv)
{
- int i, err, size, poolnum = MVPP2_BM_POOLS_NUM;
+ int i, err, size, cpu;
struct mvpp2_bm_pool *bm_pool;
- if (priv->percpu_pools)
- poolnum = mvpp2_get_nrxqs(priv) * 2;
+ if (recycle) {
+ /* Allocate per-CPU long pools array */
+ priv->pools_pcpu = devm_kcalloc(&pdev->dev, num_present_cpus(),
+ sizeof(*priv->pools_pcpu),
+ GFP_KERNEL);
+ if (!priv->pools_pcpu)
+ return -ENOMEM;
+ }
+
+ /* Initialize Virtual with 0x0 */
+ for_each_present_cpu(cpu)
+ mvpp2_thread_write(priv, cpu, MVPP2_BM_VIRT_RLS_REG, 0x0);
/* Create all pools with maximum size */
size = MVPP2_BM_POOL_SIZE_MAX;
- for (i = 0; i < poolnum; i++) {
+ for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
bm_pool = &priv->bm_pools[i];
bm_pool->id = i;
- err = mvpp2_bm_pool_create(dev, priv, bm_pool, size);
+ err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size);
if (err)
goto err_unroll_pools;
mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
@@ -575,86 +648,69 @@ static int mvpp2_bm_pools_init(struct device *dev, struct mvpp2 *priv)
return 0;
err_unroll_pools:
- dev_err(dev, "failed to create BM pool %d, size %d\n", i, size);
+ dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size);
for (i = i - 1; i >= 0; i--)
- mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]);
+ mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]);
return err;
}
-static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv)
+/* Enable the PPv23 8-pool mode */
+static void mvpp23_bm_set_8pool_mode(struct mvpp2 *priv)
{
- enum dma_data_direction dma_dir = DMA_FROM_DEVICE;
- int i, err, poolnum = MVPP2_BM_POOLS_NUM;
- struct mvpp2_port *port;
+ int val;
- if (priv->percpu_pools) {
- for (i = 0; i < priv->port_count; i++) {
- port = priv->port_list[i];
- if (port->xdp_prog) {
- dma_dir = DMA_BIDIRECTIONAL;
- break;
- }
- }
+ val = mvpp2_read(priv, MVPP22_BM_POOL_BASE_ADDR_HIGH_REG);
+ val |= MVPP23_BM_8POOL_MODE;
+ mvpp2_write(priv, MVPP22_BM_POOL_BASE_ADDR_HIGH_REG, val);
+}
- poolnum = mvpp2_get_nrxqs(priv) * 2;
- for (i = 0; i < poolnum; i++) {
- /* the pool in use */
- int pn = i / (poolnum / 2);
-
- priv->page_pool[i] =
- mvpp2_create_page_pool(dev,
- mvpp2_pools[pn].buf_num,
- mvpp2_pools[pn].pkt_size,
- dma_dir);
- if (IS_ERR(priv->page_pool[i])) {
- int j;
-
- for (j = 0; j < i; j++) {
- page_pool_destroy(priv->page_pool[j]);
- priv->page_pool[j] = NULL;
- }
- return PTR_ERR(priv->page_pool[i]);
- }
- }
- }
+/* Clean up the pool before its actual initialization in the OS */
+static void mvpp2_bm_pool_cleanup(struct mvpp2 *priv, int pool_id)
+{
+ u32 val;
+ int i;
- dev_info(dev, "using %d %s buffers\n", poolnum,
- priv->percpu_pools ? "per-cpu" : "shared");
+	/* Drain any residual buffers left in the BM by the firmware */
+ for (i = 0; i < MVPP2_BM_POOL_SIZE_MAX; i++)
+ mvpp2_read(priv, MVPP2_BM_PHY_ALLOC_REG(pool_id));
+
+ /* Stop the BM pool */
+ val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(pool_id));
+ val |= MVPP2_BM_STOP_MASK;
+ mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(pool_id), val);
- for (i = 0; i < poolnum; i++) {
- /* Mask BM all interrupts */
- mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
- /* Clear BM cause register */
- mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
+ /* Mask BM all interrupts */
+ mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(pool_id), 0);
+ /* Clear BM cause register */
+ mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(pool_id), 0);
+}
+
+static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
+{
+ int i, err;
+
+ for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
+		/* Make sure the pool is in a clean state in case it was
+		 * used by firmware.
+		 */
+ mvpp2_bm_pool_cleanup(priv, i);
}
/* Allocate and initialize BM pools */
- priv->bm_pools = devm_kcalloc(dev, poolnum,
+ priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM,
sizeof(*priv->bm_pools), GFP_KERNEL);
if (!priv->bm_pools)
return -ENOMEM;
- err = mvpp2_bm_pools_init(dev, priv);
+ if (priv->hw_version == MVPP23 && bm_underrun_protect)
+ mvpp23_bm_set_8pool_mode(priv);
+
+ err = mvpp2_bm_pools_init(pdev, priv);
if (err < 0)
return err;
return 0;
}
-static void mvpp2_setup_bm_pool(void)
-{
- /* Short pool */
- mvpp2_pools[MVPP2_BM_SHORT].buf_num = MVPP2_BM_SHORT_BUF_NUM;
- mvpp2_pools[MVPP2_BM_SHORT].pkt_size = MVPP2_BM_SHORT_PKT_SIZE;
-
- /* Long pool */
- mvpp2_pools[MVPP2_BM_LONG].buf_num = MVPP2_BM_LONG_BUF_NUM;
- mvpp2_pools[MVPP2_BM_LONG].pkt_size = MVPP2_BM_LONG_PKT_SIZE;
-
- /* Jumbo pool */
- mvpp2_pools[MVPP2_BM_JUMBO].buf_num = MVPP2_BM_JUMBO_BUF_NUM;
- mvpp2_pools[MVPP2_BM_JUMBO].pkt_size = MVPP2_BM_JUMBO_PKT_SIZE;
-}
-
/* Attach long pool to rxq */
static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
int lrxq, int long_pool)
@@ -697,12 +753,11 @@ static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}
-static void *mvpp2_buf_alloc(struct mvpp2_port *port,
- struct mvpp2_bm_pool *bm_pool,
- struct page_pool *page_pool,
- dma_addr_t *buf_dma_addr,
- phys_addr_t *buf_phys_addr,
- gfp_t gfp_mask)
+
+static dma_addr_t mvpp2_buf_alloc(struct mvpp2_port *port,
+ struct mvpp2_bm_pool *bm_pool,
+ struct page_pool *page_pool,
+ gfp_t gfp_mask)
{
dma_addr_t dma_addr;
struct page *page;
@@ -710,7 +765,7 @@ static void *mvpp2_buf_alloc(struct mvpp2_port *port,
data = mvpp2_frag_alloc(bm_pool, page_pool);
if (!data)
- return NULL;
+ return (dma_addr_t)data;
if (page_pool) {
page = (struct page *)data;
@@ -718,23 +773,214 @@ static void *mvpp2_buf_alloc(struct mvpp2_port *port,
data = page_to_virt(page);
} else {
dma_addr = dma_map_single(port->dev->dev.parent, data,
- MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
- DMA_FROM_DEVICE);
+ bm_pool->buf_size, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
mvpp2_frag_free(bm_pool, NULL, data);
- return NULL;
+ dma_addr = 0;
+ }
+ return dma_addr;
+}
+
+/* Calculate the shared address space used in single-queue mode */
+static int mvpp22_calc_shared_addr_space(struct mvpp2_port *port)
+{
+	/* If the number of CPUs is greater than or equal to the number of
+	 * threads, return the last address space
+	 */
+ if (num_active_cpus() >= MVPP2_MAX_THREADS)
+ return MVPP2_MAX_THREADS - 1;
+
+ return num_active_cpus();
+}
+
+/* Enable flow control for the port's RXQs */
+void mvpp2_rxq_enable_fc(struct mvpp2_port *port)
+{
+ int val, cm3_state, host_id, q;
+ int fq = port->first_rxq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->priv->mss_spinlock, flags);
+
+	/* Clear the Flow Control enable bit to prevent a race between FW and
+	 * kernel. If Flow Control was enabled, it will be re-enabled below.
+	 */
+ val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
+ cm3_state = (val & FLOW_CONTROL_ENABLE_BIT);
+ val &= ~FLOW_CONTROL_ENABLE_BIT;
+ mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
+
+ /* Set same Flow control for all RXQs */
+ for (q = 0; q < port->nrxqs; q++) {
+ /* Set stop and start Flow control RXQ thresholds */
+ val = MSS_THRESHOLD_START;
+ val |= (MSS_THRESHOLD_STOP << MSS_RXQ_TRESH_STOP_OFFS);
+ mvpp2_cm3_write(port->priv, MSS_RXQ_TRESH_REG(q, fq), val);
+
+ val = mvpp2_cm3_read(port->priv, MSS_RXQ_ASS_REG(q, fq));
+ /* Set RXQ port ID */
+ val &= ~(MSS_RXQ_ASS_PORTID_MASK << MSS_RXQ_ASS_Q_BASE(q, fq));
+ val |= (port->id << MSS_RXQ_ASS_Q_BASE(q, fq));
+ val &= ~(MSS_RXQ_ASS_HOSTID_MASK << (MSS_RXQ_ASS_Q_BASE(q, fq)
+ + MSS_RXQ_ASS_HOSTID_OFFS));
+
+		/* Calculate RXQ host ID:
+		 * In single queue mode: host ID equals the host ID used
+		 * for the shared RX interrupt
+		 * In multi queue mode: host ID equals RXQ ID / number of
+		 * TC queues
+		 * In single resource mode: host ID is always 0
+		 */
+ if (queue_mode == MVPP2_QDIST_SINGLE_MODE)
+ host_id = mvpp22_calc_shared_addr_space(port);
+ else if (queue_mode == MVPP2_QDIST_MULTI_MODE)
+ host_id = q / port->num_tc_queues;
+ else
+ host_id = 0;
+
+ /* Set RXQ host ID */
+ val |= (host_id << (MSS_RXQ_ASS_Q_BASE(q, fq)
+ + MSS_RXQ_ASS_HOSTID_OFFS));
+
+ mvpp2_cm3_write(port->priv, MSS_RXQ_ASS_REG(q, fq), val);
+ }
+
+	/* Notify Firmware that Flow control config space is ready for update */
+ val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
+ val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
+ val |= cm3_state;
+ mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
+
+ spin_unlock_irqrestore(&port->priv->mss_spinlock, flags);
+}
+
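A compact sketch of the host-ID selection documented inside the function above; the queue-mode values come from this patch, while the helper name itself is hypothetical:

static int fc_rxq_host_id(int queue_mode, int q, int num_tc_queues,
			  int shared_addr_space)
{
	if (queue_mode == 0)			/* MVPP2_QDIST_SINGLE_MODE */
		return shared_addr_space;	/* shared RX interrupt host */
	if (queue_mode == 1)			/* MVPP2_QDIST_MULTI_MODE  */
		return q / num_tc_queues;
	return 0;				/* single-resource mode    */
}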
+/* Disable flow control for the port's RXQs */
+void mvpp2_rxq_disable_fc(struct mvpp2_port *port)
+{
+ int val, cm3_state, q;
+ unsigned long flags;
+ int fq = port->first_rxq;
+
+ spin_lock_irqsave(&port->priv->mss_spinlock, flags);
+
+	/* Clear the Flow Control enable bit to prevent a race between FW and
+	 * kernel. If Flow Control was enabled, it will be re-enabled below.
+	 */
+ val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
+ cm3_state = (val & FLOW_CONTROL_ENABLE_BIT);
+ val &= ~FLOW_CONTROL_ENABLE_BIT;
+ mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
+
+ /* Disable Flow control for all RXQs */
+ for (q = 0; q < port->nrxqs; q++) {
+ /* Set threshold 0 to disable Flow control */
+ val = 0;
+ val |= (0 << MSS_RXQ_TRESH_STOP_OFFS);
+ mvpp2_cm3_write(port->priv, MSS_RXQ_TRESH_REG(q, fq), val);
+
+ val = mvpp2_cm3_read(port->priv, MSS_RXQ_ASS_REG(q, fq));
+
+ val &= ~(MSS_RXQ_ASS_PORTID_MASK << MSS_RXQ_ASS_Q_BASE(q, fq));
+
+ val &= ~(MSS_RXQ_ASS_HOSTID_MASK << (MSS_RXQ_ASS_Q_BASE(q, fq)
+ + MSS_RXQ_ASS_HOSTID_OFFS));
+
+ mvpp2_cm3_write(port->priv, MSS_RXQ_ASS_REG(q, fq), val);
+ }
+
+	/* Notify Firmware that Flow control config space is ready for update */
+ val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
+ val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
+ val |= cm3_state;
+ mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
+
+ spin_unlock_irqrestore(&port->priv->mss_spinlock, flags);
+}
+
+/* Enable/disable flow control for a BM pool */
+void mvpp2_bm_pool_update_fc(struct mvpp2_port *port,
+ struct mvpp2_bm_pool *pool,
+ bool en)
+{
+ int val, cm3_state;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->priv->mss_spinlock, flags);
+
+	/* Clear the Flow Control enable bit to prevent a race between FW and
+	 * kernel. If Flow Control was enabled, it will be re-enabled below.
+	 */
+ val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
+ cm3_state = (val & FLOW_CONTROL_ENABLE_BIT);
+ val &= ~FLOW_CONTROL_ENABLE_BIT;
+ mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
+
+	/* Check if the BM pool should be enabled or disabled */
+ if (en) {
+ /* Set BM pool start and stop thresholds per port */
+ val = mvpp2_cm3_read(port->priv, MSS_BUF_POOL_REG(pool->id));
+ val |= MSS_BUF_POOL_PORT_OFFS(port->id);
+ val &= ~MSS_BUF_POOL_START_MASK;
+ val |= (MSS_THRESHOLD_START << MSS_BUF_POOL_START_OFFS);
+ val &= ~MSS_BUF_POOL_STOP_MASK;
+ val |= MSS_THRESHOLD_STOP;
+ mvpp2_cm3_write(port->priv, MSS_BUF_POOL_REG(pool->id), val);
+ } else {
+ /* Remove BM pool from the port */
+ val = mvpp2_cm3_read(port->priv, MSS_BUF_POOL_REG(pool->id));
+ val &= ~MSS_BUF_POOL_PORT_OFFS(port->id);
+
+ /* Zero BM pool start and stop thresholds to disable pool
+ * flow control if pool empty (not used by any port)
+ */
+ if (!pool->buf_num) {
+ val &= ~MSS_BUF_POOL_START_MASK;
+ val &= ~MSS_BUF_POOL_STOP_MASK;
}
+
+ mvpp2_cm3_write(port->priv, MSS_BUF_POOL_REG(pool->id), val);
+ }
+
+	/* Notify Firmware that Flow control config space is ready for update */
+ val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
+ val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
+ val |= cm3_state;
+ mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
+
+ spin_unlock_irqrestore(&port->priv->mss_spinlock, flags);
+}
+
+static int mvpp2_enable_global_fc(struct mvpp2 *priv)
+{
+ int val, timeout = 0;
+
+	/* Enable global flow control. At this stage global flow control is
+	 * enabled, but still disabled per port.
+	 */
+ val = mvpp2_cm3_read(priv, MSS_FC_COM_REG);
+ val |= FLOW_CONTROL_ENABLE_BIT;
+ mvpp2_cm3_write(priv, MSS_FC_COM_REG, val);
+
+	/* Check if Firmware is running and disable FC if not */
+ val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
+ mvpp2_cm3_write(priv, MSS_FC_COM_REG, val);
+
+ while (timeout < MSS_FC_MAX_TIMEOUT) {
+ val = mvpp2_cm3_read(priv, MSS_FC_COM_REG);
+
+ if (!(val & FLOW_CONTROL_UPDATE_COMMAND_BIT))
+ return 0;
+ usleep_range(10, 20);
+ timeout++;
}
- *buf_dma_addr = dma_addr;
- *buf_phys_addr = virt_to_phys(data);
- return data;
+ priv->global_tx_fc = false;
+ return -ENOTSUPP;
}
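The same bounded wait could also be expressed with the kernel's read_poll_timeout() helper; a sketch under two assumptions (that <linux/iopoll.h> is available in the target tree and that MSS_FC_MAX_TIMEOUT corresponds to roughly a 10 ms budget):

#include <linux/iopoll.h>

static int mvpp2_wait_fc_update_done(struct mvpp2 *priv)
{
	u32 val;

	/* poll MSS_FC_COM_REG until FW clears the update-command bit */
	return read_poll_timeout(mvpp2_cm3_read, val,
				 !(val & FLOW_CONTROL_UPDATE_COMMAND_BIT),
				 10, 10000, false, priv, MSS_FC_COM_REG);
}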
/* Release buffer to BM */
static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
- dma_addr_t buf_dma_addr,
- phys_addr_t buf_phys_addr)
+ dma_addr_t buf_dma_addr)
{
unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
unsigned long flags = 0;
@@ -742,29 +988,21 @@ static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
if (test_bit(thread, &port->priv->lock_map))
spin_lock_irqsave(&port->bm_lock[thread], flags);
- if (port->priv->hw_version == MVPP22) {
- u32 val = 0;
-
- if (sizeof(dma_addr_t) == 8)
- val |= upper_32_bits(buf_dma_addr) &
+ /* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
+ * returned in the "cookie" field of the RX descriptor.
+	 * For performance reasons don't store VA|PA and don't use the "cookie";
+	 * VA/PA are recovered faster via dma_to_phys(dma_addr) and phys_to_virt().
+ */
+#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT) && defined(CONFIG_PHYS_ADDR_T_64BIT)
+ if (!static_branch_unlikely(&mvpp21_variant)) {
+ u32 val = upper_32_bits(buf_dma_addr) &
MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;
- if (sizeof(phys_addr_t) == 8)
- val |= (upper_32_bits(buf_phys_addr)
- << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
- MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;
-
mvpp2_thread_write_relaxed(port->priv, thread,
MVPP22_BM_ADDR_HIGH_RLS_REG, val);
}
+#endif
- /* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
- * returned in the "cookie" field of the RX
- * descriptor. Instead of storing the virtual address, we
- * store the physical address
- */
- mvpp2_thread_write_relaxed(port->priv, thread,
- MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
mvpp2_thread_write_relaxed(port->priv, thread,
MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
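A sketch of the address-recovery path described in the comment above; it assumes a direct-mapped (dma-direct) device, which is why this patch adds <linux/dma-direct.h>, and the helper name is hypothetical:

#include <linux/dma-direct.h>
#include <linux/io.h>

static void *bm_buf_virt(struct device *dev, dma_addr_t dma_addr)
{
	/* cheaper than storing VA/PA in the BM "cookie" registers */
	return phys_to_virt(dma_to_phys(dev, dma_addr));
}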
@@ -805,13 +1043,11 @@ static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
if (port->priv->percpu_pools)
pp = port->priv->page_pool[bm_pool->id];
for (i = 0; i < buf_num; i++) {
- buf = mvpp2_buf_alloc(port, bm_pool, pp, &dma_addr,
- &phys_addr, GFP_KERNEL);
- if (!buf)
+ dma_addr = mvpp2_buf_alloc(port, bm_pool, pp, GFP_KERNEL);
+ if (!dma_addr)
break;
- mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
- phys_addr);
+ mvpp2_bm_pool_put(port, bm_pool->id, dma_addr);
}
/* Update BM driver with number of buffers added to pool */
@@ -819,7 +1055,9 @@ static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
netdev_dbg(port->dev,
"pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
- bm_pool->id, bm_pool->pkt_size, buf_size, total_size);
+ bm_pool->id, bm_pool->pkt_size,
+ MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
+ bm_pool->frag_size);
netdev_dbg(port->dev,
"pool %d: %d of %d buffers added\n",
@@ -834,10 +1072,10 @@ static struct mvpp2_bm_pool *
mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned pool, int pkt_size)
{
struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
+ enum mvpp2_bm_pool_type pool_type = mvpp2_bm_pool_get_type(pool);
int num;
- if ((port->priv->percpu_pools && pool > mvpp2_get_nrxqs(port->priv) * 2) ||
- (!port->priv->percpu_pools && pool >= MVPP2_BM_POOLS_NUM)) {
+ if (pool >= MVPP2_BM_POOLS_NUM) {
netdev_err(port->dev, "Invalid pool %d\n", pool);
return NULL;
}
@@ -853,14 +1091,9 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned pool, int pkt_size)
*/
pkts_num = new_pool->buf_num;
if (pkts_num == 0) {
- if (port->priv->percpu_pools) {
- if (pool < port->nrxqs)
- pkts_num = mvpp2_pools[MVPP2_BM_SHORT].buf_num;
- else
- pkts_num = mvpp2_pools[MVPP2_BM_LONG].buf_num;
- } else {
- pkts_num = mvpp2_pools[pool].buf_num;
- }
+ pkts_num = mvpp2_bm_pool_default_buf_num(pool_type);
+ if (pkts_num < 0)
+ return NULL;
} else {
mvpp2_bm_bufs_free(port->dev->dev.parent,
port->priv, new_pool, pkts_num);
@@ -886,76 +1119,57 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned pool, int pkt_size)
return new_pool;
}
-static struct mvpp2_bm_pool *
-mvpp2_bm_pool_use_percpu(struct mvpp2_port *port, int type,
- unsigned int pool, int pkt_size)
+/* Create the long pool for this CPU (called on each CPU) */
+static void mvpp2_bm_pool_pcpu_use(void *arg)
{
- struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
- int num;
-
- if (pool > port->nrxqs * 2) {
- netdev_err(port->dev, "Invalid pool %d\n", pool);
- return NULL;
- }
-
- /* Allocate buffers in case BM pool is used as long pool, but packet
- * size doesn't match MTU or BM pool hasn't being used yet
- */
- if (new_pool->pkt_size == 0) {
- int pkts_num;
-
- /* Set default buffer number or free all the buffers in case
- * the pool is not empty
- */
- pkts_num = new_pool->buf_num;
- if (pkts_num == 0)
- pkts_num = mvpp2_pools[type].buf_num;
- else
- mvpp2_bm_bufs_free(port->dev->dev.parent,
- port->priv, new_pool, pkts_num);
-
- new_pool->pkt_size = pkt_size;
- new_pool->frag_size =
- SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
- MVPP2_SKB_SHINFO_SIZE;
+ struct mvpp2_port *port = arg;
+ struct mvpp2_bm_pool **pools_pcpu = port->priv->pools_pcpu;
+ int cpu = smp_processor_id();
+ int pool_id, pkt_size;
- /* Allocate buffers for this pool */
- num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
- if (num != pkts_num) {
- WARN(1, "pool %d: %d of %d allocated\n",
- new_pool->id, num, pkts_num);
- return NULL;
- }
- }
+ if (pools_pcpu[cpu])
+ return;
- mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
- MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
+	pool_id = mvpp2_bm_pool_get_id(MVPP2_BM_LONG) + cpu;
+ pkt_size = mvpp2_bm_pool_default_pkt_size(MVPP2_BM_LONG);
- return new_pool;
+ pools_pcpu[cpu] = mvpp2_bm_pool_use(port, pool_id, pkt_size);
}
-/* Initialize pools for swf, shared buffers variant */
-static int mvpp2_swf_bm_pool_init_shared(struct mvpp2_port *port)
+/* Initialize pools for swf */
+static int mvpp2_swf_bm_pool_pcpu_init(struct mvpp2_port *port)
{
- enum mvpp2_bm_pool_log_num long_log_pool, short_log_pool;
- int rxq;
+ enum mvpp2_bm_pool_type long_pool_type, short_pool_type;
+ int rxq, pkt_size, pool_id, cpu;
/* If port pkt_size is higher than 1518B:
* HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
* else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
*/
if (port->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
- long_log_pool = MVPP2_BM_JUMBO;
- short_log_pool = MVPP2_BM_LONG;
+ long_pool_type = MVPP2_BM_JUMBO;
+ short_pool_type = MVPP2_BM_LONG;
} else {
- long_log_pool = MVPP2_BM_LONG;
- short_log_pool = MVPP2_BM_SHORT;
+ long_pool_type = MVPP2_BM_LONG;
+ short_pool_type = MVPP2_BM_SHORT;
}
- if (!port->pool_long) {
- port->pool_long =
- mvpp2_bm_pool_use(port, long_log_pool,
- mvpp2_pools[long_log_pool].pkt_size);
+ /* First handle the per-CPU long pools,
+ * as they are used in both cases.
+ */
+ on_each_cpu(mvpp2_bm_pool_pcpu_use, port, 1);
+ /* Sanity check */
+ for_each_present_cpu(cpu) {
+ if (!port->priv->pools_pcpu[cpu])
+ return -ENOMEM;
+ }
+
+ if (!port->pool_long && long_pool_type == MVPP2_BM_JUMBO) {
+ /* HW Long pool - SW Jumbo pool */
+ pool_id = mvpp2_bm_pool_get_id(long_pool_type);
+ pkt_size = mvpp2_bm_pool_default_pkt_size(long_pool_type);
+
+ port->pool_long = mvpp2_bm_pool_use(port, pool_id, pkt_size);
if (!port->pool_long)
return -ENOMEM;
@@ -963,12 +1177,27 @@ static int mvpp2_swf_bm_pool_init_shared(struct mvpp2_port *port)
for (rxq = 0; rxq < port->nrxqs; rxq++)
mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
+
+ /* HW Short pool - SW Long pool (per-CPU) */
+ port->pool_short = port->priv->pools_pcpu[0];
+ for (rxq = 0; rxq < port->nrxqs; rxq++)
+ mvpp2_rxq_short_pool_set(port, rxq,
+ port->pool_short->id + rxq);
+
+ } else if (!port->pool_long) {
+ /* HW Long pool - SW Long pool (per-CPU) */
+ port->pool_long = port->priv->pools_pcpu[0];
+ for (rxq = 0; rxq < port->nrxqs; rxq++)
+ mvpp2_rxq_long_pool_set(port, rxq,
+ port->pool_long->id + rxq);
}
if (!port->pool_short) {
- port->pool_short =
- mvpp2_bm_pool_use(port, short_log_pool,
- mvpp2_pools[short_log_pool].pkt_size);
+ /* HW Short pool - SW Short pool */
+ pool_id = mvpp2_bm_pool_get_id(short_pool_type);
+ pkt_size = mvpp2_bm_pool_default_pkt_size(short_pool_type);
+
+ port->pool_short = mvpp2_bm_pool_use(port, pool_id, pkt_size);
if (!port->pool_short)
return -ENOMEM;
@@ -979,108 +1208,141 @@ static int mvpp2_swf_bm_pool_init_shared(struct mvpp2_port *port)
port->pool_short->id);
}
+ /* Fill per-CPU Long pools' port map */
+ for_each_present_cpu(cpu)
+ port->priv->pools_pcpu[cpu]->port_map |= BIT(port->id);
+
return 0;
}
-/* Initialize pools for swf, percpu buffers variant */
-static int mvpp2_swf_bm_pool_init_percpu(struct mvpp2_port *port)
+/* Initialize pools for swf */
+static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
{
- struct mvpp2_bm_pool *bm_pool;
- int i;
-
- for (i = 0; i < port->nrxqs; i++) {
- bm_pool = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_SHORT, i,
- mvpp2_pools[MVPP2_BM_SHORT].pkt_size);
- if (!bm_pool)
- return -ENOMEM;
+ enum mvpp2_bm_pool_type long_pool_type, short_pool_type;
+ int rxq;
- bm_pool->port_map |= BIT(port->id);
- mvpp2_rxq_short_pool_set(port, i, bm_pool->id);
+ /* If port pkt_size is higher than 1518B:
+ * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
+ * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
+ */
+ if (port->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
+ long_pool_type = MVPP2_BM_JUMBO;
+ short_pool_type = MVPP2_BM_LONG;
+ } else {
+ long_pool_type = MVPP2_BM_LONG;
+ short_pool_type = MVPP2_BM_SHORT;
}
- for (i = 0; i < port->nrxqs; i++) {
- bm_pool = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_LONG, i + port->nrxqs,
- mvpp2_pools[MVPP2_BM_LONG].pkt_size);
- if (!bm_pool)
+ if (!port->pool_long) {
+ port->pool_long =
+ mvpp2_bm_pool_use(port,
+ mvpp2_bm_pool_get_id(long_pool_type),
+ mvpp2_bm_pool_default_pkt_size(long_pool_type));
+ if (!port->pool_long)
return -ENOMEM;
- bm_pool->port_map |= BIT(port->id);
- mvpp2_rxq_long_pool_set(port, i, bm_pool->id);
- }
-
- port->pool_long = NULL;
- port->pool_short = NULL;
+ port->pool_long->port_map |= BIT(port->id);
- return 0;
-}
+ for (rxq = 0; rxq < port->nrxqs; rxq++)
+ mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
+ }
-static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
-{
- if (port->priv->percpu_pools)
- return mvpp2_swf_bm_pool_init_percpu(port);
- else
- return mvpp2_swf_bm_pool_init_shared(port);
-}
+ if (!port->pool_short) {
+ port->pool_short =
+ mvpp2_bm_pool_use(port,
+ mvpp2_bm_pool_get_id(short_pool_type),
+ mvpp2_bm_pool_default_pkt_size(short_pool_type));
+ if (!port->pool_short)
+ return -ENOMEM;
-static void mvpp2_set_hw_csum(struct mvpp2_port *port,
- enum mvpp2_bm_pool_log_num new_long_pool)
-{
- const netdev_features_t csums = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ port->pool_short->port_map |= BIT(port->id);
- /* Update L4 checksum when jumbo enable/disable on port.
- * Only port 0 supports hardware checksum offload due to
- * the Tx FIFO size limitation.
- * Also, don't set NETIF_F_HW_CSUM because L3_offset in TX descriptor
- * has 7 bits, so the maximum L3 offset is 128.
- */
- if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
- port->dev->features &= ~csums;
- port->dev->hw_features &= ~csums;
- } else {
- port->dev->features |= csums;
- port->dev->hw_features |= csums;
+ for (rxq = 0; rxq < port->nrxqs; rxq++)
+ mvpp2_rxq_short_pool_set(port, rxq,
+ port->pool_short->id);
}
+
+ return 0;
}
static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
{
struct mvpp2_port *port = netdev_priv(dev);
- enum mvpp2_bm_pool_log_num new_long_pool;
+ enum mvpp2_bm_pool_type new_long_pool_type;
+ struct mvpp2_bm_pool **pools_pcpu = port->priv->pools_pcpu;
int pkt_size = MVPP2_RX_PKT_SIZE(mtu);
-
- if (port->priv->percpu_pools)
- goto out_set;
+ int err, cpu;
/* If port MTU is higher than 1518B:
* HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
* else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
*/
if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
- new_long_pool = MVPP2_BM_JUMBO;
+ new_long_pool_type = MVPP2_BM_JUMBO;
else
- new_long_pool = MVPP2_BM_LONG;
+ new_long_pool_type = MVPP2_BM_LONG;
+
+ if (new_long_pool_type != port->pool_long->type) {
+ if (port->tx_fc) {
+ if (recycle) {
+ for_each_present_cpu(cpu)
+ mvpp2_bm_pool_update_fc(port,
+ pools_pcpu[cpu],
+ false);
+ } else if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
+ mvpp2_bm_pool_update_fc(port,
+ port->pool_short,
+ false);
+ else
+ mvpp2_bm_pool_update_fc(port, port->pool_long,
+ false);
+ }
- if (new_long_pool != port->pool_long->id) {
/* Remove port from old short & long pool */
- port->pool_long = mvpp2_bm_pool_use(port, port->pool_long->id,
- port->pool_long->pkt_size);
port->pool_long->port_map &= ~BIT(port->id);
port->pool_long = NULL;
- port->pool_short = mvpp2_bm_pool_use(port, port->pool_short->id,
- port->pool_short->pkt_size);
port->pool_short->port_map &= ~BIT(port->id);
port->pool_short = NULL;
port->pkt_size = pkt_size;
/* Add port to new short & long pool */
- mvpp2_swf_bm_pool_init(port);
+ if (recycle) {
+ for_each_present_cpu(cpu)
+ pools_pcpu[cpu]->port_map &= ~BIT(port->id);
+ err = mvpp2_swf_bm_pool_pcpu_init(port);
+ } else {
+ err = mvpp2_swf_bm_pool_init(port);
+ }
+ if (err)
+ return err;
- mvpp2_set_hw_csum(port, new_long_pool);
+ if (port->tx_fc) {
+ if (recycle) {
+ for_each_present_cpu(cpu)
+ mvpp2_bm_pool_update_fc(port,
+ pools_pcpu[cpu],
+							       true);
+ } else if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
+ mvpp2_bm_pool_update_fc(port, port->pool_long,
+ true);
+ else
+ mvpp2_bm_pool_update_fc(port, port->pool_short,
+ true);
+ }
+
+		/* Update L4 checksum offload when jumbo is enabled/disabled
+		 * on the port
+		 */
+ if (new_long_pool_type == MVPP2_BM_JUMBO && port->id != 0) {
+ dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+ dev->hw_features &= ~(NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM);
+ } else {
+ dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ }
}
-out_set:
dev->mtu = mtu;
dev->wanted_features = dev->features;
@@ -1141,6 +1403,9 @@ static void mvpp2_interrupts_mask(void *arg)
mvpp2_thread_write(port->priv,
mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
+ mvpp2_thread_write(port->priv,
+ mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
+ MVPP2_ISR_RX_ERR_CAUSE_REG(port->id), 0);
}
/* Unmask the current thread's Rx/Tx interrupts.
@@ -1156,14 +1421,20 @@ static void mvpp2_interrupts_unmask(void *arg)
if (smp_processor_id() >= port->priv->nthreads)
return;
- val = MVPP2_CAUSE_MISC_SUM_MASK |
- MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version);
+ if (port->flags & MVPP22_F_IF_MUSDK)
+ return;
+
+ val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(mvpp21_variant);
if (port->has_tx_irqs)
val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
mvpp2_thread_write(port->priv,
mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
+ mvpp2_thread_write(port->priv,
+ mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
+ MVPP2_ISR_RX_ERR_CAUSE_REG(port->id),
+ MVPP2_ISR_RX_ERR_CAUSE_NONOCC_MASK);
}
static void
@@ -1172,13 +1443,13 @@ mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
u32 val;
int i;
- if (port->priv->hw_version != MVPP22)
+ if (port->priv->hw_version == MVPP21)
return;
if (mask)
val = 0;
else
- val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(MVPP22);
+ val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(mvpp21_variant);
for (i = 0; i < port->nqvecs; i++) {
struct mvpp2_queue_vector *v = port->qvecs + i;
@@ -1188,6 +1459,9 @@ mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
mvpp2_thread_write(port->priv, v->sw_thread_id,
MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
+ mvpp2_thread_write(port->priv, v->sw_thread_id,
+ MVPP2_ISR_RX_ERR_CAUSE_REG(port->id),
+ MVPP2_ISR_RX_ERR_CAUSE_NONOCC_MASK);
}
}
@@ -1203,12 +1477,6 @@ static bool mvpp2_port_supports_rgmii(struct mvpp2_port *port)
}
/* Port configuration routines */
-static bool mvpp2_is_xlg(phy_interface_t interface)
-{
- return interface == PHY_INTERFACE_MODE_10GBASER ||
- interface == PHY_INTERFACE_MODE_XAUI;
-}
-
static void mvpp2_modify(void __iomem *ptr, u32 mask, u32 set)
{
u32 old, val;
@@ -1237,6 +1505,21 @@ static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
}
+static void mvpp22_gop_init_mii(struct mvpp2_port *port)
+{
+ struct mvpp2 *priv = port->priv;
+ u32 val;
+
+ regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
+ val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT;
+ regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);
+
+ regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
+ val |= GENCONF_CTRL0_PORT1_RGMII_MII;
+ val &= ~GENCONF_CTRL0_PORT1_RGMII;
+ regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
+}
+
static void mvpp22_gop_init_sgmii(struct mvpp2_port *port)
{
struct mvpp2 *priv = port->priv;
@@ -1257,27 +1540,92 @@ static void mvpp22_gop_init_sgmii(struct mvpp2_port *port)
}
}
-static void mvpp22_gop_init_10gkr(struct mvpp2_port *port)
+static void mvpp22_gop_init_xpcs(struct mvpp2_port *port)
{
struct mvpp2 *priv = port->priv;
- void __iomem *mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
void __iomem *xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
u32 val;
+ /* Reset the XPCS when reconfiguring the lanes */
+ val = readl(xpcs + MVPP22_XPCS_CFG0);
+ writel(val & ~MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0);
+
+ /* XPCS */
val = readl(xpcs + MVPP22_XPCS_CFG0);
val &= ~(MVPP22_XPCS_CFG0_PCS_MODE(0x3) |
MVPP22_XPCS_CFG0_ACTIVE_LANE(0x3));
val |= MVPP22_XPCS_CFG0_ACTIVE_LANE(2);
writel(val, xpcs + MVPP22_XPCS_CFG0);
+ /* Release lanes from reset */
+ val = readl(xpcs + MVPP22_XPCS_CFG0);
+ writel(val | MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0);
+
+}
+
+static void mvpp22_gop_init_mpcs(struct mvpp2_port *port)
+{
+ struct mvpp2 *priv = port->priv;
+ void __iomem *mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
+ u32 val;
+
+ /* MPCS */
val = readl(mpcs + MVPP22_MPCS_CTRL);
val &= ~MVPP22_MPCS_CTRL_FWD_ERR_CONN;
writel(val, mpcs + MVPP22_MPCS_CTRL);
val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
- val &= ~MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7);
+ val &= ~(MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7) | MAC_CLK_RESET_MAC |
+ MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX);
val |= MVPP22_MPCS_CLK_RESET_DIV_RATIO(1);
writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
+
+ val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET;
+ val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX;
+ writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
+}
+
+static void mvpp22_gop_fca_enable_periodic(struct mvpp2_port *port, bool en)
+{
+ struct mvpp2 *priv = port->priv;
+ void __iomem *fca = priv->iface_base + MVPP22_FCA_BASE(port->gop_id);
+ u32 val;
+
+ val = readl(fca + MVPP22_FCA_CONTROL_REG);
+ val &= ~MVPP22_FCA_ENABLE_PERIODIC;
+ if (en)
+ val |= MVPP22_FCA_ENABLE_PERIODIC;
+ writel(val, fca + MVPP22_FCA_CONTROL_REG);
+}
+
+static void mvpp22_gop_fca_set_timer(struct mvpp2_port *port, u32 timer)
+{
+ struct mvpp2 *priv = port->priv;
+ void __iomem *fca = priv->iface_base + MVPP22_FCA_BASE(port->gop_id);
+ u32 lsb, msb;
+
+ lsb = timer & MVPP22_FCA_REG_MASK;
+ msb = timer >> MVPP22_FCA_REG_SIZE;
+
+ writel(lsb, fca + MVPP22_PERIODIC_COUNTER_LSB_REG);
+ writel(msb, fca + MVPP22_PERIODIC_COUNTER_MSB_REG);
+}
+
+/* Set the Flow Control timer x140 faster than the pause quanta to ensure
+ * that the link partner won't send traffic while the port is in XOFF mode.
+ */
+static void mvpp22_gop_fca_set_periodic_timer(struct mvpp2_port *port)
+{
+ u32 timer;
+
+ timer = (port->priv->tclk / (USEC_PER_SEC * FC_CLK_DIVIDER))
+ * FC_QUANTA;
+
+ mvpp22_gop_fca_enable_periodic(port, false);
+
+ mvpp22_gop_fca_set_timer(port, timer);
+
+ mvpp22_gop_fca_enable_periodic(port, true);
}
static int mvpp22_gop_init(struct mvpp2_port *port)
@@ -1289,6 +1637,11 @@ static int mvpp22_gop_init(struct mvpp2_port *port)
return 0;
switch (port->phy_interface) {
+ case PHY_INTERFACE_MODE_MII:
+ if (port->gop_id == 0 || port->gop_id == 2)
+ goto invalid_conf;
+ mvpp22_gop_init_mii(port);
+ break;
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_RXID:
@@ -1300,13 +1653,22 @@ static int mvpp22_gop_init(struct mvpp2_port *port)
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_1000BASEX:
case PHY_INTERFACE_MODE_2500BASEX:
+ case PHY_INTERFACE_MODE_2500BASET:
mvpp22_gop_init_sgmii(port);
break;
+ case PHY_INTERFACE_MODE_RXAUI:
+ if (port->gop_id != 0)
+ goto invalid_conf;
+ mvpp22_gop_init_xpcs(port);
+ break;
case PHY_INTERFACE_MODE_10GBASER:
- if (!mvpp2_port_supports_xlg(port))
+ case PHY_INTERFACE_MODE_5GKR:
+ if (!port->has_xlg_mac)
goto invalid_conf;
- mvpp22_gop_init_10gkr(port);
+ mvpp22_gop_init_mpcs(port);
break;
+ case PHY_INTERFACE_MODE_INTERNAL:
+ return 0;
default:
goto unsupported_conf;
}
@@ -1324,6 +1686,8 @@ static int mvpp22_gop_init(struct mvpp2_port *port)
val |= GENCONF_SOFT_RESET1_GOP;
regmap_write(priv->sysctrl_base, GENCONF_SOFT_RESET1, val);
+ mvpp22_gop_fca_set_periodic_timer(port);
+
unsupported_conf:
return 0;
@@ -1338,17 +1702,21 @@ static void mvpp22_gop_unmask_irq(struct mvpp2_port *port)
if (phy_interface_mode_is_rgmii(port->phy_interface) ||
phy_interface_mode_is_8023z(port->phy_interface) ||
- port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
+ port->phy_interface == PHY_INTERFACE_MODE_MII ||
+ port->phy_interface == PHY_INTERFACE_MODE_SGMII ||
+ port->phy_interface == PHY_INTERFACE_MODE_2500BASET) {
/* Enable the GMAC link status irq for this port */
val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
val |= MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
}
- if (mvpp2_port_supports_xlg(port)) {
+ if (port->has_xlg_mac) {
/* Enable the XLG/GIG irqs for this port */
val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
- if (mvpp2_is_xlg(port->phy_interface))
+ if (port->phy_interface == PHY_INTERFACE_MODE_10GKR ||
+ port->phy_interface == PHY_INTERFACE_MODE_5GKR ||
+ port->phy_interface == PHY_INTERFACE_MODE_INTERNAL)
val |= MVPP22_XLG_EXT_INT_MASK_XLG;
else
val |= MVPP22_XLG_EXT_INT_MASK_GIG;
@@ -1360,7 +1728,7 @@ static void mvpp22_gop_mask_irq(struct mvpp2_port *port)
{
u32 val;
- if (mvpp2_port_supports_xlg(port)) {
+ if (port->has_xlg_mac) {
val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
val &= ~(MVPP22_XLG_EXT_INT_MASK_XLG |
MVPP22_XLG_EXT_INT_MASK_GIG);
@@ -1369,7 +1737,9 @@ static void mvpp22_gop_mask_irq(struct mvpp2_port *port)
if (phy_interface_mode_is_rgmii(port->phy_interface) ||
phy_interface_mode_is_8023z(port->phy_interface) ||
- port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
+ port->phy_interface == PHY_INTERFACE_MODE_MII ||
+ port->phy_interface == PHY_INTERFACE_MODE_SGMII ||
+ port->phy_interface == PHY_INTERFACE_MODE_2500BASET) {
val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
val &= ~MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
@@ -1387,13 +1757,14 @@ static void mvpp22_gop_setup_irq(struct mvpp2_port *port)
if (port->phylink ||
phy_interface_mode_is_rgmii(port->phy_interface) ||
phy_interface_mode_is_8023z(port->phy_interface) ||
+ port->phy_interface == PHY_INTERFACE_MODE_MII ||
port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
val = readl(port->base + MVPP22_GMAC_INT_MASK);
val |= MVPP22_GMAC_INT_MASK_LINK_STAT;
writel(val, port->base + MVPP22_GMAC_INT_MASK);
}
- if (mvpp2_port_supports_xlg(port)) {
+ if (port->has_xlg_mac) {
val = readl(port->base + MVPP22_XLG_INT_MASK);
val |= MVPP22_XLG_INT_MASK_LINK;
writel(val, port->base + MVPP22_XLG_INT_MASK);
@@ -1435,10 +1806,16 @@ static void mvpp2_port_enable(struct mvpp2_port *port)
{
u32 val;
- if (mvpp2_port_supports_xlg(port) &&
- mvpp2_is_xlg(port->phy_interface)) {
+ if (port->phy_interface == PHY_INTERFACE_MODE_INTERNAL)
+ return;
+
+ if (port->has_xlg_mac &&
+ (port->phy_interface == PHY_INTERFACE_MODE_RXAUI ||
+ port->phy_interface == PHY_INTERFACE_MODE_10GKR ||
+ port->phy_interface == PHY_INTERFACE_MODE_5GKR)) {
val = readl(port->base + MVPP22_XLG_CTRL0_REG);
- val |= MVPP22_XLG_CTRL0_PORT_EN;
+ val |= MVPP22_XLG_CTRL0_PORT_EN |
+ MVPP22_XLG_CTRL0_MAC_RESET_DIS;
val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS;
writel(val, port->base + MVPP22_XLG_CTRL0_REG);
} else {
@@ -1453,16 +1830,20 @@ static void mvpp2_port_disable(struct mvpp2_port *port)
{
u32 val;
- if (mvpp2_port_supports_xlg(port) &&
- mvpp2_is_xlg(port->phy_interface)) {
+ if (port->phy_interface == PHY_INTERFACE_MODE_INTERNAL)
+ return;
+
+ if (port->has_xlg_mac &&
+ (port->phy_interface == PHY_INTERFACE_MODE_RXAUI ||
+ port->phy_interface == PHY_INTERFACE_MODE_10GKR ||
+ port->phy_interface == PHY_INTERFACE_MODE_5GKR)) {
val = readl(port->base + MVPP22_XLG_CTRL0_REG);
val &= ~MVPP22_XLG_CTRL0_PORT_EN;
writel(val, port->base + MVPP22_XLG_CTRL0_REG);
- }
-
- val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
- val &= ~(MVPP2_GMAC_PORT_EN_MASK);
- writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
+ } else {
+ val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
+ val &= ~(MVPP2_GMAC_PORT_EN_MASK);
+ writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}
/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
@@ -1525,17 +1906,6 @@ static u64 mvpp2_read_count(struct mvpp2_port *port,
return val;
}
-/* Some counters are accessed indirectly by first writing an index to
- * MVPP2_CTRS_IDX. The index can represent various resources depending on the
- * register we access, it can be a hit counter for some classification tables,
- * a counter specific to a rxq, a txq or a buffer pool.
- */
-static u32 mvpp2_read_index(struct mvpp2 *priv, u32 index, u32 reg)
-{
- mvpp2_write(priv, MVPP2_CTRS_IDX, index);
- return mvpp2_read(priv, reg);
-}
-
/* Due to the fact that software statistics and hardware statistics are, by
* design, incremented at different moments in the chain of packet processing,
* it is very likely that incoming packets could have been dropped after being
@@ -1545,7 +1915,7 @@ static u32 mvpp2_read_index(struct mvpp2 *priv, u32 index, u32 reg)
* Hence, statistics gathered from userspace with ifconfig (software) and
* ethtool (hardware) cannot be compared.
*/
-static const struct mvpp2_ethtool_counter mvpp2_ethtool_mib_regs[] = {
+static const struct mvpp2_ethtool_counter mvpp2_ethtool_regs[] = {
{ MVPP2_MIB_GOOD_OCTETS_RCVD, "good_octets_received", true },
{ MVPP2_MIB_BAD_OCTETS_RCVD, "bad_octets_received" },
{ MVPP2_MIB_CRC_ERRORS_SENT, "crc_errors_sent" },
@@ -1566,38 +1936,47 @@ static const struct mvpp2_ethtool_counter mvpp2_ethtool_mib_regs[] = {
{ MVPP2_MIB_FC_RCVD, "fc_received" },
{ MVPP2_MIB_RX_FIFO_OVERRUN, "rx_fifo_overrun" },
{ MVPP2_MIB_UNDERSIZE_RCVD, "undersize_received" },
- { MVPP2_MIB_FRAGMENTS_RCVD, "fragments_received" },
+ { MVPP2_MIB_FRAGMENTS_ERR_RCVD, "fragments_err_received" },
{ MVPP2_MIB_OVERSIZE_RCVD, "oversize_received" },
{ MVPP2_MIB_JABBER_RCVD, "jabber_received" },
{ MVPP2_MIB_MAC_RCV_ERROR, "mac_receive_error" },
{ MVPP2_MIB_BAD_CRC_EVENT, "bad_crc_event" },
{ MVPP2_MIB_COLLISION, "collision" },
{ MVPP2_MIB_LATE_COLLISION, "late_collision" },
+#define MVPP2_LAST_MIB MVPP2_MIB_LATE_COLLISION
+
+ /* Extend counters */
+ { MVPP2_OVERRUN_DROP_REG(0), "rx_ppv2_overrun" },
+ { MVPP2_CLS_DROP_REG(0), "rx_cls_drop" },
+ { MVPP2_RX_PKT_FULLQ_DROP_REG, "rx_fullq_drop" },
+ { MVPP2_RX_PKT_EARLY_DROP_REG, "rx_early_drop" },
+ { MVPP2_RX_PKT_BM_DROP_REG, "rx_bm_drop" },
+
+ /* Extend SW counters (not registers) */
+#define MVPP2_FIRST_CNT_SW 0xf000
+#define MVPP2_TX_GUARD_CNT(cpu) (MVPP2_FIRST_CNT_SW + cpu)
+ { MVPP2_TX_GUARD_CNT(0), "tx-guard-cpu0" },
+ { MVPP2_TX_GUARD_CNT(1), "tx-guard-cpu1" },
+ { MVPP2_TX_GUARD_CNT(2), "tx-guard-cpu2" },
+ { MVPP2_TX_GUARD_CNT(3), "tx-guard-cpu3" },
};
-static const struct mvpp2_ethtool_counter mvpp2_ethtool_port_regs[] = {
- { MVPP2_OVERRUN_ETH_DROP, "rx_fifo_or_parser_overrun_drops" },
- { MVPP2_CLS_ETH_DROP, "rx_classifier_drops" },
+static const char mvpp22_priv_flags_strings[][ETH_GSTRING_LEN] = {
+ "musdk",
};
-static const struct mvpp2_ethtool_counter mvpp2_ethtool_txq_regs[] = {
- { MVPP2_TX_DESC_ENQ_CTR, "txq_%d_desc_enqueue" },
- { MVPP2_TX_DESC_ENQ_TO_DDR_CTR, "txq_%d_desc_enqueue_to_ddr" },
- { MVPP2_TX_BUFF_ENQ_TO_DDR_CTR, "txq_%d_buff_euqueue_to_ddr" },
- { MVPP2_TX_DESC_ENQ_HW_FWD_CTR, "txq_%d_desc_hardware_forwarded" },
- { MVPP2_TX_PKTS_DEQ_CTR, "txq_%d_packets_dequeued" },
- { MVPP2_TX_PKTS_FULL_QUEUE_DROP_CTR, "txq_%d_queue_full_drops" },
- { MVPP2_TX_PKTS_EARLY_DROP_CTR, "txq_%d_packets_early_drops" },
- { MVPP2_TX_PKTS_BM_DROP_CTR, "txq_%d_packets_bm_drops" },
- { MVPP2_TX_PKTS_BM_MC_DROP_CTR, "txq_%d_packets_rep_bm_drops" },
-};
+#define MVPP22_F_IF_MUSDK_PRIV BIT(0)
-static const struct mvpp2_ethtool_counter mvpp2_ethtool_rxq_regs[] = {
- { MVPP2_RX_DESC_ENQ_CTR, "rxq_%d_desc_enqueue" },
- { MVPP2_RX_PKTS_FULL_QUEUE_DROP_CTR, "rxq_%d_queue_full_drops" },
- { MVPP2_RX_PKTS_EARLY_DROP_CTR, "rxq_%d_packets_early_drops" },
- { MVPP2_RX_PKTS_BM_DROP_CTR, "rxq_%d_packets_bm_drops" },
-};
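+/* Number of leading MIB entries in mvpp2_ethtool_regs[]: the MIB block ends
+ * at MVPP2_LAST_MIB, the remaining entries are extended HW/SW counters.
+ */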
+static int mvpp2_ethtool_get_mib_cntr_size(void)
+{
+ int i = 0;
+
+ while (i < ARRAY_SIZE(mvpp2_ethtool_regs)) {
+ if (mvpp2_ethtool_regs[i++].offset == MVPP2_LAST_MIB)
+ break;
+ }
+ return i; /* mib_size */
+}
static const struct mvpp2_ethtool_counter mvpp2_ethtool_xdp[] = {
{ ETHTOOL_XDP_REDIRECT, "rx_xdp_redirect", },
@@ -1609,55 +1988,63 @@ static const struct mvpp2_ethtool_counter mvpp2_ethtool_xdp[] = {
{ ETHTOOL_XDP_XMIT_ERR, "tx_xdp_xmit_errors", },
};
-#define MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs) (ARRAY_SIZE(mvpp2_ethtool_mib_regs) + \
- ARRAY_SIZE(mvpp2_ethtool_port_regs) + \
- (ARRAY_SIZE(mvpp2_ethtool_txq_regs) * (ntxqs)) + \
- (ARRAY_SIZE(mvpp2_ethtool_rxq_regs) * (nrxqs)) + \
- ARRAY_SIZE(mvpp2_ethtool_xdp))
-
-static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset,
- u8 *data)
+static int mvpp2_ethtool_get_cntr_index(u32 offset)
{
- struct mvpp2_port *port = netdev_priv(netdev);
- int i, q;
-
- if (sset != ETH_SS_STATS)
- return;
+ int i = 0;
- for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++) {
- strscpy(data, mvpp2_ethtool_mib_regs[i].string,
- ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
+ while (i < ARRAY_SIZE(mvpp2_ethtool_regs)) {
+ if (mvpp2_ethtool_regs[i].offset == offset)
+ break;
+ i++;
}
+ return i;
+}
- for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++) {
- strscpy(data, mvpp2_ethtool_port_regs[i].string,
- ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
- }
+/* hw_get_stats - update the ethtool_stats accumulator from HW-registers
+ * The HW-registers/counters are cleared on read.
+ */
+static void mvpp2_hw_get_stats(struct mvpp2_port *port, u64 *pstats)
+{
+ int i, mib_size, queue, cpu;
+ unsigned int reg_offs;
+ u32 val, cls_drops;
+ u64 *ptmp;
- for (q = 0; q < port->ntxqs; q++) {
- for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++) {
- snprintf(data, ETH_GSTRING_LEN,
- mvpp2_ethtool_txq_regs[i].string, q);
- data += ETH_GSTRING_LEN;
- }
- }
+ mib_size = mvpp2_ethtool_get_mib_cntr_size();
- for (q = 0; q < port->nrxqs; q++) {
- for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++) {
- snprintf(data, ETH_GSTRING_LEN,
- mvpp2_ethtool_rxq_regs[i].string,
- q);
- data += ETH_GSTRING_LEN;
+ cls_drops = mvpp2_read(port->priv, MVPP2_OVERRUN_DROP_REG(port->id));
+
+ for (i = 0; i < mib_size; i++) {
+ if (mvpp2_ethtool_regs[i].offset == MVPP2_MIB_COLLISION) {
+ val = mvpp2_read_count(port, &mvpp2_ethtool_regs[i]);
+ port->dev->stats.collisions += val;
+ *pstats++ += val;
+ continue;
+ }
+ *pstats++ += mvpp2_read_count(port, &mvpp2_ethtool_regs[i]);
+ }
+
+ /* Extend HW counters */
+ *pstats++ += cls_drops;
+ *pstats++ += mvpp2_read(port->priv, MVPP2_CLS_DROP_REG(port->id));
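+	/* Per-RXQ counters are indirect: write the queue index to
+	 * MVPP2_CNT_IDX_REG, then read each extended register. The values of
+	 * all RXQs of the port are accumulated into the same ethtool slots,
+	 * hence the pstats rewind via ptmp on every queue iteration.
+	 */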
+ ptmp = pstats;
+ queue = port->first_rxq;
+ while (queue < (port->first_rxq + port->nrxqs)) {
+ mvpp2_write(port->priv, MVPP2_CNT_IDX_REG, queue++);
+ pstats = ptmp;
+ i = mib_size + 2;
+ while (i < ARRAY_SIZE(mvpp2_ethtool_regs)) {
+ reg_offs = mvpp2_ethtool_regs[i++].offset;
+ if (reg_offs == MVPP2_FIRST_CNT_SW)
+ break;
+ *pstats++ += mvpp2_read(port->priv, reg_offs);
}
}
- for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_xdp); i++) {
- strscpy(data, mvpp2_ethtool_xdp[i].string,
- ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
- }
+
+ /* Extend SW counters (i=MVPP2_FIRST_CNT_SW) */
+ for_each_present_cpu(cpu)
+ *pstats++ = mvpp2_tx_done_guard_get_stats(port, cpu);
}
static void
@@ -1699,68 +2086,47 @@ mvpp2_get_xdp_stats(struct mvpp2_port *port, struct mvpp2_pcpu_stats *xdp_stats)
}
}
-static void mvpp2_read_stats(struct mvpp2_port *port)
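+/* Clear the HW counters by reading them (they are clear-on-read), without
+ * accumulating the values anywhere.
+ */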
+static void mvpp2_hw_clear_stats(struct mvpp2_port *port)
{
- struct mvpp2_pcpu_stats xdp_stats = {};
- const struct mvpp2_ethtool_counter *s;
- u64 *pstats;
- int i, q;
-
- pstats = port->ethtool_stats;
+ int i, mib_size, queue;
+ unsigned int reg_offs;
- for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++)
- *pstats++ += mvpp2_read_count(port, &mvpp2_ethtool_mib_regs[i]);
+ mib_size = mvpp2_ethtool_get_mib_cntr_size();
- for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++)
- *pstats++ += mvpp2_read(port->priv,
- mvpp2_ethtool_port_regs[i].offset +
- 4 * port->id);
+ for (i = 0; i < mib_size; i++)
+ mvpp2_read_count(port, &mvpp2_ethtool_regs[i]);
- for (q = 0; q < port->ntxqs; q++)
- for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++)
- *pstats++ += mvpp2_read_index(port->priv,
- MVPP22_CTRS_TX_CTR(port->id, q),
- mvpp2_ethtool_txq_regs[i].offset);
-
- /* Rxqs are numbered from 0 from the user standpoint, but not from the
- * driver's. We need to add the port->first_rxq offset.
- */
- for (q = 0; q < port->nrxqs; q++)
- for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++)
- *pstats++ += mvpp2_read_index(port->priv,
- port->first_rxq + q,
- mvpp2_ethtool_rxq_regs[i].offset);
-
- /* Gather XDP Statistics */
- mvpp2_get_xdp_stats(port, &xdp_stats);
-
- for (i = 0, s = mvpp2_ethtool_xdp;
- s < mvpp2_ethtool_xdp + ARRAY_SIZE(mvpp2_ethtool_xdp);
- s++, i++) {
- switch (s->offset) {
- case ETHTOOL_XDP_REDIRECT:
- *pstats++ = xdp_stats.xdp_redirect;
- break;
- case ETHTOOL_XDP_PASS:
- *pstats++ = xdp_stats.xdp_pass;
- break;
- case ETHTOOL_XDP_DROP:
- *pstats++ = xdp_stats.xdp_drop;
- break;
- case ETHTOOL_XDP_TX:
- *pstats++ = xdp_stats.xdp_tx;
- break;
- case ETHTOOL_XDP_TX_ERR:
- *pstats++ = xdp_stats.xdp_tx_err;
- break;
- case ETHTOOL_XDP_XMIT:
- *pstats++ = xdp_stats.xdp_xmit;
- break;
- case ETHTOOL_XDP_XMIT_ERR:
- *pstats++ = xdp_stats.xdp_xmit_err;
- break;
+ /* Extend counters */
+ mvpp2_read(port->priv, MVPP2_OVERRUN_DROP_REG(port->id));
+ mvpp2_read(port->priv, MVPP2_CLS_DROP_REG(port->id));
+ queue = port->first_rxq;
+ while (queue < (port->first_rxq + port->nrxqs)) {
+ mvpp2_write(port->priv, MVPP2_CNT_IDX_REG, queue++);
+ i = mib_size + 2;
+ while (i < ARRAY_SIZE(mvpp2_ethtool_regs)) {
+ reg_offs = mvpp2_ethtool_regs[i++].offset;
+ if (reg_offs == MVPP2_FIRST_CNT_SW)
+ break;
+ mvpp2_read(port->priv, reg_offs);
}
}
+ /* Extend SW counters (i=MVPP2_FIRST_CNT_SW) */
+ /* no clear */
+}
+
+static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset,
+ u8 *data)
+{
+	int i;
+
+	switch (sset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
+ memcpy(data + i * ETH_GSTRING_LEN,
+ &mvpp2_ethtool_regs[i].string, ETH_GSTRING_LEN);
+ break;
+ case ETH_SS_PRIV_FLAGS:
+ memcpy(data, mvpp22_priv_flags_strings,
+ ARRAY_SIZE(mvpp22_priv_flags_strings) * ETH_GSTRING_LEN);
+ }
}
static void mvpp2_gather_hw_statistics(struct work_struct *work)
@@ -1769,109 +2135,69 @@ static void mvpp2_gather_hw_statistics(struct work_struct *work)
struct mvpp2_port *port = container_of(del_work, struct mvpp2_port,
stats_work);
+	/* Update the statistics buffer from the queued work only, not from ethtool -S */
mutex_lock(&port->gather_stats_lock);
-
- mvpp2_read_stats(port);
-
- /* No need to read again the counters right after this function if it
- * was called asynchronously by the user (ie. use of ethtool).
- */
- cancel_delayed_work(&port->stats_work);
+ mvpp2_hw_get_stats(port, port->ethtool_stats);
+ mutex_unlock(&port->gather_stats_lock);
queue_delayed_work(port->priv->stats_queue, &port->stats_work,
MVPP2_MIB_COUNTERS_STATS_DELAY);
-
- mutex_unlock(&port->gather_stats_lock);
}
static void mvpp2_ethtool_get_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct mvpp2_port *port = netdev_priv(dev);
+ int cls_drp, fc_rcv;
- /* Update statistics for the given port, then take the lock to avoid
- * concurrent accesses on the ethtool_stats structure during its copy.
+	/* Use the statistics already accumulated in ethtool_stats by the queued
+	 * work and copy them, under the mutex, into the given ethtool buffer.
*/
- mvpp2_gather_hw_statistics(&port->stats_work.work);
mutex_lock(&port->gather_stats_lock);
memcpy(data, port->ethtool_stats,
- sizeof(u64) * MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs));
+ sizeof(u64) * ARRAY_SIZE(mvpp2_ethtool_regs));
mutex_unlock(&port->gather_stats_lock);
+
+ /* Do not count flow control receive frames as classifier drops */
+ cls_drp = mvpp2_ethtool_get_cntr_index(MVPP2_CLS_DROP_REG(0));
+ fc_rcv = mvpp2_ethtool_get_cntr_index(MVPP2_MIB_FC_RCVD);
+ data[cls_drp] =
+ data[fc_rcv] > data[cls_drp] ? 0 : data[cls_drp] - data[fc_rcv];
}
static int mvpp2_ethtool_get_sset_count(struct net_device *dev, int sset)
{
struct mvpp2_port *port = netdev_priv(dev);
- if (sset == ETH_SS_STATS)
- return MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs);
-
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(mvpp2_ethtool_regs);
+ case ETH_SS_PRIV_FLAGS:
+ return (port->priv->hw_version == MVPP21) ?
+ 0 : ARRAY_SIZE(mvpp22_priv_flags_strings);
+ }
return -EOPNOTSUPP;
}
-static void mvpp2_mac_reset_assert(struct mvpp2_port *port)
+static void mvpp2_port_reset(struct mvpp2_port *port)
{
u32 val;
+ /* Read the GOP statistics to reset the hardware counters */
+ mvpp2_hw_clear_stats(port);
+
val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) |
MVPP2_GMAC_PORT_RESET_MASK;
writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
- if (port->priv->hw_version == MVPP22 && port->gop_id == 0) {
+ if (port->has_xlg_mac) {
+ /* Set the XLG MAC in reset */
val = readl(port->base + MVPP22_XLG_CTRL0_REG) &
~MVPP22_XLG_CTRL0_MAC_RESET_DIS;
writel(val, port->base + MVPP22_XLG_CTRL0_REG);
- }
-}
-
-static void mvpp22_pcs_reset_assert(struct mvpp2_port *port)
-{
- struct mvpp2 *priv = port->priv;
- void __iomem *mpcs, *xpcs;
- u32 val;
-
- if (port->priv->hw_version != MVPP22 || port->gop_id != 0)
- return;
-
- mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
- xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
-
- val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
- val &= ~(MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX);
- val |= MVPP22_MPCS_CLK_RESET_DIV_SET;
- writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
-
- val = readl(xpcs + MVPP22_XPCS_CFG0);
- writel(val & ~MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0);
-}
-
-static void mvpp22_pcs_reset_deassert(struct mvpp2_port *port)
-{
- struct mvpp2 *priv = port->priv;
- void __iomem *mpcs, *xpcs;
- u32 val;
-
- if (port->priv->hw_version != MVPP22 || port->gop_id != 0)
- return;
-
- mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
- xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
-
- switch (port->phy_interface) {
- case PHY_INTERFACE_MODE_10GBASER:
- val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
- val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX |
- MAC_CLK_RESET_SD_TX;
- val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET;
- writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
- break;
- case PHY_INTERFACE_MODE_XAUI:
- case PHY_INTERFACE_MODE_RXAUI:
- val = readl(xpcs + MVPP22_XPCS_CFG0);
- writel(val | MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0);
- break;
- default:
- break;
+ while (readl(port->base + MVPP22_XLG_CTRL0_REG) &
+ MVPP22_XLG_CTRL0_MAC_RESET_DIS)
+ continue;
}
}
@@ -1880,6 +2206,9 @@ static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
{
u32 val;
+ if (port->flags & MVPP22_F_IF_MUSDK)
+ return;
+
val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
@@ -1892,6 +2221,9 @@ static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port)
{
u32 val;
+ if (port->flags & MVPP22_F_IF_MUSDK)
+ return;
+
val = readl(port->base + MVPP22_XLG_CTRL1_REG);
val &= ~MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK;
val |= ((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
@@ -1899,19 +2231,42 @@ static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port)
writel(val, port->base + MVPP22_XLG_CTRL1_REG);
}
+static void mvpp2_gmac_tx_fifo_configure(struct mvpp2_port *port)
+{
+ u32 val, tx_fifo_min_th;
+ u8 low_wm, hi_wm;
+
+ tx_fifo_min_th = MVPP2_GMAC_TX_FIFO_MIN_TH;
+ low_wm = MVPP2_GMAC_TX_FIFO_LOW_WM;
+ hi_wm = MVPP2_GMAC_TX_FIFO_HI_WM;
+
+ /* Update TX FIFO MIN Threshold */
+ val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
+ val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
+ val |= tx_fifo_min_th;
+ writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
+
+ /* Update TX FIFO levels of assertion/deassertion
+ * of p2mem_ready_signal, which indicates readiness
+ * for fetching the data from DRAM.
+ */
+ val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_0_REG);
+ val &= ~MVPP2_GMAC_TX_FIFO_WM_MASK;
+ val |= (low_wm << MVPP2_GMAC_TX_FIFO_WM_LOW_OFFSET) | hi_wm;
+ writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_0_REG);
+}
+
/* Set defaults to the MVPP2 port */
static void mvpp2_defaults_set(struct mvpp2_port *port)
{
- int tx_port_num, val, queue, lrxq;
+ int tx_port_num, val, queue, ptxq, lrxq;
- if (port->priv->hw_version == MVPP21) {
- /* Update TX FIFO MIN Threshold */
- val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
- val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
- /* Min. TX threshold must be less than minimal packet length */
- val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
- writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
- }
+ if (phy_interface_mode_is_rgmii(port->phy_interface) ||
+ port->phy_interface == PHY_INTERFACE_MODE_MII ||
+ port->phy_interface == PHY_INTERFACE_MODE_SGMII ||
+ port->phy_interface == PHY_INTERFACE_MODE_1000BASEX ||
+ port->phy_interface == PHY_INTERFACE_MODE_2500BASEX)
+ mvpp2_gmac_tx_fifo_configure(port);
/* Disable Legacy WRR, Disable EJP, Release from reset */
tx_port_num = mvpp2_egress_port(port);
@@ -1923,9 +2278,11 @@ static void mvpp2_defaults_set(struct mvpp2_port *port)
mvpp2_write(port->priv, MVPP2_TXP_SCHED_FIXED_PRIO_REG, 0);
/* Close bandwidth for all queues */
- for (queue = 0; queue < MVPP2_MAX_TXQ; queue++)
+ for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
+ ptxq = mvpp2_txq_phys(port->id, queue);
mvpp2_write(port->priv,
- MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(queue), 0);
+ MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
+ }
/* Set refill period to 1 usec, refill tokens
* and bucket size to maximum
@@ -1994,6 +2351,9 @@ static void mvpp2_egress_enable(struct mvpp2_port *port)
int queue;
int tx_port_num = mvpp2_egress_port(port);
+ if (port->flags & MVPP22_F_IF_MUSDK)
+ return;
+
/* Enable all initialized TXs. */
qmap = 0;
for (queue = 0; queue < port->ntxqs; queue++) {
@@ -2016,6 +2376,9 @@ static void mvpp2_egress_disable(struct mvpp2_port *port)
int delay;
int tx_port_num = mvpp2_egress_port(port);
+ if (port->flags & MVPP22_F_IF_MUSDK)
+ return;
+
/* Issue stop command for active channels only */
mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
@@ -2118,9 +2481,13 @@ mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
*/
static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
{
+ int cpu = smp_processor_id();
+
+ mvpp2_tx_done_guard_timer_set(port, cpu);
+
/* aggregated access - relevant TXQ number is written in TX desc */
mvpp2_thread_write(port->priv,
- mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
+ mvpp2_cpu_to_thread(port->priv, cpu),
MVPP2_AGGR_TXQ_UPDATE_REG, pending);
}
@@ -2177,8 +2544,9 @@ static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2_port *port,
struct mvpp2_txq_pcpu *txq_pcpu,
int num)
{
- int req, desc_count;
unsigned int thread;
+ int req, desc_count;
+ struct mvpp2_txq_pcpu *txq_pcpu_aux;
if (txq_pcpu->reserved_num >= num)
return 0;
@@ -2186,27 +2554,24 @@ static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2_port *port,
/* Not enough descriptors reserved! Update the reserved descriptor
* count and check again.
*/
-
- desc_count = 0;
- /* Compute total of used descriptors */
- for (thread = 0; thread < port->priv->nthreads; thread++) {
- struct mvpp2_txq_pcpu *txq_pcpu_aux;
-
- txq_pcpu_aux = per_cpu_ptr(txq->pcpu, thread);
- desc_count += txq_pcpu_aux->count;
- desc_count += txq_pcpu_aux->reserved_num;
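+	/* Small requests (up to MAX_SKB_FRAGS) take a fast path and reserve a
+	 * fixed per-CPU chunk without global accounting; only larger (TSO)
+	 * requests sum the per-thread reservations and check them against the
+	 * queue size before reserving more descriptors.
+	 */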
+ if (num <= MAX_SKB_FRAGS) {
+ req = MVPP2_CPU_DESC_CHUNK;
+ } else {
+ /* Compute total of used descriptors */
+ desc_count = 0;
+ for (thread = 0; thread < port->priv->nthreads; thread++) {
+ txq_pcpu_aux = per_cpu_ptr(txq->pcpu, thread);
+ desc_count += txq_pcpu_aux->reserved_num;
+ }
+ req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
+ /* Check the reservation is possible */
+ if ((desc_count + req) > txq->size)
+ return -ENOMEM;
}
- req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
- desc_count += req;
-
- if (desc_count >
- (txq->size - (MVPP2_MAX_THREADS * MVPP2_CPU_DESC_CHUNK)))
- return -ENOMEM;
-
txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(port, txq, req);
- /* OK, the descriptor could have been updated: check again. */
+ /* Check the resulting reservation is enough */
if (txq_pcpu->reserved_num < num)
return -ENOMEM;
return 0;
@@ -2299,6 +2664,107 @@ static void mvpp2_txq_sent_counter_clear(void *arg)
}
}
+/* Avoid spurious tx_done calls to netif_tx_wake during dev-stop or
+ * link-down processing by using the MVPP2_F_IF_TX_ON flag.
+ * Set/clear it on each cpu.
+ */
+static inline bool mvpp2_tx_stopped(struct mvpp2_port *port)
+{
+ return !(port->flags & MVPP2_F_IF_TX_ON);
+}
+
+static void mvpp2_txqs_on(void *arg)
+{
+ ((struct mvpp2_port *)arg)->flags |= MVPP2_F_IF_TX_ON;
+}
+
+static void mvpp2_txqs_off(void *arg)
+{
+ ((struct mvpp2_port *)arg)->flags &= ~MVPP2_F_IF_TX_ON;
+}
+
+static void mvpp2_txqs_on_tasklet_cb(unsigned long data)
+{
+ /* Activated/runs on 1 cpu only (with link_status_irq)
+ * to update/guarantee TX_ON coherency on other cpus
+ */
+ struct mvpp2_port *port = (struct mvpp2_port *)data;
+
+ if (mvpp2_tx_stopped(port))
+ on_each_cpu(mvpp2_txqs_off, port, 1);
+ else
+ on_each_cpu(mvpp2_txqs_on, port, 1);
+}
+
+static void mvpp2_txqs_on_tasklet_init(struct mvpp2_port *port)
+{
+ /* Init called only for port with link_status_isr */
+ tasklet_init(&port->txqs_on_tasklet,
+ mvpp2_txqs_on_tasklet_cb,
+ (unsigned long)port);
+}
+
+static void mvpp2_txqs_on_tasklet_kill(struct mvpp2_port *port)
+{
+ if (port->txqs_on_tasklet.func)
+ tasklet_kill(&port->txqs_on_tasklet);
+}
+
+/* Use mvpp2 wrappers instead of the netif_tx_*_all_queues APIs:
+ * netif_tx_start_all_queues -> mvpp2_tx_start_all_queues
+ * netif_tx_wake_all_queues -> mvpp2_tx_wake_all_queues
+ * netif_tx_stop_all_queues -> mvpp2_tx_stop_all_queues
+ * But keep using per-queue APIs netif_tx_wake_queue,
+ * netif_tx_stop_queue and netif_tx_queue_stopped.
+ */
+static void mvpp2_tx_start_all_queues(struct net_device *dev)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ if (port->flags & MVPP22_F_IF_MUSDK)
+ return;
+
+ /* Never called from IRQ. Update all cpus directly */
+ on_each_cpu(mvpp2_txqs_on, port, 1);
+ netif_tx_start_all_queues(dev);
+}
+
+static void mvpp2_tx_wake_all_queues(struct net_device *dev)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ if (port->flags & MVPP22_F_IF_MUSDK)
+ return;
+
+ if (irqs_disabled()) {
+ /* Link-status IRQ context (also ACPI).
+ * Set for THIS cpu, update other cpus over tasklet
+ */
+ mvpp2_txqs_on((void *)port);
+ tasklet_schedule(&port->txqs_on_tasklet);
+ } else {
+ on_each_cpu(mvpp2_txqs_on, port, 1);
+ }
+ netif_tx_wake_all_queues(dev);
+}
+
+static void mvpp2_tx_stop_all_queues(struct net_device *dev)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ if (port->flags & MVPP22_F_IF_MUSDK)
+ return;
+
+ if (irqs_disabled()) {
+ /* IRQ context. Set for THIS, update other cpus over tasklet */
+ mvpp2_txqs_off((void *)port);
+ tasklet_schedule(&port->txqs_on_tasklet);
+ } else {
+ on_each_cpu(mvpp2_txqs_off, port, 1);
+ }
+ netif_tx_stop_all_queues(dev);
+}
+
/* Set max sizes for Tx queues */
static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
{
@@ -2348,6 +2814,22 @@ static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
}
}
+/* Set the non-occupied descriptors threshold that changes the interrupt
+ * error cause polled by the FW Flow Control
+ */
+void mvpp2_set_rxq_free_tresh(struct mvpp2_port *port,
+ struct mvpp2_rx_queue *rxq)
+{
+ u32 val;
+
+ mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
+
+ val = mvpp2_read(port->priv, MVPP2_RXQ_THRESH_REG);
+ val &= ~MVPP2_RXQ_NON_OCCUPIED_MASK;
+ val |= MSS_THRESHOLD_STOP << MVPP2_RXQ_NON_OCCUPIED_OFFSET;
+ mvpp2_write(port->priv, MVPP2_RXQ_THRESH_REG, val);
+}
+
/* Set the number of packets that will be received before Rx interrupt
* will be generated by HW.
*/
@@ -2366,24 +2848,44 @@ static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
put_cpu();
}
-/* For some reason in the LSP this is done on each CPU. Why ? */
-static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port,
- struct mvpp2_tx_queue *txq)
+/* Program the HW packet-coalescing threshold with ZERO or the configured
+ * value. The same value should be set for all TXQs and for all CPUs.
+ * Setting ZERO causes an immediate flush into the tx-done handler.
+ */
+static inline void mvpp2_tx_pkts_coal_set_txqs(struct mvpp2_port *port,
+ int cpu, u32 val)
{
- unsigned int thread;
- u32 val;
+ struct mvpp2_tx_queue *txq;
+ int queue;
- if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK)
- txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK;
+ val <<= MVPP2_TXQ_THRESH_OFFSET;
- val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET);
- /* PKT-coalescing registers are per-queue + per-thread */
- for (thread = 0; thread < MVPP2_MAX_THREADS; thread++) {
- mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
- mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val);
+ for (queue = 0; queue < port->ntxqs; queue++) {
+ txq = port->txqs[queue];
+ mvpp2_thread_write(port->priv, cpu, MVPP2_TXQ_NUM_REG,
+ txq->id);
+ mvpp2_thread_write(port->priv, cpu, MVPP2_TXQ_THRESH_REG, val);
}
}
+static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port)
+{
+ struct mvpp2_tx_queue *txq = port->txqs[0];
+ u32 cfg_val = txq->done_pkts_coal;
+ int cpu;
+
+ for_each_present_cpu(cpu)
+ mvpp2_tx_pkts_coal_set_txqs(port, cpu, cfg_val);
+}
+
+/* Set a ZERO threshold from on_each_cpu (IRQ context), for the current cpu only */
+static void mvpp2_tx_pkts_coal_set_zero_pcpu(void *arg)
+{
+ struct mvpp2_port *port = arg;
+
+ mvpp2_tx_pkts_coal_set_txqs(port, smp_processor_id(), 0);
+}
+
static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
{
u64 tmp = (u64)clk_hz * usec;
@@ -2447,16 +2949,29 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
struct mvpp2_txq_pcpu_buf *tx_buf =
txq_pcpu->buffs + txq_pcpu->txq_get_index;
- if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma) &&
+ if (!tx_buf->skb &&
tx_buf->type != MVPP2_TYPE_XDP_TX)
dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
tx_buf->size, DMA_TO_DEVICE);
- if (tx_buf->type == MVPP2_TYPE_SKB && tx_buf->skb)
- dev_kfree_skb_any(tx_buf->skb);
+ else if (tx_buf->skb != TSO_HEADER_MARK &&
+ tx_buf->type != MVPP2_TYPE_XDP_TX) {
+ dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
+ tx_buf->size, DMA_TO_DEVICE);
+#ifndef MODULE
+ if (static_branch_unlikely(&mvpp2_recycle_ena)) {
+ mvpp2_recycle_put(port, txq_pcpu, tx_buf);
+ /* sets tx_buf->skb=NULL if put to recycle */
+ if (tx_buf->skb)
+ dev_kfree_skb_any(tx_buf->skb);
+ } else
+#endif
+ dev_kfree_skb_any(tx_buf->skb);
+ }
else if (tx_buf->type == MVPP2_TYPE_XDP_TX ||
tx_buf->type == MVPP2_TYPE_XDP_NDO)
xdp_return_frame(tx_buf->xdpf);
+ /* else: no action, tx_buf->skb always overwritten in xmit */
mvpp2_txq_inc_get(txq_pcpu);
}
}
@@ -2494,9 +3009,15 @@ static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
txq_pcpu->count -= tx_done;
- if (netif_tx_queue_stopped(nq))
- if (txq_pcpu->count <= txq_pcpu->wake_threshold)
+ if (netif_tx_queue_stopped(nq) && !mvpp2_tx_stopped(port)) {
+ /* Wake if netif_tx_queue_stopped on same txq->log_id */
+ if (txq_pcpu->stopped_on_txq_id == txq->log_id &&
+ txq_pcpu->count <= txq_pcpu->wake_threshold) {
+ txq_pcpu->stopped_on_txq_id = MVPP2_MAX_TXQ;
+ nq = netdev_get_tx_queue(port->dev, txq->log_id);
netif_tx_wake_queue(nq);
+ }
+ }
}
static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause,
@@ -2506,6 +3027,9 @@ static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause,
struct mvpp2_txq_pcpu *txq_pcpu;
unsigned int tx_todo = 0;
+	/* Restore "no-force": undo any guard coalescing override */
+ mvpp2_tx_done_guard_force_irq(port, thread, 0);
+
while (cause) {
txq = mvpp2_get_tx_queue(port, cause);
if (!txq)
@@ -2534,8 +3058,8 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev,
/* Allocate memory for TX descriptors */
aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
- MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
- &aggr_txq->descs_dma, GFP_KERNEL);
+ MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
+ &aggr_txq->descs_dma, GFP_KERNEL);
if (!aggr_txq->descs)
return -ENOMEM;
@@ -2580,6 +3104,7 @@ static int mvpp2_rxq_init(struct mvpp2_port *port,
return -ENOMEM;
rxq->last_desc = rxq->size - 1;
+ rxq->rx_pending = 0;
/* Zero occupied and non-occupied counters - direct access */
mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
@@ -2603,6 +3128,9 @@ static int mvpp2_rxq_init(struct mvpp2_port *port,
mvpp2_rx_pkts_coal_set(port, rxq);
mvpp2_rx_time_coal_set(port, rxq);
+	/* Set the non-occupied descriptors threshold */
+ mvpp2_set_rxq_free_tresh(port, rxq);
+
/* Add number of descriptors ready for receiving packets */
mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
@@ -2651,6 +3179,7 @@ static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
{
int rx_received, i;
+ rxq->rx_pending = 0;
rx_received = mvpp2_rxq_received(port, rxq->id);
if (!rx_received)
return;
@@ -2664,8 +3193,7 @@ static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
MVPP2_RXD_BM_POOL_ID_OFFS;
mvpp2_bm_pool_put(port, pool,
- mvpp2_rxdesc_dma_addr_get(port, rx_desc),
- mvpp2_rxdesc_cookie_get(port, rx_desc));
+ mvpp2_rxdesc_dma_addr_get(port, rx_desc));
}
mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
}
@@ -2706,6 +3234,19 @@ static void mvpp2_rxq_deinit(struct mvpp2_port *port,
put_cpu();
}
+/* Disable all rx/ingress queues, called by mvpp2_init */
+static void mvpp2_rxq_disable_all(struct mvpp2 *priv)
+{
+ int i;
+ u32 val;
+
+ for (i = 0; i < MVPP2_RXQ_MAX_NUM; i++) {
+ val = mvpp2_read(priv, MVPP2_RXQ_CONFIG_REG(i));
+ val |= MVPP2_RXQ_DISABLE_MASK;
+ mvpp2_write(priv, MVPP2_RXQ_CONFIG_REG(i), val);
+ }
+}
+
/* Create and initialize a Tx queue */
static int mvpp2_txq_init(struct mvpp2_port *port,
struct mvpp2_tx_queue *txq)
@@ -2783,8 +3324,11 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
txq_pcpu->txq_get_index = 0;
txq_pcpu->tso_headers = NULL;
- txq_pcpu->stop_threshold = txq->size - MVPP2_MAX_SKB_DESCS;
- txq_pcpu->wake_threshold = txq_pcpu->stop_threshold / 2;
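+	/* Stop when the queue can no longer hold a worst-case skb from every
+	 * CPU; wake only after a fixed hysteresis to limit stop/wake churn.
+	 */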
+ txq_pcpu->stop_threshold = txq->size -
+ MVPP2_MAX_SKB_DESCS(num_present_cpus());
+ txq_pcpu->wake_threshold = txq_pcpu->stop_threshold -
+ MVPP2_TX_PAUSE_HYSTERESIS;
+ txq_pcpu->stopped_on_txq_id = MVPP2_MAX_TXQ;
txq_pcpu->tso_headers =
dma_alloc_coherent(port->dev->dev.parent,
@@ -2829,7 +3373,7 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
txq->descs_dma = 0;
/* Set minimum bandwidth for disabled TXQs */
- mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->log_id), 0);
+ mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
/* Set Tx descriptors queue starting address and size */
thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
@@ -2852,6 +3396,11 @@ static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
val |= MVPP2_TXQ_DRAIN_EN_MASK;
mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val);
+ /* Temporarily enable egress for the port.
+ * It is required for releasing all remaining packets.
+ */
+ mvpp2_egress_enable(port);
+
/* The napi queue has been stopped so wait for all packets
* to be transmitted.
*/
@@ -2871,6 +3420,8 @@ static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
pending &= MVPP2_TXQ_PENDING_MASK;
} while (pending);
+ mvpp2_egress_disable(port);
+
val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val);
put_cpu();
@@ -2920,6 +3471,9 @@ static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
for (queue = 0; queue < port->nrxqs; queue++)
mvpp2_rxq_deinit(port, port->rxqs[queue]);
+
+ if (port->tx_fc)
+ mvpp2_rxq_disable_fc(port);
}
/* Init all Rx queues for port */
@@ -2932,6 +3486,10 @@ static int mvpp2_setup_rxqs(struct mvpp2_port *port)
if (err)
goto err_cleanup;
}
+
+ if (port->tx_fc)
+ mvpp2_rxq_enable_fc(port);
+
return 0;
err_cleanup:
@@ -2950,18 +3508,11 @@ static int mvpp2_setup_txqs(struct mvpp2_port *port)
err = mvpp2_txq_init(port, txq);
if (err)
goto err_cleanup;
-
- /* Assign this queue to a CPU */
- if (queue < num_possible_cpus())
- netif_set_xps_queue(port->dev, cpumask_of(queue), queue);
}
if (port->has_tx_irqs) {
+		/* Set the time coalescing now; the pkts coalescing is set in start_dev */
mvpp2_tx_time_coal_set(port);
- for (queue = 0; queue < port->ntxqs; queue++) {
- txq = port->txqs[queue];
- mvpp2_tx_pkts_coal_set(port, txq);
- }
}
on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
@@ -3038,23 +3589,23 @@ static void mvpp2_isr_handle_link(struct mvpp2_port *port, bool link)
{
struct net_device *dev = port->dev;
+ if (!netif_running(dev))
+ return;
+
if (port->phylink) {
phylink_mac_change(port->phylink, link);
return;
}
- if (!netif_running(dev))
- return;
-
if (link) {
mvpp2_interrupts_enable(port);
mvpp2_egress_enable(port);
mvpp2_ingress_enable(port);
netif_carrier_on(dev);
- netif_tx_wake_all_queues(dev);
+ mvpp2_tx_wake_all_queues(dev);
} else {
- netif_tx_stop_all_queues(dev);
+ mvpp2_tx_stop_all_queues(dev);
netif_carrier_off(dev);
mvpp2_ingress_disable(port);
mvpp2_egress_disable(port);
@@ -3083,6 +3634,7 @@ static void mvpp2_isr_handle_gmac_internal(struct mvpp2_port *port)
if (phy_interface_mode_is_rgmii(port->phy_interface) ||
phy_interface_mode_is_8023z(port->phy_interface) ||
+ port->phy_interface == PHY_INTERFACE_MODE_MII ||
port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
val = readl(port->base + MVPP22_GMAC_INT_STAT);
if (val & MVPP22_GMAC_INT_STAT_LINK) {
@@ -3101,8 +3653,11 @@ static irqreturn_t mvpp2_port_isr(int irq, void *dev_id)
mvpp22_gop_mask_irq(port);
- if (mvpp2_port_supports_xlg(port) &&
- mvpp2_is_xlg(port->phy_interface)) {
+ if (port->has_xlg_mac &&
+ (port->phy_interface == PHY_INTERFACE_MODE_RXAUI ||
+ port->phy_interface == PHY_INTERFACE_MODE_10GKR ||
+ port->phy_interface == PHY_INTERFACE_MODE_5GKR ||
+ port->phy_interface == PHY_INTERFACE_MODE_INTERNAL)) {
/* Check the external status register */
val = readl(port->base + MVPP22_XLG_EXT_INT_STAT);
if (val & MVPP22_XLG_EXT_INT_STAT_XLG)
@@ -3124,21 +3679,31 @@ static irqreturn_t mvpp2_port_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
+static void mvpp2_tx_done_timer_set(struct mvpp2_port_pcpu *port_pcpu)
{
- struct net_device *dev;
- struct mvpp2_port *port;
+ ktime_t interval;
+
+ if (!port_pcpu->tx_done_timer_scheduled) {
+ port_pcpu->tx_done_timer_scheduled = true;
+ interval = MVPP2_TXDONE_HRTIMER_PERIOD_NS;
+ hrtimer_start(&port_pcpu->tx_done_timer, interval,
+ HRTIMER_MODE_REL_PINNED);
+ }
+}
+
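+/* Deferred tx-done processing (non-guard mode): the hrtimer callback only
+ * schedules the per-cpu tasklet, and the actual queue cleanup runs here.
+ */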
+static void mvpp2_tx_done_proc_cb(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct mvpp2_port *port = netdev_priv(dev);
struct mvpp2_port_pcpu *port_pcpu;
unsigned int tx_todo, cause;
- port_pcpu = container_of(timer, struct mvpp2_port_pcpu, tx_done_timer);
- dev = port_pcpu->dev;
+ port_pcpu = per_cpu_ptr(port->pcpu,
+ mvpp2_cpu_to_thread(port->priv, smp_processor_id()));
if (!netif_running(dev))
- return HRTIMER_NORESTART;
-
- port_pcpu->timer_scheduled = false;
- port = netdev_priv(dev);
+ return;
+ port_pcpu->tx_done_timer_scheduled = false;
/* Process all the Tx queues */
cause = (1 << port->ntxqs) - 1;
@@ -3146,16 +3711,318 @@ static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
mvpp2_cpu_to_thread(port->priv, smp_processor_id()));
/* Set the timer in case not all the packets were processed */
- if (tx_todo && !port_pcpu->timer_scheduled) {
- port_pcpu->timer_scheduled = true;
- hrtimer_forward_now(&port_pcpu->tx_done_timer,
- MVPP2_TXDONE_HRTIMER_PERIOD_NS);
+ if (tx_todo)
+ mvpp2_tx_done_timer_set(port_pcpu);
+}
+
+static enum hrtimer_restart mvpp2_tx_done_timer_cb(struct hrtimer *timer)
+{
+ struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
+ struct mvpp2_port_pcpu,
+ tx_done_timer);
+
+ tasklet_schedule(&port_pcpu->tx_done_tasklet);
+ return HRTIMER_NORESTART;
+}
+
+/* The bulk timer may be started/restarted by XMIT, the timer callback or the
+ * tasklet. XMIT calls bulk_timer_restart(), which is CONDITIONAL (restart vs
+ * restart-request). The timer callback has its own condition logic and calls
+ * hrtimer_forward(). The tasklet has its own condition logic and calls the
+ * unconditional bulk_timer_start(). The scheduled/restart_req flags drive
+ * this state logic.
+ */
+static inline void mvpp2_bulk_timer_restart(struct mvpp2_port_pcpu *port_pcpu)
+{
+ if (!port_pcpu->bulk_timer_scheduled) {
+ port_pcpu->bulk_timer_scheduled = true;
+ hrtimer_start(&port_pcpu->bulk_timer, MVPP2_TX_BULK_TIME,
+ HRTIMER_MODE_REL_PINNED);
+ } else {
+ port_pcpu->bulk_timer_restart_req = true;
+ }
+}
+
+static void mvpp2_bulk_timer_start(struct mvpp2_port_pcpu *port_pcpu)
+{
+ port_pcpu->bulk_timer_scheduled = true;
+ port_pcpu->bulk_timer_restart_req = false;
+ hrtimer_start(&port_pcpu->bulk_timer, MVPP2_TX_BULK_TIME,
+ HRTIMER_MODE_REL_PINNED);
+}
+
+static enum hrtimer_restart mvpp2_bulk_timer_cb(struct hrtimer *timer)
+{
+ /* ISR context */
+ struct mvpp2_port_pcpu *port_pcpu =
+ container_of(timer, struct mvpp2_port_pcpu, bulk_timer);
+
+ if (!port_pcpu->bulk_timer_scheduled) {
+ /* All pending are already flushed by xmit */
+ return HRTIMER_NORESTART;
+ }
+ if (port_pcpu->bulk_timer_restart_req) {
+ /* Not flushed but restart requested by xmit */
+ port_pcpu->bulk_timer_scheduled = true;
+ port_pcpu->bulk_timer_restart_req = false;
+ hrtimer_forward_now(timer, MVPP2_TX_BULK_TIME);
return HRTIMER_RESTART;
}
+	/* Timer fully expired: schedule the tasklet to flush the pending descriptors */
+ tasklet_schedule(&port_pcpu->bulk_tasklet);
return HRTIMER_NORESTART;
}
+static void mvpp2_bulk_tasklet_cb(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct mvpp2_port *port = netdev_priv(dev);
+ struct mvpp2_port_pcpu *port_pcpu;
+ struct mvpp2_tx_queue *aggr_txq;
+ int frags;
+ int cpu = smp_processor_id();
+
+ port_pcpu = per_cpu_ptr(port->pcpu, cpu);
+
+ if (!port_pcpu->bulk_timer_scheduled) {
+ /* Flushed by xmit-softirq since timer-irq */
+ return;
+ }
+ port_pcpu->bulk_timer_scheduled = false;
+ if (port_pcpu->bulk_timer_restart_req) {
+ /* Restart requested by xmit-softirq since timer-irq */
+ mvpp2_bulk_timer_start(port_pcpu);
+ return;
+ }
+
+ /* Full time expired. Flush pending packets here */
+ aggr_txq = &port->priv->aggr_txqs[cpu];
+ frags = aggr_txq->pending;
+ if (!frags)
+ return; /* Flushed by xmit */
+ aggr_txq->pending -= frags;
+ mvpp2_aggr_txq_pend_desc_add(port, frags);
+}
+
+/* Guard timer, tasklet, fixer utilities */
+
+/* The Guard fixer, called for 2 opposite actions:
+ * Activate the fix by setting the frame-coalescing threshold to zero
+ * (according to to_zero_map), which forces the tx-done IRQ. Called by the
+ * guard tasklet.
+ * Deactivate the fixer ~ restore the coalescing configuration
+ * (to_zero_map=0) once tx-done runs again.
+ */
+static void mvpp2_tx_done_guard_force_irq(struct mvpp2_port *port,
+ int sw_thread, u8 to_zero_map)
+{
+ int q;
+ u32 val, coal, qmask, xor;
+ struct mvpp2_port_pcpu *port_pcpu = per_cpu_ptr(port->pcpu, sw_thread);
+
+ if (port_pcpu->txq_coal_is_zero_map == to_zero_map)
+ return; /* all current & requested are already the same */
+
+ xor = port_pcpu->txq_coal_is_zero_map ^ to_zero_map;
+ /* Configuration num-of-frames coalescing is the same for all queues */
+ coal = port->txqs[0]->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET;
+
+ for (q = 0; q < port->ntxqs; q++) {
+ qmask = 1 << q;
+ if (!(xor & qmask))
+ continue;
+ if (to_zero_map & qmask)
+ val = 0; /* Set ZERO forcing the Interrupt */
+ else
+ val = coal; /* Set/restore configured threshold */
+ mvpp2_thread_write(port->priv, sw_thread,
+ MVPP2_TXQ_NUM_REG, port->txqs[q]->id);
+ mvpp2_thread_write(port->priv, sw_thread,
+ MVPP2_TXQ_THRESH_REG, val);
+ }
+ port_pcpu->txq_coal_is_zero_map = to_zero_map;
+}
+
+static inline void mvpp2_tx_done_guard_timer_set(struct mvpp2_port *port,
+ int sw_thread)
+{
+ struct mvpp2_port_pcpu *port_pcpu = per_cpu_ptr(port->pcpu,
+ sw_thread);
+
+ if (!port_pcpu->guard_timer_scheduled) {
+ port_pcpu->guard_timer_scheduled = true;
+ hrtimer_start(&port_pcpu->tx_done_timer,
+ MVPP2_GUARD_TXDONE_HRTIMER_NS,
+ HRTIMER_MODE_REL_PINNED);
+ }
+}
+
+/* The guard timer and tasklet callbacks implement the check logic based on
+ * the flags guard_timer_scheduled, tx_done_passed,
+ * txq_coal_is_zero_map and txq_busy_suspect_map
+ */
+static enum hrtimer_restart mvpp2_guard_timer_cb(struct hrtimer *timer)
+{
+ struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
+ struct mvpp2_port_pcpu, tx_done_timer);
+ struct mvpp2_port *port = port_pcpu->port;
+ struct mvpp2_tx_queue *txq;
+ struct mvpp2_txq_pcpu *txq_pcpu;
+ u8 txq_nonempty_map = 0;
+ int q, cpu;
+ ktime_t time;
+
+ if (port_pcpu->tx_done_passed) {
+ /* ok, tx-done was active since last checking */
+ port_pcpu->tx_done_passed = false;
+ time = MVPP2_GUARD_TXDONE_HRTIMER_NS; /* regular long */
+ goto timer_restart;
+ }
+
+ cpu = smp_processor_id(); /* timer is per-cpu */
+
+ for (q = 0; q < port->ntxqs; q++) {
+ txq = port->txqs[q];
+ txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
+ if (txq_pcpu->count)
+ txq_nonempty_map |= 1 << q;
+ }
+
+ if (!txq_nonempty_map || mvpp2_tx_stopped(port)) {
+ /* All queues are empty, guard-timer may be stopped now
+ * It would be started again on new transmit.
+ */
+ port_pcpu->guard_timer_scheduled = false;
+ return HRTIMER_NORESTART;
+ }
+
+ if (port_pcpu->txq_busy_suspect_map) {
+ /* Second-hit ~~ tx-done is really stalled.
+ * Activate the tasklet to fix.
+ * Keep guard_timer_scheduled=TRUE
+ */
+ tasklet_schedule(&port_pcpu->tx_done_tasklet);
+ return HRTIMER_NORESTART;
+ }
+
+	/* First-hit ~~ tx-done seems stalled. Schedule a re-check with a SHORT
+	 * time, a bit longer than the HW coalescing time in usec
+	 * (the << 10 shift, ~1024, approximates NSEC_PER_USEC)
+ */
+ time = ktime_set(0, port->tx_time_coal << 10);
+ port_pcpu->txq_busy_suspect_map |= txq_nonempty_map;
+
+timer_restart:
+ /* Keep guard_timer_scheduled=TRUE but set new expiration time */
+ hrtimer_forward_now(timer, time);
+ return HRTIMER_RESTART;
+}
+
+static void mvpp2_tx_done_guard_tasklet_cb(unsigned long data)
+{
+ struct mvpp2_port *port = (void *)data;
+ struct mvpp2_port_pcpu *port_pcpu;
+ int cpu;
+
+ /* stop_dev() has permanent setting for coal=0 */
+	/* stop_dev() leaves the coalescing permanently set to 0 */
+ return;
+
+ cpu = get_cpu();
+ port_pcpu = per_cpu_ptr(port->pcpu, cpu); /* tasklet is per-cpu */
+
+ if (port_pcpu->tx_done_passed) {
+ port_pcpu->tx_done_passed = false;
+ } else { /* Force IRQ */
+ mvpp2_tx_done_guard_force_irq(port, cpu,
+ port_pcpu->txq_busy_suspect_map);
+ port_pcpu->tx_guard_cntr++;
+ }
+ port_pcpu->txq_busy_suspect_map = 0;
+
+ /* guard_timer_scheduled is already TRUE, just start the timer */
+ hrtimer_start(&port_pcpu->tx_done_timer,
+ MVPP2_GUARD_TXDONE_HRTIMER_NS,
+ HRTIMER_MODE_REL_PINNED);
+ put_cpu();
+}
+
+static u32 mvpp2_tx_done_guard_get_stats(struct mvpp2_port *port, int cpu)
+{
+ return per_cpu_ptr(port->pcpu, cpu)->tx_guard_cntr;
+}
+
+static void mvpp2_tx_done_init_on_open(struct mvpp2_port *port, bool open)
+{
+ struct mvpp2_port_pcpu *port_pcpu;
+ int cpu;
+
+ if (port->flags & MVPP2_F_LOOPBACK)
+ return;
+
+ if (!open)
+ goto close;
+
+ /* Init tx-done tasklets and variables */
+ for_each_present_cpu(cpu) {
+ port_pcpu = per_cpu_ptr(port->pcpu, cpu);
+
+		/* The timer works in tx-done or Guard mode. To eliminate
+		 * per-packet mode checking, each mode has its own
+		 * "_scheduled" flag. Set scheduled=FALSE for the active mode
+		 * and TRUE for the inactive one, so the timer is never
+		 * started in the inactive mode.
+ */
+ if (port->has_tx_irqs) { /* guard-mode */
+ port_pcpu->txq_coal_is_zero_map = 0;
+ port_pcpu->txq_busy_suspect_map = 0;
+ port_pcpu->tx_done_passed = false;
+
+			/* "true": the tx-done timer mode is never started */
+ port_pcpu->tx_done_timer_scheduled = true;
+ port_pcpu->guard_timer_scheduled = false;
+ tasklet_init(&port_pcpu->tx_done_tasklet,
+ mvpp2_tx_done_guard_tasklet_cb,
+ (unsigned long)port);
+ } else {
+ port_pcpu->tx_done_timer_scheduled = false;
+			/* "true": the guard timer mode is never started */
+ port_pcpu->guard_timer_scheduled = true;
+ tasklet_init(&port_pcpu->tx_done_tasklet,
+ mvpp2_tx_done_proc_cb,
+ (unsigned long)port->dev);
+ }
+ }
+ return;
+close:
+ /* Kill tx-done timers and tasklets */
+ for_each_present_cpu(cpu) {
+ port_pcpu = per_cpu_ptr(port->pcpu, cpu);
+		/* Set "scheduled=true" so the timers are never started from XMIT */
+ port_pcpu->tx_done_timer_scheduled = true;
+ port_pcpu->guard_timer_scheduled = true;
+ hrtimer_cancel(&port_pcpu->tx_done_timer);
+ tasklet_kill(&port_pcpu->tx_done_tasklet);
+ }
+}
+
+static void mvpp2_tx_done_init_on_probe(struct platform_device *pdev,
+ struct mvpp2_port *port)
+{
+ struct mvpp2_port_pcpu *port_pcpu;
+ int cpu;
+ bool guard_mode = port->has_tx_irqs;
+
+ if (port->flags & MVPP2_F_LOOPBACK)
+ return;
+
+ for_each_present_cpu(cpu) {
+ port_pcpu = per_cpu_ptr(port->pcpu, cpu);
+ port_pcpu->port = port;
+ hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
+ port_pcpu->tx_done_timer.function = (guard_mode) ?
+ mvpp2_guard_timer_cb : mvpp2_tx_done_timer_cb;
+ }
+}
+
/* Main RX/TX processing routines */
/* Display more error info */
@@ -3167,8 +4034,8 @@ static void mvpp2_rx_error(struct mvpp2_port *port,
char *err_str = NULL;
switch (status & MVPP2_RXD_ERR_CODE_MASK) {
- case MVPP2_RXD_ERR_CRC:
- err_str = "crc";
+ case MVPP2_RXD_ERR_MAC:
+ err_str = "MAC";
break;
case MVPP2_RXD_ERR_OVERRUN:
err_str = "overrun";
@@ -3178,7 +4045,7 @@ static void mvpp2_rx_error(struct mvpp2_port *port,
break;
}
if (err_str && net_ratelimit())
- netdev_err(port->dev,
+ netdev_dbg(port->dev,
"bad rx status %08x (%s error), size=%zu\n",
status, err_str, sz);
}
@@ -3201,6 +4068,356 @@ static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
skb->ip_summed = CHECKSUM_NONE;
}
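+/* Return all buffers of a multi-buffer frame, linked via buffer headers,
+ * back to the BM pool by following the next-DMA-address chain.
+ */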
+static void mvpp2_buff_hdr_pool_put(struct mvpp2_port *port, struct mvpp2_rx_desc *rx_desc,
+ int pool, u32 rx_status)
+{
+ dma_addr_t dma_addr, dma_addr_next;
+ struct mvpp2_buff_hdr *buff_hdr;
+ phys_addr_t phys_addr;
+
+ dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
+ phys_addr = dma_to_phys(port->dev->dev.parent, dma_addr);
+
+ do {
+ buff_hdr = (struct mvpp2_buff_hdr *)phys_to_virt(phys_addr);
+
+ dma_addr_next = le32_to_cpu(buff_hdr->next_dma_addr);
+
+ if (port->priv->hw_version >= MVPP22)
+ dma_addr_next |= ((u64)buff_hdr->next_dma_addr_high << 32);
+
+ mvpp2_bm_pool_put(port, pool, dma_addr);
+
+ dma_addr = dma_addr_next;
+ phys_addr = dma_to_phys(port->dev->dev.parent, dma_addr);
+
+ } while (!MVPP2_B_HDR_INFO_IS_LAST(le16_to_cpu(buff_hdr->info)));
+}
+
+#ifndef MODULE
+void mvpp2_recycle_stats(void)
+{
+ int cpu;
+ int pl_id;
+ struct mvpp2_recycle_pcpu *pcpu;
+
+ pr_info("Recycle-stats: %d open ports (on all CP110s)\n",
+ mvpp2_share.num_open_ports);
+ if (!mvpp2_share.recycle_base)
+ return;
+ pcpu = mvpp2_share.recycle;
+ for_each_online_cpu(cpu) {
+ for (pl_id = 0; pl_id < MVPP2_BM_POOLS_NUM; pl_id++) {
+ pr_info("| cpu[%d].pool_%d: idx=%d\n",
+ cpu, pl_id, pcpu->idx[pl_id]);
+ }
+ pr_info("| ___[%d].skb_____idx=%d__\n",
+ cpu, pcpu->idx[MVPP2_BM_POOLS_NUM]);
+ pcpu++;
+ }
+}
+
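+/* Create the global per-cpu recycle cache on the first port open; later
+ * opens only increment the shared num_open_ports reference count.
+ */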
+static int mvpp2_recycle_open(void)
+{
+ int cpu, pl_id, size;
+ struct mvpp2_recycle_pcpu *pcpu;
+ phys_addr_t addr;
+
+ mvpp2_share.num_open_ports++;
+ wmb(); /* for num_open_ports */
+
+ if (mvpp2_share.recycle_base)
+ return 0;
+
+ /* Allocate pool-tree */
+ size = sizeof(*pcpu) * num_online_cpus() + L1_CACHE_BYTES;
+ mvpp2_share.recycle_base = kzalloc(size, GFP_KERNEL);
+ if (!mvpp2_share.recycle_base)
+ goto err;
+ /* Use Address aligned to L1_CACHE_BYTES */
+ addr = (phys_addr_t)mvpp2_share.recycle_base + (L1_CACHE_BYTES - 1);
+ addr &= ~(L1_CACHE_BYTES - 1);
+ mvpp2_share.recycle = (void *)addr;
+
+ pcpu = mvpp2_share.recycle;
+ for_each_online_cpu(cpu) {
+ for (pl_id = 0; pl_id <= MVPP2_BM_POOLS_NUM; pl_id++)
+ pcpu->idx[pl_id] = -1;
+ pcpu++;
+ }
+ return 0;
+err:
+ pr_err("mvpp2 error: cannot allocate recycle pool\n");
+ return -ENOMEM;
+}
+
+static void mvpp2_recycle_close(void)
+{
+ int cpu, pl_id, i;
+ struct mvpp2_recycle_pcpu *pcpu;
+ struct mvpp2_recycle_pool *pool;
+
+ mvpp2_share.num_open_ports--;
+ wmb(); /* for num_open_ports */
+
+ /* Do nothing if recycle is not used at all or in use by port/ports */
+ if (mvpp2_share.num_open_ports || !mvpp2_share.recycle_base)
+ return;
+
+ /* Usable (recycle_base!=NULL), but last port gone down
+ * Let's free all accumulated buffers.
+ */
+ pcpu = mvpp2_share.recycle;
+ for_each_online_cpu(cpu) {
+ for (pl_id = 0; pl_id <= MVPP2_BM_POOLS_NUM; pl_id++) {
+ pool = &pcpu->pool[pl_id];
+ for (i = 0; i <= pcpu->idx[pl_id]; i++) {
+ if (!pool->pbuf[i])
+ continue;
+ if (pl_id < MVPP2_BM_POOLS_NUM)
+ kfree(pool->pbuf[i]);
+ else
+ kmem_cache_free(skbuff_head_cache,
+ pool->pbuf[i]);
+ }
+ }
+ pcpu++;
+ }
+ kfree(mvpp2_share.recycle_base);
+ mvpp2_share.recycle_base = NULL;
+}
+
+static int mvpp2_recycle_get_bm_id(struct sk_buff *skb)
+{
+ u32 hash;
+
+ /* Keep checking ordering for performance */
+ hash = skb_get_hash_raw(skb);
+ /* Check hash */
+ if (!MVPP2_RXTX_HASH_IS_OK(skb, hash))
+ return -1;
+ /* Check if skb could be free */
+ /* Use skb->cloned but not skb_cloned(), skb_header_cloned() */
+ if (skb_shared(skb) || skb->cloned)
+ return -1;
+ /* ipsec: sp/secpath, _skb_refdst ... */
+ if (!skb_irq_freeable(skb))
+ return -1;
+ if (skb_shinfo(skb)->tx_flags & SKBTX_ZEROCOPY_FRAG)
+ return -1;
+
+ /* Get bm-pool-id */
+ hash &= MVPP2_RXTX_HASH_BMID_MASK;
+ if (hash >= MVPP2_BM_POOLS_NUM)
+ return -1;
+
+ return (int)hash;
+}
+
+static inline void mvpp2_recycle_put(struct mvpp2_port *port,
+ struct mvpp2_txq_pcpu *txq_pcpu,
+ struct mvpp2_txq_pcpu_buf *tx_buf)
+{
+ struct mvpp2_recycle_pcpu *pcpu;
+ struct mvpp2_recycle_pool *pool;
+ short int idx, pool_id;
+ struct sk_buff *skb = tx_buf->skb;
+ struct mvpp2_bm_pool *bm_pool;
+
+ /* tx_buf->skb is not NULL */
+ pool_id = mvpp2_recycle_get_bm_id(skb);
+ if (pool_id < 0)
+ return; /* non-recyclable */
+
+ bm_pool = &port->priv->bm_pools[pool_id];
+ if (skb_end_offset(skb) < (bm_pool->frag_size - MVPP2_SKB_SHINFO_SIZE))
+ return; /* shrank -> non-recyclable */
+
+ /* This skb could be destroyed. Put into recycle */
+ pcpu = mvpp2_share.recycle + txq_pcpu->thread;
+ idx = pcpu->idx[pool_id];
+ if (idx < (MVPP2_RECYCLE_FULL - 1)) {
+ pool = &pcpu->pool[pool_id];
+ pool->pbuf[++idx] = skb->head; /* pre-increment */
+ pcpu->idx[pool_id] = idx;
+ skb->head = NULL;
+ }
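+	/* Also try to recycle the sk_buff struct itself; if its data buffer
+	 * was not queued above, free the head here, and clear tx_buf->skb so
+	 * the caller does not free the skb again.
+	 */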
+ idx = pcpu->idx[MVPP2_BM_POOLS_NUM];
+ if (idx < (MVPP2_RECYCLE_FULL_SKB - 1)) {
+ pool = &pcpu->pool[MVPP2_BM_POOLS_NUM];
+ pool->pbuf[++idx] = skb;
+ pcpu->idx[MVPP2_BM_POOLS_NUM] = idx;
+ if (skb->head) {
+ if (bm_pool->frag_size <= PAGE_SIZE)
+ skb_free_frag(skb->head);
+ else
+ kfree(skb->head);
+ }
+ tx_buf->skb = NULL;
+ }
+}
+
+/* Get an skb from the per-cpu recycle cache (or allocate one) and refill the BM pool */
+static struct sk_buff *mvpp2_recycle_get(struct mvpp2_port *port,
+ struct mvpp2_bm_pool *bm_pool)
+{
+ int cpu;
+ struct mvpp2_recycle_pcpu *pcpu;
+ struct mvpp2_recycle_pool *pool;
+ short int idx;
+ void *frag;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ cpu = smp_processor_id();
+ pcpu = mvpp2_share.recycle + cpu;
+
+ /* GET bm buffer */
+ idx = pcpu->idx[bm_pool->id];
+ pool = &pcpu->pool[bm_pool->id];
+
+ if (idx >= 0) {
+ frag = pool->pbuf[idx];
+ pcpu->idx[bm_pool->id]--; /* post-decrement */
+ } else {
+ /* Allocate 2 buffers, put 1, use another now */
+ pcpu->idx[bm_pool->id] = 0;
+ pool->pbuf[0] = mvpp2_frag_alloc(bm_pool);
+ frag = NULL;
+ }
+ if (!frag)
+ frag = mvpp2_frag_alloc(bm_pool);
+
+ /* refill the buffer into BM */
+ dma_addr = dma_map_single(port->dev->dev.parent, frag,
+ bm_pool->buf_size, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
+ pcpu->idx[bm_pool->id]++; /* Return back to recycle */
+ netdev_err(port->dev, "failed to refill BM pool-%d (%d:%p)\n",
+ bm_pool->id, pcpu->idx[bm_pool->id], frag);
+ return NULL;
+ }
+
+ /* GET skb buffer */
+ idx = pcpu->idx[MVPP2_BM_POOLS_NUM];
+ if (idx >= 0) {
+ pool = &pcpu->pool[MVPP2_BM_POOLS_NUM];
+ skb = pool->pbuf[idx];
+ pcpu->idx[MVPP2_BM_POOLS_NUM]--;
+ } else {
+ skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
+ }
+
+ if (unlikely(!skb)) {
+ dma_unmap_single(port->dev->dev.parent, dma_addr,
+ bm_pool->buf_size, DMA_FROM_DEVICE);
+ mvpp2_frag_free(bm_pool, frag);
+ return NULL;
+ }
+ mvpp2_bm_pool_put(port, bm_pool->id, dma_addr);
+ return skb;
+}
+
+/* SKB and BM-buff alloc/refill like mvpp2_recycle_get but without recycle */
+static inline
+struct sk_buff *mvpp2_bm_refill_skb_get(struct mvpp2_port *port,
+ struct mvpp2_bm_pool *bm_pool)
+{
+ void *frag;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+
+ /* GET bm buffer, refill into BM */
+ frag = mvpp2_frag_alloc(bm_pool);
+ dma_addr = dma_map_single(port->dev->dev.parent, frag,
+ bm_pool->buf_size, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
+ netdev_err(port->dev, "failed to refill BM pool-%d\n",
+ bm_pool->id);
+ return NULL;
+ }
+
+ /* GET skb buffer */
+ skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
+ if (unlikely(!skb)) {
+ dma_unmap_single(port->dev->dev.parent, dma_addr,
+ bm_pool->buf_size, DMA_FROM_DEVICE);
+ mvpp2_frag_free(bm_pool, frag);
+ return NULL;
+ }
+ mvpp2_bm_pool_put(port, bm_pool->id, dma_addr);
+ return skb;
+}
+
+static inline void mvpp2_skb_set_extra(struct sk_buff *skb,
+ struct napi_struct *napi,
+ u32 status,
+ u8 rxq_id,
+ struct mvpp2_bm_pool *bm_pool)
+{
+ u32 hash;
+ enum pkt_hash_types hash_type;
+
+ /* Improve performance and set identification for RX-TX fast-forward */
+ hash = MVPP2_RXTX_HASH_GENER(skb, bm_pool->id);
+ hash_type = (status & (MVPP2_RXD_L4_UDP | MVPP2_RXD_L4_TCP)) ?
+ PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
+ skb_set_hash(skb, hash, hash_type);
+ skb_mark_napi_id(skb, napi);
+ skb_record_rx_queue(skb, (u16)rxq_id);
+}
+
+/* A "fast inline" clone of __build_skb()/build_skb() which also sets
+ * the Marvell-specific extra information (hash, NAPI id, RX queue)
+ */
+static inline
+struct sk_buff *mvpp2_build_skb(void *data, unsigned int frag_size,
+ struct napi_struct *napi,
+ struct mvpp2_port *port,
+ u32 rx_status,
+ u8 rxq_id,
+ struct mvpp2_bm_pool *bm_pool)
+{
+ struct skb_shared_info *shinfo;
+ struct sk_buff *skb;
+ unsigned int size = frag_size ? : ksize(data);
+
+ if (static_branch_unlikely(&mvpp2_recycle_ena))
+ skb = mvpp2_recycle_get(port, bm_pool);
+ else
+ skb = mvpp2_bm_refill_skb_get(port, bm_pool);
+ if (unlikely(!skb))
+ return NULL;
+
+ size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+ memset(skb, 0, offsetof(struct sk_buff, tail));
+ skb->truesize = SKB_TRUESIZE(size);
+ refcount_set(&skb->users, 1);
+ skb->head = data;
+ skb->data = data;
+ skb_reset_tail_pointer(skb);
+ skb->end = skb->tail + size;
+ skb->mac_header = (typeof(skb->mac_header))~0U;
+ skb->transport_header = (typeof(skb->transport_header))~0U;
+
+ /* make sure we initialize shinfo sequentially */
+ shinfo = skb_shinfo(skb);
+ memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
+ atomic_set(&shinfo->dataref, 1);
+
+ /* From build_skb wrapper */
+ if (frag_size) {
+ skb->head_frag = 1;
+ if (page_is_pfmemalloc(virt_to_head_page(data)))
+ skb->pfmemalloc = 1;
+ }
+
+ mvpp2_skb_set_extra(skb, napi, rx_status, rxq_id, bm_pool);
+
+ return skb;
+}
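
mvpp2_build_skb() mirrors __build_skb(): the skb_shared_info area is carved out of the tail of the BM buffer, so the usable packet area is the fragment size minus the cache-line-aligned shinfo footprint (the same quantity checked against skb_end_offset() in mvpp2_recycle_put() above). A small sketch of that size arithmetic — the byte values below are assumptions; the kernel's SKB_DATA_ALIGN really aligns to SMP_CACHE_BYTES:

```c
#include <stdio.h>

#define CACHE_BYTES 64				/* assumed SMP_CACHE_BYTES */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int frag_size = 2048;		/* assumed BM buffer size */
	unsigned int shinfo_size = 320;		/* stand-in for sizeof(struct skb_shared_info) */
	unsigned int usable = frag_size - ALIGN_UP(shinfo_size, CACHE_BYTES);

	printf("usable packet bytes: %u of %u\n", usable, frag_size);
	return 0;
}
```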
+#endif
+
/* Allocate a new skb and add it to BM pool */
static int mvpp2_rx_refill(struct mvpp2_port *port,
struct mvpp2_bm_pool *bm_pool,
@@ -3521,16 +4738,26 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
struct xdp_buff xdp;
int rx_received;
int rx_done = 0;
- u32 xdp_ret = 0;
+ u32 xdp_ret = 0, i = 0;
+ struct sk_buff *skb_all[64];
rcu_read_lock();
xdp_prog = READ_ONCE(port->xdp_prog);
- /* Get number of received packets and clamp the to-do */
- rx_received = mvpp2_rxq_received(port, rxq->id);
- if (rx_todo > rx_received)
- rx_todo = rx_received;
+ if (rxq->rx_pending >= rx_todo) {
+ rx_received = rx_todo;
+ rxq->rx_pending -= rx_todo;
+ } else {
+ /* Get number of received packets and clamp the to-do */
+ rx_received = mvpp2_rxq_received(port, rxq->id);
+ if (rx_received < rx_todo) {
+ rx_todo = rx_received;
+ rxq->rx_pending = 0;
+ } else {
+ rxq->rx_pending = rx_received - rx_todo;
+ }
+ }
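
The reworked receive loop caches the surplus of already-signalled descriptors in rxq->rx_pending, so the occupancy-counter read (mvpp2_rxq_received()) is skipped on the next poll while a backlog remains. A stand-alone sketch of that budget/pending bookkeeping, with hw_occupancy() standing in for the real counter read:

```c
#include <stdio.h>

static int hw_occupancy(void)
{
	return 37;	/* pretend the HW occupancy counter reads 37 */
}

/* Return how many descriptors to process this poll; update *pending */
static int rx_budget(int budget, int *pending)
{
	int received;

	if (*pending >= budget) {
		*pending -= budget;
		return budget;		/* skip the HW counter read entirely */
	}
	received = hw_occupancy();
	if (received < budget) {
		*pending = 0;
		return received;
	}
	*pending = received - budget;
	return budget;
}

int main(void)
{
	int pending = 0;
	int todo = rx_budget(32, &pending);

	printf("first poll:  todo=%d pending=%d\n", todo, pending);	/* 32, 5 */
	todo = rx_budget(32, &pending);
	printf("second poll: todo=%d pending=%d\n", todo, pending);	/* 32, 5 */
	return 0;
}
```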
while (rx_done < rx_todo) {
struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
@@ -3541,7 +4768,7 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
dma_addr_t dma_addr;
phys_addr_t phys_addr;
u32 rx_status, timestamp;
- int pool, rx_bytes, err, ret;
+ int pool, rx_bytes, ret;
void *data;
rx_done++;
@@ -3549,7 +4776,7 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
rx_bytes -= MVPP2_MH_SIZE;
dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
- phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
+ phys_addr = dma_to_phys(port->dev->dev.parent, dma_addr);
data = (void *)phys_to_virt(phys_addr);
pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
@@ -3616,7 +4843,29 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
}
}
+ /* _sync_ for coherency (_unmap_ is asynchronous).
+ * _sync_ must be done for the SAME size as in map/unmap.
+ * The prefetch is for the CPU and must come after the unmap,
+ * i.e. once ownership has returned to the CPU.
+ */
+ if (rx_todo == 1)
+ dma_sync_single_for_cpu(dev->dev.parent, dma_addr,
+ bm_pool->buf_size,
+ DMA_FROM_DEVICE);
+ dma_unmap_single(dev->dev.parent, dma_addr,
+ bm_pool->buf_size, DMA_FROM_DEVICE);
+
+ /* Buffer header not supported */
+ if (rx_status & MVPP2_RXD_BUF_HDR)
+ goto err_drop_frame;
+
+ prefetch(data + NET_SKB_PAD); /* packet header */
+
+#ifdef MODULE
skb = build_skb(data, frag_size);
+#else
+ skb = mvpp2_build_skb(data, frag_size,
+ napi, port, rx_status, rxq->id, bm_pool);
+#endif
if (!skb) {
netdev_warn(port->dev, "skb build failed\n");
goto err_drop_frame;
@@ -3631,29 +4880,13 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
skb_hwtstamps(skb));
}
- err = mvpp2_rx_refill(port, bm_pool, pp, pool);
- if (err) {
- netdev_err(port->dev, "failed to refill BM pools\n");
- dev_kfree_skb_any(skb);
- goto err_drop_frame;
- }
-
- if (pp)
- page_pool_release_page(pp, virt_to_page(data));
- else
- dma_unmap_single_attrs(dev->dev.parent, dma_addr,
- bm_pool->buf_size, DMA_FROM_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC);
-
- ps.rx_packets++;
- ps.rx_bytes += rx_bytes;
-
skb_reserve(skb, MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM);
skb_put(skb, rx_bytes);
skb->protocol = eth_type_trans(skb, dev);
mvpp2_rx_csum(port, rx_status, skb);
- napi_gro_receive(napi, skb);
+ skb_all[rcvd_pkts++] = skb;
+ rcvd_bytes += rx_bytes;
continue;
err_drop_frame:
@@ -3663,9 +4896,12 @@ err_drop_frame:
if (rx_status & MVPP2_RXD_BUF_HDR)
mvpp2_buff_hdr_pool_put(port, rx_desc, pool, rx_status);
else
- mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
+ mvpp2_bm_pool_put(port, pool, dma_addr);
}
+ while (i < rcvd_pkts)
+ napi_gro_receive(napi, skb_all[i++]);
+
rcu_read_unlock();
if (xdp_ret & MVPP2_XDP_REDIR)
@@ -3684,8 +4920,7 @@ err_drop_frame:
u64_stats_update_end(&stats->syncp);
}
- /* Update Rx queue management counters */
- wmb();
+ /* Update HW Rx queue management counters with RX-done */
mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);
return rx_todo;
@@ -3817,8 +5052,7 @@ static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
mvpp2_txdesc_size_set(port, tx_desc, skb_frag_size(frag));
buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
- skb_frag_size(frag),
- DMA_TO_DEVICE);
+ skb_frag_size(frag), DMA_TO_DEVICE);
if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
mvpp2_txq_desc_put(txq);
goto cleanup;
@@ -3874,6 +5108,7 @@ static inline void mvpp2_tso_put_hdr(struct sk_buff *skb,
MVPP2_TXD_F_DESC |
MVPP2_TXD_PADDING_DISABLE);
mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB);
+ mvpp2_txq_inc_put(port, txq_pcpu, TSO_HEADER_MARK, tx_desc, MVPP2_TYPE_SKB);
}
static inline int mvpp2_tso_put_data(struct sk_buff *skb,
@@ -3920,15 +5155,18 @@ static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev,
struct mvpp2_txq_pcpu *txq_pcpu)
{
struct mvpp2_port *port = netdev_priv(dev);
- int hdr_sz, i, len, descs = 0;
+ int hdr_sz, i, len, descs = tso_count_descs(skb);
struct tso_t tso;
- /* Check number of available descriptors */
- if (mvpp2_aggr_desc_num_check(port, aggr_txq, tso_count_descs(skb)) ||
- mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu,
- tso_count_descs(skb)))
+ /* Check for enough free space in the txq and for enough
+ * available aggregated/reserved descriptors
+ */
+ if (((txq_pcpu->size - txq_pcpu->count) < descs) ||
+ mvpp2_aggr_desc_num_check(port, aggr_txq, descs) ||
+ mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, descs))
return 0;
+ descs = 0; /* real descs <= tso_count_descs() */
hdr_sz = tso_start(skb, &tso);
len = skb->len - hdr_sz;
@@ -3995,8 +5233,11 @@ static netdev_tx_t mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
}
frags = skb_shinfo(skb)->nr_frags + 1;
- /* Check number of available descriptors */
- if (mvpp2_aggr_desc_num_check(port, aggr_txq, frags) ||
+ /* Check for enough free space in the txq and for enough
+ * available aggregated/reserved descriptors
+ */
+ if (((txq_pcpu->size - txq_pcpu->count) < frags) ||
+ mvpp2_aggr_desc_num_check(port, aggr_txq, frags) ||
mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, frags)) {
frags = 0;
goto out;
@@ -4043,19 +5284,41 @@ static netdev_tx_t mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
out:
if (frags > 0) {
struct mvpp2_pcpu_stats *stats = per_cpu_ptr(port->stats, thread);
- struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
+ struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
+ struct netdev_queue *nq;
+ bool deferred_tx;
txq_pcpu->reserved_num -= frags;
txq_pcpu->count += frags;
aggr_txq->count += frags;
- /* Enable transmit */
- wmb();
- mvpp2_aggr_txq_pend_desc_add(port, frags);
+ /* Enable transmit; RX-to-TX forwarding may be deferred via the bulk timer */
+ deferred_tx = (frags == 1) &&
+ MVPP2_RXTX_HASH_IS_OK_TX(skb, skb_get_hash_raw(skb)) &&
+ (aggr_txq->pending < min(MVPP2_TX_BULK_MAX_PACKETS,
+ (int)(txq->done_pkts_coal / 2)));
- if (txq_pcpu->count >= txq_pcpu->stop_threshold)
- netif_tx_stop_queue(nq);
+ if (deferred_tx) {
+ aggr_txq->pending += frags;
+ mvpp2_bulk_timer_restart(port_pcpu);
+ } else {
+ port_pcpu->bulk_timer_scheduled = false;
+ port_pcpu->bulk_timer_restart_req = false;
+ frags += aggr_txq->pending;
+ aggr_txq->pending = 0;
+ mvpp2_aggr_txq_pend_desc_add(port, frags);
+ }
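
The bulk-TX optimisation above postpones the doorbell write (mvpp2_aggr_txq_pend_desc_add()) for single-fragment, recycle-marked packets until the bulk hrtimer fires or enough descriptors pile up; the pile-up limit is the smaller of a hard cap and half of the tx-done coalescing value. A stand-alone sketch of just that decision — the cap value is an assumption standing in for MVPP2_TX_BULK_MAX_PACKETS:

```c
#include <stdbool.h>
#include <stdio.h>

#define BULK_MAX_PACKETS 8		/* assumed MVPP2_TX_BULK_MAX_PACKETS */

static int min_int(int a, int b) { return a < b ? a : b; }

/* Returns true when the doorbell write may be postponed to the bulk timer */
static bool tx_may_defer(int frags, bool hash_ok, int pending, int done_pkts_coal)
{
	return frags == 1 && hash_ok &&
	       pending < min_int(BULK_MAX_PACKETS, done_pkts_coal / 2);
}

int main(void)
{
	printf("defer: %d\n", tx_may_defer(1, true, 3, 32));	/* 3 < min(8, 16) -> 1 */
	printf("defer: %d\n", tx_may_defer(2, true, 0, 32));	/* multi-frag      -> 0 */
	return 0;
}
```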
+ if (unlikely(txq_pcpu->count >= txq_pcpu->stop_threshold)) {
+ nq = netdev_get_tx_queue(dev, txq_id);
+ /* txq_id may differ from thread/cpu and come from more
+ * than one txq_pcpu. Save only the first for wakeup.
+ */
+ if (unlikely(!netif_tx_queue_stopped(nq))) {
+ txq_pcpu->stopped_on_txq_id = txq_id;
+ netif_tx_stop_queue(nq);
+ }
+ }
u64_stats_update_begin(&stats->syncp);
stats->tx_packets++;
stats->tx_bytes += skb->len;
@@ -4074,12 +5337,7 @@ out:
txq_pcpu->count > 0) {
struct mvpp2_port_pcpu *port_pcpu = per_cpu_ptr(port->pcpu, thread);
- if (!port_pcpu->timer_scheduled) {
- port_pcpu->timer_scheduled = true;
- hrtimer_start(&port_pcpu->tx_done_timer,
- MVPP2_TXDONE_HRTIMER_PERIOD_NS,
- HRTIMER_MODE_REL_PINNED_SOFT);
- }
+ mvpp2_tx_done_timer_set(port_pcpu);
}
if (test_bit(thread, &port->priv->lock_map))
@@ -4088,23 +5346,12 @@ out:
return NETDEV_TX_OK;
}
-static inline void mvpp2_cause_error(struct net_device *dev, int cause)
-{
- if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
- netdev_err(dev, "FCS error\n");
- if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
- netdev_err(dev, "rx fifo overrun error\n");
- if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
- netdev_err(dev, "tx fifo underrun error\n");
-}
-
static int mvpp2_poll(struct napi_struct *napi, int budget)
{
- u32 cause_rx_tx, cause_rx, cause_tx, cause_misc;
+ u32 cause_rx_tx, cause_rx, cause_tx;
int rx_done = 0;
struct mvpp2_port *port = netdev_priv(napi->dev);
struct mvpp2_queue_vector *qv;
- unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
qv = container_of(napi, struct mvpp2_queue_vector, napi);
@@ -4121,20 +5368,11 @@ static int mvpp2_poll(struct napi_struct *napi, int budget)
cause_rx_tx = mvpp2_thread_read_relaxed(port->priv, qv->sw_thread_id,
MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
- cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
- if (cause_misc) {
- mvpp2_cause_error(port->dev, cause_misc);
-
- /* Clear the cause register */
- mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
- mvpp2_thread_write(port->priv, thread,
- MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
- cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
- }
-
if (port->has_tx_irqs) {
cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
if (cause_tx) {
+ per_cpu_ptr(port->pcpu,
+ qv->sw_thread_id)->tx_done_passed = true;
cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
}
@@ -4142,7 +5380,7 @@ static int mvpp2_poll(struct napi_struct *napi, int budget)
/* Process RX packets */
cause_rx = cause_rx_tx &
- MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version);
+ MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(mvpp21_variant);
cause_rx <<= qv->first_rxq;
cause_rx |= qv->pending_cause_rx;
while (cause_rx && budget > 0) {
@@ -4179,25 +5417,22 @@ static void mvpp22_mode_reconfigure(struct mvpp2_port *port)
{
u32 ctrl3;
- /* Set the GMAC & XLG MAC in reset */
- mvpp2_mac_reset_assert(port);
-
- /* Set the MPCS and XPCS in reset */
- mvpp22_pcs_reset_assert(port);
-
/* comphy reconfiguration */
mvpp22_comphy_init(port);
/* gop reconfiguration */
mvpp22_gop_init(port);
- mvpp22_pcs_reset_deassert(port);
+ if (port->phy_interface == PHY_INTERFACE_MODE_INTERNAL)
+ return;
- if (mvpp2_port_supports_xlg(port)) {
+ if (port->has_xlg_mac) {
ctrl3 = readl(port->base + MVPP22_XLG_CTRL3_REG);
ctrl3 &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
- if (mvpp2_is_xlg(port->phy_interface))
+ if (port->phy_interface == PHY_INTERFACE_MODE_RXAUI ||
+ port->phy_interface == PHY_INTERFACE_MODE_10GKR ||
+ port->phy_interface == PHY_INTERFACE_MODE_5GKR)
ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_10G;
else
ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
@@ -4205,7 +5440,10 @@ static void mvpp22_mode_reconfigure(struct mvpp2_port *port)
writel(ctrl3, port->base + MVPP22_XLG_CTRL3_REG);
}
- if (mvpp2_port_supports_xlg(port) && mvpp2_is_xlg(port->phy_interface))
+ if (port->has_xlg_mac &&
+ (port->phy_interface == PHY_INTERFACE_MODE_RXAUI ||
+ port->phy_interface == PHY_INTERFACE_MODE_10GKR ||
+ port->phy_interface == PHY_INTERFACE_MODE_5GKR))
mvpp2_xlg_max_rx_size_set(port);
else
mvpp2_gmac_max_rx_size_set(port);
@@ -4218,13 +5456,17 @@ static void mvpp2_start_dev(struct mvpp2_port *port)
mvpp2_txp_max_tx_size_set(port);
+ /* stop_dev() sets the pkts-coalescing to zero; take care to restore it now */
+ if (port->has_tx_irqs)
+ mvpp2_tx_pkts_coal_set(port);
+
for (i = 0; i < port->nqvecs; i++)
napi_enable(&port->qvecs[i].napi);
/* Enable interrupts on all threads */
mvpp2_interrupts_enable(port);
- if (port->priv->hw_version == MVPP22)
+ if (port->priv->hw_version != MVPP21)
mvpp22_mode_reconfigure(port);
if (port->phylink) {
@@ -4233,7 +5475,7 @@ static void mvpp2_start_dev(struct mvpp2_port *port)
mvpp2_acpi_start(port);
}
- netif_tx_start_all_queues(port->dev);
+ mvpp2_tx_start_all_queues(port->dev);
clear_bit(0, &port->state);
}
@@ -4243,6 +5485,21 @@ static void mvpp2_stop_dev(struct mvpp2_port *port)
{
int i;
+ /* stop_dev() is called by ifconfig but also on ethtool-features changes.
+ * Under active traffic the BM/RX and TX PP2 HW queues may be non-empty.
+ * Stop new packets arriving from both the RX and TX directions as soon
+ * as possible, but do NOT disable egress free/send-out or the tx-done
+ * interrupts; yield and msleep this context so in-flight work finishes
+ * gracefully. Flush all tx-done work by forcing pkts-coal to ZERO.
+ */
+ mvpp2_tx_stop_all_queues(port->dev);
+ mvpp2_ingress_disable(port);
+ if (port->has_tx_irqs)
+ on_each_cpu(mvpp2_tx_pkts_coal_set_zero_pcpu, port, 1);
+
+ msleep(40);
+ mvpp2_egress_disable(port);
+
set_bit(0, &port->state);
/* Disable interrupts on all threads */
@@ -4275,11 +5532,8 @@ static int mvpp2_check_ringparam_valid(struct net_device *dev,
else if (!IS_ALIGNED(ring->tx_pending, 32))
new_tx_pending = ALIGN(ring->tx_pending, 32);
- /* The Tx ring size cannot be smaller than the minimum number of
- * descriptors needed for TSO.
- */
- if (new_tx_pending < MVPP2_MAX_SKB_DESCS)
- new_tx_pending = ALIGN(MVPP2_MAX_SKB_DESCS, 32);
+ if (new_tx_pending < MVPP2_MIN_TXD(num_present_cpus()))
+ new_tx_pending = MVPP2_MIN_TXD(num_present_cpus());
if (ring->rx_pending != new_rx_pending) {
netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
@@ -4374,30 +5628,38 @@ static void mvpp2_irqs_deinit(struct mvpp2_port *port)
}
}
-static bool mvpp22_rss_is_supported(void)
+static bool mvpp22_rss_is_supported(struct mvpp2_port *port)
{
- return queue_mode == MVPP2_QDIST_MULTI_MODE;
+ return (queue_mode == MVPP2_QDIST_MULTI_MODE) &&
+ !(port->flags & MVPP2_F_LOOPBACK) &&
+ !(port->flags & MVPP22_F_IF_MUSDK);
}
static int mvpp2_open(struct net_device *dev)
{
struct mvpp2_port *port = netdev_priv(dev);
struct mvpp2 *priv = port->priv;
+ struct mvpp2_port_pcpu *port_pcpu;
unsigned char mac_bcast[ETH_ALEN] = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
bool valid = false;
- int err;
+ int err, cpu;
err = mvpp2_prs_mac_da_accept(port, mac_bcast, true);
if (err) {
netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
return err;
}
+
err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, true);
if (err) {
netdev_err(dev, "mvpp2_prs_mac_da_accept own addr failed\n");
return err;
}
+
+ if (port->flags & MVPP22_F_IF_MUSDK)
+ goto skip_musdk_parser;
+
err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
if (err) {
netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
@@ -4409,6 +5671,7 @@ static int mvpp2_open(struct net_device *dev)
return err;
}
+skip_musdk_parser:
/* Allocate the Rx/Tx queues */
err = mvpp2_setup_rxqs(port);
if (err) {
@@ -4422,6 +5685,11 @@ static int mvpp2_open(struct net_device *dev)
goto err_cleanup_rxqs;
}
+#ifndef MODULE
+ /* Recycle buffer pool for performance optimization */
+ mvpp2_recycle_open();
+#endif
+
err = mvpp2_irqs_init(port);
if (err) {
netdev_err(port->dev, "cannot init IRQs\n");
@@ -4440,7 +5708,9 @@ static int mvpp2_open(struct net_device *dev)
valid = true;
}
- if (priv->hw_version == MVPP22 && port->port_irq) {
+ if (priv->hw_version != MVPP21 && port->port_irq &&
+ (!port->phylink || !port->has_phy)) {
+ mvpp2_txqs_on_tasklet_init(port);
err = request_irq(port->port_irq, mvpp2_port_isr, 0,
dev->name, port);
if (err) {
@@ -4467,10 +5737,19 @@ static int mvpp2_open(struct net_device *dev)
goto err_free_irq;
}
+ /* Init bulk-transmit timer */
+ for_each_present_cpu(cpu) {
+ port_pcpu = per_cpu_ptr(port->pcpu, cpu);
+ port_pcpu->bulk_timer_scheduled = false;
+ port_pcpu->bulk_timer_restart_req = false;
+ }
+
/* Unmask interrupts on all CPUs */
on_each_cpu(mvpp2_interrupts_unmask, port, 1);
mvpp2_shared_interrupt_mask_unmask(port, false);
+ mvpp2_tx_done_init_on_open(port, true);
+
mvpp2_start_dev(port);
/* Start hardware statistics gathering */
@@ -4493,6 +5772,7 @@ static int mvpp2_stop(struct net_device *dev)
struct mvpp2_port *port = netdev_priv(dev);
struct mvpp2_port_pcpu *port_pcpu;
unsigned int thread;
+ int cpu;
mvpp2_stop_dev(port);
@@ -4511,16 +5791,25 @@ static int mvpp2_stop(struct net_device *dev)
port_pcpu = per_cpu_ptr(port->pcpu, thread);
hrtimer_cancel(&port_pcpu->tx_done_timer);
- port_pcpu->timer_scheduled = false;
+ port_pcpu->tx_done_timer_scheduled = false;
+ tasklet_kill(&port_pcpu->tx_done_tasklet);
}
}
+ /* Cancel bulk tasklet and timer */
+ for_each_present_cpu(cpu) {
+ port_pcpu = per_cpu_ptr(port->pcpu, cpu);
+ hrtimer_cancel(&port_pcpu->bulk_timer);
+ tasklet_kill(&port_pcpu->bulk_tasklet);
+ }
+ mvpp2_tx_done_init_on_open(port, false);
+ mvpp2_txqs_on_tasklet_kill(port);
mvpp2_cleanup_rxqs(port);
mvpp2_cleanup_txqs(port);
cancel_delayed_work_sync(&port->stats_work);
-
- mvpp2_mac_reset_assert(port);
- mvpp22_pcs_reset_assert(port);
+#ifndef MODULE
+ mvpp2_recycle_close();
+#endif
return 0;
}
@@ -4602,104 +5891,54 @@ static int mvpp2_set_mac_address(struct net_device *dev, void *p)
return err;
}
-/* Shut down all the ports, reconfigure the pools as percpu or shared,
- * then bring up again all ports.
- */
-static int mvpp2_bm_switch_buffers(struct mvpp2 *priv, bool percpu)
-{
- int numbufs = MVPP2_BM_POOLS_NUM, i;
- struct mvpp2_port *port = NULL;
- bool status[MVPP2_MAX_PORTS];
-
- for (i = 0; i < priv->port_count; i++) {
- port = priv->port_list[i];
- status[i] = netif_running(port->dev);
- if (status[i])
- mvpp2_stop(port->dev);
- }
-
- /* nrxqs is the same for all ports */
- if (priv->percpu_pools)
- numbufs = port->nrxqs * 2;
-
- for (i = 0; i < numbufs; i++)
- mvpp2_bm_pool_destroy(port->dev->dev.parent, priv, &priv->bm_pools[i]);
-
- devm_kfree(port->dev->dev.parent, priv->bm_pools);
- priv->percpu_pools = percpu;
- mvpp2_bm_init(port->dev->dev.parent, priv);
-
- for (i = 0; i < priv->port_count; i++) {
- port = priv->port_list[i];
- mvpp2_swf_bm_pool_init(port);
- if (status[i])
- mvpp2_open(port->dev);
- }
-
- return 0;
-}
-
static int mvpp2_change_mtu(struct net_device *dev, int mtu)
{
struct mvpp2_port *port = netdev_priv(dev);
- bool running = netif_running(dev);
- struct mvpp2 *priv = port->priv;
int err;
- if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
- netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
- ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
- mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
+ if (port->flags & MVPP22_F_IF_MUSDK) {
+ netdev_err(dev, "MTU cannot be modified in MUSDK mode\n");
+ return -EPERM;
}
- if (port->xdp_prog && mtu > MVPP2_MAX_RX_BUF_SIZE) {
- netdev_err(dev, "Illegal MTU value %d (> %d) for XDP mode\n",
- mtu, (int)MVPP2_MAX_RX_BUF_SIZE);
+ if (MVPP2_RX_PKT_SIZE(mtu) > MVPP2_BM_LONG_PKT_SIZE && port->xdp_prog) {
+ netdev_err(dev, "Jumbo frames are not supported with XDP\n");
return -EINVAL;
}
- if (MVPP2_RX_PKT_SIZE(mtu) > MVPP2_BM_LONG_PKT_SIZE) {
- if (priv->percpu_pools) {
- netdev_warn(dev, "mtu %d too high, switching to shared buffers", mtu);
- mvpp2_bm_switch_buffers(priv, false);
- }
- } else {
- bool jumbo = false;
- int i;
-
- for (i = 0; i < priv->port_count; i++)
- if (priv->port_list[i] != port &&
- MVPP2_RX_PKT_SIZE(priv->port_list[i]->dev->mtu) >
- MVPP2_BM_LONG_PKT_SIZE) {
- jumbo = true;
- break;
- }
-
- /* No port is using jumbo frames */
- if (!jumbo) {
- dev_info(port->dev->dev.parent,
- "all ports have a low MTU, switching to per-cpu buffers");
- mvpp2_bm_switch_buffers(priv, true);
+ if (!netif_running(dev)) {
+ err = mvpp2_bm_update_mtu(dev, mtu);
+ if (!err) {
+ port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
+ return 0;
}
+ /* Reconfigure BM to the original MTU */
+ err = mvpp2_bm_update_mtu(dev, dev->mtu);
+ if (err)
+ goto log_error;
}
- if (running)
- mvpp2_stop_dev(port);
+ mvpp2_stop_dev(port);
err = mvpp2_bm_update_mtu(dev, mtu);
- if (err) {
- netdev_err(dev, "failed to change MTU\n");
- /* Reconfigure BM to the original MTU */
- mvpp2_bm_update_mtu(dev, dev->mtu);
- } else {
+ if (!err) {
port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
+ goto out_start;
}
- if (running) {
- mvpp2_start_dev(port);
- mvpp2_egress_enable(port);
- mvpp2_ingress_enable(port);
- }
+ /* Reconfigure BM to the original MTU */
+ err = mvpp2_bm_update_mtu(dev, dev->mtu);
+ if (err)
+ goto log_error;
+
+out_start:
+ mvpp2_start_dev(port);
+ mvpp2_egress_enable(port);
+ mvpp2_ingress_enable(port);
+
+ return 0;
+log_error:
+ netdev_err(dev, "failed to change MTU\n");
return err;
}
@@ -4763,6 +6002,7 @@ mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->rx_errors = dev->stats.rx_errors;
stats->rx_dropped = dev->stats.rx_dropped;
stats->tx_dropped = dev->stats.tx_dropped;
+ stats->collisions = dev->stats.collisions;
}
static int mvpp2_set_ts_config(struct mvpp2_port *port, struct ifreq *ifr)
@@ -4930,9 +6170,9 @@ static int mvpp2_set_features(struct net_device *dev,
if (changed & NETIF_F_RXHASH) {
if (features & NETIF_F_RXHASH)
- mvpp22_port_rss_enable(port);
+ mvpp22_rss_enable(port);
else
- mvpp22_port_rss_disable(port);
+ mvpp22_rss_disable(port);
}
return 0;
@@ -5010,6 +6250,7 @@ static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
struct ethtool_coalesce *c)
{
struct mvpp2_port *port = netdev_priv(dev);
+ struct mvpp2_tx_queue *txq;
int queue;
for (queue = 0; queue < port->nrxqs; queue++) {
@@ -5021,18 +6262,22 @@ static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
mvpp2_rx_time_coal_set(port, rxq);
}
- if (port->has_tx_irqs) {
+ /* Set TX time and pkts coalescing configuration */
+ if (port->has_tx_irqs)
port->tx_time_coal = c->tx_coalesce_usecs;
- mvpp2_tx_time_coal_set(port);
- }
for (queue = 0; queue < port->ntxqs; queue++) {
- struct mvpp2_tx_queue *txq = port->txqs[queue];
-
+ txq = port->txqs[queue];
txq->done_pkts_coal = c->tx_max_coalesced_frames;
+ if (port->has_tx_irqs &&
+ txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK)
+ txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK;
+ }
- if (port->has_tx_irqs)
- mvpp2_tx_pkts_coal_set(port, txq);
+ if (port->has_tx_irqs) {
+ /* Download configured values into MVPP2 HW */
+ mvpp2_tx_time_coal_set(port);
+ mvpp2_tx_pkts_coal_set(port);
}
return 0;
@@ -5054,12 +6299,16 @@ static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
+ struct mvpp2_port *port = netdev_priv(dev);
+
strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
sizeof(drvinfo->driver));
strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
sizeof(drvinfo->version));
strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
sizeof(drvinfo->bus_info));
+ drvinfo->n_priv_flags = (port->priv->hw_version == MVPP21) ?
+ 0 : ARRAY_SIZE(mvpp22_priv_flags_strings);
}
static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
@@ -5085,6 +6334,15 @@ static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
if (err)
return err;
+ if (ring->rx_pending < MSS_THRESHOLD_START && port->tx_fc) {
+ netdev_warn(dev, "TX FC disabled. Ring size is less than %d\n",
+ MSS_THRESHOLD_START);
+ port->tx_fc = false;
+ mvpp2_rxq_disable_fc(port);
+ if (port->priv->hw_version == MVPP23)
+ mvpp23_rx_fifo_fc_en(port->priv, port->id, false);
+ }
+
if (!netif_running(dev)) {
port->rx_ring_size = ring->rx_pending;
port->tx_ring_size = ring->tx_pending;
@@ -5144,11 +6402,52 @@ static void mvpp2_ethtool_get_pause_param(struct net_device *dev,
phylink_ethtool_get_pauseparam(port->phylink, pause);
}
+static void mvpp2_reconfigure_fc(struct mvpp2_port *port)
+{
+ struct mvpp2_bm_pool **pools_pcpu = port->priv->pools_pcpu;
+ int cpu;
+
+ if (recycle) {
+ for_each_present_cpu(cpu)
+ mvpp2_bm_pool_update_fc(port, pools_pcpu[cpu],
+ port->tx_fc);
+ if (port->pool_long->type == MVPP2_BM_JUMBO)
+ mvpp2_bm_pool_update_fc(port,
+ port->pool_long, port->tx_fc);
+ else
+ mvpp2_bm_pool_update_fc(port,
+ port->pool_short, port->tx_fc);
+ } else {
+ mvpp2_bm_pool_update_fc(port, port->pool_long, port->tx_fc);
+ mvpp2_bm_pool_update_fc(port, port->pool_short, port->tx_fc);
+ }
+ if (port->priv->hw_version == MVPP23)
+ mvpp23_rx_fifo_fc_en(port->priv, port->id, port->tx_fc);
+}
+
static int mvpp2_ethtool_set_pause_param(struct net_device *dev,
struct ethtool_pauseparam *pause)
{
struct mvpp2_port *port = netdev_priv(dev);
+ if (pause->tx_pause && port->priv->global_tx_fc &&
+ bm_underrun_protect) {
+ if (port->rx_ring_size < MSS_THRESHOLD_START) {
+ netdev_err(dev, "TX FC cannot be supported.\n");
+ netdev_err(dev, "Ring size is less than %d\n",
+ MSS_THRESHOLD_START);
+ return -EINVAL;
+ }
+
+ port->tx_fc = true;
+ mvpp2_rxq_enable_fc(port);
+ mvpp2_reconfigure_fc(port);
+ } else if (port->priv->global_tx_fc) {
+ port->tx_fc = false;
+ mvpp2_rxq_disable_fc(port);
+ mvpp2_reconfigure_fc(port);
+ }
+
if (!port->phylink)
return -ENOTSUPP;
@@ -5181,9 +6480,9 @@ static int mvpp2_ethtool_get_rxnfc(struct net_device *dev,
struct ethtool_rxnfc *info, u32 *rules)
{
struct mvpp2_port *port = netdev_priv(dev);
- int ret = 0, i, loc = 0;
+ int ret = 0;
- if (!mvpp22_rss_is_supported())
+ if (!mvpp22_rss_is_supported(port))
return -EOPNOTSUPP;
switch (info->cmd) {
@@ -5193,18 +6492,6 @@ static int mvpp2_ethtool_get_rxnfc(struct net_device *dev,
case ETHTOOL_GRXRINGS:
info->data = port->nrxqs;
break;
- case ETHTOOL_GRXCLSRLCNT:
- info->rule_cnt = port->n_rfs_rules;
- break;
- case ETHTOOL_GRXCLSRULE:
- ret = mvpp2_ethtool_cls_rule_get(port, info);
- break;
- case ETHTOOL_GRXCLSRLALL:
- for (i = 0; i < MVPP2_N_RFS_ENTRIES_PER_FLOW; i++) {
- if (port->rfs_rules[i])
- rules[loc++] = i;
- }
- break;
default:
return -ENOTSUPP;
}
@@ -5218,19 +6505,13 @@ static int mvpp2_ethtool_set_rxnfc(struct net_device *dev,
struct mvpp2_port *port = netdev_priv(dev);
int ret = 0;
- if (!mvpp22_rss_is_supported())
+ if (!mvpp22_rss_is_supported(port))
return -EOPNOTSUPP;
switch (info->cmd) {
case ETHTOOL_SRXFH:
ret = mvpp2_ethtool_rxfh_set(port, info);
break;
- case ETHTOOL_SRXCLSRLINS:
- ret = mvpp2_ethtool_cls_rule_ins(port, info);
- break;
- case ETHTOOL_SRXCLSRLDEL:
- ret = mvpp2_ethtool_cls_rule_del(port, info);
- break;
default:
return -EOPNOTSUPP;
}
@@ -5239,34 +6520,136 @@ static int mvpp2_ethtool_set_rxnfc(struct net_device *dev,
static u32 mvpp2_ethtool_get_rxfh_indir_size(struct net_device *dev)
{
- return mvpp22_rss_is_supported() ? MVPP22_RSS_TABLE_ENTRIES : 0;
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ return mvpp22_rss_is_supported(port) ? MVPP22_RSS_TABLE_ENTRIES : 0;
}
static int mvpp2_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
u8 *hfunc)
{
struct mvpp2_port *port = netdev_priv(dev);
- int ret = 0;
- if (!mvpp22_rss_is_supported())
+ if (!mvpp22_rss_is_supported(port))
return -EOPNOTSUPP;
if (indir)
- ret = mvpp22_port_rss_ctx_indir_get(port, 0, indir);
+ memcpy(indir, port->indir,
+ ARRAY_SIZE(port->indir) * sizeof(port->indir[0]));
if (hfunc)
*hfunc = ETH_RSS_HASH_CRC32;
- return ret;
+ return 0;
+}
+
+/* RSS API */
+
+/* Translate CPU sequence number to real CPU ID */
+static int mvpp22_cpu_id_from_indir_tbl_get(struct mvpp2 *pp2,
+ int cpu_seq, u32 *cpu_id)
+{
+ int i;
+ int seq = 0;
+
+ if (!pp2 || !cpu_id || cpu_seq >= 16)
+ return -EINVAL;
+
+ for (i = 0; i < 16; i++) {
+ if (pp2->cpu_map & (1 << i)) {
+ if (seq == cpu_seq) {
+ *cpu_id = i;
+ return 0;
+ }
+ seq++;
+ }
+ }
+
+ return -1;
+}
+
+/* RSS */
+/* Set an RSS table entry */
+int mvpp22_rss_tbl_entry_set(struct mvpp2 *hw, struct mvpp2_rss_tbl_entry entry)
+{
+ unsigned int reg_val = 0;
+
+ if (entry.tbl_id >= MVPP22_RSS_TBL_NUM ||
+ entry.tbl_line >= MVPP22_RSS_TABLE_ENTRIES ||
+ entry.width >= MVPP22_RSS_WIDTH_MAX)
+ return -EINVAL;
+ /* Write index */
+ reg_val |= (entry.tbl_line << MVPP22_RSS_IDX_ENTRY_NUM_OFF |
+ entry.tbl_id << MVPP22_RSS_IDX_TBL_NUM_OFF);
+ mvpp2_write(hw, MVPP22_RSS_INDEX, reg_val);
+ /* Write entry */
+ reg_val &= (~MVPP22_RSS_TBL_ENTRY_MASK);
+ reg_val |= (entry.rxq << MVPP22_RSS_TBL_ENTRY_OFF);
+ mvpp2_write(hw, MVPP22_RSS_TABLE_ENTRY, reg_val);
+ reg_val &= (~MVPP22_RSS_WIDTH_MASK);
+ reg_val |= (entry.width << MVPP22_RSS_WIDTH_OFF);
+ mvpp2_write(hw, MVPP22_RSS_WIDTH, reg_val);
+
+ return 0;
+}
+
+static u32 mvpp2_get_cpu_width(struct mvpp2_port *port)
+{
+ return ilog2(roundup_pow_of_two(num_online_cpus()));
+}
+
+u32 mvpp2_get_tc_width(struct mvpp2_port *port)
+{
+ return ilog2(roundup_pow_of_two(port->num_tc_queues));
+}
+
+int mvpp22_rss_fill_table_per_tc(struct mvpp2_port *port)
+{
+ struct mvpp2_rss_tbl_entry rss_entry;
+ int rss_tbl, entry_idx;
+ u32 tc_width = 0, cpu_width = 0, cpu_id = 0;
+ int rss_tbl_needed = port->num_tc_queues;
+
+ if (queue_mode == MVPP2_QDIST_SINGLE_MODE)
+ return -1;
+
+ memset(&rss_entry, 0, sizeof(rss_entry));
+
+ if (!port->priv->cpu_map)
+ return -1;
+
+ /* Calculate cpu and tc width */
+ cpu_width = mvpp2_get_cpu_width(port);
+ tc_width = mvpp2_get_tc_width(port);
+
+ rss_entry.width = tc_width + cpu_width;
+
+ for (rss_tbl = 0; rss_tbl < rss_tbl_needed; rss_tbl++) {
+ for (entry_idx = 0; entry_idx < MVPP22_RSS_TABLE_ENTRIES;
+ entry_idx++) {
+ rss_entry.tbl_id = rss_tbl;
+ rss_entry.tbl_line = entry_idx;
+ if (mvpp22_cpu_id_from_indir_tbl_get(port->priv,
+ port->indir[entry_idx],
+ &cpu_id))
+ return -1;
+ /* The rss_tbl index equals the TC queue number */
+ rss_entry.rxq = (cpu_id << tc_width) |
+ rss_tbl;
+ if (mvpp22_rss_tbl_entry_set(port->priv, rss_entry))
+ return -1;
+ }
+ }
+
+ return 0;
}
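
The per-TC RSS fill packs the destination RXQ number as (cpu_id << tc_width) | tc, with tc_width = ceil(log2(num_tc_queues)), the total entry width being tc_width + cpu_width, and cpu_id translated from the indirection-table "CPU sequence" through the online-CPU bitmap built at probe time. A compact user-space sketch of that mapping — the CPU map and TC count are assumed example values:

```c
#include <stdio.h>
#include <stdint.h>

/* ceil(log2(n)) for n >= 1: mirrors ilog2(roundup_pow_of_two(n)) */
static unsigned int width(unsigned int n)
{
	unsigned int w = 0;

	while ((1u << w) < n)
		w++;
	return w;
}

/* Translate a CPU sequence number into a real CPU id via the online bitmap */
static int cpu_from_seq(uint16_t cpu_map, int seq)
{
	for (int i = 0, s = 0; i < 16; i++) {
		if (!(cpu_map & (1u << i)))
			continue;
		if (s++ == seq)
			return i;
	}
	return -1;
}

int main(void)
{
	uint16_t cpu_map = 0x000f;		/* assumed: CPUs 0-3 online */
	unsigned int num_cpus = 4, num_tc = 2;
	unsigned int tc_width = width(num_tc);
	unsigned int entry_width = tc_width + width(num_cpus);	/* rss_entry.width */
	int cpu = cpu_from_seq(cpu_map, 3);	/* 4th online CPU -> id 3 */
	unsigned int tc = 1;
	unsigned int rxq = ((unsigned int)cpu << tc_width) | tc;

	printf("entry width %u, cpu %d, tc %u -> rxq %u\n",
	       entry_width, cpu, tc, rxq);	/* 3, 3, 1 -> 7 */
	return 0;
}
```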
static int mvpp2_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
const u8 *key, const u8 hfunc)
{
struct mvpp2_port *port = netdev_priv(dev);
- int ret = 0;
- if (!mvpp22_rss_is_supported())
+ if (!mvpp22_rss_is_supported(port))
return -EOPNOTSUPP;
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32)
@@ -5275,60 +6658,142 @@ static int mvpp2_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
if (key)
return -EOPNOTSUPP;
- if (indir)
- ret = mvpp22_port_rss_ctx_indir_set(port, 0, indir);
+ if (indir) {
+ memcpy(port->indir, indir,
+ ARRAY_SIZE(port->indir) * sizeof(port->indir[0]));
+ if (port->num_tc_queues > 1)
+ mvpp22_rss_fill_table_per_tc(port);
+ else
+ mvpp22_rss_fill_table(port, port->id);
+ }
- return ret;
+ return 0;
}
-static int mvpp2_ethtool_get_rxfh_context(struct net_device *dev, u32 *indir,
- u8 *key, u8 *hfunc, u32 rss_context)
+static u32 mvpp22_get_priv_flags(struct net_device *dev)
{
struct mvpp2_port *port = netdev_priv(dev);
- int ret = 0;
+ u32 priv_flags = 0;
- if (!mvpp22_rss_is_supported())
- return -EOPNOTSUPP;
- if (rss_context >= MVPP22_N_RSS_TABLES)
- return -EINVAL;
+ if (port->flags & MVPP22_F_IF_MUSDK)
+ priv_flags |= MVPP22_F_IF_MUSDK_PRIV;
+ return priv_flags;
+}
- if (hfunc)
- *hfunc = ETH_RSS_HASH_CRC32;
+static int mvpp2_port_musdk_cfg(struct net_device *dev, bool ena)
+{
+ struct mvpp2_port_us_cfg {
+ unsigned int nqvecs;
+ unsigned int nrxqs;
+ unsigned int ntxqs;
+ int mtu;
+ bool rxhash_en;
+ u8 rss_en;
+ } *us;
- if (indir)
- ret = mvpp22_port_rss_ctx_indir_get(port, rss_context, indir);
+ struct mvpp2_port *port = netdev_priv(dev);
+ int rxq;
- return ret;
+ if (ena) {
+ /* Disable Queues and IntVec allocations for MUSDK,
+ * but save original values.
+ */
+ us = kzalloc(sizeof(*us), GFP_KERNEL);
+ if (!us)
+ return -ENOMEM;
+ port->us_cfg = (void *)us;
+ us->nqvecs = port->nqvecs;
+ us->nrxqs = port->nrxqs;
+ us->ntxqs = port->ntxqs;
+ us->mtu = dev->mtu;
+ us->rxhash_en = !!(dev->hw_features & NETIF_F_RXHASH);
+
+ port->nqvecs = 0;
+ port->nrxqs = 0;
+ port->ntxqs = 0;
+ if (us->rxhash_en) {
+ dev->hw_features &= ~NETIF_F_RXHASH;
+ netdev_update_features(dev);
+ }
+ } else {
+ /* Back to Kernel mode */
+ us = port->us_cfg;
+ port->nqvecs = us->nqvecs;
+ port->nrxqs = us->nrxqs;
+ port->ntxqs = us->ntxqs;
+ if (us->rxhash_en) {
+ dev->hw_features |= NETIF_F_RXHASH;
+ netdev_update_features(dev);
+ }
+ kfree(us);
+ port->us_cfg = NULL;
+
+ /* Restore RxQ/pool association */
+ for (rxq = 0; rxq < port->nrxqs; rxq++) {
+ mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
+ mvpp2_rxq_short_pool_set(port, rxq,
+ port->pool_short->id);
+ }
+ }
+ return 0;
}
-static int mvpp2_ethtool_set_rxfh_context(struct net_device *dev,
- const u32 *indir, const u8 *key,
- const u8 hfunc, u32 *rss_context,
- bool delete)
+static int mvpp2_port_musdk_set(struct net_device *dev, bool ena)
{
struct mvpp2_port *port = netdev_priv(dev);
- int ret;
+ bool running = netif_running(dev);
+ int err;
- if (!mvpp22_rss_is_supported())
- return -EOPNOTSUPP;
+ /* This procedure is called on an ethtool change or on module remove.
+ * For "remove", act only if we are in MUSDK mode and toggling back
+ * to kernel mode is really required.
+ */
+ if (!ena && !port->us_cfg)
+ return 0;
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32)
- return -EOPNOTSUPP;
+ if (running)
+ mvpp2_stop(dev);
- if (key)
- return -EOPNOTSUPP;
+ if (ena) {
+ err = mvpp2_port_musdk_cfg(dev, ena);
+ port->flags |= MVPP22_F_IF_MUSDK;
+ } else {
+ err = mvpp2_port_musdk_cfg(dev, ena);
+ port->flags &= ~MVPP22_F_IF_MUSDK;
+ }
- if (delete)
- return mvpp22_port_rss_ctx_delete(port, *rss_context);
+ if (err) {
+ netdev_err(dev, "musdk set=%d: error=%d\n", ena, err);
+ return err;
+ }
- if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
- ret = mvpp22_port_rss_ctx_create(port, rss_context);
- if (ret)
- return ret;
+ if (running)
+ mvpp2_open(dev);
+
+ return 0;
+}
+
+static int mvpp22_set_priv_flags(struct net_device *dev, u32 priv_flags)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ bool f_old, f_new;
+ int err = 0;
+
+ if (recycle && (priv_flags & MVPP22_F_IF_MUSDK_PRIV)) {
+ WARN(1, "Failed to enable MUSDK: kernel-space recycling is enabled.");
+ return -EOPNOTSUPP;
}
- return mvpp22_port_rss_ctx_indir_set(port, *rss_context, indir);
+ f_old = port->flags & MVPP22_F_IF_MUSDK;
+ f_new = priv_flags & MVPP22_F_IF_MUSDK_PRIV;
+ if (f_old != f_new)
+ err = mvpp2_port_musdk_set(dev, f_new);
+
+ return err;
}
+
/* Device ops */
static const struct net_device_ops mvpp2_netdev_ops = {
@@ -5370,8 +6835,8 @@ static const struct ethtool_ops mvpp2_eth_tool_ops = {
.get_rxfh_indir_size = mvpp2_ethtool_get_rxfh_indir_size,
.get_rxfh = mvpp2_ethtool_get_rxfh,
.set_rxfh = mvpp2_ethtool_set_rxfh,
- .get_rxfh_context = mvpp2_ethtool_get_rxfh_context,
- .set_rxfh_context = mvpp2_ethtool_set_rxfh_context,
+ .get_priv_flags = mvpp22_get_priv_flags,
+ .set_priv_flags = mvpp22_set_priv_flags,
};
/* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
@@ -5431,8 +6896,8 @@ static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
snprintf(irqname, sizeof(irqname), "hif%d", i);
if (queue_mode == MVPP2_QDIST_MULTI_MODE) {
- v->first_rxq = i;
- v->nrxqs = 1;
+ v->first_rxq = port->num_tc_queues * i;
+ v->nrxqs = port->num_tc_queues;
} else if (queue_mode == MVPP2_QDIST_SINGLE_MODE &&
i == (port->nqvecs - 1)) {
v->first_rxq = 0;
@@ -5494,7 +6959,7 @@ static void mvpp2_rx_irqs_setup(struct mvpp2_port *port)
return;
}
- /* Handle the more complicated PPv2.2 case */
+ /* Handle the more complicated PPv2.2 and PPv2.3 case */
for (i = 0; i < port->nqvecs; i++) {
struct mvpp2_queue_vector *qv = port->qvecs + i;
@@ -5518,7 +6983,7 @@ static int mvpp2_port_init(struct mvpp2_port *port)
struct mvpp2 *priv = port->priv;
struct mvpp2_txq_pcpu *txq_pcpu;
unsigned int thread;
- int queue, err, val;
+ int queue, err;
/* Checks for hardware constraints */
if (port->first_rxq + port->nrxqs >
@@ -5532,18 +6997,6 @@ static int mvpp2_port_init(struct mvpp2_port *port)
mvpp2_egress_disable(port);
mvpp2_port_disable(port);
- if (mvpp2_is_xlg(port->phy_interface)) {
- val = readl(port->base + MVPP22_XLG_CTRL0_REG);
- val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
- val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN;
- writel(val, port->base + MVPP22_XLG_CTRL0_REG);
- } else {
- val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
- val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
- val |= MVPP2_GMAC_FORCE_LINK_DOWN;
- writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
- }
-
port->tx_time_coal = MVPP2_TXDONE_COAL_USEC;
port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs),
@@ -5601,7 +7054,7 @@ static int mvpp2_port_init(struct mvpp2_port *port)
/* Map this Rx queue to a physical queue */
rxq->id = port->first_rxq + queue;
rxq->port = port->id;
- rxq->logic_rxq = queue;
+ rxq->logic_rxq = (u8)queue;
port->rxqs[queue] = rxq;
}
@@ -5626,22 +7079,20 @@ static int mvpp2_port_init(struct mvpp2_port *port)
mvpp2_cls_oversize_rxq_set(port);
mvpp2_cls_port_config(port);
- if (mvpp22_rss_is_supported())
- mvpp22_port_rss_init(port);
+ if (mvpp22_rss_is_supported(port))
+ mvpp22_rss_port_init(port);
/* Provide an initial Rx packet size */
port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);
/* Initialize pools for swf */
- err = mvpp2_swf_bm_pool_init(port);
+ if (recycle)
+ err = mvpp2_swf_bm_pool_pcpu_init(port);
+ else
+ err = mvpp2_swf_bm_pool_init(port);
if (err)
goto err_free_percpu;
- /* Clear all port stats */
- mvpp2_read_stats(port);
- memset(port->ethtool_stats, 0,
- MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs) * sizeof(u64));
-
return 0;
err_free_percpu:
@@ -5671,7 +7122,7 @@ static bool mvpp22_port_has_legacy_tx_irqs(struct device_node *port_node,
/* Checks if the port dt description has the required Tx interrupts:
* - PPv2.1: there are no such interrupts.
- * - PPv2.2:
+ * - PPv2.2 and PPv2.3:
* - The old DTs have: "rx-shared", "tx-cpuX" with X in [0...3]
* - The new ones have: "hifX" with X in [0..8]
*
@@ -5736,6 +7187,18 @@ static struct mvpp2_port *mvpp2_phylink_to_port(struct phylink_config *config)
return container_of(config, struct mvpp2_port, phylink_config);
}
+static void mvpp2_get_interface_by_speed(struct phylink_link_state *state)
+{
+ if (state->speed == SPEED_1000)
+ state->interface = PHY_INTERFACE_MODE_1000BASEX;
+ else if (state->speed == SPEED_2500)
+ state->interface = PHY_INTERFACE_MODE_2500BASEX;
+ else if (state->speed == SPEED_5000)
+ state->interface = PHY_INTERFACE_MODE_5GKR;
+ else if (state->speed == SPEED_10000)
+ state->interface = PHY_INTERFACE_MODE_10GKR;
+}
+
static struct mvpp2_port *mvpp2_pcs_to_port(struct phylink_pcs *pcs)
{
return container_of(pcs, struct mvpp2_port, phylink_pcs);
@@ -5747,7 +7210,11 @@ static void mvpp2_xlg_pcs_get_state(struct phylink_pcs *pcs,
struct mvpp2_port *port = mvpp2_pcs_to_port(pcs);
u32 val;
- state->speed = SPEED_10000;
+ if (state->interface == PHY_INTERFACE_MODE_5GKR)
+ state->speed = SPEED_5000;
+ else
+ state->speed = SPEED_10000;
+
state->duplex = 1;
state->an_complete = 1;
@@ -5793,6 +7260,7 @@ static void mvpp2_gmac_pcs_get_state(struct phylink_pcs *pcs,
state->speed = SPEED_1000;
break;
case PHY_INTERFACE_MODE_2500BASEX:
+ case PHY_INTERFACE_MODE_2500BASET:
state->speed = SPEED_2500;
break;
default:
@@ -5870,7 +7338,8 @@ static int mvpp2_gmac_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
static void mvpp2_gmac_pcs_an_restart(struct phylink_pcs *pcs)
{
- struct mvpp2_port *port = mvpp2_pcs_to_port(pcs);
+ struct net_device *dev = to_net_dev(config->dev);
+ struct mvpp2_port *port = netdev_priv(dev);
u32 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
writel(val | MVPP2_GMAC_IN_BAND_RESTART_AN,
@@ -5889,21 +7358,35 @@ static void mvpp2_phylink_validate(struct phylink_config *config,
unsigned long *supported,
struct phylink_link_state *state)
{
- struct mvpp2_port *port = mvpp2_phylink_to_port(config);
+ struct net_device *dev = to_net_dev(config->dev);
+ struct mvpp2_port *port = netdev_priv(dev);
__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+ if (!port->phy_exist)
+ mvpp2_get_interface_by_speed(state);
+
/* Invalid combinations */
switch (state->interface) {
case PHY_INTERFACE_MODE_10GBASER:
- case PHY_INTERFACE_MODE_XAUI:
+ case PHY_INTERFACE_MODE_5GKR:
+ case PHY_INTERFACE_MODE_INTERNAL:
+ if (!port->has_xlg_mac)
+ goto empty_set;
+ break;
+ case PHY_INTERFACE_MODE_RXAUI:
if (!mvpp2_port_supports_xlg(port))
goto empty_set;
break;
+ case PHY_INTERFACE_MODE_MII:
+ if (port->gop_id == 2)
+ goto empty_set;
+ /* Fall-through */
+ case PHY_INTERFACE_MODE_GMII:
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_TXID:
- if (!mvpp2_port_supports_rgmii(port))
+ if (port->priv->hw_version != MVPP21 && port->gop_id == 0)
goto empty_set;
break;
default:
@@ -5913,11 +7396,15 @@ static void mvpp2_phylink_validate(struct phylink_config *config,
phylink_set(mask, Autoneg);
phylink_set_port_modes(mask);
- switch (state->interface) {
+ phylink_set(mask, Pause);
+ phylink_set(mask, Asym_Pause);
+
+ switch (port->of_phy_interface) {
case PHY_INTERFACE_MODE_10GBASER:
- case PHY_INTERFACE_MODE_XAUI:
+ case PHY_INTERFACE_MODE_RXAUI:
case PHY_INTERFACE_MODE_NA:
- if (mvpp2_port_supports_xlg(port)) {
+ case PHY_INTERFACE_MODE_INTERNAL:
+ if (port->has_xlg_mac) {
phylink_set(mask, 10000baseT_Full);
phylink_set(mask, 10000baseCR_Full);
phylink_set(mask, 10000baseSR_Full);
@@ -5929,38 +7416,50 @@ static void mvpp2_phylink_validate(struct phylink_config *config,
if (state->interface != PHY_INTERFACE_MODE_NA)
break;
fallthrough;
+ case PHY_INTERFACE_MODE_5GKR:
+ if (port->has_xlg_mac)
+ phylink_set(mask, 5000baseT_Full);
+ if (!port->phy_exist) {
+ phylink_set(mask, 2500baseX_Full);
+ phylink_set(mask, 1000baseX_Full);
+ break;
+ }
+ /* Fall-through */
+ case PHY_INTERFACE_MODE_2500BASET:
+ phylink_set(mask, 2500baseT_Full);
+ /* Fall-through */
+ case PHY_INTERFACE_MODE_GMII:
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_TXID:
case PHY_INTERFACE_MODE_SGMII:
+ phylink_set(mask, 1000baseT_Full);
+ /* Fall-through */
+ case PHY_INTERFACE_MODE_MII:
phylink_set(mask, 10baseT_Half);
phylink_set(mask, 10baseT_Full);
phylink_set(mask, 100baseT_Half);
phylink_set(mask, 100baseT_Full);
- phylink_set(mask, 1000baseT_Full);
phylink_set(mask, 1000baseX_Full);
if (state->interface != PHY_INTERFACE_MODE_NA)
break;
- fallthrough;
- case PHY_INTERFACE_MODE_1000BASEX:
+ phylink_set(mask, 1000baseT_Full);
+ break;
case PHY_INTERFACE_MODE_2500BASEX:
if (port->comphy ||
- state->interface != PHY_INTERFACE_MODE_2500BASEX) {
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
- }
- if (port->comphy ||
state->interface == PHY_INTERFACE_MODE_2500BASEX) {
- phylink_set(mask, 2500baseT_Full);
phylink_set(mask, 2500baseX_Full);
}
+ /* Fall-through */
+ case PHY_INTERFACE_MODE_1000BASEX:
+ phylink_set(mask, 1000baseX_Full);
break;
default:
goto empty_set;
}
- bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ bitmap_copy(supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
bitmap_and(state->advertising, state->advertising, mask,
__ETHTOOL_LINK_MODE_MASK_NBITS);
@@ -5979,11 +7478,11 @@ static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode,
mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
MVPP22_XLG_CTRL0_MAC_RESET_DIS,
MVPP22_XLG_CTRL0_MAC_RESET_DIS);
- mvpp2_modify(port->base + MVPP22_XLG_CTRL4_REG,
- MVPP22_XLG_CTRL4_MACMODSELECT_GMAC |
- MVPP22_XLG_CTRL4_EN_IDLE_CHECK |
- MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC,
- MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC);
+
+ if (state->interface == PHY_INTERFACE_MODE_RXAUI)
+ mvpp2_modify(port->base + MVPP22_XLG_CTRL4_REG,
+ MVPP22_XLG_CTRL4_MACMODSELECT_GMAC,
+ MVPP22_XLG_CTRL4_USE_XPCS);
/* Wait for reset to deassert */
do {
@@ -6012,19 +7511,23 @@ static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS |
MVPP22_CTRL4_DP_CLK_SEL |
MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
- } else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
+ } else if (state->interface == PHY_INTERFACE_MODE_SGMII ||
+ state->interface == PHY_INTERFACE_MODE_2500BASET) {
ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK | MVPP2_GMAC_INBAND_AN_MASK;
ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS |
MVPP22_CTRL4_DP_CLK_SEL |
MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
- } else if (phy_interface_mode_is_rgmii(state->interface)) {
+ } else if ((phy_interface_mode_is_rgmii(state->interface)) ||
+ (state->interface == PHY_INTERFACE_MODE_MII)) {
ctrl4 &= ~MVPP22_CTRL4_DP_CLK_SEL;
ctrl4 |= MVPP22_CTRL4_EXT_PIN_GMII_SEL |
MVPP22_CTRL4_SYNC_BYPASS_DIS |
MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
}
+ ctrl4 &= ~(MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN);
+
/* Configure negotiation style */
if (!phylink_autoneg_inband(mode)) {
/* Phy or fixed speed - no in-band AN, nothing to do, leave the
@@ -6054,12 +7557,27 @@ static int mvpp2__mac_prepare(struct phylink_config *config, unsigned int mode,
phy_interface_t interface)
{
struct mvpp2_port *port = mvpp2_phylink_to_port(config);
+ struct net_device *dev = port->dev;
/* Check for invalid configuration */
- if (mvpp2_is_xlg(interface) && port->gop_id != 0) {
- netdev_err(port->dev, "Invalid mode on %s\n", port->dev->name);
- return -EINVAL;
- }
+ switch (interface) {
+ case PHY_INTERFACE_MODE_10GKR:
+ case PHY_INTERFACE_MODE_5GKR:
+ if (!port->has_xlg_mac) {
+ netdev_err(dev, "Invalid mode %s on %s\n",
+ phy_modes(interface), dev->name);
+ return -EINVAL;
+ }
+ break;
+ case PHY_INTERFACE_MODE_RXAUI:
+ if (port->id != 0) {
+ netdev_err(dev, "Invalid mode %s on %s\n",
+ phy_modes(interface), dev->name);
+ return -EINVAL;
+ }
+ break;
+ default:
+ break;
+ }
if (port->phy_interface != interface ||
phylink_autoneg_inband(mode)) {
@@ -6082,7 +7600,10 @@ static int mvpp2__mac_prepare(struct phylink_config *config, unsigned int mode,
}
/* Make sure the port is disabled when reconfiguring the mode */
- mvpp2_port_disable(port);
+ if (port->priv->hw_version != MVPP21 && change_interface) {
+ /* Make sure the port is disabled when reconfiguring the mode */
+ mvpp2_tx_stop_all_queues(port->dev);
+ mvpp2_port_disable(port);
if (port->phy_interface != interface) {
/* Place GMAC into reset */
@@ -6090,11 +7611,12 @@ static int mvpp2__mac_prepare(struct phylink_config *config, unsigned int mode,
MVPP2_GMAC_PORT_RESET_MASK,
MVPP2_GMAC_PORT_RESET_MASK);
- if (port->priv->hw_version == MVPP22) {
mvpp22_gop_mask_irq(port);
phy_power_off(port->comphy);
- }
+
+ mvpp2_tx_wake_all_queues(dev);
+ mvpp2_port_enable(port);
}
/* Select the appropriate PCS operations depending on the
@@ -6128,12 +7650,14 @@ static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode,
struct mvpp2_port *port = mvpp2_phylink_to_port(config);
/* mac (re)configuration */
- if (mvpp2_is_xlg(state->interface))
+ if (state->interface == PHY_INTERFACE_MODE_RXAUI ||
+ state->interface == PHY_INTERFACE_MODE_10GKR ||
+ state->interface == PHY_INTERFACE_MODE_5GKR) {
mvpp2_xlg_config(port, mode, state);
- else if (phy_interface_mode_is_rgmii(state->interface) ||
- phy_interface_mode_is_8023z(state->interface) ||
- state->interface == PHY_INTERFACE_MODE_SGMII)
+ } else {
mvpp2_gmac_config(port, mode, state);
+ mvpp2_gmac_tx_fifo_configure(port);
+ }
if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK)
mvpp2_port_loopback_set(port, state);
@@ -6193,75 +7717,60 @@ static void mvpp2_mac_link_up(struct phylink_config *config,
struct mvpp2_port *port = mvpp2_phylink_to_port(config);
u32 val;
- if (mvpp2_is_xlg(interface)) {
- if (!phylink_autoneg_inband(mode)) {
- val = MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
- if (tx_pause)
- val |= MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN;
- if (rx_pause)
- val |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;
+ if (!phylink_autoneg_inband(mode) &&
+ interface != PHY_INTERFACE_MODE_RXAUI &&
+ interface != PHY_INTERFACE_MODE_10GBASER &&
+ interface != PHY_INTERFACE_MODE_5GKR) {
+ val = MVPP2_GMAC_FORCE_LINK_PASS;
- mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
- MVPP22_XLG_CTRL0_FORCE_LINK_DOWN |
- MVPP22_XLG_CTRL0_FORCE_LINK_PASS |
- MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN |
- MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN, val);
- }
- } else {
- if (!phylink_autoneg_inband(mode)) {
- val = MVPP2_GMAC_FORCE_LINK_PASS;
+ if (speed == SPEED_1000 || speed == SPEED_2500)
+ val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
+ else if (speed == SPEED_100)
+ val |= MVPP2_GMAC_CONFIG_MII_SPEED;
- if (speed == SPEED_1000 || speed == SPEED_2500)
- val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
- else if (speed == SPEED_100)
- val |= MVPP2_GMAC_CONFIG_MII_SPEED;
+ if (duplex == DUPLEX_FULL)
+ val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
- if (duplex == DUPLEX_FULL)
- val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
-
- mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG,
- MVPP2_GMAC_FORCE_LINK_DOWN |
- MVPP2_GMAC_FORCE_LINK_PASS |
- MVPP2_GMAC_CONFIG_MII_SPEED |
- MVPP2_GMAC_CONFIG_GMII_SPEED |
- MVPP2_GMAC_CONFIG_FULL_DUPLEX, val);
- }
+ mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG,
+ MVPP2_GMAC_FORCE_LINK_DOWN |
+ MVPP2_GMAC_FORCE_LINK_PASS |
+ MVPP2_GMAC_CONFIG_MII_SPEED |
+ MVPP2_GMAC_CONFIG_GMII_SPEED |
+ MVPP2_GMAC_CONFIG_FULL_DUPLEX, val);
+ }
- /* We can always update the flow control enable bits;
- * these will only be effective if flow control AN
- * (MVPP2_GMAC_FLOW_CTRL_AUTONEG) is disabled.
- */
- val = 0;
- if (tx_pause)
- val |= MVPP22_CTRL4_TX_FC_EN;
- if (rx_pause)
- val |= MVPP22_CTRL4_RX_FC_EN;
+ /* We can always update the flow control enable bits;
+ * these will only be effective if flow control AN
+ * (MVPP2_GMAC_FLOW_CTRL_AUTONEG) is disabled.
+ */
+ val = 0;
+ if (tx_pause)
+ val |= MVPP22_CTRL4_TX_FC_EN;
+ if (rx_pause)
+ val |= MVPP22_CTRL4_RX_FC_EN;
- mvpp2_modify(port->base + MVPP22_GMAC_CTRL_4_REG,
- MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN,
- val);
+ mvpp2_modify(port->base + MVPP22_GMAC_CTRL_4_REG,
+ MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN,
+ val);
}
mvpp2_port_enable(port);
mvpp2_egress_enable(port);
mvpp2_ingress_enable(port);
- netif_tx_wake_all_queues(port->dev);
+ mvpp2_tx_wake_all_queues(port->dev);
}
-static void mvpp2_mac_link_down(struct phylink_config *config,
- unsigned int mode, phy_interface_t interface)
+static void mvpp2_mac_link_down(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
{
struct mvpp2_port *port = mvpp2_phylink_to_port(config);
u32 val;
- if (!phylink_autoneg_inband(mode)) {
- if (mvpp2_is_xlg(interface)) {
- val = readl(port->base + MVPP22_XLG_CTRL0_REG);
- val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
- val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN;
- writel(val, port->base + MVPP22_XLG_CTRL0_REG);
- } else {
+ if (!phylink_autoneg_inband(mode) &&
+ interface != PHY_INTERFACE_MODE_RXAUI &&
+ interface != PHY_INTERFACE_MODE_10GKR &&
+ interface != PHY_INTERFACE_MODE_5GKR) {
val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
val |= MVPP2_GMAC_FORCE_LINK_DOWN;
@@ -6269,7 +7778,7 @@ static void mvpp2_mac_link_down(struct phylink_config *config,
}
}
- netif_tx_stop_all_queues(port->dev);
+ mvpp2_tx_stop_all_queues(port->dev);
mvpp2_egress_disable(port);
mvpp2_ingress_disable(port);
@@ -6308,6 +7817,36 @@ static void mvpp2_acpi_start(struct mvpp2_port *port)
SPEED_UNKNOWN, DUPLEX_UNKNOWN, false, false);
}
+#if IS_ENABLED(CONFIG_NET_DSA)
+/* DSA notifier */
+static void mvpp2_dsa_port_register(struct net_device *dev)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ struct mvpp2 *priv = port->priv;
+ u32 reg;
+
+ /* For a switch port, enable non-extended DSA tags and make sure
+ * extended DSA tag usage is disabled, since the two options
+ * cannot coexist.
+ */
+ reg = mvpp2_read(priv, MVPP2_MH_REG(port->id));
+ reg &= ~MVPP2_DSA_EXTENDED;
+ reg |= MVPP2_DSA_NON_EXTENDED;
+ mvpp2_write(priv, MVPP2_MH_REG(port->id), reg);
+}
+
+static int mvpp2_dsa_notifier(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct dsa_notifier_register_info *info = ptr;
+
+ if (event == DSA_PORT_REGISTER)
+ mvpp2_dsa_port_register(info->master);
+
+ return NOTIFY_DONE;
+}
+#endif
+
/* Ports initialization */
static int mvpp2_port_probe(struct platform_device *pdev,
struct fwnode_handle *port_fwnode,
@@ -6317,16 +7856,21 @@ static int mvpp2_port_probe(struct platform_device *pdev,
struct mvpp2_port *port;
struct mvpp2_port_pcpu *port_pcpu;
struct device_node *port_node = to_of_node(port_fwnode);
- netdev_features_t features;
struct net_device *dev;
+ struct resource *res;
struct phylink *phylink;
char *mac_from = "";
- unsigned int ntxqs, nrxqs, thread;
+ unsigned int ntxqs, nrxqs;
unsigned long flags = 0;
+ u32 cpu_nrxqs;
+ u16 cpu_map = 0;
bool has_tx_irqs;
+ dma_addr_t p;
u32 id;
+ int features;
int phy_mode;
int err, i;
+ int cpu;
has_tx_irqs = mvpp2_port_has_irqs(priv, port_node, &flags);
if (!has_tx_irqs && queue_mode == MVPP2_QDIST_MULTI_MODE) {
@@ -6336,12 +7880,34 @@ static int mvpp2_port_probe(struct platform_device *pdev,
}
ntxqs = MVPP2_MAX_TXQ;
- nrxqs = mvpp2_get_nrxqs(priv);
+ cpu_nrxqs = MVPP2_NUM_OF_TC;
+ if (priv->hw_version != MVPP21 && queue_mode ==
+ MVPP2_QDIST_SINGLE_MODE) {
+ nrxqs = 1;
+ } else {
+ /* According to the PPv2.2 datasheet and our experiments on
+ * PPv2.1, RX queues have an allocation granularity of 4 (when
+ * more than a single queue is used on PPv2.2).
+ * Round up to the nearest multiple of 4.
+ */
+ nrxqs = (num_possible_cpus() * cpu_nrxqs + 3) & ~0x3;
+ if (nrxqs > MVPP2_PORT_MAX_RXQ) {
+ nrxqs = MVPP2_PORT_MAX_RXQ;
+ cpu_nrxqs = MVPP2_PORT_MAX_RXQ / num_possible_cpus();
+ dev_warn(&pdev->dev, "cpu_nrxqs too big, set to %d\n", cpu_nrxqs);
+ }
+ }
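
The rounding above keeps the total number of RX queues a multiple of 4 (the hardware allocation granularity) and caps it at the per-port maximum, shrinking the per-CPU TC count if needed. A one-line sketch of the arithmetic with assumed values (max_rxq stands in for MVPP2_PORT_MAX_RXQ):

```c
#include <stdio.h>

int main(void)
{
	unsigned int cpus = 6, tcs_per_cpu = 1;			/* assumed */
	unsigned int max_rxq = 32;				/* stand-in for MVPP2_PORT_MAX_RXQ */
	unsigned int nrxqs = (cpus * tcs_per_cpu + 3) & ~0x3u;	/* round up to multiple of 4 */

	if (nrxqs > max_rxq)
		nrxqs = max_rxq;
	printf("nrxqs = %u\n", nrxqs);				/* 8 */
	return 0;
}
```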
dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs);
if (!dev)
return -ENOMEM;
+ /* XPS: map TX queues to CPUs 0..N (N may be less than ntxqs) */
+ for_each_online_cpu(cpu) {
+ cpu_map |= (1 << cpu);
+ netif_set_xps_queue(dev, cpumask_of(cpu), cpu);
+ }
+ priv->cpu_map = cpu_map;
phy_mode = fwnode_get_phy_mode(port_fwnode);
if (phy_mode < 0) {
dev_err(&pdev->dev, "incorrect phy mode\n");
@@ -6384,6 +7950,13 @@ static int mvpp2_port_probe(struct platform_device *pdev,
port->dev = dev;
port->fwnode = port_fwnode;
port->has_phy = !!of_find_property(port_node, "phy", NULL);
+ port->num_tc_queues = cpu_nrxqs;
+ if (port->has_phy && phy_mode == PHY_INTERFACE_MODE_INTERNAL) {
+ err = -EINVAL;
+ dev_err(&pdev->dev, "internal mode doesn't work with phy\n");
+ goto err_free_netdev;
+ }
+
port->ntxqs = ntxqs;
port->nrxqs = nrxqs;
port->priv = priv;
@@ -6417,10 +7990,21 @@ static int mvpp2_port_probe(struct platform_device *pdev,
port->of_node = port_node;
port->phy_interface = phy_mode;
+ port->of_phy_interface = phy_mode;
port->comphy = comphy;
+ if (of_phy_find_device(port_node))
+ port->phy_exist = true;
+ else
+ port->phy_exist = false;
+
+ if ((port->id == 0 && port->priv->hw_version != MVPP21) ||
+ (port->id == 1 && port->priv->hw_version == MVPP23))
+ port->has_xlg_mac = true;
+
if (priv->hw_version == MVPP21) {
- port->base = devm_platform_ioremap_resource(pdev, 2 + id);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2 + id);
+ port->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(port->base)) {
err = PTR_ERR(port->base);
goto err_free_irq;
@@ -6456,13 +8040,16 @@ static int mvpp2_port_probe(struct platform_device *pdev,
goto err_free_irq;
}
- port->ethtool_stats = devm_kcalloc(&pdev->dev,
- MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs),
- sizeof(u64), GFP_KERNEL);
- if (!port->ethtool_stats) {
+ p = (dma_addr_t)devm_kcalloc(&pdev->dev,
+ ARRAY_SIZE(mvpp2_ethtool_regs) +
+ L1_CACHE_BYTES,
+ sizeof(u64), GFP_KERNEL);
+ if (!p) {
err = -ENOMEM;
goto err_free_stats;
}
+ p = (p + ~CACHE_LINE_MASK) & CACHE_LINE_MASK;
+ port->ethtool_stats = (void *)p;
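+ /* Editorial note (illustrative, not part of the patch): assuming
+ * CACHE_LINE_MASK is ~(L1_CACHE_BYTES - 1), e.g. ~63UL for 64-byte
+ * cache lines, the rounding above turns a pointer such as 0x1028
+ * into (0x1028 + 63) & ~63 == 0x1040, i.e. the next cache-line
+ * boundary. The extra L1_CACHE_BYTES allocated above guarantees the
+ * rounded pointer still lies inside the allocation; the same idiom
+ * is reused for priv->aggr_txqs in mvpp2_init().
+ */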
mutex_init(&port->gather_stats_lock);
INIT_DELAYED_WORK(&port->stats_work, mvpp2_gather_hw_statistics);
@@ -6481,8 +8068,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
mvpp2_port_periodic_xon_disable(port);
- mvpp2_mac_reset_assert(port);
- mvpp22_pcs_reset_assert(port);
+ mvpp2_port_reset(port);
port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
if (!port->pcpu) {
@@ -6490,31 +8076,31 @@ static int mvpp2_port_probe(struct platform_device *pdev,
goto err_free_txq_pcpu;
}
- if (!port->has_tx_irqs) {
- for (thread = 0; thread < priv->nthreads; thread++) {
- port_pcpu = per_cpu_ptr(port->pcpu, thread);
+ /* Init tx-done/guard timer and tasklet */
+ mvpp2_tx_done_init_on_probe(pdev, port);
- hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_PINNED_SOFT);
- port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
- port_pcpu->timer_scheduled = false;
- port_pcpu->dev = dev;
- }
+ /* Init bulk timer and tasklet */
+ for_each_present_cpu(cpu) {
+ port_pcpu = per_cpu_ptr(port->pcpu, cpu);
+ hrtimer_init(&port_pcpu->bulk_timer,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
+ port_pcpu->bulk_timer.function = mvpp2_bulk_timer_cb;
+ tasklet_init(&port_pcpu->bulk_tasklet,
+ mvpp2_bulk_tasklet_cb, (unsigned long)dev);
}
features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_TSO;
+ NETIF_F_TSO | NETIF_F_HW_VLAN_CTAG_FILTER;
dev->features = features | NETIF_F_RXCSUM;
- dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO |
- NETIF_F_HW_VLAN_CTAG_FILTER;
+ dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO;
- if (mvpp22_rss_is_supported()) {
+ if (mvpp22_rss_is_supported(port))
dev->hw_features |= NETIF_F_RXHASH;
- dev->features |= NETIF_F_NTUPLE;
- }
- if (!port->priv->percpu_pools)
- mvpp2_set_hw_csum(port, port->pool_long->id);
+ if (port->pool_long->id == MVPP2_BM_JUMBO && port->id != 0) {
+ dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+ dev->hw_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+ }
dev->vlan_features |= features;
dev->gso_max_segs = MVPP2_MAX_TSO_SEGS;
@@ -6522,8 +8108,8 @@ static int mvpp2_port_probe(struct platform_device *pdev,
/* MTU range: 68 - 9704 */
dev->min_mtu = ETH_MIN_MTU;
- /* 9704 == 9728 - 20 and rounding to 8 */
- dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;
+ /* 9704 == 9728 - 24 (the MTU itself is not rounded, only frag_size is) */
+ dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE - MVPP2_MTU_OVERHEAD_SIZE;
dev->dev.of_node = port_node;
/* Phylink isn't used w/ ACPI as of now */
@@ -6561,6 +8147,38 @@ static int mvpp2_port_probe(struct platform_device *pdev,
priv->port_list[priv->port_count++] = port;
+ /* The port may have been configured by U-Boot to transmit IDLE, so the
+ * remote side sees the link as UP. Stop TX the same way mvpp2_open/stop
+ * do.
+ */
+ if (port->of_node && port->phylink) {
+ if (rtnl_is_locked()) {
+ if (!phylink_of_phy_connect(port->phylink,
+ port->of_node, 0))
+ phylink_disconnect_phy(port->phylink);
+ } else {
+ rtnl_lock();
+ if (!phylink_of_phy_connect(port->phylink,
+ port->of_node, 0))
+ phylink_disconnect_phy(port->phylink);
+ rtnl_unlock();
+ }
+ }
+
+ /* Init TX locks and bm locks */
+ for (i = 0; i < MVPP2_MAX_THREADS; i++) {
+ spin_lock_init(&port->bm_lock[i]);
+ spin_lock_init(&port->tx_lock[i]);
+ }
+
+#if IS_ENABLED(CONFIG_NET_DSA)
+ /* Register DSA notifier */
+ port->dsa_notifier.notifier_call = mvpp2_dsa_notifier;
+ err = register_dsa_notifier(&port->dsa_notifier);
+ if (err) {
+ dev_err(&pdev->dev, "failed to register DSA notifier\n");
+ goto err_phylink;
+ }
+#endif
return 0;
err_phylink:
@@ -6588,7 +8206,12 @@ static void mvpp2_port_remove(struct mvpp2_port *port)
{
int i;
+ mvpp2_port_musdk_set(port->dev, false);
+ kfree(port->dbgfs_port_flow_entry);
unregister_netdev(port->dev);
+#if IS_ENABLED(CONFIG_NET_DSA)
+ unregister_dsa_notifier(&port->dsa_notifier);
+#endif
if (port->phylink)
phylink_destroy(port->phylink);
free_percpu(port->pcpu);
@@ -6651,32 +8274,56 @@ static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}
-static void mvpp22_rx_fifo_init(struct mvpp2 *priv)
+static void mvpp22_rx_fifo_set_hw(struct mvpp2 *priv, int port, int data_size)
{
- int port;
+ int attr_size = MVPP2_RX_FIFO_PORT_ATTR_SIZE(data_size);
- /* The FIFO size parameters are set depending on the maximum speed a
- * given port can handle:
- * - Port 0: 10Gbps
- * - Port 1: 2.5Gbps
- * - Ports 2 and 3: 1Gbps
- */
+ mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port), data_size);
+ mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port), attr_size);
+}
- mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(0),
- MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB);
- mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(0),
- MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB);
+/* Initialize RX FIFOs: the total FIFO size is 48kB on PPv2.2 and PPv2.3.
+ * 4kB of fixed space must be assigned to the loopback port.
+ * Redistribute the remaining available 44kB among all active ports.
+ * Guarantee a minimum of 32kB for the 10G port and 8kB for port 1, which
+ * is capable of a 2.5G SGMII link.
+ */
+static void mvpp22_rx_fifo_init(struct mvpp2 *priv)
+{
+ int port, size;
+ unsigned long port_map;
+ int remaining_ports_count;
+ int size_remainder;
+
+ /* The loopback requires fixed 4kB of the FIFO space assignment. */
+ mvpp22_rx_fifo_set_hw(priv, MVPP2_LOOPBACK_PORT_INDEX,
+ MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
+ port_map = priv->port_map & ~BIT(MVPP2_LOOPBACK_PORT_INDEX);
+
+ /* Set RX FIFO size to 0 for inactive ports. */
+ for_each_clear_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX)
+ mvpp22_rx_fifo_set_hw(priv, port, 0);
+
+ /* Assign remaining RX FIFO space among all active ports. */
+ size_remainder = MVPP2_RX_FIFO_PORT_DATA_SIZE_44KB;
+ remaining_ports_count = hweight_long(port_map);
+
+ for_each_set_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) {
+ if (remaining_ports_count == 1)
+ size = size_remainder;
+ else if (port == 0)
+ size = max(size_remainder / remaining_ports_count,
+ MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB);
+ else if (port == 1)
+ size = max(size_remainder / remaining_ports_count,
+ MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB);
+ else
+ size = size_remainder / remaining_ports_count;
- mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(1),
- MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB);
- mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(1),
- MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB);
+ size_remainder -= size;
+ remaining_ports_count--;
- for (port = 2; port < MVPP2_MAX_PORTS; port++) {
- mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
- MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
- mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
- MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
+ mvpp22_rx_fifo_set_hw(priv, port, size);
}
mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
@@ -6684,27 +8331,152 @@ static void mvpp22_rx_fifo_init(struct mvpp2 *priv)
mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}
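As an editorial aside (not part of the patch), a minimal user-space sketch of
the split computed by the loop above, assuming the
MVPP2_RX_FIFO_PORT_DATA_SIZE_* macros count kB units and that ports 0, 1 and 2
are the active ports:

#include <stdio.h>

int main(void)
{
	int remainder = 44;	/* 48kB total minus 4kB for loopback */
	int remaining = 3;	/* active ports 0..2 */
	int sizes[3], port;

	for (port = 0; port < 3; port++) {
		int size, share = remainder / remaining;

		if (remaining == 1)
			size = remainder;
		else if (port == 0)	/* 10G port: at least 32kB */
			size = share > 32 ? share : 32;
		else if (port == 1)	/* 2.5G-capable port: at least 8kB */
			size = share > 8 ? share : 8;
		else
			size = share;

		remainder -= size;
		remaining--;
		sizes[port] = size;
	}
	/* Prints "32 8 4": 32kB, 8kB and 4kB for ports 0, 1 and 2 */
	printf("%d %d %d\n", sizes[0], sizes[1], sizes[2]);
	return 0;
}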
-/* Initialize Tx FIFO's: the total FIFO size is 19kB on PPv2.2 and 10G
- * interfaces must have a Tx FIFO size of 10kB. As only port 0 can do 10G,
- * configure its Tx FIFO size to 10kB and the others ports Tx FIFO size to 3kB.
- */
-static void mvpp22_tx_fifo_init(struct mvpp2 *priv)
+/* Configure Rx FIFO Flow control thresholds */
+static void mvpp23_rx_fifo_fc_set_tresh(struct mvpp2 *priv)
{
- int port, size, thrs;
+ int port, val;
- for (port = 0; port < MVPP2_MAX_PORTS; port++) {
+ /* Port 0: maximum speed 10Gb/s, spec requires an RX FIFO threshold of 9KB
+ * Port 1: maximum speed 5Gb/s, spec requires an RX FIFO threshold of 4KB
+ * Port 2: maximum speed 1Gb/s, spec requires an RX FIFO threshold of 2KB
+ */
+
+ /* Without loopback port */
+ for (port = 0; port < (MVPP2_MAX_PORTS - 1); port++) {
if (port == 0) {
- size = MVPP22_TX_FIFO_DATA_SIZE_10KB;
- thrs = MVPP2_TX_FIFO_THRESHOLD_10KB;
+ val = (MVPP23_PORT0_FIFO_TRSH / MVPP2_RX_FC_TRSH_UNIT)
+ << MVPP2_RX_FC_TRSH_OFFS;
+ val &= MVPP2_RX_FC_TRSH_MASK;
+ mvpp2_write(priv, MVPP2_RX_FC_REG(port), val);
+ } else if (port == 1) {
+ val = (MVPP23_PORT1_FIFO_TRSH / MVPP2_RX_FC_TRSH_UNIT)
+ << MVPP2_RX_FC_TRSH_OFFS;
+ val &= MVPP2_RX_FC_TRSH_MASK;
+ mvpp2_write(priv, MVPP2_RX_FC_REG(port), val);
} else {
- size = MVPP22_TX_FIFO_DATA_SIZE_3KB;
- thrs = MVPP2_TX_FIFO_THRESHOLD_3KB;
+ val = (MVPP23_PORT2_FIFO_TRSH / MVPP2_RX_FC_TRSH_UNIT)
+ << MVPP2_RX_FC_TRSH_OFFS;
+ val &= MVPP2_RX_FC_TRSH_MASK;
+ mvpp2_write(priv, MVPP2_RX_FC_REG(port), val);
}
- mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), size);
- mvpp2_write(priv, MVPP22_TX_FIFO_THRESH_REG(port), thrs);
}
}
+/* Enable/disable Rx FIFO flow control for a port */
+void mvpp23_rx_fifo_fc_en(struct mvpp2 *priv, int port, bool en)
+{
+ int val;
+
+ val = mvpp2_read(priv, MVPP2_RX_FC_REG(port));
+
+ if (en)
+ val |= MVPP2_RX_FC_EN;
+ else
+ val &= ~MVPP2_RX_FC_EN;
+
+ mvpp2_write(priv, MVPP2_RX_FC_REG(port), val);
+}
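A hedged illustration (not part of the patch) of how the threshold field
written by mvpp23_rx_fifo_fc_set_tresh() is formed; the real
MVPP2_RX_FC_TRSH_* macros live in mvpp2.h, so the unit, offset and mask
values below are purely hypothetical:

#include <stdio.h>

#define RX_FC_TRSH_UNIT	256				/* hypothetical */
#define RX_FC_TRSH_OFFS	16				/* hypothetical */
#define RX_FC_TRSH_MASK	(0x3ff << RX_FC_TRSH_OFFS)	/* hypothetical */

int main(void)
{
	unsigned int thresh = 9 * 1024;	/* 9KB threshold for port 0 */
	unsigned int val;

	/* Same shape as the driver code: threshold expressed in units,
	 * shifted into place and masked to the field width.
	 */
	val = ((thresh / RX_FC_TRSH_UNIT) << RX_FC_TRSH_OFFS) & RX_FC_TRSH_MASK;
	printf("0x%08x\n", val);	/* 36 units -> 0x00240000 */
	return 0;
}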
+
+static void mvpp22_tx_fifo_set_hw(struct mvpp2 *priv, int port, int size)
+{
+ int threshold = MVPP2_TX_FIFO_THRESHOLD(size);
+
+ mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), size);
+ mvpp2_write(priv, MVPP22_TX_FIFO_THRESH_REG(port), threshold);
+}
+
+/* Initialize TX FIFOs: the total FIFO size is 19kB on PPv2.2 and PPv2.3.
+ * 1kB of fixed space must be assigned to the loopback port.
+ * Redistribute the remaining available 18kB among all active ports.
+ * The 10G interface should use 10kB (the maximum possible size per
+ * single port).
+ */
+static void mvpp22_tx_fifo_init_default(struct mvpp2 *priv)
+{
+ int port, size;
+ unsigned long port_map;
+ int remaining_ports_count;
+ int size_remainder;
+
+ /* The loopback requires fixed 1kB of the FIFO space assignment. */
+ mvpp22_tx_fifo_set_hw(priv, MVPP2_LOOPBACK_PORT_INDEX,
+ MVPP22_TX_FIFO_DATA_SIZE_1KB);
+ port_map = priv->port_map & ~BIT(MVPP2_LOOPBACK_PORT_INDEX);
+
+ /* Set TX FIFO size to 0 for inactive ports. */
+ for_each_clear_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX)
+ mvpp22_tx_fifo_set_hw(priv, port, 0);
+
+ /* Assign remaining TX FIFO space among all active ports. */
+ size_remainder = MVPP22_TX_FIFO_DATA_SIZE_18KB;
+ remaining_ports_count = hweight_long(port_map);
+
+ for_each_set_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) {
+ if (remaining_ports_count == 1)
+ size = min(size_remainder,
+ MVPP22_TX_FIFO_DATA_SIZE_10KB);
+ else if (port == 0)
+ size = MVPP22_TX_FIFO_DATA_SIZE_10KB;
+ else
+ size = size_remainder / remaining_ports_count;
+
+ size_remainder -= size;
+ remaining_ports_count--;
+
+ mvpp22_tx_fifo_set_hw(priv, port, size);
+ }
+}
+
+static void mvpp22_tx_fifo_init_param(struct platform_device *pdev,
+ struct mvpp2 *priv)
+{
+ unsigned long port_map;
+ int size_remainder;
+ int port, size;
+
+ /* The loopback requires fixed 1kB of the FIFO space assignment. */
+ mvpp22_tx_fifo_set_hw(priv, MVPP2_LOOPBACK_PORT_INDEX,
+ MVPP22_TX_FIFO_DATA_SIZE_1KB);
+ port_map = priv->port_map & ~BIT(MVPP2_LOOPBACK_PORT_INDEX);
+
+ /* Set TX FIFO size to 0 for inactive ports. */
+ for_each_clear_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) {
+ mvpp22_tx_fifo_set_hw(priv, port, 0);
+ if (MVPP22_TX_FIFO_EXTRA_PARAM_SIZE(port, tx_fifo_map))
+ goto error;
+ }
+
+ /* Each physical port requires a minimum of 3kB */
+ for_each_set_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) {
+ size = MVPP22_TX_FIFO_EXTRA_PARAM_SIZE(port, tx_fifo_map);
+ if (size < MVPP22_TX_FIFO_DATA_SIZE_MIN ||
+ size > MVPP22_TX_FIFO_DATA_SIZE_MAX)
+ goto error;
+ }
+
+ /* Assign remaining TX FIFO space among all active ports. */
+ size_remainder = MVPP22_TX_FIFO_DATA_SIZE_18KB;
+ for (port = 0; port < MVPP2_LOOPBACK_PORT_INDEX; port++) {
+ size = MVPP22_TX_FIFO_EXTRA_PARAM_SIZE(port, tx_fifo_map);
+ if (!size)
+ continue;
+ size_remainder -= size;
+ mvpp22_tx_fifo_set_hw(priv, port, size);
+ }
+
+ if (size_remainder)
+ goto error;
+
+ return;
+
+error:
+ dev_warn(&pdev->dev, "Failed to set TX FIFO from module_param, falling back to default\n");
+ mvpp22_tx_fifo_init_default(priv);
+}
+
static void mvpp2_axi_init(struct mvpp2 *priv)
{
u32 val, rdval, wrval;
@@ -6734,6 +8506,10 @@ static void mvpp2_axi_init(struct mvpp2 *priv)
mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);
/* Buffer Data */
+ /* Force priority for TX FIFO transactions on the AXI QoS bus */
+ if (tx_fifo_protection)
+ rdval |= MVPP22_AXI_TX_DATA_RD_QOS_ATTRIBUTE;
+
mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);
@@ -6765,13 +8541,14 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
const struct mbus_dram_target_info *dram_target_info;
int err, i;
u32 val;
+ dma_addr_t p;
/* MBUS windows configuration */
dram_target_info = mv_mbus_dram_info();
if (dram_target_info)
mvpp2_conf_mbus_windows(dram_target_info, priv);
- if (priv->hw_version == MVPP22)
+ if (priv->hw_version != MVPP21)
mvpp2_axi_init(priv);
/* Disable HW PHY polling */
@@ -6785,12 +8562,16 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
}
- /* Allocate and initialize aggregated TXQs */
- priv->aggr_txqs = devm_kcalloc(&pdev->dev, MVPP2_MAX_THREADS,
- sizeof(*priv->aggr_txqs),
- GFP_KERNEL);
- if (!priv->aggr_txqs)
+ /* Allocate and initialize aggregated TXQs.
+ * The per-CPU aggr_txqs[] entries should be cache-line aligned,
+ * so allocate more than needed and round the pointer up.
+ */
+ val = sizeof(*priv->aggr_txqs) * MVPP2_MAX_THREADS + L1_CACHE_BYTES;
+ p = (dma_addr_t)devm_kzalloc(&pdev->dev, val, GFP_KERNEL);
+ if (!p)
return -ENOMEM;
+ p = (p + ~CACHE_LINE_MASK) & CACHE_LINE_MASK;
+ priv->aggr_txqs = (struct mvpp2_tx_queue *)p;
for (i = 0; i < MVPP2_MAX_THREADS; i++) {
priv->aggr_txqs[i].id = i;
@@ -6805,7 +8586,12 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
mvpp2_rx_fifo_init(priv);
} else {
mvpp22_rx_fifo_init(priv);
- mvpp22_tx_fifo_init(priv);
+ if (tx_fifo_map)
+ mvpp22_tx_fifo_init_param(pdev, priv);
+ else
+ mvpp22_tx_fifo_init_default(priv);
+ if (priv->hw_version == MVPP23)
+ mvpp23_rx_fifo_fc_set_tresh(priv);
}
if (priv->hw_version == MVPP21)
@@ -6816,7 +8602,7 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);
/* Buffer Manager initialization */
- err = mvpp2_bm_init(&pdev->dev, priv);
+ err = mvpp2_bm_init(pdev, priv);
if (err < 0)
return err;
@@ -6828,6 +8614,38 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
/* Classifier default initialization */
mvpp2_cls_init(priv);
+ /* Disable all ingress queues */
+ mvpp2_rxq_disable_all(priv);
+
+ return 0;
+}
+
+static int mvpp2_get_sram(struct platform_device *pdev,
+ struct mvpp2 *priv)
+{
+ struct device_node *dn = pdev->dev.of_node;
+ struct resource *res;
+
+ if (has_acpi_companion(&pdev->dev)) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ if (!res) {
+ dev_warn(&pdev->dev, "ACPI is too old, TX FC disabled\n");
+ return 0;
+ }
+ priv->cm3_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->cm3_base))
+ return PTR_ERR(priv->cm3_base);
+ } else {
+ priv->sram_pool = of_gen_pool_get(dn, "cm3-mem", 0);
+ if (!priv->sram_pool) {
+ dev_warn(&pdev->dev, "DT is too old, TX FC disabled\n");
+ return 0;
+ }
+ priv->cm3_base = (void __iomem *)gen_pool_alloc(priv->sram_pool,
+ MSS_SRAM_SIZE);
+ if (!priv->cm3_base)
+ return -ENOMEM;
+ }
return 0;
}
@@ -6849,8 +8667,6 @@ static int mvpp2_probe(struct platform_device *pdev)
if (has_acpi_companion(&pdev->dev)) {
acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
&pdev->dev);
- if (!acpi_id)
- return -EINVAL;
priv->hw_version = (unsigned long)acpi_id->driver_data;
} else {
priv->hw_version =
@@ -6863,12 +8679,14 @@ static int mvpp2_probe(struct platform_device *pdev)
if (priv->hw_version == MVPP21)
queue_mode = MVPP2_QDIST_SINGLE_MODE;
- base = devm_platform_ioremap_resource(pdev, 0);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(base))
return PTR_ERR(base);
if (priv->hw_version == MVPP21) {
- priv->lms_base = devm_platform_ioremap_resource(pdev, 1);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(priv->lms_base))
return PTR_ERR(priv->lms_base);
} else {
@@ -6891,9 +8709,18 @@ static int mvpp2_probe(struct platform_device *pdev)
priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(priv->iface_base))
return PTR_ERR(priv->iface_base);
+
+ /* Map CM3 SRAM */
+ err = mvpp2_get_sram(pdev, priv);
+ if (err)
+ dev_warn(&pdev->dev, "Failed to allocate CM3 SRAM\n");
+
+ /* Enable global Flow Control only if the handle to the SRAM is not NULL */
+ if (priv->cm3_base)
+ priv->global_tx_fc = true;
}
- if (priv->hw_version == MVPP22 && dev_of_node(&pdev->dev)) {
+ if (priv->hw_version != MVPP21 && dev_of_node(&pdev->dev)) {
priv->sysctrl_base =
syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
"marvell,system-controller");
@@ -6906,13 +8733,6 @@ static int mvpp2_probe(struct platform_device *pdev)
priv->sysctrl_base = NULL;
}
- if (priv->hw_version == MVPP22 &&
- mvpp2_get_nrxqs(priv) * 2 <= MVPP2_BM_MAX_POOLS)
- priv->percpu_pools = 1;
-
- mvpp2_setup_bm_pool();
-
-
priv->nthreads = min_t(unsigned int, num_present_cpus(),
MVPP2_MAX_THREADS);
@@ -6929,18 +8749,15 @@ static int mvpp2_probe(struct platform_device *pdev)
priv->swth_base[i] = base + i * addr_space_sz;
}
- if (priv->hw_version == MVPP21)
- priv->max_port_rxqs = 8;
- else
- priv->max_port_rxqs = 32;
-
if (dev_of_node(&pdev->dev)) {
priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
- if (IS_ERR(priv->pp_clk))
- return PTR_ERR(priv->pp_clk);
+ if (IS_ERR(priv->pp_clk)) {
+ err = PTR_ERR(priv->pp_clk);
+ goto err_cm3;
+ }
err = clk_prepare_enable(priv->pp_clk);
if (err < 0)
- return err;
+ goto err_cm3;
priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
if (IS_ERR(priv->gop_clk)) {
@@ -6951,7 +8768,7 @@ static int mvpp2_probe(struct platform_device *pdev)
if (err < 0)
goto err_pp_clk;
- if (priv->hw_version == MVPP22) {
+ if (priv->hw_version != MVPP21) {
priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
if (IS_ERR(priv->mg_clk)) {
err = PTR_ERR(priv->mg_clk);
@@ -6992,10 +8809,39 @@ static int mvpp2_probe(struct platform_device *pdev)
return -EINVAL;
}
- if (priv->hw_version == MVPP22) {
+ if (priv->hw_version != MVPP21) {
+ if (mvpp2_read(priv, MVPP2_VER_ID_REG) == MVPP2_VER_PP23)
+ priv->hw_version = MVPP23;
+ }
+
+ if (priv->hw_version == MVPP21)
+ priv->max_port_rxqs = 8;
+ else
+ priv->max_port_rxqs = 32;
+
+ priv->custom_dma_mask = false;
+ if (priv->hw_version != MVPP21) {
+ /* If dma_mask points to coherent_dma_mask, setting both will
+ * override the value of the other. This is problematic as the
+ * PPv2 driver uses a 32-bit-mask for coherent accesses (txq,
+ * rxq, bm) and a 40-bit mask for all other accesses.
+ */
+ if (pdev->dev.dma_mask == &pdev->dev.coherent_dma_mask) {
+ pdev->dev.dma_mask =
+ kzalloc(sizeof(*pdev->dev.dma_mask),
+ GFP_KERNEL);
+ if (!pdev->dev.dma_mask) {
+ err = -ENOMEM;
+ goto err_mg_clk;
+ }
+
+ priv->custom_dma_mask = true;
+ }
+
err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK);
if (err)
- goto err_axi_clk;
+ goto err_dma_mask;
+
/* Sadly, the BM pools all share the same register to
* store the high 32 bits of their address. So they
* must all have the same high 32 bits, which forces
@@ -7003,9 +8849,35 @@ static int mvpp2_probe(struct platform_device *pdev)
*/
err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err)
- goto err_axi_clk;
+ goto err_dma_mask;
+ }
+
+ /* Assign the reserved memory region to the device for DMA allocations,
+ * if a memory-region phandle is found.
+ */
+ if (dev_of_node(&pdev->dev))
+ of_reserved_mem_device_init_by_idx(&pdev->dev,
+ pdev->dev.of_node, 0);
+
+ /* Configure static-key (jump label) branches */
+ if (priv->hw_version == MVPP21)
+ static_branch_enable(&mvpp21_variant);
+ if (recycle) {
+ dev_info(&pdev->dev,
+ "kernel space packet recycling feature enabled\n");
+ static_branch_enable(&mvpp2_recycle_ena);
+ }
+ /* else - keep the DEFINE_STATIC_KEY_FALSE */
+
+ /* Map the ports that are active in the DTS; this needs to happen before the FIFO setup in mvpp2_init() */
+ fwnode_for_each_available_child_node(fwnode, port_fwnode) {
+ if (!fwnode_property_read_u32(port_fwnode, "port-id", &i))
+ priv->port_map |= BIT(i);
}
+ /* Init mss lock */
+ spin_lock_init(&priv->mss_spinlock);
+
/* Initialize network controller */
err = mvpp2_init(pdev, priv);
if (err < 0) {
@@ -7045,6 +8917,12 @@ static int mvpp2_probe(struct platform_device *pdev)
goto err_port_probe;
}
+ if (priv->global_tx_fc && priv->hw_version != MVPP21) {
+ err = mvpp2_enable_global_fc(priv);
+ if (err)
+ dev_warn(&pdev->dev, "CM3 firmware not running, TX FC disabled\n");
+ }
+
mvpp2_dbgfs_init(priv, pdev->name);
platform_set_drvdata(pdev, priv);
@@ -7059,19 +8937,29 @@ err_port_probe:
mvpp2_port_remove(priv->port_list[i]);
i++;
}
+err_dma_mask:
+ if (priv->custom_dma_mask) {
+ kfree(pdev->dev.dma_mask);
+ pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+ }
err_axi_clk:
clk_disable_unprepare(priv->axi_clk);
err_mg_core_clk:
- if (priv->hw_version == MVPP22)
+ if (priv->hw_version != MVPP21)
clk_disable_unprepare(priv->mg_core_clk);
err_mg_clk:
- if (priv->hw_version == MVPP22)
+ if (priv->hw_version != MVPP21)
clk_disable_unprepare(priv->mg_clk);
err_gop_clk:
clk_disable_unprepare(priv->gop_clk);
err_pp_clk:
clk_disable_unprepare(priv->pp_clk);
+err_cm3:
+ if (!has_acpi_companion(&pdev->dev) && priv->cm3_base)
+ gen_pool_free(priv->sram_pool, (unsigned long)priv->cm3_base,
+ MSS_SRAM_SIZE);
+
return err;
}
@@ -7079,11 +8967,14 @@ static int mvpp2_remove(struct platform_device *pdev)
{
struct mvpp2 *priv = platform_get_drvdata(pdev);
struct fwnode_handle *fwnode = pdev->dev.fwnode;
- int i = 0, poolnum = MVPP2_BM_POOLS_NUM;
struct fwnode_handle *port_fwnode;
+ int i = 0;
mvpp2_dbgfs_cleanup(priv);
+ flush_workqueue(priv->stats_queue);
+ destroy_workqueue(priv->stats_queue);
+
fwnode_for_each_available_child_node(fwnode, port_fwnode) {
if (priv->port_list[i]) {
mutex_destroy(&priv->port_list[i]->gather_stats_lock);
@@ -7092,15 +8983,10 @@ static int mvpp2_remove(struct platform_device *pdev)
i++;
}
- destroy_workqueue(priv->stats_queue);
-
- if (priv->percpu_pools)
- poolnum = mvpp2_get_nrxqs(priv) * 2;
-
- for (i = 0; i < poolnum; i++) {
+ for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];
- mvpp2_bm_pool_destroy(&pdev->dev, priv, bm_pool);
+ mvpp2_bm_pool_destroy(pdev, priv, bm_pool);
}
for (i = 0; i < MVPP2_MAX_THREADS; i++) {
@@ -7112,6 +8998,17 @@ static int mvpp2_remove(struct platform_device *pdev)
aggr_txq->descs_dma);
}
+ if (priv->custom_dma_mask) {
+ kfree(pdev->dev.dma_mask);
+ pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+ }
+
+ if (!has_acpi_companion(&pdev->dev)) {
+ gen_pool_free(priv->sram_pool, (unsigned long)priv->cm3_base,
+ MSS_SRAM_SIZE);
+ gen_pool_destroy(priv->sram_pool);
+ }
+
if (is_acpi_node(port_fwnode))
return 0;
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
index dd590086fe6a..1e1d70fffcdc 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
@@ -11,6 +11,7 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
+#include <linux/mpls.h>
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>
@@ -198,6 +199,19 @@ static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
}
+/* Set u32 in tcam sw entry */
+static void mvpp2_prs_tcam_data_u32_set(struct mvpp2_prs_entry *pe,
+ u32 val, u32 mask)
+{
+ int i;
+
+ for (i = sizeof(u32) - 1; i >= 0; --i) {
+ mvpp2_prs_tcam_data_byte_set(pe, i, val & 0xff, mask & 0xff);
+ mask >>= 8;
+ val >>= 8;
+ }
+}
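An editorial aside (not part of the patch): the helper above spreads the
32-bit value across TCAM data bytes 0..3 with byte 0 holding the most
significant byte; a minimal sketch of the same decomposition:

#include <stdio.h>

int main(void)
{
	unsigned int val = 0x12345678;
	int i;

	/* Mirrors mvpp2_prs_tcam_data_u32_set(): iterate from byte 3
	 * down to byte 0, peeling one byte of the value per step.
	 */
	for (i = 3; i >= 0; i--) {
		printf("byte %d = 0x%02x\n", i, val & 0xff);
		val >>= 8;
	}
	return 0;	/* byte 3 = 0x78 ... byte 0 = 0x12 */
}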
+
/* Set vid in tcam sw entry */
static void mvpp2_prs_match_vid(struct mvpp2_prs_entry *pe, int offset,
unsigned short vid)
@@ -406,11 +420,12 @@ static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
}
/* Drop flow control pause frames */
-static void mvpp2_prs_drop_fc(struct mvpp2 *priv)
+static void mv_pp2x_prs_drop_fc(struct mvpp2 *priv)
{
- unsigned char da[ETH_ALEN] = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x01 };
struct mvpp2_prs_entry pe;
unsigned int len;
+ unsigned char da[ETH_ALEN] = {
+ 0x01, 0x80, 0xC2, 0x00, 0x00, 0x01 };
memset(&pe, 0, sizeof(pe));
@@ -559,12 +574,8 @@ static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
/* Set ai bits for next iteration */
- if (extend)
- mvpp2_prs_sram_ai_update(&pe, 1,
- MVPP2_PRS_SRAM_AI_MASK);
- else
- mvpp2_prs_sram_ai_update(&pe, 0,
- MVPP2_PRS_SRAM_AI_MASK);
+ mvpp2_prs_sram_ai_update(&pe, extend,
+ MVPP2_PRS_SRAM_AI_MASK);
/* Set result info bits to 'single vlan' */
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE,
@@ -914,15 +925,14 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
pe.index = tid;
- /* Set next lu to IPv4 */
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
- mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
- /* Set L4 offset */
- mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
- sizeof(struct iphdr) - 4,
+ /* Finished: go to flowid generation */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+
+ /* Set L3 offset */
+ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, -4,
MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
- mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
- MVPP2_PRS_IPV4_DIP_AI_BIT);
+ mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
mvpp2_prs_sram_ri_update(&pe, ri, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00,
@@ -931,7 +941,8 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
MVPP2_PRS_TCAM_PROTO_MASK);
mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
- mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
+ mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
+ MVPP2_PRS_IPV4_DIP_AI_BIT);
/* Unmask all ports */
mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
@@ -999,12 +1010,17 @@ static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
return -EINVAL;
}
- /* Finished: go to flowid generation */
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
- mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ /* Go again to ipv4 */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
- mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
+ mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
MVPP2_PRS_IPV4_DIP_AI_BIT);
+
+ /* Shift back to IPv4 proto */
+ mvpp2_prs_sram_shift_set(&pe, -12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+ mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
+
/* Unmask all ports */
mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
@@ -1166,6 +1182,21 @@ static void mvpp2_prs_mh_init(struct mvpp2 *priv)
/* Update shadow table and hw entry */
mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
mvpp2_prs_hw_write(priv, &pe);
+
+ /* Set MH entry that skip parser */
+ pe.index = MVPP2_PE_MH_SKIP_PRS;
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+
+ /* Mask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, 0);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
+ mvpp2_prs_hw_write(priv, &pe);
}
/* Set default entries (placeholder) for promiscuous, non-promiscuous and
@@ -1194,7 +1225,7 @@ static void mvpp2_prs_mac_init(struct mvpp2 *priv)
mvpp2_prs_hw_write(priv, &pe);
/* Create dummy entries for drop all and promiscuous modes */
- mvpp2_prs_drop_fc(priv);
+ mv_pp2x_prs_drop_fc(priv);
mvpp2_prs_mac_drop_all_set(priv, 0, false);
mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false);
mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false);
@@ -1314,7 +1345,8 @@ static void mvpp2_prs_vid_init(struct mvpp2 *priv)
static int mvpp2_prs_etype_init(struct mvpp2 *priv)
{
struct mvpp2_prs_entry pe;
- int tid;
+ unsigned short ethertype;
+ int tid, ihl;
/* Ethertype: PPPoE */
tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
@@ -1406,66 +1438,43 @@ static int mvpp2_prs_etype_init(struct mvpp2 *priv)
MVPP2_PRS_RI_UDF3_MASK);
mvpp2_prs_hw_write(priv, &pe);
- /* Ethertype: IPv4 without options */
- tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
- MVPP2_PE_LAST_FREE_TID);
- if (tid < 0)
- return tid;
-
- memset(&pe, 0, sizeof(pe));
- mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
- pe.index = tid;
-
- mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
- mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
- MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
- MVPP2_PRS_IPV4_HEAD_MASK |
- MVPP2_PRS_IPV4_IHL_MASK);
-
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
- mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
- MVPP2_PRS_RI_L3_PROTO_MASK);
- /* Skip eth_type + 4 bytes of IP header */
- mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
- MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
- /* Set L3 offset */
- mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
- MVPP2_ETH_TYPE_LEN,
- MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
-
- /* Update shadow table and hw entry */
- mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
- priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
- priv->prs_shadow[pe.index].finish = false;
- mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
- MVPP2_PRS_RI_L3_PROTO_MASK);
- mvpp2_prs_hw_write(priv, &pe);
-
- /* Ethertype: IPv4 with options */
- tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
- MVPP2_PE_LAST_FREE_TID);
- if (tid < 0)
- return tid;
-
- pe.index = tid;
+ /* Ethertype: IPv4 with header length >= 5 */
+ for (ihl = MVPP2_PRS_IPV4_IHL_MIN; ihl <= MVPP2_PRS_IPV4_IHL_MAX; ihl++) {
+ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
- mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
- MVPP2_PRS_IPV4_HEAD,
- MVPP2_PRS_IPV4_HEAD_MASK);
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
+ pe.index = tid;
- /* Clear ri before updating */
- pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
- pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
- mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
- MVPP2_PRS_RI_L3_PROTO_MASK);
+ mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
+ mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
+ MVPP2_PRS_IPV4_HEAD | ihl,
+ MVPP2_PRS_IPV4_HEAD_MASK |
+ MVPP2_PRS_IPV4_IHL_MASK);
+
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
+ MVPP2_PRS_RI_L3_PROTO_MASK);
+ /* goto ipv4 dst-address (skip eth_type + IP-header-size - 4) */
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN +
+ sizeof(struct iphdr) - 4,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ /* Set L4 offset */
+ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
+ MVPP2_ETH_TYPE_LEN + (ihl * 4),
+ MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
- /* Update shadow table and hw entry */
- mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
- priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
- priv->prs_shadow[pe.index].finish = false;
- mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
- MVPP2_PRS_RI_L3_PROTO_MASK);
- mvpp2_prs_hw_write(priv, &pe);
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
+ priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+ priv->prs_shadow[pe.index].finish = false;
+ mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
+ MVPP2_PRS_RI_L3_PROTO_MASK);
+ mvpp2_prs_hw_write(priv, &pe);
+ }
/* Ethertype: IPv6 without options */
tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
@@ -1498,6 +1507,34 @@ static int mvpp2_prs_etype_init(struct mvpp2 *priv)
MVPP2_PRS_RI_L3_PROTO_MASK);
mvpp2_prs_hw_write(priv, &pe);
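+
+ /* Ethertype: MPLS (unicast and multicast) */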
+ for (ethertype = ETH_P_MPLS_UC; ethertype <= ETH_P_MPLS_MC; ++ethertype) {
+ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
+ pe.index = tid;
+
+ mvpp2_prs_match_etype(&pe, 0, ethertype);
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MPLS);
+
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+ /* Set L3 offset */
+ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+ MVPP2_ETH_TYPE_LEN,
+ MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
+ priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+ priv->prs_shadow[pe.index].finish = false;
+ mvpp2_prs_hw_write(priv, &pe);
+ }
+
/* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
@@ -1609,6 +1646,104 @@ static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
return 0;
}
+/* Set entries for MPLS ethertype */
+static int mvpp2_prs_mpls_init(struct mvpp2 *priv)
+{
+ struct mvpp2_prs_entry pe;
+ int tid;
+
+ /* Add TCAM entry for a stacked (not bottom-of-stack) MPLS label */
+ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MPLS);
+ pe.index = tid;
+
+ mvpp2_prs_tcam_data_u32_set(&pe, 0, MPLS_LS_S_MASK);
+
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_MPLS_HEADER_LEN,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+ /* Set L3 offset */
+ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+ MVPP2_MPLS_HEADER_LEN,
+ MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+ /* If this isn't the last MPLS label, jump to the next MPLS entry */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MPLS);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MPLS);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ /* Add ipv4 MPLS TCAM entry */
+ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MPLS);
+ pe.index = tid;
+
+ mvpp2_prs_tcam_data_u32_set(&pe, MPLS_LABEL_IPV4NULL << MPLS_LS_LABEL_SHIFT,
+ MPLS_LS_LABEL_MASK);
+
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
+
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
+ MVPP2_PRS_RI_L3_PROTO_MASK);
+
+ /* Go to the IPv4 dest address (skip MPLS header + IP-header-size - 4) */
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_MPLS_HEADER_LEN +
+ sizeof(struct iphdr) - 4,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ /* Set L3 offset */
+ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+ MVPP2_MPLS_HEADER_LEN,
+ MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MPLS);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ /* Add ipv6 MPLS TCAM entry */
+ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MPLS);
+ pe.index = tid;
+
+ mvpp2_prs_tcam_data_u32_set(&pe, MPLS_LABEL_IPV6NULL << MPLS_LS_LABEL_SHIFT,
+ MPLS_LS_LABEL_MASK);
+
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
+
+ /* Skip DIP of IPV6 header */
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_MPLS_HEADER_LEN + 8 +
+ MVPP2_MAX_L3_ADDR_SIZE,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
+ MVPP2_PRS_RI_L3_PROTO_MASK);
+ /* Set L3 offset */
+ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+ MVPP2_MPLS_HEADER_LEN,
+ MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MPLS);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ return 0;
+}
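For context, an editorial aside (not part of the patch): the MPLS field and
label constants used above come from <uapi/linux/mpls.h>; a small sketch of
the 32-bit label-stack-entry words the three TCAM entries match on:

#include <stdio.h>
#include <linux/mpls.h>		/* MPLS_LS_* and MPLS_LABEL_* */

int main(void)
{
	unsigned int v4 = MPLS_LABEL_IPV4NULL << MPLS_LS_LABEL_SHIFT;
	unsigned int v6 = MPLS_LABEL_IPV6NULL << MPLS_LS_LABEL_SHIFT;

	/* First entry: S-bit clear (not bottom of stack), any label */
	printf("S-bit mask         0x%08x\n", MPLS_LS_S_MASK);
	/* Second/third entries: IPv4/IPv6 explicit NULL labels */
	printf("IPv4 explicit NULL 0x%08x\n", v4);	/* 0x00000000 */
	printf("IPv6 explicit NULL 0x%08x\n", v6);	/* 0x00002000 */
	return 0;
}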
+
/* Set entries for PPPoE ethertype */
static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
{
@@ -1630,8 +1765,9 @@ static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
MVPP2_PRS_RI_L3_PROTO_MASK);
- /* Skip eth_type + 4 bytes of IP header */
- mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
+ /* goto ipv4 dest-address (skip eth_type + IP-header-size - 4) */
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN +
+ sizeof(struct iphdr) - 4,
MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
/* Set L3 offset */
mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
@@ -1651,7 +1787,8 @@ static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
pe.index = tid;
mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
- MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
+ MVPP2_PRS_IPV4_HEAD |
+ MVPP2_PRS_IPV4_IHL_MIN,
MVPP2_PRS_IPV4_HEAD_MASK |
MVPP2_PRS_IPV4_IHL_MASK);
@@ -1761,19 +1898,19 @@ static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
pe.index = MVPP2_PE_IP4_PROTO_UN;
- /* Set next lu to IPv4 */
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
- mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
- /* Set L4 offset */
- mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
- sizeof(struct iphdr) - 4,
+ /* Finished: go to flowid generation */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+
+ /* Set L3 offset */
+ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, -4,
MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
- mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
- MVPP2_PRS_IPV4_DIP_AI_BIT);
+ mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
MVPP2_PRS_RI_L4_PROTO_MASK);
- mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
+ mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
+ MVPP2_PRS_IPV4_DIP_AI_BIT);
/* Unmask all ports */
mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
@@ -1786,14 +1923,19 @@ static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
pe.index = MVPP2_PE_IP4_ADDR_UN;
- /* Finished: go to flowid generation */
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
- mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ /* Go again to ipv4 */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
+
+ mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
+ MVPP2_PRS_IPV4_DIP_AI_BIT);
+
+ /* Shift back to IPv4 proto */
+ mvpp2_prs_sram_shift_set(&pe, -12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
MVPP2_PRS_RI_L3_ADDR_MASK);
+ mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
- mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
- MVPP2_PRS_IPV4_DIP_AI_BIT);
/* Unmask all ports */
mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
@@ -1831,14 +1973,6 @@ static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
if (err)
return err;
- /* IPv4 is the last header. This is similar case as 6-TCP or 17-UDP */
- /* Result Info: UDF7=1, DS lite */
- err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
- MVPP2_PRS_RI_UDF7_IP6_LITE,
- MVPP2_PRS_RI_UDF7_MASK);
- if (err)
- return err;
-
/* IPv6 multicast */
err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
if (err)
@@ -1940,7 +2074,8 @@ static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
}
/* Find tcam entry with matched pair <vid,port> */
-static int mvpp2_prs_vid_range_find(struct mvpp2_port *port, u16 vid, u16 mask)
+static int mvpp2_prs_vid_range_find(struct mvpp2 *priv, int port_id, u16 vid,
+ u16 mask)
{
unsigned char byte[2], enable[2];
struct mvpp2_prs_entry pe;
@@ -1948,13 +2083,13 @@ static int mvpp2_prs_vid_range_find(struct mvpp2_port *port, u16 vid, u16 mask)
int tid;
/* Go through the all entries with MVPP2_PRS_LU_VID */
- for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
- tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
- if (!port->priv->prs_shadow[tid].valid ||
- port->priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID)
+ for (tid = MVPP2_PRS_VID_PORT_FIRST(port_id);
+ tid <= MVPP2_PRS_VID_PORT_LAST(port_id); tid++) {
+ if (!priv->prs_shadow[tid].valid ||
+ priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID)
continue;
- mvpp2_prs_init_from_hw(port->priv, &pe, tid);
+ mvpp2_prs_init_from_hw(priv, &pe, tid);
mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]);
mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]);
@@ -1984,7 +2119,7 @@ int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid)
memset(&pe, 0, sizeof(pe));
/* Scan TCAM and see if entry with this <vid,port> already exist */
- tid = mvpp2_prs_vid_range_find(port, vid, mask);
+ tid = mvpp2_prs_vid_range_find(priv, port->id, vid, mask);
reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
if (reg_val & MVPP2_DSA_EXTENDED)
@@ -2042,7 +2177,7 @@ void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid)
int tid;
/* Scan TCAM and see if entry with this <vid,port> already exist */
- tid = mvpp2_prs_vid_range_find(port, vid, 0xfff);
+ tid = mvpp2_prs_vid_range_find(priv, port->id, vid, 0xfff);
/* No such entry */
if (tid < 0)
@@ -2060,7 +2195,8 @@ void mvpp2_prs_vid_remove_all(struct mvpp2_port *port)
for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
- if (priv->prs_shadow[tid].valid) {
+ if (priv->prs_shadow[tid].valid &&
+ priv->prs_shadow[tid].lu == MVPP2_PRS_LU_VID) {
mvpp2_prs_hw_inv(priv, tid);
priv->prs_shadow[tid].valid = false;
}
@@ -2190,6 +2326,10 @@ int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv)
if (err)
return err;
+ err = mvpp2_prs_mpls_init(priv);
+ if (err)
+ return err;
+
return 0;
}
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h
index 4b68dd374733..c0da6d645076 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h
@@ -28,7 +28,8 @@
#define MVPP2_PRS_IPV4_MC 0xe0
#define MVPP2_PRS_IPV4_MC_MASK 0xf0
#define MVPP2_PRS_IPV4_BC_MASK 0xff
-#define MVPP2_PRS_IPV4_IHL 0x5
+#define MVPP2_PRS_IPV4_IHL_MIN 0x5
+#define MVPP2_PRS_IPV4_IHL_MAX 0xf
#define MVPP2_PRS_IPV4_IHL_MASK 0xf
#define MVPP2_PRS_IPV6_MC 0xff
#define MVPP2_PRS_IPV6_MC_MASK 0xff
@@ -51,6 +52,7 @@
#define MVPP2_PRS_AI_MASK 0xff
#define MVPP2_PRS_PORT_MASK 0xff
#define MVPP2_PRS_LU_MASK 0xf
+#define MVPP2_PRS_WORD_MASK 0xffff
/* TCAM entries in registers are accessed using 16 data bits + 16 enable bits */
#define MVPP2_PRS_BYTE_TO_WORD(byte) ((byte) / 2)
@@ -103,10 +105,11 @@
#define MVPP2_PE_MAC_RANGE_START (MVPP2_PE_MAC_RANGE_END - \
MVPP2_PRS_MAC_RANGE_SIZE + 1)
/* VLAN filtering range */
-#define MVPP2_PE_VID_FILT_RANGE_END (MVPP2_PRS_TCAM_SRAM_SIZE - 31)
+#define MVPP2_PE_VID_FILT_RANGE_END (MVPP2_PRS_TCAM_SRAM_SIZE - 32)
#define MVPP2_PE_VID_FILT_RANGE_START (MVPP2_PE_VID_FILT_RANGE_END - \
MVPP2_PRS_VLAN_FILT_RANGE_SIZE + 1)
-#define MVPP2_PE_LAST_FREE_TID (MVPP2_PE_MAC_RANGE_START - 1)
+#define MVPP2_PE_LAST_FREE_TID (MVPP2_PE_MAC_RANGE_START - 1)
+#define MVPP2_PE_MH_SKIP_PRS (MVPP2_PRS_TCAM_SRAM_SIZE - 31)
#define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30)
#define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 29)
#define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28)
@@ -263,6 +266,7 @@ enum mvpp2_prs_lookup {
MVPP2_PRS_LU_IP4,
MVPP2_PRS_LU_IP6,
MVPP2_PRS_LU_FLOWS,
+ MVPP2_PRS_LU_MPLS,
MVPP2_PRS_LU_LAST,
};
diff --git a/drivers/net/ethernet/marvell/octeontx2/Kconfig b/drivers/net/ethernet/marvell/octeontx2/Kconfig
index 543a1d047567..c6acb318f4bc 100644
--- a/drivers/net/ethernet/marvell/octeontx2/Kconfig
+++ b/drivers/net/ethernet/marvell/octeontx2/Kconfig
@@ -9,8 +9,8 @@ config OCTEONTX2_MBOX
config OCTEONTX2_AF
tristate "Marvell OcteonTX2 RVU Admin Function driver"
select OCTEONTX2_MBOX
+ select NET_DEVLINK
depends on (64BIT && COMPILE_TEST) || ARM64
- depends on PCI
help
This driver supports Marvell's OcteonTX2 Resource Virtualization
Unit's admin function manager which manages all RVU HW resources
@@ -29,6 +29,7 @@ config NDC_DIS_DYNAMIC_CACHING
config OCTEONTX2_PF
tristate "Marvell OcteonTX2 NIC Physical Function driver"
select OCTEONTX2_MBOX
+ select NET_DEVLINK
depends on (64BIT && COMPILE_TEST) || ARM64
depends on PCI
help
@@ -39,3 +40,11 @@ config OCTEONTX2_VF
depends on OCTEONTX2_PF
help
This driver supports Marvell's OcteonTX2 NIC virtual function.
+
+config OCTEONTX2_BPHY_RFOE_NETDEV
+ tristate "OcteonTX2 BPHY RFOE netdev driver"
+ depends on ARM64
+ help
+ This driver provides support for processing packets received/sent by
+ the BPHY RFOE MHAB, such as eCPRI control, PTP and other Ethernet
+ packets, in the Linux kernel. The remaining packets are processed by
+ the ODP application.
diff --git a/drivers/net/ethernet/marvell/octeontx2/Makefile b/drivers/net/ethernet/marvell/octeontx2/Makefile
index 0064a69e0f72..53743791546c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/Makefile
@@ -6,3 +6,4 @@
obj-$(CONFIG_OCTEONTX2_MBOX) += af/
obj-$(CONFIG_OCTEONTX2_AF) += af/
obj-$(CONFIG_OCTEONTX2_PF) += nic/
+obj-$(CONFIG_OCTEONTX2_BPHY_RFOE_NETDEV) += bphy/
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
index 2f7a861d0c7b..e79230603c0c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
@@ -1,12 +1,16 @@
# SPDX-License-Identifier: GPL-2.0
#
-# Makefile for Marvell's OcteonTX2 RVU Admin Function driver
+# Makefile for Marvell's RVU Admin Function driver
#
ccflags-y += -I$(src)
-obj-$(CONFIG_OCTEONTX2_MBOX) += octeontx2_mbox.o
-obj-$(CONFIG_OCTEONTX2_AF) += octeontx2_af.o
+obj-$(CONFIG_OCTEONTX2_MBOX) += rvu_mbox.o
+obj-$(CONFIG_OCTEONTX2_AF) += rvu_af.o
-octeontx2_mbox-y := mbox.o rvu_trace.o
-octeontx2_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
- rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o
+rvu_mbox-y := mbox.o rvu_trace.o
+rvu_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
+ rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o \
+ rvu_validation.o rvu_sso.o rvu_tim.o \
+ rvu_cpt.o rvu_npc_fs.o rvu_fixes.o \
+ rvu_sdp.o rvu_ree.o rvu_cn10k.o rpm.o rvu_devlink.o \
+ rvu_switch.o
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index fc27a40202c6..48f2a36c90c3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -1,11 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 CGX driver
*
- * Copyright (C) 2018 Marvell International Ltd.
+ * Copyright (C) 2018 Marvell.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/acpi.h>
@@ -14,61 +11,53 @@
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include "cgx.h"
+#include "rvu.h"
+#include "lmac_common.h"
-#define DRV_NAME "octeontx2-cgx"
-#define DRV_STRING "Marvell OcteonTX2 CGX/MAC Driver"
-
-/**
- * struct lmac
- * @wq_cmd_cmplt: waitq to keep the process blocked until cmd completion
- * @cmd_lock: Lock to serialize the command interface
- * @resp: command response
- * @link_info: link related information
- * @event_cb: callback for linkchange events
- * @event_cb_lock: lock for serializing callback with unregister
- * @cmd_pend: flag set before new command is started
- * flag cleared after command response is received
- * @cgx: parent cgx port
- * @lmac_id: lmac port id
- * @name: lmac port name
- */
-struct lmac {
- wait_queue_head_t wq_cmd_cmplt;
- struct mutex cmd_lock;
- u64 resp;
- struct cgx_link_user_info link_info;
- struct cgx_event_cb event_cb;
- spinlock_t event_cb_lock;
- bool cmd_pend;
- struct cgx *cgx;
- u8 lmac_id;
- char *name;
-};
+#define DRV_NAME "Marvell-CGX/RPM"
+#define DRV_STRING "Marvell CGX/RPM Driver"
-struct cgx {
- void __iomem *reg_base;
- struct pci_dev *pdev;
- u8 cgx_id;
- u8 lmac_count;
- struct lmac *lmac_idmap[MAX_LMAC_PER_CGX];
- struct work_struct cgx_cmd_work;
- struct workqueue_struct *cgx_cmd_workq;
- struct list_head cgx_list;
-};
+#define CGX_RX_STAT_GLOBAL_INDEX 9
static LIST_HEAD(cgx_list);
/* Convert firmware speed encoding to user format(Mbps) */
-static u32 cgx_speed_mbps[CGX_LINK_SPEED_MAX];
+static const u32 cgx_speed_mbps[CGX_LINK_SPEED_MAX] = {
+ [CGX_LINK_NONE] = 0,
+ [CGX_LINK_10M] = 10,
+ [CGX_LINK_100M] = 100,
+ [CGX_LINK_1G] = 1000,
+ [CGX_LINK_2HG] = 2500,
+ [CGX_LINK_5G] = 5000,
+ [CGX_LINK_10G] = 10000,
+ [CGX_LINK_20G] = 20000,
+ [CGX_LINK_25G] = 25000,
+ [CGX_LINK_40G] = 40000,
+ [CGX_LINK_50G] = 50000,
+ [CGX_LINK_80G] = 80000,
+ [CGX_LINK_100G] = 100000,
+};
/* Convert firmware lmac type encoding to string */
-static char *cgx_lmactype_string[LMAC_MODE_MAX];
+static const char *cgx_lmactype_string[LMAC_MODE_MAX] = {
+ [LMAC_MODE_SGMII] = "SGMII",
+ [LMAC_MODE_XAUI] = "XAUI",
+ [LMAC_MODE_RXAUI] = "RXAUI",
+ [LMAC_MODE_10G_R] = "10G_R",
+ [LMAC_MODE_40G_R] = "40G_R",
+ [LMAC_MODE_QSGMII] = "QSGMII",
+ [LMAC_MODE_25G_R] = "25G_R",
+ [LMAC_MODE_50G_R] = "50G_R",
+ [LMAC_MODE_100G_R] = "100G_R",
+ [LMAC_MODE_USXGMII] = "USXGMII",
+};
/* CGX PHY management internal APIs */
static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en);
@@ -76,22 +65,63 @@ static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en);
/* Supported devices */
static const struct pci_device_id cgx_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) },
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM) },
{ 0, } /* end of table */
};
MODULE_DEVICE_TABLE(pci, cgx_id_table);
-static void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val)
+static bool is_dev_rpm(void *cgxd)
+{
+ struct cgx *cgx = cgxd;
+
+ return (cgx->pdev->device == PCI_DEVID_CN10K_RPM);
+}
+
+bool is_lmac_valid(struct cgx *cgx, int lmac_id)
+{
+ if (!cgx || lmac_id < 0 || lmac_id >= MAX_LMAC_PER_CGX)
+ return false;
+ return test_bit(lmac_id, &cgx->lmac_bmap);
+}
+
+/* Helper function to get the sequential index of an enabled LMAC
+ * within a CGX
+ */
+static int get_sequence_id_of_lmac(struct cgx *cgx, int lmac_id)
+{
+ int tmp, id = 0;
+
+ for_each_set_bit(tmp, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) {
+ if (tmp == lmac_id)
+ break;
+ id++;
+ }
+
+ return id;
+}
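An editorial illustration (not part of the patch) of what the helper above
returns, using a hypothetical LMAC bitmap:

#include <stdio.h>

int main(void)
{
	unsigned long lmac_bmap = 0xb;	/* LMACs 0, 1 and 3 enabled */
	int lmac_id = 3, tmp, id = 0;

	/* Same walk as get_sequence_id_of_lmac(): count the enabled
	 * LMACs that come before the requested one.
	 */
	for (tmp = 0; tmp < 4; tmp++) {
		if (!(lmac_bmap & (1UL << tmp)))
			continue;
		if (tmp == lmac_id)
			break;
		id++;
	}
	printf("sequence id of LMAC %d is %d\n", lmac_id, id);	/* 2 */
	return 0;
}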
+
+struct mac_ops *get_mac_ops(void *cgxd)
+{
+ if (!cgxd)
+ return cgxd;
+
+ return ((struct cgx *)cgxd)->mac_ops;
+}
+
+void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val)
{
- writeq(val, cgx->reg_base + (lmac << 18) + offset);
+ writeq(val, cgx->reg_base + (lmac << cgx->mac_ops->lmac_offset) +
+ offset);
}
-static u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset)
+u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset)
{
- return readq(cgx->reg_base + (lmac << 18) + offset);
+ return readq(cgx->reg_base + (lmac << cgx->mac_ops->lmac_offset) +
+ offset);
}
-static inline struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx)
+struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx)
{
if (!cgx || lmac_id >= MAX_LMAC_PER_CGX)
return NULL;
@@ -135,6 +165,26 @@ void *cgx_get_pdata(int cgx_id)
return NULL;
}
+void cgx_lmac_write(int cgx_id, int lmac_id, u64 offset, u64 val)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+
+ /* Software must not access disabled LMAC registers */
+ if (!is_lmac_valid(cgx_dev, lmac_id))
+ return;
+ cgx_write(cgx_dev, lmac_id, offset, val);
+}
+
+u64 cgx_lmac_read(int cgx_id, int lmac_id, u64 offset)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+
+ /* Software must not access disabled LMAC registers */
+ if (!is_lmac_valid(cgx_dev, lmac_id))
+ return 0;
+ return cgx_read(cgx_dev, lmac_id, offset);
+}
+
int cgx_get_cgxid(void *cgxd)
{
struct cgx *cgx = cgxd;
@@ -145,6 +195,16 @@ int cgx_get_cgxid(void *cgxd)
return cgx->cgx_id;
}
+u8 cgx_lmac_get_p2x(int cgx_id, int lmac_id)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ u64 cfg;
+
+ cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_CFG);
+
+ return (cfg & CMR_P2X_SEL_MASK) >> CMR_P2X_SEL_SHIFT;
+}
+
/* Ensure the required lock for event queue(where asynchronous events are
* posted) is acquired before calling this API. Else an asynchronous event(with
* latest link status) can reach the destination before this function returns
@@ -172,32 +232,261 @@ static u64 mac2u64 (u8 *mac_addr)
return mac;
}
+static void cfg2mac(u64 cfg, u8 *mac_addr)
+{
+ int i, index = 0;
+
+ for (i = ETH_ALEN - 1; i >= 0; i--, index++)
+ mac_addr[i] = (cfg >> (8 * index)) & 0xFF;
+}
+
int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
{
struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
+ struct mac_ops *mac_ops;
+ int index, id;
u64 cfg;
+ /* access mac_ops to know csr_offset */
+ mac_ops = cgx_dev->mac_ops;
+
/* copy 6bytes from macaddr */
/* memcpy(&cfg, mac_addr, 6); */
cfg = mac2u64 (mac_addr);
- cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (lmac_id * 0x8)),
+ id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+ index = id * lmac->mac_to_index_bmap.max;
+
+ cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)),
cfg | CGX_DMAC_CAM_ADDR_ENABLE | ((u64)lmac_id << 49));
cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
- cfg |= CGX_DMAC_CTL0_CAM_ENABLE;
+ cfg |= (CGX_DMAC_CTL0_CAM_ENABLE | CGX_DMAC_BCAST_MODE |
+ CGX_DMAC_MCAST_MODE);
+ cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
+
+ return 0;
+}
+
+u64 cgx_read_dmac_ctrl(void *cgxd, int lmac_id)
+{
+ struct mac_ops *mac_ops;
+ struct cgx *cgx = cgxd;
+
+ if (!cgxd || !is_lmac_valid(cgxd, lmac_id))
+ return 0;
+
+ cgx = cgxd;
+ /* Get mac_ops to know csr offset */
+ mac_ops = cgx->mac_ops;
+
+ return cgx_read(cgxd, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+}
+
+u64 cgx_read_dmac_entry(void *cgxd, int index)
+{
+ struct mac_ops *mac_ops;
+ struct cgx *cgx;
+
+ if (!cgxd)
+ return 0;
+
+ cgx = cgxd;
+ mac_ops = cgx->mac_ops;
+ return cgx_read(cgx, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 8)));
+}
+
+int cgx_lmac_addr_add(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
+ struct mac_ops *mac_ops;
+ int index, idx;
+ u64 cfg = 0;
+ int id;
+
+ if (!lmac)
+ return -ENODEV;
+
+ mac_ops = cgx_dev->mac_ops;
+ /* Get available index where entry is to be installed */
+ idx = rvu_alloc_rsrc(&lmac->mac_to_index_bmap);
+ if (idx < 0)
+ return idx;
+
+ id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+ index = id * lmac->mac_to_index_bmap.max + idx;
+
+ cfg = mac2u64 (mac_addr);
+ cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
+ cfg |= ((u64)lmac_id << 49);
+ cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg);
+
+ cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+ cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_CAM_ACCEPT);
+
+ if (is_multicast_ether_addr(mac_addr)) {
+ cfg &= ~GENMASK_ULL(2, 1);
+ cfg |= CGX_DMAC_MCAST_MODE_CAM;
+ lmac->mcast_filters_count++;
+ } else if (!lmac->mcast_filters_count) {
+ cfg |= CGX_DMAC_MCAST_MODE;
+ }
+
+ cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
+
+ return idx;
+}
+EXPORT_SYMBOL(cgx_lmac_addr_add);
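An editorial aside (not part of the patch): the DMAC CAM slot used above is
"sequence id * per-LMAC filter count + bitmap index"; with a hypothetical
per-LMAC maximum of 8 entries, sequence id 2 and bitmap slot 3:

#include <stdio.h>

int main(void)
{
	int max = 8;	/* hypothetical mac_to_index_bmap.max */
	int id = 2;	/* sequence id of the LMAC within the CGX */
	int idx = 3;	/* slot returned by rvu_alloc_rsrc() */

	printf("DMAC CAM index = %d\n", id * max + idx);	/* 19 */
	return 0;
}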
+
+int cgx_lmac_addr_reset(u8 cgx_id, u8 lmac_id)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
+ struct mac_ops *mac_ops;
+ u8 index = 0, id;
+ u64 cfg;
+
+ if (!lmac)
+ return -ENODEV;
+
+ mac_ops = cgx_dev->mac_ops;
+ /* Restore index 0 to its default init value as done during
+ * cgx_lmac_init
+ */
+ set_bit(0, lmac->mac_to_index_bmap.bmap);
+
+ id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+ index = id * lmac->mac_to_index_bmap.max + index;
+ cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), 0);
+
+ /* Reset CGXX_CMRX_RX_DMAC_CTL0 register to default state */
+ cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+ cfg &= ~CGX_DMAC_CAM_ACCEPT;
+ cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE);
cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
return 0;
}
+EXPORT_SYMBOL(cgx_lmac_addr_reset);
+
+/* Allows the caller to change the MAC address associated with an index
+ * in the DMAC filter table, including index 0, which is reserved for
+ * the interface MAC address
+ */
+int cgx_lmac_addr_update(u8 cgx_id, u8 lmac_id, u8 *mac_addr, u8 index)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct mac_ops *mac_ops;
+ struct lmac *lmac;
+ u64 cfg;
+ int id;
+
+ lmac = lmac_pdata(lmac_id, cgx_dev);
+ if (!lmac)
+ return -ENODEV;
+
+ mac_ops = cgx_dev->mac_ops;
+ /* Validate the index */
+ if (index >= lmac->mac_to_index_bmap.max)
+ return -EINVAL;
+
+ /* ensure index is already set */
+ if (!test_bit(index, lmac->mac_to_index_bmap.bmap))
+ return -EINVAL;
+
+ id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+ index = id * lmac->mac_to_index_bmap.max + index;
+
+ cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)));
+ cfg &= ~CGX_RX_DMAC_ADR_MASK;
+ cfg |= mac2u64 (mac_addr);
+
+ cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg);
+ return 0;
+}
+
+int cgx_lmac_addr_del(u8 cgx_id, u8 lmac_id, u8 index)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
+ struct mac_ops *mac_ops;
+ u8 mac[ETH_ALEN];
+ u64 cfg;
+ int id;
+
+ if (!lmac)
+ return -ENODEV;
+
+ mac_ops = cgx_dev->mac_ops;
+ /* Validate the index */
+ if (index >= lmac->mac_to_index_bmap.max)
+ return -EINVAL;
+
+ /* Skip deletion for reserved index i.e. index 0 */
+ if (index == 0)
+ return 0;
+
+ rvu_free_rsrc(&lmac->mac_to_index_bmap, index);
+
+ id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+ index = id * lmac->mac_to_index_bmap.max + index;
+
+ /* Read MAC address to check whether it is ucast or mcast */
+ cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)));
+
+ cfg2mac(cfg, mac);
+ if (is_multicast_ether_addr(mac))
+ lmac->mcast_filters_count--;
+
+ if (!lmac->mcast_filters_count) {
+ cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+ cfg &= ~GENMASK_ULL(2, 1);
+ cfg |= CGX_DMAC_MCAST_MODE;
+ cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
+ }
+
+ cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), 0);
+
+ return 0;
+}
+EXPORT_SYMBOL(cgx_lmac_addr_del);
+
+int cgx_lmac_addr_max_entries_get(u8 cgx_id, u8 lmac_id)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
+
+ if (lmac)
+ return lmac->mac_to_index_bmap.max;
+
+ return 0;
+}
+EXPORT_SYMBOL(cgx_lmac_addr_max_entries_get);
u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id)
{
struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
+ struct mac_ops *mac_ops;
+ int index;
u64 cfg;
+ int id;
- cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8);
+ mac_ops = cgx_dev->mac_ops;
+
+ id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+ index = id * lmac->mac_to_index_bmap.max;
+
+ cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8);
return cfg & CGX_RX_DMAC_ADR_MASK;
}
@@ -205,15 +494,28 @@ int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind)
{
struct cgx *cgx = cgxd;
- if (!cgx || lmac_id >= cgx->lmac_count)
+ if (!is_lmac_valid(cgx, lmac_id))
return -ENODEV;
cgx_write(cgx, lmac_id, CGXX_CMRX_RX_ID_MAP, (pkind & 0x3F));
return 0;
}
-static inline u8 cgx_get_lmac_type(struct cgx *cgx, int lmac_id)
+int cgx_get_pkind(void *cgxd, u8 lmac_id, int *pkind)
+{
+ struct cgx *cgx = cgxd;
+
+ if (!is_lmac_valid(cgx, lmac_id))
+ return -ENODEV;
+
+ *pkind = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_ID_MAP);
+ *pkind = *pkind & 0x3F;
+ return 0;
+}
+
+u8 cgx_get_lmac_type(void *cgxd, int lmac_id)
{
+ struct cgx *cgx = cgxd;
u64 cfg;
cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
@@ -227,10 +529,10 @@ int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable)
u8 lmac_type;
u64 cfg;
- if (!cgx || lmac_id >= cgx->lmac_count)
+ if (!is_lmac_valid(cgx, lmac_id))
return -ENODEV;
- lmac_type = cgx_get_lmac_type(cgx, lmac_id);
+ lmac_type = cgx->mac_ops->get_lmac_type(cgx, lmac_id);
if (lmac_type == LMAC_MODE_SGMII || lmac_type == LMAC_MODE_QSGMII) {
cfg = cgx_read(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL);
if (enable)
@@ -252,33 +554,50 @@ int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable)
void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
{
struct cgx *cgx = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx);
+ u16 max_dmac = lmac->mac_to_index_bmap.max;
+ struct mac_ops *mac_ops;
+ int index, i;
u64 cfg = 0;
+ int id;
if (!cgx)
return;
+ id = get_sequence_id_of_lmac(cgx, lmac_id);
+
+ mac_ops = cgx->mac_ops;
if (enable) {
/* Enable promiscuous mode on LMAC */
cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
- cfg &= ~(CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE);
- cfg |= CGX_DMAC_BCAST_MODE;
+ cfg &= ~CGX_DMAC_CAM_ACCEPT;
+ cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE);
cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
- cfg = cgx_read(cgx, 0,
- (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8));
- cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE;
- cgx_write(cgx, 0,
- (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg);
+ for (i = 0; i < max_dmac; i++) {
+ index = id * max_dmac + i;
+ cfg = cgx_read(cgx, 0,
+ (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8));
+ cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE;
+ cgx_write(cgx, 0,
+ (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8), cfg);
+ }
} else {
/* Disable promiscuous mode */
cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
cfg |= CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE;
cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
- cfg = cgx_read(cgx, 0,
- (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8));
- cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
- cgx_write(cgx, 0,
- (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg);
+ for (i = 0; i < max_dmac; i++) {
+ index = id * max_dmac + i;
+ cfg = cgx_read(cgx, 0,
+ (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8));
+ if ((cfg & CGX_RX_DMAC_ADR_MASK) != 0) {
+ cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
+ cgx_write(cgx, 0,
+ (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8),
+ cfg);
+ }
+ }
}
}
@@ -286,27 +605,54 @@ void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable)
{
struct cgx *cgx = cgxd;
+ u8 rx_pause, tx_pause;
+ bool is_pfc_enabled;
+ struct lmac *lmac;
u64 cfg;
if (!cgx)
return;
- if (enable) {
- cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
- cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
- cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+ lmac = lmac_pdata(lmac_id, cgx);
+ if (!lmac)
+ return;
- cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
- cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
- cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ /* Pause frames are not enabled, just return */
+ if (!bitmap_weight(lmac->rx_fc_pfvf_bmap.bmap, lmac->rx_fc_pfvf_bmap.max))
+ return;
+
+ cgx_lmac_get_pause_frm_status(cgx, lmac_id, &rx_pause, &tx_pause);
+ is_pfc_enabled = rx_pause ? false : true;
+
+ if (enable) {
+ if (!is_pfc_enabled) {
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ } else {
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
+ cfg |= CGXX_SMUX_CBFC_CTL_BCK_EN;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
+ }
} else {
- cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
- cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
- cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
- cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
- cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
- cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ if (!is_pfc_enabled) {
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ } else {
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
+ cfg &= ~CGXX_SMUX_CBFC_CTL_BCK_EN;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
+ }
}
}
@@ -314,8 +660,13 @@ int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat)
{
struct cgx *cgx = cgxd;
- if (!cgx || lmac_id >= cgx->lmac_count)
+ if (!is_lmac_valid(cgx, lmac_id))
return -ENODEV;
+
+ /* pass lmac as 0 for CGX_CMR_RX_STAT9-12 */
+ if (idx >= CGX_RX_STAT_GLOBAL_INDEX)
+ lmac_id = 0;
+
*rx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_STAT0 + (idx * 8));
return 0;
}
@@ -324,25 +675,119 @@ int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat)
{
struct cgx *cgx = cgxd;
- if (!cgx || lmac_id >= cgx->lmac_count)
+ if (!is_lmac_valid(cgx, lmac_id))
return -ENODEV;
+
*tx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_TX_STAT0 + (idx * 8));
return 0;
}
+int cgx_stats_rst(void *cgxd, int lmac_id)
+{
+ struct cgx *cgx = cgxd;
+ int stat_id;
+
+ if (!is_lmac_valid(cgx, lmac_id))
+ return -ENODEV;
+
+ for (stat_id = 0 ; stat_id < CGX_RX_STATS_COUNT; stat_id++) {
+ if (stat_id >= CGX_RX_STAT_GLOBAL_INDEX)
+ /* pass lmac as 0 for CGX_CMR_RX_STAT9-12 */
+ cgx_write(cgx, 0,
+ (CGXX_CMRX_RX_STAT0 + (stat_id * 8)), 0);
+ else
+ cgx_write(cgx, lmac_id,
+ (CGXX_CMRX_RX_STAT0 + (stat_id * 8)), 0);
+ }
+
+ for (stat_id = 0 ; stat_id < CGX_TX_STATS_COUNT; stat_id++)
+ cgx_write(cgx, lmac_id, CGXX_CMRX_TX_STAT0 + (stat_id * 8), 0);
+
+ return 0;
+}
+
+u64 cgx_features_get(void *cgxd)
+{
+ return ((struct cgx *)cgxd)->hw_features;
+}
+
+static int cgx_set_fec_stats_count(struct cgx_link_user_info *linfo)
+{
+ if (!linfo->fec)
+ return 0;
+
+ switch (linfo->lmac_type_id) {
+ case LMAC_MODE_SGMII:
+ case LMAC_MODE_XAUI:
+ case LMAC_MODE_RXAUI:
+ case LMAC_MODE_QSGMII:
+ return 0;
+ case LMAC_MODE_10G_R:
+ case LMAC_MODE_25G_R:
+ case LMAC_MODE_100G_R:
+ case LMAC_MODE_USXGMII:
+ return 1;
+ case LMAC_MODE_40G_R:
+ return 4;
+ case LMAC_MODE_50G_R:
+ if (linfo->fec == OTX2_FEC_BASER)
+ return 2;
+ else
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+int cgx_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp)
+{
+ int stats, fec_stats_count = 0;
+ int corr_reg, uncorr_reg;
+ struct cgx *cgx = cgxd;
+
+ if (!is_lmac_valid(cgx, lmac_id))
+ return -ENODEV;
+ fec_stats_count =
+ cgx_set_fec_stats_count(&cgx->lmac_idmap[lmac_id]->link_info);
+ if (cgx->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_BASER) {
+ corr_reg = CGXX_SPUX_LNX_FEC_CORR_BLOCKS;
+ uncorr_reg = CGXX_SPUX_LNX_FEC_UNCORR_BLOCKS;
+ } else {
+ corr_reg = CGXX_SPUX_RSFEC_CORR;
+ uncorr_reg = CGXX_SPUX_RSFEC_UNCORR;
+ }
+ for (stats = 0; stats < fec_stats_count; stats++) {
+ rsp->fec_corr_blks +=
+ cgx_read(cgx, lmac_id, corr_reg + (stats * 8));
+ rsp->fec_uncorr_blks +=
+ cgx_read(cgx, lmac_id, uncorr_reg + (stats * 8));
+ }
+ return 0;
+}
+
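A hedged usage sketch (the wrapper name is an assumption): cgx_set_fec_stats_count() decides how many per-lane counter pairs the loop above sums, e.g. four for 40G_R and one for 10G/25G/100G_R.

        /* Illustrative caller: report accumulated FEC block counters. */
        static void show_fec_stats(void *cgxd, int lmac_id)
        {
                struct cgx_fec_stats_rsp rsp = { 0 };

                if (!cgx_get_fec_stats(cgxd, lmac_id, &rsp))
                        pr_info("FEC corrected %llu uncorrected %llu\n",
                                rsp.fec_corr_blks, rsp.fec_uncorr_blks);
        }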
+u64 cgx_get_lmac_tx_fifo_status(void *cgxd, int lmac_id)
+{
+ struct cgx *cgx = cgxd;
+
+ if (!is_lmac_valid(cgx, lmac_id))
+ return 0;
+ return cgx_read(cgx, lmac_id, CGXX_CMRX_TX_FIFO_LEN);
+}
+EXPORT_SYMBOL(cgx_get_lmac_tx_fifo_status);
+
int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable)
{
struct cgx *cgx = cgxd;
u64 cfg;
- if (!cgx || lmac_id >= cgx->lmac_count)
+ if (!is_lmac_valid(cgx, lmac_id))
return -ENODEV;
cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
if (enable)
- cfg |= CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN;
+ cfg |= DATA_PKT_RX_EN | DATA_PKT_TX_EN;
else
- cfg &= ~(CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN);
+ cfg &= ~(DATA_PKT_RX_EN | DATA_PKT_TX_EN);
cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
return 0;
}
@@ -352,7 +797,7 @@ int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable)
struct cgx *cgx = cgxd;
u64 cfg, last;
- if (!cgx || lmac_id >= cgx->lmac_count)
+ if (!is_lmac_valid(cgx, lmac_id))
return -ENODEV;
cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
@@ -367,15 +812,32 @@ int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable)
return !!(last & DATA_PKT_TX_EN);
}
-int cgx_lmac_get_pause_frm(void *cgxd, int lmac_id,
- u8 *tx_pause, u8 *rx_pause)
+static int cgx_lmac_get_higig2_pause_frm_status(void *cgxd, int lmac_id,
+ u8 *tx_pause, u8 *rx_pause)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_HG2_CONTROL);
+
+ *rx_pause = !!(cfg & CGXX_SMUX_HG2_CONTROL_RX_ENABLE);
+ *tx_pause = !!(cfg & CGXX_SMUX_HG2_CONTROL_TX_ENABLE);
+ return 0;
+}
+
+int cgx_lmac_get_pause_frm_status(void *cgxd, int lmac_id,
+ u8 *tx_pause, u8 *rx_pause)
{
struct cgx *cgx = cgxd;
u64 cfg;
- if (!cgx || lmac_id >= cgx->lmac_count)
+ if (!is_lmac_valid(cgx, lmac_id))
return -ENODEV;
+ if (is_higig2_enabled(cgxd, lmac_id))
+ return cgx_lmac_get_higig2_pause_frm_status(cgxd, lmac_id,
+ tx_pause, rx_pause);
+
cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
*rx_pause = !!(cfg & CGX_SMUX_RX_FRM_CTL_CTL_BCK);
@@ -384,14 +846,51 @@ int cgx_lmac_get_pause_frm(void *cgxd, int lmac_id,
return 0;
}
-int cgx_lmac_set_pause_frm(void *cgxd, int lmac_id,
- u8 tx_pause, u8 rx_pause)
+static int cgx_lmac_enadis_higig2_pause_frm(void *cgxd, int lmac_id,
+ u8 tx_pause, u8 rx_pause)
{
struct cgx *cgx = cgxd;
u64 cfg;
- if (!cgx || lmac_id >= cgx->lmac_count)
- return -ENODEV;
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_HG2_CONTROL);
+ cfg &= ~CGXX_SMUX_HG2_CONTROL_RX_ENABLE;
+ cfg |= rx_pause ? CGXX_SMUX_HG2_CONTROL_RX_ENABLE : 0x0;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_HG2_CONTROL, cfg);
+
+ /* Forward PAUSE information to TX block */
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
+ cfg |= rx_pause ? CGX_SMUX_RX_FRM_CTL_CTL_BCK : 0x0;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_HG2_CONTROL);
+ cfg &= ~CGXX_SMUX_HG2_CONTROL_TX_ENABLE;
+ cfg |= tx_pause ? CGXX_SMUX_HG2_CONTROL_TX_ENABLE : 0x0;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_HG2_CONTROL, cfg);
+
+ /* allow intra packet hg2 generation */
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL);
+ cfg &= ~CGXX_SMUX_TX_PAUSE_PKT_HG2_INTRA_EN;
+ cfg |= tx_pause ? CGXX_SMUX_TX_PAUSE_PKT_HG2_INTRA_EN : 0x0;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL, cfg);
+
+ cfg = cgx_read(cgx, 0, CGXX_CMR_RX_OVR_BP);
+ if (tx_pause) {
+ cfg &= ~CGX_CMR_RX_OVR_BP_EN(lmac_id);
+ } else {
+ cfg |= CGX_CMR_RX_OVR_BP_EN(lmac_id);
+ cfg &= ~CGX_CMR_RX_OVR_BP_BP(lmac_id);
+ }
+ cgx_write(cgx, 0, CGXX_CMR_RX_OVR_BP, cfg);
+
+ return 0;
+}
+
+static int cgx_lmac_enadis_8023_pause_frm(void *cgxd, int lmac_id,
+ u8 tx_pause, u8 rx_pause)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
@@ -411,30 +910,37 @@ int cgx_lmac_set_pause_frm(void *cgxd, int lmac_id,
cfg &= ~CGX_CMR_RX_OVR_BP_BP(lmac_id);
}
cgx_write(cgx, 0, CGXX_CMR_RX_OVR_BP, cfg);
+
return 0;
}
-static void cgx_lmac_pause_frm_config(struct cgx *cgx, int lmac_id, bool enable)
+int cgx_lmac_enadis_pause_frm(void *cgxd, int lmac_id,
+ u8 tx_pause, u8 rx_pause)
{
- u64 cfg;
+ struct cgx *cgx = cgxd;
- if (!cgx || lmac_id >= cgx->lmac_count)
- return;
- if (enable) {
- /* Enable receive pause frames */
- cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
- cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
- cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ if (!is_lmac_valid(cgx, lmac_id))
+ return -ENODEV;
- cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
- cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
- cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+ if (is_higig2_enabled(cgxd, lmac_id))
+ return cgx_lmac_enadis_higig2_pause_frm(cgxd, lmac_id,
+ tx_pause, rx_pause);
+ else
+ return cgx_lmac_enadis_8023_pause_frm(cgxd, lmac_id,
+ tx_pause, rx_pause);
+ return 0;
+}
+EXPORT_SYMBOL(cgx_lmac_enadis_pause_frm);
- /* Enable pause frames transmission */
- cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
- cfg |= CGX_SMUX_TX_CTL_L2P_BP_CONV;
- cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);
+void cgx_lmac_pause_frm_config(void *cgxd, int lmac_id, bool enable)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+ if (!is_lmac_valid(cgx, lmac_id))
+ return;
+
+ if (enable) {
/* Set pause time and interval */
cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_TIME,
DEFAULT_PAUSE_TIME);
@@ -443,6 +949,12 @@ static void cgx_lmac_pause_frm_config(struct cgx *cgx, int lmac_id, bool enable)
cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL,
cfg | (DEFAULT_PAUSE_TIME / 2));
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL);
+ cfg = FIELD_SET(HG2_INTRA_INTERVAL, (DEFAULT_PAUSE_TIME / 2),
+ cfg);
+ cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL,
+ cfg);
+
cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_TIME,
DEFAULT_PAUSE_TIME);
@@ -451,21 +963,128 @@ static void cgx_lmac_pause_frm_config(struct cgx *cgx, int lmac_id, bool enable)
cfg &= ~0xFFFFULL;
cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL,
cfg | (DEFAULT_PAUSE_TIME / 2));
- } else {
- /* ALL pause frames received are completely ignored */
- cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
- cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
- cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ }
- cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
- cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
- cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+ /* ALL pause frames received are completely ignored */
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_HG2_CONTROL);
+ cfg &= ~CGXX_SMUX_HG2_CONTROL_RX_ENABLE;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_HG2_CONTROL, cfg);
+
+ /* Disable pause frames transmission */
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
+ cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_HG2_CONTROL);
+ cfg &= ~CGXX_SMUX_HG2_CONTROL_TX_ENABLE;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_HG2_CONTROL, cfg);
+
+ cfg = cgx_read(cgx, 0, CGXX_CMR_RX_OVR_BP);
+ cfg |= CGX_CMR_RX_OVR_BP_EN(lmac_id);
+ cfg &= ~CGX_CMR_RX_OVR_BP_BP(lmac_id);
+ cgx_write(cgx, 0, CGXX_CMR_RX_OVR_BP, cfg);
+}
+
+int verify_lmac_fc_cfg(void *cgxd, int lmac_id, u8 tx_pause, u8 rx_pause,
+ int pfvf_idx)
+{
+ struct cgx *cgx = cgxd;
+ struct lmac *lmac;
+
+ lmac = lmac_pdata(lmac_id, cgx);
+ if (!lmac)
+ return -ENODEV;
+
+ if (!rx_pause)
+ clear_bit(pfvf_idx, lmac->rx_fc_pfvf_bmap.bmap);
+ else
+ set_bit(pfvf_idx, lmac->rx_fc_pfvf_bmap.bmap);
+
+ if (!tx_pause)
+ clear_bit(pfvf_idx, lmac->tx_fc_pfvf_bmap.bmap);
+ else
+ set_bit(pfvf_idx, lmac->tx_fc_pfvf_bmap.bmap);
+
+ /* check if other pfvfs are using flow control */
+ if (!rx_pause && bitmap_weight(lmac->rx_fc_pfvf_bmap.bmap, lmac->rx_fc_pfvf_bmap.max)) {
+ dev_warn(&cgx->pdev->dev,
+ "Receive Flow control disable not permitted as its used by other PFVFs\n");
+ return -EPERM;
+ }
+
+ if (!tx_pause && bitmap_weight(lmac->tx_fc_pfvf_bmap.bmap, lmac->tx_fc_pfvf_bmap.max)) {
+ dev_warn(&cgx->pdev->dev,
+ "Transmit Flow control disable not permitted as its used by other PFVFs\n");
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+int cgx_lmac_pfc_config(void *cgxd, int lmac_id, u8 tx_pause,
+ u8 rx_pause, u16 pfc_en)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ if (!is_lmac_valid(cgx, lmac_id))
+ return -ENODEV;
+
+ /* Return as no traffic classes are requested */
+ if (tx_pause && !pfc_en)
+ return 0;
- /* Disable pause frames transmission */
- cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
- cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
- cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
+
+ if (rx_pause) {
+ cfg |= (CGXX_SMUX_CBFC_CTL_RX_EN |
+ CGXX_SMUX_CBFC_CTL_BCK_EN |
+ CGXX_SMUX_CBFC_CTL_DRP_EN);
+ } else {
+ cfg &= ~(CGXX_SMUX_CBFC_CTL_RX_EN |
+ CGXX_SMUX_CBFC_CTL_BCK_EN |
+ CGXX_SMUX_CBFC_CTL_DRP_EN);
}
+
+ if (tx_pause)
+ cfg |= CGXX_SMUX_CBFC_CTL_TX_EN;
+ else
+ cfg &= ~CGXX_SMUX_CBFC_CTL_TX_EN;
+
+ cfg = FIELD_SET(CGX_PFC_CLASS_MASK, pfc_en, cfg);
+
+ cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
+
+ /* Write source MAC address which will be filled into PFC packet */
+ cfg = cgx_lmac_addr_get(cgx->cgx_id, lmac_id);
+ cgx_write(cgx, lmac_id, CGXX_SMUX_SMAC, cfg);
+
+ return 0;
+}
+
+int cgx_lmac_get_pfc_frm_cfg(void *cgxd, int lmac_id, u8 *tx_pause,
+ u8 *rx_pause)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ if (!is_lmac_valid(cgx, lmac_id))
+ return -ENODEV;
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
+
+ *rx_pause = !!(cfg & CGXX_SMUX_CBFC_CTL_RX_EN);
+ *tx_pause = !!(cfg & CGXX_SMUX_CBFC_CTL_TX_EN);
+
+ return 0;
}
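A minimal sketch of the new PFC knobs (the wrapper function is illustrative): pfc_en is a bitmap of 802.1p priorities that lands in CGX_PFC_CLASS_MASK, and the get helper reads back the CBFC TX/RX enables.

        /* Illustrative caller: enable PFC for priorities 3 and 4. */
        static int enable_pfc_prio_3_4(void *cgxd, int lmac_id)
        {
                u16 pfc_en = BIT(3) | BIT(4);
                u8 tx_pause, rx_pause;
                int err;

                err = cgx_lmac_pfc_config(cgxd, lmac_id, 1, 1, pfc_en);
                if (err)
                        return err;

                return cgx_lmac_get_pfc_frm_cfg(cgxd, lmac_id, &tx_pause, &rx_pause);
        }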
void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable)
@@ -498,7 +1117,7 @@ void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable)
}
/* CGX Firmware interface low level support */
-static int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
+int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
{
struct cgx *cgx = lmac->cgx;
struct device *dev;
@@ -530,9 +1149,9 @@ static int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
if (!wait_event_timeout(lmac->wq_cmd_cmplt, !lmac->cmd_pend,
msecs_to_jiffies(CGX_CMD_TIMEOUT))) {
dev = &cgx->pdev->dev;
- dev_err(dev, "cgx port %d:%d cmd timeout\n",
- cgx->cgx_id, lmac->lmac_id);
- err = -EIO;
+ dev_err(dev, "cgx port %d:%d cmd %lld timeout\n",
+ cgx->cgx_id, lmac->lmac_id, FIELD_GET(CMDREG_ID, req));
+ err = LMAC_AF_ERR_CMD_TIMEOUT;
goto unlock;
}
@@ -546,8 +1165,7 @@ unlock:
return err;
}
-static inline int cgx_fwi_cmd_generic(u64 req, u64 *resp,
- struct cgx *cgx, int lmac_id)
+int cgx_fwi_cmd_generic(u64 req, u64 *resp, struct cgx *cgx, int lmac_id)
{
struct lmac *lmac;
int err;
@@ -569,43 +1187,229 @@ static inline int cgx_fwi_cmd_generic(u64 req, u64 *resp,
return err;
}
-static inline void cgx_link_usertable_init(void)
-{
- cgx_speed_mbps[CGX_LINK_NONE] = 0;
- cgx_speed_mbps[CGX_LINK_10M] = 10;
- cgx_speed_mbps[CGX_LINK_100M] = 100;
- cgx_speed_mbps[CGX_LINK_1G] = 1000;
- cgx_speed_mbps[CGX_LINK_2HG] = 2500;
- cgx_speed_mbps[CGX_LINK_5G] = 5000;
- cgx_speed_mbps[CGX_LINK_10G] = 10000;
- cgx_speed_mbps[CGX_LINK_20G] = 20000;
- cgx_speed_mbps[CGX_LINK_25G] = 25000;
- cgx_speed_mbps[CGX_LINK_40G] = 40000;
- cgx_speed_mbps[CGX_LINK_50G] = 50000;
- cgx_speed_mbps[CGX_LINK_100G] = 100000;
-
- cgx_lmactype_string[LMAC_MODE_SGMII] = "SGMII";
- cgx_lmactype_string[LMAC_MODE_XAUI] = "XAUI";
- cgx_lmactype_string[LMAC_MODE_RXAUI] = "RXAUI";
- cgx_lmactype_string[LMAC_MODE_10G_R] = "10G_R";
- cgx_lmactype_string[LMAC_MODE_40G_R] = "40G_R";
- cgx_lmactype_string[LMAC_MODE_QSGMII] = "QSGMII";
- cgx_lmactype_string[LMAC_MODE_25G_R] = "25G_R";
- cgx_lmactype_string[LMAC_MODE_50G_R] = "50G_R";
- cgx_lmactype_string[LMAC_MODE_100G_R] = "100G_R";
- cgx_lmactype_string[LMAC_MODE_USXGMII] = "USXGMII";
+static int cgx_link_usertable_index_map(int speed)
+{
+ switch (speed) {
+ case SPEED_10:
+ return CGX_LINK_10M;
+ case SPEED_100:
+ return CGX_LINK_100M;
+ case SPEED_1000:
+ return CGX_LINK_1G;
+ case SPEED_2500:
+ return CGX_LINK_2HG;
+ case SPEED_5000:
+ return CGX_LINK_5G;
+ case SPEED_10000:
+ return CGX_LINK_10G;
+ case SPEED_20000:
+ return CGX_LINK_20G;
+ case SPEED_25000:
+ return CGX_LINK_25G;
+ case SPEED_40000:
+ return CGX_LINK_40G;
+ case SPEED_50000:
+ return CGX_LINK_50G;
+ case 80000:
+ return CGX_LINK_80G;
+ case SPEED_100000:
+ return CGX_LINK_100G;
+ case SPEED_UNKNOWN:
+ return CGX_LINK_NONE;
+ }
+ return CGX_LINK_NONE;
}
+static void set_mod_args(struct cgx_set_link_mode_args *args,
+ u32 speed, u8 duplex, u8 autoneg, u64 mode)
+{
+ int mode_baseidx;
+ u8 cgx_mode;
+
+ /* Fill in default values in case the user did not pass
+ * valid parameters
+ */
+ if (args->duplex == DUPLEX_UNKNOWN)
+ args->duplex = duplex;
+ if (args->speed == SPEED_UNKNOWN)
+ args->speed = speed;
+ if (args->an == AUTONEG_UNKNOWN)
+ args->an = autoneg;
+
+ /* Derive mode_base_idx and mode fields based
+ * on cgx_mode value
+ */
+ cgx_mode = find_first_bit((unsigned long *)&mode,
+ CGX_MODE_MAX);
+ args->mode = mode;
+ mode_baseidx = cgx_mode - 41;
+ if (mode_baseidx > 0) {
+ args->mode_baseidx = 1;
+ args->mode = BIT_ULL(mode_baseidx);
+ }
+}
+
+static void otx2_map_ethtool_link_modes(u64 bitmask,
+ struct cgx_set_link_mode_args *args)
+{
+ switch (bitmask) {
+ case ETHTOOL_LINK_MODE_10baseT_Half_BIT:
+ set_mod_args(args, 10, 1, 1, BIT_ULL(CGX_MODE_SGMII));
+ break;
+ case ETHTOOL_LINK_MODE_10baseT_Full_BIT:
+ set_mod_args(args, 10, 0, 1, BIT_ULL(CGX_MODE_SGMII));
+ break;
+ case ETHTOOL_LINK_MODE_100baseT_Half_BIT:
+ set_mod_args(args, 100, 1, 1, BIT_ULL(CGX_MODE_SGMII));
+ break;
+ case ETHTOOL_LINK_MODE_100baseT_Full_BIT:
+ set_mod_args(args, 100, 0, 1, BIT_ULL(CGX_MODE_SGMII));
+ break;
+ case ETHTOOL_LINK_MODE_1000baseT_Half_BIT:
+ set_mod_args(args, 1000, 1, 1, BIT_ULL(CGX_MODE_SGMII));
+ break;
+ case ETHTOOL_LINK_MODE_1000baseT_Full_BIT:
+ set_mod_args(args, 1000, 0, 1, BIT_ULL(CGX_MODE_SGMII));
+ break;
+ case ETHTOOL_LINK_MODE_1000baseX_Full_BIT:
+ set_mod_args(args, 1000, 0, 0, BIT_ULL(CGX_MODE_1000_BASEX));
+ break;
+ case ETHTOOL_LINK_MODE_10000baseT_Full_BIT:
+ set_mod_args(args, 1000, 0, 1, BIT_ULL(CGX_MODE_QSGMII));
+ break;
+ case ETHTOOL_LINK_MODE_10000baseSR_Full_BIT:
+ set_mod_args(args, 10000, 0, 0, BIT_ULL(CGX_MODE_10G_C2C));
+ break;
+ case ETHTOOL_LINK_MODE_10000baseLR_Full_BIT:
+ set_mod_args(args, 10000, 0, 0, BIT_ULL(CGX_MODE_10G_C2M));
+ break;
+ case ETHTOOL_LINK_MODE_10000baseKR_Full_BIT:
+ set_mod_args(args, 10000, 0, 1, BIT_ULL(CGX_MODE_10G_KR));
+ break;
+ case ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT:
+ set_mod_args(args, 20000, 0, 0, BIT_ULL(CGX_MODE_20G_C2C));
+ break;
+ case ETHTOOL_LINK_MODE_25000baseSR_Full_BIT:
+ set_mod_args(args, 25000, 0, 0, BIT_ULL(CGX_MODE_25G_C2C));
+ break;
+ case ETHTOOL_LINK_MODE_10000baseR_FEC_BIT:
+ set_mod_args(args, 25000, 0, 0, BIT_ULL(CGX_MODE_25G_C2M));
+ break;
+ case ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT:
+ set_mod_args(args, 25000, 0, 0, BIT_ULL(CGX_MODE_25G_2_C2C));
+ break;
+ case ETHTOOL_LINK_MODE_25000baseCR_Full_BIT:
+ set_mod_args(args, 25000, 0, 1, BIT_ULL(CGX_MODE_25G_CR));
+ break;
+ case ETHTOOL_LINK_MODE_25000baseKR_Full_BIT:
+ set_mod_args(args, 25000, 0, 1, BIT_ULL(CGX_MODE_25G_KR));
+ break;
+ case ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT:
+ set_mod_args(args, 40000, 0, 0, BIT_ULL(CGX_MODE_40G_C2C));
+ break;
+ case ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT:
+ set_mod_args(args, 40000, 0, 0, BIT_ULL(CGX_MODE_40G_C2M));
+ break;
+ case ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT:
+ set_mod_args(args, 40000, 0, 1, BIT_ULL(CGX_MODE_40G_CR4));
+ break;
+ case ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT:
+ set_mod_args(args, 40000, 0, 1, BIT_ULL(CGX_MODE_40G_KR4));
+ break;
+ case ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT:
+ set_mod_args(args, 40000, 0, 0, BIT_ULL(CGX_MODE_40GAUI_C2C));
+ break;
+ case ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT:
+ set_mod_args(args, 50000, 0, 0, BIT_ULL(CGX_MODE_50G_C2C));
+ break;
+ case ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT:
+ set_mod_args(args, 50000, 0, 0, BIT_ULL(CGX_MODE_50G_4_C2C));
+ break;
+ case ETHTOOL_LINK_MODE_50000baseDR_Full_BIT:
+ set_mod_args(args, 50000, 0, 0, BIT_ULL(CGX_MODE_50G_C2M));
+ break;
+ case ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT:
+ set_mod_args(args, 50000, 0, 1, BIT_ULL(CGX_MODE_50G_CR));
+ break;
+ case ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT:
+ set_mod_args(args, 50000, 0, 1, BIT_ULL(CGX_MODE_50G_KR));
+ break;
+ case ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT:
+ set_mod_args(args, 80000, 0, 0, BIT_ULL(CGX_MODE_80GAUI_C2C));
+ break;
+ case ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT:
+ set_mod_args(args, 100000, 0, 0, BIT_ULL(CGX_MODE_100G_C2C));
+ break;
+ case ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT:
+ set_mod_args(args, 100000, 0, 0, BIT_ULL(CGX_MODE_100G_C2M));
+ break;
+ case ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT:
+ set_mod_args(args, 100000, 0, 1, BIT_ULL(CGX_MODE_100G_CR4));
+ break;
+ case ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT:
+ set_mod_args(args, 100000, 0, 1, BIT_ULL(CGX_MODE_100G_KR4));
+ break;
+ case ETHTOOL_LINK_MODE_50000baseSR_Full_BIT:
+ set_mod_args(args, 50000, 0, 0,
+ BIT_ULL(CGX_MODE_LAUI_2_C2C_BIT));
+ break;
+ case ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT:
+ set_mod_args(args, 50000, 0, 0,
+ BIT_ULL(CGX_MODE_LAUI_2_C2M_BIT));
+ break;
+ case ETHTOOL_LINK_MODE_50000baseCR_Full_BIT:
+ set_mod_args(args, 50000, 0, 1,
+ BIT_ULL(CGX_MODE_50GBASE_CR2_C_BIT));
+ break;
+ case ETHTOOL_LINK_MODE_50000baseKR_Full_BIT:
+ set_mod_args(args, 50000, 0, 1,
+ BIT_ULL(CGX_MODE_50GBASE_KR2_C_BIT));
+ break;
+ case ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT:
+ set_mod_args(args, 100000, 0, 0,
+ BIT_ULL(CGX_MODE_100GAUI_2_C2C_BIT));
+ break;
+ case ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT:
+ set_mod_args(args, 100000, 0, 0,
+ BIT_ULL(CGX_MODE_100GAUI_2_C2M_BIT));
+ break;
+ case ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT:
+ set_mod_args(args, 100000, 0, 1,
+ BIT_ULL(CGX_MODE_100GBASE_CR2_BIT));
+ break;
+ case ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT:
+ set_mod_args(args, 100000, 0, 1,
+ BIT_ULL(CGX_MODE_100GBASE_KR2_BIT));
+ break;
+ case ETHTOOL_LINK_MODE_1000baseKX_Full_BIT:
+ set_mod_args(args, 1000, 0, 0,
+ BIT_ULL(CGX_MODE_SFI_1G_BIT));
+ break;
+ case ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT:
+ set_mod_args(args, 25000, 0, 1,
+ BIT_ULL(CGX_MODE_25GBASE_CR_C_BIT));
+ break;
+ case ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT:
+ set_mod_args(args, 25000, 0, 1,
+ BIT_ULL(CGX_MODE_25GBASE_KR_C_BIT));
+ break;
+ default:
+ set_mod_args(args, 0, 1, 0, BIT_ULL(CGX_MODE_MAX));
+ break;
+ }
+}
static inline void link_status_user_format(u64 lstat,
struct cgx_link_user_info *linfo,
struct cgx *cgx, u8 lmac_id)
{
- char *lmac_string;
+ const char *lmac_string;
linfo->link_up = FIELD_GET(RESP_LINKSTAT_UP, lstat);
linfo->full_duplex = FIELD_GET(RESP_LINKSTAT_FDUPLEX, lstat);
linfo->speed = cgx_speed_mbps[FIELD_GET(RESP_LINKSTAT_SPEED, lstat)];
- linfo->lmac_type_id = cgx_get_lmac_type(cgx, lmac_id);
+ linfo->an = FIELD_GET(RESP_LINKSTAT_AN, lstat);
+ linfo->fec = FIELD_GET(RESP_LINKSTAT_FEC, lstat);
+ linfo->lmac_type_id = FIELD_GET(RESP_LINKSTAT_LMAC_TYPE, lstat);
lmac_string = cgx_lmactype_string[linfo->lmac_type_id];
strncpy(linfo->lmac_type, lmac_string, LMACTYPE_STR_LEN - 1);
}
@@ -632,6 +1436,8 @@ static inline void cgx_link_change_handler(u64 lstat,
lmac->link_info = event.link_uinfo;
linfo = &lmac->link_info;
+ if (err_type == CGX_ERR_SPEED_CHANGE_INVALID)
+ return;
/* Ensure callback doesn't get unregistered until we finish it */
spin_lock(&lmac->event_cb_lock);
@@ -660,7 +1466,8 @@ static inline bool cgx_cmdresp_is_linkevent(u64 event)
id = FIELD_GET(EVTREG_ID, event);
if (id == CGX_CMD_LINK_BRING_UP ||
- id == CGX_CMD_LINK_BRING_DOWN)
+ id == CGX_CMD_LINK_BRING_DOWN ||
+ id == CGX_CMD_MODE_CHANGE)
return true;
else
return false;
@@ -676,12 +1483,16 @@ static inline bool cgx_event_is_linkevent(u64 event)
static irqreturn_t cgx_fwi_event_handler(int irq, void *data)
{
+ u64 event, offset, clear_bit;
struct lmac *lmac = data;
struct cgx *cgx;
- u64 event;
cgx = lmac->cgx;
+ /* Clear SW_INT for RPM and CMR_INT for CGX */
+ offset = cgx->mac_ops->int_register;
+ clear_bit = cgx->mac_ops->int_ena_bit;
+
event = cgx_read(cgx, lmac->lmac_id, CGX_EVENT_REG);
if (!FIELD_GET(EVTREG_ACK, event))
@@ -704,7 +1515,7 @@ static irqreturn_t cgx_fwi_event_handler(int irq, void *data)
/* Release thread waiting for completion */
lmac->cmd_pend = false;
- wake_up_interruptible(&lmac->wq_cmd_cmplt);
+ wake_up(&lmac->wq_cmd_cmplt);
break;
case CGX_EVT_ASYNC:
if (cgx_event_is_linkevent(event))
@@ -717,7 +1528,7 @@ static irqreturn_t cgx_fwi_event_handler(int irq, void *data)
* Ack the interrupt register as well.
*/
cgx_write(lmac->cgx, lmac->lmac_id, CGX_EVENT_REG, 0);
- cgx_write(lmac->cgx, lmac->lmac_id, CGXX_CMRX_INT, FW_CGX_INT);
+ cgx_write(lmac->cgx, lmac->lmac_id, offset, clear_bit);
return IRQ_HANDLED;
}
@@ -761,20 +1572,108 @@ int cgx_get_fwdata_base(u64 *base)
{
u64 req = 0, resp;
struct cgx *cgx;
+ int first_lmac;
int err;
cgx = list_first_entry_or_null(&cgx_list, struct cgx, cgx_list);
if (!cgx)
return -ENXIO;
+ first_lmac = find_first_bit(&cgx->lmac_bmap, MAX_LMAC_PER_CGX);
req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FWD_BASE, req);
- err = cgx_fwi_cmd_generic(req, &resp, cgx, 0);
+ err = cgx_fwi_cmd_generic(req, &resp, cgx, first_lmac);
if (!err)
*base = FIELD_GET(RESP_FWD_BASE, resp);
return err;
}
+int cgx_set_link_mode(void *cgxd, struct cgx_set_link_mode_args args,
+ int cgx_id, int lmac_id)
+{
+ struct cgx *cgx = cgxd;
+ u64 req = 0, resp;
+
+ if (!cgx)
+ return -ENODEV;
+
+ otx2_map_ethtool_link_modes(args.mode, &args);
+ if (!args.speed && args.duplex && !args.an)
+ return -EINVAL;
+
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_MODE_CHANGE, req);
+ req = FIELD_SET(CMDMODECHANGE_SPEED,
+ cgx_link_usertable_index_map(args.speed), req);
+ req = FIELD_SET(CMDMODECHANGE_DUPLEX, args.duplex, req);
+ req = FIELD_SET(CMDMODECHANGE_AN, args.an, req);
+ req = FIELD_SET(CMDMODECHANGE_MODE_BASEIDX, args.mode_baseidx, req);
+ req = FIELD_SET(CMDMODECHANGE_FLAGS, args.mode, req);
+
+ return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
+}
+
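An illustrative ethtool-driven mode change (the wrapper name is an assumption): passing the SPEED/DUPLEX/AUTONEG *_UNKNOWN values lets otx2_map_ethtool_link_modes() derive them from the requested link-mode bit.

        /* Illustrative caller: request 10GBASE-KR with autoneg on one LMAC. */
        static int request_10gkr(void *cgxd, int cgx_id, int lmac_id)
        {
                struct cgx_set_link_mode_args args = {
                        .mode   = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
                        .speed  = SPEED_UNKNOWN,
                        .duplex = DUPLEX_UNKNOWN,
                        .an     = AUTONEG_UNKNOWN,
                };

                return cgx_set_link_mode(cgxd, args, cgx_id, lmac_id);
        }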
+int cgx_set_fec(u64 fec, int cgx_id, int lmac_id)
+{
+ u64 req = 0, resp;
+ struct cgx *cgx;
+ int err = 0;
+
+ cgx = cgx_get_pdata(cgx_id);
+ if (!cgx)
+ return -ENXIO;
+
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_SET_FEC, req);
+ req = FIELD_SET(CMDSETFEC, fec, req);
+ err = cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
+ if (err)
+ return err;
+
+ cgx->lmac_idmap[lmac_id]->link_info.fec =
+ FIELD_GET(RESP_LINKSTAT_FEC, resp);
+ return cgx->lmac_idmap[lmac_id]->link_info.fec;
+}
+
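A hedged example of the FEC setter (the wrapper is illustrative): on success the return value is the FEC type the firmware actually applied, which cgx_set_fec() also caches in link_info.fec.

        /* Illustrative caller: request BASE-R FEC on one LMAC. */
        static int request_baser_fec(int cgx_id, int lmac_id)
        {
                /* Negative return means the request failed or was rejected
                 * by firmware; otherwise it is the applied FEC type.
                 */
                return cgx_set_fec(OTX2_FEC_BASER, cgx_id, lmac_id);
        }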
+int cgx_get_phy_fec_stats(void *cgxd, int lmac_id)
+{
+ struct cgx *cgx = cgxd;
+ u64 req = 0, resp;
+
+ if (!cgx)
+ return -ENODEV;
+
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_PHY_FEC_STATS, req);
+ return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
+}
+
+int cgx_set_phy_mod_type(int mod, void *cgxd, int lmac_id)
+{
+ struct cgx *cgx = cgxd;
+ u64 req = 0, resp;
+
+ if (!cgx)
+ return -ENODEV;
+
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_SET_PHY_MOD_TYPE, req);
+ req = FIELD_SET(CMDSETPHYMODTYPE, mod, req);
+ return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
+}
+
+int cgx_get_phy_mod_type(void *cgxd, int lmac_id)
+{
+ struct cgx *cgx = cgxd;
+ u64 req = 0, resp;
+ int err;
+
+ if (!cgx)
+ return -ENODEV;
+
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_PHY_MOD_TYPE, req);
+ err = cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
+ if (!err)
+ return FIELD_GET(RESP_GETPHYMODTYPE, resp);
+ return err;
+}
+
static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable)
{
u64 req = 0;
@@ -790,10 +1689,11 @@ static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable)
static inline int cgx_fwi_read_version(u64 *resp, struct cgx *cgx)
{
+ int first_lmac = find_first_bit(&cgx->lmac_bmap, MAX_LMAC_PER_CGX);
u64 req = 0;
req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req);
- return cgx_fwi_cmd_generic(req, resp, cgx, 0);
+ return cgx_fwi_cmd_generic(req, resp, cgx, first_lmac);
}
static int cgx_lmac_verify_fwi_version(struct cgx *cgx)
@@ -814,8 +1714,7 @@ static int cgx_lmac_verify_fwi_version(struct cgx *cgx)
minor_ver = FIELD_GET(RESP_MINOR_VER, resp);
dev_dbg(dev, "Firmware command interface version = %d.%d\n",
major_ver, minor_ver);
- if (major_ver != CGX_FIRMWARE_MAJOR_VER ||
- minor_ver != CGX_FIRMWARE_MINOR_VER)
+ if (major_ver != CGX_FIRMWARE_MAJOR_VER)
return -EIO;
else
return 0;
@@ -827,8 +1726,8 @@ static void cgx_lmac_linkup_work(struct work_struct *work)
struct device *dev = &cgx->pdev->dev;
int i, err;
- /* Do Link up for all the lmacs */
- for (i = 0; i < cgx->lmac_count; i++) {
+ /* Do Link up for all the enabled lmacs */
+ for_each_set_bit(i, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) {
err = cgx_fwi_link_change(cgx, i, true);
if (err)
dev_info(dev, "cgx port %d:%d Link up command failed\n",
@@ -836,6 +1735,17 @@ static void cgx_lmac_linkup_work(struct work_struct *work)
}
}
+int cgx_set_link_state(void *cgxd, int lmac_id, bool enable)
+{
+ struct cgx *cgx = cgxd;
+
+ if (!cgx)
+ return -ENODEV;
+
+ return cgx_fwi_link_change(cgx, lmac_id, enable);
+}
+EXPORT_SYMBOL(cgx_set_link_state);
+
int cgx_lmac_linkup_start(void *cgxd)
{
struct cgx *cgx = cgxd;
@@ -848,17 +1758,112 @@ int cgx_lmac_linkup_start(void *cgxd)
return 0;
}
+void cgx_lmac_enadis_higig2(void *cgxd, int lmac_id, bool enable)
+{
+ struct cgx *cgx = cgxd;
+ u64 req = 0, resp;
+
+ /* disable 802.3 pause frames before enabling higig2 */
+ if (enable) {
+ cgx_lmac_enadis_8023_pause_frm(cgxd, lmac_id, false, false);
+ cgx_lmac_enadis_higig2_pause_frm(cgxd, lmac_id, true, true);
+ }
+
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_HIGIG, req);
+ req = FIELD_SET(CMDREG_ENABLE, enable, req);
+ cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
+
+ /* re-enable 802.3 pause frames once higig2 is disabled */
+ if (!enable) {
+ cgx_lmac_enadis_higig2_pause_frm(cgxd, lmac_id, false, false);
+ cgx_lmac_enadis_8023_pause_frm(cgxd, lmac_id, true, true);
+ }
+}
+
+bool is_higig2_enabled(void *cgxd, int lmac_id)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
+ return (cfg & CGXX_SMUX_TX_CTL_HIGIG_EN);
+}
+
+static void cgx_lmac_get_fifolen(struct cgx *cgx)
+{
+ u64 cfg;
+
+ cfg = cgx_read(cgx, 0, CGX_CONST);
+ cgx->mac_ops->fifo_len = FIELD_GET(CGX_CONST_RXFIFO_SIZE, cfg);
+}
+
+static int cgx_configure_interrupt(struct cgx *cgx, struct lmac *lmac,
+ int cnt, bool req_free)
+{
+ struct mac_ops *mac_ops = cgx->mac_ops;
+ u64 offset, ena_bit;
+ unsigned int irq;
+ int err;
+
+ irq = pci_irq_vector(cgx->pdev, mac_ops->lmac_fwi +
+ cnt * mac_ops->irq_offset);
+ offset = mac_ops->int_set_reg;
+ ena_bit = mac_ops->int_ena_bit;
+
+ if (req_free) {
+ free_irq(irq, lmac);
+ return 0;
+ }
+
+ err = request_irq(irq, cgx_fwi_event_handler, 0, lmac->name, lmac);
+ if (err)
+ return err;
+
+ /* Enable interrupt */
+ cgx_write(cgx, lmac->lmac_id, offset, ena_bit);
+ return 0;
+}
+
+int cgx_get_nr_lmacs(void *cgxd)
+{
+ struct cgx *cgx = cgxd;
+
+ return cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0x7ULL;
+}
+
+u8 cgx_get_lmacid(void *cgxd, u8 lmac_index)
+{
+ struct cgx *cgx = cgxd;
+
+ return cgx->lmac_idmap[lmac_index]->lmac_id;
+}
+
+unsigned long cgx_get_lmac_bmap(void *cgxd)
+{
+ struct cgx *cgx = cgxd;
+
+ return cgx->lmac_bmap;
+}
+
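Because RPM serdes lanes may be non-contiguous, LMAC IDs can have holes; a sketch of walking the enabled set (the wrapper name is illustrative):

        /* Illustrative walk over the enabled LMACs, which may be sparse. */
        static void log_enabled_lmacs(void *cgxd)
        {
                unsigned long bmap = cgx_get_lmac_bmap(cgxd);
                int lmac_id;

                for_each_set_bit(lmac_id, &bmap, MAX_LMAC_PER_CGX)
                        pr_debug("LMAC %d enabled\n", lmac_id);
        }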
static int cgx_lmac_init(struct cgx *cgx)
{
struct lmac *lmac;
+ u64 lmac_list;
int i, err;
- cgx->lmac_count = cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0x7;
+ cgx_lmac_get_fifolen(cgx);
+
+ /* lmac_list specifies which LMACs are enabled:
+ * when bit n is set to 1, LMAC[n] is enabled
+ */
+ if (cgx->mac_ops->non_contiguous_serdes_lane)
+ lmac_list = cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0xFULL;
+
if (cgx->lmac_count > MAX_LMAC_PER_CGX)
cgx->lmac_count = MAX_LMAC_PER_CGX;
for (i = 0; i < cgx->lmac_count; i++) {
- lmac = kcalloc(1, sizeof(struct lmac), GFP_KERNEL);
+ lmac = kzalloc(sizeof(struct lmac), GFP_KERNEL);
if (!lmac)
return -ENOMEM;
lmac->name = kcalloc(1, sizeof("cgx_fwi_xxx_yyy"), GFP_KERNEL);
@@ -867,29 +1872,56 @@ static int cgx_lmac_init(struct cgx *cgx)
goto err_lmac_free;
}
sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i);
- lmac->lmac_id = i;
+ if (cgx->mac_ops->non_contiguous_serdes_lane) {
+ lmac->lmac_id = __ffs64(lmac_list);
+ lmac_list &= ~BIT_ULL(lmac->lmac_id);
+ } else {
+ lmac->lmac_id = i;
+ }
+
lmac->cgx = cgx;
+ lmac->mac_to_index_bmap.max =
+ MAX_DMAC_ENTRIES_PER_CGX / cgx->lmac_count;
+ err = rvu_alloc_bitmap(&lmac->mac_to_index_bmap);
+ if (err)
+ goto err_name_free;
+
+ /* Reserve first entry for default MAC address */
+ set_bit(0, lmac->mac_to_index_bmap.bmap);
+
+ lmac->rx_fc_pfvf_bmap.max = 128;
+ err = rvu_alloc_bitmap(&lmac->rx_fc_pfvf_bmap);
+ if (err)
+ goto err_dmac_bmap_free;
+
+ lmac->tx_fc_pfvf_bmap.max = 128;
+ err = rvu_alloc_bitmap(&lmac->tx_fc_pfvf_bmap);
+ if (err)
+ goto err_rx_fc_bmap_free;
+
init_waitqueue_head(&lmac->wq_cmd_cmplt);
mutex_init(&lmac->cmd_lock);
spin_lock_init(&lmac->event_cb_lock);
- err = request_irq(pci_irq_vector(cgx->pdev,
- CGX_LMAC_FWI + i * 9),
- cgx_fwi_event_handler, 0, lmac->name, lmac);
- if (err)
- goto err_irq;
- /* Enable interrupt */
- cgx_write(cgx, lmac->lmac_id, CGXX_CMRX_INT_ENA_W1S,
- FW_CGX_INT);
+ err = cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, false);
+ if (err)
+ goto err_bitmap_free;
/* Add reference */
- cgx->lmac_idmap[i] = lmac;
- cgx_lmac_pause_frm_config(cgx, i, true);
+ cgx->lmac_idmap[lmac->lmac_id] = lmac;
+ set_bit(lmac->lmac_id, &cgx->lmac_bmap);
+ cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, true);
}
return cgx_lmac_verify_fwi_version(cgx);
-err_irq:
+err_bitmap_free:
+ rvu_free_bitmap(&lmac->tx_fc_pfvf_bmap);
+err_rx_fc_bmap_free:
+ rvu_free_bitmap(&lmac->rx_fc_pfvf_bmap);
+err_dmac_bmap_free:
+ rvu_free_bitmap(&lmac->mac_to_index_bmap);
+err_name_free:
kfree(lmac->name);
err_lmac_free:
kfree(lmac);
@@ -908,12 +1940,13 @@ static int cgx_lmac_exit(struct cgx *cgx)
}
/* Free all lmac related resources */
- for (i = 0; i < cgx->lmac_count; i++) {
- cgx_lmac_pause_frm_config(cgx, i, false);
+ for_each_set_bit(i, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) {
lmac = cgx->lmac_idmap[i];
if (!lmac)
continue;
- free_irq(pci_irq_vector(cgx->pdev, CGX_LMAC_FWI + i * 9), lmac);
+ cgx->mac_ops->mac_pause_frm_config(cgx, i, false);
+ cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, true);
+ kfree(lmac->mac_to_index_bmap.bmap);
kfree(lmac->name);
kfree(lmac);
}
@@ -921,6 +1954,44 @@ static int cgx_lmac_exit(struct cgx *cgx)
return 0;
}
+static void cgx_populate_features(struct cgx *cgx)
+{
+ if (is_dev_rpm(cgx))
+ cgx->hw_features = (RVU_LMAC_FEAT_DMACF | RVU_MAC_RPM |
+ RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_PTP);
+ else
+ cgx->hw_features = (RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_HIGIG2 |
+ RVU_LMAC_FEAT_PTP | RVU_LMAC_FEAT_DMACF);
+}
+
+struct mac_ops cgx_mac_ops = {
+ .name = "cgx",
+ .csr_offset = 0,
+ .lmac_offset = 18,
+ .int_register = CGXX_CMRX_INT,
+ .int_set_reg = CGXX_CMRX_INT_ENA_W1S,
+ .irq_offset = 9,
+ .int_ena_bit = FW_CGX_INT,
+ .lmac_fwi = CGX_LMAC_FWI,
+ .non_contiguous_serdes_lane = false,
+ .rx_stats_cnt = 9,
+ .tx_stats_cnt = 18,
+ .get_nr_lmacs = cgx_get_nr_lmacs,
+ .get_lmac_type = cgx_get_lmac_type,
+ .mac_lmac_intl_lbk = cgx_lmac_internal_loopback,
+ .mac_get_rx_stats = cgx_get_rx_stats,
+ .mac_get_tx_stats = cgx_get_tx_stats,
+ .mac_enadis_rx_pause_fwding = cgx_lmac_enadis_rx_pause_fwding,
+ .mac_get_pause_frm_status = cgx_lmac_get_pause_frm_status,
+ .mac_enadis_pause_frm = cgx_lmac_enadis_pause_frm,
+ .mac_pause_frm_config = cgx_lmac_pause_frm_config,
+ .mac_enadis_ptp_config = cgx_lmac_ptp_config,
+ .mac_rx_tx_enable = cgx_lmac_rx_tx_enable,
+ .mac_tx_enable = cgx_lmac_tx_enable,
+ .pfc_config = cgx_lmac_pfc_config,
+ .mac_get_pfc_frm_cfg = cgx_lmac_get_pfc_frm_cfg,
+};
+
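A brief sketch of why the ops table matters (the call site below is illustrative): RVU AF code dispatches through cgx->mac_ops so the same path drives both CGX (OcteonTx2) and RPM (CN10K) MACs.

        /* Illustrative dispatch: callers go through the ops table instead of
         * calling the CGX helpers directly.
         */
        static void mac_up(struct cgx *cgx, int lmac_id)
        {
                struct mac_ops *ops = cgx->mac_ops;

                ops->mac_pause_frm_config(cgx, lmac_id, true);
                ops->mac_rx_tx_enable(cgx, lmac_id, true);
        }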
static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct device *dev = &pdev->dev;
@@ -934,6 +2005,12 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_drvdata(pdev, cgx);
+ /* Use mac_ops to get MAC specific features */
+ if (pdev->device == PCI_DEVID_CN10K_RPM)
+ cgx->mac_ops = rpm_get_mac_ops();
+ else
+ cgx->mac_ops = &cgx_mac_ops;
+
err = pci_enable_device(pdev);
if (err) {
dev_err(dev, "Failed to enable PCI device\n");
@@ -955,7 +2032,26 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_release_regions;
}
- nvec = CGX_NVEC;
+
+ cgx->cgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24)
+ & CGX_ID_MASK;
+
+ /* Skip probe if CGX is not mapped to NIX */
+ if (!is_cgx_mapped_to_nix(pdev->subsystem_device, cgx->cgx_id)) {
+ dev_notice(dev, "CGX %d not mapped to NIX, skipping probe\n", cgx->cgx_id);
+ err = -ENOMEM;
+ goto err_release_regions;
+ }
+
+ cgx->lmac_count = cgx->mac_ops->get_nr_lmacs(cgx);
+ if (!cgx->lmac_count) {
+ dev_notice(dev, "CGX %d LMAC count is zero, skipping probe\n", cgx->cgx_id);
+ err = -EOPNOTSUPP;
+ goto err_release_regions;
+ }
+
+ nvec = pci_msix_vec_count(cgx->pdev);
+
err = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
if (err < 0 || err != nvec) {
dev_err(dev, "Request for %d msix vectors failed, err %d\n",
@@ -963,9 +2059,6 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_release_regions;
}
- cgx->cgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24)
- & CGX_ID_MASK;
-
/* init wq for processing linkup requests */
INIT_WORK(&cgx->cgx_cmd_work, cgx_lmac_linkup_work);
cgx->cgx_cmd_workq = alloc_workqueue("cgx_cmd_workq", 0, 0);
@@ -977,7 +2070,10 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
list_add(&cgx->cgx_list, &cgx_list);
- cgx_link_usertable_init();
+
+ cgx_populate_features(cgx);
+
+ mutex_init(&cgx->lock);
err = cgx_lmac_init(cgx);
if (err)
@@ -1002,8 +2098,11 @@ static void cgx_remove(struct pci_dev *pdev)
{
struct cgx *cgx = pci_get_drvdata(pdev);
- cgx_lmac_exit(cgx);
- list_del(&cgx->cgx_list);
+ if (cgx) {
+ cgx_lmac_exit(cgx);
+ list_del(&cgx->cgx_list);
+ }
+
pci_free_irq_vectors(pdev);
pci_release_regions(pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
index 27ca3291682b..ff8fee22473f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -1,11 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Marvell OcteonTx2 CGX driver
+/* Marvell OcteonTx2 CGX driver
*
- * Copyright (C) 2018 Marvell International Ltd.
+ * Copyright (C) 2018 Marvell.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef CGX_H
@@ -13,6 +10,7 @@
#include "mbox.h"
#include "cgx_fw_if.h"
+#include "rpm.h"
/* PCI device IDs */
#define PCI_DEVID_OCTEONTX2_CGX 0xA059
@@ -22,11 +20,15 @@
#define CGX_ID_MASK 0x7
#define MAX_LMAC_PER_CGX 4
-#define CGX_FIFO_LEN 65536 /* 64K for both Rx & Tx */
+#define MAX_DMAC_ENTRIES_PER_CGX 32
#define CGX_OFFSET(x) ((x) * MAX_LMAC_PER_CGX)
/* Registers */
#define CGXX_CMRX_CFG 0x00
+#define CMR_P2X_SEL_MASK GENMASK_ULL(61, 59)
+#define CMR_P2X_SEL_SHIFT 59ULL
+#define CMR_P2X_SEL_NIX0 1ULL
+#define CMR_P2X_SEL_NIX1 2ULL
#define CMR_EN BIT_ULL(55)
#define DATA_PKT_TX_EN BIT_ULL(53)
#define DATA_PKT_RX_EN BIT_ULL(54)
@@ -38,20 +40,31 @@
#define CGXX_CMRX_RX_ID_MAP 0x060
#define CGXX_CMRX_RX_STAT0 0x070
#define CGXX_CMRX_RX_LMACS 0x128
-#define CGXX_CMRX_RX_DMAC_CTL0 0x1F8
+#define CGXX_CMRX_RX_DMAC_CTL0 (0x1F8 + mac_ops->csr_offset)
#define CGX_DMAC_CTL0_CAM_ENABLE BIT_ULL(3)
#define CGX_DMAC_CAM_ACCEPT BIT_ULL(3)
+#define CGX_DMAC_MCAST_MODE_CAM BIT_ULL(2)
#define CGX_DMAC_MCAST_MODE BIT_ULL(1)
#define CGX_DMAC_BCAST_MODE BIT_ULL(0)
-#define CGXX_CMRX_RX_DMAC_CAM0 0x200
+#define CGXX_CMRX_RX_DMAC_CAM0 (0x200 + mac_ops->csr_offset)
#define CGX_DMAC_CAM_ADDR_ENABLE BIT_ULL(48)
+#define CGX_DMAC_CAM_ENTRY_LMACID GENMASK_ULL(50, 49)
#define CGXX_CMRX_RX_DMAC_CAM1 0x400
#define CGX_RX_DMAC_ADR_MASK GENMASK_ULL(47, 0)
+#define CGXX_CMRX_TX_FIFO_LEN 0x618
+#define CGXX_CMRX_TX_LMAC_IDLE BIT_ULL(14)
+#define CGXX_CMRX_TX_LMAC_E_IDLE BIT_ULL(29)
#define CGXX_CMRX_TX_STAT0 0x700
#define CGXX_SCRATCH0_REG 0x1050
#define CGXX_SCRATCH1_REG 0x1058
#define CGX_CONST 0x2000
+#define CGX_CONST_RXFIFO_SIZE GENMASK_ULL(23, 0)
#define CGXX_SPUX_CONTROL1 0x10000
+#define CGXX_SPUX_LNX_FEC_CORR_BLOCKS 0x10700
+#define CGXX_SPUX_LNX_FEC_UNCORR_BLOCKS 0x10800
+#define CGXX_SPUX_RSFEC_CORR 0x10088
+#define CGXX_SPUX_RSFEC_UNCORR 0x10090
+
#define CGXX_SPUX_CONTROL1_LBK BIT_ULL(14)
#define CGXX_GMP_PCS_MRX_CTL 0x30000
#define CGXX_GMP_PCS_MRX_CTL_LBK BIT_ULL(14)
@@ -63,22 +76,34 @@
#define CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK BIT_ULL(3)
#define CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE BIT_ULL(12)
#define CGXX_SMUX_TX_CTL 0x20178
+#define CGXX_SMUX_TX_CTL_HIGIG_EN BIT_ULL(8)
#define CGXX_SMUX_TX_PAUSE_PKT_TIME 0x20110
#define CGXX_SMUX_TX_PAUSE_PKT_INTERVAL 0x20120
+#define CGXX_SMUX_SMAC 0x20108
+#define CGXX_SMUX_CBFC_CTL 0x20218
+#define CGXX_SMUX_CBFC_CTL_RX_EN BIT_ULL(0)
+#define CGXX_SMUX_CBFC_CTL_TX_EN BIT_ULL(1)
+#define CGXX_SMUX_CBFC_CTL_DRP_EN BIT_ULL(2)
+#define CGXX_SMUX_CBFC_CTL_BCK_EN BIT_ULL(3)
+#define CGX_PFC_CLASS_MASK GENMASK_ULL(47, 32)
+#define CGXX_SMUX_TX_PAUSE_PKT_HG2_INTRA_EN BIT_ULL(32)
+#define HG2_INTRA_INTERVAL GENMASK_ULL(31, 16)
#define CGXX_GMP_GMI_TX_PAUSE_PKT_TIME 0x38230
#define CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL 0x38248
#define CGX_SMUX_TX_CTL_L2P_BP_CONV BIT_ULL(7)
#define CGXX_CMR_RX_OVR_BP 0x130
#define CGX_CMR_RX_OVR_BP_EN(X) BIT_ULL(((X) + 8))
#define CGX_CMR_RX_OVR_BP_BP(X) BIT_ULL(((X) + 4))
+#define CGXX_SMUX_HG2_CONTROL 0x20210
+#define CGXX_SMUX_HG2_CONTROL_TX_ENABLE BIT_ULL(18)
+#define CGXX_SMUX_HG2_CONTROL_RX_ENABLE BIT_ULL(17)
#define CGX_COMMAND_REG CGXX_SCRATCH1_REG
#define CGX_EVENT_REG CGXX_SCRATCH0_REG
#define CGX_CMD_TIMEOUT 2200 /* msecs */
#define DEFAULT_PAUSE_TIME 0x7FF
-#define CGX_NVEC 37
-#define CGX_LMAC_FWI 0
+#define CGX_LMAC_FWI 0
enum cgx_nix_stat_type {
NIX_STATS_RX,
@@ -126,10 +151,16 @@ int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id);
int cgx_lmac_evh_unregister(void *cgxd, int lmac_id);
int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat);
int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat);
+int cgx_stats_rst(void *cgxd, int lmac_id);
+u64 cgx_get_lmac_tx_fifo_status(void *cgxd, int lmac_id);
int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable);
int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable);
int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr);
+int cgx_lmac_addr_reset(u8 cgx_id, u8 lmac_id);
u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id);
+int cgx_lmac_addr_add(u8 cgx_id, u8 lmac_id, u8 *mac_addr);
+int cgx_lmac_addr_del(u8 cgx_id, u8 lmac_id, u8 index);
+int cgx_lmac_addr_max_entries_get(u8 cgx_id, u8 lmac_id);
void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable);
void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable);
int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable);
@@ -137,10 +168,39 @@ int cgx_get_link_info(void *cgxd, int lmac_id,
struct cgx_link_user_info *linfo);
int cgx_lmac_linkup_start(void *cgxd);
int cgx_get_fwdata_base(u64 *base);
-int cgx_lmac_get_pause_frm(void *cgxd, int lmac_id,
- u8 *tx_pause, u8 *rx_pause);
-int cgx_lmac_set_pause_frm(void *cgxd, int lmac_id,
- u8 tx_pause, u8 rx_pause);
+int cgx_lmac_get_pause_frm_status(void *cgxd, int lmac_id,
+ u8 *tx_pause, u8 *rx_pause);
+int cgx_lmac_enadis_pause_frm(void *cgxd, int lmac_id,
+ u8 tx_pause, u8 rx_pause);
+void cgx_lmac_pause_frm_config(void *cgxd, int lmac_id, bool enable);
void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable);
-
+int cgx_set_link_state(void *cgxd, int lmac_id, bool enable);
+int cgx_set_phy_mod_type(int mod, void *cgxd, int lmac_id);
+int cgx_get_phy_mod_type(void *cgxd, int lmac_id);
+void cgx_lmac_enadis_higig2(void *cgxd, int lmac_id, bool enable);
+bool is_higig2_enabled(void *cgxd, int lmac_id);
+int cgx_get_pkind(void *cgxd, u8 lmac_id, int *pkind);
+u8 cgx_lmac_get_p2x(int cgx_id, int lmac_id);
+int cgx_set_fec(u64 fec, int cgx_id, int lmac_id);
+int cgx_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp);
+int cgx_get_phy_fec_stats(void *cgxd, int lmac_id);
+int cgx_set_link_mode(void *cgxd, struct cgx_set_link_mode_args args,
+ int cgx_id, int lmac_id);
+u64 cgx_features_get(void *cgxd);
+struct mac_ops *get_mac_ops(void *cgxd);
+int cgx_get_nr_lmacs(void *cgxd);
+void cgx_lmac_write(int cgx_id, int lmac_id, u64 offset, u64 val);
+u64 cgx_lmac_read(int cgx_id, int lmac_id, u64 offset);
+u8 cgx_get_lmac_type(void *cgx, int lmac_id);
+u8 cgx_get_lmacid(void *cgxd, u8 lmac_index);
+unsigned long cgx_get_lmac_bmap(void *cgxd);
+u64 cgx_read_dmac_ctrl(void *cgxd, int lmac_id);
+u64 cgx_read_dmac_entry(void *cgxd, int index);
+int cgx_lmac_addr_update(u8 cgx_id, u8 lmac_id, u8 *mac_addr, u8 index);
+int cgx_lmac_pfc_config(void *cgxd, int lmac_id, u8 tx_pause, u8 rx_pause,
+ u16 pfc_en);
+int cgx_lmac_get_pfc_frm_cfg(void *cgxd, int lmac_id, u8 *tx_pause,
+ u8 *rx_pause);
+int verify_lmac_fc_cfg(void *cgxd, int lmac_id, u8 tx_pause, u8 rx_pause,
+ int pfvf_idx);
#endif /* CGX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
index c3702fa58b6b..fd7bda8024bc 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
@@ -1,11 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Marvell OcteonTx2 CGX driver
+/* Marvell OcteonTx2 CGX driver
*
- * Copyright (C) 2018 Marvell International Ltd.
+ * Copyright (C) 2018 Marvell.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __CGX_FW_INTF_H__
@@ -14,6 +11,10 @@
#include <linux/bitops.h>
#include <linux/bitfield.h>
+/* The major version changes only if there is a structural change in
+ * existing commands which impacts functionality.
+ * The minor version changes with new command/structure additions.
+ */
#define CGX_FIRMWARE_MAJOR_VER 1
#define CGX_FIRMWARE_MINOR_VER 0
@@ -43,7 +44,13 @@ enum cgx_error_type {
CGX_ERR_TRAINING_FAIL,
CGX_ERR_RX_EQU_FAIL,
CGX_ERR_SPUX_BER_FAIL,
- CGX_ERR_SPUX_RSFEC_ALGN_FAIL, /* = 22 */
+ CGX_ERR_SPUX_RSFEC_ALGN_FAIL,
+ CGX_ERR_SPUX_MARKER_LOCK_FAIL,
+ CGX_ERR_SET_FEC_INVALID,
+ CGX_ERR_SET_FEC_FAIL,
+ CGX_ERR_MODULE_INVALID,
+ CGX_ERR_MODULE_NOT_PRESENT,
+ CGX_ERR_SPEED_CHANGE_INVALID,
};
/* LINK speed types */
@@ -59,10 +66,53 @@ enum cgx_link_speed {
CGX_LINK_25G,
CGX_LINK_40G,
CGX_LINK_50G,
+ CGX_LINK_80G,
CGX_LINK_100G,
CGX_LINK_SPEED_MAX,
};
+enum CGX_MODE_ {
+ CGX_MODE_SGMII,
+ CGX_MODE_1000_BASEX,
+ CGX_MODE_QSGMII,
+ CGX_MODE_10G_C2C,
+ CGX_MODE_10G_C2M,
+ CGX_MODE_10G_KR,
+ CGX_MODE_20G_C2C,
+ CGX_MODE_25G_C2C,
+ CGX_MODE_25G_C2M,
+ CGX_MODE_25G_2_C2C,
+ CGX_MODE_25G_CR,
+ CGX_MODE_25G_KR,
+ CGX_MODE_40G_C2C,
+ CGX_MODE_40G_C2M,
+ CGX_MODE_40G_CR4,
+ CGX_MODE_40G_KR4,
+ CGX_MODE_40GAUI_C2C,
+ CGX_MODE_50G_C2C,
+ CGX_MODE_50G_C2M,
+ CGX_MODE_50G_4_C2C,
+ CGX_MODE_50G_CR,
+ CGX_MODE_50G_KR,
+ CGX_MODE_80GAUI_C2C,
+ CGX_MODE_100G_C2C,
+ CGX_MODE_100G_C2M,
+ CGX_MODE_100G_CR4,
+ CGX_MODE_100G_KR4,
+ CGX_MODE_LAUI_2_C2C_BIT,
+ CGX_MODE_LAUI_2_C2M_BIT,
+ CGX_MODE_50GBASE_CR2_C_BIT,
+ CGX_MODE_50GBASE_KR2_C_BIT, /* = 30 */
+ CGX_MODE_100GAUI_2_C2C_BIT,
+ CGX_MODE_100GAUI_2_C2M_BIT,
+ CGX_MODE_100GBASE_CR2_BIT,
+ CGX_MODE_100GBASE_KR2_BIT,
+ CGX_MODE_SFI_1G_BIT,
+ CGX_MODE_25GBASE_CR_C_BIT,
+ CGX_MODE_25GBASE_KR_C_BIT,
+ CGX_MODE_MAX /* = 38 */
+};
+
/* REQUEST ID types. Input to firmware */
enum cgx_cmd_id {
CGX_CMD_NONE,
@@ -75,12 +125,25 @@ enum cgx_cmd_id {
CGX_CMD_INTERNAL_LBK,
CGX_CMD_EXTERNAL_LBK,
CGX_CMD_HIGIG,
- CGX_CMD_LINK_STATE_CHANGE,
+ CGX_CMD_LINK_STAT_CHANGE,
CGX_CMD_MODE_CHANGE, /* hot plug support */
CGX_CMD_INTF_SHUTDOWN,
CGX_CMD_GET_MKEX_PRFL_SIZE,
CGX_CMD_GET_MKEX_PRFL_ADDR,
CGX_CMD_GET_FWD_BASE, /* get base address of shared FW data */
+ CGX_CMD_GET_LINK_MODES, /* Supported Link Modes */
+ CGX_CMD_SET_LINK_MODE,
+ CGX_CMD_GET_SUPPORTED_FEC,
+ CGX_CMD_SET_FEC,
+ CGX_CMD_GET_AN,
+ CGX_CMD_SET_AN,
+ CGX_CMD_GET_ADV_LINK_MODES,
+ CGX_CMD_GET_ADV_FEC,
+ CGX_CMD_GET_PHY_MOD_TYPE, /* line-side modulation type: NRZ or PAM4 */
+ CGX_CMD_SET_PHY_MOD_TYPE,
+ CGX_CMD_PRBS,
+ CGX_CMD_DISPLAY_EYE,
+ CGX_CMD_GET_PHY_FEC_STATS,
};
/* async event ids */
@@ -171,13 +234,20 @@ struct cgx_lnk_sts {
uint64_t full_duplex:1;
uint64_t speed:4; /* cgx_link_speed */
uint64_t err_type:10;
- uint64_t reserved2:39;
+ uint64_t an:1; /* AN supported or not */
+ uint64_t fec:2; /* FEC type if enabled, if not 0 */
+ uint64_t lmac_type:8;
+ uint64_t mode:8;
+ uint64_t reserved2:20;
};
#define RESP_LINKSTAT_UP GENMASK_ULL(9, 9)
#define RESP_LINKSTAT_FDUPLEX GENMASK_ULL(10, 10)
#define RESP_LINKSTAT_SPEED GENMASK_ULL(14, 11)
#define RESP_LINKSTAT_ERRTYPE GENMASK_ULL(24, 15)
+#define RESP_LINKSTAT_AN GENMASK_ULL(25, 25)
+#define RESP_LINKSTAT_FEC GENMASK_ULL(27, 26)
+#define RESP_LINKSTAT_LMAC_TYPE GENMASK_ULL(35, 28)
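As a reference for the new fields, the 64-bit status word can be unpacked with plain shifts and masks; the standalone sketch below mirrors the GENMASK_ULL() bit ranges above (the sample value and the GET_FIELD() helper are illustrative only, not driver code):

#include <stdint.h>
#include <stdio.h>

#define GET_FIELD(val, hi, lo) (((val) >> (lo)) & ((1ULL << ((hi) - (lo) + 1)) - 1))

int main(void)
{
	uint64_t sts = 0x0000000500000a00ULL;	/* made-up raw link-status word */

	printf("link_up  : %llu\n", (unsigned long long)GET_FIELD(sts, 9, 9));
	printf("duplex   : %llu\n", (unsigned long long)GET_FIELD(sts, 10, 10));
	printf("speed    : %llu\n", (unsigned long long)GET_FIELD(sts, 14, 11));
	printf("err_type : %llu\n", (unsigned long long)GET_FIELD(sts, 24, 15));
	printf("an       : %llu\n", (unsigned long long)GET_FIELD(sts, 25, 25));
	printf("fec      : %llu\n", (unsigned long long)GET_FIELD(sts, 27, 26));
	printf("lmac_type: %llu\n", (unsigned long long)GET_FIELD(sts, 35, 28));
	return 0;
}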
/* scratchx(1) CSR used for non-secure SW->ATF communication
* This CSR acts as a command register
@@ -199,4 +269,23 @@ struct cgx_lnk_sts {
#define CMDLINKCHANGE_FULLDPLX BIT_ULL(9)
#define CMDLINKCHANGE_SPEED GENMASK_ULL(13, 10)
+#define CMDSETFEC GENMASK_ULL(9, 8)
+/* command argument to be passed for cmd ID - CGX_CMD_MODE_CHANGE */
+#define CMDMODECHANGE_SPEED GENMASK_ULL(11, 8)
+#define CMDMODECHANGE_DUPLEX GENMASK_ULL(12, 12)
+#define CMDMODECHANGE_AN GENMASK_ULL(13, 13)
+/* This field categorizes the mode ID (FLAGS) range to accommodate
+ * more modes.
+ * For the mode ID range 0 - 41, this field is 0.
+ * For the mode ID range 42 - 83, this field is 1.
+ */
+#define CMDMODECHANGE_MODE_BASEIDX GENMASK_ULL(21, 20)
+#define CMDMODECHANGE_FLAGS GENMASK_ULL(63, 22)
+
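The base-index comment above can be read as a simple divide/modulo split of a flat mode ID into the two fields; the helper below is a hypothetical illustration of that reading, not driver code:

#include <stdint.h>

#define CGX_MODE_FLAGS_WIDTH	42	/* bits 63:22 give 42 flag positions */

static void cgx_mode_to_baseidx(unsigned int mode_id, uint64_t *baseidx,
				uint64_t *flag_bit)
{
	*baseidx  = mode_id / CGX_MODE_FLAGS_WIDTH;	/* 0 for IDs 0-41, 1 for 42-83 */
	*flag_bit = 1ULL << (mode_id % CGX_MODE_FLAGS_WIDTH);
}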
+/* command argument to be passed for cmd ID - CGX_CMD_SET_PHY_MOD_TYPE */
+#define CMDSETPHYMODTYPE GENMASK_ULL(8, 8)
+
+/* response to cmd ID - RESP_GETPHYMODTYPE */
+#define RESP_GETPHYMODTYPE GENMASK_ULL(9, 9)
+
#endif /* __CGX_FW_INTF_H__ */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h
index f48eb66ed021..8931864ee110 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h
@@ -1,11 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Marvell OcteonTx2 RVU Admin Function driver
+/* Marvell RVU Admin Function driver
*
- * Copyright (C) 2018 Marvell International Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * Copyright (C) 2018 Marvell.
*/
#ifndef COMMON_H
@@ -64,8 +60,8 @@ static inline int qmem_alloc(struct device *dev, struct qmem **q,
qmem->entry_sz = entry_sz;
qmem->alloc_sz = (qsize * entry_sz) + OTX2_ALIGN;
- qmem->base = dma_alloc_coherent(dev, qmem->alloc_sz,
- &qmem->iova, GFP_KERNEL);
+ qmem->base = dma_alloc_attrs(dev, qmem->alloc_sz, &qmem->iova,
+ GFP_KERNEL, DMA_ATTR_FORCE_CONTIGUOUS);
if (!qmem->base)
return -ENOMEM;
@@ -84,9 +80,10 @@ static inline void qmem_free(struct device *dev, struct qmem *qmem)
return;
if (qmem->base)
- dma_free_coherent(dev, qmem->alloc_sz,
- qmem->base - qmem->align,
- qmem->iova - qmem->align);
+ dma_free_attrs(dev, qmem->alloc_sz,
+ qmem->base - qmem->align,
+ qmem->iova - qmem->align,
+ DMA_ATTR_FORCE_CONTIGUOUS);
devm_kfree(dev, qmem);
}
@@ -146,15 +143,14 @@ enum nix_scheduler {
#define TXSCH_RR_QTM_MAX ((1 << 24) - 1)
#define TXSCH_TL1_DFLT_RR_QTM TXSCH_RR_QTM_MAX
#define TXSCH_TL1_DFLT_RR_PRIO (0x1ull)
-#define MAX_SCHED_WEIGHT 0xFF
-#define DFLT_RR_WEIGHT 71
-#define DFLT_RR_QTM ((DFLT_RR_WEIGHT * TXSCH_RR_QTM_MAX) \
- / MAX_SCHED_WEIGHT)
+#define CN10K_MAX_DWRR_WEIGHT 16384 /* Weight is 14bit on CN10K */
/* Min/Max packet sizes, excluding FCS */
#define NIC_HW_MIN_FRS 40
#define NIC_HW_MAX_FRS 9212
#define SDP_HW_MAX_FRS 65535
+#define CN10K_LMAC_LINK_MAX_FRS 16380 /* 16k - FCS */
+#define CN10K_LBK_LINK_MAX_FRS 65535 /* 64k */
/* NIX RX action operation*/
#define NIX_RX_ACTIONOP_DROP (0x0ull)
@@ -162,6 +158,8 @@ enum nix_scheduler {
#define NIX_RX_ACTIONOP_UCAST_IPSEC (0x2ull)
#define NIX_RX_ACTIONOP_MCAST (0x3ull)
#define NIX_RX_ACTIONOP_RSS (0x4ull)
+/* Use the RX action set in the default unicast entry */
+#define NIX_RX_ACTION_DEFAULT (0xfull)
/* NIX TX action operation*/
#define NIX_TX_ACTIONOP_DROP (0x0ull)
@@ -174,17 +172,31 @@ enum nix_scheduler {
#define NPC_MCAM_KEY_X2 1
#define NPC_MCAM_KEY_X4 2
-#define NIX_INTF_RX 0
-#define NIX_INTF_TX 1
+#define NIX_INTFX_RX(a) (0x0ull | (a) << 1)
+#define NIX_INTFX_TX(a) (0x1ull | (a) << 1)
+
+/* Default interfaces are NIX0_RX and NIX0_TX */
+#define NIX_INTF_RX NIX_INTFX_RX(0)
+#define NIX_INTF_TX NIX_INTFX_TX(0)
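A quick sanity check of the encoding (bit 0 selects RX/TX, the higher bits select the NIX block); this is a standalone user-space snippet, not part of the driver:

#include <assert.h>

#define NIX_INTFX_RX(a)	(0x0ull | (a) << 1)
#define NIX_INTFX_TX(a)	(0x1ull | (a) << 1)

int main(void)
{
	assert(NIX_INTFX_RX(0) == 0);	/* NIX0 RX */
	assert(NIX_INTFX_TX(0) == 1);	/* NIX0 TX */
	assert(NIX_INTFX_RX(1) == 2);	/* NIX1 RX */
	assert(NIX_INTFX_TX(1) == 3);	/* NIX1 TX */
	return 0;
}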
#define NIX_INTF_TYPE_CGX 0
#define NIX_INTF_TYPE_LBK 1
+#define NIX_INTF_TYPE_SDP 2
#define MAX_LMAC_PKIND 12
#define NIX_LINK_CGX_LMAC(a, b) (0 + 4 * (a) + (b))
#define NIX_LINK_LBK(a) (12 + (a))
#define NIX_CHAN_CGX_LMAC_CHX(a, b, c) (0x800 + 0x100 * (a) + 0x10 * (b) + (c))
#define NIX_CHAN_LBK_CHX(a, b) (0 + 0x100 * (a) + (b))
+#define NIX_CHAN_SDP_CH_START (0x700ull)
+#define NIX_CHAN_SDP_CHX(a) (NIX_CHAN_SDP_CH_START + (a))
+#define NIX_CHAN_SDP_NUM_CHANS 256
+#define NIX_CHAN_CPT_CH_START (0x800ull)
+
+/* The mask is to extract lower 10-bits of channel number
+ * which CPT will pass to X2P.
+ */
+#define NIX_CHAN_CPT_X2P_MASK (0x3ffull)
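For illustration, the channel macro and the X2P mask compose as below; the values are worked examples in a standalone snippet, not driver code:

#include <stdio.h>

#define NIX_CHAN_CGX_LMAC_CHX(a, b, c)	(0x800 + 0x100 * (a) + 0x10 * (b) + (c))
#define NIX_CHAN_CPT_X2P_MASK		(0x3ffull)

int main(void)
{
	unsigned int chan = NIX_CHAN_CGX_LMAC_CHX(1, 2, 3);	/* CGX1, LMAC2, channel 3 */

	printf("channel     = 0x%x\n", chan);				/* 0x923 */
	printf("x2p channel = 0x%llx\n", chan & NIX_CHAN_CPT_X2P_MASK);	/* lower 10 bits: 0x123 */
	return 0;
}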
/* NIX LSO format indices.
* As of now TSO is the only one using, so statically assigning indices.
@@ -206,6 +218,8 @@ enum ndc_idx_e {
NIX0_RX = 0x0,
NIX0_TX = 0x1,
NPA0_U = 0x2,
+ NIX1_RX = 0x4,
+ NIX1_TX = 0x5,
};
enum ndc_ctype_e {
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
new file mode 100644
index 000000000000..9fc73844d5c0
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell CN10K RPM driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include "rvu.h"
+#include "cgx.h"
+/**
+ * struct lmac - per lmac locks and properties
+ * @wq_cmd_cmplt: waitq to keep the process blocked until cmd completion
+ * @cmd_lock: Lock to serialize the command interface
+ * @resp: command response
+ * @link_info: link related information
+ * @mac_to_index_bmap: Mac address to CGX table index mapping
+ * @rx_fc_pfvf_bmap: Receive flow control enabled netdev mapping
+ * @tx_fc_pfvf_bmap: Transmit flow control enabled netdev mapping
+ * @event_cb: callback for linkchange events
+ * @event_cb_lock: lock for serializing callback with unregister
+ * @cgx: parent cgx port
+ * @mcast_filters_count: count of multicast address filters
+ * @lmac_id: lmac port id
+ * @cmd_pend: flag set before new command is started
+ * flag cleared after command response is received
+ * @name: lmac port name
+ */
+struct lmac {
+ wait_queue_head_t wq_cmd_cmplt;
+ /* Lock to serialize the command interface */
+ struct mutex cmd_lock;
+ u64 resp;
+ struct cgx_link_user_info link_info;
+ struct rsrc_bmap mac_to_index_bmap;
+ struct rsrc_bmap rx_fc_pfvf_bmap;
+ struct rsrc_bmap tx_fc_pfvf_bmap;
+ struct cgx_event_cb event_cb;
+ /* lock for serializing callback with unregister */
+ spinlock_t event_cb_lock;
+ struct cgx *cgx;
+ u8 mcast_filters_count;
+ u8 lmac_id;
+ bool cmd_pend;
+ char *name;
+};
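The cmd_lock/cmd_pend/wq_cmd_cmplt trio suggests the usual blocking command pattern; the sketch below illustrates that pattern only, it is not the driver's actual cgx_fwi_cmd_send() implementation, and the timeout value is arbitrary:

static int lmac_fw_cmd_sketch(struct lmac *lmac, u64 req, u64 *resp)
{
	int err = 0;

	mutex_lock(&lmac->cmd_lock);		/* one firmware command at a time */
	lmac->cmd_pend = true;			/* cleared by the completion IRQ handler */

	/* ... write 'req' to the command scratch CSR here ... */

	if (!wait_event_timeout(lmac->wq_cmd_cmplt, !lmac->cmd_pend,
				msecs_to_jiffies(2000)))
		err = -EIO;			/* no completion event from firmware */
	else
		*resp = lmac->resp;		/* response captured by the IRQ handler */

	mutex_unlock(&lmac->cmd_lock);
	return err;
}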
+
+/* CGX & RPM have different feature sets;
+ * the structure fields below are filled in differently for each MAC.
+ */
+struct mac_ops {
+ char *name;
+	/* CSRs for features like the DMAC filter differ by a fixed
+	 * BAR offset, for example:
+ * CGX DMAC_CTL0 0x1f8
+ * RPM DMAC_CTL0 0x4ff8
+ */
+ u64 csr_offset;
+	/* For ATF to send events to the kernel there is no dedicated
+	 * interrupt defined, hence CGX uses the OVERFLOW bit in CMR_INT.
+	 * The RPM block supports SW_INT, so ATF triggers that interrupt
+	 * after processing the requested command.
+ */
+ u64 int_register;
+ u64 int_set_reg;
+	/* lmac offset is different in RPM */
+ u8 lmac_offset;
+ u8 irq_offset;
+ u8 int_ena_bit;
+ u8 lmac_fwi;
+ u32 fifo_len;
+ bool non_contiguous_serdes_lane;
+	/* RPM & CGX differ in the number of receive/transmit stats */
+ u8 rx_stats_cnt;
+ u8 tx_stats_cnt;
+
+	/* In case of RPM, get the number of lmacs from
+	 * RPMX_CMR_RX_LMACS[LMAC_EXIST]; the number of set bits
+	 * in lmac_exist tells the number of lmacs.
+	 */
+ int (*get_nr_lmacs)(void *cgx);
+ u8 (*get_lmac_type)(void *cgx, int lmac_id);
+ int (*mac_lmac_intl_lbk)(void *cgx, int lmac_id,
+ bool enable);
+ /* Register Stats related functions */
+ int (*mac_get_rx_stats)(void *cgx, int lmac_id,
+ int idx, u64 *rx_stat);
+ int (*mac_get_tx_stats)(void *cgx, int lmac_id,
+ int idx, u64 *tx_stat);
+ /* Enable LMAC Pause Frame Configuration */
+ void (*mac_enadis_rx_pause_fwding)(void *cgxd,
+ int lmac_id,
+ bool enable);
+ int (*mac_get_pause_frm_status)(void *cgxd,
+ int lmac_id,
+ u8 *tx_pause,
+ u8 *rx_pause);
+ int (*mac_enadis_pause_frm)(void *cgxd,
+ int lmac_id,
+ u8 tx_pause,
+ u8 rx_pause);
+ void (*mac_pause_frm_config)(void *cgxd,
+ int lmac_id,
+ bool enable);
+ /* Enable/Disable Inbound PTP */
+ void (*mac_enadis_ptp_config)(void *cgxd,
+ int lmac_id,
+ bool enable);
+ int (*mac_rx_tx_enable)(void *cgxd, int lmac_id, bool enable);
+ int (*mac_tx_enable)(void *cgxd, int lmac_id, bool enable);
+
+ int (*pfc_config)(void *cgxd, int lmac_id,
+ u8 tx_pause, u8 rx_pause, u16 pfc_en);
+
+ int (*mac_get_pfc_frm_cfg)(void *cgxd, int lmac_id,
+ u8 *tx_pause, u8 *rx_pause);
+
+};
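As an illustration of csr_offset, a shared accessor can simply add it to a base register offset so the same code reaches DMAC_CTL0 on both MACs; the base value is the CGX offset quoted in the comment above, and treating csr_offset as the RPM-vs-CGX delta (0 for CGX) is an assumption of this sketch:

#define MAC_DMAC_CTL0_BASE	0x1f8	/* CGX offset of DMAC_CTL0 */

static u64 mac_dmac_ctl0_offset(const struct mac_ops *ops)
{
	return MAC_DMAC_CTL0_BASE + ops->csr_offset;
}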
+
+struct cgx {
+ void __iomem *reg_base;
+ struct pci_dev *pdev;
+ u8 cgx_id;
+ u8 lmac_count;
+ struct lmac *lmac_idmap[MAX_LMAC_PER_CGX];
+ struct work_struct cgx_cmd_work;
+ struct workqueue_struct *cgx_cmd_workq;
+ struct list_head cgx_list;
+ u64 hw_features;
+ struct mac_ops *mac_ops;
+ /* Lock to serialize read/write of global csrs like
+ * RPMX_MTI_STAT_DATA_HI_CDC etc
+ */
+ struct mutex lock;
+ unsigned long lmac_bmap; /* bitmap of enabled lmacs */
+};
+
+typedef struct cgx rpm_t;
+
+/* Function Declarations */
+void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val);
+u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset);
+struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx);
+int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac);
+int cgx_fwi_cmd_generic(u64 req, u64 *resp, struct cgx *cgx, int lmac_id);
+bool is_lmac_valid(struct cgx *cgx, int lmac_id);
+struct mac_ops *rpm_get_mac_ops(void);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
index bbabb8e64201..2898931d5260 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
@@ -1,11 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
-/* Marvell OcteonTx2 RVU Admin Function driver
+/* Marvell RVU Admin Function driver
*
- * Copyright (C) 2018 Marvell International Ltd.
+ * Copyright (C) 2018 Marvell.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/module.h>
@@ -20,9 +17,9 @@ static const u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
void __otx2_mbox_reset(struct otx2_mbox *mbox, int devid)
{
- void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE);
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
struct mbox_hdr *tx_hdr, *rx_hdr;
+ void *hw_mbase = mdev->hwbase;
tx_hdr = hw_mbase + mbox->tx_start;
rx_hdr = hw_mbase + mbox->rx_start;
@@ -56,12 +53,9 @@ void otx2_mbox_destroy(struct otx2_mbox *mbox)
}
EXPORT_SYMBOL(otx2_mbox_destroy);
-int otx2_mbox_init(struct otx2_mbox *mbox, void *hwbase, struct pci_dev *pdev,
- void *reg_base, int direction, int ndevs)
+static int otx2_mbox_setup(struct otx2_mbox *mbox, struct pci_dev *pdev,
+ void *reg_base, int direction, int ndevs)
{
- struct otx2_mbox_dev *mdev;
- int devid;
-
switch (direction) {
case MBOX_DIR_AFPF:
case MBOX_DIR_PFVF:
@@ -121,7 +115,6 @@ int otx2_mbox_init(struct otx2_mbox *mbox, void *hwbase, struct pci_dev *pdev,
}
mbox->reg_base = reg_base;
- mbox->hwbase = hwbase;
mbox->pdev = pdev;
mbox->dev = kcalloc(ndevs, sizeof(struct otx2_mbox_dev), GFP_KERNEL);
@@ -129,11 +122,27 @@ int otx2_mbox_init(struct otx2_mbox *mbox, void *hwbase, struct pci_dev *pdev,
otx2_mbox_destroy(mbox);
return -ENOMEM;
}
-
mbox->ndevs = ndevs;
+
+ return 0;
+}
+
+int otx2_mbox_init(struct otx2_mbox *mbox, void *hwbase, struct pci_dev *pdev,
+ void *reg_base, int direction, int ndevs)
+{
+ struct otx2_mbox_dev *mdev;
+ int devid, err;
+
+ err = otx2_mbox_setup(mbox, pdev, reg_base, direction, ndevs);
+ if (err)
+ return err;
+
+ mbox->hwbase = hwbase;
+
for (devid = 0; devid < ndevs; devid++) {
mdev = &mbox->dev[devid];
mdev->mbase = mbox->hwbase + (devid * MBOX_SIZE);
+ mdev->hwbase = mdev->mbase;
spin_lock_init(&mdev->mbox_lock);
/* Init header to reset value */
otx2_mbox_reset(mbox, devid);
@@ -143,6 +152,35 @@ int otx2_mbox_init(struct otx2_mbox *mbox, void *hwbase, struct pci_dev *pdev,
}
EXPORT_SYMBOL(otx2_mbox_init);
+/* Initialize mailbox with the set of mailbox region addresses
+ * in the array hwbase.
+ */
+int otx2_mbox_regions_init(struct otx2_mbox *mbox, void **hwbase,
+ struct pci_dev *pdev, void *reg_base,
+ int direction, int ndevs)
+{
+ struct otx2_mbox_dev *mdev;
+ int devid, err;
+
+ err = otx2_mbox_setup(mbox, pdev, reg_base, direction, ndevs);
+ if (err)
+ return err;
+
+ mbox->hwbase = hwbase[0];
+
+ for (devid = 0; devid < ndevs; devid++) {
+ mdev = &mbox->dev[devid];
+ mdev->mbase = hwbase[devid];
+ mdev->hwbase = hwbase[devid];
+ spin_lock_init(&mdev->mbox_lock);
+ /* Init header to reset value */
+ otx2_mbox_reset(mbox, devid);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(otx2_mbox_regions_init);
+
int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid)
{
unsigned long timeout = jiffies + msecs_to_jiffies(MBOX_RSP_TIMEOUT);
@@ -175,9 +213,9 @@ EXPORT_SYMBOL(otx2_mbox_busy_poll_for_rsp);
void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
{
- void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE);
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
struct mbox_hdr *tx_hdr, *rx_hdr;
+ void *hw_mbase = mdev->hwbase;
tx_hdr = hw_mbase + mbox->tx_start;
rx_hdr = hw_mbase + mbox->rx_start;
@@ -371,5 +409,5 @@ const char *otx2_mbox_id2name(u16 id)
}
EXPORT_SYMBOL(otx2_mbox_id2name);
-MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_AUTHOR("Marvell.");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 263a21129416..98dc16ab639e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -1,11 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Marvell OcteonTx2 RVU Admin Function driver
+/* Marvell RVU Admin Function driver
*
- * Copyright (C) 2018 Marvell International Ltd.
+ * Copyright (C) 2018 Marvell.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef MBOX_H
@@ -36,7 +33,7 @@
#define INTR_MASK(pfvfs) ((pfvfs < 64) ? (BIT_ULL(pfvfs) - 1) : (~0ull))
-#define MBOX_RSP_TIMEOUT 2000 /* Time(ms) to wait for mbox response */
+#define MBOX_RSP_TIMEOUT 3000 /* Time(ms) to wait for mbox response */
#define MBOX_MSG_ALIGN 16 /* Align mbox msg start to 16bytes */
@@ -52,6 +49,7 @@
struct otx2_mbox_dev {
void *mbase; /* This dev's mbox region */
+ void *hwbase;
spinlock_t mbox_lock;
u16 msg_size; /* Total msg size to be sent */
u16 rsp_size; /* Total rsp size to be sure the reply is ok */
@@ -86,7 +84,7 @@ struct mbox_msghdr {
#define OTX2_MBOX_REQ_SIG (0xdead)
#define OTX2_MBOX_RSP_SIG (0xbeef)
u16 sig; /* Signature, for validating corrupted msgs */
-#define OTX2_MBOX_VERSION (0x0001)
+#define OTX2_MBOX_VERSION (0x000b)
u16 ver; /* Version of msg's structure for this ID */
u16 next_msgoff; /* Offset of next msg within mailbox region */
int rc; /* Msg process'ed response code */
@@ -98,6 +96,9 @@ void otx2_mbox_destroy(struct otx2_mbox *mbox);
int otx2_mbox_init(struct otx2_mbox *mbox, void __force *hwbase,
struct pci_dev *pdev, void __force *reg_base,
int direction, int ndevs);
+int otx2_mbox_regions_init(struct otx2_mbox *mbox, void __force **hwbase,
+ struct pci_dev *pdev, void __force *reg_base,
+ int direction, int ndevs);
void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid);
int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid);
int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid);
@@ -126,10 +127,15 @@ static inline struct mbox_msghdr *otx2_mbox_alloc_msg(struct otx2_mbox *mbox,
M(READY, 0x001, ready, msg_req, ready_msg_rsp) \
M(ATTACH_RESOURCES, 0x002, attach_resources, rsrc_attach, msg_rsp) \
M(DETACH_RESOURCES, 0x003, detach_resources, rsrc_detach, msg_rsp) \
+M(FREE_RSRC_CNT, 0x004, free_rsrc_cnt, msg_req, free_rsrcs_rsp) \
M(MSIX_OFFSET, 0x005, msix_offset, msg_req, msix_offset_rsp) \
M(VF_FLR, 0x006, vf_flr, msg_req, msg_rsp) \
M(PTP_OP, 0x007, ptp_op, ptp_req, ptp_rsp) \
M(GET_HW_CAP, 0x008, get_hw_cap, msg_req, get_hw_cap_rsp) \
+M(NDC_SYNC_OP, 0x009, ndc_sync_op, ndc_sync_op, msg_rsp) \
+M(LMTST_TBL_SETUP, 0x00a, lmtst_tbl_setup, lmtst_tbl_setup_req, \
+ msg_rsp) \
+M(SET_VF_PERM, 0x00b, set_vf_perm, set_vf_perm, msg_rsp) \
/* CGX mbox IDs (range 0x200 - 0x3FF) */ \
M(CGX_START_RXTX, 0x200, cgx_start_rxtx, msg_req, msg_rsp) \
M(CGX_STOP_RXTX, 0x201, cgx_stop_rxtx, msg_req, msg_rsp) \
@@ -149,6 +155,33 @@ M(CGX_PTP_RX_ENABLE, 0x20C, cgx_ptp_rx_enable, msg_req, msg_rsp) \
M(CGX_PTP_RX_DISABLE, 0x20D, cgx_ptp_rx_disable, msg_req, msg_rsp) \
M(CGX_CFG_PAUSE_FRM, 0x20E, cgx_cfg_pause_frm, cgx_pause_frm_cfg, \
cgx_pause_frm_cfg) \
+M(CGX_FW_DATA_GET, 0x20F, cgx_get_aux_link_info, msg_req, cgx_fw_data) \
+M(CGX_FEC_SET, 0x210, cgx_set_fec_param, fec_mode, fec_mode) \
+M(CGX_MAC_ADDR_ADD, 0x211, cgx_mac_addr_add, cgx_mac_addr_add_req, \
+ cgx_mac_addr_add_rsp) \
+M(CGX_MAC_ADDR_DEL, 0x212, cgx_mac_addr_del, cgx_mac_addr_del_req, \
+ msg_rsp) \
+M(CGX_MAC_MAX_ENTRIES_GET, 0x213, cgx_mac_max_entries_get, msg_req, \
+ cgx_max_dmac_entries_get_rsp) \
+M(CGX_SET_LINK_STATE, 0x214, cgx_set_link_state, \
+ cgx_set_link_state_msg, msg_rsp) \
+M(CGX_GET_PHY_MOD_TYPE, 0x215, cgx_get_phy_mod_type, msg_req, \
+ cgx_phy_mod_type) \
+M(CGX_SET_PHY_MOD_TYPE, 0x216, cgx_set_phy_mod_type, cgx_phy_mod_type, \
+ msg_rsp) \
+M(CGX_FEC_STATS, 0x217, cgx_fec_stats, msg_req, cgx_fec_stats_rsp) \
+M(CGX_SET_LINK_MODE, 0x218, cgx_set_link_mode, cgx_set_link_mode_req,\
+ cgx_set_link_mode_rsp) \
+M(CGX_GET_PHY_FEC_STATS, 0x219, cgx_get_phy_fec_stats, msg_req, msg_rsp) \
+M(CGX_STATS_RST, 0x21A, cgx_stats_rst, msg_req, msg_rsp) \
+M(CGX_FEATURES_GET, 0x21B, cgx_features_get, msg_req, \
+ cgx_features_info_msg) \
+M(RPM_STATS, 0x21C, rpm_stats, msg_req, rpm_stats_rsp) \
+M(CGX_MAC_ADDR_RESET, 0x21D, cgx_mac_addr_reset, msg_req, msg_rsp) \
+M(CGX_MAC_ADDR_UPDATE, 0x21E, cgx_mac_addr_update, cgx_mac_addr_update_req, \
+ msg_rsp) \
+M(CGX_PRIO_FLOW_CTRL_CFG, 0x21F, cgx_prio_flow_ctrl_cfg, cgx_pfc_cfg, \
+ cgx_pfc_rsp) \
/* NPA mbox IDs (range 0x400 - 0x5FF) */ \
M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, \
npa_lf_alloc_req, npa_lf_alloc_rsp) \
@@ -156,8 +189,66 @@ M(NPA_LF_FREE, 0x401, npa_lf_free, msg_req, msg_rsp) \
M(NPA_AQ_ENQ, 0x402, npa_aq_enq, npa_aq_enq_req, npa_aq_enq_rsp) \
M(NPA_HWCTX_DISABLE, 0x403, npa_hwctx_disable, hwctx_disable_req, msg_rsp)\
/* SSO/SSOW mbox IDs (range 0x600 - 0x7FF) */ \
+M(SSO_LF_ALLOC, 0x600, sso_lf_alloc, \
+ sso_lf_alloc_req, sso_lf_alloc_rsp) \
+M(SSO_LF_FREE, 0x601, sso_lf_free, \
+ sso_lf_free_req, msg_rsp) \
+M(SSOW_LF_ALLOC, 0x602, ssow_lf_alloc, \
+ ssow_lf_alloc_req, msg_rsp) \
+M(SSOW_LF_FREE, 0x603, ssow_lf_free, \
+ ssow_lf_free_req, msg_rsp) \
+M(SSO_HW_SETCONFIG, 0x604, sso_hw_setconfig, \
+ sso_hw_setconfig, msg_rsp) \
+M(SSO_GRP_SET_PRIORITY, 0x605, sso_grp_set_priority, \
+ sso_grp_priority, msg_rsp) \
+M(SSO_GRP_GET_PRIORITY, 0x606, sso_grp_get_priority, \
+ sso_info_req, sso_grp_priority) \
+M(SSO_WS_CACHE_INV, 0x607, sso_ws_cache_inv, msg_req, msg_rsp) \
+M(SSO_GRP_QOS_CONFIG, 0x608, sso_grp_qos_config, sso_grp_qos_cfg, msg_rsp)\
+M(SSO_GRP_GET_STATS, 0x609, sso_grp_get_stats, sso_info_req, sso_grp_stats)\
+M(SSO_HWS_GET_STATS, 0x610, sso_hws_get_stats, sso_info_req, sso_hws_stats)\
+M(SSO_HW_RELEASE_XAQ, 0x611, sso_hw_release_xaq_aura, \
+ sso_release_xaq, msg_rsp) \
+M(SSO_CONFIG_LSW, 0x612, ssow_config_lsw, \
+ ssow_config_lsw, msg_rsp) \
+M(SSO_HWS_CHNG_MSHIP, 0x613, ssow_chng_mship, ssow_chng_mship, msg_rsp)\
/* TIM mbox IDs (range 0x800 - 0x9FF) */ \
+M(TIM_LF_ALLOC, 0x800, tim_lf_alloc, \
+ tim_lf_alloc_req, tim_lf_alloc_rsp) \
+M(TIM_LF_FREE, 0x801, tim_lf_free, tim_ring_req, msg_rsp) \
+M(TIM_CONFIG_RING, 0x802, tim_config_ring, tim_config_req, msg_rsp)\
+M(TIM_ENABLE_RING, 0x803, tim_enable_ring, tim_ring_req, tim_enable_rsp)\
+M(TIM_DISABLE_RING, 0x804, tim_disable_ring, tim_ring_req, msg_rsp) \
+M(TIM_GET_MIN_INTVL, 0x805, tim_get_min_intvl, tim_intvl_req, \
+ tim_intvl_rsp) \
/* CPT mbox IDs (range 0xA00 - 0xBFF) */ \
+M(CPT_LF_ALLOC, 0xA00, cpt_lf_alloc, cpt_lf_alloc_req_msg, \
+ msg_rsp) \
+M(CPT_LF_FREE, 0xA01, cpt_lf_free, msg_req, msg_rsp) \
+M(CPT_RD_WR_REGISTER, 0xA02, cpt_rd_wr_register, cpt_rd_wr_reg_msg, \
+ cpt_rd_wr_reg_msg) \
+M(CPT_INLINE_IPSEC_CFG, 0xA04, cpt_inline_ipsec_cfg, \
+ cpt_inline_ipsec_cfg_msg, msg_rsp) \
+M(CPT_STATS, 0xA05, cpt_sts, cpt_sts_req, cpt_sts_rsp) \
+M(CPT_RXC_TIME_CFG, 0xA06, cpt_rxc_time_cfg, cpt_rxc_time_cfg_req, \
+ msg_rsp) \
+M(CPT_CTX_CACHE_SYNC, 0xA07, cpt_ctx_cache_sync, msg_req, msg_rsp) \
+/* REE mbox IDs (range 0xE00 - 0xFFF) */ \
+M(REE_CONFIG_LF, 0xE01, ree_config_lf, ree_lf_req_msg, \
+ msg_rsp) \
+M(REE_RD_WR_REGISTER, 0xE02, ree_rd_wr_register, ree_rd_wr_reg_msg, \
+ ree_rd_wr_reg_msg) \
+M(REE_RULE_DB_PROG, 0xE03, ree_rule_db_prog, \
+ ree_rule_db_prog_req_msg, \
+ msg_rsp) \
+M(REE_RULE_DB_LEN_GET, 0xE04, ree_rule_db_len_get, ree_req_msg, \
+ ree_rule_db_len_rsp_msg) \
+M(REE_RULE_DB_GET, 0xE05, ree_rule_db_get, \
+ ree_rule_db_get_req_msg, \
+ ree_rule_db_get_rsp_msg) \
+/* SDP mbox IDs (range 0x1000 - 0x11FF) */ \
+M(SET_SDP_CHAN_INFO, 0x1000, set_sdp_chan_info, sdp_chan_info_msg, msg_rsp) \
+M(GET_SDP_CHAN_INFO, 0x1001, get_sdp_chan_info, msg_req, sdp_get_chan_info_msg) \
/* NPC mbox IDs (range 0x6000 - 0x7FFF) */ \
M(NPC_MCAM_ALLOC_ENTRY, 0x6000, npc_mcam_alloc_entry, npc_mcam_alloc_entry_req,\
npc_mcam_alloc_entry_rsp) \
@@ -188,19 +279,35 @@ M(NPC_MCAM_ALLOC_AND_WRITE_ENTRY, 0x600b, npc_mcam_alloc_and_write_entry, \
npc_mcam_alloc_and_write_entry_rsp) \
M(NPC_GET_KEX_CFG, 0x600c, npc_get_kex_cfg, \
msg_req, npc_get_kex_cfg_rsp) \
+M(NPC_INSTALL_FLOW, 0x600d, npc_install_flow, \
+ npc_install_flow_req, npc_install_flow_rsp) \
+M(NPC_DELETE_FLOW, 0x600e, npc_delete_flow, \
+ npc_delete_flow_req, msg_rsp) \
+M(NPC_MCAM_READ_ENTRY, 0x600f, npc_mcam_read_entry, \
+ npc_mcam_read_entry_req, \
+ npc_mcam_read_entry_rsp) \
+M(NPC_SET_PKIND, 0x6010, npc_set_pkind, \
+ npc_set_pkind, msg_rsp) \
+M(NPC_MCAM_READ_BASE_RULE, 0x6011, npc_read_base_steer_rule, \
+ msg_req, npc_mcam_read_base_rule_rsp) \
+M(NPC_MCAM_GET_STATS, 0x6012, npc_mcam_entry_stats, \
+ npc_mcam_get_stats_req, \
+ npc_mcam_get_stats_rsp) \
/* NIX mbox IDs (range 0x8000 - 0xFFFF) */ \
M(NIX_LF_ALLOC, 0x8000, nix_lf_alloc, \
nix_lf_alloc_req, nix_lf_alloc_rsp) \
-M(NIX_LF_FREE, 0x8001, nix_lf_free, msg_req, msg_rsp) \
+M(NIX_LF_FREE, 0x8001, nix_lf_free, nix_lf_free_req, msg_rsp) \
M(NIX_AQ_ENQ, 0x8002, nix_aq_enq, nix_aq_enq_req, nix_aq_enq_rsp) \
M(NIX_HWCTX_DISABLE, 0x8003, nix_hwctx_disable, \
hwctx_disable_req, msg_rsp) \
M(NIX_TXSCH_ALLOC, 0x8004, nix_txsch_alloc, \
nix_txsch_alloc_req, nix_txsch_alloc_rsp) \
M(NIX_TXSCH_FREE, 0x8005, nix_txsch_free, nix_txsch_free_req, msg_rsp) \
-M(NIX_TXSCHQ_CFG, 0x8006, nix_txschq_cfg, nix_txschq_config, msg_rsp) \
+M(NIX_TXSCHQ_CFG, 0x8006, nix_txschq_cfg, nix_txschq_config, \
+ nix_txschq_config) \
M(NIX_STATS_RST, 0x8007, nix_stats_rst, msg_req, msg_rsp) \
-M(NIX_VTAG_CFG, 0x8008, nix_vtag_cfg, nix_vtag_config, msg_rsp) \
+M(NIX_VTAG_CFG, 0x8008, nix_vtag_cfg, nix_vtag_config, \
+ nix_vtag_config_rsp) \
M(NIX_RSS_FLOWKEY_CFG, 0x8009, nix_rss_flowkey_cfg, \
nix_rss_flowkey_cfg, \
nix_rss_flowkey_cfg_rsp) \
@@ -216,22 +323,45 @@ M(NIX_SET_RX_CFG, 0x8010, nix_set_rx_cfg, nix_rx_cfg, msg_rsp) \
M(NIX_LSO_FORMAT_CFG, 0x8011, nix_lso_format_cfg, \
nix_lso_format_cfg, \
nix_lso_format_cfg_rsp) \
-M(NIX_RXVLAN_ALLOC, 0x8012, nix_rxvlan_alloc, msg_req, msg_rsp) \
M(NIX_LF_PTP_TX_ENABLE, 0x8013, nix_lf_ptp_tx_enable, msg_req, msg_rsp) \
M(NIX_LF_PTP_TX_DISABLE, 0x8014, nix_lf_ptp_tx_disable, msg_req, msg_rsp) \
+M(NIX_SET_VLAN_TPID, 0x8015, nix_set_vlan_tpid, nix_set_vlan_tpid, msg_rsp) \
M(NIX_BP_ENABLE, 0x8016, nix_bp_enable, nix_bp_cfg_req, \
nix_bp_cfg_rsp) \
M(NIX_BP_DISABLE, 0x8017, nix_bp_disable, nix_bp_cfg_req, msg_rsp) \
M(NIX_GET_MAC_ADDR, 0x8018, nix_get_mac_addr, msg_req, nix_get_mac_addr_rsp) \
+M(NIX_INLINE_IPSEC_CFG, 0x8019, nix_inline_ipsec_cfg, \
+ nix_inline_ipsec_cfg, msg_rsp) \
+M(NIX_INLINE_IPSEC_LF_CFG, 0x801a, nix_inline_ipsec_lf_cfg, \
+ nix_inline_ipsec_lf_cfg, msg_rsp) \
+M(NIX_CN10K_AQ_ENQ, 0x801b, nix_cn10k_aq_enq, nix_cn10k_aq_enq_req, \
+ nix_cn10k_aq_enq_rsp) \
+M(NIX_GET_HW_INFO, 0x801c, nix_get_hw_info, msg_req, nix_hw_info) \
+M(NIX_BANDPROF_ALLOC, 0x801d, nix_bandprof_alloc, nix_bandprof_alloc_req, \
+ nix_bandprof_alloc_rsp) \
+M(NIX_BANDPROF_FREE, 0x801e, nix_bandprof_free, nix_bandprof_free_req, \
+ msg_rsp) \
+M(NIX_BANDPROF_GET_HWINFO, 0x801f, nix_bandprof_get_hwinfo, msg_req, \
+ nix_bandprof_get_hwinfo_rsp) \
+M(NIX_CPT_BP_ENABLE, 0x8020, nix_cpt_bp_enable, nix_bp_cfg_req, \
+ nix_bp_cfg_rsp) \
+M(NIX_CPT_BP_DISABLE, 0x8021, nix_cpt_bp_disable, nix_bp_cfg_req, \
+ msg_rsp) \
+M(NIX_RX_SW_SYNC, 0x8022, nix_rx_sw_sync, msg_req, msg_rsp)
/* Messages initiated by AF (range 0xC00 - 0xDFF) */
#define MBOX_UP_CGX_MESSAGES \
-M(CGX_LINK_EVENT, 0xC00, cgx_link_event, cgx_link_info_msg, msg_rsp)
+M(CGX_LINK_EVENT, 0xC00, cgx_link_event, cgx_link_info_msg, msg_rsp) \
+M(CGX_PTP_RX_INFO, 0xC01, cgx_ptp_rx_info, cgx_ptp_rx_info_msg, msg_rsp)
+
+#define MBOX_UP_CPT_MESSAGES \
+M(CPT_INST_LMTST, 0xD00, cpt_inst_lmtst, cpt_inst_lmtst_req, msg_rsp)
enum {
#define M(_name, _id, _1, _2, _3) MBOX_MSG_ ## _name = _id,
MBOX_MESSAGES
MBOX_UP_CGX_MESSAGES
+MBOX_UP_CPT_MESSAGES
#undef M
};
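The M() list above is a classic X-macro: the same message list expands once into the MBOX_MSG_* enum and again elsewhere into handler plumbing. A self-contained miniature of the pattern (the two demo messages below are made up) looks like this:

#include <stdio.h>

#define DEMO_MESSAGES			\
M(READY,  0x001)			\
M(VF_FLR, 0x006)

enum {
#define M(_name, _id) DEMO_MSG_ ## _name = _id,
DEMO_MESSAGES
#undef M
};

static const char *demo_msg_name(int id)
{
	switch (id) {
#define M(_name, _id) case _id: return #_name;
DEMO_MESSAGES
#undef M
	default:
		return "unknown";
	}
}

int main(void)
{
	printf("%d -> %s\n", DEMO_MSG_READY, demo_msg_name(DEMO_MSG_READY));
	return 0;
}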
@@ -271,6 +401,17 @@ struct ready_msg_rsp {
 * or to detach part of a certain resource type.
* Rest of the fields specify how many of what type to
* be attached.
+ * To request LFs from two blocks of same type this mailbox
+ * can be sent twice as below:
+ * struct rsrc_attach *attach;
+ * .. Allocate memory for message ..
+ * attach->cptlfs = 3; <3 LFs from CPT0>
+ * .. Send message ..
+ * .. Allocate memory for message ..
+ * attach->modify = 1;
+ * attach->cpt_blkaddr = BLKADDR_CPT1;
+ * attach->cptlfs = 2; <2 LFs from CPT1>
+ * .. Send message ..
*/
struct rsrc_attach {
struct mbox_msghdr hdr;
@@ -281,6 +422,9 @@ struct rsrc_attach {
u16 ssow;
u16 timlfs;
u16 cptlfs;
+ u16 reelfs;
+ int cpt_blkaddr; /* BLKADDR_CPT0/BLKADDR_CPT1 or 0 for BLKADDR_CPT0 */
+ int ree_blkaddr; /* BLKADDR_REE0/BLKADDR_REE1 or 0 for BLKADDR_REE0 */
};
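The comment's two-step example can be written out concretely as below; mbox_send_attach() stands in for the usual allocate-message/send/wait-for-response sequence and is hypothetical, as is the stack allocation of the request:

static int attach_cpt0_and_cpt1_lfs(void)
{
	struct rsrc_attach req = { 0 };
	int err;

	/* First request: 3 LFs from the default block, CPT0 */
	req.cptlfs = 3;
	err = mbox_send_attach(&req);
	if (err)
		return err;

	/* Second request: modify the existing attachment and add 2 LFs from CPT1 */
	memset(&req, 0, sizeof(req));
	req.modify = 1;
	req.cpt_blkaddr = BLKADDR_CPT1;
	req.cptlfs = 2;
	return mbox_send_attach(&req);
}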
/* Structure for relinquishing resources.
@@ -297,6 +441,27 @@ struct rsrc_detach {
u8 ssow:1;
u8 timlfs:1;
u8 cptlfs:1;
+ u8 reelfs:1;
+};
+
+/*
+ * Number of resources available to the caller.
+ * In reply to MBOX_MSG_FREE_RSRC_CNT.
+ */
+struct free_rsrcs_rsp {
+ struct mbox_msghdr hdr;
+ u16 schq[NIX_TXSCH_LVL_CNT];
+ u16 sso;
+ u16 tim;
+ u16 ssow;
+ u16 cpt;
+ u8 npa;
+ u8 nix;
+ u16 schq_nix1[NIX_TXSCH_LVL_CNT];
+ u8 nix1;
+ u8 cpt1;
+ u8 ree0;
+ u8 ree1;
};
#define MSIX_VECTOR_INVALID 0xFFFF
@@ -306,14 +471,20 @@ struct msix_offset_rsp {
struct mbox_msghdr hdr;
u16 npa_msixoff;
u16 nix_msixoff;
- u8 sso;
- u8 ssow;
- u8 timlfs;
- u8 cptlfs;
+ u16 sso;
+ u16 ssow;
+ u16 timlfs;
+ u16 cptlfs;
u16 sso_msixoff[MAX_RVU_BLKLF_CNT];
u16 ssow_msixoff[MAX_RVU_BLKLF_CNT];
u16 timlf_msixoff[MAX_RVU_BLKLF_CNT];
u16 cptlf_msixoff[MAX_RVU_BLKLF_CNT];
+ u16 cpt1_lfs;
+ u16 ree0_lfs;
+ u16 ree1_lfs;
+ u16 cpt1_lf_msixoff[MAX_RVU_BLKLF_CNT];
+ u16 ree0_lf_msixoff[MAX_RVU_BLKLF_CNT];
+ u16 ree1_lf_msixoff[MAX_RVU_BLKLF_CNT];
};
struct get_hw_cap_rsp {
@@ -322,16 +493,48 @@ struct get_hw_cap_rsp {
u8 nix_shaping; /* Is shaping and coloring supported */
};
+struct ndc_sync_op {
+ struct mbox_msghdr hdr;
+ u8 nix_lf_tx_sync;
+ u8 nix_lf_rx_sync;
+ u8 npa_lf_sync;
+};
+
+struct lmtst_tbl_setup_req {
+ struct mbox_msghdr hdr;
+ u64 dis_sched_early_comp :1;
+ u64 sch_ena :1;
+ u64 dis_line_pref :1;
+ u64 ssow_pf_func :13;
+ u16 base_pcifunc;
+ u8 use_local_lmt_region;
+ u64 lmt_iova;
+ u64 rsvd[4];
+};
+
+struct set_vf_perm {
+ struct mbox_msghdr hdr;
+ u16 vf;
+#define RESET_VF_PERM BIT_ULL(0)
+#define VF_TRUSTED BIT_ULL(1)
+ u64 flags;
+};
+
/* CGX mbox message formats */
struct cgx_stats_rsp {
struct mbox_msghdr hdr;
-#define CGX_RX_STATS_COUNT 13
-#define CGX_TX_STATS_COUNT 18
+#define CGX_RX_STATS_COUNT 9
+#define CGX_TX_STATS_COUNT 18
u64 rx_stats[CGX_RX_STATS_COUNT];
u64 tx_stats[CGX_TX_STATS_COUNT];
};
+struct cgx_fec_stats_rsp {
+ struct mbox_msghdr hdr;
+ u64 fec_corr_blks;
+ u64 fec_uncorr_blks;
+};
/* Structure for requesting the operation for
* setting/getting mac address in the CGX interface
*/
@@ -340,11 +543,45 @@ struct cgx_mac_addr_set_or_get {
u8 mac_addr[ETH_ALEN];
};
+/* Structure for requesting the operation to
+ * add DMAC filter entry into CGX interface
+ */
+struct cgx_mac_addr_add_req {
+ struct mbox_msghdr hdr;
+ u8 mac_addr[ETH_ALEN];
+};
+
+/* Structure for response against the operation to
+ * add DMAC filter entry into CGX interface
+ */
+struct cgx_mac_addr_add_rsp {
+ struct mbox_msghdr hdr;
+ u8 index;
+};
+
+/* Structure for requesting the operation to
+ * delete DMAC filter entry from CGX interface
+ */
+struct cgx_mac_addr_del_req {
+ struct mbox_msghdr hdr;
+ u8 index;
+};
+
+/* Structure for response against the operation to
+ * get maximum supported DMAC filter entries
+ */
+struct cgx_max_dmac_entries_get_rsp {
+ struct mbox_msghdr hdr;
+ u8 max_dmac_filters;
+};
+
struct cgx_link_user_info {
uint64_t link_up:1;
uint64_t full_duplex:1;
uint64_t lmac_type_id:4;
uint64_t speed:20; /* speed in Mbps */
+ uint64_t an:1; /* AN supported or not */
+ uint64_t fec:2; /* FEC type if enabled else 0 */
#define LMACTYPE_STR_LEN 16
char lmac_type[LMACTYPE_STR_LEN];
};
@@ -354,6 +591,11 @@ struct cgx_link_info_msg {
struct cgx_link_user_info link_info;
};
+struct cgx_ptp_rx_info_msg {
+ struct mbox_msghdr hdr;
+ u8 ptp_en;
+};
+
struct cgx_pause_frm_cfg {
struct mbox_msghdr hdr;
u8 set;
@@ -363,6 +605,151 @@ struct cgx_pause_frm_cfg {
u8 tx_pause;
};
+enum fec_type {
+ OTX2_FEC_NONE,
+ OTX2_FEC_BASER,
+ OTX2_FEC_RS,
+ OTX2_FEC_STATS_CNT = 2,
+ OTX2_FEC_OFF,
+};
+
+struct fec_mode {
+ struct mbox_msghdr hdr;
+ int fec;
+};
+
+struct sfp_eeprom_s {
+#define SFP_EEPROM_SIZE 256
+ u16 sff_id;
+ u8 buf[SFP_EEPROM_SIZE];
+ u64 reserved;
+};
+
+struct phy_s {
+ struct {
+ u64 can_change_mod_type:1;
+ u64 mod_type:1;
+ u64 has_fec_stats:1;
+ } misc;
+ struct fec_stats_s {
+ u32 rsfec_corr_cws;
+ u32 rsfec_uncorr_cws;
+ u32 brfec_corr_blks;
+ u32 brfec_uncorr_blks;
+ } fec_stats;
+};
+
+struct cgx_lmac_fwdata_s {
+ u16 rw_valid;
+ u64 supported_fec;
+ u64 supported_an;
+ u64 supported_link_modes;
+ /* only applicable if AN is supported */
+ u64 advertised_fec;
+ u64 advertised_link_modes;
+ /* Only applicable if SFP/QSFP slot is present */
+ struct sfp_eeprom_s sfp_eeprom;
+ struct phy_s phy;
+#define LMAC_FWDATA_RESERVED_MEM 1021
+ u64 reserved[LMAC_FWDATA_RESERVED_MEM];
+};
+
+struct cgx_fw_data {
+ struct mbox_msghdr hdr;
+ struct cgx_lmac_fwdata_s fwdata;
+};
+
+struct cgx_set_link_mode_args {
+ u32 speed;
+ u8 duplex;
+ u8 an;
+ u8 mode_baseidx;
+ u64 mode;
+};
+
+struct cgx_set_link_mode_req {
+#define AUTONEG_UNKNOWN 0xff
+ struct mbox_msghdr hdr;
+ struct cgx_set_link_mode_args args;
+};
+
+struct cgx_set_link_mode_rsp {
+ struct mbox_msghdr hdr;
+ int status;
+};
+
+struct cgx_set_link_state_msg {
+ struct mbox_msghdr hdr;
+ u8 enable; /* '1' for link up, '0' for link down */
+};
+
+struct cgx_phy_mod_type {
+ struct mbox_msghdr hdr;
+ int mod;
+};
+
+struct cgx_mac_addr_update_req {
+ struct mbox_msghdr hdr;
+ u8 mac_addr[ETH_ALEN];
+ u8 index;
+};
+
+#define RVU_LMAC_FEAT_FC BIT_ULL(0) /* pause frames */
+#define RVU_LMAC_FEAT_HIGIG2 BIT_ULL(1)
+ /* flow control from physical link higig2 messages */
+#define RVU_LMAC_FEAT_PTP		BIT_ULL(2) /* precision time protocol */
+#define RVU_LMAC_FEAT_DMACF BIT_ULL(3) /* DMAC FILTER */
+#define RVU_MAC_VERSION BIT_ULL(4)
+#define RVU_MAC_CGX BIT_ULL(5)
+#define RVU_MAC_RPM BIT_ULL(6)
+
+struct cgx_features_info_msg {
+ struct mbox_msghdr hdr;
+ u64 lmac_features;
+};
+
+struct rpm_stats_rsp {
+ struct mbox_msghdr hdr;
+#define RPM_RX_STATS_COUNT 43
+#define RPM_TX_STATS_COUNT 34
+ u64 rx_stats[RPM_RX_STATS_COUNT];
+ u64 tx_stats[RPM_TX_STATS_COUNT];
+};
+
+struct cgx_pfc_cfg {
+ struct mbox_msghdr hdr;
+ u8 rx_pause;
+ u8 tx_pause;
+ u16 pfc_en; /* bitmap indicating pfc enabled traffic classes */
+};
+
+struct cgx_pfc_rsp {
+ struct mbox_msghdr hdr;
+ u8 rx_pause;
+ u8 tx_pause;
+};
+
+ /* NPC mbox message formats */
+
+struct npc_set_pkind {
+ struct mbox_msghdr hdr;
+#define OTX2_PRIV_FLAGS_DEFAULT BIT_ULL(0)
+#define OTX2_PRIV_FLAGS_EDSA BIT_ULL(1)
+#define OTX2_PRIV_FLAGS_HIGIG BIT_ULL(2)
+#define OTX2_PRIV_FLAGS_FDSA BIT_ULL(3)
+#define OTX2_PRIV_FLAGS_CUSTOM BIT_ULL(63)
+ u64 mode;
+#define PKIND_TX BIT_ULL(0)
+#define PKIND_RX BIT_ULL(1)
+ u8 dir;
+	u8 pkind; /* valid only in case of the custom flag */
+ u8 var_len_off; /* Offset of custom header length field.
+ * Valid only for pkind NPC_RX_CUSTOM_PRE_L2_PKIND
+ */
+	u8 var_len_off_mask; /* Mask for the length within the offset */
+ u8 shift_dir; /* shift direction to get length of the header at var_len_off */
+};
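A hypothetical fill-out of this request (all values are examples, not taken from the driver) selecting the custom pre-L2 pkind on the RX side, with the variable header length read from one byte of the custom header:

static void npc_set_pkind_example(struct npc_set_pkind *req)
{
	req->mode = OTX2_PRIV_FLAGS_CUSTOM;
	req->dir = PKIND_RX;
	req->pkind = NPC_RX_CUSTOM_PRE_L2_PKIND;	/* defined in npc.h below */
	req->var_len_off = 2;		/* length field at byte offset 2 (example) */
	req->var_len_off_mask = 0x3f;	/* low 6 bits hold the length (example) */
	req->shift_dir = 0;		/* no extra shift (example) */
}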
+
/* NPA mbox message formats */
/* NPA mailbox error codes
@@ -391,6 +778,7 @@ struct npa_lf_alloc_rsp {
u32 stack_pg_ptrs; /* No of ptrs per stack page */
u32 stack_pg_bytes; /* Size of stack page */
u16 qints; /* NPA_AF_CONST::QINTS */
+	u8 cache_lines; /* BATCH ALLOC DMA */
};
/* NPA AQ enqueue msg */
@@ -459,6 +847,29 @@ enum nix_af_status {
NIX_AF_ERR_LSO_CFG_FAIL = -418,
NIX_AF_INVAL_NPA_PF_FUNC = -419,
NIX_AF_INVAL_SSO_PF_FUNC = -420,
+ NIX_AF_ERR_TX_VTAG_NOSPC = -421,
+ NIX_AF_ERR_RX_VTAG_INUSE = -422,
+ NIX_AF_ERR_PTP_CONFIG_FAIL = -423,
+ NIX_AF_ERR_NPC_KEY_NOT_SUPP = -424,
+ NIX_AF_ERR_INVALID_NIXBLK = -425,
+ NIX_AF_ERR_INVALID_BANDPROF = -426,
+ NIX_AF_ERR_IPOLICER_NOTSUPP = -427,
+ NIX_AF_ERR_BANDPROF_INVAL_REQ = -428,
+ NIX_AF_ERR_CQ_CTX_WRITE_ERR = -429,
+ NIX_AF_ERR_AQ_CTX_RETRY_WRITE = -430,
+ NIX_AF_ERR_LINK_CREDITS = -431,
+};
+
+/* For NIX RX vtag action */
+enum nix_rx_vtag0_type {
+ NIX_AF_LFX_RX_VTAG_TYPE0, /* reserved for rx vlan offload */
+ NIX_AF_LFX_RX_VTAG_TYPE1,
+ NIX_AF_LFX_RX_VTAG_TYPE2,
+ NIX_AF_LFX_RX_VTAG_TYPE3,
+ NIX_AF_LFX_RX_VTAG_TYPE4,
+ NIX_AF_LFX_RX_VTAG_TYPE5,
+ NIX_AF_LFX_RX_VTAG_TYPE6,
+ NIX_AF_LFX_RX_VTAG_TYPE7,
};
/* For NIX LF context alloc and init */
@@ -475,6 +886,9 @@ struct nix_lf_alloc_req {
u16 sso_func;
u64 rx_cfg; /* See NIX_AF_LF(0..127)_RX_CFG */
u64 way_mask;
+#define NIX_LF_RSS_TAG_LSB_AS_ADDER BIT_ULL(0)
+#define NIX_LF_LBK_BLK_SEL BIT_ULL(1)
+ u64 flags;
};
struct nix_lf_alloc_rsp {
@@ -491,6 +905,54 @@ struct nix_lf_alloc_rsp {
u8 lf_tx_stats; /* NIX_AF_CONST1::LF_TX_STATS */
u16 cints; /* NIX_AF_CONST2::CINTS */
u16 qints; /* NIX_AF_CONST2::QINTS */
+ u8 hw_rx_tstamp_en;
+ u8 cgx_links; /* No. of CGX links present in HW */
+ u8 lbk_links; /* No. of LBK links present in HW */
+ u8 sdp_links; /* No. of SDP links present in HW */
+ u8 tx_link; /* Transmit channel link number */
+};
+
+struct nix_lf_free_req {
+ struct mbox_msghdr hdr;
+#define NIX_LF_DISABLE_FLOWS BIT_ULL(0)
+#define NIX_LF_DONT_FREE_TX_VTAG BIT_ULL(1)
+ u64 flags;
+};
+
+/* CN10K NIX AQ enqueue msg */
+struct nix_cn10k_aq_enq_req {
+ struct mbox_msghdr hdr;
+ u32 qidx;
+ u8 ctype;
+ u8 op;
+ union {
+ struct nix_cn10k_rq_ctx_s rq;
+ struct nix_cn10k_sq_ctx_s sq;
+ struct nix_cq_ctx_s cq;
+ struct nix_rsse_s rss;
+ struct nix_rx_mce_s mce;
+ struct nix_bandprof_s prof;
+ };
+ union {
+ struct nix_cn10k_rq_ctx_s rq_mask;
+ struct nix_cn10k_sq_ctx_s sq_mask;
+ struct nix_cq_ctx_s cq_mask;
+ struct nix_rsse_s rss_mask;
+ struct nix_rx_mce_s mce_mask;
+ struct nix_bandprof_s prof_mask;
+ };
+};
+
+struct nix_cn10k_aq_enq_rsp {
+ struct mbox_msghdr hdr;
+ union {
+ struct nix_cn10k_rq_ctx_s rq;
+ struct nix_cn10k_sq_ctx_s sq;
+ struct nix_cq_ctx_s cq;
+ struct nix_rsse_s rss;
+ struct nix_rx_mce_s mce;
+ struct nix_bandprof_s prof;
+ };
};
/* NIX AQ enqueue msg */
@@ -505,6 +967,7 @@ struct nix_aq_enq_req {
struct nix_cq_ctx_s cq;
struct nix_rsse_s rss;
struct nix_rx_mce_s mce;
+ u64 prof;
};
union {
struct nix_rq_ctx_s rq_mask;
@@ -512,6 +975,7 @@ struct nix_aq_enq_req {
struct nix_cq_ctx_s cq_mask;
struct nix_rsse_s rss_mask;
struct nix_rx_mce_s mce_mask;
+ u64 prof_mask;
};
};
@@ -523,6 +987,7 @@ struct nix_aq_enq_rsp {
struct nix_cq_ctx_s cq;
struct nix_rsse_s rss;
struct nix_rx_mce_s mce;
+ struct nix_bandprof_s prof;
};
};
@@ -563,6 +1028,7 @@ struct nix_txsch_free_req {
struct nix_txschq_config {
struct mbox_msghdr hdr;
u8 lvl; /* SMQ/MDQ/TL4/TL3/TL2/TL1 */
+ u8 read;
#define TXSCHQ_IDX_SHIFT 16
#define TXSCHQ_IDX_MASK (BIT_ULL(10) - 1)
#define TXSCHQ_IDX(reg, shift) (((reg) >> (shift)) & TXSCHQ_IDX_MASK)
@@ -570,6 +1036,8 @@ struct nix_txschq_config {
#define MAX_REGS_PER_MBOX_MSG 20
u64 reg[MAX_REGS_PER_MBOX_MSG];
u64 regval[MAX_REGS_PER_MBOX_MSG];
+ /* All 0's => overwrite with new value */
+ u64 regval_mask[MAX_REGS_PER_MBOX_MSG];
};
struct nix_vtag_config {
@@ -583,14 +1051,40 @@ struct nix_vtag_config {
union {
/* valid when cfg_type is '0' */
struct {
- /* tx vlan0 tag(C-VLAN) */
- u64 vlan0;
- /* tx vlan1 tag(S-VLAN) */
- u64 vlan1;
- /* insert tx vlan tag */
- u8 insert_vlan :1;
- /* insert tx double vlan tag */
- u8 double_vlan :1;
+ u64 vtag0;
+ u64 vtag1;
+
+ /* cfg_vtag0 & cfg_vtag1 fields are valid
+ * when free_vtag0 & free_vtag1 are '0's.
+ */
+ /* cfg_vtag0 = 1 to configure vtag0 */
+ u8 cfg_vtag0 :1;
+ /* cfg_vtag1 = 1 to configure vtag1 */
+ u8 cfg_vtag1 :1;
+
+ /* vtag0_idx & vtag1_idx are only valid when
+	 * both cfg_vtag0 & cfg_vtag1 are '0's;
+ * these fields are used along with free_vtag0
+ * & free_vtag1 to free the nix lf's tx_vlan
+ * configuration.
+ *
+ * Denotes the indices of tx_vtag def registers
+ * that needs to be cleared and freed.
+ */
+ int vtag0_idx;
+ int vtag1_idx;
+
+ /* free_vtag0 & free_vtag1 fields are valid
+ * when cfg_vtag0 & cfg_vtag1 are '0's.
+ */
+ /* free_vtag0 = 1 clears vtag0 configuration
+ * vtag0_idx denotes the index to be cleared.
+ */
+ u8 free_vtag0 :1;
+ /* free_vtag1 = 1 clears vtag1 configuration
+ * vtag1_idx denotes the index to be cleared.
+ */
+ u8 free_vtag1 :1;
} tx;
/* valid when cfg_type is '1' */
@@ -605,6 +1099,19 @@ struct nix_vtag_config {
};
};
+struct nix_vtag_config_rsp {
+ struct mbox_msghdr hdr;
+ int vtag0_idx;
+ int vtag1_idx;
+	/* Indices of the tx_vtag def registers used to configure
+	 * tx vtag0 & vtag1 headers; these indices are valid only
+	 * when the nix_vtag_config mbox requested vtag0 and/or
+	 * vtag1 configuration.
+ */
+};
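Putting the cfg_vtag/free_vtag comments together, the TX vtag lifecycle is: configure, remember the index from the response, then free using that index. A sketch of that round trip (mbox_send_vtag_cfg() is a hypothetical stand-in for the mailbox plumbing):

static int tx_vtag_roundtrip_sketch(u64 vtag0_data)
{
	struct nix_vtag_config req = { 0 };
	struct nix_vtag_config_rsp rsp = { 0 };
	int err, idx;

	/* Configure: program vtag0 with the caller's tag data */
	req.cfg_type = 0;			/* TX configuration */
	req.tx.cfg_vtag0 = 1;
	req.tx.vtag0 = vtag0_data;
	err = mbox_send_vtag_cfg(&req, &rsp);
	if (err)
		return err;
	idx = rsp.vtag0_idx;			/* index of the tx_vtag def register */

	/* Free: clear the same entry using the saved index */
	memset(&req, 0, sizeof(req));
	req.cfg_type = 0;
	req.tx.free_vtag0 = 1;
	req.tx.vtag0_idx = idx;
	return mbox_send_vtag_cfg(&req, &rsp);
}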
+
+#define NIX_FLOW_KEY_TYPE_L3_L4_MASK (~(0xf << 28))
+
struct nix_rss_flowkey_cfg {
struct mbox_msghdr hdr;
int mcam_index; /* MCAM entry index to modify */
@@ -626,7 +1133,16 @@ struct nix_rss_flowkey_cfg {
#define NIX_FLOW_KEY_TYPE_INNR_UDP BIT(15)
#define NIX_FLOW_KEY_TYPE_INNR_SCTP BIT(16)
#define NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC BIT(17)
+#define NIX_FLOW_KEY_TYPE_CH_LEN_90B BIT(18)
+#define NIX_FLOW_KEY_TYPE_CUSTOM0 BIT(19)
#define NIX_FLOW_KEY_TYPE_VLAN BIT(20)
+#define NIX_FLOW_KEY_TYPE_IPV4_PROTO BIT(21)
+#define NIX_FLOW_KEY_TYPE_AH BIT(22)
+#define NIX_FLOW_KEY_TYPE_ESP BIT(23)
+#define NIX_FLOW_KEY_TYPE_L4_DST_ONLY BIT(28)
+#define NIX_FLOW_KEY_TYPE_L4_SRC_ONLY BIT(29)
+#define NIX_FLOW_KEY_TYPE_L3_DST_ONLY BIT(30)
+#define NIX_FLOW_KEY_TYPE_L3_SRC_ONLY BIT(31)
u32 flowkey_cfg; /* Flowkey types selected */
u8 group; /* RSS context or group */
};
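One reading of NIX_FLOW_KEY_TYPE_L3_L4_MASK is that bits 31:28 act as L3/L4 source/destination-only modifiers and the mask strips them while keeping the base key types; the standalone check below illustrates that interpretation (it is an interpretation, not driver code):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t l3_l4_mask = ~(0xfu << 28);		/* mirrors NIX_FLOW_KEY_TYPE_L3_L4_MASK */
	uint32_t cfg = (1u << 20) | (1u << 29);		/* VLAN + L4_SRC_ONLY */

	assert((cfg & l3_l4_mask) == (1u << 20));	/* modifier stripped, base type kept */
	return 0;
}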
@@ -665,6 +1181,7 @@ struct nix_rx_mode {
#define NIX_RX_MODE_UCAST BIT(0)
#define NIX_RX_MODE_PROMISC BIT(1)
#define NIX_RX_MODE_ALLMULTI BIT(2)
+#define NIX_RX_MODE_USE_MCE BIT(3)
u16 mode;
};
@@ -672,6 +1189,7 @@ struct nix_rx_cfg {
struct mbox_msghdr hdr;
#define NIX_RX_OL3_VERIFY BIT(0)
#define NIX_RX_OL4_VERIFY BIT(1)
+#define NIX_RX_DROP_RE BIT(2)
u8 len_verify; /* Outer L3/L4 len check */
#define NIX_RX_CSUM_OL4_VERIFY BIT(0)
u8 csum_verify; /* Outer L4 checksum verification */
@@ -698,6 +1216,14 @@ struct nix_lso_format_cfg_rsp {
u8 lso_format_idx;
};
+struct nix_set_vlan_tpid {
+ struct mbox_msghdr hdr;
+#define NIX_VLAN_TYPE_INNER 0
+#define NIX_VLAN_TYPE_OUTER 1
+ u8 vlan_type;
+ u16 tpid;
+};
+
struct nix_bp_cfg_req {
struct mbox_msghdr hdr;
u16 chan_base; /* Starting channel number */
@@ -717,6 +1243,209 @@ struct nix_bp_cfg_rsp {
u8 chan_cnt; /* Number of channel for which bpids are assigned */
};
+/* Global NIX inline IPSec configuration */
+struct nix_inline_ipsec_cfg {
+ struct mbox_msghdr hdr;
+ u32 cpt_credit;
+ struct {
+ u8 egrp;
+ u8 opcode;
+ u16 param1;
+ u16 param2;
+ } gen_cfg;
+ struct {
+ u16 cpt_pf_func;
+ u8 cpt_slot;
+ } inst_qsel;
+ u8 enable;
+};
+
+/* Per NIX LF inline IPSec configuration */
+struct nix_inline_ipsec_lf_cfg {
+ struct mbox_msghdr hdr;
+ u64 sa_base_addr;
+ struct {
+ u32 tag_const;
+ u16 lenm1_max;
+ u8 sa_pow2_size;
+ u8 tt;
+ } ipsec_cfg0;
+ struct {
+ u32 sa_idx_max;
+ u8 sa_idx_w;
+ } ipsec_cfg1;
+ u8 enable;
+};
+
+struct nix_hw_info {
+ struct mbox_msghdr hdr;
+ u16 vwqe_delay;
+ u16 max_mtu;
+ u16 min_mtu;
+ u32 rpm_dwrr_mtu;
+ u32 sdp_dwrr_mtu;
+ u64 rsvd[16]; /* Add reserved fields for future expansion */
+};
+
+struct nix_bandprof_alloc_req {
+ struct mbox_msghdr hdr;
+ /* Count of profiles needed per layer */
+ u16 prof_count[BAND_PROF_NUM_LAYERS];
+};
+
+struct nix_bandprof_alloc_rsp {
+ struct mbox_msghdr hdr;
+ u16 prof_count[BAND_PROF_NUM_LAYERS];
+
+	/* There is no need to allocate more than 1 bandwidth profile
+ * per RQ of a PF_FUNC's NIXLF. So limit the maximum
+ * profiles to 64 per PF_FUNC.
+ */
+#define MAX_BANDPROF_PER_PFFUNC 64
+ u16 prof_idx[BAND_PROF_NUM_LAYERS][MAX_BANDPROF_PER_PFFUNC];
+};
+
+struct nix_bandprof_free_req {
+ struct mbox_msghdr hdr;
+ u8 free_all;
+ u16 prof_count[BAND_PROF_NUM_LAYERS];
+ u16 prof_idx[BAND_PROF_NUM_LAYERS][MAX_BANDPROF_PER_PFFUNC];
+};
+
+struct nix_bandprof_get_hwinfo_rsp {
+ struct mbox_msghdr hdr;
+ u16 prof_count[BAND_PROF_NUM_LAYERS];
+ u32 policer_timeunit;
+};
+
+/* SSO mailbox error codes
+ * Range 501 - 600.
+ */
+enum sso_af_status {
+ SSO_AF_ERR_PARAM = -501,
+ SSO_AF_ERR_LF_INVALID = -502,
+ SSO_AF_ERR_AF_LF_ALLOC = -503,
+ SSO_AF_ERR_GRP_EBUSY = -504,
+ SSO_AF_INVAL_NPA_PF_FUNC = -505,
+};
+
+struct sso_lf_alloc_req {
+ struct mbox_msghdr hdr;
+ int node;
+ u16 hwgrps;
+};
+
+struct sso_lf_alloc_rsp {
+ struct mbox_msghdr hdr;
+ u32 xaq_buf_size;
+ u32 xaq_wq_entries;
+ u32 in_unit_entries;
+ u16 hwgrps;
+};
+
+struct sso_lf_free_req {
+ struct mbox_msghdr hdr;
+ int node;
+ u16 hwgrps;
+};
+
+struct sso_hw_setconfig {
+ struct mbox_msghdr hdr;
+ u32 npa_aura_id;
+ u16 npa_pf_func;
+ u16 hwgrps;
+};
+
+struct sso_release_xaq {
+ struct mbox_msghdr hdr;
+ u16 hwgrps;
+};
+
+struct sso_info_req {
+ struct mbox_msghdr hdr;
+ union {
+ u16 grp;
+ u16 hws;
+ };
+};
+
+struct sso_grp_priority {
+ struct mbox_msghdr hdr;
+ u16 grp;
+ u8 priority;
+ u8 affinity;
+ u8 weight;
+};
+
+/* SSOW mailbox error codes
+ * Range 601 - 700.
+ */
+enum ssow_af_status {
+ SSOW_AF_ERR_PARAM = -601,
+ SSOW_AF_ERR_LF_INVALID = -602,
+ SSOW_AF_ERR_AF_LF_ALLOC = -603,
+ SSOW_AF_ERR_INVALID_CFG = -604,
+};
+
+struct ssow_lf_alloc_req {
+ struct mbox_msghdr hdr;
+ int node;
+ u16 hws;
+};
+
+struct ssow_lf_free_req {
+ struct mbox_msghdr hdr;
+ int node;
+ u16 hws;
+};
+
+struct ssow_config_lsw {
+ struct mbox_msghdr hdr;
+#define SSOW_LSW_DIS 0
+#define SSOW_LSW_GW_WAIT 1
+#define SSOW_LSW_GW_IMM 2
+ u8 lsw_mode;
+#define SSOW_WQE_REL_LSW_WAIT 0
+#define SSOW_WQE_REL_IMM 1
+ u8 wqe_release;
+};
+
+struct ssow_chng_mship {
+ struct mbox_msghdr hdr;
+ u8 set;
+ u8 enable;
+ u8 hws;
+ u16 nb_hwgrps;
+ u16 hwgrps[MAX_RVU_BLKLF_CNT];
+};
+
+struct sso_grp_qos_cfg {
+ struct mbox_msghdr hdr;
+ u16 grp;
+ u32 xaq_limit;
+ u16 taq_thr;
+ u16 iaq_thr;
+};
+
+struct sso_grp_stats {
+ struct mbox_msghdr hdr;
+ u16 grp;
+ u64 ws_pc;
+ u64 ext_pc;
+ u64 wa_pc;
+ u64 ts_pc;
+ u64 ds_pc;
+ u64 dq_pc;
+ u64 aw_status;
+ u64 page_cnt;
+};
+
+struct sso_hws_stats {
+ struct mbox_msghdr hdr;
+ u16 hws;
+ u64 arbitration;
+};
+
/* NPC mbox message structs */
#define NPC_MCAM_ENTRY_INVALID 0xFFFF
@@ -730,6 +1459,15 @@ enum npc_af_status {
NPC_MCAM_ALLOC_DENIED = -702,
NPC_MCAM_ALLOC_FAILED = -703,
NPC_MCAM_PERM_DENIED = -704,
+ NPC_AF_ERR_HIGIG_CONFIG_FAIL = -705,
+ NPC_AF_ERR_HIGIG_NOT_SUPPORTED = -706,
+ NPC_FLOW_INTF_INVALID = -707,
+ NPC_FLOW_CHAN_INVALID = -708,
+ NPC_FLOW_NO_NIXLF = -709,
+ NPC_FLOW_NOT_SUPPORTED = -710,
+ NPC_FLOW_VF_PERM_DENIED = -711,
+ NPC_FLOW_VF_NOT_INIT = -712,
+ NPC_FLOW_VF_OVERLAP = -713,
};
struct npc_mcam_alloc_entry_req {
@@ -865,20 +1603,453 @@ struct npc_get_kex_cfg_rsp {
u8 mkex_pfl_name[MKEX_NAME_LEN];
};
+struct flow_msg {
+ unsigned char dmac[6];
+ unsigned char smac[6];
+ __be16 etype;
+ __be16 vlan_etype;
+ __be16 vlan_tci;
+ union {
+ __be32 ip4src;
+ __be32 ip6src[4];
+ };
+ union {
+ __be32 ip4dst;
+ __be32 ip6dst[4];
+ };
+ u8 tos;
+ u8 ip_ver;
+ u8 ip_proto;
+ u8 tc;
+ __be16 sport;
+ __be16 dport;
+};
+
+struct npc_install_flow_req {
+ struct mbox_msghdr hdr;
+ struct flow_msg packet;
+ struct flow_msg mask;
+ u64 features;
+ u16 entry;
+ u16 channel;
+ u16 chan_mask;
+ u8 intf;
+ u8 set_cntr; /* If counter is available set counter for this entry ? */
+ u8 default_rule;
+ u8 append; /* overwrite(0) or append(1) flow to default rule? */
+ u16 vf;
+ /* action */
+ u32 index;
+ u16 match_id;
+ u8 flow_key_alg;
+ u8 op;
+ /* vtag rx action */
+ u8 vtag0_type;
+ u8 vtag0_valid;
+ u8 vtag1_type;
+ u8 vtag1_valid;
+ /* vtag tx action */
+ u16 vtag0_def;
+ u8 vtag0_op;
+ u16 vtag1_def;
+ u8 vtag1_op;
+};
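A hypothetical fill-out of an install request that drops IPv4 TCP traffic to destination port 80 on one RX channel; treating 'features' as a bitmap over the key_fields enum (added in npc.h below) is an assumption of this sketch, and the mailbox alloc/send plumbing is omitted:

static void npc_drop_tcp80_example(struct npc_install_flow_req *req, u16 rx_chan)
{
	req->features = BIT_ULL(NPC_ETYPE) | BIT_ULL(NPC_IPPROTO_TCP) |
			BIT_ULL(NPC_DPORT_TCP);
	req->packet.etype = htons(ETH_P_IP);
	req->mask.etype = htons(0xffff);
	req->packet.dport = htons(80);
	req->mask.dport = htons(0xffff);
	req->intf = NIX_INTF_RX;
	req->channel = rx_chan;
	req->chan_mask = 0xfff;			/* exact channel match */
	req->op = NIX_RX_ACTIONOP_DROP;		/* action: drop on match */
}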
+
+struct npc_install_flow_rsp {
+ struct mbox_msghdr hdr;
+ int counter; /* negative if no counter else counter number */
+};
+
+struct npc_delete_flow_req {
+ struct mbox_msghdr hdr;
+ u16 entry;
+	u16 start; /* Disable range of entries */
+ u16 end;
+ u8 all; /* PF + VFs */
+};
+
+struct npc_mcam_read_entry_req {
+ struct mbox_msghdr hdr;
+ u16 entry; /* MCAM entry to read */
+};
+
+struct npc_mcam_read_entry_rsp {
+ struct mbox_msghdr hdr;
+ struct mcam_entry entry_data;
+ u8 intf;
+ u8 enable;
+};
+
+struct npc_mcam_read_base_rule_rsp {
+ struct mbox_msghdr hdr;
+ struct mcam_entry entry;
+};
+
+struct npc_mcam_get_stats_req {
+ struct mbox_msghdr hdr;
+ u16 entry; /* mcam entry */
+};
+
+struct npc_mcam_get_stats_rsp {
+ struct mbox_msghdr hdr;
+ u64 stat; /* counter stats */
+ u8 stat_ena; /* enabled */
+};
+
+/* TIM mailbox error codes
+ * Range 801 - 900.
+ */
+enum tim_af_status {
+ TIM_AF_NO_RINGS_LEFT = -801,
+ TIM_AF_INVAL_NPA_PF_FUNC = -802,
+ TIM_AF_INVAL_SSO_PF_FUNC = -803,
+ TIM_AF_RING_STILL_RUNNING = -804,
+ TIM_AF_LF_INVALID = -805,
+ TIM_AF_CSIZE_NOT_ALIGNED = -806,
+ TIM_AF_CSIZE_TOO_SMALL = -807,
+ TIM_AF_CSIZE_TOO_BIG = -808,
+ TIM_AF_INTERVAL_TOO_SMALL = -809,
+ TIM_AF_INVALID_BIG_ENDIAN_VALUE = -810,
+ TIM_AF_INVALID_CLOCK_SOURCE = -811,
+ TIM_AF_GPIO_CLK_SRC_NOT_ENABLED = -812,
+ TIM_AF_INVALID_BSIZE = -813,
+ TIM_AF_INVALID_ENABLE_PERIODIC = -814,
+ TIM_AF_INVALID_ENABLE_DONTFREE = -815,
+ TIM_AF_ENA_DONTFRE_NSET_PERIODIC = -816,
+ TIM_AF_RING_ALREADY_DISABLED = -817,
+};
+
+enum tim_clk_srcs {
+ TIM_CLK_SRCS_TENNS = 0,
+ TIM_CLK_SRCS_GPIO = 1,
+ TIM_CLK_SRCS_GTI = 2,
+ TIM_CLK_SRCS_PTP = 3,
+ TIM_CLK_SRCS_SYNCE = 4,
+ TIM_CLK_SRCS_BTS = 5,
+ TIM_CLK_SRSC_INVALID,
+};
+
+enum tim_gpio_edge {
+ TIM_GPIO_NO_EDGE = 0,
+ TIM_GPIO_LTOH_TRANS = 1,
+ TIM_GPIO_HTOL_TRANS = 2,
+ TIM_GPIO_BOTH_TRANS = 3,
+ TIM_GPIO_INVALID,
+};
+
+struct tim_lf_alloc_req {
+ struct mbox_msghdr hdr;
+ u16 ring;
+ u16 npa_pf_func;
+ u16 sso_pf_func;
+};
+
+struct tim_ring_req {
+ struct mbox_msghdr hdr;
+ u16 ring;
+};
+
+struct tim_config_req {
+ struct mbox_msghdr hdr;
+ u16 ring;
+ u8 bigendian;
+ u8 clocksource;
+ u8 enableperiodic;
+ u8 enabledontfreebuffer;
+ u32 bucketsize;
+ u32 chunksize;
+ u32 interval; /* Cycles between traversal */
+ u8 gpioedge;
+ u8 rsvd[7];
+ u64 intervalns; /* Nanoseconds between traversal */
+ u64 clockfreq;
+};
+
+struct tim_lf_alloc_rsp {
+ struct mbox_msghdr hdr;
+ u64 tenns_clk;
+};
+
+struct tim_enable_rsp {
+ struct mbox_msghdr hdr;
+ u64 timestarted;
+ u32 currentbucket;
+};
+
+struct tim_intvl_req {
+ struct mbox_msghdr hdr;
+ u8 clocksource;
+ u64 clockfreq;
+};
+
+struct tim_intvl_rsp {
+ struct mbox_msghdr hdr;
+ u64 intvl_cyc;
+ u64 intvl_ns;
+};
+
+/* CPT mailbox error codes
+ * Range 901 - 1000.
+ */
+enum cpt_af_status {
+ CPT_AF_ERR_PARAM = -901,
+ CPT_AF_ERR_GRP_INVALID = -902,
+ CPT_AF_ERR_LF_INVALID = -903,
+ CPT_AF_ERR_ACCESS_DENIED = -904,
+ CPT_AF_ERR_SSO_PF_FUNC_INVALID = -905,
+ CPT_AF_ERR_NIX_PF_FUNC_INVALID = -906,
+ CPT_AF_ERR_INLINE_IPSEC_INB_ENA = -907,
+ CPT_AF_ERR_INLINE_IPSEC_OUT_ENA = -908
+};
+
+/* CPT mbox message formats */
+
+struct cpt_rd_wr_reg_msg {
+ struct mbox_msghdr hdr;
+ u64 reg_offset;
+ u64 *ret_val;
+ u64 val;
+ u8 is_write;
+ u8 blkaddr; /* BLKADDR_CPT0/BLKADDR_CPT1 or 0 for BLKADDR_CPT0 */
+};
+
+struct cpt_lf_alloc_req_msg {
+ struct mbox_msghdr hdr;
+ u16 nix_pf_func;
+ u16 sso_pf_func;
+ u16 eng_grpmsk;
+ u8 blkaddr; /* BLKADDR_CPT0/BLKADDR_CPT1 or 0 for BLKADDR_CPT0 */
+};
+
+#define CPT_INLINE_INBOUND 0
+#define CPT_INLINE_OUTBOUND 1
+struct cpt_inline_ipsec_cfg_msg {
+ struct mbox_msghdr hdr;
+ u8 enable;
+ u8 slot;
+ u8 dir;
+ u8 sso_pf_func_ovrd;
+ u16 sso_pf_func; /* inbound path SSO_PF_FUNC */
+ u16 nix_pf_func; /* outbound path NIX_PF_FUNC */
+};
+
+/* Mailbox message request and response format for CPT stats. */
+struct cpt_sts_req {
+ struct mbox_msghdr hdr;
+ u8 blkaddr;
+};
+
+struct cpt_sts_rsp {
+ struct mbox_msghdr hdr;
+ u64 inst_req_pc;
+ u64 inst_lat_pc;
+ u64 rd_req_pc;
+ u64 rd_lat_pc;
+ u64 rd_uc_pc;
+ u64 active_cycles_pc;
+ u64 ctx_mis_pc;
+ u64 ctx_hit_pc;
+ u64 ctx_aop_pc;
+ u64 ctx_aop_lat_pc;
+ u64 ctx_ifetch_pc;
+ u64 ctx_ifetch_lat_pc;
+ u64 ctx_ffetch_pc;
+ u64 ctx_ffetch_lat_pc;
+ u64 ctx_wback_pc;
+ u64 ctx_wback_lat_pc;
+ u64 ctx_psh_pc;
+ u64 ctx_psh_lat_pc;
+ u64 ctx_err;
+ u64 ctx_enc_id;
+ u64 ctx_flush_timer;
+ u64 rxc_time;
+ u64 rxc_time_cfg;
+ u64 rxc_active_sts;
+ u64 rxc_zombie_sts;
+ u64 busy_sts_ae;
+ u64 free_sts_ae;
+ u64 busy_sts_se;
+ u64 free_sts_se;
+ u64 busy_sts_ie;
+ u64 free_sts_ie;
+ u64 exe_err_info;
+ u64 cptclk_cnt;
+ u64 diag;
+ u64 rxc_dfrg;
+ u64 x2p_link_cfg0;
+ u64 x2p_link_cfg1;
+};
+
+/* Mailbox message request format to configure reassembly timeout. */
+struct cpt_rxc_time_cfg_req {
+ struct mbox_msghdr hdr;
+ int blkaddr;
+ u32 step;
+ u16 zombie_thres;
+ u16 zombie_limit;
+ u16 active_thres;
+ u16 active_limit;
+};
+
+/* Mailbox message request format to request for CPT_INST_S lmtst. */
+struct cpt_inst_lmtst_req {
+ struct mbox_msghdr hdr;
+ u64 inst[8];
+ u64 rsvd;
+};
+
+/* REE mailbox error codes
+ * Range 1001 - 1100.
+ */
+enum ree_af_status {
+ REE_AF_ERR_RULE_UNKNOWN_VALUE = -1001,
+ REE_AF_ERR_LF_NO_MORE_RESOURCES = -1002,
+ REE_AF_ERR_LF_INVALID = -1003,
+ REE_AF_ERR_ACCESS_DENIED = -1004,
+ REE_AF_ERR_RULE_DB_PARTIAL = -1005,
+ REE_AF_ERR_RULE_DB_EQ_BAD_VALUE = -1006,
+ REE_AF_ERR_RULE_DB_BLOCK_ALLOC_FAILED = -1007,
+ REE_AF_ERR_BLOCK_NOT_IMPLEMENTED = -1008,
+ REE_AF_ERR_RULE_DB_INC_OFFSET_TOO_BIG = -1009,
+ REE_AF_ERR_RULE_DB_OFFSET_TOO_BIG = -1010,
+ REE_AF_ERR_Q_IS_GRACEFUL_DIS = -1011,
+ REE_AF_ERR_Q_NOT_GRACEFUL_DIS = -1012,
+ REE_AF_ERR_RULE_DB_ALLOC_FAILED = -1013,
+ REE_AF_ERR_RULE_DB_TOO_BIG = -1014,
+ REE_AF_ERR_RULE_DB_GEQ_BAD_VALUE = -1015,
+ REE_AF_ERR_RULE_DB_LEQ_BAD_VALUE = -1016,
+ REE_AF_ERR_RULE_DB_WRONG_LENGTH = -1017,
+ REE_AF_ERR_RULE_DB_WRONG_OFFSET = -1018,
+ REE_AF_ERR_RULE_DB_BLOCK_TOO_BIG = -1019,
+ REE_AF_ERR_RULE_DB_SHOULD_FILL_REQUEST = -1020,
+ REE_AF_ERR_RULE_DBI_ALLOC_FAILED = -1021,
+ REE_AF_ERR_LF_WRONG_PRIORITY = -1022,
+ REE_AF_ERR_LF_SIZE_TOO_BIG = -1023,
+ REE_AF_ERR_GRAPH_ADDRESS_TOO_BIG = -1024,
+ REE_AF_ERR_BAD_RULE_TYPE = -1025,
+};
+
+/* REE mbox message formats */
+
+struct ree_req_msg {
+ struct mbox_msghdr hdr;
+ u32 blkaddr;
+};
+
+struct ree_lf_req_msg {
+ struct mbox_msghdr hdr;
+ u32 blkaddr;
+ u32 size;
+ u8 lf;
+ u8 pri;
+};
+
+struct ree_rule_db_prog_req_msg {
+ struct mbox_msghdr hdr;
+/* The rule DB is passed in the MBOX and copied to the internal REE DB.
+ * This size should be a power of 2 to fit the rule DB internal blocks.
+ */
+#define REE_RULE_DB_REQ_BLOCK_SIZE (MBOX_SIZE >> 1)
+ u8 rule_db[REE_RULE_DB_REQ_BLOCK_SIZE];
+ u32 blkaddr; /* REE0 or REE1 */
+ u32 total_len; /* Total len of rule db */
+ u32 offset; /* Offset of current rule db block */
+ u16 len; /* Length of rule db block */
+ u8 is_last; /* Is this the last block */
+ u8 is_incremental; /* Is incremental flow */
+ u8 is_dbi; /* Is rule db incremental */
+};
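The offset/len/is_last fields suggest the rule database is streamed in REE_RULE_DB_REQ_BLOCK_SIZE chunks; the loop below sketches that flow (ree_send_rule_db_chunk() is a hypothetical send helper):

static int ree_prog_rule_db_sketch(const u8 *db, u32 total_len)
{
	struct ree_rule_db_prog_req_msg *req;
	u32 off = 0;
	int err = 0;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	while (off < total_len) {
		u32 chunk = min_t(u32, total_len - off, REE_RULE_DB_REQ_BLOCK_SIZE);

		memset(req, 0, sizeof(*req));
		memcpy(req->rule_db, db + off, chunk);
		req->blkaddr = BLKADDR_REE0;		/* program REE0 (example) */
		req->total_len = total_len;
		req->offset = off;
		req->len = chunk;
		req->is_last = (off + chunk == total_len);

		err = ree_send_rule_db_chunk(req);	/* hypothetical */
		if (err)
			break;
		off += chunk;
	}

	kfree(req);
	return err;
}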
+
+struct ree_rule_db_get_req_msg {
+ struct mbox_msghdr hdr;
+ u32 blkaddr;
+ u32 offset; /* Retrieve db from this offset */
+ u8 is_dbi; /* Is request for rule db incremental */
+};
+
+struct ree_rd_wr_reg_msg {
+ struct mbox_msghdr hdr;
+ u64 reg_offset;
+ u64 *ret_val;
+ u64 val;
+ u32 blkaddr;
+ u8 is_write;
+};
+
+struct ree_rule_db_len_rsp_msg {
+ struct mbox_msghdr hdr;
+ u32 blkaddr;
+ u32 len;
+ u32 inc_len;
+};
+
+struct ree_rule_db_get_rsp_msg {
+ struct mbox_msghdr hdr;
+#define REE_RULE_DB_RSP_BLOCK_SIZE (MBOX_DOWN_TX_SIZE - SZ_1K)
+ u8 rule_db[REE_RULE_DB_RSP_BLOCK_SIZE];
+ u32 total_len; /* Total len of rule db */
+ u32 offset; /* Offset of current rule db block */
+ u16 len; /* Length of rule db block */
+ u8 is_last; /* Is this the last block */
+};
+
enum ptp_op {
PTP_OP_ADJFINE = 0,
PTP_OP_GET_CLOCK = 1,
+ PTP_OP_GET_TSTMP = 2,
+ PTP_OP_SET_THRESH = 3,
+ PTP_OP_SET_CLOCK = 4,
+ PTP_OP_ADJ_CLOCK = 5,
};
struct ptp_req {
struct mbox_msghdr hdr;
u8 op;
s64 scaled_ppm;
+ u8 is_pmu;
+ u64 thresh;
+ u64 nsec;
+ s64 delta;
};
struct ptp_rsp {
struct mbox_msghdr hdr;
u64 clk;
+ u64 tsc;
+};
+
+struct sdp_node_info {
+	/* Node to which this PF belongs */
+ u8 node_id;
+ u8 max_vfs;
+ u8 num_pf_rings;
+ u8 pf_srn;
+#define SDP_MAX_VFS 128
+ u8 vf_rings[SDP_MAX_VFS];
+};
+
+struct sdp_chan_info_msg {
+ struct mbox_msghdr hdr;
+ struct sdp_node_info info;
+};
+
+struct sdp_get_chan_info_msg {
+ struct mbox_msghdr hdr;
+ u16 chan_base;
+ u16 num_chan;
+};
+
+/* CGX mailbox error codes
+ * Range 1101 - 1200.
+ */
+enum cgx_af_status {
+ LMAC_AF_ERR_INVALID_PARAM = -1101,
+ LMAC_AF_ERR_PF_NOT_MAPPED = -1102,
+ LMAC_AF_ERR_PERM_DENIED = -1103,
+ LMAC_AF_ERR_PFC_ENADIS_PERM_DENIED = -1104,
+ LMAC_AF_ERR_8023PAUSE_ENADIS_PERM_DENIED = -1105,
+ LMAC_AF_ERR_CMD_TIMEOUT = -1106,
+ LMAC_AF_ERR_FIRMWARE_DATA_NOT_MAPPED = -1107,
};
#endif /* MBOX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
index 407b9477da24..b7ab87c8df92 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -1,16 +1,15 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Marvell OcteonTx2 RVU Admin Function driver
+/* Marvell RVU Admin Function driver
*
- * Copyright (C) 2018 Marvell International Ltd.
+ * Copyright (C) 2018 Marvell.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef NPC_H
#define NPC_H
+#define NPC_KEX_CHAN_MASK 0xFFFULL
+
enum NPC_LID_E {
NPC_LID_LA = 0,
NPC_LID_LB,
@@ -28,11 +27,12 @@ enum npc_kpu_la_ltype {
NPC_LT_LA_8023 = 1,
NPC_LT_LA_ETHER,
NPC_LT_LA_IH_NIX_ETHER,
- NPC_LT_LA_IH_8_ETHER,
- NPC_LT_LA_IH_4_ETHER,
- NPC_LT_LA_IH_2_ETHER,
- NPC_LT_LA_HIGIG2_ETHER,
+ NPC_LT_LA_HIGIG2_ETHER = 7,
NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ NPC_LT_LA_CPT_HDR,
+ NPC_LT_LA_CUSTOM_L2_24B_ETHER,
+ NPC_LT_LA_CUSTOM_PRE_L2_ETHER,
NPC_LT_LA_CUSTOM0 = 0xE,
NPC_LT_LA_CUSTOM1 = 0xF,
};
@@ -42,7 +42,7 @@ enum npc_kpu_lb_ltype {
NPC_LT_LB_CTAG,
NPC_LT_LB_STAG_QINQ,
NPC_LT_LB_BTAG,
- NPC_LT_LB_ITAG,
+ NPC_LT_LB_PPPOE,
NPC_LT_LB_DSA,
NPC_LT_LB_DSA_VLAN,
NPC_LT_LB_EDSA,
@@ -50,12 +50,18 @@ enum npc_kpu_lb_ltype {
NPC_LT_LB_EXDSA,
NPC_LT_LB_EXDSA_VLAN,
NPC_LT_LB_FDSA,
+ NPC_LT_LB_VLAN_EXDSA,
NPC_LT_LB_CUSTOM0 = 0xE,
NPC_LT_LB_CUSTOM1 = 0xF,
};
+/* Don't modify ltypes up to IP6_EXT, otherwise length and checksum of IP
+ * headers may not be checked correctly. IPv4 ltypes and IPv6 ltypes must
+ * differ only at bit 0 so mask 0xE can be used to detect extended headers.
+ */
enum npc_kpu_lc_ltype {
- NPC_LT_LC_IP = 1,
+ NPC_LT_LC_PTP = 1,
+ NPC_LT_LC_IP,
NPC_LT_LC_IP_OPT,
NPC_LT_LC_IP6,
NPC_LT_LC_IP6_EXT,
@@ -63,8 +69,8 @@ enum npc_kpu_lc_ltype {
NPC_LT_LC_RARP,
NPC_LT_LC_MPLS,
NPC_LT_LC_NSH,
- NPC_LT_LC_PTP,
NPC_LT_LC_FCOE,
+ NPC_LT_LC_NGIO,
NPC_LT_LC_CUSTOM0 = 0xE,
NPC_LT_LC_CUSTOM1 = 0xF,
};
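A short illustration of the bit-0 convention called out in the comment above (values taken from this enum): masking away bit 0 folds the option/extension ltype onto its base ltype, so a single compare covers both.

/* Sketch: NPC_LT_LC_IP = 2, NPC_LT_LC_IP_OPT = 3,
 * NPC_LT_LC_IP6 = 4, NPC_LT_LC_IP6_EXT = 5.
 */
static inline bool lc_ltype_is_ipv4(u8 ltype)
{
	return (ltype & 0xE) == NPC_LT_LC_IP;	/* IP and IP_OPT */
}

static inline bool lc_ltype_is_ipv6(u8 ltype)
{
	return (ltype & 0xE) == NPC_LT_LC_IP6;	/* IP6 and IP6_EXT */
}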
@@ -145,8 +151,84 @@ enum npc_kpu_lh_ltype {
* Software assigns pkind for each incoming port such as CGX
* Ethernet interfaces, LBK interfaces, etc.
*/
+#define NPC_UNRESERVED_PKIND_COUNT NPC_RX_CUSTOM_PRE_L2_PKIND
+
enum npc_pkind_type {
- NPC_TX_DEF_PKIND = 63ULL, /* NIX-TX PKIND */
+ NPC_RX_LBK_PKIND = 0ULL,
+ NPC_RX_CUSTOM_PRE_L2_PKIND = 55ULL,
+ NPC_RX_VLAN_EXDSA_PKIND = 56ULL,
+ NPC_RX_CHLEN24B_PKIND = 57ULL,
+ NPC_RX_CPT_HDR_PKIND,
+ NPC_RX_CHLEN90B_PKIND,
+ NPC_TX_HIGIG_PKIND,
+ NPC_RX_HIGIG_PKIND,
+ NPC_RX_EDSA_PKIND,
+ NPC_TX_DEF_PKIND, /* NIX-TX PKIND */
+};
+
+enum npc_interface_type {
+ NPC_INTF_MODE_DEF,
+ NPC_INTF_MODE_EDSA,
+ NPC_INTF_MODE_HIGIG,
+ NPC_INTF_MODE_FDSA,
+};
+
+/* List of known and supported fields in the packet header and
+ * fields present in the key structure.
+ */
+enum key_fields {
+ NPC_DMAC,
+ NPC_SMAC,
+ NPC_ETYPE,
+ NPC_VLAN_ETYPE_CTAG, /* 0x8100 */
+ NPC_VLAN_ETYPE_STAG, /* 0x88A8 */
+ NPC_OUTER_VID,
+ NPC_TOS,
+ NPC_SIP_IPV4,
+ NPC_DIP_IPV4,
+ NPC_SIP_IPV6,
+ NPC_DIP_IPV6,
+ NPC_IPPROTO_TCP,
+ NPC_IPPROTO_UDP,
+ NPC_IPPROTO_SCTP,
+ NPC_IPPROTO_AH,
+ NPC_IPPROTO_ESP,
+ NPC_IPPROTO_ICMP,
+ NPC_IPPROTO_ICMP6,
+ NPC_SPORT_TCP,
+ NPC_DPORT_TCP,
+ NPC_SPORT_UDP,
+ NPC_DPORT_UDP,
+ NPC_SPORT_SCTP,
+ NPC_DPORT_SCTP,
+ NPC_FDSA_VAL,
+ NPC_HEADER_FIELDS_MAX,
+ NPC_CHAN = NPC_HEADER_FIELDS_MAX, /* Valid when Rx */
+ NPC_PF_FUNC, /* Valid when Tx */
+ NPC_ERRLEV,
+ NPC_ERRCODE,
+ NPC_LXMB,
+ NPC_LA,
+ NPC_LB,
+ NPC_LC,
+ NPC_LD,
+ NPC_LE,
+ NPC_LF,
+ NPC_LG,
+ NPC_LH,
+ /* Ethertype for untagged frame */
+ NPC_ETYPE_ETHER,
+ /* Ethertype for single tagged frame */
+ NPC_ETYPE_TAG1,
+ /* Ethertype for double tagged frame */
+ NPC_ETYPE_TAG2,
+ /* outer vlan tci for single tagged frame */
+ NPC_VLAN_TAG1,
+ /* outer vlan tci for double tagged frame */
+ NPC_VLAN_TAG2,
+ /* other header fields programmed for extraction but not of interest here */
+ NPC_UNKNOWN,
+ NPC_KEY_FIELDS_MAX,
};
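Since NPC_KEY_FIELDS_MAX stays well under 64, these identifiers are convenient as bit positions in a u64 bitmap; a minimal sketch of that usage follows (the association with the 'features' member of struct rvu_npc_mcam_rule later in this header is an inference from the field width, not something the patch states):

/* Sketch: test whether a rule's feature bitmap includes a given field. */
static inline bool rule_has_field(u64 features, enum key_fields fld)
{
	return features & BIT_ULL(fld);
}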
struct npc_kpu_profile_cam {
@@ -158,7 +240,7 @@ struct npc_kpu_profile_cam {
u16 dp1_mask;
u16 dp2;
u16 dp2_mask;
-};
+} __packed;
struct npc_kpu_profile_action {
u8 errlev;
@@ -178,13 +260,13 @@ struct npc_kpu_profile_action {
u8 mask;
u8 right;
u8 shift;
-};
+} __packed;
struct npc_kpu_profile {
int cam_entries;
int action_entries;
- const struct npc_kpu_profile_cam *cam;
- const struct npc_kpu_profile_action *action;
+ struct npc_kpu_profile_cam *cam;
+ struct npc_kpu_profile_action *action;
};
/* NPC KPU register formats */
@@ -306,6 +388,22 @@ struct nix_rx_action {
#endif
};
+struct nix_tx_action {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 rsvd_63_48 :16;
+ u64 match_id :16;
+ u64 index :20;
+ u64 rsvd_11_8 :8;
+ u64 op :4;
+#else
+ u64 op :4;
+ u64 rsvd_11_8 :8;
+ u64 index :20;
+ u64 match_id :16;
+ u64 rsvd_63_48 :16;
+#endif
+};
+
/* NPC_AF_INTFX_KEX_CFG field masks */
#define NPC_PARSE_NIBBLE GENMASK_ULL(30, 0)
@@ -332,10 +430,41 @@ struct nix_rx_action {
#define NPC_PARSE_NIBBLE_LH_LTYPE BIT_ULL(30)
/* NIX Receive Vtag Action Structure */
-#define VTAG0_VALID_BIT BIT_ULL(15)
-#define VTAG0_TYPE_MASK GENMASK_ULL(14, 12)
-#define VTAG0_LID_MASK GENMASK_ULL(10, 8)
-#define VTAG0_RELPTR_MASK GENMASK_ULL(7, 0)
+#define RX_VTAG0_VALID_BIT BIT_ULL(15)
+#define RX_VTAG0_TYPE_MASK GENMASK_ULL(14, 12)
+#define RX_VTAG0_LID_MASK GENMASK_ULL(10, 8)
+#define RX_VTAG0_RELPTR_MASK GENMASK_ULL(7, 0)
+#define RX_VTAG1_VALID_BIT BIT_ULL(47)
+#define RX_VTAG1_TYPE_MASK GENMASK_ULL(46, 44)
+#define RX_VTAG1_LID_MASK GENMASK_ULL(42, 40)
+#define RX_VTAG1_RELPTR_MASK GENMASK_ULL(39, 32)
+
+/* NIX Transmit Vtag Action Structure */
+#define TX_VTAG0_DEF_MASK GENMASK_ULL(25, 16)
+#define TX_VTAG0_OP_MASK GENMASK_ULL(13, 12)
+#define TX_VTAG0_LID_MASK GENMASK_ULL(10, 8)
+#define TX_VTAG0_RELPTR_MASK GENMASK_ULL(7, 0)
+#define TX_VTAG1_DEF_MASK GENMASK_ULL(57, 48)
+#define TX_VTAG1_OP_MASK GENMASK_ULL(45, 44)
+#define TX_VTAG1_LID_MASK GENMASK_ULL(42, 40)
+#define TX_VTAG1_RELPTR_MASK GENMASK_ULL(39, 32)
+
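A hedged example of composing a 64-bit RX VTAG0 action word from these masks using FIELD_PREP() from <linux/bitfield.h>; the type/lid/relptr parameters are placeholders:

/* Sketch: build an RX VTAG0 action using the field masks above. */
static inline u64 rx_vtag0_action(u8 type, u8 lid, u8 relptr)
{
	return RX_VTAG0_VALID_BIT |
	       FIELD_PREP(RX_VTAG0_TYPE_MASK, type) |
	       FIELD_PREP(RX_VTAG0_LID_MASK, lid) |
	       FIELD_PREP(RX_VTAG0_RELPTR_MASK, relptr);
}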
+/* NPC MCAM reserved entry index per nixlf */
+#define NIXLF_UCAST_ENTRY 0
+#define NIXLF_BCAST_ENTRY 1
+#define NIXLF_ALLMULTI_ENTRY 2
+#define NIXLF_PROMISC_ENTRY 3
+
+struct npc_coalesced_kpu_prfl {
+#define NPC_SIGN 0x00666f727063706e
+#define NPC_PRFL_NAME "npc_prfls_array"
+#define NPC_NAME_LEN 32
+ u64 signature; /* "npcprof\0" (8 bytes/ASCII characters) */
+ u8 name[NPC_NAME_LEN]; /* KPU Profile name */
+ u64 version; /* KPU firmware/profile version */
+ u8 num_prfl; /* No of NPC profiles. */
+ u16 prfl_sz[0];
+};
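A sketch of locating one profile inside a coalesced image, assuming the individual profile blobs are packed back to back immediately after the prfl_sz[] array with each prfl_sz[i] giving the size in bytes (the helper itself is hypothetical):

/* Sketch: return a pointer to the idx-th profile blob, or NULL. */
static const u8 *npc_coalesced_prfl_at(const struct npc_coalesced_kpu_prfl *hdr,
				       u8 idx)
{
	const u8 *blob = (const u8 *)&hdr->prfl_sz[hdr->num_prfl];
	u8 i;

	if (hdr->signature != NPC_SIGN || idx >= hdr->num_prfl)
		return NULL;
	for (i = 0; i < idx; i++)
		blob += hdr->prfl_sz[i];
	return blob;
}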
struct npc_mcam_kex {
 /* MKEX Profile Header */
@@ -355,11 +484,42 @@ struct npc_mcam_kex {
u64 intf_ld_flags[NPC_MAX_INTF][NPC_MAX_LD][NPC_MAX_LFL];
} __packed;
+struct npc_kpu_fwdata {
+ int entries;
+ /* What follows is:
+ * struct npc_kpu_profile_cam[entries];
+ * struct npc_kpu_profile_action[entries];
+ */
+ u8 data[0];
+} __packed;
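Given the layout described in the comment above, the two arrays can be recovered from data[] as in the sketch below, relying on both row structures being __packed:

/* Sketch: split fw->data into its CAM and ACTION arrays. */
static void npc_kpu_fwdata_arrays(const struct npc_kpu_fwdata *fw,
				  const struct npc_kpu_profile_cam **cam,
				  const struct npc_kpu_profile_action **action)
{
	*cam = (const struct npc_kpu_profile_cam *)fw->data;
	*action = (const struct npc_kpu_profile_action *)(*cam + fw->entries);
}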
+
+struct rvu_npc_mcam_rule {
+ struct flow_msg packet;
+ struct flow_msg mask;
+ u8 intf;
+ union {
+ struct nix_tx_action tx_action;
+ struct nix_rx_action rx_action;
+ };
+ u64 vtag_action;
+ struct list_head list;
+ u64 features;
+ u16 owner;
+ u16 entry;
+ u16 cntr;
+ bool has_cntr;
+ u8 default_rule;
+ bool enable;
+ bool vfvlan_cfg;
+ u16 chan;
+ u16 chan_mask;
+};
+
struct npc_lt_def {
u8 ltype_mask;
u8 ltype_match;
u8 lid;
-};
+} __packed;
struct npc_lt_def_ipsec {
u8 ltype_mask;
@@ -367,7 +527,30 @@ struct npc_lt_def_ipsec {
u8 lid;
u8 spi_offset;
u8 spi_nz;
-};
+} __packed;
+
+struct npc_lt_def_apad {
+ u8 ltype_mask;
+ u8 ltype_match;
+ u8 lid;
+ u8 valid;
+} __packed;
+
+struct npc_lt_def_color {
+ u8 ltype_mask;
+ u8 ltype_match;
+ u8 lid;
+ u8 noffset;
+ u8 offset;
+} __packed;
+
+struct npc_lt_def_et {
+ u8 ltype_mask;
+ u8 ltype_match;
+ u8 lid;
+ u8 valid;
+ u8 offset;
+} __packed;
struct npc_lt_def_cfg {
struct npc_lt_def rx_ol2;
@@ -386,6 +569,40 @@ struct npc_lt_def_cfg {
struct npc_lt_def pck_oip4;
struct npc_lt_def pck_oip6;
struct npc_lt_def pck_iip4;
-};
+ struct npc_lt_def_apad rx_apad0;
+ struct npc_lt_def_apad rx_apad1;
+ struct npc_lt_def_color ovlan;
+ struct npc_lt_def_color ivlan;
+ struct npc_lt_def_color rx_gen0_color;
+ struct npc_lt_def_color rx_gen1_color;
+ struct npc_lt_def_et rx_et[2];
+} __packed;
+
+/* Loadable KPU profile firmware data */
+struct npc_kpu_profile_fwdata {
+#define KPU_SIGN 0x00666f727075706b
+#define KPU_NAME_LEN 32
+/** Maximum number of custom KPU entries supported by the built-in profile. */
+#define KPU_MAX_CST_ENT 6
+ /* KPU Profile Header */
+ u64 signature; /* "kpuprof\0" (8 bytes/ASCII characters) */
+ u8 name[KPU_NAME_LEN]; /* KPU Profile name */
+ u64 version; /* KPU profile version */
+ u8 kpus;
+ u8 reserved[7];
+
+ /* Default MKEX profile to be used with this KPU profile. May be
+ * overridden with the mkex_profile module parameter. Format is the
+ * same as for the MKEX profile to streamline processing.
+ */
+ struct npc_mcam_kex mkex;
+ /* LTYPE values for specific HW offloaded protocols. */
+ struct npc_lt_def_cfg lt_def;
+ /* Dynamically sized data:
+ * Custom KPU CAM and ACTION configuration entries.
+ * struct npc_kpu_fwdata kpu[kpus];
+ */
+ u8 data[0];
+} __packed;
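A loader-side sanity walk over such an image might look like the sketch below; 'fw_size' is assumed to be the total image length (for example as reported by request_firmware()), and each per-KPU record follows struct npc_kpu_fwdata above:

/* Sketch: bounds-check the header and the variable-size KPU records. */
static bool npc_kpu_fw_looks_valid(const struct npc_kpu_profile_fwdata *fw,
				   size_t fw_size)
{
	const u8 *p = fw->data, *end = (const u8 *)fw + fw_size;
	u8 i;

	if (fw_size < sizeof(*fw) || fw->signature != KPU_SIGN)
		return false;
	for (i = 0; i < fw->kpus; i++) {
		const struct npc_kpu_fwdata *kpu = (const void *)p;
		size_t sz;

		if (p + sizeof(*kpu) > end)
			return false;
		sz = sizeof(*kpu) + kpu->entries *
		     (sizeof(struct npc_kpu_profile_cam) +
		      sizeof(struct npc_kpu_profile_action));
		if (p + sz > end)
			return false;
		p += sz;
	}
	return true;
}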
#endif /* NPC_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
index 0e4af93be0fb..5b712d0aa327 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
@@ -1,17 +1,17 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Marvell OcteonTx2 RVU Admin Function driver
+/* Marvell RVU Admin Function driver
*
- * Copyright (C) 2018 Marvell International Ltd.
+ * Copyright (C) 2018 Marvell.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef NPC_PROFILE_H
#define NPC_PROFILE_H
-#define NPC_KPU_PROFILE_VER 0x0000000100050000
+#define NPC_KPU_PROFILE_VER 0x0000000100070000
+#define NPC_KPU_VER_MAJ(ver) (u16)(((ver) >> 32) & 0xFFFF)
+#define NPC_KPU_VER_MIN(ver) (u16)(((ver) >> 16) & 0xFFFF)
+#define NPC_KPU_VER_PATCH(ver) (u16)((ver) & 0xFFFF)
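For reference, the bumped profile version decodes with these macros as follows:

/* 0x0000000100070000 -> major 1, minor 7, patch 0:
 *   NPC_KPU_VER_MAJ():   (ver >> 32) & 0xFFFF = 0x0001
 *   NPC_KPU_VER_MIN():   (ver >> 16) & 0xFFFF = 0x0007
 *   NPC_KPU_VER_PATCH():  ver        & 0xFFFF = 0x0000
 */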
#define NPC_IH_W 0x8000
#define NPC_IH_UTAG 0x2000
@@ -20,6 +20,7 @@
#define NPC_ETYPE_IP6 0x86dd
#define NPC_ETYPE_ARP 0x0806
#define NPC_ETYPE_RARP 0x8035
+#define NPC_ETYPE_NGIO 0x8842
#define NPC_ETYPE_MPLSU 0x8847
#define NPC_ETYPE_MPLSM 0x8848
#define NPC_ETYPE_ETAG 0x893f
@@ -33,6 +34,10 @@
#define NPC_ETYPE_PPP 0x880b
#define NPC_ETYPE_NSH 0x894f
#define NPC_ETYPE_DSA 0xdada
+#define NPC_ETYPE_PPPOE 0x8864
+
+#define NPC_PPP_IP 0x0021
+#define NPC_PPP_IP6 0x0057
#define NPC_IPNH_HOP 0
#define NPC_IPNH_ICMP 1
@@ -142,7 +147,7 @@
#define NPC_DSA_EDSA 0x8000
#define NPC_DSA_FDSA 0xc000
-#define NPC_KEXOF_DMAC 8
+#define NPC_KEXOF_DMAC 9
#define MKEX_SIGN 0x19bbfdbd15f /* strtoull of "mkexprof" with base:36 */
#define KEX_LD_CFG(bytesm1, hdr_ofs, ena, flags_ena, key_ofs) \
(((bytesm1) << 16) | ((hdr_ofs) << 8) | ((ena) << 7) | \
@@ -150,6 +155,7 @@
/* Rx parse key extract nibble enable */
#define NPC_PARSE_NIBBLE_INTF_RX (NPC_PARSE_NIBBLE_CHAN | \
+ NPC_PARSE_NIBBLE_ERRCODE | \
NPC_PARSE_NIBBLE_LA_LTYPE | \
NPC_PARSE_NIBBLE_LB_LTYPE | \
NPC_PARSE_NIBBLE_LC_LTYPE | \
@@ -170,25 +176,30 @@ enum npc_kpu_parser_state {
NPC_S_KPU1_EXDSA,
NPC_S_KPU1_HIGIG2,
NPC_S_KPU1_IH_NIX_HIGIG2,
+ NPC_S_KPU1_CUSTOM_PRE_L2,
+ NPC_S_KPU1_CPT_HDR,
+ NPC_S_KPU1_VLAN_EXDSA,
NPC_S_KPU2_CTAG,
NPC_S_KPU2_CTAG2,
NPC_S_KPU2_SBTAG,
NPC_S_KPU2_QINQ,
NPC_S_KPU2_ETAG,
- NPC_S_KPU2_ITAG,
- NPC_S_KPU2_PREHEADER,
NPC_S_KPU2_EXDSA,
+ NPC_S_KPU2_CPT_CTAG,
+ NPC_S_KPU2_CPT_QINQ,
NPC_S_KPU3_CTAG,
NPC_S_KPU3_STAG,
NPC_S_KPU3_QINQ,
- NPC_S_KPU3_ITAG,
NPC_S_KPU3_CTAG_C,
NPC_S_KPU3_STAG_C,
NPC_S_KPU3_QINQ_C,
NPC_S_KPU3_DSA,
+ NPC_S_KPU3_VLAN_EXDSA,
NPC_S_KPU4_MPLS,
NPC_S_KPU4_NSH,
NPC_S_KPU4_FDSA,
+ NPC_S_KPU4_VLAN_EXDSA,
+ NPC_S_KPU4_PPPOE,
NPC_S_KPU5_IP,
NPC_S_KPU5_IP6,
NPC_S_KPU5_ARP,
@@ -198,13 +209,20 @@ enum npc_kpu_parser_state {
NPC_S_KPU5_MPLS,
NPC_S_KPU5_MPLS_PL,
NPC_S_KPU5_NSH,
+ NPC_S_KPU5_CPT_IP,
+ NPC_S_KPU5_CPT_IP6,
+ NPC_S_KPU5_NGIO,
NPC_S_KPU6_IP6_EXT,
NPC_S_KPU6_IP6_HOP_DEST,
NPC_S_KPU6_IP6_ROUT,
NPC_S_KPU6_IP6_FRAG,
+ NPC_S_KPU6_IP6_CPT_FRAG,
+ NPC_S_KPU6_IP6_CPT_HOP_DEST,
+ NPC_S_KPU6_IP6_CPT_ROUT,
NPC_S_KPU7_IP6_EXT,
NPC_S_KPU7_IP6_ROUT,
NPC_S_KPU7_IP6_FRAG,
+ NPC_S_KPU7_CPT_IP6_FRAG,
NPC_S_KPU8_TCP,
NPC_S_KPU8_UDP,
NPC_S_KPU8_SCTP,
@@ -265,7 +283,6 @@ enum npc_kpu_la_lflag {
NPC_F_LA_L_UNK_ETYPE = 1,
NPC_F_LA_L_WITH_VLAN,
NPC_F_LA_L_WITH_ETAG,
- NPC_F_LA_L_WITH_ITAG,
NPC_F_LA_L_WITH_MPLS,
NPC_F_LA_L_WITH_NSH,
};
@@ -442,7 +459,28 @@ enum NPC_ERRLEV_E {
NPC_ERRLEV_ENUM_LAST = 16,
};
-static const struct npc_kpu_profile_action ikpu_action_entries[] = {
+#define NPC_KPU_NOP_CAM \
+ { \
+ NPC_S_NA, 0xff, \
+ 0x0000, \
+ 0x0000, \
+ 0x0000, \
+ 0x0000, \
+ 0x0000, \
+ 0x0000, \
+ }
+
+#define NPC_KPU_NOP_ACTION \
+ { \
+ NPC_ERRLEV_RE, NPC_EC_NOERR, \
+ 0, 0, 0, 0, 0, \
+ NPC_S_NA, 0, 0, \
+ NPC_LID_LA, NPC_LT_NA, \
+ 0, \
+ 0, 0, 0, 0, \
+ }
+
+static struct npc_kpu_profile_action ikpu_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
12, 16, 20, 0, 0,
@@ -941,8 +979,8 @@ static const struct npc_kpu_profile_action ikpu_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
12, 16, 20, 0, 0,
- NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_PRE_L2_ETHER,
0,
0, 0, 0, 0,
@@ -950,7 +988,7 @@ static const struct npc_kpu_profile_action ikpu_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
12, 16, 20, 0, 0,
- NPC_S_KPU1_ETHER, 0, 0,
+ NPC_S_KPU1_VLAN_EXDSA, 0, 0,
NPC_LID_LA, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -959,8 +997,8 @@ static const struct npc_kpu_profile_action ikpu_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
12, 16, 20, 0, 0,
- NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 24, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
0,
0, 0, 0, 0,
@@ -968,17 +1006,17 @@ static const struct npc_kpu_profile_action ikpu_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
12, 16, 20, 0, 0,
- NPC_S_KPU1_ETHER, 0, 0,
+ NPC_S_KPU1_CPT_HDR, 40, 0,
NPC_LID_LA, NPC_LT_NA,
0,
- 0, 0, 0, 0,
+ 0, 7, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
12, 16, 20, 0, 0,
- NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 90, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
0,
0, 0, 0, 0,
@@ -1021,7 +1059,13 @@ static const struct npc_kpu_profile_action ikpu_action_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU1_ETHER, 0xff,
NPC_ETYPE_IP,
@@ -1123,7 +1167,7 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_ETHER, 0xff,
- NPC_ETYPE_ITAG,
+ NPC_ETYPE_MPLSU,
0xffff,
0x0000,
0x0000,
@@ -1132,7 +1176,7 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_ETHER, 0xff,
- NPC_ETYPE_MPLSU,
+ NPC_ETYPE_MPLSM,
0xffff,
0x0000,
0x0000,
@@ -1141,7 +1185,7 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_ETHER, 0xff,
- NPC_ETYPE_MPLSM,
+ NPC_ETYPE_NSH,
0xffff,
0x0000,
0x0000,
@@ -1150,7 +1194,7 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_ETHER, 0xff,
- NPC_ETYPE_NSH,
+ NPC_ETYPE_DSA,
0xffff,
0x0000,
0x0000,
@@ -1159,7 +1203,7 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_ETHER, 0xff,
- NPC_ETYPE_DSA,
+ NPC_ETYPE_PPPOE,
0xffff,
0x0000,
0x0000,
@@ -1294,15 +1338,6 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_IH_NIX, 0xff,
- NPC_ETYPE_ITAG,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_IH_NIX, 0xff,
NPC_ETYPE_MPLSU,
0xffff,
0x0000,
@@ -1339,33 +1374,6 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_IH, 0xff,
- NPC_IH_W|NPC_IH_UTAG,
- NPC_IH_W|NPC_IH_UTAG,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_IH, 0xff,
- NPC_IH_W,
- NPC_IH_W|NPC_IH_UTAG,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_IH, 0xff,
- 0x0000,
- NPC_IH_W|NPC_IH_UTAG,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_IH, 0xff,
0x0000,
0x0000,
0x0000,
@@ -1501,15 +1509,6 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_HIGIG2, 0xff,
- NPC_ETYPE_ITAG,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_HIGIG2, 0xff,
NPC_ETYPE_MPLSU,
0xffff,
0x0000,
@@ -1645,7 +1644,7 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
- NPC_ETYPE_ITAG,
+ NPC_ETYPE_MPLSU,
0xffff,
0x0000,
0x0000,
@@ -1654,7 +1653,7 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
- NPC_ETYPE_MPLSU,
+ NPC_ETYPE_MPLSM,
0xffff,
0x0000,
0x0000,
@@ -1663,7 +1662,7 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
- NPC_ETYPE_MPLSM,
+ NPC_ETYPE_NSH,
0xffff,
0x0000,
0x0000,
@@ -1672,6 +1671,132 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
+ NPC_ETYPE_ETAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
NPC_ETYPE_NSH,
0xffff,
0x0000,
@@ -1680,7 +1805,7 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
0x0000,
0x0000,
0x0000,
@@ -1689,6 +1814,51 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU1_CPT_HDR, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CPT_HDR, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CPT_HDR, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CPT_HDR, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_VLAN_EXDSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -1699,7 +1869,13 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu2_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu2_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU2_CTAG, 0xff,
NPC_ETYPE_IP,
@@ -1783,6 +1959,33 @@ static const struct npc_kpu_profile_cam kpu2_cam_entries[] = {
},
{
NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_NGIO,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_PPPOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_PPP_IP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_PPPOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_PPP_IP6,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
0x0000,
0x0000,
0x0000,
@@ -2226,15 +2429,6 @@ static const struct npc_kpu_profile_cam kpu2_cam_entries[] = {
NPC_S_KPU2_ETAG, 0xff,
NPC_ETYPE_SBTAG,
0xffff,
- NPC_ETYPE_ITAG,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ETAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
0x0000,
0x0000,
0x0000,
@@ -2313,159 +2507,6 @@ static const struct npc_kpu_profile_cam kpu2_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_IP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_IP6,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_ARP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_RARP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_IP,
- 0xffff,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_IP6,
- 0xffff,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_ARP,
- 0xffff,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_CTAG,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_IP,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_IP6,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_ARP,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_IP,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_IP6,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_ARP,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_CTAG,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
NPC_S_KPU2_CTAG2, 0xff,
NPC_ETYPE_IP,
0xffff,
@@ -2565,114 +2606,6 @@ static const struct npc_kpu_profile_cam kpu2_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU2_PREHEADER, 0xff,
- NPC_ETYPE_IP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_PREHEADER, 0xff,
- NPC_ETYPE_IP6,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_PREHEADER, 0xff,
- NPC_ETYPE_ARP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_PREHEADER, 0xff,
- NPC_ETYPE_RARP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_PREHEADER, 0xff,
- NPC_ETYPE_PTP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_PREHEADER, 0xff,
- NPC_ETYPE_FCOE,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_PREHEADER, 0xff,
- NPC_ETYPE_CTAG,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_PREHEADER, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_PREHEADER, 0xff,
- NPC_ETYPE_QINQ,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_PREHEADER, 0xff,
- NPC_ETYPE_MPLSU,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_PREHEADER, 0xff,
- NPC_ETYPE_MPLSM,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_PREHEADER, 0xff,
- NPC_ETYPE_NSH,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
NPC_S_KPU2_EXDSA, 0xff,
NPC_DSA_EDSA,
NPC_DSA_EDSA,
@@ -2817,6 +2750,42 @@ static const struct npc_kpu_profile_cam kpu2_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU2_CPT_CTAG, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CPT_CTAG, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CPT_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CPT_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -2827,7 +2796,13 @@ static const struct npc_kpu_profile_cam kpu2_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu3_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu3_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU3_CTAG, 0xff,
NPC_ETYPE_IP,
@@ -3243,159 +3218,6 @@ static const struct npc_kpu_profile_cam kpu3_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_IP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_IP6,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_ARP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_RARP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_IP,
- 0xffff,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_IP6,
- 0xffff,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_ARP,
- 0xffff,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_IP,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_IP6,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_ARP,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_CTAG,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_IP,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_IP6,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_ARP,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_CTAG,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
NPC_S_KPU3_CTAG_C, 0xff,
NPC_ETYPE_IP,
0xffff,
@@ -3936,6 +3758,15 @@ static const struct npc_kpu_profile_cam kpu3_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU3_VLAN_EXDSA, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -3946,7 +3777,13 @@ static const struct npc_kpu_profile_cam kpu3_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu4_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu4_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU4_MPLS, 0xff,
NPC_MPLS_S,
@@ -4084,6 +3921,78 @@ static const struct npc_kpu_profile_cam kpu4_cam_entries[] = {
},
{
NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_PPPOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_PPP_IP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_PPPOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_PPP_IP6,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
0x0000,
NPC_DSA_FDSA,
0x0000,
@@ -4092,6 +4001,87 @@ static const struct npc_kpu_profile_cam kpu4_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU4_VLAN_EXDSA, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_VLAN_EXDSA, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_VLAN_EXDSA, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_VLAN_EXDSA, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_VLAN_EXDSA, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_VLAN_EXDSA, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_VLAN_EXDSA, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_PPPOE, 0xff,
+ NPC_PPP_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_PPPOE, 0xff,
+ NPC_PPP_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -4102,7 +4092,13 @@ static const struct npc_kpu_profile_cam kpu4_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu5_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu5_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU5_IP, 0xff,
0x0000,
@@ -4662,6 +4658,438 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU5_CPT_IP, 0xff,
+ 0x0000,
+ NPC_IP_TTL_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0001,
+ NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_TCP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_UDP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_SCTP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_ICMP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_IGMP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_ESP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_AH,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_GRE,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_IP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_IP6,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_MPLS,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_TCP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_UDP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_SCTP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_ICMP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_IGMP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_ESP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_AH,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_GRE,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_IP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_IP6,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_MPLS,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ 0x0000,
+ NPC_IP6_HOP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_HOP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_DEST << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_ROUT << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_FRAG << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_MOBILITY << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_HOSTID << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_SHIM6 << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_NGIO, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -4672,7 +5100,13 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu6_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu6_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU6_IP6_EXT, 0xff,
0x0000,
@@ -5007,6 +5441,330 @@ static const struct npc_kpu_profile_cam kpu6_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_ROUT << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_FRAG << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_FRAG << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -5017,7 +5775,13 @@ static const struct npc_kpu_profile_cam kpu6_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu7_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu7_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU7_IP6_EXT, 0xff,
0x0000,
@@ -5226,6 +5990,105 @@ static const struct npc_kpu_profile_cam kpu7_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -5236,7 +6099,13 @@ static const struct npc_kpu_profile_cam kpu7_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu8_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU8_TCP, 0xff,
0x0000,
@@ -5977,7 +6846,13 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu9_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu9_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU9_TU_MPLS_IN_GRE, 0xff,
NPC_MPLS_S,
@@ -6378,17 +7253,8 @@ static const struct npc_kpu_profile_cam kpu9_cam_entries[] = {
NPC_S_KPU9_GTPU, 0xff,
0x0000,
0x0000,
- NPC_GTP_PT_GTP | NPC_GTP_VER1 | NPC_GTP_MT_G_PDU,
- NPC_GTP_PT_MASK | NPC_GTP_VER_MASK | NPC_GTP_MT_MASK,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU9_GTPU, 0xff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
+ NPC_GTP_PT_GTP | NPC_GTP_VER1,
+ NPC_GTP_PT_MASK | NPC_GTP_VER_MASK,
0x0000,
0x0000,
},
@@ -6448,7 +7314,13 @@ static const struct npc_kpu_profile_cam kpu9_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu10_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu10_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU10_TU_MPLS, 0xff,
NPC_MPLS_S,
@@ -6613,7 +7485,13 @@ static const struct npc_kpu_profile_cam kpu10_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu11_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu11_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU11_TU_ETHER, 0xff,
NPC_ETYPE_IP,
@@ -6922,7 +7800,13 @@ static const struct npc_kpu_profile_cam kpu11_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu12_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu12_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU12_TU_IP, 0xff,
NPC_IPNH_TCP,
@@ -7177,7 +8061,13 @@ static const struct npc_kpu_profile_cam kpu12_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu13_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu13_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU13_TU_IP6_EXT, 0xff,
0x0000,
@@ -7189,7 +8079,13 @@ static const struct npc_kpu_profile_cam kpu13_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu14_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu14_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU14_TU_IP6_EXT, 0xff,
0x0000,
@@ -7201,7 +8097,13 @@ static const struct npc_kpu_profile_cam kpu14_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu15_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu15_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU15_TU_TCP, 0xff,
0x0000,
@@ -7402,7 +8304,13 @@ static const struct npc_kpu_profile_cam kpu15_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu16_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu16_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU16_TCP_DATA, 0xff,
0x0000,
@@ -7459,7 +8367,13 @@ static const struct npc_kpu_profile_cam kpu16_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu1_action_entries[] = {
+static struct npc_kpu_profile_action kpu1_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 3, 0,
@@ -7518,7 +8432,7 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 4, 8, 0, 0, 0,
+ 4, 8, 12, 0, 0,
NPC_S_KPU2_CTAG, 12, 1,
NPC_LID_LA, NPC_LT_LA_ETHER,
NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
@@ -7550,14 +8464,6 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 18, 22, 26, 0, 0,
- NPC_S_KPU2_ITAG, 12, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER,
- NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ITAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 6, 10, 2, 0,
NPC_S_KPU4_MPLS, 14, 1,
NPC_LID_LA, NPC_LT_LA_ETHER,
@@ -7590,6 +8496,14 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 0, 2, 0,
+ NPC_S_KPU4_PPPOE, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 1,
NPC_LID_LA, NPC_LT_LA_8023,
@@ -7701,17 +8615,7 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = {
8, 12, 26, 0, 0,
NPC_S_KPU2_ETAG, 20, 1,
NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
- NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_TAG
- | NPC_F_LA_L_WITH_ETAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 18, 22, 26, 0, 0,
- NPC_S_KPU2_ITAG, 20, 1,
- NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
- NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_TAG
- | NPC_F_LA_L_WITH_ITAG,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ETAG,
0, 0, 0, 0,
},
{
@@ -7747,30 +8651,6 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = {
0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 12, 14, 16, 0, 0,
- NPC_S_KPU2_PREHEADER, 8, 1,
- NPC_LID_LA, NPC_LT_LA_IH_8_ETHER,
- 0,
- 1, 0xff, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 12, 14, 16, 0, 0,
- NPC_S_KPU2_PREHEADER, 4, 1,
- NPC_LID_LA, NPC_LT_LA_IH_4_ETHER,
- 0,
- 1, 0xff, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 12, 14, 16, 0, 0,
- NPC_S_KPU2_PREHEADER, 2, 1,
- NPC_LID_LA, NPC_LT_LA_IH_2_ETHER,
- 0,
- 1, 0xff, 0, 0,
- },
- {
NPC_ERRLEV_LA, NPC_EC_IH_LENGTH,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 1,
@@ -7788,7 +8668,7 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 4, 8, 16, 2, 0,
+ 4, 8, 12, 2, 0,
NPC_S_KPU4_FDSA, 12, 1,
NPC_LID_LA, NPC_LT_LA_ETHER,
0,
@@ -7891,17 +8771,7 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = {
8, 12, 26, 0, 0,
NPC_S_KPU2_ETAG, 28, 1,
NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
- NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_U_HAS_TAG
- | NPC_F_LA_L_WITH_ETAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 18, 22, 26, 0, 0,
- NPC_S_KPU2_ITAG, 28, 1,
- NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
- NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_U_HAS_TAG
- | NPC_F_LA_L_WITH_ITAG,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ETAG,
0, 0, 0, 0,
},
{
@@ -8025,17 +8895,7 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = {
8, 12, 26, 0, 0,
NPC_S_KPU2_ETAG, 36, 1,
NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
- NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
- | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ETAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 18, 22, 26, 0, 0,
- NPC_S_KPU2_ITAG, 36, 1,
- NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
- NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
- | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ITAG,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ETAG,
0, 0, 0, 0,
},
{
@@ -8075,6 +8935,166 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU5_IP, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU5_IP6, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_ARP, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_RARP, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_PTP, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_FCOE, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 0, 0, 0,
+ NPC_S_KPU2_CTAG2, 12, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_CTAG, 12, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 22, 0, 0,
+ NPC_S_KPU2_SBTAG, 12, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_QINQ, 12, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 26, 0, 0,
+ NPC_S_KPU2_ETAG, 12, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU4_NSH, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU5_CPT_IP, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_CPT_HDR,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU5_CPT_IP6, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_CPT_HDR,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_CPT_CTAG, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_CPT_HDR,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_CPT_QINQ, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_CPT_HDR,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 0, 0, 1, 0,
+ NPC_S_KPU3_VLAN_EXDSA, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LA, NPC_EC_L2_K1,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -8084,7 +9104,13 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu2_action_entries[] = {
+static struct npc_kpu_profile_action kpu2_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 2, 0,
@@ -8159,6 +9185,30 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_NGIO, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_PPPOE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_PPPOE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 1,
NPC_LID_LB, NPC_LT_LB_CTAG,
@@ -8551,14 +9601,6 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 16, 20, 24, 0, 0,
- NPC_S_KPU3_ITAG, 14, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_BTAG_ITAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 6, 0, 0, 0,
NPC_S_KPU3_STAG, 10, 1,
NPC_LID_LB, NPC_LT_LB_ETAG,
@@ -8632,142 +9674,6 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 2, 0,
- NPC_S_KPU5_IP, 20, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
- NPC_S_KPU5_IP6, 20, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU5_ARP, 20, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU5_RARP, 20, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 2, 0,
- NPC_S_KPU5_IP, 28, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG_CTAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
- NPC_S_KPU5_IP6, 28, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG_CTAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU5_ARP, 28, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG_CTAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 2, 0,
- NPC_S_KPU5_IP, 24, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
- NPC_S_KPU5_IP6, 24, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU5_ARP, 24, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 2, 0,
- NPC_S_KPU5_IP, 24, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
- NPC_S_KPU5_IP6, 24, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU5_ARP, 24, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 2, 0,
NPC_S_KPU5_IP, 10, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
0,
@@ -8856,102 +9762,6 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 2, 0,
- NPC_S_KPU5_IP, 14, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
- NPC_S_KPU5_IP6, 14, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU5_ARP, 14, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU5_RARP, 14, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU5_PTP, 14, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU5_FCOE, 14, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 6, 0, 0, 0,
- NPC_S_KPU3_CTAG_C, 14, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 6, 20, 0, 0,
- NPC_S_KPU3_STAG_C, 14, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 6, 0, 0, 0,
- NPC_S_KPU3_QINQ_C, 14, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 6, 10, 1, 0,
- NPC_S_KPU4_MPLS, 14, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 6, 10, 1, 0,
- NPC_S_KPU4_MPLS, 14, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 0, 0, 1, 0,
- NPC_S_KPU4_NSH, 14, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 2, 0,
NPC_S_KPU5_IP, 18, 1,
NPC_LID_LB, NPC_LT_LB_EDSA,
NPC_F_LB_L_EDSA,
@@ -9078,6 +9888,38 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_CPT_IP, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_CPT_IP6, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_CPT_IP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_CPT_IP6, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LB, NPC_EC_L2_K3,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -9087,11 +9929,17 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
},
};
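/* For reference: each initializer row in these kpu*_action_entries[] tables
 * appears to follow the field order of struct npc_kpu_profile_action, i.e.
 *
 *   { errlev, errcode,
 *     dp0_offset, dp1_offset, dp2_offset, bypass_count, parse_done,
 *     next_state, ptr_advance, cap_ena,
 *     lid, ltype,
 *     flags,
 *     offset, mask, right, shift }
 *
 * so a row such as "8, 0, 6, 2, 0 / NPC_S_KPU5_IP, 28, 1" loads match data
 * for the next KPU from parse-pointer offsets 8/0/6, bypasses the next two
 * KPUs without finishing the parse, continues in state NPC_S_KPU5_IP after
 * advancing the pointer by 28 bytes, and enables layer capture with the
 * lid/ltype pair that follows.
 */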
-static const struct npc_kpu_profile_action kpu3_action_entries[] = {
+static struct npc_kpu_profile_action kpu3_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 1, 0,
- NPC_S_KPU5_IP, 4, 0,
+ NPC_S_KPU5_IP, 6, 0,
NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -9099,7 +9947,7 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
6, 0, 0, 1, 0,
- NPC_S_KPU5_IP6, 4, 0,
+ NPC_S_KPU5_IP6, 6, 0,
NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -9107,7 +9955,7 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 1, 0,
- NPC_S_KPU5_ARP, 4, 0,
+ NPC_S_KPU5_ARP, 6, 0,
NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -9115,7 +9963,7 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 1, 0,
- NPC_S_KPU5_RARP, 4, 0,
+ NPC_S_KPU5_RARP, 6, 0,
NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -9123,7 +9971,7 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 1, 0,
- NPC_S_KPU5_PTP, 4, 0,
+ NPC_S_KPU5_PTP, 6, 0,
NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -9131,7 +9979,7 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 1, 0,
- NPC_S_KPU5_FCOE, 4, 0,
+ NPC_S_KPU5_FCOE, 6, 0,
NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -9139,7 +9987,7 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 6, 10, 0, 0,
- NPC_S_KPU4_MPLS, 4, 0,
+ NPC_S_KPU4_MPLS, 6, 0,
NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -9147,7 +9995,7 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 6, 10, 0, 0,
- NPC_S_KPU4_MPLS, 4, 0,
+ NPC_S_KPU4_MPLS, 6, 0,
NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -9155,7 +10003,7 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 0, 0, 0, 0,
- NPC_S_KPU4_NSH, 4, 0,
+ NPC_S_KPU4_NSH, 6, 0,
NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -9458,142 +10306,6 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 2, 0,
- NPC_S_KPU5_IP, 18, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
- NPC_S_KPU5_IP6, 18, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU5_ARP, 18, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU5_RARP, 18, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 1, 0,
- NPC_S_KPU5_IP, 26, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
- NPC_S_KPU5_IP6, 26, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 1, 0,
- NPC_S_KPU5_ARP, 26, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 1, 0,
- NPC_S_KPU5_IP, 22, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
- NPC_S_KPU5_IP6, 22, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 1, 0,
- NPC_S_KPU5_ARP, 22, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 1, 0,
- NPC_S_KPU5_IP, 22, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
- NPC_S_KPU5_IP6, 22, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 1, 0,
- NPC_S_KPU5_ARP, 22, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 1, 0,
NPC_S_KPU5_IP, 4, 1,
NPC_LID_LB, NPC_LT_LB_CTAG,
@@ -10073,6 +10785,14 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU4_VLAN_EXDSA, 12, 1,
+ NPC_LID_LB, NPC_LT_LB_VLAN_EXDSA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LB, NPC_EC_L2_K3,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -10082,7 +10802,13 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu4_action_entries[] = {
+static struct npc_kpu_profile_action kpu4_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 0,
@@ -10205,6 +10931,70 @@ static const struct npc_kpu_profile_action kpu4_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU5_IP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU5_IP6, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU5_ARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU5_RARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU5_PTP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU5_FCOE, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU5_IP, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_PPPOE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU5_IP6, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_PPPOE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 1,
NPC_LID_LB, NPC_LT_LB_FDSA,
@@ -10212,6 +11002,78 @@ static const struct npc_kpu_profile_action kpu4_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU5_IP, 2, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU5_IP6, 2, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU5_ARP, 2, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU5_RARP, 2, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU5_PTP, 2, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU5_FCOE, 2, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU5_IP, 10, 0,
+ NPC_LID_LB, NPC_LT_LB_PPPOE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU5_IP6, 10, 0,
+ NPC_LID_LB, NPC_LT_LB_PPPOE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LB, NPC_EC_L2_K4,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -10221,7 +11083,13 @@ static const struct npc_kpu_profile_action kpu4_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu5_action_entries[] = {
+static struct npc_kpu_profile_action kpu5_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_LC, NPC_EC_IP_TTL_0,
0, 0, 0, 0, 1,
@@ -10719,6 +11587,390 @@ static const struct npc_kpu_profile_action kpu5_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_LC, NPC_EC_IP_TTL_0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LC, NPC_EC_IP_FRAG_OFFSET_1,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_U_IP_FRAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 2, 0,
+ NPC_S_KPU8_TCP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU8_UDP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_SCTP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_ICMP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_IGMP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU9_ESP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_AH, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU8_GRE, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 6, 0,
+ NPC_S_KPU12_TU_IP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_L_IP_IN_IP,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 6, 0,
+ NPC_S_KPU12_TU_IP6, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_L_6TO4,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 3, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_L_MPLS_IN_IP,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_U_UNK_PROTO,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 2, 0,
+ NPC_S_KPU8_TCP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 2, 0,
+ NPC_S_KPU8_UDP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_SCTP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_ICMP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_IGMP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU9_ESP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_AH, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU8_GRE, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 6, 0,
+ NPC_S_KPU12_TU_IP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ NPC_F_LC_L_IP_IN_IP,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 6, 0,
+ NPC_S_KPU12_TU_IP6, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ NPC_F_LC_L_6TO4,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 3, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ NPC_F_LC_L_MPLS_IN_IP,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ NPC_F_LC_U_UNK_PROTO,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LC, NPC_EC_IP_VER,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LC, NPC_EC_IP6_HOP_0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 2, 0,
+ NPC_S_KPU8_TCP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU8_UDP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_SCTP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_ICMP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_ICMP6, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_GRE, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 6, 0,
+ NPC_S_KPU12_TU_IP6, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ NPC_F_LC_L_IP6_TUN_IP6,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 3, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ NPC_F_LC_L_IP6_MPLS_IN_IP,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_HOP,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_DEST,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU6_IP6_CPT_ROUT, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_ROUT,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 2, 0, 0, 0,
+ NPC_S_KPU6_IP6_CPT_FRAG, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_U_IP6_FRAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU9_ESP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_AH, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_MOBILITY,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_HOSTID,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_SHIM6,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ NPC_F_LC_U_UNK_PROTO,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LC, NPC_EC_IP6_VER,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_NGIO,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LC, NPC_EC_UNK,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -10728,7 +11980,13 @@ static const struct npc_kpu_profile_action kpu5_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu6_action_entries[] = {
+static struct npc_kpu_profile_action kpu6_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
@@ -11026,6 +12284,294 @@ static const struct npc_kpu_profile_action kpu6_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 1, 0,
+ NPC_S_KPU8_TCP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 1, 0,
+ NPC_S_KPU8_UDP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_SCTP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU9_ESP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_AH, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_GRE, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 5, 0,
+ NPC_S_KPU12_TU_IP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 1, 0,
+ NPC_S_KPU8_TCP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 1, 0,
+ NPC_S_KPU8_UDP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_SCTP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU9_ESP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_AH, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_GRE, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 5, 0,
+ NPC_S_KPU12_TU_IP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU7_IP6_ROUT, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 2, 0, 0, 0,
+ NPC_S_KPU7_CPT_IP6_FRAG, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 1, 0,
+ NPC_S_KPU8_TCP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 1, 0,
+ NPC_S_KPU8_UDP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_SCTP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU9_ESP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_AH, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_GRE, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 5, 0,
+ NPC_S_KPU12_TU_IP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 2, 0, 0, 0,
+ NPC_S_KPU7_CPT_IP6_FRAG, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LC, NPC_EC_UNK,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -11035,7 +12581,13 @@ static const struct npc_kpu_profile_action kpu6_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu7_action_entries[] = {
+static struct npc_kpu_profile_action kpu7_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
@@ -11221,6 +12773,94 @@ static const struct npc_kpu_profile_action kpu7_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 0, 0,
+ NPC_S_KPU8_TCP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 0, 0,
+ NPC_S_KPU8_UDP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_SCTP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_ICMP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_ICMP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU9_ESP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_AH, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_GRE, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 4, 0,
+ NPC_S_KPU12_TU_IP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LC, NPC_EC_UNK,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -11230,7 +12870,13 @@ static const struct npc_kpu_profile_action kpu7_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu8_action_entries[] = {
+static struct npc_kpu_profile_action kpu8_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_LD, NPC_EC_TCP_FLAGS_FIN_ONLY,
0, 0, 0, 0, 1,
@@ -11889,7 +13535,13 @@ static const struct npc_kpu_profile_action kpu8_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu9_action_entries[] = {
+static struct npc_kpu_profile_action kpu9_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 0,
@@ -12244,18 +13896,10 @@ static const struct npc_kpu_profile_action kpu9_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 2, 0,
- NPC_S_KPU12_TU_IP, 8, 1,
- NPC_LID_LE, NPC_LT_LE_GTPU,
- NPC_F_LE_L_GTPU_G_PDU,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 0, 1,
+ 8, 0, 6, 2, 1,
NPC_S_NA, 0, 1,
NPC_LID_LE, NPC_LT_LE_GTPU,
- NPC_F_LE_L_GTPU_UNK,
+ 0,
0, 0, 0, 0,
},
{
@@ -12308,7 +13952,13 @@ static const struct npc_kpu_profile_action kpu9_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu10_action_entries[] = {
+static struct npc_kpu_profile_action kpu10_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 1, 0,
@@ -12455,7 +14105,13 @@ static const struct npc_kpu_profile_action kpu10_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu11_action_entries[] = {
+static struct npc_kpu_profile_action kpu11_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 0, 0,
@@ -12730,7 +14386,13 @@ static const struct npc_kpu_profile_action kpu11_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu12_action_entries[] = {
+static struct npc_kpu_profile_action kpu12_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 12, 0, 2, 0,
@@ -12957,7 +14619,13 @@ static const struct npc_kpu_profile_action kpu12_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu13_action_entries[] = {
+static struct npc_kpu_profile_action kpu13_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
@@ -12968,7 +14636,13 @@ static const struct npc_kpu_profile_action kpu13_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu14_action_entries[] = {
+static struct npc_kpu_profile_action kpu14_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
@@ -12979,7 +14653,13 @@ static const struct npc_kpu_profile_action kpu14_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu15_action_entries[] = {
+static struct npc_kpu_profile_action kpu15_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_LG, NPC_EC_TCP_FLAGS_FIN_ONLY,
0, 0, 0, 0, 1,
@@ -13158,7 +14838,13 @@ static const struct npc_kpu_profile_action kpu15_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu16_action_entries[] = {
+static struct npc_kpu_profile_action kpu16_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
@@ -13209,7 +14895,7 @@ static const struct npc_kpu_profile_action kpu16_action_entries[] = {
},
};
-static const struct npc_kpu_profile npc_kpu_profiles[] = {
+static struct npc_kpu_profile npc_kpu_profiles[] = {
{
ARRAY_SIZE(kpu1_cam_entries),
ARRAY_SIZE(kpu1_action_entries),
@@ -13314,6 +15000,16 @@ static const struct npc_lt_def_cfg npc_lt_defaults = {
.ltype_match = NPC_LT_LA_ETHER,
.ltype_mask = 0x0F,
},
+ .ovlan = {
+ .lid = NPC_LID_LB,
+ .ltype_match = NPC_LT_LB_CTAG,
+ .ltype_mask = 0x0F,
+ },
+ .ivlan = {
+ .lid = NPC_LID_LB,
+ .ltype_match = NPC_LT_LB_STAG_QINQ,
+ .ltype_mask = 0x0F,
+ },
.rx_oip4 = {
.lid = NPC_LID_LC,
.ltype_match = NPC_LT_LC_IP,
@@ -13392,9 +15088,35 @@ static const struct npc_lt_def_cfg npc_lt_defaults = {
.ltype_match = NPC_LT_LG_TU_IP,
.ltype_mask = 0x0F,
},
+ .rx_apad0 = {
+ .valid = 0,
+ .lid = NPC_LID_LC,
+ .ltype_match = NPC_LT_LC_IP6,
+ .ltype_mask = 0x0F,
+ },
+ .rx_apad1 = {
+ .valid = 0,
+ .lid = NPC_LID_LC,
+ .ltype_match = NPC_LT_LC_IP6,
+ .ltype_mask = 0x0F,
+ },
+ .rx_et = {
+ {
+ .offset = -2,
+ .valid = 1,
+ .lid = NPC_LID_LC,
+ .ltype_match = NPC_LT_NA,
+ .ltype_mask = 0x0,
+ },
+ {
+ .lid = NPC_LID_LB,
+ .ltype_match = NPC_LT_NA,
+ .ltype_mask = 0x0,
+ },
+ },
};
-static const struct npc_mcam_kex npc_mkex_default = {
+static struct npc_mcam_kex npc_mkex_default = {
.mkex_sign = MKEX_SIGN,
.name = "default",
.kpu_version = NPC_KPU_PROFILE_VER,
@@ -13410,30 +15132,40 @@ static const struct npc_mcam_kex npc_mkex_default = {
[NPC_LID_LA] = {
/* Layer A: Ethernet: */
[NPC_LT_LA_ETHER] = {
- /* DMAC: 6 bytes, KW1[47:0] */
+ /* DMAC: 6 bytes, KW1[55:8] */
KEX_LD_CFG(0x05, 0x0, 0x1, 0x0, NPC_KEXOF_DMAC),
- /* Ethertype: 2 bytes, KW0[47:32] */
- KEX_LD_CFG(0x01, 0xc, 0x1, 0x0, 0x4),
+ /* Ethertype: 2 bytes, KW0[55:40] */
+ KEX_LD_CFG(0x01, 0xc, 0x1, 0x0, 0x5),
+ },
+ /* Layer A: HiGig2: */
+ [NPC_LT_LA_HIGIG2_ETHER] = {
+ /* Classification: 2 bytes, KW1[23:8] */
+ KEX_LD_CFG(0x01, 0x8, 0x1, 0x0, NPC_KEXOF_DMAC),
+ /* VID: 2 bytes, KW1[39:24] */
+ KEX_LD_CFG(0x01, 0xc, 0x1, 0x0,
+ NPC_KEXOF_DMAC + 2),
},
},
[NPC_LID_LB] = {
/* Layer B: Single VLAN (CTAG) */
- /* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */
[NPC_LT_LB_CTAG] = {
- KEX_LD_CFG(0x03, 0x2, 0x1, 0x0, 0x4),
+ /* CTAG VLAN: 2 bytes, KW1[7:0], KW0[63:56] */
+ KEX_LD_CFG(0x01, 0x2, 0x1, 0x0, 0x7),
+ /* Ethertype: 2 bytes, KW0[55:40] */
+ KEX_LD_CFG(0x01, 0x4, 0x1, 0x0, 0x5),
},
/* Layer B: Stacked VLAN (STAG|QinQ) */
[NPC_LT_LB_STAG_QINQ] = {
- /* Outer VLAN: 2 bytes, KW0[63:48] */
- KEX_LD_CFG(0x01, 0x2, 0x1, 0x0, 0x6),
- /* Ethertype: 2 bytes, KW0[47:32] */
- KEX_LD_CFG(0x01, 0x8, 0x1, 0x0, 0x4),
+ /* Outer VLAN: 2 bytes, KW1[7:0], KW0[63:56] */
+ KEX_LD_CFG(0x01, 0x2, 0x1, 0x0, 0x7),
+ /* Ethertype: 2 bytes, KW0[55:40] */
+ KEX_LD_CFG(0x01, 0x8, 0x1, 0x0, 0x5),
},
[NPC_LT_LB_FDSA] = {
- /* SWITCH PORT: 1 byte, KW0[63:48] */
- KEX_LD_CFG(0x0, 0x1, 0x1, 0x0, 0x6),
- /* Ethertype: 2 bytes, KW0[47:32] */
- KEX_LD_CFG(0x01, 0x4, 0x1, 0x0, 0x4),
+ /* SWITCH PORT: 1 byte, KW0[63:56] */
+ KEX_LD_CFG(0x0, 0x1, 0x1, 0x0, 0x7),
+ /* Ethertype: 2 bytes, KW0[55:40] */
+ KEX_LD_CFG(0x01, 0x4, 0x1, 0x0, 0x5),
},
},
[NPC_LID_LC] = {
@@ -13477,6 +15209,13 @@ static const struct npc_mcam_kex npc_mkex_default = {
/* DMAC: 6 bytes, KW1[63:16] */
KEX_LD_CFG(0x05, 0x8, 0x1, 0x0, 0xa),
},
+ /* Layer A: HiGig2: */
+ [NPC_LT_LA_IH_NIX_HIGIG2_ETHER] = {
+ /* PF_FUNC: 2B , KW0 [47:32] */
+ KEX_LD_CFG(0x01, 0x0, 0x1, 0x0, 0x4),
+ /* VID: 2 bytes, KW1[31:16] */
+ KEX_LD_CFG(0x01, 0x10, 0x1, 0x0, 0xa),
+ },
},
[NPC_LID_LB] = {
/* Layer B: Single VLAN (CTAG) */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
index f69f4f35ae48..b500b165732b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Marvell PTP driver
*
- * Copyright (C) 2020 Marvell International Ltd.
+ * Copyright (C) 2020 Marvell.
+ *
*/
#include <linux/bitfield.h>
@@ -19,75 +20,147 @@
#define PCI_SUBSYS_DEVID_OCTX2_98xx_PTP 0xB100
#define PCI_SUBSYS_DEVID_OCTX2_96XX_PTP 0xB200
#define PCI_SUBSYS_DEVID_OCTX2_95XX_PTP 0xB300
-#define PCI_SUBSYS_DEVID_OCTX2_LOKI_PTP 0xB400
+#define PCI_SUBSYS_DEVID_OCTX2_95XXN_PTP 0xB400
#define PCI_SUBSYS_DEVID_OCTX2_95MM_PTP 0xB500
+#define PCI_SUBSYS_DEVID_OCTX2_95XXO_PTP 0xB600
#define PCI_DEVID_OCTEONTX2_RST 0xA085
+#define PCI_DEVID_CN10K_PTP 0xA09E
+#define PCI_SUBSYS_DEVID_CN10K_A_PTP 0xB900
+#define PCI_SUBSYS_DEVID_CNF10K_A_PTP 0xBA00
+#define PCI_SUBSYS_DEVID_CNF10K_B_PTP 0xBC00
#define PCI_PTP_BAR_NO 0
-#define PCI_RST_BAR_NO 0
#define PTP_CLOCK_CFG 0xF00ULL
#define PTP_CLOCK_CFG_PTP_EN BIT_ULL(0)
+#define PTP_CLOCK_CFG_EXT_CLK_EN BIT_ULL(1)
+#define PTP_CLOCK_CFG_EXT_CLK_IN_MASK GENMASK_ULL(7, 2)
+#define PTP_CLOCK_CFG_TSTMP_EDGE BIT_ULL(9)
+#define PTP_CLOCK_CFG_TSTMP_EN BIT_ULL(8)
+#define PTP_CLOCK_CFG_TSTMP_IN_MASK GENMASK_ULL(15, 10)
+#define PTP_CLOCK_CFG_PPS_EN BIT_ULL(30)
+#define PTP_CLOCK_CFG_PPS_INV BIT_ULL(31)
+
+#define PTP_PPS_HI_INCR 0xF60ULL
+#define PTP_PPS_LO_INCR 0xF68ULL
+#define PTP_PPS_THRESH_HI 0xF58ULL
+
#define PTP_CLOCK_LO 0xF08ULL
#define PTP_CLOCK_HI 0xF10ULL
#define PTP_CLOCK_COMP 0xF18ULL
+#define PTP_TIMESTAMP 0xF20ULL
+#define PTP_CLOCK_SEC 0xFD0ULL
-#define RST_BOOT 0x1600ULL
-#define RST_MUL_BITS GENMASK_ULL(38, 33)
-#define CLOCK_BASE_RATE 50000000ULL
+#define CYCLE_MULT 1000
-static u64 get_clock_rate(void)
+static struct ptp *first_ptp_block;
+static const struct pci_device_id ptp_id_table[];
+
+static bool cn10k_ptp_errata(struct ptp *ptp)
{
- u64 cfg, ret = CLOCK_BASE_RATE * 16;
- struct pci_dev *pdev;
- void __iomem *base;
+ if (ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_PTP ||
+ ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_A_PTP)
+ return true;
+ return false;
+}
- /* To get the input clock frequency with which PTP co-processor
- * block is running the base frequency(50 MHz) needs to be multiplied
- * with multiplier bits present in RST_BOOT register of RESET block.
- * Hence below code gets the multiplier bits from the RESET PCI
- * device present in the system.
- */
- pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
- PCI_DEVID_OCTEONTX2_RST, NULL);
- if (!pdev)
- goto error;
+static bool is_ptp_tsfmt_sec_nsec(struct ptp *ptp)
+{
+ if (ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_PTP ||
+ ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_A_PTP)
+ return true;
+ return false;
+}
- base = pci_ioremap_bar(pdev, PCI_RST_BAR_NO);
- if (!base)
- goto error_put_pdev;
+static u64 read_ptp_tstmp_sec_nsec(struct ptp *ptp)
+{
+ u64 sec, sec1, nsec;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ptp->ptp_lock, flags);
+ sec = readq(ptp->reg_base + PTP_CLOCK_SEC) & 0xFFFFFFFFUL;
+ nsec = readq(ptp->reg_base + PTP_CLOCK_HI);
+ sec1 = readq(ptp->reg_base + PTP_CLOCK_SEC) & 0xFFFFFFFFUL;
+ /* check nsec rollover */
+ if (sec1 > sec) {
+ nsec = readq(ptp->reg_base + PTP_CLOCK_HI);
+ sec = sec1;
+ }
+ spin_unlock_irqrestore(&ptp->ptp_lock, flags);
- cfg = readq(base + RST_BOOT);
- ret = CLOCK_BASE_RATE * FIELD_GET(RST_MUL_BITS, cfg);
+ return sec * NSEC_PER_SEC + nsec;
+}
- iounmap(base);
+static u64 read_ptp_tstmp_nsec(struct ptp *ptp)
+{
+ return readq(ptp->reg_base + PTP_CLOCK_HI);
+}
-error_put_pdev:
- pci_dev_put(pdev);
+static u64 ptp_calc_adjusted_comp(u64 ptp_clock_freq)
+{
+ u64 comp, adj = 0, cycles_per_sec, ns_drift = 0;
+ u32 ptp_clock_nsec, cycle_time;
+ int cycle;
+
+ /* Errata:
+ * Issue #1: At the time of 1 sec rollover of the nano-second counter,
+ * the nano-second counter is set to 0. However, it should be set to
+ * (existing counter_value - 10^9).
+ *
+ * Issue #2: The nano-second counter rolls over at 0x3B9A_C9FF.
+ * It should roll over at 0x3B9A_CA00.
+ */
-error:
- return ret;
+ /* calculate ptp_clock_comp value */
+ comp = ((u64)1000000000ULL << 32) / ptp_clock_freq;
+ /* use CYCLE_MULT to avoid accuracy loss due to integer arithmetic */
+ cycle_time = NSEC_PER_SEC * CYCLE_MULT / ptp_clock_freq;
+ /* cycles per sec */
+ cycles_per_sec = ptp_clock_freq;
+
+ /* check whether ptp nanosecond counter rolls over early */
+ cycle = cycles_per_sec - 1;
+ ptp_clock_nsec = (cycle * comp) >> 32;
+ while (ptp_clock_nsec < NSEC_PER_SEC) {
+ if (ptp_clock_nsec == 0x3B9AC9FF)
+ goto calc_adj_comp;
+ cycle++;
+ ptp_clock_nsec = (cycle * comp) >> 32;
+ }
+ /* compute nanoseconds lost per second when nsec counter rolls over */
+ ns_drift = ptp_clock_nsec - NSEC_PER_SEC;
+ /* calculate ptp_clock_comp adjustment */
+ if (ns_drift > 0) {
+ adj = comp * ns_drift;
+ adj = adj / 1000000000ULL;
+ }
+ /* speed up the ptp clock to account for nanoseconds lost */
+ comp += adj;
+ return comp;
+
+calc_adj_comp:
+ /* slow down the ptp clock to not rollover early */
+ adj = comp * cycle_time;
+ adj = adj / 1000000000ULL;
+ adj = adj / CYCLE_MULT;
+ comp -= adj;
+
+ return comp;
}
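/* A worked example of the compensation value (assuming a 1 GHz PTP
 * coprocessor clock): comp = (10^9 << 32) / 10^9 = 2^32, i.e. exactly 1 ns is
 * added to the nanosecond counter per coprocessor cycle.  For a 900 MHz clock
 * comp = (10^9 << 32) / (9 * 10^8) ~= 0x1_1C71_C71C, roughly 1.111 ns per
 * cycle.  On the affected CN10K parts, ptp_calc_adjusted_comp() then nudges
 * this base value up or down so the counter neither loses time at the 1 s
 * rollover nor rolls over one count early (the two errata described above).
 */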
struct ptp *ptp_get(void)
{
- struct pci_dev *pdev;
- struct ptp *ptp;
+ struct ptp *ptp = first_ptp_block;
- /* If the PTP pci device is found on the system and ptp
- * driver is bound to it then the PTP pci device is returned
- * to the caller(rvu driver).
- */
- pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
- PCI_DEVID_OCTEONTX2_PTP, NULL);
- if (!pdev)
+ /* Check PTP block is present in hardware */
+ if (!pci_dev_present(ptp_id_table))
return ERR_PTR(-ENODEV);
- ptp = pci_get_drvdata(pdev);
+ /* Check driver is bound to PTP block */
if (!ptp)
ptp = ERR_PTR(-EPROBE_DEFER);
- if (IS_ERR(ptp))
- pci_dev_put(pdev);
+ else
+ pci_dev_get(ptp->pdev);
return ptp;
}
@@ -103,8 +176,8 @@ void ptp_put(struct ptp *ptp)
static int ptp_adjfine(struct ptp *ptp, long scaled_ppm)
{
bool neg_adj = false;
- u64 comp;
- u64 adj;
+ u32 freq, freq_adj;
+ u64 comp, adj;
s64 ppb;
if (scaled_ppm < 0) {
@@ -126,24 +199,150 @@ static int ptp_adjfine(struct ptp *ptp, long scaled_ppm)
* where tbase is the basic compensation value calculated
* initially in the probe function.
*/
- comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
/* convert scaled_ppm to ppb */
ppb = 1 + scaled_ppm;
ppb *= 125;
ppb >>= 13;
- adj = comp * ppb;
- adj = div_u64(adj, 1000000000ull);
- comp = neg_adj ? comp - adj : comp + adj;
+ if (cn10k_ptp_errata(ptp)) {
+ /* calculate the new frequency based on ppb */
+ freq_adj = (ptp->clock_rate * ppb) / 1000000000ULL;
+ freq = neg_adj ? ptp->clock_rate + freq_adj : ptp->clock_rate - freq_adj;
+ comp = ptp_calc_adjusted_comp(freq);
+ } else {
+ comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
+ adj = comp * ppb;
+ adj = div_u64(adj, 1000000000ull);
+ comp = neg_adj ? comp - adj : comp + adj;
+ }
writeq(comp, ptp->reg_base + PTP_CLOCK_COMP);
return 0;
}
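/* For reference: scaled_ppm carries a 16-bit fractional part, so
 * ppb = scaled_ppm * 1000 / 2^16 = scaled_ppm * 125 / 2^13, which is what the
 * "ppb *= 125; ppb >>= 13" sequence above computes (the leading "1 +" only
 * rounds the truncation up by at most 1 ppb).  For example scaled_ppm = 65536
 * (1.0 ppm) yields ppb = (1 + 65536) * 125 >> 13 = 1000.
 */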
-static int ptp_get_clock(struct ptp *ptp, u64 *clk)
+static inline u64 get_tsc(bool is_pmu)
+{
+#if defined(CONFIG_ARM64)
+ return is_pmu ? read_sysreg(pmccntr_el0) : read_sysreg(cntvct_el0);
+#else
+ return 0;
+#endif
+}
+
+static int ptp_get_clock(struct ptp *ptp, bool is_pmu, u64 *clk, u64 *tsc)
{
- /* Return the current PTP clock */
- *clk = readq(ptp->reg_base + PTP_CLOCK_HI);
+ u64 end, start;
+ u8 retries = 0;
+
+ do {
+ start = get_tsc(0);
+ *tsc = get_tsc(is_pmu);
+ *clk = ptp->read_ptp_tstmp(ptp);
+ end = get_tsc(0);
+ retries++;
+ } while (((end - start) > 50) && retries < 5);
+
+ return 0;
+}
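+/* The retry loop above appears to exist to pair the PTP timestamp with a CPU
+ * counter sample (cntvct_el0, or pmccntr_el0 when is_pmu is set): if the two
+ * get_tsc(0) readings bracketing the register read are more than 50 counter
+ * ticks apart, the sample is retried, up to five times, so the returned
+ * (clk, tsc) pair is captured as close together in time as possible.
+ */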
+
+/* On CN10K the PTP time is represented by a pair of registers, one for
+ * seconds and one for nanoseconds, whereas on 96xx it is represented by a
+ * single register. The nanosecond register on CN10K rolls over every second.
+ */
+static int ptp_set_clock(struct ptp *ptp, u64 nsec)
+{
+ if (is_ptp_tsfmt_sec_nsec(ptp)) {
+ writeq(nsec / NSEC_PER_SEC, ptp->reg_base + PTP_CLOCK_SEC);
+ writeq(nsec % NSEC_PER_SEC, ptp->reg_base + PTP_CLOCK_HI);
+ } else {
+ writeq(nsec, ptp->reg_base + PTP_CLOCK_HI);
+ }
+
+ return 0;
+}
+
+static int ptp_adj_clock(struct ptp *ptp, s64 delta)
+{
+ u64 regval, sec;
+
+ regval = readq(ptp->reg_base + PTP_CLOCK_HI);
+ regval += delta;
+
+ if (is_ptp_tsfmt_sec_nsec(ptp)) {
+ sec = readq(ptp->reg_base + PTP_CLOCK_SEC) & 0xFFFFFFFFUL;
+ sec += regval / NSEC_PER_SEC;
+ writeq(sec, ptp->reg_base + PTP_CLOCK_SEC);
+ writeq(regval % NSEC_PER_SEC, ptp->reg_base + PTP_CLOCK_HI);
+ } else {
+ writeq(regval, ptp->reg_base + PTP_CLOCK_HI);
+ }
+
+ return 0;
+}
+
+void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts)
+{
+ struct pci_dev *pdev;
+ u64 clock_comp;
+ u64 clock_cfg;
+
+ if (!ptp)
+ return;
+
+ pdev = ptp->pdev;
+
+ if (!sclk) {
+ dev_err(&pdev->dev, "PTP input clock cannot be zero\n");
+ return;
+ }
+
+ /* sclk is in MHz */
+ ptp->clock_rate = sclk * 1000000;
+
+ /* Enable PTP clock */
+ clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
+
+ if (ext_clk_freq) {
+ ptp->clock_rate = ext_clk_freq;
+ /* Set GPIO as PTP clock source */
+ clock_cfg &= ~PTP_CLOCK_CFG_EXT_CLK_IN_MASK;
+ clock_cfg |= PTP_CLOCK_CFG_EXT_CLK_EN;
+ }
+
+ if (extts) {
+ clock_cfg |= PTP_CLOCK_CFG_TSTMP_EDGE;
+ /* Set GPIO as timestamping source */
+ clock_cfg &= ~PTP_CLOCK_CFG_TSTMP_IN_MASK;
+ clock_cfg |= PTP_CLOCK_CFG_TSTMP_EN;
+ }
+
+ clock_cfg |= PTP_CLOCK_CFG_PTP_EN;
+ clock_cfg |= PTP_CLOCK_CFG_PPS_EN | PTP_CLOCK_CFG_PPS_INV;
+ writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
+
+ /* Set 50% duty cycle for 1Hz output */
+ writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_HI_INCR);
+ writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_LO_INCR);
+
+ if (cn10k_ptp_errata(ptp))
+ clock_comp = ptp_calc_adjusted_comp(ptp->clock_rate);
+ else
+ clock_comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
+
+ /* Initial compensation value to start the nanosecs counter */
+ writeq(clock_comp, ptp->reg_base + PTP_CLOCK_COMP);
+}
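+/* Note on the PPS programming above: 0x1dcd650000000000 is 500000000, i.e.
+ * half a second worth of nanoseconds, placed in the upper 32 bits of the
+ * increment registers.  With equal high and low increments the PPS output
+ * toggles every 0.5 s, giving the 1 Hz, 50% duty cycle signal the comment
+ * refers to.
+ */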
+
+static int ptp_get_tstmp(struct ptp *ptp, u64 *clk)
+{
+ *clk = readq(ptp->reg_base + PTP_TIMESTAMP);
+
+ return 0;
+}
+
+static int ptp_set_thresh(struct ptp *ptp, u64 thresh)
+{
+ writeq(thresh, ptp->reg_base + PTP_PPS_THRESH_HI);
return 0;
}
@@ -153,8 +352,6 @@ static int ptp_probe(struct pci_dev *pdev,
{
struct device *dev = &pdev->dev;
struct ptp *ptp;
- u64 clock_comp;
- u64 clock_cfg;
int err;
ptp = devm_kzalloc(dev, sizeof(*ptp), GFP_KERNEL);
@@ -175,18 +372,15 @@ static int ptp_probe(struct pci_dev *pdev,
ptp->reg_base = pcim_iomap_table(pdev)[PCI_PTP_BAR_NO];
- ptp->clock_rate = get_clock_rate();
-
- /* Enable PTP clock */
- clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
- clock_cfg |= PTP_CLOCK_CFG_PTP_EN;
- writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
-
- clock_comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
- /* Initial compensation value to start the nanosecs counter */
- writeq(clock_comp, ptp->reg_base + PTP_CLOCK_COMP);
-
pci_set_drvdata(pdev, ptp);
+ if (!first_ptp_block)
+ first_ptp_block = ptp;
+
+ spin_lock_init(&ptp->ptp_lock);
+ if (is_ptp_tsfmt_sec_nsec(ptp))
+ ptp->read_ptp_tstmp = &read_ptp_tstmp_sec_nsec;
+ else
+ ptp->read_ptp_tstmp = &read_ptp_tstmp_nsec;
return 0;
@@ -201,6 +395,9 @@ error:
* `dev->driver_data`.
*/
pci_set_drvdata(pdev, ERR_PTR(err));
+ if (!first_ptp_block)
+ first_ptp_block = ERR_PTR(err);
+
return 0;
}
@@ -230,10 +427,14 @@ static const struct pci_device_id ptp_id_table[] = {
PCI_SUBSYS_DEVID_OCTX2_95XX_PTP) },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
PCI_VENDOR_ID_CAVIUM,
- PCI_SUBSYS_DEVID_OCTX2_LOKI_PTP) },
+ PCI_SUBSYS_DEVID_OCTX2_95XXN_PTP) },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
PCI_VENDOR_ID_CAVIUM,
PCI_SUBSYS_DEVID_OCTX2_95MM_PTP) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_OCTX2_95XXO_PTP) },
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_PTP) },
{ 0, }
};
@@ -264,7 +465,20 @@ int rvu_mbox_handler_ptp_op(struct rvu *rvu, struct ptp_req *req,
err = ptp_adjfine(rvu->ptp, req->scaled_ppm);
break;
case PTP_OP_GET_CLOCK:
- err = ptp_get_clock(rvu->ptp, &rsp->clk);
+ err = ptp_get_clock(rvu->ptp, req->is_pmu, &rsp->clk,
+ &rsp->tsc);
+ break;
+ case PTP_OP_GET_TSTMP:
+ err = ptp_get_tstmp(rvu->ptp, &rsp->clk);
+ break;
+ case PTP_OP_SET_THRESH:
+ err = ptp_set_thresh(rvu->ptp, req->thresh);
+ break;
+ case PTP_OP_SET_CLOCK:
+ err = ptp_set_clock(rvu->ptp, req->nsec);
+ break;
+ case PTP_OP_ADJ_CLOCK:
+ err = ptp_adj_clock(rvu->ptp, req->delta);
break;
default:
err = -EINVAL;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.h b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
index 878bc395d28f..95a955159f40 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
@@ -1,7 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell PTP driver
*
- * Copyright (C) 2020 Marvell International Ltd.
+ * Copyright (C) 2020 Marvell.
+ *
*/
#ifndef PTP_H
@@ -14,11 +15,14 @@
struct ptp {
struct pci_dev *pdev;
void __iomem *reg_base;
+ u64 (*read_ptp_tstmp)(struct ptp *ptp);
+ spinlock_t ptp_lock; /* lock */
u32 clock_rate;
};
struct ptp *ptp_get(void);
void ptp_put(struct ptp *ptp);
+void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts);
extern struct pci_driver ptp_driver;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
new file mode 100644
index 000000000000..42669432c438
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
@@ -0,0 +1,464 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell CN10K RPM driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include "cgx.h"
+#include "lmac_common.h"
+
+static struct mac_ops rpm_mac_ops = {
+ .name = "rpm",
+ .csr_offset = 0x4e00,
+ .lmac_offset = 20,
+ .int_register = RPMX_CMRX_SW_INT,
+ .int_set_reg = RPMX_CMRX_SW_INT_ENA_W1S,
+ .irq_offset = 1,
+ .int_ena_bit = BIT_ULL(0),
+ .lmac_fwi = RPM_LMAC_FWI,
+ .non_contiguous_serdes_lane = true,
+ .rx_stats_cnt = 43,
+ .tx_stats_cnt = 34,
+ .get_nr_lmacs = rpm_get_nr_lmacs,
+ .get_lmac_type = rpm_get_lmac_type,
+ .mac_lmac_intl_lbk = rpm_lmac_internal_loopback,
+ .mac_get_rx_stats = rpm_get_rx_stats,
+ .mac_get_tx_stats = rpm_get_tx_stats,
+ .mac_enadis_rx_pause_fwding = rpm_lmac_enadis_rx_pause_fwding,
+ .mac_get_pause_frm_status = rpm_lmac_get_pause_frm_status,
+ .mac_enadis_pause_frm = rpm_lmac_enadis_pause_frm,
+ .mac_pause_frm_config = rpm_lmac_pause_frm_config,
+ .mac_enadis_ptp_config = rpm_lmac_ptp_config,
+ .mac_rx_tx_enable = rpm_lmac_rx_tx_enable,
+ .mac_tx_enable = rpm_lmac_tx_enable,
+ .pfc_config = rpm_lmac_pfc_config,
+ .mac_get_pfc_frm_cfg = rpm_lmac_get_pfc_frm_cfg,
+};
+
+struct mac_ops *rpm_get_mac_ops(void)
+{
+ return &rpm_mac_ops;
+}
+
+static void rpm_write(rpm_t *rpm, u64 lmac, u64 offset, u64 val)
+{
+ cgx_write(rpm, lmac, offset, val);
+}
+
+static u64 rpm_read(rpm_t *rpm, u64 lmac, u64 offset)
+{
+ return cgx_read(rpm, lmac, offset);
+}
+
+int rpm_get_nr_lmacs(void *rpmd)
+{
+ rpm_t *rpm = rpmd;
+
+ return hweight8(rpm_read(rpm, 0, CGXX_CMRX_RX_LMACS) & 0xFULL);
+}
+
+int rpm_lmac_tx_enable(void *rpmd, int lmac_id, bool enable)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg, last;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ last = cfg;
+ if (enable)
+ cfg |= RPM_TX_EN;
+ else
+ cfg &= ~(RPM_TX_EN);
+
+ if (cfg != last)
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+ return !!(last & RPM_TX_EN);
+}
+
+int rpm_lmac_rx_tx_enable(void *rpmd, int lmac_id, bool enable)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ if (enable)
+ cfg |= RPM_RX_EN | RPM_TX_EN;
+ else
+ cfg &= ~(RPM_RX_EN | RPM_TX_EN);
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+ return 0;
+}
+
+void rpm_lmac_enadis_rx_pause_fwding(void *rpmd, int lmac_id, bool enable)
+{
+ rpm_t *rpm = rpmd;
+ struct lmac *lmac;
+ u64 cfg;
+
+ if (!rpm)
+ return;
+
+ lmac = lmac_pdata(lmac_id, rpm);
+ if (!lmac)
+ return;
+
+	/* Pause frames are not enabled, just return */
+ if (!bitmap_weight(lmac->rx_fc_pfvf_bmap.bmap, lmac->rx_fc_pfvf_bmap.max))
+ return;
+
+ if (enable) {
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+ } else {
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+ }
+}
+
+int rpm_lmac_get_pause_frm_status(void *rpmd, int lmac_id,
+ u8 *tx_pause, u8 *rx_pause)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ if (!(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE)) {
+ *rx_pause = !(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE);
+ *tx_pause = !(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE);
+ }
+
+ return 0;
+}
+
+static void rpm_cfg_pfc_quanta_thresh(rpm_t *rpm, int lmac_id, u16 pfc_en,
+ bool enable)
+{
+ u64 quanta_offset = 0, quanta_thresh = 0, cfg;
+ int i, shift;
+
+ /* Set pause time and interval */
+ for_each_set_bit(i, (unsigned long *)&pfc_en, 16) {
+ switch (i) {
+ case 0:
+ case 1:
+ quanta_offset = RPMX_MTI_MAC100X_CL01_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL01_QUANTA_THRESH;
+ break;
+ case 2:
+ case 3:
+ quanta_offset = RPMX_MTI_MAC100X_CL23_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL23_QUANTA_THRESH;
+ break;
+ case 4:
+ case 5:
+ quanta_offset = RPMX_MTI_MAC100X_CL45_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL45_QUANTA_THRESH;
+ break;
+ case 6:
+ case 7:
+ quanta_offset = RPMX_MTI_MAC100X_CL67_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL67_QUANTA_THRESH;
+ break;
+ case 8:
+ case 9:
+ quanta_offset = RPMX_MTI_MAC100X_CL89_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL89_QUANTA_THRESH;
+ break;
+ case 10:
+ case 11:
+ quanta_offset = RPMX_MTI_MAC100X_CL1011_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL1011_QUANTA_THRESH;
+ break;
+ case 12:
+ case 13:
+ quanta_offset = RPMX_MTI_MAC100X_CL1213_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL1213_QUANTA_THRESH;
+ break;
+ case 14:
+ case 15:
+ quanta_offset = RPMX_MTI_MAC100X_CL1415_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL1415_QUANTA_THRESH;
+ break;
+ }
+
+ if (!quanta_offset || !quanta_thresh)
+ continue;
+
+ shift = (i % 2) ? 1 : 0;
+ cfg = rpm_read(rpm, lmac_id, quanta_offset);
+ if (enable) {
+ cfg |= ((u64)RPM_DEFAULT_PAUSE_TIME << shift * 16);
+ } else {
+ if (!shift)
+ cfg &= ~GENMASK_ULL(15, 0);
+ else
+ cfg &= ~GENMASK_ULL(31, 16);
+ }
+ rpm_write(rpm, lmac_id, quanta_offset, cfg);
+
+ cfg = rpm_read(rpm, lmac_id, quanta_thresh);
+ if (enable) {
+ cfg |= ((u64)(RPM_DEFAULT_PAUSE_TIME / 2) << shift * 16);
+ } else {
+ if (!shift)
+ cfg &= ~GENMASK_ULL(15, 0);
+ else
+ cfg &= ~GENMASK_ULL(31, 16);
+ }
+ rpm_write(rpm, lmac_id, quanta_thresh, cfg);
+ }
+}
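+/* Each *_PAUSE_QUANTA / *_QUANTA_THRESH register handled above packs two
+ * priority classes as 16-bit fields, which is why the classes are walked in
+ * pairs and the writes use "shift * 16": even classes land in bits [15:0],
+ * odd classes in bits [31:16].  The threshold is programmed to half the
+ * pause time (RPM_DEFAULT_PAUSE_TIME / 2).
+ */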
+
+int rpm_lmac_enadis_pause_frm(void *rpmd, int lmac_id, u8 tx_pause,
+ u8 rx_pause)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE;
+ cfg |= rx_pause ? 0x0 : RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE;
+ cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE;
+ cfg |= rx_pause ? 0x0 : RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
+ cfg |= tx_pause ? 0x0 : RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+
+ cfg = rpm_read(rpm, 0, RPMX_CMR_RX_OVR_BP);
+ if (tx_pause) {
+ /* Configure CL0 Pause Quanta & threshold for 802.3X frames */
+ rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 1, true);
+ cfg &= ~RPMX_CMR_RX_OVR_BP_EN(lmac_id);
+ } else {
+ /* Disable all Pause Quanta & threshold values */
+ rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 0xffff, false);
+ cfg |= RPMX_CMR_RX_OVR_BP_EN(lmac_id);
+ cfg &= ~RPMX_CMR_RX_OVR_BP_BP(lmac_id);
+ }
+ rpm_write(rpm, 0, RPMX_CMR_RX_OVR_BP, cfg);
+ return 0;
+}
+
+void rpm_lmac_pause_frm_config(void *rpmd, int lmac_id, bool enable)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg;
+
+ /* ALL pause frames received are completely ignored */
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+
+ /* Disable forward pause to TX block */
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+
+ /* Disable pause frames transmission */
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+
+ /* Enable channel mask for all LMACS */
+ rpm_write(rpm, 0, RPMX_CMR_CHAN_MSK_OR, ~0ULL);
+}
+
+int rpm_get_rx_stats(void *rpmd, int lmac_id, int idx, u64 *rx_stat)
+{
+ rpm_t *rpm = rpmd;
+ u64 val_lo, val_hi;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ mutex_lock(&rpm->lock);
+
+ /* Update idx to point per lmac Rx statistics page */
+ idx += lmac_id * rpm->mac_ops->rx_stats_cnt;
+
+ /* Read lower 32 bits of counter */
+ val_lo = rpm_read(rpm, 0, RPMX_MTI_STAT_RX_STAT_PAGES_COUNTERX +
+ (idx * 8));
+
+	/* Upon a read of the lower 32 bits, the higher 32 bits are latched
+	 * into RPMX_MTI_STAT_DATA_HI_CDC
+	 */
+ val_hi = rpm_read(rpm, 0, RPMX_MTI_STAT_DATA_HI_CDC);
+
+ *rx_stat = (val_hi << 32 | val_lo);
+
+ mutex_unlock(&rpm->lock);
+ return 0;
+}
+
+int rpm_get_tx_stats(void *rpmd, int lmac_id, int idx, u64 *tx_stat)
+{
+ rpm_t *rpm = rpmd;
+ u64 val_lo, val_hi;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ mutex_lock(&rpm->lock);
+
+ /* Update idx to point per lmac Tx statistics page */
+ idx += lmac_id * rpm->mac_ops->tx_stats_cnt;
+
+ val_lo = rpm_read(rpm, 0, RPMX_MTI_STAT_TX_STAT_PAGES_COUNTERX +
+ (idx * 8));
+ val_hi = rpm_read(rpm, 0, RPMX_MTI_STAT_DATA_HI_CDC);
+
+ *tx_stat = (val_hi << 32 | val_lo);
+
+ mutex_unlock(&rpm->lock);
+ return 0;
+}
+
+u8 rpm_get_lmac_type(void *rpmd, int lmac_id)
+{
+ rpm_t *rpm = rpmd;
+ u64 req = 0, resp;
+ int err;
+
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_LINK_STS, req);
+ err = cgx_fwi_cmd_generic(req, &resp, rpm, 0);
+ if (!err)
+ return FIELD_GET(RESP_LINKSTAT_LMAC_TYPE, resp);
+ return err;
+}
+
+int rpm_lmac_internal_loopback(void *rpmd, int lmac_id, bool enable)
+{
+ rpm_t *rpm = rpmd;
+ u8 lmac_type;
+ u64 cfg;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+ lmac_type = rpm->mac_ops->get_lmac_type(rpm, lmac_id);
+
+ if (lmac_type == LMAC_MODE_QSGMII || lmac_type == LMAC_MODE_SGMII) {
+ dev_err(&rpm->pdev->dev, "loopback not supported for LPC mode\n");
+ return 0;
+ }
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_PCS100X_CONTROL1);
+
+ if (enable)
+ cfg |= RPMX_MTI_PCS_LBK;
+ else
+ cfg &= ~RPMX_MTI_PCS_LBK;
+ rpm_write(rpm, lmac_id, RPMX_MTI_PCS100X_CONTROL1, cfg);
+
+ return 0;
+}
+
+void rpm_lmac_ptp_config(void *rpmd, int lmac_id, bool enable)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return;
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_CMRX_CFG);
+ if (enable) {
+ cfg |= RPMX_RX_TS_PREPEND;
+ cfg |= RPMX_TX_PTP_1S_SUPPORT;
+ } else {
+ cfg &= ~RPMX_RX_TS_PREPEND;
+ cfg &= ~RPMX_TX_PTP_1S_SUPPORT;
+ }
+
+ rpm_write(rpm, lmac_id, RPMX_CMRX_CFG, cfg);
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_XIF_MODE);
+
+ if (enable) {
+ cfg |= RPMX_ONESTEP_ENABLE;
+ cfg &= ~RPMX_TS_BINARY_MODE;
+ } else {
+ cfg &= ~RPMX_ONESTEP_ENABLE;
+ }
+
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_XIF_MODE, cfg);
+}
+
+int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause, u16 pfc_en)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ /* reset PFC class quanta and threshold */
+ rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 0xffff, false);
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+
+ if (rx_pause) {
+ cfg &= ~(RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE |
+ RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE |
+ RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD);
+ } else {
+ cfg |= (RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE |
+ RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE |
+ RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD);
+ }
+
+ if (tx_pause) {
+ rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, pfc_en, true);
+ cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
+ } else {
+ rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 0xfff, false);
+ cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
+ }
+
+ if (!rx_pause && !tx_pause)
+ cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE;
+ else
+ cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE;
+
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL);
+ cfg = FIELD_SET(RPM_PFC_CLASS_MASK, pfc_en, cfg);
+ rpm_write(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL, cfg);
+
+ return 0;
+}
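A hedged usage sketch (not part of the patch): enabling PFC for priorities 0 and 3 on an LMAC with both pause directions honoured reduces to one call; rpm_lmac_pfc_config() programs the class quanta, clears the Tx/Rx pause-disable bits and mirrors the priority mask into RPMX_CMRX_PRT_CBFC_CTL through RPM_PFC_CLASS_MASK:

static int rpm_enable_pfc_prio0_prio3(void *rpmd, int lmac_id)
{
	/* Bit i of pfc_en enables PFC for priority/class i */
	u16 pfc_en = BIT(0) | BIT(3);

	return rpm_lmac_pfc_config(rpmd, lmac_id, /* tx_pause */ 1,
				   /* rx_pause */ 1, pfc_en);
}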
+
+int rpm_lmac_get_pfc_frm_cfg(void *rpmd, int lmac_id, u8 *tx_pause, u8 *rx_pause)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ if (cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE) {
+ *rx_pause = !(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE);
+ *tx_pause = !(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE);
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
new file mode 100644
index 000000000000..398f3d1af499
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell CN10K RPM driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef RPM_H
+#define RPM_H
+
+#include <linux/bits.h>
+
+/* PCI device IDs */
+#define PCI_DEVID_CN10K_RPM 0xA060
+
+/* Registers */
+#define RPMX_CMRX_CFG 0x00
+#define RPMX_RX_TS_PREPEND BIT_ULL(22)
+#define RPMX_TX_PTP_1S_SUPPORT BIT_ULL(17)
+#define RPMX_CMRX_SW_INT 0x180
+#define RPMX_CMRX_SW_INT_W1S 0x188
+#define RPMX_CMRX_SW_INT_ENA_W1S 0x198
+#define RPMX_CMRX_LINK_CFG 0x1070
+#define RPMX_MTI_PCS100X_CONTROL1 0x20000
+#define RPMX_MTI_LPCSX_CONTROL(id) (0x30000 | ((id) * 0x100))
+#define RPMX_MTI_PCS_LBK BIT_ULL(14)
+
+#define RPMX_CMRX_LINK_RANGE_MASK GENMASK_ULL(19, 16)
+#define RPMX_CMRX_LINK_BASE_MASK GENMASK_ULL(11, 0)
+
+#define RPMX_MTI_MAC100X_COMMAND_CONFIG 0x8010
+#define RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE BIT_ULL(29)
+#define RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE BIT_ULL(28)
+#define RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE BIT_ULL(8)
+#define RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE BIT_ULL(19)
+#define RPMX_MTI_MAC100X_CL01_PAUSE_QUANTA 0x80A8
+#define RPMX_MTI_MAC100X_CL23_PAUSE_QUANTA 0x80B0
+#define RPMX_MTI_MAC100X_CL45_PAUSE_QUANTA 0x80B8
+#define RPMX_MTI_MAC100X_CL67_PAUSE_QUANTA 0x80C0
+#define RPMX_MTI_MAC100X_CL01_QUANTA_THRESH 0x80C8
+#define RPMX_MTI_MAC100X_CL23_QUANTA_THRESH 0x80D0
+#define RPMX_MTI_MAC100X_CL45_QUANTA_THRESH 0x80D8
+#define RPMX_MTI_MAC100X_CL67_QUANTA_THRESH 0x80E0
+#define RPMX_MTI_MAC100X_CL89_PAUSE_QUANTA 0x8108
+#define RPMX_MTI_MAC100X_CL1011_PAUSE_QUANTA 0x8110
+#define RPMX_MTI_MAC100X_CL1213_PAUSE_QUANTA 0x8118
+#define RPMX_MTI_MAC100X_CL1415_PAUSE_QUANTA 0x8120
+#define RPMX_MTI_MAC100X_CL89_QUANTA_THRESH 0x8128
+#define RPMX_MTI_MAC100X_CL1011_QUANTA_THRESH 0x8130
+#define RPMX_MTI_MAC100X_CL1213_QUANTA_THRESH 0x8138
+#define RPMX_MTI_MAC100X_CL1415_QUANTA_THRESH 0x8140
+#define RPM_DEFAULT_PAUSE_TIME 0xFFFF
+#define RPMX_CMR_RX_OVR_BP 0x4120
+#define RPMX_CMR_RX_OVR_BP_EN(x) BIT_ULL((x) + 8)
+#define RPMX_CMR_RX_OVR_BP_BP(x) BIT_ULL((x) + 4)
+#define RPMX_CMR_CHAN_MSK_OR 0x4118
+#define RPMX_MTI_STAT_RX_STAT_PAGES_COUNTERX 0x12000
+#define RPMX_MTI_STAT_TX_STAT_PAGES_COUNTERX 0x13000
+#define RPMX_MTI_STAT_DATA_HI_CDC 0x10038
+#define RPM_LMAC_FWI 0xa
+#define RPM_TX_EN BIT_ULL(0)
+#define RPM_RX_EN BIT_ULL(1)
+#define RPMX_CMRX_PRT_CBFC_CTL 0x5B08
+#define RPMX_CMRX_PRT_CBFC_CTL_LOGL_EN_RX_SHIFT 33
+#define RPMX_CMRX_PRT_CBFC_CTL_PHYS_BP_SHIFT 16
+#define RPMX_CMRX_PRT_CBFC_CTL_LOGL_EN_TX_SHIFT 0
+#define RPM_PFC_CLASS_MASK GENMASK_ULL(48, 33)
+#define RPMX_MTI_MAC100X_CL89_QUANTA_THRESH 0x8128
+#define RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_PAD_EN BIT_ULL(11)
+#define RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE BIT_ULL(8)
+#define RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD BIT_ULL(7)
+#define RPMX_MTI_MAC100X_CL01_PAUSE_QUANTA 0x80A8
+#define RPMX_MTI_MAC100X_CL89_PAUSE_QUANTA 0x8108
+#define RPM_DEFAULT_PAUSE_TIME 0xFFFF
+
+#define RPMX_MTI_MAC100X_XIF_MODE 0x8100
+#define RPMX_ONESTEP_ENABLE BIT_ULL(5)
+#define RPMX_TS_BINARY_MODE BIT_ULL(11)
+
+/* Function Declarations */
+int rpm_get_nr_lmacs(void *rpmd);
+u8 rpm_get_lmac_type(void *rpmd, int lmac_id);
+int rpm_lmac_internal_loopback(void *rpmd, int lmac_id, bool enable);
+void rpm_lmac_enadis_rx_pause_fwding(void *rpmd, int lmac_id, bool enable);
+int rpm_lmac_get_pause_frm_status(void *cgxd, int lmac_id, u8 *tx_pause,
+ u8 *rx_pause);
+void rpm_lmac_pause_frm_config(void *rpmd, int lmac_id, bool enable);
+int rpm_lmac_enadis_pause_frm(void *rpmd, int lmac_id, u8 tx_pause,
+ u8 rx_pause);
+int rpm_get_tx_stats(void *rpmd, int lmac_id, int idx, u64 *tx_stat);
+int rpm_get_rx_stats(void *rpmd, int lmac_id, int idx, u64 *rx_stat);
+void rpm_lmac_ptp_config(void *rpmd, int lmac_id, bool enable);
+int rpm_lmac_rx_tx_enable(void *rpmd, int lmac_id, bool enable);
+int rpm_lmac_tx_enable(void *rpmd, int lmac_id, bool enable);
+int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause,
+ u16 pfc_en);
+int rpm_lmac_get_pfc_frm_cfg(void *rpmd, int lmac_id, u8 *tx_pause,
+ u8 *rx_pause);
+#endif /* RPM_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index c26652436c53..4049b616a9eb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -1,11 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
-/* Marvell OcteonTx2 RVU Admin Function driver
+/* Marvell RVU Admin Function driver
*
- * Copyright (C) 2018 Marvell International Ltd.
+ * Copyright (C) 2018 Marvell.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/module.h>
@@ -22,8 +19,8 @@
#include "rvu_trace.h"
-#define DRV_NAME "octeontx2-af"
-#define DRV_STRING "Marvell OcteonTX2 RVU Admin Function Driver"
+#define DRV_NAME "rvu_af"
+#define DRV_STRING "Marvell RVU Admin Function Driver"
static int rvu_get_hwvf(struct rvu *rvu, int pcifunc);
@@ -48,7 +45,7 @@ static const struct pci_device_id rvu_id_table[] = {
{ 0, } /* end of table */
};
-MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
+MODULE_AUTHOR("Marvell.");
MODULE_DESCRIPTION(DRV_STRING);
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, rvu_id_table);
@@ -57,6 +54,10 @@ static char *mkex_profile; /* MKEX profile name */
module_param(mkex_profile, charp, 0000);
MODULE_PARM_DESC(mkex_profile, "MKEX profile name string");
+static char *kpu_profile; /* KPU profile name */
+module_param(kpu_profile, charp, 0000);
+MODULE_PARM_DESC(kpu_profile, "KPU profile name string");
+
static void rvu_setup_hw_capabilities(struct rvu *rvu)
{
struct rvu_hwinfo *hw = rvu->hw;
@@ -66,8 +67,10 @@ static void rvu_setup_hw_capabilities(struct rvu *rvu)
hw->cap.nix_shaping = true;
hw->cap.nix_tx_link_bp = true;
hw->cap.nix_rx_multicast = true;
+ hw->cap.nix_shaper_toggle_wait = false;
+ hw->rvu = rvu;
- if (is_rvu_96xx_B0(rvu)) {
+ if (is_rvu_pre_96xx_C0(rvu)) {
hw->cap.nix_fixed_txschq_mapping = true;
hw->cap.nix_txsch_per_cgx_lmac = 4;
hw->cap.nix_txsch_per_lbk_lmac = 132;
@@ -77,6 +80,11 @@ static void rvu_setup_hw_capabilities(struct rvu *rvu)
if (is_rvu_96xx_A0(rvu))
hw->cap.nix_rx_multicast = false;
}
+ if (!is_rvu_pre_96xx_C0(rvu))
+ hw->cap.nix_shaper_toggle_wait = true;
+
+ if (!is_rvu_otx2(rvu))
+ hw->cap.per_pf_mbox_regs = true;
}
/* Poll a RVU block's register 'offset', for a 'zero'
@@ -186,6 +194,14 @@ int rvu_rsrc_free_count(struct rsrc_bmap *rsrc)
return (rsrc->max - used);
}
+bool is_rsrc_free(struct rsrc_bmap *rsrc, int id)
+{
+ if (!rsrc->bmap)
+ return false;
+
+ return !test_bit(id, rsrc->bmap);
+}
+
int rvu_alloc_bitmap(struct rsrc_bmap *rsrc)
{
rsrc->bmap = kcalloc(BITS_TO_LONGS(rsrc->max),
@@ -195,6 +211,11 @@ int rvu_alloc_bitmap(struct rsrc_bmap *rsrc)
return 0;
}
+void rvu_free_bitmap(struct rsrc_bmap *rsrc)
+{
+ kfree(rsrc->bmap);
+}
+
/* Get block LF's HW index from a PF_FUNC's block slot number */
int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot)
{
@@ -220,6 +241,9 @@ int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot)
* multiple blocks of same type.
*
* @pcifunc has to be zero when no LF is yet attached.
+ *
+ * If a pcifunc has LFs attached from multiple blocks of the same type,
+ * the blkaddr of the first matching block is returned.
*/
int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc)
{
@@ -257,6 +281,12 @@ int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc)
goto exit;
}
break;
+ case BLKTYPE_REE:
+ if (!pcifunc) {
+ blkaddr = BLKADDR_REE0;
+ goto exit;
+ }
+ break;
}
/* Check if this is a RVU PF or VF */
@@ -268,20 +298,59 @@ int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc)
devnum = rvu_get_pf(pcifunc);
}
- /* Check if the 'pcifunc' has a NIX LF from 'BLKADDR_NIX0' */
+ /* Check if the 'pcifunc' has a NIX LF from 'BLKADDR_NIX0' or
+ * 'BLKADDR_NIX1'.
+ */
if (blktype == BLKTYPE_NIX) {
- reg = is_pf ? RVU_PRIV_PFX_NIX0_CFG : RVU_PRIV_HWVFX_NIX0_CFG;
+ reg = is_pf ? RVU_PRIV_PFX_NIXX_CFG(0) :
+ RVU_PRIV_HWVFX_NIXX_CFG(0);
cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
- if (cfg)
+ if (cfg) {
blkaddr = BLKADDR_NIX0;
+ goto exit;
+ }
+
+ reg = is_pf ? RVU_PRIV_PFX_NIXX_CFG(1) :
+ RVU_PRIV_HWVFX_NIXX_CFG(1);
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
+ if (cfg)
+ blkaddr = BLKADDR_NIX1;
}
- /* Check if the 'pcifunc' has a CPT LF from 'BLKADDR_CPT0' */
if (blktype == BLKTYPE_CPT) {
- reg = is_pf ? RVU_PRIV_PFX_CPT0_CFG : RVU_PRIV_HWVFX_CPT0_CFG;
+ reg = is_pf ? RVU_PRIV_PFX_CPTX_CFG(0) :
+ RVU_PRIV_HWVFX_CPTX_CFG(0);
cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
- if (cfg)
+ if (cfg) {
blkaddr = BLKADDR_CPT0;
+ goto exit;
+ }
+
+ reg = is_pf ? RVU_PRIV_PFX_CPTX_CFG(1) :
+ RVU_PRIV_HWVFX_CPTX_CFG(1);
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
+ if (cfg)
+ blkaddr = BLKADDR_CPT1;
+ }
+
+ /* Check if the 'pcifunc' has a REE LF from 'BLKADDR_REE0' or
+ * 'BLKADDR_REE1'. If pcifunc has REE LFs from both then only
+ * BLKADDR_REE0 is returned.
+ */
+ if (blktype == BLKTYPE_REE) {
+ reg = is_pf ? RVU_PRIV_PFX_REEX_CFG(0) :
+ RVU_PRIV_HWVFX_REEX_CFG(0);
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
+ if (cfg) {
+ blkaddr = BLKADDR_REE0;
+ goto exit;
+ }
+
+ reg = is_pf ? RVU_PRIV_PFX_REEX_CFG(1) :
+ RVU_PRIV_HWVFX_REEX_CFG(1);
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
+ if (cfg)
+ blkaddr = BLKADDR_REE1;
}
exit:
@@ -316,31 +385,44 @@ static void rvu_update_rsrc_map(struct rvu *rvu, struct rvu_pfvf *pfvf,
block->fn_map[lf] = attach ? pcifunc : 0;
- switch (block->type) {
- case BLKTYPE_NPA:
+ switch (block->addr) {
+ case BLKADDR_NPA:
pfvf->npalf = attach ? true : false;
num_lfs = pfvf->npalf;
break;
- case BLKTYPE_NIX:
+ case BLKADDR_NIX0:
+ case BLKADDR_NIX1:
pfvf->nixlf = attach ? true : false;
num_lfs = pfvf->nixlf;
break;
- case BLKTYPE_SSO:
+ case BLKADDR_SSO:
attach ? pfvf->sso++ : pfvf->sso--;
num_lfs = pfvf->sso;
break;
- case BLKTYPE_SSOW:
+ case BLKADDR_SSOW:
attach ? pfvf->ssow++ : pfvf->ssow--;
num_lfs = pfvf->ssow;
break;
- case BLKTYPE_TIM:
+ case BLKADDR_TIM:
attach ? pfvf->timlfs++ : pfvf->timlfs--;
num_lfs = pfvf->timlfs;
break;
- case BLKTYPE_CPT:
+ case BLKADDR_CPT0:
attach ? pfvf->cptlfs++ : pfvf->cptlfs--;
num_lfs = pfvf->cptlfs;
break;
+ case BLKADDR_CPT1:
+ attach ? pfvf->cpt1_lfs++ : pfvf->cpt1_lfs--;
+ num_lfs = pfvf->cpt1_lfs;
+ break;
+ case BLKADDR_REE0:
+ attach ? pfvf->ree0_lfs++ : pfvf->ree0_lfs--;
+ num_lfs = pfvf->ree0_lfs;
+ break;
+ case BLKADDR_REE1:
+ attach ? pfvf->ree1_lfs++ : pfvf->ree1_lfs--;
+ num_lfs = pfvf->ree1_lfs;
+ break;
}
reg = is_pf ? block->pf_lfcnt_reg : block->vf_lfcnt_reg;
@@ -463,12 +545,18 @@ int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf)
static void rvu_block_reset(struct rvu *rvu, int blkaddr, u64 rst_reg)
{
struct rvu_block *block = &rvu->hw->block[blkaddr];
+ int err;
if (!block->implemented)
return;
rvu_write64(rvu, blkaddr, rst_reg, BIT_ULL(0));
- rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true);
+ err = rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true);
+ if (err) {
+ dev_err(rvu->dev, "HW block:%d reset timeout retrying again\n", blkaddr);
+ while (rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true) == -EBUSY)
+ ;
+ }
}
static void rvu_reset_all_blocks(struct rvu *rvu)
@@ -476,13 +564,19 @@ static void rvu_reset_all_blocks(struct rvu *rvu)
/* Do a HW reset of all RVU blocks */
rvu_block_reset(rvu, BLKADDR_NPA, NPA_AF_BLK_RST);
rvu_block_reset(rvu, BLKADDR_NIX0, NIX_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_NIX1, NIX_AF_BLK_RST);
rvu_block_reset(rvu, BLKADDR_NPC, NPC_AF_BLK_RST);
rvu_block_reset(rvu, BLKADDR_SSO, SSO_AF_BLK_RST);
rvu_block_reset(rvu, BLKADDR_TIM, TIM_AF_BLK_RST);
rvu_block_reset(rvu, BLKADDR_CPT0, CPT_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_CPT1, CPT_AF_BLK_RST);
rvu_block_reset(rvu, BLKADDR_NDC_NIX0_RX, NDC_AF_BLK_RST);
rvu_block_reset(rvu, BLKADDR_NDC_NIX0_TX, NDC_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_NDC_NIX1_RX, NDC_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_NDC_NIX1_TX, NDC_AF_BLK_RST);
rvu_block_reset(rvu, BLKADDR_NDC_NPA0, NDC_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_REE0, REE_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_REE1, REE_AF_BLK_RST);
}
static void rvu_scan_block(struct rvu *rvu, struct rvu_block *block)
@@ -624,7 +718,7 @@ setup_vfmsix:
}
/* HW interprets RVU_AF_MSIXTR_BASE address as an IOVA, hence
- * create a IOMMU mapping for the physcial address configured by
+ * create an IOMMU mapping for the physical address configured by
* firmware and reconfig RVU_AF_MSIXTR_BASE with IOVA.
*/
cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
@@ -666,6 +760,8 @@ static void rvu_free_hw_resources(struct rvu *rvu)
rvu_npa_freemem(rvu);
rvu_npc_freemem(rvu);
rvu_nix_freemem(rvu);
+ rvu_sso_freemem(rvu);
+ rvu_ree_freemem(rvu);
/* Free block LF bitmaps */
for (id = 0; id < BLK_COUNT; id++) {
@@ -705,6 +801,10 @@ static void rvu_setup_pfvf_macaddress(struct rvu *rvu)
u64 *mac;
for (pf = 0; pf < hw->total_pfs; pf++) {
+ /* For PF0 (AF), assign MAC addresses only to its VFs (LBK VFs) */
+ if (!pf)
+ goto lbkvf;
+
if (!is_pf_cgxmapped(rvu, pf))
continue;
/* Assign MAC address to PF */
@@ -718,7 +818,9 @@ static void rvu_setup_pfvf_macaddress(struct rvu *rvu)
} else {
eth_random_addr(pfvf->mac_addr);
}
+ ether_addr_copy(pfvf->default_mac, pfvf->mac_addr);
+lbkvf:
/* Assign MAC address to VFs */
rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
for (vf = 0; vf < numvfs; vf++, hwvf++) {
@@ -732,6 +834,7 @@ static void rvu_setup_pfvf_macaddress(struct rvu *rvu)
} else {
eth_random_addr(pfvf->mac_addr);
}
+ ether_addr_copy(pfvf->default_mac, pfvf->mac_addr);
}
}
}
@@ -767,6 +870,121 @@ static void rvu_fwdata_exit(struct rvu *rvu)
iounmap(rvu->fwdata);
}
+static int rvu_setup_nix_hw_resource(struct rvu *rvu, int blkaddr)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int blkid;
+ u64 cfg;
+
+ /* Init NIX LF's bitmap */
+ block = &hw->block[blkaddr];
+ if (!block->implemented)
+ return 0;
+ blkid = (blkaddr == BLKADDR_NIX0) ? 0 : 1;
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
+ block->lf.max = cfg & 0xFFF;
+ block->addr = blkaddr;
+ block->type = BLKTYPE_NIX;
+ block->lfshift = 8;
+ block->lookup_reg = NIX_AF_RVU_LF_CFG_DEBUG;
+ block->pf_lfcnt_reg = RVU_PRIV_PFX_NIXX_CFG(blkid);
+ block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NIXX_CFG(blkid);
+ block->lfcfg_reg = NIX_PRIV_LFX_CFG;
+ block->msixcfg_reg = NIX_PRIV_LFX_INT_CFG;
+ block->lfreset_reg = NIX_AF_LF_RST;
+ block->rvu = rvu;
+ sprintf(block->name, "NIX%d", blkid);
+ rvu->nix_blkaddr[blkid] = blkaddr;
+ return rvu_alloc_bitmap(&block->lf);
+}
+
+static int rvu_setup_cpt_hw_resource(struct rvu *rvu, int blkaddr)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int blkid;
+ u64 cfg;
+
+ /* Init CPT LF's bitmap */
+ block = &hw->block[blkaddr];
+ if (!block->implemented)
+ return 0;
+ blkid = (blkaddr == BLKADDR_CPT0) ? 0 : 1;
+ cfg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS0);
+ block->lf.max = cfg & 0xFF;
+ block->addr = blkaddr;
+ block->type = BLKTYPE_CPT;
+ block->multislot = true;
+ block->lfshift = 3;
+ block->lookup_reg = CPT_AF_RVU_LF_CFG_DEBUG;
+ block->pf_lfcnt_reg = RVU_PRIV_PFX_CPTX_CFG(blkid);
+ block->vf_lfcnt_reg = RVU_PRIV_HWVFX_CPTX_CFG(blkid);
+ block->lfcfg_reg = CPT_PRIV_LFX_CFG;
+ block->msixcfg_reg = CPT_PRIV_LFX_INT_CFG;
+ block->lfreset_reg = CPT_AF_LF_RST;
+ block->rvu = rvu;
+ sprintf(block->name, "CPT%d", blkid);
+ return rvu_alloc_bitmap(&block->lf);
+}
+
+static int rvu_setup_ree_hw_resource(struct rvu *rvu, int blkaddr, int blkid)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int err;
+ u64 cfg;
+
+ /* Init REE LF's bitmap */
+ block = &hw->block[blkaddr];
+ if (!block->implemented)
+ return 0;
+ cfg = rvu_read64(rvu, blkaddr, REE_AF_CONSTANTS);
+ block->lf.max = cfg & 0xFF;
+ block->addr = blkaddr;
+ block->type = BLKTYPE_REE;
+ block->multislot = true;
+ block->lfshift = 3;
+ block->lookup_reg = REE_AF_RVU_LF_CFG_DEBUG;
+ block->pf_lfcnt_reg = RVU_PRIV_PFX_REEX_CFG(blkid);
+ block->vf_lfcnt_reg = RVU_PRIV_HWVFX_REEX_CFG(blkid);
+ block->lfcfg_reg = REE_PRIV_LFX_CFG;
+ block->msixcfg_reg = REE_PRIV_LFX_INT_CFG;
+ block->lfreset_reg = REE_AF_LF_RST;
+ block->rvu = rvu;
+ sprintf(block->name, "REE%d", blkid);
+ err = rvu_alloc_bitmap(&block->lf);
+ if (err)
+ return err;
+ return 0;
+}
+
+static void rvu_get_lbk_bufsize(struct rvu *rvu)
+{
+ struct pci_dev *pdev = NULL;
+ void __iomem *base;
+ u64 lbk_const;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_LBK, pdev);
+ if (!pdev)
+ return;
+
+ base = pci_ioremap_bar(pdev, 0);
+ if (!base)
+ goto err_put;
+
+ lbk_const = readq(base + LBK_CONST);
+
+ /* cache fifo size */
+ rvu->hw->lbk_bufsize = FIELD_GET(LBK_CONST_BUF_SIZE, lbk_const);
+
+ iounmap(base);
+err_put:
+ pci_dev_put(pdev);
+}
+
static int rvu_setup_hw_resources(struct rvu *rvu)
{
struct rvu_hwinfo *hw = rvu->hw;
@@ -780,6 +998,9 @@ static int rvu_setup_hw_resources(struct rvu *rvu)
hw->total_vfs = (cfg >> 20) & 0xFFF;
hw->max_vfs_per_pf = (cfg >> 40) & 0xFF;
+ if (!is_rvu_otx2(rvu))
+ rvu_apr_block_cn10k_init(rvu);
+
/* Init NPA LF's bitmap */
block = &hw->block[BLKADDR_NPA];
if (!block->implemented)
@@ -795,33 +1016,30 @@ static int rvu_setup_hw_resources(struct rvu *rvu)
block->lfcfg_reg = NPA_PRIV_LFX_CFG;
block->msixcfg_reg = NPA_PRIV_LFX_INT_CFG;
block->lfreset_reg = NPA_AF_LF_RST;
+ block->rvu = rvu;
sprintf(block->name, "NPA");
err = rvu_alloc_bitmap(&block->lf);
- if (err)
+ if (err) {
+ dev_err(rvu->dev,
+ "%s: Failed to allocate NPA LF bitmap\n", __func__);
return err;
+ }
nix:
- /* Init NIX LF's bitmap */
- block = &hw->block[BLKADDR_NIX0];
- if (!block->implemented)
- goto sso;
- cfg = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_CONST2);
- block->lf.max = cfg & 0xFFF;
- block->addr = BLKADDR_NIX0;
- block->type = BLKTYPE_NIX;
- block->lfshift = 8;
- block->lookup_reg = NIX_AF_RVU_LF_CFG_DEBUG;
- block->pf_lfcnt_reg = RVU_PRIV_PFX_NIX0_CFG;
- block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NIX0_CFG;
- block->lfcfg_reg = NIX_PRIV_LFX_CFG;
- block->msixcfg_reg = NIX_PRIV_LFX_INT_CFG;
- block->lfreset_reg = NIX_AF_LF_RST;
- sprintf(block->name, "NIX");
- err = rvu_alloc_bitmap(&block->lf);
- if (err)
+ err = rvu_setup_nix_hw_resource(rvu, BLKADDR_NIX0);
+ if (err) {
+ dev_err(rvu->dev,
+ "%s: Failed to allocate NIX0 LFs bitmap\n", __func__);
return err;
+ }
+
+ err = rvu_setup_nix_hw_resource(rvu, BLKADDR_NIX1);
+ if (err) {
+ dev_err(rvu->dev,
+ "%s: Failed to allocate NIX1 LFs bitmap\n", __func__);
+ return err;
+ }
-sso:
/* Init SSO group's bitmap */
block = &hw->block[BLKADDR_SSO];
if (!block->implemented)
@@ -838,10 +1056,14 @@ sso:
block->lfcfg_reg = SSO_PRIV_LFX_HWGRP_CFG;
block->msixcfg_reg = SSO_PRIV_LFX_HWGRP_INT_CFG;
block->lfreset_reg = SSO_AF_LF_HWGRP_RST;
+ block->rvu = rvu;
sprintf(block->name, "SSO GROUP");
err = rvu_alloc_bitmap(&block->lf);
- if (err)
+ if (err) {
+ dev_err(rvu->dev,
+ "%s: Failed to allocate SSO LF bitmap\n", __func__);
return err;
+ }
ssow:
/* Init SSO workslot's bitmap */
@@ -859,10 +1081,14 @@ ssow:
block->lfcfg_reg = SSOW_PRIV_LFX_HWS_CFG;
block->msixcfg_reg = SSOW_PRIV_LFX_HWS_INT_CFG;
block->lfreset_reg = SSOW_AF_LF_HWS_RST;
+ block->rvu = rvu;
sprintf(block->name, "SSOWS");
err = rvu_alloc_bitmap(&block->lf);
- if (err)
+ if (err) {
+ dev_err(rvu->dev,
+ "%s: Failed to allocate SSOW LF bitmap\n", __func__);
return err;
+ }
tim:
/* Init TIM LF's bitmap */
@@ -881,52 +1107,66 @@ tim:
block->lfcfg_reg = TIM_PRIV_LFX_CFG;
block->msixcfg_reg = TIM_PRIV_LFX_INT_CFG;
block->lfreset_reg = TIM_AF_LF_RST;
+ block->rvu = rvu;
sprintf(block->name, "TIM");
err = rvu_alloc_bitmap(&block->lf);
- if (err)
+ if (err) {
+ dev_err(rvu->dev,
+ "%s: Failed to allocate TIM LF bitmap\n", __func__);
return err;
+ }
cpt:
- /* Init CPT LF's bitmap */
- block = &hw->block[BLKADDR_CPT0];
- if (!block->implemented)
- goto init;
- cfg = rvu_read64(rvu, BLKADDR_CPT0, CPT_AF_CONSTANTS0);
- block->lf.max = cfg & 0xFF;
- block->addr = BLKADDR_CPT0;
- block->type = BLKTYPE_CPT;
- block->multislot = true;
- block->lfshift = 3;
- block->lookup_reg = CPT_AF_RVU_LF_CFG_DEBUG;
- block->pf_lfcnt_reg = RVU_PRIV_PFX_CPT0_CFG;
- block->vf_lfcnt_reg = RVU_PRIV_HWVFX_CPT0_CFG;
- block->lfcfg_reg = CPT_PRIV_LFX_CFG;
- block->msixcfg_reg = CPT_PRIV_LFX_INT_CFG;
- block->lfreset_reg = CPT_AF_LF_RST;
- sprintf(block->name, "CPT");
- err = rvu_alloc_bitmap(&block->lf);
+ err = rvu_setup_cpt_hw_resource(rvu, BLKADDR_CPT0);
+ if (err) {
+ dev_err(rvu->dev,
+ "%s: Failed to allocate CPT0 LF bitmap\n", __func__);
+ return err;
+ }
+ err = rvu_setup_cpt_hw_resource(rvu, BLKADDR_CPT1);
+ if (err) {
+ dev_err(rvu->dev,
+ "%s: Failed to allocate CPT1 LF bitmap\n", __func__);
+ return err;
+ }
+
+ /* REE */
+ err = rvu_setup_ree_hw_resource(rvu, BLKADDR_REE0, 0);
+ if (err)
+ return err;
+ err = rvu_setup_ree_hw_resource(rvu, BLKADDR_REE1, 1);
if (err)
return err;
-init:
/* Allocate memory for PFVF data */
rvu->pf = devm_kcalloc(rvu->dev, hw->total_pfs,
sizeof(struct rvu_pfvf), GFP_KERNEL);
- if (!rvu->pf)
+ if (!rvu->pf) {
+ dev_err(rvu->dev,
+ "%s: Failed to allocate memory for PF's rvu_pfvf struct\n", __func__);
return -ENOMEM;
+ }
rvu->hwvf = devm_kcalloc(rvu->dev, hw->total_vfs,
sizeof(struct rvu_pfvf), GFP_KERNEL);
- if (!rvu->hwvf)
+ if (!rvu->hwvf) {
+ dev_err(rvu->dev,
+ "%s: Failed to allocate memory for VF's rvu_pfvf struct\n", __func__);
return -ENOMEM;
+ }
mutex_init(&rvu->rsrc_lock);
- rvu_fwdata_init(rvu);
+ err = rvu_fwdata_init(rvu);
+ if (err)
+ goto msix_err;
err = rvu_setup_msix_resources(rvu);
- if (err)
+ if (err) {
+ dev_err(rvu->dev,
+ "%s: Failed to setup MSIX resources\n", __func__);
return err;
+ }
for (blkid = 0; blkid < BLK_COUNT; blkid++) {
block = &hw->block[blkid];
@@ -947,24 +1187,70 @@ init:
rvu_scan_block(rvu, block);
}
- err = rvu_npc_init(rvu);
+ err = rvu_set_channels_base(rvu);
if (err)
+ goto msix_err;
+
+ err = rvu_npc_init(rvu);
+ if (err) {
+ dev_err(rvu->dev, "%s: Failed to initialize npc\n", __func__);
goto npc_err;
+ }
err = rvu_cgx_init(rvu);
- if (err)
+ if (err) {
+ dev_err(rvu->dev, "%s: Failed to initialize cgx\n", __func__);
goto cgx_err;
+ }
+
+ err = rvu_sso_init(rvu);
+ if (err) {
+ dev_err(rvu->dev, "%s: Failed to initialize sso\n", __func__);
+ goto sso_err;
+ }
+
+ err = rvu_tim_init(rvu);
+ if (err) {
+ dev_err(rvu->dev, "%s: Failed to initialize tim\n", __func__);
+ goto sso_err;
+ }
+
+ err = rvu_sdp_init(rvu);
+ if (err) {
+ dev_err(rvu->dev, "%s: Failed to initialize sdp\n", __func__);
+ goto sso_err;
+ }
/* Assign MACs for CGX mapped functions */
rvu_setup_pfvf_macaddress(rvu);
err = rvu_npa_init(rvu);
- if (err)
+ if (err) {
+ dev_err(rvu->dev, "%s: Failed to initialize npa\n", __func__);
goto npa_err;
+ }
+
+ rvu_get_lbk_bufsize(rvu);
err = rvu_nix_init(rvu);
- if (err)
+ if (err) {
+ dev_err(rvu->dev, "%s: Failed to initialize nix\n", __func__);
+ goto nix_err;
+ }
+
+ err = rvu_ree_init(rvu);
+ if (err) {
+ dev_err(rvu->dev, "%s: Failed to initialize ree\n", __func__);
goto nix_err;
+ }
+
+ err = rvu_cpt_init(rvu);
+ if (err) {
+ dev_err(rvu->dev, "%s: Failed to initialize cpt\n", __func__);
+ goto nix_err;
+ }
+
+ rvu_program_channels(rvu);
return 0;
@@ -972,6 +1258,8 @@ nix_err:
rvu_nix_freemem(rvu);
npa_err:
rvu_npa_freemem(rvu);
+sso_err:
+ rvu_sso_freemem(rvu);
cgx_err:
rvu_cgx_exit(rvu);
npc_err:
@@ -1035,7 +1323,34 @@ int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req,
/* Get current count of a RVU block's LF/slots
* provisioned to a given RVU func.
*/
-static u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blktype)
+u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blkaddr)
+{
+ switch (blkaddr) {
+ case BLKADDR_NPA:
+ return pfvf->npalf ? 1 : 0;
+ case BLKADDR_NIX0:
+ case BLKADDR_NIX1:
+ return pfvf->nixlf ? 1 : 0;
+ case BLKADDR_SSO:
+ return pfvf->sso;
+ case BLKADDR_SSOW:
+ return pfvf->ssow;
+ case BLKADDR_TIM:
+ return pfvf->timlfs;
+ case BLKADDR_CPT0:
+ return pfvf->cptlfs;
+ case BLKADDR_CPT1:
+ return pfvf->cpt1_lfs;
+ case BLKADDR_REE0:
+ return pfvf->ree0_lfs;
+ case BLKADDR_REE1:
+ return pfvf->ree1_lfs;
+ }
+ return 0;
+}
+
+/* Return true if LFs of block type are attached to pcifunc */
+static bool is_blktype_attached(struct rvu_pfvf *pfvf, int blktype)
{
switch (blktype) {
case BLKTYPE_NPA:
@@ -1043,15 +1358,18 @@ static u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blktype)
case BLKTYPE_NIX:
return pfvf->nixlf ? 1 : 0;
case BLKTYPE_SSO:
- return pfvf->sso;
+ return !!pfvf->sso;
case BLKTYPE_SSOW:
- return pfvf->ssow;
+ return !!pfvf->ssow;
case BLKTYPE_TIM:
- return pfvf->timlfs;
+ return !!pfvf->timlfs;
case BLKTYPE_CPT:
- return pfvf->cptlfs;
+ return pfvf->cptlfs || pfvf->cpt1_lfs;
+ case BLKTYPE_REE:
+ return pfvf->ree0_lfs || pfvf->ree1_lfs;
}
- return 0;
+
+ return false;
}
bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype)
@@ -1064,7 +1382,7 @@ bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype)
pfvf = rvu_get_pfvf(rvu, pcifunc);
/* Check if this PFFUNC has a LF of type blktype attached */
- if (!rvu_get_rsrc_mapcount(pfvf, blktype))
+ if (blktype != BLKTYPE_SSO && !is_blktype_attached(pfvf, blktype))
return false;
return true;
@@ -1075,6 +1393,9 @@ static int rvu_lookup_rsrc(struct rvu *rvu, struct rvu_block *block,
{
u64 val;
+ if (block->type == BLKTYPE_TIM)
+ return rvu_tim_lookup_rsrc(rvu, block, pcifunc, slot);
+
val = ((u64)pcifunc << 24) | (slot << 16) | (1ULL << 13);
rvu_write64(rvu, block->addr, block->lookup_reg, val);
/* Wait for the lookup to finish */
@@ -1091,6 +1412,60 @@ static int rvu_lookup_rsrc(struct rvu *rvu, struct rvu_block *block,
return (val & 0xFFF);
}
+int rvu_get_blkaddr_from_slot(struct rvu *rvu, int blktype, u16 pcifunc,
+ u16 global_slot, u16 *slot_in_block)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ int numlfs, total_lfs = 0, nr_blocks = 0;
+ int i, num_blkaddr[BLK_COUNT] = { 0 };
+ struct rvu_block *block;
+ int blkaddr = -ENODEV;
+ u16 start_slot;
+
+ if (!is_blktype_attached(pfvf, blktype))
+ return -ENODEV;
+
+ /* Collect in num_blkaddr[] all the block addresses from which
+ * LFs are attached to the given pcifunc.
+ */
+ for (blkaddr = BLKADDR_RVUM; blkaddr < BLK_COUNT; blkaddr++) {
+ block = &rvu->hw->block[blkaddr];
+ if (block->type != blktype)
+ continue;
+ if (!is_block_implemented(rvu->hw, blkaddr))
+ continue;
+
+ numlfs = rvu_get_rsrc_mapcount(pfvf, blkaddr);
+ if (numlfs) {
+ total_lfs += numlfs;
+ num_blkaddr[nr_blocks] = blkaddr;
+ nr_blocks++;
+ }
+ }
+
+ if (global_slot >= total_lfs)
+ return -ENODEV;
+
+ /* Based on the given global slot number, retrieve the block
+ * address it belongs to among all attached blocks and the
+ * slot number within that block.
+ */
+ total_lfs = 0;
+ blkaddr = -ENODEV;
+ for (i = 0; i < nr_blocks; i++) {
+ numlfs = rvu_get_rsrc_mapcount(pfvf, num_blkaddr[i]);
+ total_lfs += numlfs;
+ if (global_slot < total_lfs) {
+ blkaddr = num_blkaddr[i];
+ start_slot = total_lfs - numlfs;
+ *slot_in_block = global_slot - start_slot;
+ break;
+ }
+ }
+
+ return blkaddr;
+}
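A worked example for the slot resolution above (the attachment counts are hypothetical): with 2 LFs attached from CPT0 and 3 from CPT1, global slots 0-1 fall in CPT0 and slots 2-4 in CPT1, so global slot 3 resolves to CPT1 with a block-local slot of 1:

static int example_resolve_cpt_slot(struct rvu *rvu, u16 pcifunc)
{
	u16 slot_in_block;
	int blkaddr;

	/* Resolve global CPT slot 3 of this pcifunc */
	blkaddr = rvu_get_blkaddr_from_slot(rvu, BLKTYPE_CPT, pcifunc,
					    3, &slot_in_block);
	if (blkaddr < 0)
		return blkaddr;	/* no LF maps to this global slot */

	/* With the attachment above: blkaddr == BLKADDR_CPT1 and
	 * slot_in_block == 1.
	 */
	return slot_in_block;
}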
+
static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
@@ -1103,9 +1478,12 @@ static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype)
if (blkaddr < 0)
return;
+ if (blktype == BLKTYPE_NIX)
+ rvu_nix_reset_mac(pfvf, pcifunc);
+
block = &hw->block[blkaddr];
- num_lfs = rvu_get_rsrc_mapcount(pfvf, block->type);
+ num_lfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
if (!num_lfs)
return;
@@ -1156,6 +1534,8 @@ static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach,
continue;
else if ((blkid == BLKADDR_NIX0) && !detach->nixlf)
continue;
+ else if ((blkid == BLKADDR_NIX1) && !detach->nixlf)
+ continue;
else if ((blkid == BLKADDR_SSO) && !detach->sso)
continue;
else if ((blkid == BLKADDR_SSOW) && !detach->ssow)
@@ -1164,6 +1544,12 @@ static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach,
continue;
else if ((blkid == BLKADDR_CPT0) && !detach->cptlfs)
continue;
+ else if ((blkid == BLKADDR_CPT1) && !detach->cptlfs)
+ continue;
+ else if ((blkid == BLKADDR_REE0) && !detach->reelfs)
+ continue;
+ else if ((blkid == BLKADDR_REE1) && !detach->reelfs)
+ continue;
}
rvu_detach_block(rvu, pcifunc, block->type);
}
@@ -1179,8 +1565,84 @@ int rvu_mbox_handler_detach_resources(struct rvu *rvu,
return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc);
}
-static void rvu_attach_block(struct rvu *rvu, int pcifunc,
- int blktype, int num_lfs)
+int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ int blkaddr = BLKADDR_NIX0, vf;
+ struct rvu_pfvf *pf;
+
+ pf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
+
+ /* All CGX mapped PFs are set with assigned NIX block during init */
+ if (is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) {
+ blkaddr = pf->nix_blkaddr;
+ } else if (is_afvf(pcifunc)) {
+ vf = pcifunc - 1;
+ /* Assign NIX based on VF number: all even numbered VFs get
+ * NIX0 and odd numbered ones get NIX1
+ */
+ blkaddr = (vf & 1) ? BLKADDR_NIX1 : BLKADDR_NIX0;
+ /* NIX1 is not present on all silicons */
+ if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
+ blkaddr = BLKADDR_NIX0;
+ }
+
+ /* if SDP1 then the blkaddr is NIX1 */
+ if (is_sdp_pfvf(pcifunc) && pf->sdp_info->node_id == 1)
+ blkaddr = BLKADDR_NIX1;
+
+ switch (blkaddr) {
+ case BLKADDR_NIX1:
+ pfvf->nix_blkaddr = BLKADDR_NIX1;
+ pfvf->nix_rx_intf = NIX_INTFX_RX(1);
+ pfvf->nix_tx_intf = NIX_INTFX_TX(1);
+ break;
+ case BLKADDR_NIX0:
+ default:
+ pfvf->nix_blkaddr = BLKADDR_NIX0;
+ pfvf->nix_rx_intf = NIX_INTFX_RX(0);
+ pfvf->nix_tx_intf = NIX_INTFX_TX(0);
+ break;
+ }
+
+ return pfvf->nix_blkaddr;
+}
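To illustrate the even/odd VF mapping above (the pcifunc value is illustrative): for AF VFs, vf is derived as pcifunc - 1, so pcifunc 1 (vf 0, even) gets NIX0 and pcifunc 2 (vf 1, odd) gets NIX1 when a second NIX block is implemented:

static int example_afvf_nix_blkaddr(struct rvu *rvu)
{
	/* AF VF1: pcifunc = 2 -> vf = 1 (odd), expected BLKADDR_NIX1
	 * on dual-NIX silicon, BLKADDR_NIX0 otherwise.
	 */
	return rvu_get_nix_blkaddr(rvu, 2);
}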
+
+static int rvu_get_attach_blkaddr(struct rvu *rvu, int blktype,
+ u16 pcifunc, struct rsrc_attach *attach)
+{
+ int blkaddr;
+
+ switch (blktype) {
+ case BLKTYPE_NIX:
+ blkaddr = rvu_get_nix_blkaddr(rvu, pcifunc);
+ break;
+ case BLKTYPE_CPT:
+ if (attach->hdr.ver < RVU_MULTI_BLK_VER)
+ return rvu_get_blkaddr(rvu, blktype, 0);
+ blkaddr = attach->cpt_blkaddr ? attach->cpt_blkaddr :
+ BLKADDR_CPT0;
+ if (blkaddr != BLKADDR_CPT0 && blkaddr != BLKADDR_CPT1)
+ return -ENODEV;
+ break;
+ case BLKTYPE_REE:
+ blkaddr = attach->ree_blkaddr ? attach->ree_blkaddr :
+ BLKADDR_REE0;
+ if (blkaddr != BLKADDR_REE0 && blkaddr != BLKADDR_REE1)
+ return -ENODEV;
+ break;
+ default:
+ return rvu_get_blkaddr(rvu, blktype, 0);
+ }
+
+ if (is_block_implemented(rvu->hw, blkaddr))
+ return blkaddr;
+
+ return -ENODEV;
+}
+
+static void rvu_attach_block(struct rvu *rvu, int pcifunc, int blktype,
+ int num_lfs, struct rsrc_attach *attach)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
struct rvu_hwinfo *hw = rvu->hw;
@@ -1192,7 +1654,7 @@ static void rvu_attach_block(struct rvu *rvu, int pcifunc,
if (!num_lfs)
return;
- blkaddr = rvu_get_blkaddr(rvu, blktype, 0);
+ blkaddr = rvu_get_attach_blkaddr(rvu, blktype, pcifunc, attach);
if (blkaddr < 0)
return;
@@ -1221,12 +1683,20 @@ static int rvu_check_rsrc_availability(struct rvu *rvu,
struct rsrc_attach *req, u16 pcifunc)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ int free_lfs, mappedlfs, blkaddr;
struct rvu_hwinfo *hw = rvu->hw;
struct rvu_block *block;
- int free_lfs, mappedlfs;
+ int ret;
+
+ ret = rvu_check_rsrc_policy(rvu, req, pcifunc);
+ if (ret) {
+ dev_err(rvu->dev, "Func 0x%x: Resource policy check failed\n",
+ pcifunc);
+ return ret;
+ }
/* Only one NPA LF can be attached */
- if (req->npalf && !rvu_get_rsrc_mapcount(pfvf, BLKTYPE_NPA)) {
+ if (req->npalf && !is_blktype_attached(pfvf, BLKTYPE_NPA)) {
block = &hw->block[BLKADDR_NPA];
free_lfs = rvu_rsrc_free_count(&block->lf);
if (!free_lfs)
@@ -1239,8 +1709,12 @@ static int rvu_check_rsrc_availability(struct rvu *rvu,
}
/* Only one NIX LF can be attached */
- if (req->nixlf && !rvu_get_rsrc_mapcount(pfvf, BLKTYPE_NIX)) {
- block = &hw->block[BLKADDR_NIX0];
+ if (req->nixlf && !is_blktype_attached(pfvf, BLKTYPE_NIX)) {
+ blkaddr = rvu_get_attach_blkaddr(rvu, BLKTYPE_NIX,
+ pcifunc, req);
+ if (blkaddr < 0)
+ return blkaddr;
+ block = &hw->block[blkaddr];
free_lfs = rvu_rsrc_free_count(&block->lf);
if (!free_lfs)
goto fail;
@@ -1260,7 +1734,7 @@ static int rvu_check_rsrc_availability(struct rvu *rvu,
pcifunc, req->sso, block->lf.max);
return -EINVAL;
}
- mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
+ mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
free_lfs = rvu_rsrc_free_count(&block->lf);
/* Check if additional resources are available */
if (req->sso > mappedlfs &&
@@ -1276,7 +1750,7 @@ static int rvu_check_rsrc_availability(struct rvu *rvu,
pcifunc, req->sso, block->lf.max);
return -EINVAL;
}
- mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
+ mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
free_lfs = rvu_rsrc_free_count(&block->lf);
if (req->ssow > mappedlfs &&
((req->ssow - mappedlfs) > free_lfs))
@@ -1291,7 +1765,7 @@ static int rvu_check_rsrc_availability(struct rvu *rvu,
pcifunc, req->timlfs, block->lf.max);
return -EINVAL;
}
- mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
+ mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
free_lfs = rvu_rsrc_free_count(&block->lf);
if (req->timlfs > mappedlfs &&
((req->timlfs - mappedlfs) > free_lfs))
@@ -1299,20 +1773,43 @@ static int rvu_check_rsrc_availability(struct rvu *rvu,
}
if (req->cptlfs) {
- block = &hw->block[BLKADDR_CPT0];
+ blkaddr = rvu_get_attach_blkaddr(rvu, BLKTYPE_CPT,
+ pcifunc, req);
+ if (blkaddr < 0)
+ return blkaddr;
+ block = &hw->block[blkaddr];
if (req->cptlfs > block->lf.max) {
dev_err(&rvu->pdev->dev,
"Func 0x%x: Invalid CPTLF req, %d > max %d\n",
pcifunc, req->cptlfs, block->lf.max);
return -EINVAL;
}
- mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
+ mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
free_lfs = rvu_rsrc_free_count(&block->lf);
if (req->cptlfs > mappedlfs &&
((req->cptlfs - mappedlfs) > free_lfs))
goto fail;
}
+ if (req->hdr.ver >= RVU_MULTI_BLK_VER && req->reelfs) {
+ blkaddr = rvu_get_attach_blkaddr(rvu, BLKTYPE_REE,
+ pcifunc, req);
+ if (blkaddr < 0)
+ return blkaddr;
+ block = &hw->block[blkaddr];
+ if (req->reelfs > block->lf.max) {
+ dev_err(&rvu->pdev->dev,
+ "Func 0x%x: Invalid REELF req, %d > max %d\n",
+ pcifunc, req->reelfs, block->lf.max);
+ return -EINVAL;
+ }
+ mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
+ free_lfs = rvu_rsrc_free_count(&block->lf);
+ if (req->reelfs > mappedlfs &&
+ ((req->reelfs - mappedlfs) > free_lfs))
+ goto fail;
+ }
+
return 0;
fail:
@@ -1320,6 +1817,22 @@ fail:
return -ENOSPC;
}
+static bool rvu_attach_from_same_block(struct rvu *rvu, int blktype,
+ struct rsrc_attach *attach)
+{
+ int blkaddr, num_lfs;
+
+ blkaddr = rvu_get_attach_blkaddr(rvu, blktype,
+ attach->hdr.pcifunc, attach);
+ if (blkaddr < 0)
+ return false;
+
+ num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, attach->hdr.pcifunc),
+ blkaddr);
+ /* Does the requester already have LFs from the given block? */
+ return !!num_lfs;
+}
+
int rvu_mbox_handler_attach_resources(struct rvu *rvu,
struct rsrc_attach *attach,
struct msg_rsp *rsp)
@@ -1340,10 +1853,10 @@ int rvu_mbox_handler_attach_resources(struct rvu *rvu,
/* Now attach the requested resources */
if (attach->npalf)
- rvu_attach_block(rvu, pcifunc, BLKTYPE_NPA, 1);
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_NPA, 1, attach);
if (attach->nixlf)
- rvu_attach_block(rvu, pcifunc, BLKTYPE_NIX, 1);
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_NIX, 1, attach);
if (attach->sso) {
/* RVU func doesn't know which exact LF or slot is attached
@@ -1353,25 +1866,38 @@ int rvu_mbox_handler_attach_resources(struct rvu *rvu,
*/
if (attach->modify)
rvu_detach_block(rvu, pcifunc, BLKTYPE_SSO);
- rvu_attach_block(rvu, pcifunc, BLKTYPE_SSO, attach->sso);
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_SSO,
+ attach->sso, attach);
}
if (attach->ssow) {
if (attach->modify)
rvu_detach_block(rvu, pcifunc, BLKTYPE_SSOW);
- rvu_attach_block(rvu, pcifunc, BLKTYPE_SSOW, attach->ssow);
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_SSOW,
+ attach->ssow, attach);
}
if (attach->timlfs) {
if (attach->modify)
rvu_detach_block(rvu, pcifunc, BLKTYPE_TIM);
- rvu_attach_block(rvu, pcifunc, BLKTYPE_TIM, attach->timlfs);
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_TIM,
+ attach->timlfs, attach);
}
if (attach->cptlfs) {
- if (attach->modify)
+ if (attach->modify &&
+ rvu_attach_from_same_block(rvu, BLKTYPE_CPT, attach))
rvu_detach_block(rvu, pcifunc, BLKTYPE_CPT);
- rvu_attach_block(rvu, pcifunc, BLKTYPE_CPT, attach->cptlfs);
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_CPT,
+ attach->cptlfs, attach);
+ }
+
+ if (attach->hdr.ver >= RVU_MULTI_BLK_VER && attach->reelfs) {
+ if (attach->modify &&
+ rvu_attach_from_same_block(rvu, BLKTYPE_REE, attach))
+ rvu_detach_block(rvu, pcifunc, BLKTYPE_REE);
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_REE,
+ attach->reelfs, attach);
}
exit:
@@ -1434,6 +1960,8 @@ static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
(lf << block->lfshift), cfg & ~0x7FFULL);
offset = rvu_get_msix_offset(rvu, pfvf, block->addr, lf);
+ if (offset == MSIX_VECTOR_INVALID)
+ return;
/* Update the mapping */
for (vec = 0; vec < nvecs; vec++)
@@ -1449,7 +1977,7 @@ int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req,
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
struct rvu_pfvf *pfvf;
- int lf, slot;
+ int lf, slot, blkaddr;
pfvf = rvu_get_pfvf(rvu, pcifunc);
if (!pfvf->msix.bmap)
@@ -1459,8 +1987,14 @@ int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req,
lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NPA], pcifunc, 0);
rsp->npa_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NPA, lf);
- lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NIX0], pcifunc, 0);
- rsp->nix_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NIX0, lf);
+ /* Get BLKADDR from which LFs are attached to pcifunc */
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0) {
+ rsp->nix_msixoff = MSIX_VECTOR_INVALID;
+ } else {
+ lf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+ rsp->nix_msixoff = rvu_get_msix_offset(rvu, pfvf, blkaddr, lf);
+ }
rsp->sso = pfvf->sso;
for (slot = 0; slot < rsp->sso; slot++) {
@@ -1489,6 +2023,28 @@ int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req,
rsp->cptlf_msixoff[slot] =
rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT0, lf);
}
+
+ rsp->cpt1_lfs = pfvf->cpt1_lfs;
+ for (slot = 0; slot < rsp->cpt1_lfs; slot++) {
+ lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT1], pcifunc, slot);
+ rsp->cpt1_lf_msixoff[slot] =
+ rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT1, lf);
+ }
+
+ rsp->ree0_lfs = pfvf->ree0_lfs;
+ for (slot = 0; slot < rsp->ree0_lfs; slot++) {
+ lf = rvu_get_lf(rvu, &hw->block[BLKADDR_REE0], pcifunc, slot);
+ rsp->ree0_lf_msixoff[slot] =
+ rvu_get_msix_offset(rvu, pfvf, BLKADDR_REE0, lf);
+ }
+
+ rsp->ree1_lfs = pfvf->ree1_lfs;
+ for (slot = 0; slot < rsp->ree1_lfs; slot++) {
+ lf = rvu_get_lf(rvu, &hw->block[BLKADDR_REE1], pcifunc, slot);
+ rsp->ree1_lf_msixoff[slot] =
+ rvu_get_msix_offset(rvu, pfvf, BLKADDR_REE1, lf);
+ }
+
return 0;
}
@@ -1512,6 +2068,13 @@ int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req,
return 0;
}
+int rvu_ndc_sync(struct rvu *rvu, int lfblkaddr, int lfidx, u64 lfoffset)
+{
+ /* Sync cached info for this LF in NDC to LLC/DRAM */
+ rvu_write64(rvu, lfblkaddr, lfoffset, BIT_ULL(12) | lfidx);
+ return rvu_poll_reg(rvu, lfblkaddr, lfoffset, BIT_ULL(12), true);
+}
+
int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req,
struct get_hw_cap_rsp *rsp)
{
@@ -1523,6 +2086,107 @@ int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req,
return 0;
}
+int rvu_mbox_handler_ndc_sync_op(struct rvu *rvu,
+ struct ndc_sync_op *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int err, lfidx, lfblkaddr;
+
+ if (req->npa_lf_sync) {
+ /* Get NPA LF data */
+ lfblkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
+ if (lfblkaddr < 0)
+ return NPA_AF_ERR_AF_LF_INVALID;
+
+ lfidx = rvu_get_lf(rvu, &hw->block[lfblkaddr], pcifunc, 0);
+ if (lfidx < 0)
+ return NPA_AF_ERR_AF_LF_INVALID;
+
+ /* Sync NPA NDC */
+ err = rvu_ndc_sync(rvu, lfblkaddr,
+ lfidx, NPA_AF_NDC_SYNC);
+ if (err)
+ dev_err(rvu->dev,
+ "NDC-NPA sync failed for LF %u\n", lfidx);
+ }
+
+ if (!req->nix_lf_tx_sync && !req->nix_lf_rx_sync)
+ return 0;
+
+ /* Get NIX LF data */
+ lfblkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (lfblkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ lfidx = rvu_get_lf(rvu, &hw->block[lfblkaddr], pcifunc, 0);
+ if (lfidx < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ if (req->nix_lf_tx_sync) {
+ /* Sync NIX TX NDC */
+ err = rvu_ndc_sync(rvu, lfblkaddr,
+ lfidx, NIX_AF_NDC_TX_SYNC);
+ if (err)
+ dev_err(rvu->dev,
+ "NDC-NIX-TX sync fail for LF %u\n", lfidx);
+ }
+
+ if (req->nix_lf_rx_sync) {
+ /* Sync NIX RX NDC */
+ err = rvu_ndc_sync(rvu, lfblkaddr,
+ lfidx, NIX_AF_NDC_RX_SYNC);
+ if (err)
+ dev_err(rvu->dev,
+ "NDC-NIX-RX sync failed for LF %u\n", lfidx);
+ }
+
+ return 0;
+}
+
+int rvu_mbox_handler_set_vf_perm(struct rvu *rvu, struct set_vf_perm *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
+ int blkaddr, nixlf;
+ u16 target;
+
+ /* Only PF can add VF permissions */
+ if ((pcifunc & RVU_PFVF_FUNC_MASK) || is_afvf(pcifunc))
+ return -EOPNOTSUPP;
+
+ target = (pcifunc & ~RVU_PFVF_FUNC_MASK) | (req->vf + 1);
+ pfvf = rvu_get_pfvf(rvu, target);
+
+ if (req->flags & RESET_VF_PERM) {
+ pfvf->flags &= RVU_CLEAR_VF_PERM;
+ } else if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) ^
+ (req->flags & VF_TRUSTED)) {
+ change_bit(PF_SET_VF_TRUSTED, &pfvf->flags);
+ /* disable multicast and promisc entries */
+ if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags)) {
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, target);
+ if (blkaddr < 0)
+ return 0;
+ nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
+ target, 0);
+ if (nixlf < 0)
+ return 0;
+ npc_enadis_default_mce_entry(rvu, target, nixlf,
+ NIXLF_ALLMULTI_ENTRY,
+ false);
+ npc_enadis_default_mce_entry(rvu, target, nixlf,
+ NIXLF_PROMISC_ENTRY,
+ false);
+ }
+ }
+
+ return 0;
+}
+
static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid,
struct mbox_msghdr *req)
{
@@ -1741,41 +2405,105 @@ static inline void rvu_afvf_mbox_up_handler(struct work_struct *work)
__rvu_mbox_up_handler(mwork, TYPE_AFVF);
}
+static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
+ int num, int type)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int region;
+ u64 bar4;
+
+ /* On the CN10K platform a PF's VF mailbox regions follow its
+ * PF <-> AF mailbox region, whereas on OcteonTx2 the address is
+ * read from the RVU_PF_VF_BAR4_ADDR register.
+ */
+ if (type == TYPE_AFVF) {
+ for (region = 0; region < num; region++) {
+ if (hw->cap.per_pf_mbox_regs) {
+ bar4 = rvu_read64(rvu, BLKADDR_RVUM,
+ RVU_AF_PFX_BAR4_ADDR(0)) +
+ MBOX_SIZE;
+ bar4 += region * MBOX_SIZE;
+ } else {
+ bar4 = rvupf_read64(rvu, RVU_PF_VF_BAR4_ADDR);
+ bar4 += region * MBOX_SIZE;
+ }
+ mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE);
+ if (!mbox_addr[region])
+ goto error;
+ }
+ return 0;
+ }
+
+ /* On the CN10K platform the AF <-> PF mailbox region of a PF is read
+ * from per-PF registers, whereas on OcteonTx2 it is read from the
+ * RVU_AF_PF_BAR4_ADDR register.
+ */
+ for (region = 0; region < num; region++) {
+ if (hw->cap.per_pf_mbox_regs) {
+ bar4 = rvu_read64(rvu, BLKADDR_RVUM,
+ RVU_AF_PFX_BAR4_ADDR(region));
+ } else {
+ bar4 = rvu_read64(rvu, BLKADDR_RVUM,
+ RVU_AF_PF_BAR4_ADDR);
+ bar4 += region * MBOX_SIZE;
+ }
+ mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE);
+ if (!mbox_addr[region])
+ goto error;
+ }
+ return 0;
+
+error:
+ while (region--)
+ iounmap((void __iomem *)mbox_addr[region]);
+ return -ENOMEM;
+}
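The address arithmetic used above can be summarised with a small helper sketch (the helper itself is illustrative and not part of the patch): on CN10K, where hw->cap.per_pf_mbox_regs is set, a PF's AF-VF mailbox regions start one MBOX_SIZE past its PF <-> AF region and are laid out MBOX_SIZE apart:

/* Illustrative only: mirrors the CN10K AF-VF region layout computed above */
static u64 cn10k_afvf_mbox_addr(u64 pf_bar4, int region)
{
	return pf_bar4 + MBOX_SIZE + (u64)region * MBOX_SIZE;
}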
+
static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
int type, int num,
void (mbox_handler)(struct work_struct *),
void (mbox_up_handler)(struct work_struct *))
{
- void __iomem *hwbase = NULL, *reg_base;
- int err, i, dir, dir_up;
+ int err = -EINVAL, i, dir, dir_up;
+ void __iomem *reg_base;
struct rvu_work *mwork;
+ void **mbox_regions;
const char *name;
- u64 bar4_addr;
+
+ mbox_regions = kcalloc(num, sizeof(void *), GFP_KERNEL);
+ if (!mbox_regions)
+ return -ENOMEM;
switch (type) {
case TYPE_AFPF:
name = "rvu_afpf_mailbox";
- bar4_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PF_BAR4_ADDR);
dir = MBOX_DIR_AFPF;
dir_up = MBOX_DIR_AFPF_UP;
reg_base = rvu->afreg_base;
+ err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFPF);
+ if (err)
+ goto free_regions;
break;
case TYPE_AFVF:
name = "rvu_afvf_mailbox";
- bar4_addr = rvupf_read64(rvu, RVU_PF_VF_BAR4_ADDR);
dir = MBOX_DIR_PFVF;
dir_up = MBOX_DIR_PFVF_UP;
reg_base = rvu->pfreg_base;
+ err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFVF);
+ if (err)
+ goto free_regions;
break;
default:
- return -EINVAL;
+ return err;
}
mw->mbox_wq = alloc_workqueue(name,
WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
num);
- if (!mw->mbox_wq)
- return -ENOMEM;
+ if (!mw->mbox_wq) {
+ err = -ENOMEM;
+ goto unmap_regions;
+ }
mw->mbox_wrk = devm_kcalloc(rvu->dev, num,
sizeof(struct rvu_work), GFP_KERNEL);
@@ -1791,23 +2519,13 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
goto exit;
}
- /* Mailbox is a reserved memory (in RAM) region shared between
- * RVU devices, shouldn't be mapped as device memory to allow
- * unaligned accesses.
- */
- hwbase = ioremap_wc(bar4_addr, MBOX_SIZE * num);
- if (!hwbase) {
- dev_err(rvu->dev, "Unable to map mailbox region\n");
- err = -ENOMEM;
- goto exit;
- }
-
- err = otx2_mbox_init(&mw->mbox, hwbase, rvu->pdev, reg_base, dir, num);
+ err = otx2_mbox_regions_init(&mw->mbox, mbox_regions, rvu->pdev,
+ reg_base, dir, num);
if (err)
goto exit;
- err = otx2_mbox_init(&mw->mbox_up, hwbase, rvu->pdev,
- reg_base, dir_up, num);
+ err = otx2_mbox_regions_init(&mw->mbox_up, mbox_regions, rvu->pdev,
+ reg_base, dir_up, num);
if (err)
goto exit;
@@ -1820,25 +2538,36 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
mwork->rvu = rvu;
INIT_WORK(&mwork->work, mbox_up_handler);
}
-
+ kfree(mbox_regions);
return 0;
+
exit:
- if (hwbase)
- iounmap((void __iomem *)hwbase);
destroy_workqueue(mw->mbox_wq);
+unmap_regions:
+ while (num--)
+ iounmap((void __iomem *)mbox_regions[num]);
+free_regions:
+ kfree(mbox_regions);
return err;
}
static void rvu_mbox_destroy(struct mbox_wq_info *mw)
{
+ struct otx2_mbox *mbox = &mw->mbox;
+ struct otx2_mbox_dev *mdev;
+ int devid;
+
if (mw->mbox_wq) {
flush_workqueue(mw->mbox_wq);
destroy_workqueue(mw->mbox_wq);
mw->mbox_wq = NULL;
}
- if (mw->mbox.hwbase)
- iounmap((void __iomem *)mw->mbox.hwbase);
+ for (devid = 0; devid < mbox->ndevs; devid++) {
+ mdev = &mbox->dev[devid];
+ if (mdev->hwbase)
+ iounmap((void __iomem *)mdev->hwbase);
+ }
otx2_mbox_destroy(&mw->mbox);
otx2_mbox_destroy(&mw->mbox_up);
@@ -1934,6 +2663,125 @@ static void rvu_enable_mbox_intr(struct rvu *rvu)
INTR_MASK(hw->total_pfs) & ~1ULL);
}
+static void rvu_npa_lf_mapped_nix_lf_teardown(struct rvu *rvu, u16 pcifunc)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *nix_block;
+ struct rsrc_detach detach;
+ u16 nix_pcifunc;
+ int blkaddr, lf;
+ u64 regval;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return;
+
+ nix_block = &hw->block[blkaddr];
+ for (lf = 0; lf < nix_block->lf.max; lf++) {
+ /* Loop through all the NIX LFs and check whether this
+ * pcifunc's NPA LF is being used by them.
+ */
+ regval = rvu_read64(rvu, blkaddr, NIX_AF_LFX_CFG(lf));
+ if ((regval & 0xFFFF) != pcifunc)
+ continue;
+
+ nix_pcifunc = nix_block->fn_map[lf];
+
+ /* Skip NIX LF attached to the pcifunc as it is already
+ * quiesced.
+ */
+ if (nix_pcifunc == pcifunc)
+ continue;
+
+ detach.partial = true;
+ detach.nixlf = true;
+ /* Teardown the NIX LF. */
+ rvu_nix_lf_teardown(rvu, nix_pcifunc, blkaddr, lf);
+ rvu_lf_reset(rvu, nix_block, lf);
+ /* Detach the NIX LF. */
+ rvu_detach_rsrcs(rvu, &detach, nix_pcifunc);
+ }
+}
+
+static void rvu_npa_lf_mapped_sso_lf_teardown(struct rvu *rvu, u16 pcifunc)
+{
+ u16 sso_pcifunc, match_cnt = 0;
+ int npa_blkaddr, blkaddr, lf;
+ struct rvu_block *sso_block;
+ struct rsrc_detach detach;
+ u16 *pcifunc_arr;
+ u64 regval;
+
+ pcifunc_arr = kcalloc(rvu->hw->total_pfs + rvu->hw->total_vfs,
+ sizeof(*pcifunc_arr), GFP_KERNEL);
+ if (!pcifunc_arr)
+ return;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0) {
+ kfree(pcifunc_arr);
+ return;
+ }
+
+ npa_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (npa_blkaddr < 0) {
+ kfree(pcifunc_arr);
+ return;
+ }
+
+ regval = BIT_ULL(16) | pcifunc;
+ rvu_write64(rvu, npa_blkaddr, NPA_AF_BAR2_SEL, regval);
+
+ sso_block = &rvu->hw->block[blkaddr];
+ for (lf = 0; lf < sso_block->lf.max; lf++) {
+ regval = rvu_read64(rvu, blkaddr, SSO_AF_XAQX_GMCTL(lf));
+ if ((regval & 0xFFFF) != pcifunc)
+ continue;
+
+ sso_pcifunc = sso_block->fn_map[lf];
+ regval = rvu_read64(rvu, blkaddr, sso_block->lfcfg_reg |
+ (lf << sso_block->lfshift));
+ rvu_sso_lf_drain_queues(rvu, sso_pcifunc, lf, regval & 0xF);
+
+ regval = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_XAQ_AURA(lf));
+ rvu_sso_deinit_xaq_aura(rvu, sso_pcifunc, pcifunc, regval, lf);
+ }
+
+ for (lf = 0; lf < sso_block->lf.max; lf++) {
+ regval = rvu_read64(rvu, blkaddr, SSO_AF_XAQX_GMCTL(lf));
+ if ((regval & 0xFFFF) != pcifunc)
+ continue;
+
+ regval = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_XAQ_AURA(lf));
+ if (rvu_sso_poll_aura_cnt(rvu, npa_blkaddr, regval))
+ dev_err(rvu->dev,
+ "[%d]Failed to free XAQs to aura[%lld]\n",
+ __LINE__, regval);
+
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_XAQ_AURA(lf), 0);
+ rvu_write64(rvu, blkaddr, SSO_AF_XAQX_GMCTL(lf), 0);
+ }
+
+ for (lf = 0; lf < sso_block->lf.max; lf++) {
+ regval = rvu_read64(rvu, blkaddr, SSO_AF_XAQX_GMCTL(lf));
+ if ((regval & 0xFFFF) != pcifunc)
+ continue;
+
+ sso_pcifunc = sso_block->fn_map[lf];
+ regval = rvu_read64(rvu, blkaddr, sso_block->lfcfg_reg |
+ (lf << sso_block->lfshift));
+ /* Save SSO PF_FUNC info to detach all LFs of that PF_FUNC at
+ * once later.
+ */
+ rvu_sso_lf_teardown(rvu, sso_pcifunc, lf, regval & 0xF);
+ rvu_lf_reset(rvu, sso_block, lf);
+ pcifunc_arr[match_cnt] = sso_pcifunc;
+ match_cnt++;
+ }
+
+ detach.partial = true;
+ detach.sso = true;
+
+ for (sso_pcifunc = 0; sso_pcifunc < match_cnt; sso_pcifunc++)
+ rvu_detach_rsrcs(rvu, &detach, pcifunc_arr[sso_pcifunc]);
+ kfree(pcifunc_arr);
+}
+
static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr)
{
struct rvu_block *block;
@@ -1942,25 +2790,62 @@ static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr)
block = &rvu->hw->block[blkaddr];
num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
- block->type);
+ block->addr);
if (!num_lfs)
return;
+
+ if (block->addr == BLKADDR_SSO) {
+ for (slot = 0; slot < num_lfs; slot++) {
+ lf = rvu_get_lf(rvu, block, pcifunc, slot);
+ if (lf < 0)
+ continue;
+ rvu_sso_lf_drain_queues(rvu, pcifunc, lf, slot);
+ }
+ rvu_sso_cleanup_xaq_aura(rvu, pcifunc, num_lfs);
+ }
+
for (slot = 0; slot < num_lfs; slot++) {
lf = rvu_get_lf(rvu, block, pcifunc, slot);
if (lf < 0)
continue;
/* Cleanup LF and reset it */
- if (block->addr == BLKADDR_NIX0)
+ if (block->addr == BLKADDR_NIX0 || block->addr == BLKADDR_NIX1)
rvu_nix_lf_teardown(rvu, pcifunc, block->addr, lf);
- else if (block->addr == BLKADDR_NPA)
+ else if (block->addr == BLKADDR_NPA) {
+ rvu_npa_lf_mapped_nix_lf_teardown(rvu, pcifunc);
+ rvu_npa_lf_mapped_sso_lf_teardown(rvu, pcifunc);
rvu_npa_lf_teardown(rvu, pcifunc, lf);
+ } else if (block->addr == BLKADDR_SSO)
+ rvu_sso_lf_teardown(rvu, pcifunc, lf, slot);
+ else if (block->addr == BLKADDR_SSOW)
+ rvu_ssow_lf_teardown(rvu, pcifunc, lf, slot);
+ else if (block->addr == BLKADDR_TIM)
+ rvu_tim_lf_teardown(rvu, pcifunc, lf, slot);
+ else if ((block->addr == BLKADDR_CPT0) ||
+ (block->addr == BLKADDR_CPT1))
+ rvu_cpt_lf_teardown(rvu, pcifunc, block->addr, lf,
+ slot);
err = rvu_lf_reset(rvu, block, lf);
if (err) {
dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n",
block->addr, lf);
}
+
+ if (block->addr == BLKADDR_SSO)
+ rvu_sso_hwgrp_config_thresh(rvu, block->addr, lf);
+ }
+}
+
+static void rvu_sso_pfvf_rst(struct rvu *rvu, u16 pcifunc)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ struct rvu_hwinfo *hw = rvu->hw;
+
+ if (pfvf->sso_uniq_ident) {
+ rvu_free_rsrc(&hw->sso.pfvf_ident, pfvf->sso_uniq_ident);
+ pfvf->sso_uniq_ident = 0;
}
}
@@ -1973,12 +2858,24 @@ static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
* 3. Cleanup pools (NPA)
*/
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX0);
+ rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX1);
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT0);
+ rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT1);
+ rvu_blklf_teardown(rvu, pcifunc, BLKADDR_REE0);
+ rvu_blklf_teardown(rvu, pcifunc, BLKADDR_REE1);
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_TIM);
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSOW);
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSO);
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NPA);
+ rvu_reset_lmt_map_tbl(rvu, pcifunc);
rvu_detach_rsrcs(rvu, NULL, pcifunc);
+ rvu_sso_pfvf_rst(rvu, pcifunc);
+ /* In scenarios where PF/VF drivers detach the NIX LF without freeing
+ * its MCAM entries, check and free those entries explicitly to avoid a
+ * leak. Since the LF is already detached, pass -1 as the LF number.
+ */
+ rvu_npc_free_mcam_entries(rvu, pcifunc, -1);
+
mutex_unlock(&rvu->flr_lock);
}
@@ -2044,11 +2941,12 @@ static void rvu_afvf_queue_flr_work(struct rvu *rvu, int start_vf, int numvfs)
for (vf = 0; vf < numvfs; vf++) {
if (!(intr & BIT_ULL(vf)))
continue;
- dev = vf + start_vf + rvu->hw->total_pfs;
- queue_work(rvu->flr_wq, &rvu->flr_wrk[dev].work);
/* Clear and disable the interrupt */
rvupf_write64(rvu, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(reg), BIT_ULL(vf));
+
+ dev = vf + start_vf + rvu->hw->total_pfs;
+ queue_work(rvu->flr_wq, &rvu->flr_wrk[dev].work);
}
}
@@ -2064,14 +2962,14 @@ static irqreturn_t rvu_flr_intr_handler(int irq, void *rvu_irq)
for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
if (intr & (1ULL << pf)) {
- /* PF is already dead do only AF related operations */
- queue_work(rvu->flr_wq, &rvu->flr_wrk[pf].work);
/* clear interrupt */
rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT,
BIT_ULL(pf));
/* Disable the interrupt */
rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
BIT_ULL(pf));
+ /* PF is already dead do only AF related operations */
+ queue_work(rvu->flr_wq, &rvu->flr_wrk[pf].work);
}
}
@@ -2148,6 +3046,10 @@ static void rvu_unregister_interrupts(struct rvu *rvu)
{
int irq;
+ rvu_sso_unregister_interrupts(rvu);
+ rvu_cpt_unregister_interrupts(rvu);
+ rvu_ree_unregister_interrupts(rvu);
+
/* Disable the Mbox interrupt */
rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C,
INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
@@ -2357,8 +3259,20 @@ static int rvu_register_interrupts(struct rvu *rvu)
goto fail;
}
rvu->irq_allocated[offset] = true;
- return 0;
+ ret = rvu_sso_register_interrupts(rvu);
+ if (ret)
+ goto fail;
+
+ ret = rvu_cpt_register_interrupts(rvu);
+ if (ret)
+ goto fail;
+
+ ret = rvu_ree_register_interrupts(rvu);
+ if (ret)
+ goto fail;
+
+ return 0;
fail:
rvu_unregister_interrupts(rvu);
return ret;
@@ -2455,9 +3369,7 @@ static void rvu_enable_afvf_intr(struct rvu *rvu)
rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
}
-#define PCI_DEVID_OCTEONTX2_LBK 0xA061
-
-static int lbk_get_num_chans(void)
+int rvu_get_num_lbk_chans(void)
{
struct pci_dev *pdev;
void __iomem *base;
@@ -2492,7 +3404,7 @@ static int rvu_enable_sriov(struct rvu *rvu)
return 0;
}
- chans = lbk_get_num_chans();
+ chans = rvu_get_num_lbk_chans();
if (chans < 0)
return chans;
@@ -2505,6 +3417,12 @@ static int rvu_enable_sriov(struct rvu *rvu)
if (!vfs)
return 0;
+ /* LBK channel number 63 is used for switching packets between
+ * CGX mapped VFs. Hence limit LBK pairs to 62.
+ */
+ if (vfs > 62)
+ vfs = 62;
+
/* Save VFs number for reference in VF interrupts handlers.
* Since interrupts might start arriving during SRIOV enablement
* ordinary API cannot be used to get number of enabled VFs.
@@ -2543,6 +3461,8 @@ static void rvu_update_module_params(struct rvu *rvu)
strscpy(rvu->mkex_pfl_name,
mkex_profile ? mkex_profile : default_pfl_name, MKEX_NAME_LEN);
+ strscpy(rvu->kpu_pfl_name,
+ kpu_profile ? kpu_profile : default_pfl_name, KPU_NAME_LEN);
}
static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -2588,8 +3508,11 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
rvu->ptp = ptp_get();
if (IS_ERR(rvu->ptp)) {
err = PTR_ERR(rvu->ptp);
- if (err == -EPROBE_DEFER)
+ if (err == -EPROBE_DEFER) {
+ dev_err(dev,
+ "PTP driver not loaded, deferring probe\n");
goto err_release_regions;
+ }
rvu->ptp = NULL;
}
@@ -2620,28 +3543,57 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = rvu_mbox_init(rvu, &rvu->afpf_wq_info, TYPE_AFPF,
rvu->hw->total_pfs, rvu_afpf_mbox_handler,
rvu_afpf_mbox_up_handler);
- if (err)
+ if (err) {
+ dev_err(dev, "%s: Failed to initialize mbox\n", __func__);
goto err_hwsetup;
+ }
err = rvu_flr_init(rvu);
- if (err)
+ if (err) {
+ dev_err(dev, "%s: Failed to initialize flr\n", __func__);
goto err_mbox;
+ }
err = rvu_register_interrupts(rvu);
- if (err)
+ if (err) {
+ dev_err(dev, "%s: Failed to register interrupts\n", __func__);
goto err_flr;
+ }
+
+ err = rvu_register_dl(rvu);
+ if (err) {
+ dev_err(dev, "%s: Failed to register devlink\n", __func__);
+ goto err_irq;
+ }
rvu_setup_rvum_blk_revid(rvu);
+ err = rvu_policy_init(rvu);
+ if (err)
+ goto err_dl;
+
/* Enable AF's VFs (if any) */
err = rvu_enable_sriov(rvu);
- if (err)
- goto err_irq;
+ if (err) {
+ dev_err(dev, "%s: Failed to enable sriov\n", __func__);
+ goto err_policy;
+ }
/* Initialize debugfs */
rvu_dbg_init(rvu);
+ mutex_init(&rvu->rswitch.switch_lock);
+
+ if (rvu->fwdata)
+ ptp_start(rvu->ptp, rvu->fwdata->sclk, rvu->fwdata->ptp_ext_clk_rate,
+ rvu->fwdata->ptp_ext_tstamp);
+
return 0;
+
+err_policy:
+ rvu_policy_destroy(rvu);
+err_dl:
+ rvu_unregister_dl(rvu);
err_irq:
rvu_unregister_interrupts(rvu);
err_flr:
@@ -2672,6 +3624,8 @@ static void rvu_remove(struct pci_dev *pdev)
struct rvu *rvu = pci_get_drvdata(pdev);
rvu_dbg_exit(rvu);
+ rvu_policy_destroy(rvu);
+ rvu_unregister_dl(rvu);
rvu_unregister_interrupts(rvu);
rvu_flr_wq_destroy(rvu);
rvu_cgx_exit(rvu);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 90eed3160915..8504d276bc57 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -1,26 +1,34 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Marvell OcteonTx2 RVU Admin Function driver
+/* Marvell RVU Admin Function driver
*
- * Copyright (C) 2018 Marvell International Ltd.
+ * Copyright (C) 2018 Marvell.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef RVU_H
#define RVU_H
#include <linux/pci.h>
+#include <net/devlink.h>
+
#include "rvu_struct.h"
+#include "rvu_devlink.h"
#include "common.h"
#include "mbox.h"
+#include "npc.h"
+#include "rvu_validation.h"
+#include "rvu_reg.h"
/* PCI device IDs */
#define PCI_DEVID_OCTEONTX2_RVU_AF 0xA065
+#define PCI_DEVID_OCTEONTX2_LBK 0xA061
/* Subsystem Device ID */
+#define PCI_SUBSYS_DEVID_98XX 0xB100
#define PCI_SUBSYS_DEVID_96XX 0xB200
+#define PCI_SUBSYS_DEVID_CN10K_A 0xB900
+#define PCI_SUBSYS_DEVID_CNF10K_A 0xBA00
+#define PCI_SUBSYS_DEVID_CNF10K_B 0xBC00
/* PCI BAR nos */
#define PCI_AF_REG_BAR_NUM 0
@@ -28,6 +36,7 @@
#define PCI_MBOX_BAR_NUM 4
#define NAME_SIZE 32
+#define MAX_NIX_BLKS 2
/* PF_FUNC */
#define RVU_PFVF_PF_SHIFT 10
@@ -42,6 +51,10 @@ struct dump_ctx {
bool all;
};
+struct cpt_dump_ctx {
+ char e_type[NAME_SIZE];
+};
+
struct rvu_debugfs {
struct dentry *root;
struct dentry *cgx_root;
@@ -50,11 +63,16 @@ struct rvu_debugfs {
struct dentry *npa;
struct dentry *nix;
struct dentry *npc;
+ struct dentry *sso;
+ struct dentry *sso_hwgrp;
+ struct dentry *sso_hws;
+ struct dentry *cpt;
struct dump_ctx npa_aura_ctx;
struct dump_ctx npa_pool_ctx;
struct dump_ctx nix_cq_ctx;
struct dump_ctx nix_rq_ctx;
struct dump_ctx nix_sq_ctx;
+ struct cpt_dump_ctx cpt_ctx;
int npa_qsize_id;
int nix_qsize_id;
};
@@ -88,6 +106,7 @@ struct rvu_block {
u64 msixcfg_reg;
u64 lfreset_reg;
unsigned char name[NAME_SIZE];
+ struct rvu *rvu;
};
struct nix_mcast {
@@ -104,6 +123,36 @@ struct nix_mce_list {
int max;
};
+/* layer metadata to uniquely identify a packet header field */
+struct npc_layer_mdata {
+ u8 lid;
+ u8 ltype;
+ u8 hdr;
+ u8 key;
+ u8 len;
+};
+
+/* Structure to represent a field present in the
+ * generated key. A key field may be present anywhere and can
+ * be of any size in the generated key. Once this structure
+ * is populated for the fields of interest, a field's presence
+ * and location (if present) can be known.
+ */
+struct npc_key_field {
+ /* Masks where all set bits indicate position
+ * of a field in the key
+ */
+ u64 kw_mask[NPC_MAX_KWS_IN_KEY];
+ /* Number of key words a field spans. If a field is
+ * 16 bytes long and its key offset is 4, then the field will use
+ * 4 bytes in KW0, 8 bytes in KW1 and 4 bytes in KW2, and
+ * nr_kws will be 3 (KW0, KW1 and KW2).
+ */
+ int nr_kws;
+ /* used by packet header fields */
+ struct npc_layer_mdata layer_mdata;
+};
+
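To make the nr_kws arithmetic above concrete, here is a minimal editor's sketch (not part of the patch; the function name is hypothetical), assuming 8-byte key words:

static int example_nr_kws(int key_off, int len)
{
        int start_kw = key_off / 8;           /* first KW touched */
        int end_kw = (key_off + len - 1) / 8; /* last KW touched  */

        /* key_off = 4, len = 16 -> KW0..KW2, i.e. nr_kws = 3 */
        return end_kw - start_kw + 1;
}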
struct npc_mcam {
struct rsrc_bmap counters;
struct mutex lock; /* MCAM entries and counters update lock */
@@ -115,6 +164,7 @@ struct npc_mcam {
u16 *entry2cntr_map;
u16 *cntr2pfvf_map;
u16 *cntr_refcnt;
+ u16 *entry2target_pffunc;
u8 keysize; /* MCAM keysize 112/224/448 bits */
u8 banks; /* Number of MCAM banks */
u8 banks_per_entry;/* Number of keywords in key */
@@ -126,7 +176,49 @@ struct npc_mcam {
u16 lprio_start;
u16 hprio_count;
u16 hprio_end;
- u16 rx_miss_act_cntr; /* Counter for RX MISS action */
+ u16 rx_miss_act_cntr; /* Counter for RX MISS action */
+ /* fields present in the generated key */
+ struct npc_key_field tx_key_fields[NPC_KEY_FIELDS_MAX];
+ struct npc_key_field rx_key_fields[NPC_KEY_FIELDS_MAX];
+ u64 tx_features;
+ u64 rx_features;
+ struct list_head mcam_rules;
+};
+
+struct sso_rsrc {
+ u8 sso_hws;
+ u16 sso_hwgrps;
+ u16 sso_xaq_num_works;
+ u16 sso_xaq_buf_size;
+ u16 sso_iue;
+ u64 iaq_rsvd;
+ u64 iaq_max;
+ u64 taq_rsvd;
+ u64 taq_max;
+ struct rsrc_bmap pfvf_ident;
+};
+
+enum tim_ring_interval {
+ TIM_INTERVAL_1US = 0,
+ TIM_INTERVAL_10US,
+ TIM_INTERVAL_1MS,
+ TIM_INTERVAL_INVAL,
+};
+
+struct tim_rsrc {
+ u16 rings_per_intvl[TIM_INTERVAL_INVAL];
+ enum tim_ring_interval *ring_intvls;
+};
+
+struct ree_rsrc {
+ struct qmem *graph_ctx; /* Graph base address - used by HW */
+ struct qmem *prefix_ctx; /* Prefix blocks - used by HW */
+ void **ruledb; /* ROF file from application */
+ u8 *ruledbi; /* Incremental checksum instructions */
+ u32 aq_head; /* AF AQ head address */
+ u32 ruledb_len; /* Length of ruledb */
+ u32 ruledbi_len; /* Length of ruledbi */
+ u8 ruledb_blocks; /* Number of blocks pointed by ruledb */
};
/* Structure for per RVU func info ie PF/VF */
@@ -137,7 +229,11 @@ struct rvu_pfvf {
u16 ssow;
u16 cptlfs;
u16 timlfs;
+ u16 cpt1_lfs;
+ u16 ree0_lfs;
+ u16 ree1_lfs;
u8 cgx_lmac;
+ u8 sso_uniq_ident;
/* Block LF's MSIX vector info */
struct rsrc_bmap msix; /* Bitmap for MSIX vector alloc */
@@ -169,21 +265,47 @@ struct rvu_pfvf {
u16 maxlen;
u16 minlen;
+ bool hw_rx_tstamp_en; /* Is rx_tstamp enabled */
u8 mac_addr[ETH_ALEN]; /* MAC address of this PF/VF */
+ u8 default_mac[ETH_ALEN]; /* MAC address from FWdata */
- /* Broadcast pkt replication info */
+ /* Broadcast/Multicast/Promisc pkt replication info */
u16 bcast_mce_idx;
+ u16 mcast_mce_idx;
+ u16 promisc_mce_idx;
struct nix_mce_list bcast_mce_list;
+ struct nix_mce_list mcast_mce_list;
+ struct nix_mce_list promisc_mce_list;
+ bool use_mce_list;
- /* VLAN offload */
- struct mcam_entry entry;
- int rxvlan_index;
- bool rxvlan;
+ /* For resource limits */
+ struct pci_dev *pdev;
+ struct kobject *limits_kobj;
bool cgx_in_use; /* this PF/VF using CGX? */
int cgx_users; /* number of cgx users - used only by PFs */
+ int intf_mode;
+ u8 nix_blkaddr; /* BLKADDR_NIX0/1 assigned to this PF */
+ u8 nix_rx_intf; /* NIX0_RX/NIX1_RX interface to NPC */
+ u8 nix_tx_intf; /* NIX0_TX/NIX1_TX interface to NPC */
+ u8 lbkid; /* NIX0/1 lbk link ID */
+ u64 lmt_base_addr; /* Preserving the pcifunc's lmtst base addr */
+ u64 lmt_map_ent_w1; /* Preserving word1 of the lmtst map table entry */
+ unsigned long flags;
+ struct sdp_node_info *sdp_info;
+
+ struct rvu_npc_mcam_rule *def_ucast_rule;
+};
+
+enum rvu_pfvf_flags {
+ NIXLF_INITIALIZED = 0,
+ PF_SET_VF_MAC,
+ PF_SET_VF_CFG,
+ PF_SET_VF_TRUSTED,
};
+#define RVU_CLEAR_VF_PERM ~GENMASK(PF_SET_VF_TRUSTED, PF_SET_VF_MAC)
+
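Expanding the mask above for clarity: with PF_SET_VF_MAC = 1 and PF_SET_VF_TRUSTED = 3 from the enum, GENMASK(3, 1) is 0b1110, so RVU_CLEAR_VF_PERM (~0b1110) clears the three PF_SET_VF_* flags while leaving NIXLF_INITIALIZED (bit 0) untouched.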
struct nix_txsch {
struct rsrc_bmap schq;
u8 lvl;
@@ -218,12 +340,32 @@ struct nix_lso {
u8 in_use;
};
+struct nix_txvlan {
+#define NIX_TX_VTAG_DEF_MAX 0x400
+ struct rsrc_bmap rsrc;
+ u16 *entry2pfvf_map;
+ struct mutex rsrc_lock; /* Serialize resource alloc/free */
+};
+
+struct nix_ipolicer {
+ struct rsrc_bmap band_prof;
+ u16 *pfvf_map;
+ u16 *match_id;
+ u16 *ref_count;
+};
+
struct nix_hw {
+ int blkaddr;
+ struct rvu *rvu;
struct nix_txsch txsch[NIX_TXSCH_LVL_CNT]; /* Tx schedulers */
struct nix_mcast mcast;
struct nix_flowkey flowkey;
struct nix_mark_format mark_format;
struct nix_lso lso;
+ struct nix_txvlan txvlan;
+ struct nix_ipolicer *ipolicer;
+ u64 *tx_credits;
+ void *tx_stall;
};
/* RVU block's capabilities or functionality,
@@ -237,8 +379,13 @@ struct hw_cap {
u16 nix_txsch_per_sdp_lmac; /* Max Q's transmitting to SDP LMAC */
bool nix_fixed_txschq_mapping; /* Schq mapping fixed or flexible */
bool nix_shaping; /* Is shaping and coloring supported */
+ bool nix_shaper_toggle_wait; /* Shaping toggle needs poll/wait */
bool nix_tx_link_bp; /* Can link backpressure TL queues ? */
bool nix_rx_multicast; /* Rx packet replication support */
+ bool nix_common_dwrr_mtu; /* Common DWRR MTU for quantum config */
+ bool per_pf_mbox_regs; /* PF mbox specified in per PF registers ? */
+ bool programmable_chans; /* Channels programmable ? */
+ bool ipolicer;
};
struct rvu_hwinfo {
@@ -247,16 +394,32 @@ struct rvu_hwinfo {
u16 max_vfs_per_pf; /* Max VFs that can be attached to a PF */
u8 cgx;
u8 lmac_per_cgx;
+ u16 cgx_chan_base; /* CGX base channel number */
+ u16 lbk_chan_base; /* LBK base channel number */
+ u16 sdp_chan_base; /* SDP base channel number */
+ u16 cpt_chan_base; /* CPT base channel number */
u8 cgx_links;
u8 lbk_links;
u8 sdp_links;
+ u8 cpt_links; /* Number of CPT links */
u8 npc_kpus; /* No of parser units */
+ u8 npc_pkinds; /* No of port kinds */
+ u8 npc_intfs; /* No of interfaces */
+ u8 npc_kpu_entries; /* No of KPU entries */
+ u16 npc_counters; /* No of match stats counters */
+ u32 lbk_bufsize; /* FIFO size supported by LBK */
+ bool npc_ext_set; /* Extended register set */
+ u64 npc_stat_ena; /* Match stats enable bit */
struct hw_cap cap;
struct rvu_block block[BLK_COUNT]; /* Block info */
- struct nix_hw *nix0;
+ struct nix_hw *nix;
+ struct rvu *rvu;
struct npc_pkind pkind;
struct npc_mcam mcam;
+ struct sso_rsrc sso;
+ struct tim_rsrc tim;
+ struct ree_rsrc *ree;
};
struct mbox_wq_info {
@@ -285,8 +448,14 @@ struct rvu_fwdata {
u64 mcam_addr;
u64 mcam_sz;
u64 msixtr_base;
-#define FWDATA_RESERVED_MEM 1023
+ u32 ptp_ext_clk_rate;
+ u32 ptp_ext_tstamp;
+#define FWDATA_RESERVED_MEM 1022
u64 reserved[FWDATA_RESERVED_MEM];
+#define CGX_MAX 5
+#define CGX_LMACS_MAX 4
+ struct cgx_lmac_fwdata_s cgx_fw_data[CGX_MAX][CGX_LMACS_MAX];
+ /* Do not add new fields below this line */
};
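A note on the arithmetic in this hunk: the two new u32 fields (ptp_ext_clk_rate and ptp_ext_tstamp) together occupy 8 bytes, i.e. exactly one u64 slot, which is why FWDATA_RESERVED_MEM shrinks from 1023 to 1022 and the end of the reserved[] region stays at the same offset before cgx_fw_data[] is appended.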
struct ptp;
@@ -300,11 +469,22 @@ struct npc_kpu_profile_adapter {
const struct npc_lt_def_cfg *lt_def;
const struct npc_kpu_profile_action *ikpu; /* array[pkinds] */
const struct npc_kpu_profile *kpu; /* array[kpus] */
- const struct npc_mcam_kex *mkex;
+ struct npc_mcam_kex *mkex;
+ bool custom;
size_t pkinds;
size_t kpus;
};
+#define RVU_SWITCH_LBK_CHAN 63
+
+struct rvu_switch {
+ struct mutex switch_lock; /* Serialize flow installation */
+ u32 used_entries;
+ u16 *entry2pcifunc;
+ u16 mode;
+ u16 start_entry;
+};
+
struct rvu {
void __iomem *afreg_base;
void __iomem *pfreg_base;
@@ -313,8 +493,10 @@ struct rvu {
struct rvu_hwinfo *hw;
struct rvu_pfvf *pf;
struct rvu_pfvf *hwvf;
+ struct rvu_limits pf_limits;
struct mutex rsrc_lock; /* Serialize resource alloc/free */
int vfs; /* Number of VFs attached to RVU */
+ int nix_blkaddr[MAX_NIX_BLKS];
/* Mbox */
struct mbox_wq_info afpf_wq_info;
@@ -334,6 +516,7 @@ struct rvu {
/* CGX */
#define PF_CGXMAP_BASE 1 /* PF 0 is reserved for RVU PF */
+ u16 cgx_mapped_vfs; /* maximum CGX mapped VFs */
u8 cgx_mapped_pfs;
u8 cgx_cnt_max; /* CGX port count max */
u8 *pf2cgxlmac_map; /* pf to cgx_lmac map */
@@ -349,18 +532,28 @@ struct rvu {
struct mutex cgx_cfg_lock; /* serialize cgx configuration */
char mkex_pfl_name[MKEX_NAME_LEN]; /* Configured MKEX profile name */
+ char kpu_pfl_name[KPU_NAME_LEN]; /* Configured KPU profile name */
/* Firmware data */
struct rvu_fwdata *fwdata;
+ void *kpu_fwdata;
+ size_t kpu_fwdata_sz;
+ void __iomem *kpu_prfl_addr;
/* NPC KPU data */
struct npc_kpu_profile_adapter kpu;
struct ptp *ptp;
+ int cpt_pf_num;
+
#ifdef CONFIG_DEBUG_FS
struct rvu_debugfs rvu_dbg;
#endif
+ struct rvu_devlink *rvu_dl;
+
+ /* RVU switch implementation over NPC with DMAC rules */
+ struct rvu_switch rswitch;
};
static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
@@ -384,30 +577,132 @@ static inline u64 rvupf_read64(struct rvu *rvu, u64 offset)
}
/* Silicon revisions */
+static inline bool is_rvu_pre_96xx_C0(struct rvu *rvu)
+{
+ struct pci_dev *pdev = rvu->pdev;
+ /* 96XX A0/B0, 95XX A0/A1/B0 chips */
+ return ((pdev->revision == 0x00) || (pdev->revision == 0x01) ||
+ (pdev->revision == 0x10) || (pdev->revision == 0x11) ||
+ (pdev->revision == 0x14));
+}
+
static inline bool is_rvu_96xx_A0(struct rvu *rvu)
{
struct pci_dev *pdev = rvu->pdev;
- return (pdev->revision == 0x00) &&
- (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX);
+ return (pdev->revision == 0x00);
}
static inline bool is_rvu_96xx_B0(struct rvu *rvu)
{
struct pci_dev *pdev = rvu->pdev;
- return ((pdev->revision == 0x00) || (pdev->revision == 0x01)) &&
- (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX);
+ return (pdev->revision == 0x00) || (pdev->revision == 0x01);
+}
+
+static inline bool is_rvu_95xx_A0(struct rvu *rvu)
+{
+ struct pci_dev *pdev = rvu->pdev;
+
+ return (pdev->revision == 0x10) || (pdev->revision == 0x11);
+}
+
+/* REVID for PCIe devices.
+ * Bits 0..1: minor pass, bit 3..2: major pass
+ * bits 7..4: midr id
+ */
+#define PCI_REVISION_ID_96XX 0x00
+#define PCI_REVISION_ID_95XX 0x10
+#define PCI_REVISION_ID_95XXN 0x20
+#define PCI_REVISION_ID_98XX 0x30
+#define PCI_REVISION_ID_95XXMM 0x40
+#define PCI_REVISION_ID_95XXO 0xE0
+
+static inline bool is_rvu_otx2(struct rvu *rvu)
+{
+ struct pci_dev *pdev = rvu->pdev;
+
+ u8 midr = pdev->revision & 0xF0;
+
+ return (midr == PCI_REVISION_ID_96XX || midr == PCI_REVISION_ID_95XX ||
+ midr == PCI_REVISION_ID_95XXN || midr == PCI_REVISION_ID_98XX ||
+ midr == PCI_REVISION_ID_95XXMM || midr == PCI_REVISION_ID_95XXO);
+}
+
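As a reading aid for the REVID layout documented above, a minimal sketch (editor's illustration only; the helper name is hypothetical):

/* e.g. rev = 0x31 -> midr 0x30 (98XX), major pass 0, minor pass 1 */
static inline void example_decode_revid(u8 rev, u8 *midr, u8 *major, u8 *minor)
{
        *midr  = rev & 0xF0;        /* bits 7..4 */
        *major = (rev >> 2) & 0x3;  /* bits 3..2 */
        *minor = rev & 0x3;         /* bits 1..0 */
}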
+static inline bool is_cgx_mapped_to_nix(unsigned short id, u8 cgx_id)
+{
+ /* On CNF10KA and CNF10KB silicons only two CGX blocks are connected
+ * to NIX.
+ */
+ if (id == PCI_SUBSYS_DEVID_CNF10K_A || id == PCI_SUBSYS_DEVID_CNF10K_B)
+ return cgx_id <= 1;
+
+ return !(cgx_id && !(id == PCI_SUBSYS_DEVID_96XX ||
+ id == PCI_SUBSYS_DEVID_98XX ||
+ id == PCI_SUBSYS_DEVID_CN10K_A));
+}
+
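In plain terms, is_cgx_mapped_to_nix() above reports CGX0 as always mapped to a NIX block; a non-zero cgx_id is considered mapped only on 96XX, 98XX and CN10K_A parts, while CNF10K_A/CNF10K_B accept cgx_id 0 and 1 only.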
+static inline u16 rvu_nix_chan_cgx(struct rvu *rvu, u8 cgxid,
+ u8 lmacid, u8 chan)
+{
+ u64 nix_const = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_CONST);
+ u16 cgx_chans = nix_const & 0xFFULL;
+ struct rvu_hwinfo *hw = rvu->hw;
+
+ if (!hw->cap.programmable_chans)
+ return NIX_CHAN_CGX_LMAC_CHX(cgxid, lmacid, chan);
+
+ return rvu->hw->cgx_chan_base +
+ (cgxid * hw->lmac_per_cgx + lmacid) * cgx_chans + chan;
+}
+
+static inline u16 rvu_nix_chan_lbk(struct rvu *rvu, u8 lbkid,
+ u8 chan)
+{
+ u64 nix_const = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_CONST);
+ u16 lbk_chans = (nix_const >> 16) & 0xFFULL;
+ struct rvu_hwinfo *hw = rvu->hw;
+
+ if (!hw->cap.programmable_chans)
+ return NIX_CHAN_LBK_CHX(lbkid, chan);
+
+ return rvu->hw->lbk_chan_base + lbkid * lbk_chans + chan;
+}
+
+static inline u16 rvu_nix_chan_sdp(struct rvu *rvu, u8 chan)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+
+ if (!hw->cap.programmable_chans)
+ return NIX_CHAN_SDP_CHX(chan);
+
+ return hw->sdp_chan_base + chan;
+}
+
+static inline u16 rvu_nix_chan_cpt(struct rvu *rvu, u8 chan)
+{
+ return rvu->hw->cpt_chan_base + chan;
}
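A worked example of the programmable-channel arithmetic above (the numbers are assumptions for illustration only): with cgx_chan_base = 0x800, lmac_per_cgx = 4 and NIX_AF_CONST[7:0] = 16 channels per LMAC, CGX1/LMAC2 channel 3 maps to 0x800 + (1 * 4 + 2) * 16 + 3 = 0x863; on silicon without programmable channels the same call simply returns NIX_CHAN_CGX_LMAC_CHX(1, 2, 3).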
/* Function Prototypes
* RVU
*/
-static inline int is_afvf(u16 pcifunc)
+static inline bool is_afvf(u16 pcifunc)
{
return !(pcifunc & ~RVU_PFVF_FUNC_MASK);
}
+static inline bool is_vf(u16 pcifunc)
+{
+ return !!(pcifunc & RVU_PFVF_FUNC_MASK);
+}
+
+/* check if PF_FUNC is AF */
+static inline bool is_pffunc_af(u16 pcifunc)
+{
+ return !pcifunc;
+}
+
static inline bool is_rvu_fwdata_valid(struct rvu *rvu)
{
return (rvu->fwdata->header_magic == RVU_FWDATA_HEADER_MAGIC) &&
@@ -415,11 +710,14 @@ static inline bool is_rvu_fwdata_valid(struct rvu *rvu)
}
int rvu_alloc_bitmap(struct rsrc_bmap *rsrc);
+void rvu_free_bitmap(struct rsrc_bmap *rsrc);
int rvu_alloc_rsrc(struct rsrc_bmap *rsrc);
void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id);
+bool is_rsrc_free(struct rsrc_bmap *rsrc, int id);
int rvu_rsrc_free_count(struct rsrc_bmap *rsrc);
int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc);
bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc);
+u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blkaddr);
int rvu_get_pf(u16 pcifunc);
struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc);
void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf);
@@ -429,6 +727,10 @@ int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot);
int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf);
int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc);
int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero);
+int rvu_get_num_lbk_chans(void);
+int rvu_ndc_sync(struct rvu *rvu, int lfblkid, int lfidx, u64 lfoffset);
+int rvu_get_blkaddr_from_slot(struct rvu *rvu, int blktype, u16 pcifunc,
+ u16 global_slot, u16 *slot_in_block);
/* RVU HW reg validation */
enum regmap_block {
@@ -443,10 +745,17 @@ int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
int qsize, int inst_size, int res_size);
void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq);
+/* SDP APIs */
+int rvu_sdp_init(struct rvu *rvu);
+bool is_sdp_pfvf(u16 pcifunc);
+bool is_sdp_pf(u16 pcifunc);
+bool is_sdp_vf(u16 pcifunc);
+
/* CGX APIs */
static inline bool is_pf_cgxmapped(struct rvu *rvu, u8 pf)
{
- return (pf >= PF_CGXMAP_BASE && pf <= rvu->cgx_mapped_pfs);
+ return (pf >= PF_CGXMAP_BASE && pf <= rvu->cgx_mapped_pfs) &&
+ !is_sdp_pf(pf << RVU_PFVF_PF_SHIFT);
}
static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id)
@@ -455,6 +764,12 @@ static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id)
*lmac_id = (map & 0xF);
}
+static inline bool is_cgx_vf(struct rvu *rvu, u16 pcifunc)
+{
+ return ((pcifunc & RVU_PFVF_FUNC_MASK) &&
+ is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)));
+}
+
#define M(_name, _id, fn_name, req, rsp) \
int rvu_mbox_handler_ ## fn_name(struct rvu *, struct req *, struct rsp *);
MBOX_MESSAGES
@@ -465,9 +780,36 @@ int rvu_cgx_exit(struct rvu *rvu);
void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu);
int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start);
void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable);
+void rvu_cgx_enadis_higig2(struct rvu *rvu, int pf, bool enable);
+bool rvu_cgx_is_higig2_enabled(struct rvu *rvu, int pf);
+void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc);
int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start);
int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id, int index,
int rxtxflag, u64 *stat);
+bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc);
+bool rvu_cgx_is_pkind_config_permitted(struct rvu *rvu, u16 pcifunc);
+void *rvu_first_cgx_pdata(struct rvu *rvu);
+int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id);
+int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable);
+int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause,
+ u16 pfc_en);
+int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause);
+
+/* SSO APIs */
+int rvu_sso_init(struct rvu *rvu);
+void rvu_sso_freemem(struct rvu *rvu);
+int rvu_sso_register_interrupts(struct rvu *rvu);
+void rvu_sso_unregister_interrupts(struct rvu *rvu);
+int rvu_sso_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot_id);
+int rvu_ssow_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot_id);
+void rvu_sso_hwgrp_config_thresh(struct rvu *rvu, int blkaddr, int lf);
+void rvu_sso_block_cn10k_init(struct rvu *rvu, int blkaddr);
+void rvu_sso_lf_drain_queues(struct rvu *rvu, u16 pcifunc, int lf, int slot);
+int rvu_sso_cleanup_xaq_aura(struct rvu *rvu, u16 pcifunc, int hwgrp);
+int rvu_sso_poll_aura_cnt(struct rvu *rvu, int npa_blkaddr, int aura);
+void rvu_sso_deinit_xaq_aura(struct rvu *rvu, int blkaddr, int npa_blkaddr,
+ int aura, int lf);
+
/* NPA APIs */
int rvu_npa_init(struct rvu *rvu);
void rvu_npa_freemem(struct rvu *rvu);
@@ -484,7 +826,26 @@ void rvu_nix_freemem(struct rvu *rvu);
int rvu_get_nixlf_count(struct rvu *rvu);
void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int npalf);
int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr);
-int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add);
+int nix_update_mce_list(struct rvu *rvu, u16 pcifunc,
+ struct nix_mce_list *mce_list,
+ int mce_idx, int mcam_index, bool add);
+void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type,
+ struct nix_mce_list **mce_list, int *mce_idx);
+bool rvu_nix_is_ptp_tx_enabled(struct rvu *rvu, u16 pcifunc);
+struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr);
+int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr);
+void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc);
+int nix_get_struct_ptrs(struct rvu *rvu, u16 pcifunc,
+ struct nix_hw **nix_hw, int *blkaddr);
+int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc,
+ u16 rq_idx, u16 match_id);
+int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw,
+ struct nix_cn10k_aq_enq_req *aq_req,
+ struct nix_cn10k_aq_enq_rsp *aq_rsp,
+ u16 pcifunc, u8 ctype, u32 qidx);
+int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc);
+u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu);
+u32 convert_bytes_to_dwrr_mtu(u32 bytes);
/* NPC APIs */
int rvu_npc_init(struct rvu *rvu);
@@ -495,14 +856,21 @@ int npc_config_ts_kpuaction(struct rvu *rvu, int pf, u16 pcifunc, bool en);
void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, u64 chan, u8 *mac_addr);
void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
- int nixlf, u64 chan, bool allmulti);
-void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf);
-void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf);
+ int nixlf, u64 chan, u8 chan_cnt);
+void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
+ bool enable);
void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, u64 chan);
-void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, bool enable);
-int rvu_npc_update_rxvlan(struct rvu *rvu, u16 pcifunc, int nixlf);
+void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
+ bool enable);
+void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
+ u64 chan);
+void rvu_npc_enable_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
+ bool enable);
+void npc_enadis_default_mce_entry(struct rvu *rvu, u16 pcifunc,
+ int nixlf, int type, bool enable);
void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
+void rvu_npc_free_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
@@ -513,6 +881,52 @@ void rvu_npc_get_mcam_entry_alloc_info(struct rvu *rvu, u16 pcifunc,
void rvu_npc_get_mcam_counter_alloc_info(struct rvu *rvu, u16 pcifunc,
int blkaddr, int *alloc_cnt,
int *enable_cnt);
+bool is_npc_intf_tx(u8 intf);
+bool is_npc_intf_rx(u8 intf);
+bool is_npc_interface_valid(struct rvu *rvu, u8 intf);
+int npc_flow_steering_init(struct rvu *rvu, int blkaddr);
+const char *npc_get_field_name(u8 hdr);
+int npc_get_bank(struct npc_mcam *mcam, int index);
+void npc_mcam_enable_flows(struct rvu *rvu, u16 target);
+void npc_mcam_disable_flows(struct rvu *rvu, u16 target);
+void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index, bool enable);
+void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, u16 src, struct mcam_entry *entry,
+ u8 *intf, u8 *ena);
+bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature);
+u32 rvu_cgx_get_fifolen(struct rvu *rvu);
+void *rvu_first_cgx_pdata(struct rvu *rvu);
+int npc_get_nixlf_mcam_index(struct npc_mcam *mcam, u16 pcifunc, int nixlf,
+ int type);
+bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr,
+ int index);
+
+/* CPT APIs */
+int rvu_cpt_register_interrupts(struct rvu *rvu);
+void rvu_cpt_unregister_interrupts(struct rvu *rvu);
+int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf,
+ int slot);
+int rvu_cpt_ctx_flush(struct rvu *rvu, u16 pcifunc);
+int rvu_cpt_init(struct rvu *rvu);
+
+/* CN10K RVU */
+int rvu_set_channels_base(struct rvu *rvu);
+void rvu_program_channels(struct rvu *rvu);
+
+/* CN10K RVU - LMT*/
+void rvu_reset_lmt_map_tbl(struct rvu *rvu, u16 pcifunc);
+void rvu_apr_block_cn10k_init(struct rvu *rvu);
+
+/* TIM APIs */
+int rvu_tim_init(struct rvu *rvu);
+int rvu_tim_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot);
+
+/* REE APIs */
+int rvu_ree_init(struct rvu *rvu);
+void rvu_ree_freemem(struct rvu *rvu);
+int rvu_ree_register_interrupts(struct rvu *rvu);
+void rvu_ree_unregister_interrupts(struct rvu *rvu);
#ifdef CONFIG_DEBUG_FS
void rvu_dbg_init(struct rvu *rvu);
@@ -521,4 +935,37 @@ void rvu_dbg_exit(struct rvu *rvu);
static inline void rvu_dbg_init(struct rvu *rvu) {}
static inline void rvu_dbg_exit(struct rvu *rvu) {}
#endif
+
+/* HW workarounds/fixes */
+void rvu_nix_txsch_lock(struct nix_hw *nix_hw);
+void rvu_nix_txsch_unlock(struct nix_hw *nix_hw);
+void rvu_nix_update_link_credits(struct rvu *rvu, int blkaddr,
+ int link, u64 ncredits);
+
+void rvu_nix_update_sq_smq_mapping(struct rvu *rvu, int blkaddr, int nixlf,
+ u16 sq, u16 smq);
+void rvu_nix_txsch_config_changed(struct nix_hw *nix_hw);
+ssize_t rvu_nix_get_tx_stall_counters(struct nix_hw *nix_hw,
+ char __user *buffer, loff_t *ppos);
+int rvu_nix_fixes_init(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr);
+void rvu_nix_fixes_exit(struct rvu *rvu, struct nix_hw *nix_hw);
+int rvu_tim_lookup_rsrc(struct rvu *rvu, struct rvu_block *block,
+ u16 pcifunc, int slot);
+int rvu_npc_get_tx_nibble_cfg(struct rvu *rvu, u64 nibble_ena);
+bool is_parse_nibble_config_valid(struct rvu *rvu,
+ struct npc_mcam_kex *mcam_kex);
+int
+rvu_npc_set_parse_mode(struct rvu *rvu, u16 pcifunc, u64 mode, u8 dir,
+ u64 pkind, u8 var_len_off, u8 var_len_off_mask,
+ u8 shift_dir);
+void rvu_tim_hw_fixes(struct rvu *rvu, int blkaddr);
+
+/* CN10K NIX */
+void rvu_nix_block_cn10k_init(struct rvu *rvu, struct nix_hw *nix_hw);
+
+/* RVU Switch */
+void rvu_switch_enable(struct rvu *rvu);
+void rvu_switch_disable(struct rvu *rvu);
+void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc);
+
#endif /* RVU_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index f4ecc755eaff..9e4b370fb2b9 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -1,11 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
-/* Marvell OcteonTx2 RVU Admin Function driver
+/* Marvell RVU Admin Function driver
*
- * Copyright (C) 2018 Marvell International Ltd.
+ * Copyright (C) 2018 Marvell.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/types.h>
@@ -14,6 +11,7 @@
#include "rvu.h"
#include "cgx.h"
+#include "lmac_common.h"
#include "rvu_reg.h"
#include "rvu_trace.h"
@@ -42,13 +40,27 @@ static struct _req_type __maybe_unused \
MBOX_UP_CGX_MESSAGES
#undef M
+bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature)
+{
+ u8 cgx_id, lmac_id;
+ void *cgxd;
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return 0;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+
+ return (cgx_features_get(cgxd) & feature);
+}
+
/* Returns bitmap of mapped PFs */
-static u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
+static u64 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
{
return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id];
}
-static int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id)
+int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id)
{
unsigned long pfmap;
@@ -58,7 +70,8 @@ static int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id)
if (!pfmap)
return -ENODEV;
else
- return find_first_bit(&pfmap, 16);
+ return find_first_bit(&pfmap,
+ rvu->cgx_cnt_max * MAX_LMAC_PER_CGX);
}
static u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
@@ -74,13 +87,44 @@ void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu)
return rvu->cgx_idmap[cgx_id];
}
+/* Return the first enabled CGX instance; if none are enabled, return NULL */
+void *rvu_first_cgx_pdata(struct rvu *rvu)
+{
+ int first_enabled_cgx = 0;
+ void *cgxd = NULL;
+
+ for (; first_enabled_cgx < rvu->cgx_cnt_max; first_enabled_cgx++) {
+ cgxd = rvu_cgx_pdata(first_enabled_cgx, rvu);
+ if (cgxd)
+ break;
+ }
+
+ return cgxd;
+}
+
+/* Based on P2X connectivity find mapped NIX block for a PF */
+static void rvu_map_cgx_nix_block(struct rvu *rvu, int pf,
+ int cgx_id, int lmac_id)
+{
+ struct rvu_pfvf *pfvf = &rvu->pf[pf];
+ u8 p2x;
+
+ p2x = cgx_lmac_get_p2x(cgx_id, lmac_id);
+ /* Firmware sets P2X_SELECT as either NIX0 or NIX1 */
+ pfvf->nix_blkaddr = BLKADDR_NIX0;
+ if (p2x == CMR_P2X_SEL_NIX1)
+ pfvf->nix_blkaddr = BLKADDR_NIX1;
+}
+
static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
{
struct npc_pkind *pkind = &rvu->hw->pkind;
int cgx_cnt_max = rvu->cgx_cnt_max;
- int cgx, lmac_cnt, lmac;
int pf = PF_CGXMAP_BASE;
+ unsigned long lmac_bmap;
int size, free_pkind;
+ int cgx, lmac, iter;
+ int numvfs, hwvfs;
if (!cgx_cnt_max)
return 0;
@@ -102,7 +146,7 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
/* Reverse map table */
rvu->cgxlmac2pf_map = devm_kzalloc(rvu->dev,
- cgx_cnt_max * MAX_LMAC_PER_CGX * sizeof(u16),
+ cgx_cnt_max * MAX_LMAC_PER_CGX * sizeof(u64),
GFP_KERNEL);
if (!rvu->cgxlmac2pf_map)
return -ENOMEM;
@@ -111,13 +155,19 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
for (cgx = 0; cgx < cgx_cnt_max; cgx++) {
if (!rvu_cgx_pdata(cgx, rvu))
continue;
- lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
- for (lmac = 0; lmac < lmac_cnt; lmac++, pf++) {
+ lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
+ for_each_set_bit(iter, &lmac_bmap, MAX_LMAC_PER_CGX) {
+ lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu),
+ iter);
rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;
free_pkind = rvu_alloc_rsrc(&pkind->rsrc);
pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16;
+ rvu_map_cgx_nix_block(rvu, pf, cgx, lmac);
rvu->cgx_mapped_pfs++;
+ rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvfs);
+ rvu->cgx_mapped_vfs += numvfs;
+ pf++;
}
}
return 0;
@@ -139,8 +189,10 @@ static int rvu_cgx_send_link_info(int cgx_id, int lmac_id, struct rvu *rvu)
&qentry->link_event.link_uinfo);
qentry->link_event.cgx_id = cgx_id;
qentry->link_event.lmac_id = lmac_id;
- if (err)
+ if (err) {
+ kfree(qentry);
goto skip_add;
+ }
list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
skip_add:
spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);
@@ -183,16 +235,13 @@ static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id);
do {
- pfid = find_first_bit(&pfmap, 16);
+ pfid = find_first_bit(&pfmap,
+ rvu->cgx_cnt_max * MAX_LMAC_PER_CGX);
clear_bit(pfid, &pfmap);
/* check if notification is enabled */
- if (!test_bit(pfid, &rvu->pf_notify_bmap)) {
- dev_info(rvu->dev, "cgx %d: lmac %d Link status %s\n",
- event->cgx_id, event->lmac_id,
- linfo->link_up ? "UP" : "DOWN");
+ if (!test_bit(pfid, &rvu->pf_notify_bmap))
continue;
- }
/* Send mbox message to PF */
msg = otx2_mbox_alloc_msg_cgx_link_event(rvu, pfid);
@@ -236,6 +285,7 @@ static void cgx_evhandler_task(struct work_struct *work)
static int cgx_lmac_event_handler_init(struct rvu *rvu)
{
+ unsigned long lmac_bmap;
struct cgx_event_cb cb;
int cgx, lmac, err;
void *cgxd;
@@ -256,7 +306,8 @@ static int cgx_lmac_event_handler_init(struct rvu *rvu)
cgxd = rvu_cgx_pdata(cgx, rvu);
if (!cgxd)
continue;
- for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++) {
+ lmac_bmap = cgx_get_lmac_bmap(cgxd);
+ for_each_set_bit(lmac, &lmac_bmap, MAX_LMAC_PER_CGX) {
err = cgx_lmac_evh_register(&cb, cgxd, lmac);
if (err)
dev_err(rvu->dev,
@@ -288,7 +339,7 @@ int rvu_cgx_init(struct rvu *rvu)
rvu->cgx_cnt_max = cgx_get_cgxcnt_max();
if (!rvu->cgx_cnt_max) {
dev_info(rvu->dev, "No CGX devices found!\n");
- return -ENODEV;
+ return 0;
}
rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt_max *
@@ -334,6 +385,7 @@ int rvu_cgx_init(struct rvu *rvu)
int rvu_cgx_exit(struct rvu *rvu)
{
+ unsigned long lmac_bmap;
int cgx, lmac;
void *cgxd;
@@ -341,7 +393,8 @@ int rvu_cgx_exit(struct rvu *rvu)
cgxd = rvu_cgx_pdata(cgx, rvu);
if (!cgxd)
continue;
- for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++)
+ lmac_bmap = cgx_get_lmac_bmap(cgxd);
+ for_each_set_bit(lmac, &lmac_bmap, MAX_LMAC_PER_CGX)
cgx_lmac_evh_unregister(cgxd, lmac);
}
@@ -356,7 +409,7 @@ int rvu_cgx_exit(struct rvu *rvu)
* VF's of mapped PF and other PFs are not allowed. This fn() checks
* whether a PFFUNC is permitted to do the config or not.
*/
-static bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc)
+bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc)
{
if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
@@ -366,6 +419,7 @@ static bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc)
void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable)
{
+ struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
void *cgxd;
@@ -375,26 +429,92 @@ void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable)
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ mac_ops = get_mac_ops(cgxd);
/* Set / clear CTL_BCK to control pause frame forwarding to NIX */
if (enable)
- cgx_lmac_enadis_rx_pause_fwding(cgxd, lmac_id, true);
+ mac_ops->mac_enadis_rx_pause_fwding(cgxd, lmac_id, true);
else
- cgx_lmac_enadis_rx_pause_fwding(cgxd, lmac_id, false);
+ mac_ops->mac_enadis_rx_pause_fwding(cgxd, lmac_id, false);
+}
+
+void rvu_cgx_enadis_higig2(struct rvu *rvu, int pf, bool enable)
+{
+ u8 cgx_id, lmac_id;
+ void *cgxd;
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ cgx_lmac_enadis_higig2(cgxd, lmac_id, enable);
+}
+
+bool rvu_cgx_is_higig2_enabled(struct rvu *rvu, int pf)
+{
+ u8 cgx_id, lmac_id;
+ void *cgxd;
+
+ if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_HIGIG2))
+ return 0;
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return false;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+
+ return is_higig2_enabled(cgxd, lmac_id);
}
int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
{
int pf = rvu_get_pf(pcifunc);
+ struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
+ void *cgxd;
if (!is_cgx_config_permitted(rvu, pcifunc))
- return -EPERM;
+ return LMAC_AF_ERR_PERM_DENIED;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ mac_ops = get_mac_ops(cgxd);
- cgx_lmac_rx_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, start);
+ return mac_ops->mac_rx_tx_enable(cgxd, lmac_id, start);
+}
- return 0;
+int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable)
+{
+ struct mac_ops *mac_ops;
+
+ mac_ops = get_mac_ops(cgxd);
+ return mac_ops->mac_tx_enable(cgxd, lmac_id, enable);
+}
+
+void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc)
+{
+ int pf = rvu_get_pf(pcifunc);
+ int i = 0, lmac_count = 0;
+ u8 max_dmac_filters;
+ u8 cgx_id, lmac_id;
+ void *cgx_dev;
+
+ if (!is_cgx_config_permitted(rvu, pcifunc))
+ return;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgx_dev = cgx_get_pdata(cgx_id);
+ lmac_count = cgx_get_lmac_cnt(cgx_dev);
+ max_dmac_filters = MAX_DMAC_ENTRIES_PER_CGX / lmac_count;
+
+ for (i = 0; i < max_dmac_filters; i++)
+ cgx_lmac_addr_del(cgx_id, lmac_id, i);
+
+ /* As cgx_lmac_addr_del() does not clear the entry at index 0,
+ * it needs to be done explicitly
+ */
+ cgx_lmac_addr_reset(cgx_id, lmac_id);
}
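To make the filter partitioning above concrete (the table size is an assumed value for illustration): if MAX_DMAC_ENTRIES_PER_CGX were 32 and the CGX had 4 LMACs, each LMAC would own 32 / 4 = 8 slots, so the loop deletes indices 0-7 and cgx_lmac_addr_reset() then clears the index 0 entry that cgx_lmac_addr_del() leaves behind.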
int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req,
@@ -411,78 +531,194 @@ int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req,
return 0;
}
-int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req,
- struct cgx_stats_rsp *rsp)
+static int rvu_lmac_get_stats(struct rvu *rvu, struct msg_req *req,
+ void *rsp)
{
int pf = rvu_get_pf(req->hdr.pcifunc);
+ struct mac_ops *mac_ops;
int stat = 0, err = 0;
u64 tx_stat, rx_stat;
u8 cgx_idx, lmac;
void *cgxd;
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
- return -ENODEV;
+ return LMAC_AF_ERR_PERM_DENIED;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
cgxd = rvu_cgx_pdata(cgx_idx, rvu);
+ mac_ops = get_mac_ops(cgxd);
- /* Rx stats */
- while (stat < CGX_RX_STATS_COUNT) {
- err = cgx_get_rx_stats(cgxd, lmac, stat, &rx_stat);
+ while (stat < mac_ops->rx_stats_cnt) {
+ err = mac_ops->mac_get_rx_stats(cgxd, lmac, stat, &rx_stat);
if (err)
return err;
- rsp->rx_stats[stat] = rx_stat;
+ if (mac_ops->rx_stats_cnt == RPM_RX_STATS_COUNT)
+ ((struct rpm_stats_rsp *)rsp)->rx_stats[stat] = rx_stat;
+ else
+ ((struct cgx_stats_rsp *)rsp)->rx_stats[stat] = rx_stat;
stat++;
}
/* Tx stats */
stat = 0;
- while (stat < CGX_TX_STATS_COUNT) {
- err = cgx_get_tx_stats(cgxd, lmac, stat, &tx_stat);
+ while (stat < mac_ops->tx_stats_cnt) {
+ err = mac_ops->mac_get_tx_stats(cgxd, lmac, stat, &tx_stat);
if (err)
return err;
- rsp->tx_stats[stat] = tx_stat;
+ if (mac_ops->tx_stats_cnt == RPM_TX_STATS_COUNT)
+ ((struct rpm_stats_rsp *)rsp)->tx_stats[stat] = tx_stat;
+ else
+ ((struct cgx_stats_rsp *)rsp)->tx_stats[stat] = tx_stat;
stat++;
}
return 0;
}
+int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req,
+ struct cgx_stats_rsp *rsp)
+{
+ return rvu_lmac_get_stats(rvu, req, (void *)rsp);
+}
+
+int rvu_mbox_handler_rpm_stats(struct rvu *rvu, struct msg_req *req,
+ struct rpm_stats_rsp *rsp)
+{
+ return rvu_lmac_get_stats(rvu, req, (void *)rsp);
+}
+
+int rvu_mbox_handler_cgx_fec_stats(struct rvu *rvu,
+ struct msg_req *req,
+ struct cgx_fec_stats_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_idx, lmac;
+ void *cgxd;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return LMAC_AF_ERR_PERM_DENIED;
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
+
+ cgxd = rvu_cgx_pdata(cgx_idx, rvu);
+ return cgx_get_fec_stats(cgxd, lmac, rsp);
+}
+
+int rvu_mbox_handler_cgx_stats_rst(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ struct rvu_pfvf *parent_pf;
+ u8 cgx_idx, lmac;
+ void *cgxd;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return LMAC_AF_ERR_PERM_DENIED;
+
+ parent_pf = &rvu->pf[pf];
+ /* To ensure that resetting CGX stats won't affect VF stats,
+ * check whether the CGX is used by only the PF interface.
+ * If not, return.
+ */
+ if (parent_pf->cgx_users > 1) {
+ dev_info(rvu->dev, "CGX busy, could not reset statistics\n");
+ return 0;
+ }
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
+ cgxd = rvu_cgx_pdata(cgx_idx, rvu);
+
+ return cgx_stats_rst(cgxd, lmac);
+}
+
int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
struct cgx_mac_addr_set_or_get *req,
struct cgx_mac_addr_set_or_get *rsp)
{
int pf = rvu_get_pf(req->hdr.pcifunc);
+ struct rvu_pfvf *pfvf;
u8 cgx_id, lmac_id;
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
- return -EPERM;
+ return LMAC_AF_ERR_PF_NOT_MAPPED;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ pfvf = &rvu->pf[pf];
+ memcpy(pfvf->mac_addr, req->mac_addr, ETH_ALEN);
cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr);
return 0;
}
-int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
- struct cgx_mac_addr_set_or_get *req,
- struct cgx_mac_addr_set_or_get *rsp)
+int rvu_mbox_handler_cgx_mac_addr_add(struct rvu *rvu,
+ struct cgx_mac_addr_add_req *req,
+ struct cgx_mac_addr_add_rsp *rsp)
{
int pf = rvu_get_pf(req->hdr.pcifunc);
u8 cgx_id, lmac_id;
- int rc = 0, i;
- u64 cfg;
+ int rc = 0;
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
- return -EPERM;
+ return LMAC_AF_ERR_PERM_DENIED;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ rc = cgx_lmac_addr_add(cgx_id, lmac_id, req->mac_addr);
+ if (rc >= 0) {
+ rsp->index = rc;
+ return 0;
+ }
+
+ return rc;
+}
+
+int rvu_mbox_handler_cgx_mac_addr_del(struct rvu *rvu,
+ struct cgx_mac_addr_del_req *req,
+ struct msg_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return LMAC_AF_ERR_PERM_DENIED;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ return cgx_lmac_addr_del(cgx_id, lmac_id, req->index);
+}
+
+int rvu_mbox_handler_cgx_mac_max_entries_get(struct rvu *rvu,
+ struct msg_req *req,
+ struct cgx_max_dmac_entries_get_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ /* If the msg is received from PFs (which are not mapped to CGX LMACs)
+ * or from a VF, then no entries are allocated for DMAC filters at CGX
+ * level. So return zero.
+ */
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) {
+ rsp->max_dmac_filters = 0;
+ return 0;
+ }
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ rsp->max_dmac_filters = cgx_lmac_addr_max_entries_get(cgx_id, lmac_id);
+ return 0;
+}
+
+int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
+ struct cgx_mac_addr_set_or_get *req,
+ struct cgx_mac_addr_set_or_get *rsp)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+ int i;
- rsp->hdr.rc = rc;
- cfg = cgx_lmac_addr_get(cgx_id, lmac_id);
- /* copy 48 bit mac address to req->mac_addr */
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return LMAC_AF_ERR_PF_NOT_MAPPED;
+
+ /* copy 48 bit mac address to rsp->mac_addr */
for (i = 0; i < ETH_ALEN; i++)
- rsp->mac_addr[i] = cfg >> (ETH_ALEN - 1 - i) * 8;
+ rsp->mac_addr[i] = pfvf->mac_addr[i];
+
return 0;
}
@@ -494,7 +730,7 @@ int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
u8 cgx_id, lmac_id;
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
- return -EPERM;
+ return LMAC_AF_ERR_PERM_DENIED;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
@@ -509,7 +745,7 @@ int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
u8 cgx_id, lmac_id;
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
- return -EPERM;
+ return LMAC_AF_ERR_PERM_DENIED;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
@@ -517,30 +753,59 @@ int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
return 0;
}
+static void cgx_notify_up_ptp_info(struct rvu *rvu, int pf, bool enable)
+{
+ struct cgx_ptp_rx_info_msg *msg;
+ int err;
+
+ /* Send mbox message to PF */
+ msg = otx2_mbox_alloc_msg_cgx_ptp_rx_info(rvu, pf);
+ if (!msg) {
+ dev_err(rvu->dev, "ptp notification to pf %d failed\n", pf);
+ return;
+ }
+
+ msg->ptp_en = enable;
+ otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pf);
+ err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pf);
+ if (err)
+ dev_err(rvu->dev, "ptp notification to pf %d failed\n", pf);
+}
+
static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
int pf = rvu_get_pf(pcifunc);
+ struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
void *cgxd;
- /* This msg is expected only from PFs that are mapped to CGX LMACs,
- * if received from other PF/VF simply ACK, nothing to do.
- */
- if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
- !is_pf_cgxmapped(rvu, pf))
- return -ENODEV;
+ if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
+ return 0;
+
+ if (!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
+ return -EPERM;
+
+ /* Silicon does not support enabling time stamp in higig mode */
+ if (rvu_cgx_is_higig2_enabled(rvu, pf))
+ return NIX_AF_ERR_PTP_CONFIG_FAIL;
+
+ cgx_notify_up_ptp_info(rvu, pf, enable);
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
cgxd = rvu_cgx_pdata(cgx_id, rvu);
- cgx_lmac_ptp_config(cgxd, lmac_id, enable);
+ mac_ops = get_mac_ops(cgxd);
+ mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, enable);
+
/* If PTP is enabled then inform NPC that packets to be
* parsed by this PF will have their data shifted by 8 bytes
* and if PTP is disabled then no shift is required
*/
if (npc_config_ts_kpuaction(rvu, pf, pcifunc, enable))
- return -EINVAL;
-
+ return NIX_AF_ERR_PTP_CONFIG_FAIL;
+ /* This flag is required to clean up CGX conf if app gets killed */
+ pfvf->hw_rx_tstamp_en = enable;
return 0;
}
@@ -562,7 +827,7 @@ static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en)
u8 cgx_id, lmac_id;
if (!is_cgx_config_permitted(rvu, pcifunc))
- return -EPERM;
+ return LMAC_AF_ERR_PERM_DENIED;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
@@ -600,7 +865,7 @@ int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req,
pf = rvu_get_pf(req->hdr.pcifunc);
if (!is_pf_cgxmapped(rvu, pf))
- return -ENODEV;
+ return LMAC_AF_ERR_PF_NOT_MAPPED;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
@@ -612,14 +877,16 @@ int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req,
static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en)
{
int pf = rvu_get_pf(pcifunc);
+ struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
if (!is_cgx_config_permitted(rvu, pcifunc))
- return -EPERM;
+ return LMAC_AF_ERR_PERM_DENIED;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu));
- return cgx_lmac_internal_loopback(rvu_cgx_pdata(cgx_id, rvu),
+ return mac_ops->mac_lmac_intl_lbk(rvu_cgx_pdata(cgx_id, rvu),
lmac_id, en);
}
@@ -637,28 +904,84 @@ int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
return 0;
}
+int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause)
+{
+ int pf = rvu_get_pf(pcifunc);
+ u8 rx_pfc = 0, tx_pfc = 0;
+ struct mac_ops *mac_ops;
+ u8 cgx_id, lmac_id;
+ void *cgxd;
+
+ if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_FC))
+ return 0;
+
+ /* This msg is expected only from PF/VFs that are mapped to CGX LMACs,
+ * if received from other PF/VF simply ACK, nothing to do.
+ */
+ if (!is_pf_cgxmapped(rvu, pf))
+ return LMAC_AF_ERR_PF_NOT_MAPPED;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ mac_ops = get_mac_ops(cgxd);
+
+ mac_ops->mac_get_pfc_frm_cfg(cgxd, lmac_id, &tx_pfc, &rx_pfc);
+ if (tx_pfc || rx_pfc) {
+ dev_warn(rvu->dev,
+ "Can not configure 802.3X flow control as PFC frames are enabled");
+ return LMAC_AF_ERR_8023PAUSE_ENADIS_PERM_DENIED;
+ }
+
+ mutex_lock(&rvu->rsrc_lock);
+ if (verify_lmac_fc_cfg(cgxd, lmac_id, tx_pause, rx_pause,
+ pcifunc & RVU_PFVF_FUNC_MASK)) {
+ mutex_unlock(&rvu->rsrc_lock);
+ return LMAC_AF_ERR_PERM_DENIED;
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+
+ return mac_ops->mac_enadis_pause_frm(cgxd, lmac_id, tx_pause, rx_pause);
+}
+
int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
struct cgx_pause_frm_cfg *req,
struct cgx_pause_frm_cfg *rsp)
{
int pf = rvu_get_pf(req->hdr.pcifunc);
+ struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
+ int err = 0;
+ void *cgxd;
/* This msg is expected only from PF/VFs that are mapped to CGX LMACs,
* if received from other PF/VF simply ACK, nothing to do.
*/
if (!is_pf_cgxmapped(rvu, pf))
- return -ENODEV;
+ return LMAC_AF_ERR_PF_NOT_MAPPED;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ mac_ops = get_mac_ops(cgxd);
if (req->set)
- cgx_lmac_set_pause_frm(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
- req->tx_pause, req->rx_pause);
+ err = rvu_cgx_cfg_pause_frm(rvu, req->hdr.pcifunc, req->tx_pause, req->rx_pause);
else
- cgx_lmac_get_pause_frm(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
- &rsp->tx_pause, &rsp->rx_pause);
- return 0;
+ mac_ops->mac_get_pause_frm_status(cgxd, lmac_id, &rsp->tx_pause, &rsp->rx_pause);
+
+ return err;
+}
+
+int rvu_mbox_handler_cgx_get_phy_fec_stats(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return LMAC_AF_ERR_PF_NOT_MAPPED;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ return cgx_get_phy_fec_stats(rvu_cgx_pdata(cgx_id, rvu), lmac_id);
}
/* Finds cumulative status of NIX rx/tx counters from LF of a PF and those
@@ -706,6 +1029,42 @@ int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id,
return 0;
}
+/* Don't allow CGX mapped VFs to overwrite the PKIND config
+ * in case special PKINDs (HIGIG/EDSA) are configured
+ */
+bool rvu_cgx_is_pkind_config_permitted(struct rvu *rvu, u16 pcifunc)
+{
+ int rc, pf, rxpkind;
+ u8 cgx_id, lmac_id;
+
+ pf = rvu_get_pf(pcifunc);
+
+ /* Return here for PFs or non-CGX interfaces */
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK))
+ return true;
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return true;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ rc = cgx_get_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, &rxpkind);
+ if (rc)
+ return false;
+
+ switch (rxpkind) {
+ /* Add here specific pkinds reserved for pkt parsing */
+ case NPC_RX_HIGIG_PKIND:
+ case NPC_RX_EDSA_PKIND:
+ rc = false;
+ break;
+ default:
+ rc = true;
+ }
+
+ return rc;
+}
+
int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start)
{
struct rvu_pfvf *parent_pf, *pfvf;
@@ -752,3 +1111,231 @@ exit:
mutex_unlock(&rvu->cgx_cfg_lock);
return err;
}
+
+int rvu_mbox_handler_cgx_set_fec_param(struct rvu *rvu,
+ struct fec_mode *req,
+ struct fec_mode *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return LMAC_AF_ERR_PF_NOT_MAPPED;
+
+ if (req->fec == OTX2_FEC_OFF)
+ req->fec = OTX2_FEC_NONE;
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ rsp->fec = cgx_set_fec(req->fec, cgx_id, lmac_id);
+ return 0;
+}
+
+int rvu_mbox_handler_cgx_get_aux_link_info(struct rvu *rvu, struct msg_req *req,
+ struct cgx_fw_data *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ if (!rvu->fwdata)
+ return LMAC_AF_ERR_FIRMWARE_DATA_NOT_MAPPED;
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return LMAC_AF_ERR_PF_NOT_MAPPED;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ memcpy(&rsp->fwdata, &rvu->fwdata->cgx_fw_data[cgx_id][lmac_id],
+ sizeof(struct cgx_lmac_fwdata_s));
+ return 0;
+}
+
+int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu,
+ struct cgx_set_link_mode_req *req,
+ struct cgx_set_link_mode_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_idx, lmac;
+ void *cgxd;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return LMAC_AF_ERR_PERM_DENIED;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
+ cgxd = rvu_cgx_pdata(cgx_idx, rvu);
+ rsp->status = cgx_set_link_mode(cgxd, req->args, cgx_idx, lmac);
+ return 0;
+}
+
+int rvu_mbox_handler_cgx_set_link_state(struct rvu *rvu,
+ struct cgx_set_link_state_msg *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ u8 cgx_id, lmac_id;
+ int pf, err;
+
+ pf = rvu_get_pf(pcifunc);
+
+ if (!is_cgx_config_permitted(rvu, pcifunc))
+ return LMAC_AF_ERR_PERM_DENIED;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ err = cgx_set_link_state(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
+ !!req->enable);
+ if (err)
+ dev_warn(rvu->dev, "Cannot set link state to %s, err %d\n",
+ (req->enable) ? "enable" : "disable", err);
+
+ return err;
+}
+
+int rvu_mbox_handler_cgx_set_phy_mod_type(struct rvu *rvu,
+ struct cgx_phy_mod_type *req,
+ struct msg_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return LMAC_AF_ERR_PF_NOT_MAPPED;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ return cgx_set_phy_mod_type(req->mod, rvu_cgx_pdata(cgx_id, rvu),
+ lmac_id);
+}
+
+int rvu_mbox_handler_cgx_get_phy_mod_type(struct rvu *rvu, struct msg_req *req,
+ struct cgx_phy_mod_type *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return LMAC_AF_ERR_PF_NOT_MAPPED;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ rsp->mod = cgx_get_phy_mod_type(rvu_cgx_pdata(cgx_id, rvu), lmac_id);
+ if (rsp->mod < 0)
+ return rsp->mod;
+ return 0;
+}
+
+int rvu_mbox_handler_cgx_features_get(struct rvu *rvu,
+ struct msg_req *req,
+ struct cgx_features_info_msg *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_idx, lmac;
+ void *cgxd;
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return 0;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
+ cgxd = rvu_cgx_pdata(cgx_idx, rvu);
+ rsp->lmac_features = cgx_features_get(cgxd);
+
+ return 0;
+}
+
+u32 rvu_cgx_get_fifolen(struct rvu *rvu)
+{
+ struct mac_ops *mac_ops;
+ u32 fifo_len;
+
+ mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
+ fifo_len = mac_ops ? mac_ops->fifo_len : 0;
+
+ return fifo_len;
+}
+
+int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return LMAC_AF_ERR_PERM_DENIED;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ return cgx_lmac_addr_reset(cgx_id, lmac_id);
+}
+
+int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu,
+ struct cgx_mac_addr_update_req *req,
+ struct msg_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return LMAC_AF_ERR_PERM_DENIED;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ return cgx_lmac_addr_update(cgx_id, lmac_id, req->mac_addr, req->index);
+}
+
+int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause,
+ u8 rx_pause, u16 pfc_en)
+{
+ int pf = rvu_get_pf(pcifunc);
+ u8 rx_8023 = 0, tx_8023 = 0;
+ struct mac_ops *mac_ops;
+ u8 cgx_id, lmac_id;
+ void *cgxd;
+
+ /* This msg is expected only from PF/VFs that are mapped to CGX LMACs,
+ * if received from other PF/VF simply ACK, nothing to do.
+ */
+ if (!is_pf_cgxmapped(rvu, pf))
+ return -ENODEV;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ mac_ops = get_mac_ops(cgxd);
+
+ mac_ops->mac_get_pause_frm_status(cgxd, lmac_id, &tx_8023, &rx_8023);
+ if (tx_8023 || rx_8023) {
+ dev_warn(rvu->dev,
+ "Can not configure PFC as 802.3X pause frames are enabled");
+ return LMAC_AF_ERR_PFC_ENADIS_PERM_DENIED;
+ }
+
+ mutex_lock(&rvu->rsrc_lock);
+ if (verify_lmac_fc_cfg(cgxd, lmac_id, tx_pause, rx_pause,
+ pcifunc & RVU_PFVF_FUNC_MASK)) {
+ mutex_unlock(&rvu->rsrc_lock);
+ return LMAC_AF_ERR_PERM_DENIED;
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+
+ return mac_ops->pfc_config(cgxd, lmac_id, tx_pause, rx_pause, pfc_en);
+}
+
+int rvu_mbox_handler_cgx_prio_flow_ctrl_cfg(struct rvu *rvu,
+ struct cgx_pfc_cfg *req,
+ struct cgx_pfc_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ struct mac_ops *mac_ops;
+ u8 cgx_id, lmac_id;
+ void *cgxd;
+ int err;
+
+	/* This msg is expected only from PF/VFs that are mapped to CGX LMACs;
+	 * if it is received from any other PF/VF, simply ACK it, nothing to do.
+ */
+ if (!is_pf_cgxmapped(rvu, pf))
+ return -ENODEV;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ mac_ops = get_mac_ops(cgxd);
+
+ err = rvu_cgx_prio_flow_ctrl_cfg(rvu, req->hdr.pcifunc, req->tx_pause,
+ req->rx_pause, req->pfc_en);
+
+ mac_ops->mac_get_pfc_frm_cfg(cgxd, lmac_id, &rsp->tx_pause, &rsp->rx_pause);
+ return err;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
new file mode 100644
index 000000000000..4bf948417adb
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
@@ -0,0 +1,568 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RPM CN10K driver
+ *
+ * Copyright (C) 2020 Marvell.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/pci.h>
+#include "rvu.h"
+#include "cgx.h"
+#include "rvu_reg.h"
+
+/* RVU LMTST */
+#define LMT_TBL_OP_READ 0
+#define LMT_TBL_OP_WRITE 1
+#define LMT_MAP_TABLE_SIZE (128 * 1024)
+#define LMT_MAPTBL_ENTRY_SIZE 16
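+
+/* Each APR_LMT_MAP_ENTRY_S is 16 bytes: word0 holds the LMTLINE base
+ * address and word1 holds the attribute bits, so the 128 KB table mapped
+ * by lmtst_map_table_ops() below can hold up to 8192 such entries.
+ */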
+
+static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val,
+ int lmt_tbl_op)
+{
+ void __iomem *lmt_map_base;
+ u64 tbl_base;
+
+ tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
+
+ lmt_map_base = ioremap_wc(tbl_base, LMT_MAP_TABLE_SIZE);
+ if (!lmt_map_base) {
+ dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
+ return -ENOMEM;
+ }
+
+ if (lmt_tbl_op == LMT_TBL_OP_READ) {
+ *val = readq(lmt_map_base + index);
+ } else {
+ writeq((*val), (lmt_map_base + index));
+		/* Flush the AP interceptor cache so that APR_LMT_MAP_ENTRY_S
+		 * changes take effect. Writing 1 triggers the flush; the read
+		 * back acts as a barrier and sets up a data dependency.
+		 * Writing 0 after the write of 1 completes the flush.
+ */
+ rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CTL, BIT_ULL(0));
+ rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CTL);
+ rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CTL, 0x00);
+ }
+
+ iounmap(lmt_map_base);
+ return 0;
+}
+
+#define LMT_MAP_TBL_W1_OFF 8
+static u32 rvu_get_lmtst_tbl_index(struct rvu *rvu, u16 pcifunc)
+{
+ return ((rvu_get_pf(pcifunc) * rvu->hw->total_vfs) +
+ (pcifunc & RVU_PFVF_FUNC_MASK)) * LMT_MAPTBL_ENTRY_SIZE;
+}
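+
+/* Illustrative example (total_vfs assumed to be 128 for clarity): the map
+ * table entry for the function with pf = 2 and func = 3 starts at byte
+ * offset (2 * 128 + 3) * 16 = 4144, and word1 of that entry sits a further
+ * LMT_MAP_TBL_W1_OFF bytes in.
+ */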
+
+static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc,
+ u64 iova, u64 *lmt_addr)
+{
+ u64 pa, val, pf;
+ int err;
+
+ if (!iova) {
+ dev_err(rvu->dev, "%s Requested Null address for transulation\n", __func__);
+ return -EINVAL;
+ }
+
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_REQ, iova);
+ pf = rvu_get_pf(pcifunc) & 0x1F;
+ val = BIT_ULL(63) | BIT_ULL(14) | BIT_ULL(13) | pf << 8 |
+ ((pcifunc & RVU_PFVF_FUNC_MASK) & 0xFF);
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TXN_REQ, val);
+
+ err = rvu_poll_reg(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS, BIT_ULL(0), false);
+ if (err) {
+ dev_err(rvu->dev, "%s LMTLINE iova transulation failed\n", __func__);
+ return err;
+ }
+ val = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS);
+ if (val & ~0x1ULL) {
+ dev_err(rvu->dev, "%s LMTLINE iova transulation failed err:%llx\n", __func__, val);
+ return -EIO;
+ }
+ /* PA[51:12] = RVU_AF_SMMU_TLN_FLIT0[57:18]
+ * PA[11:0] = IOVA[11:0]
+ */
+ pa = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TLN_FLIT0) >> 18;
+ pa &= GENMASK_ULL(39, 0);
+ *lmt_addr = (pa << 12) | (iova & 0xFFF);
+
+ return 0;
+}
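+
+/* Illustrative example (register value assumed): if RVU_AF_SMMU_TLN_FLIT0
+ * bits [57:18] read back as 0x123456 and the IOVA ends in 0xabc, the
+ * translation above yields *lmt_addr = (0x123456 << 12) | 0xabc =
+ * 0x123456abc.
+ */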
+
+static int rvu_update_lmtaddr(struct rvu *rvu, u16 pcifunc, u64 lmt_addr)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ u32 tbl_idx;
+ int err = 0;
+ u64 val;
+
+ /* Read the current lmt addr of pcifunc */
+ tbl_idx = rvu_get_lmtst_tbl_index(rvu, pcifunc);
+ err = lmtst_map_table_ops(rvu, tbl_idx, &val, LMT_TBL_OP_READ);
+ if (err) {
+ dev_err(rvu->dev,
+ "Failed to read LMT map table: index 0x%x err %d\n",
+ tbl_idx, err);
+ return err;
+ }
+
+	/* Storing the secondary's lmt base address as this needs to be
+ * reverted in FLR. Also making sure this default value doesn't
+ * get overwritten on multiple calls to this mailbox.
+ */
+ if (!pfvf->lmt_base_addr)
+ pfvf->lmt_base_addr = val;
+
+ /* Update the LMT table with new addr */
+ err = lmtst_map_table_ops(rvu, tbl_idx, &lmt_addr, LMT_TBL_OP_WRITE);
+ if (err) {
+ dev_err(rvu->dev,
+ "Failed to update LMT map table: index 0x%x err %d\n",
+ tbl_idx, err);
+ return err;
+ }
+ return 0;
+}
+
+int rvu_mbox_handler_lmtst_tbl_setup(struct rvu *rvu,
+ struct lmtst_tbl_setup_req *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+ u32 pri_tbl_idx, tbl_idx;
+ u64 lmt_addr;
+ int err = 0;
+ u64 val;
+
+	/* Check if PF_FUNC wants to use its own local memory as the LMTLINE
+	 * region; if so, convert that IOVA to a physical address and
+	 * populate the LMT table with that address.
+ */
+ if (req->use_local_lmt_region) {
+ err = rvu_get_lmtaddr(rvu, req->hdr.pcifunc,
+ req->lmt_iova, &lmt_addr);
+ if (err < 0)
+ return err;
+
+ /* Update the lmt addr for this PFFUNC in the LMT table */
+ err = rvu_update_lmtaddr(rvu, req->hdr.pcifunc, lmt_addr);
+ if (err)
+ return err;
+ }
+
+	/* Reconfigure the lmtst map table for lmt region shared mode, i.e.
+	 * multiple PF_FUNCs sharing one LMTLINE region. The primary/base
+	 * pcifunc (passed as an argument in this mailbox request) is the one
+	 * whose lmt base address is shared with the secondary pcifunc (the
+	 * one issuing this mailbox request).
+ */
+ if (req->base_pcifunc) {
+ /* Calculating the LMT table index equivalent to primary
+ * pcifunc.
+ */
+ pri_tbl_idx = rvu_get_lmtst_tbl_index(rvu, req->base_pcifunc);
+
+ /* Read the base lmt addr of the primary pcifunc */
+ err = lmtst_map_table_ops(rvu, pri_tbl_idx, &val,
+ LMT_TBL_OP_READ);
+ if (err) {
+ dev_err(rvu->dev,
+ "Failed to read LMT map table: index 0x%x err %d\n",
+ pri_tbl_idx, err);
+ goto error;
+ }
+
+ /* Update the base lmt addr of secondary with primary's base
+ * lmt addr.
+ */
+ err = rvu_update_lmtaddr(rvu, req->hdr.pcifunc, val);
+ if (err)
+ return err;
+ }
+
+	/* This mailbox can also be used to update word1 of APR_LMT_MAP_ENTRY_S,
+	 * e.g. to enable scheduled LMTST, disable LMTLINE prefetch or disable
+	 * early completion for ordered LMTST.
+ */
+ if (req->sch_ena || req->dis_sched_early_comp || req->dis_line_pref) {
+ tbl_idx = rvu_get_lmtst_tbl_index(rvu, req->hdr.pcifunc);
+ err = lmtst_map_table_ops(rvu, tbl_idx + LMT_MAP_TBL_W1_OFF,
+ &val, LMT_TBL_OP_READ);
+ if (err) {
+ dev_err(rvu->dev,
+ "Failed to read LMT map table: index 0x%x err %d\n",
+ tbl_idx + LMT_MAP_TBL_W1_OFF, err);
+ goto error;
+ }
+
+ /* Storing lmt map table entry word1 default value as this needs
+ * to be reverted in FLR. Also making sure this default value
+ * doesn't get overwritten on multiple calls to this mailbox.
+ */
+ if (!pfvf->lmt_map_ent_w1)
+ pfvf->lmt_map_ent_w1 = val;
+
+ /* Disable early completion for Ordered LMTSTs. */
+ if (req->dis_sched_early_comp)
+ val |= (req->dis_sched_early_comp <<
+ APR_LMT_MAP_ENT_DIS_SCH_CMP_SHIFT);
+ /* Enable scheduled LMTST */
+ if (req->sch_ena)
+ val |= (req->sch_ena << APR_LMT_MAP_ENT_SCH_ENA_SHIFT) |
+ req->ssow_pf_func;
+		/* Disable LMTLINE prefetch before receiving store data. */
+ if (req->dis_line_pref)
+ val |= (req->dis_line_pref <<
+ APR_LMT_MAP_ENT_DIS_LINE_PREF_SHIFT);
+
+ err = lmtst_map_table_ops(rvu, tbl_idx + LMT_MAP_TBL_W1_OFF,
+ &val, LMT_TBL_OP_WRITE);
+ if (err) {
+ dev_err(rvu->dev,
+ "Failed to update LMT map table: index 0x%x err %d\n",
+ tbl_idx + LMT_MAP_TBL_W1_OFF, err);
+ goto error;
+ }
+ }
+
+error:
+ return err;
+}
+
+/* Resetting the lmtst map table to original default values */
+void rvu_reset_lmt_map_tbl(struct rvu *rvu, u16 pcifunc)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ u32 tbl_idx;
+ int err;
+
+ if (is_rvu_otx2(rvu))
+ return;
+
+ if (pfvf->lmt_base_addr || pfvf->lmt_map_ent_w1) {
+ /* This corresponds to lmt map table index */
+ tbl_idx = rvu_get_lmtst_tbl_index(rvu, pcifunc);
+ /* Reverting back original lmt base addr for respective
+ * pcifunc.
+ */
+ if (pfvf->lmt_base_addr) {
+ err = lmtst_map_table_ops(rvu, tbl_idx,
+ &pfvf->lmt_base_addr,
+ LMT_TBL_OP_WRITE);
+ if (err)
+ dev_err(rvu->dev,
+ "Failed to update LMT map table: index 0x%x err %d\n",
+ tbl_idx, err);
+ pfvf->lmt_base_addr = 0;
+ }
+		/* Revert to the original word1 value of the lmtst map table
+		 * entry which underwent changes.
+ */
+ if (pfvf->lmt_map_ent_w1) {
+ err = lmtst_map_table_ops(rvu,
+ tbl_idx + LMT_MAP_TBL_W1_OFF,
+ &pfvf->lmt_map_ent_w1,
+ LMT_TBL_OP_WRITE);
+ if (err)
+ dev_err(rvu->dev,
+ "Failed to update LMT map table: index 0x%x err %d\n",
+ tbl_idx + LMT_MAP_TBL_W1_OFF, err);
+ pfvf->lmt_map_ent_w1 = 0;
+ }
+ }
+}
+
+int rvu_set_channels_base(struct rvu *rvu)
+{
+ u16 nr_lbk_chans, nr_sdp_chans, nr_cgx_chans, nr_cpt_chans;
+ u16 sdp_chan_base, cgx_chan_base, cpt_chan_base;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u64 nix_const, nix_const1;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ nix_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
+ nix_const1 = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
+
+ hw->cgx = (nix_const >> 12) & 0xFULL;
+ hw->lmac_per_cgx = (nix_const >> 8) & 0xFULL;
+ hw->cgx_links = hw->cgx * hw->lmac_per_cgx;
+ hw->lbk_links = (nix_const >> 24) & 0xFULL;
+ hw->cpt_links = (nix_const >> 44) & 0xFULL;
+ hw->sdp_links = 1;
+
+ hw->cgx_chan_base = NIX_CHAN_CGX_LMAC_CHX(0, 0, 0);
+ hw->lbk_chan_base = NIX_CHAN_LBK_CHX(0, 0);
+ hw->sdp_chan_base = NIX_CHAN_SDP_CH_START;
+
+ /* No Programmable channels */
+ if (!(nix_const & BIT_ULL(60)))
+ return 0;
+
+ hw->cap.programmable_chans = true;
+
+	/* If programmable channels are present then configure
+	 * channels such that all channel numbers are contiguous,
+	 * leaving no holes. This way the new CPT channels can be
+	 * accommodated. The order in which channel numbers are
+	 * assigned is LBK, SDP, CGX and CPT. Also, the base channel
+	 * number of a block must be a multiple of the number of
+	 * channels of the block.
+ */
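+	/* Illustrative example (channel counts assumed, real values come from
+	 * NIX_AF_CONST/CONST1): with 1 LBK link of 64 channels the LBK range
+	 * ends at lbk_chan_base + 64; if SDP has 128 channels, sdp_chan_base
+	 * is then rounded up to the next multiple of 128, and the CGX and CPT
+	 * bases are derived the same way below.
+	 */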
+ nr_lbk_chans = (nix_const >> 16) & 0xFFULL;
+ nr_sdp_chans = nix_const1 & 0xFFFULL;
+ nr_cgx_chans = nix_const & 0xFFULL;
+ nr_cpt_chans = (nix_const >> 32) & 0xFFFULL;
+
+ sdp_chan_base = hw->lbk_chan_base + hw->lbk_links * nr_lbk_chans;
+ /* Round up base channel to multiple of number of channels */
+ hw->sdp_chan_base = ALIGN(sdp_chan_base, nr_sdp_chans);
+
+ cgx_chan_base = hw->sdp_chan_base + hw->sdp_links * nr_sdp_chans;
+ hw->cgx_chan_base = ALIGN(cgx_chan_base, nr_cgx_chans);
+
+ cpt_chan_base = hw->cgx_chan_base + hw->cgx_links * nr_cgx_chans;
+ hw->cpt_chan_base = ALIGN(cpt_chan_base, nr_cpt_chans);
+
+	/* Out of the 4096 channels, start CPT from 2048 so
+	 * that the MSB is always set for CPT channels.
+ */
+ if (cpt_chan_base <= NIX_CHAN_CPT_CH_START) {
+ hw->cpt_chan_base = NIX_CHAN_CPT_CH_START;
+ } else {
+ dev_err(rvu->dev,
+ "CPT channels could not fit in the range 2048-4095\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#define LBK_CONNECT_NIXX(a) (0x0 + (a))
+
+static void __rvu_lbk_set_chans(struct rvu *rvu, void __iomem *base,
+ u64 offset, int lbkid, u16 chans)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u64 cfg;
+
+ cfg = readq(base + offset);
+ cfg &= ~(LBK_LINK_CFG_RANGE_MASK |
+ LBK_LINK_CFG_ID_MASK | LBK_LINK_CFG_BASE_MASK);
+ cfg |= FIELD_PREP(LBK_LINK_CFG_RANGE_MASK, ilog2(chans));
+ cfg |= FIELD_PREP(LBK_LINK_CFG_ID_MASK, lbkid);
+ cfg |= FIELD_PREP(LBK_LINK_CFG_BASE_MASK, hw->lbk_chan_base);
+
+ writeq(cfg, base + offset);
+}
+
+static void rvu_lbk_set_channels(struct rvu *rvu)
+{
+ struct pci_dev *pdev = NULL;
+ void __iomem *base;
+ u64 lbk_const;
+ u8 src, dst;
+ u16 chans;
+
+	/* To loop packets back between multiple NIX blocks,
+	 * multiple LBK blocks are needed. With two NIX blocks,
+	 * four LBK blocks are needed and each LBK block's
+	 * source and destination are as follows:
+	 * LBK0 - source NIX0 and destination NIX0
+	 * LBK1 - source NIX0 and destination NIX1
+	 * LBK2 - source NIX1 and destination NIX0
+	 * LBK3 - source NIX1 and destination NIX1
+	 * As per the HRM, the channel numbers should be programmed as:
+	 * P2X and X2P of LBK0 the same
+	 * P2X and X2P of LBK3 the same
+	 * P2X of LBK1 and X2P of LBK2 the same
+	 * P2X of LBK2 and X2P of LBK1 the same
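+	 * In terms of the lbkid values programmed below this means:
+	 * LBK0: X2P id 0, P2X id 0
+	 * LBK1: X2P id 0, P2X id 1
+	 * LBK2: X2P id 1, P2X id 0
+	 * LBK3: X2P id 1, P2X id 1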
+ */
+ while (true) {
+ pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_LBK, pdev);
+ if (!pdev)
+ return;
+
+ base = pci_ioremap_bar(pdev, 0);
+ if (!base)
+ goto err_put;
+
+ lbk_const = readq(base + LBK_CONST);
+ chans = FIELD_GET(LBK_CONST_CHANS, lbk_const);
+ dst = FIELD_GET(LBK_CONST_DST, lbk_const);
+ src = FIELD_GET(LBK_CONST_SRC, lbk_const);
+
+ if (src == dst) {
+ if (src == LBK_CONNECT_NIXX(0)) { /* LBK0 */
+ __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
+ 0, chans);
+ __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
+ 0, chans);
+ } else if (src == LBK_CONNECT_NIXX(1)) { /* LBK3 */
+ __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
+ 1, chans);
+ __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
+ 1, chans);
+ }
+ } else {
+ if (src == LBK_CONNECT_NIXX(0)) { /* LBK1 */
+ __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
+ 0, chans);
+ __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
+ 1, chans);
+ } else if (src == LBK_CONNECT_NIXX(1)) { /* LBK2 */
+ __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
+ 1, chans);
+ __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
+ 0, chans);
+ }
+ }
+ iounmap(base);
+ }
+err_put:
+ pci_dev_put(pdev);
+}
+
+static void __rvu_nix_set_channels(struct rvu *rvu, int blkaddr)
+{
+ u64 nix_const1 = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
+ u64 nix_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
+ u16 cgx_chans, lbk_chans, sdp_chans, cpt_chans;
+ struct rvu_hwinfo *hw = rvu->hw;
+ int link, nix_link = 0;
+ u16 start;
+ u64 cfg;
+
+ cgx_chans = nix_const & 0xFFULL;
+ lbk_chans = (nix_const >> 16) & 0xFFULL;
+ sdp_chans = nix_const1 & 0xFFFULL;
+ cpt_chans = (nix_const >> 32) & 0xFFFULL;
+
+ start = hw->cgx_chan_base;
+ for (link = 0; link < hw->cgx_links; link++, nix_link++) {
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
+ cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
+ cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(cgx_chans));
+ cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
+ rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
+ start += cgx_chans;
+ }
+
+ start = hw->lbk_chan_base;
+ for (link = 0; link < hw->lbk_links; link++, nix_link++) {
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
+ cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
+ cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(lbk_chans));
+ cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
+ rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
+ start += lbk_chans;
+ }
+
+ start = hw->sdp_chan_base;
+ for (link = 0; link < hw->sdp_links; link++, nix_link++) {
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
+ cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
+ cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(sdp_chans));
+ cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
+ rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
+ start += sdp_chans;
+ }
+
+ start = hw->cpt_chan_base;
+ for (link = 0; link < hw->cpt_links; link++, nix_link++) {
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
+ cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
+ cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(cpt_chans));
+ cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
+ rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
+ start += cpt_chans;
+ }
+}
+
+static void rvu_nix_set_channels(struct rvu *rvu)
+{
+ int blkaddr = 0;
+
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ while (blkaddr) {
+ __rvu_nix_set_channels(rvu, blkaddr);
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ }
+}
+
+static void __rvu_rpm_set_channels(int cgxid, int lmacid, u16 base)
+{
+ u64 cfg;
+
+ cfg = cgx_lmac_read(cgxid, lmacid, RPMX_CMRX_LINK_CFG);
+ cfg &= ~(RPMX_CMRX_LINK_BASE_MASK | RPMX_CMRX_LINK_RANGE_MASK);
+
+	/* There is no read-only constant register from which to read
+	 * the number of channels per LMAC; it is always 16.
+ */
+ cfg |= FIELD_PREP(RPMX_CMRX_LINK_RANGE_MASK, ilog2(16));
+ cfg |= FIELD_PREP(RPMX_CMRX_LINK_BASE_MASK, base);
+ cgx_lmac_write(cgxid, lmacid, RPMX_CMRX_LINK_CFG, cfg);
+}
+
+static void rvu_rpm_set_channels(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 base = hw->cgx_chan_base;
+ int cgx, lmac;
+
+ for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++) {
+ for (lmac = 0; lmac < hw->lmac_per_cgx; lmac++) {
+ __rvu_rpm_set_channels(cgx, lmac, base);
+ base += 16;
+ }
+ }
+}
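+
+/* Illustrative example (base value assumed): if cgx_chan_base were 0x800,
+ * CGX0 LMAC0 would get channels 0x800-0x80f, CGX0 LMAC1 0x810-0x81f, and
+ * so on, since each LMAC is given a fixed block of 16 channels.
+ */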
+
+void rvu_program_channels(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+
+ if (!hw->cap.programmable_chans)
+ return;
+
+ rvu_nix_set_channels(rvu);
+ rvu_lbk_set_channels(rvu);
+ rvu_rpm_set_channels(rvu);
+}
+
+void rvu_sso_block_cn10k_init(struct rvu *rvu, int blkaddr)
+{
+ u64 reg;
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_WS_CFG);
+ /* Enable GET_WORK prefetching to the GWCs. */
+ reg &= ~BIT_ULL(4);
+ rvu_write64(rvu, blkaddr, SSO_AF_WS_CFG, reg);
+}
+
+void rvu_nix_block_cn10k_init(struct rvu *rvu, struct nix_hw *nix_hw)
+{
+ int blkaddr = nix_hw->blkaddr;
+
+	/* Set AF vWQE timer interval to an LF-configurable range of
+	 * 6.4 us to 1.632 ms.
+ */
+ rvu_write64(rvu, blkaddr, NIX_AF_VWQE_TIMER, 0x3FULL);
+}
+
+void rvu_apr_block_cn10k_init(struct rvu *rvu)
+{
+ u64 reg;
+
+ reg = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CFG);
+ reg |= 0xFULL << 35;
+ rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CFG, reg);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
new file mode 100644
index 000000000000..ae8c10089b79
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
@@ -0,0 +1,1110 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/bitfield.h>
+#include <linux/pci.h>
+#include "rvu_struct.h"
+#include "rvu_reg.h"
+#include "mbox.h"
+#include "rvu.h"
+
+/* CPT PF device id */
+#define PCI_DEVID_OTX2_CPT_PF 0xA0FD
+#define PCI_DEVID_OTX2_CPT10K_PF 0xA0F2
+
+/* Length of initial context fetch, in units of 128-byte words */
+#define CPT_CTX_ILEN 2ULL
+
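+/* In CPT_AF_EXEX_STS bit 0 flags an engine as busy and bit 1 as free; the
+ * macro below gathers these per-engine flags for engines in the range
+ * [e_min, e_max) into the busy/free status bitmaps of the response.
+ */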
+#define cpt_get_eng_sts(e_min, e_max, rsp, etype) \
+({ \
+ u64 free_sts = 0, busy_sts = 0; \
+ typeof(rsp) _rsp = rsp; \
+ u32 e, i; \
+ \
+ for (e = (e_min), i = 0; e < (e_max); e++, i++) { \
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e)); \
+ if (reg & 0x1) \
+ busy_sts |= 1ULL << i; \
+ \
+ if (reg & 0x2) \
+ free_sts |= 1ULL << i; \
+ } \
+ (_rsp)->busy_sts_##etype = busy_sts; \
+ (_rsp)->free_sts_##etype = free_sts; \
+})
+
+static irqreturn_t rvu_cpt_af_flt_intr_handler(int irq, void *ptr)
+{
+ struct rvu_block *block = ptr;
+ struct rvu *rvu = block->rvu;
+ int blkaddr = block->addr;
+ u64 reg0, reg1, reg2 = 0;
+
+ reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0));
+ reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1));
+ if (!is_rvu_otx2(rvu)) {
+ reg2 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(2));
+ dev_err_ratelimited(rvu->dev,
+ "Received CPTAF FLT irq : 0x%llx, 0x%llx, 0x%llx",
+ reg0, reg1, reg2);
+ } else {
+ dev_err_ratelimited(rvu->dev,
+ "Received CPTAF FLT irq : 0x%llx, 0x%llx",
+ reg0, reg1);
+ }
+
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(0), reg0);
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(1), reg1);
+ if (!is_rvu_otx2(rvu))
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(2), reg2);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rvu_cpt_af_rvu_intr_handler(int irq, void *ptr)
+{
+ struct rvu_block *block = ptr;
+ struct rvu *rvu = block->rvu;
+ int blkaddr = block->addr;
+ u64 reg;
+
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_RVU_INT);
+ dev_err_ratelimited(rvu->dev, "Received CPTAF RVU irq : 0x%llx", reg);
+
+ rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT, reg);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rvu_cpt_af_ras_intr_handler(int irq, void *ptr)
+{
+ struct rvu_block *block = ptr;
+ struct rvu *rvu = block->rvu;
+ int blkaddr = block->addr;
+ u64 reg;
+
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_RAS_INT);
+ dev_err_ratelimited(rvu->dev, "Received CPTAF RAS irq : 0x%llx", reg);
+
+ rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT, reg);
+ return IRQ_HANDLED;
+}
+
+static int rvu_cpt_do_register_interrupt(struct rvu_block *block, int irq_offs,
+ irq_handler_t handler,
+ const char *name)
+{
+ struct rvu *rvu = block->rvu;
+ int ret;
+
+ ret = request_irq(pci_irq_vector(rvu->pdev, irq_offs), handler, 0,
+ name, block);
+ if (ret) {
+ dev_err(rvu->dev, "RVUAF: %s irq registration failed", name);
+ return ret;
+ }
+
+ WARN_ON(rvu->irq_allocated[irq_offs]);
+ rvu->irq_allocated[irq_offs] = true;
+ return 0;
+}
+
+static void cpt_10k_unregister_interrupts(struct rvu_block *block, int off)
+{
+ struct rvu *rvu = block->rvu;
+ int blkaddr = block->addr;
+ int i;
+
+ /* Disable all CPT AF interrupts */
+ for (i = 0; i < CPT_10K_AF_INT_VEC_RVU; i++)
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(i), 0x1);
+ rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1C, 0x1);
+ rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1C, 0x1);
+
+ for (i = 0; i < CPT_10K_AF_INT_VEC_CNT; i++)
+ if (rvu->irq_allocated[off + i]) {
+ free_irq(pci_irq_vector(rvu->pdev, off + i), block);
+ rvu->irq_allocated[off + i] = false;
+ }
+}
+
+static void cpt_unregister_interrupts(struct rvu *rvu, int blkaddr)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int i, offs;
+
+ if (!is_block_implemented(rvu->hw, blkaddr))
+ return;
+ offs = rvu_read64(rvu, blkaddr, CPT_PRIV_AF_INT_CFG) & 0x7FF;
+ if (!offs) {
+ dev_warn(rvu->dev,
+ "Failed to get CPT_AF_INT vector offsets\n");
+ return;
+ }
+ block = &hw->block[blkaddr];
+ if (!is_rvu_otx2(rvu))
+ return cpt_10k_unregister_interrupts(block, offs);
+
+ /* Disable all CPT AF interrupts */
+ for (i = 0; i < CPT_AF_INT_VEC_RVU; i++)
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(i), 0x1);
+ rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1C, 0x1);
+ rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1C, 0x1);
+
+ for (i = 0; i < CPT_AF_INT_VEC_CNT; i++)
+ if (rvu->irq_allocated[offs + i]) {
+ free_irq(pci_irq_vector(rvu->pdev, offs + i), block);
+ rvu->irq_allocated[offs + i] = false;
+ }
+}
+
+void rvu_cpt_unregister_interrupts(struct rvu *rvu)
+{
+ cpt_unregister_interrupts(rvu, BLKADDR_CPT0);
+ cpt_unregister_interrupts(rvu, BLKADDR_CPT1);
+}
+
+static int cpt_10k_register_interrupts(struct rvu_block *block, int off)
+{
+ struct rvu *rvu = block->rvu;
+ int blkaddr = block->addr;
+ int i, ret;
+
+ for (i = CPT_10K_AF_INT_VEC_FLT0; i < CPT_10K_AF_INT_VEC_RVU; i++) {
+ sprintf(&rvu->irq_name[(off + i) * NAME_SIZE], "CPTAF FLT%d", i);
+ ret = rvu_cpt_do_register_interrupt(block, off + i,
+ rvu_cpt_af_flt_intr_handler,
+ &rvu->irq_name[(off + i) * NAME_SIZE]);
+ if (ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), 0x1);
+ }
+
+ ret = rvu_cpt_do_register_interrupt(block, off + CPT_10K_AF_INT_VEC_RVU,
+ rvu_cpt_af_rvu_intr_handler,
+ "CPTAF RVU");
+ if (ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1S, 0x1);
+
+ ret = rvu_cpt_do_register_interrupt(block, off + CPT_10K_AF_INT_VEC_RAS,
+ rvu_cpt_af_ras_intr_handler,
+ "CPTAF RAS");
+ if (ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1S, 0x1);
+
+ return 0;
+err:
+ rvu_cpt_unregister_interrupts(rvu);
+ return ret;
+}
+
+static int cpt_register_interrupts(struct rvu *rvu, int blkaddr)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int i, offs, ret = 0;
+ char irq_name[16];
+
+ if (!is_block_implemented(rvu->hw, blkaddr))
+ return 0;
+
+ block = &hw->block[blkaddr];
+ offs = rvu_read64(rvu, blkaddr, CPT_PRIV_AF_INT_CFG) & 0x7FF;
+ if (!offs) {
+ dev_warn(rvu->dev,
+ "Failed to get CPT_AF_INT vector offsets\n");
+ return 0;
+ }
+
+ if (!is_rvu_otx2(rvu))
+ return cpt_10k_register_interrupts(block, offs);
+
+ for (i = CPT_AF_INT_VEC_FLT0; i < CPT_AF_INT_VEC_RVU; i++) {
+ snprintf(irq_name, sizeof(irq_name), "CPTAF FLT%d", i);
+ ret = rvu_cpt_do_register_interrupt(block, offs + i,
+ rvu_cpt_af_flt_intr_handler,
+ irq_name);
+ if (ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), 0x1);
+ }
+
+ ret = rvu_cpt_do_register_interrupt(block, offs + CPT_AF_INT_VEC_RVU,
+ rvu_cpt_af_rvu_intr_handler,
+ "CPTAF RVU");
+ if (ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1S, 0x1);
+
+ ret = rvu_cpt_do_register_interrupt(block, offs + CPT_AF_INT_VEC_RAS,
+ rvu_cpt_af_ras_intr_handler,
+ "CPTAF RAS");
+ if (ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1S, 0x1);
+
+ return 0;
+err:
+ rvu_cpt_unregister_interrupts(rvu);
+ return ret;
+}
+
+int rvu_cpt_register_interrupts(struct rvu *rvu)
+{
+ int ret;
+
+ ret = cpt_register_interrupts(rvu, BLKADDR_CPT0);
+ if (ret)
+ return ret;
+
+ return cpt_register_interrupts(rvu, BLKADDR_CPT1);
+}
+
+static int get_cpt_pf_num(struct rvu *rvu)
+{
+ int i, domain_nr, cpt_pf_num = -1;
+ struct pci_dev *pdev;
+
+ domain_nr = pci_domain_nr(rvu->pdev->bus);
+ for (i = 0; i < rvu->hw->total_pfs; i++) {
+ pdev = pci_get_domain_bus_and_slot(domain_nr, i + 1, 0);
+ if (!pdev)
+ continue;
+
+ if (pdev->device == PCI_DEVID_OTX2_CPT_PF ||
+ pdev->device == PCI_DEVID_OTX2_CPT10K_PF) {
+ cpt_pf_num = i;
+ put_device(&pdev->dev);
+ break;
+ }
+ put_device(&pdev->dev);
+ }
+ return cpt_pf_num;
+}
+
+static bool is_cpt_pf(struct rvu *rvu, u16 pcifunc)
+{
+ int cpt_pf_num = rvu->cpt_pf_num;
+
+ if (rvu_get_pf(pcifunc) != cpt_pf_num)
+ return false;
+ if (pcifunc & RVU_PFVF_FUNC_MASK)
+ return false;
+
+ return true;
+}
+
+static bool is_cpt_vf(struct rvu *rvu, u16 pcifunc)
+{
+ int cpt_pf_num = rvu->cpt_pf_num;
+
+ if (rvu_get_pf(pcifunc) != cpt_pf_num)
+ return false;
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK))
+ return false;
+
+ return true;
+}
+
+static int validate_and_get_cpt_blkaddr(int req_blkaddr)
+{
+ int blkaddr;
+
+ blkaddr = req_blkaddr ? req_blkaddr : BLKADDR_CPT0;
+ if (blkaddr != BLKADDR_CPT0 && blkaddr != BLKADDR_CPT1)
+ return -EINVAL;
+
+ return blkaddr;
+}
+
+int rvu_mbox_handler_cpt_lf_alloc(struct rvu *rvu,
+ struct cpt_lf_alloc_req_msg *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_block *block;
+ int cptlf, blkaddr;
+ int num_lfs, slot;
+ u64 val;
+
+ blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ if (req->eng_grpmsk == 0x0)
+ return CPT_AF_ERR_GRP_INVALID;
+
+ block = &rvu->hw->block[blkaddr];
+ num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
+ block->addr);
+ if (!num_lfs)
+ return CPT_AF_ERR_LF_INVALID;
+
+ /* Check if requested 'CPTLF <=> NIXLF' mapping is valid */
+ if (req->nix_pf_func) {
+ /* If default, use 'this' CPTLF's PFFUNC */
+ if (req->nix_pf_func == RVU_DEFAULT_PF_FUNC)
+ req->nix_pf_func = pcifunc;
+ if (!is_pffunc_map_valid(rvu, req->nix_pf_func, BLKTYPE_NIX))
+ return CPT_AF_ERR_NIX_PF_FUNC_INVALID;
+ }
+
+ /* Check if requested 'CPTLF <=> SSOLF' mapping is valid */
+ if (req->sso_pf_func) {
+ /* If default, use 'this' CPTLF's PFFUNC */
+ if (req->sso_pf_func == RVU_DEFAULT_PF_FUNC)
+ req->sso_pf_func = pcifunc;
+ if (!is_pffunc_map_valid(rvu, req->sso_pf_func, BLKTYPE_SSO))
+ return CPT_AF_ERR_SSO_PF_FUNC_INVALID;
+ }
+
+ for (slot = 0; slot < num_lfs; slot++) {
+ cptlf = rvu_get_lf(rvu, block, pcifunc, slot);
+ if (cptlf < 0)
+ return CPT_AF_ERR_LF_INVALID;
+
+ /* Set CPT LF group and priority */
+ val = (u64)req->eng_grpmsk << 48 | 1;
+ if (!is_rvu_otx2(rvu))
+ val |= (CPT_CTX_ILEN << 17);
+
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);
+
+ /* Set CPT LF NIX_PF_FUNC and SSO_PF_FUNC. EXE_LDWB is set
+ * on reset.
+ */
+ val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf));
+ val &= ~(GENMASK_ULL(63, 48) | GENMASK_ULL(47, 32));
+ val |= ((u64)req->nix_pf_func << 48 |
+ (u64)req->sso_pf_func << 32);
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), val);
+ }
+
+ return 0;
+}
+
+static int cpt_lf_free(struct rvu *rvu, struct msg_req *req, int blkaddr)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ int num_lfs, cptlf, slot, err;
+ struct rvu_block *block;
+
+ block = &rvu->hw->block[blkaddr];
+ num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
+ block->addr);
+ if (!num_lfs)
+ return 0;
+
+ for (slot = 0; slot < num_lfs; slot++) {
+ cptlf = rvu_get_lf(rvu, block, pcifunc, slot);
+ if (cptlf < 0)
+ return CPT_AF_ERR_LF_INVALID;
+
+ /* Perform teardown */
+ rvu_cpt_lf_teardown(rvu, pcifunc, blkaddr, cptlf, slot);
+
+ /* Reset LF */
+ err = rvu_lf_reset(rvu, block, cptlf);
+ if (err) {
+ dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n",
+ block->addr, cptlf);
+ }
+ }
+
+ return 0;
+}
+
+int rvu_mbox_handler_cpt_lf_free(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ int ret;
+
+ ret = cpt_lf_free(rvu, req, BLKADDR_CPT0);
+ if (ret)
+ return ret;
+
+ if (is_block_implemented(rvu->hw, BLKADDR_CPT1))
+ ret = cpt_lf_free(rvu, req, BLKADDR_CPT1);
+
+ return ret;
+}
+
+static int cpt_inline_ipsec_cfg_inbound(struct rvu *rvu, int blkaddr, u8 cptlf,
+ struct cpt_inline_ipsec_cfg_msg *req)
+{
+ u16 sso_pf_func = req->sso_pf_func;
+ u8 nix_sel;
+ u64 val;
+
+ val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf));
+ if (req->enable && (val & BIT_ULL(16))) {
+		/* The IPsec inline outbound path is already enabled for this
+		 * CPT LF; the HRM states that the inline inbound and outbound
+		 * paths must not be enabled at the same time for a given CPT LF.
+ */
+ return CPT_AF_ERR_INLINE_IPSEC_INB_ENA;
+ }
+ /* Check if requested 'CPTLF <=> SSOLF' mapping is valid */
+ if (sso_pf_func && !is_pffunc_map_valid(rvu, sso_pf_func, BLKTYPE_SSO))
+ return CPT_AF_ERR_SSO_PF_FUNC_INVALID;
+
+ nix_sel = (blkaddr == BLKADDR_CPT1) ? 1 : 0;
+ /* Enable CPT LF for IPsec inline inbound operations */
+ if (req->enable)
+ val |= BIT_ULL(9);
+ else
+ val &= ~BIT_ULL(9);
+
+ val |= (u64)nix_sel << 8;
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);
+
+ if (sso_pf_func) {
+ /* Set SSO_PF_FUNC */
+ val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf));
+ val |= (u64)sso_pf_func << 32;
+ val |= (u64)req->nix_pf_func << 48;
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), val);
+ }
+ if (req->sso_pf_func_ovrd)
+ /* Set SSO_PF_FUNC_OVRD for inline IPSec */
+ rvu_write64(rvu, blkaddr, CPT_AF_ECO, 0x1);
+
+ /* Configure the X2P Link register with the cpt base channel number and
+ * range of channels it should propagate to X2P
+ */
+ if (!is_rvu_otx2(rvu)) {
+ val = (ilog2(NIX_CHAN_CPT_X2P_MASK + 1) << 16);
+ val |= (u64)rvu->hw->cpt_chan_base;
+
+ rvu_write64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(0), val);
+ rvu_write64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(1), val);
+ }
+
+ return 0;
+}
+
+static int cpt_inline_ipsec_cfg_outbound(struct rvu *rvu, int blkaddr, u8 cptlf,
+ struct cpt_inline_ipsec_cfg_msg *req)
+{
+ u16 nix_pf_func = req->nix_pf_func;
+ int nix_blkaddr;
+ u8 nix_sel;
+ u64 val;
+
+ val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf));
+ if (req->enable && (val & BIT_ULL(9))) {
+		/* The IPsec inline inbound path is already enabled for this
+		 * CPT LF; the HRM states that the inline inbound and outbound
+		 * paths must not be enabled at the same time for a given CPT LF.
+ */
+ return CPT_AF_ERR_INLINE_IPSEC_OUT_ENA;
+ }
+
+ /* Check if requested 'CPTLF <=> NIXLF' mapping is valid */
+ if (nix_pf_func && !is_pffunc_map_valid(rvu, nix_pf_func, BLKTYPE_NIX))
+ return CPT_AF_ERR_NIX_PF_FUNC_INVALID;
+
+ /* Enable CPT LF for IPsec inline outbound operations */
+ if (req->enable)
+ val |= BIT_ULL(16);
+ else
+ val &= ~BIT_ULL(16);
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);
+
+ if (nix_pf_func) {
+ /* Set NIX_PF_FUNC */
+ val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf));
+ val |= (u64)nix_pf_func << 48;
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), val);
+
+ nix_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, nix_pf_func);
+ nix_sel = (nix_blkaddr == BLKADDR_NIX0) ? 0 : 1;
+
+ val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf));
+ val |= (u64)nix_sel << 8;
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);
+ }
+
+ return 0;
+}
+
+int rvu_mbox_handler_cpt_inline_ipsec_cfg(struct rvu *rvu,
+ struct cpt_inline_ipsec_cfg_msg *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_block *block;
+ int cptlf, blkaddr, ret;
+ u16 actual_slot;
+
+ blkaddr = rvu_get_blkaddr_from_slot(rvu, BLKTYPE_CPT, pcifunc,
+ req->slot, &actual_slot);
+ if (blkaddr < 0)
+ return CPT_AF_ERR_LF_INVALID;
+
+ block = &rvu->hw->block[blkaddr];
+
+ cptlf = rvu_get_lf(rvu, block, pcifunc, actual_slot);
+ if (cptlf < 0)
+ return CPT_AF_ERR_LF_INVALID;
+
+ switch (req->dir) {
+ case CPT_INLINE_INBOUND:
+ ret = cpt_inline_ipsec_cfg_inbound(rvu, blkaddr, cptlf, req);
+ break;
+
+ case CPT_INLINE_OUTBOUND:
+ ret = cpt_inline_ipsec_cfg_outbound(rvu, blkaddr, cptlf, req);
+ break;
+
+ default:
+ return CPT_AF_ERR_PARAM;
+ }
+
+ return ret;
+}
+
+static bool is_valid_offset(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req)
+{
+ u64 offset = req->reg_offset;
+ int blkaddr, num_lfs, lf;
+ struct rvu_block *block;
+ struct rvu_pfvf *pfvf;
+
+ blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
+ if (blkaddr < 0)
+ return false;
+
+ /* Registers that can be accessed from PF/VF */
+ if ((offset & 0xFF000) == CPT_AF_LFX_CTL(0) ||
+ (offset & 0xFF000) == CPT_AF_LFX_CTL2(0)) {
+ if (offset & 7)
+ return false;
+
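+		/* LF registers are spaced 8 bytes apart, so the low 12 bits
+		 * of the offset select the LF slot; e.g. an offset whose low
+		 * bits are 0x28 decodes to lf = 5.
+		 */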
+ lf = (offset & 0xFFF) >> 3;
+ block = &rvu->hw->block[blkaddr];
+ pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+ num_lfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
+ if (lf >= num_lfs)
+ /* Slot is not valid for that PF/VF */
+ return false;
+
+ /* Translate local LF used by VFs to global CPT LF */
+ lf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr],
+ req->hdr.pcifunc, lf);
+ if (lf < 0)
+ return false;
+
+ req->reg_offset &= 0xFF000;
+ req->reg_offset += lf << 3;
+ return true;
+ } else if (!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK)) {
+ /* Registers that can be accessed from PF */
+ switch (offset) {
+ case CPT_AF_DIAG:
+ case CPT_AF_CTL:
+ case CPT_AF_PF_FUNC:
+ case CPT_AF_BLK_RST:
+ case CPT_AF_CONSTANTS1:
+ case CPT_AF_CTX_FLUSH_TIMER:
+ return true;
+ }
+
+ switch (offset & 0xFF000) {
+ case CPT_AF_EXEX_STS(0):
+ case CPT_AF_EXEX_CTL(0):
+ case CPT_AF_EXEX_CTL2(0):
+ case CPT_AF_EXEX_UCODE_BASE(0):
+ if (offset & 7)
+ return false;
+ break;
+ default:
+ return false;
+ }
+ return true;
+ }
+ return false;
+}
+
+int rvu_mbox_handler_cpt_rd_wr_register(struct rvu *rvu,
+ struct cpt_rd_wr_reg_msg *req,
+ struct cpt_rd_wr_reg_msg *rsp)
+{
+ int blkaddr;
+
+ blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ /* This message is accepted only if sent from CPT PF/VF */
+ if (!is_cpt_pf(rvu, req->hdr.pcifunc) &&
+ !is_cpt_vf(rvu, req->hdr.pcifunc))
+ return CPT_AF_ERR_ACCESS_DENIED;
+
+ if (!is_valid_offset(rvu, req))
+ return CPT_AF_ERR_ACCESS_DENIED;
+
+ rsp->reg_offset = req->reg_offset;
+ rsp->ret_val = req->ret_val;
+ rsp->is_write = req->is_write;
+
+ if (req->is_write)
+ rvu_write64(rvu, blkaddr, req->reg_offset, req->val);
+ else
+ rsp->val = rvu_read64(rvu, blkaddr, req->reg_offset);
+
+ return 0;
+}
+
+static void get_ctx_pc(struct rvu *rvu, struct cpt_sts_rsp *rsp, int blkaddr)
+{
+ if (is_rvu_otx2(rvu))
+ return;
+
+ rsp->ctx_mis_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_MIS_PC);
+ rsp->ctx_hit_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_HIT_PC);
+ rsp->ctx_aop_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_AOP_PC);
+ rsp->ctx_aop_lat_pc = rvu_read64(rvu, blkaddr,
+ CPT_AF_CTX_AOP_LATENCY_PC);
+ rsp->ctx_ifetch_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_IFETCH_PC);
+ rsp->ctx_ifetch_lat_pc = rvu_read64(rvu, blkaddr,
+ CPT_AF_CTX_IFETCH_LATENCY_PC);
+ rsp->ctx_ffetch_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_FFETCH_PC);
+ rsp->ctx_ffetch_lat_pc = rvu_read64(rvu, blkaddr,
+ CPT_AF_CTX_FFETCH_LATENCY_PC);
+ rsp->ctx_wback_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_FFETCH_PC);
+ rsp->ctx_wback_lat_pc = rvu_read64(rvu, blkaddr,
+ CPT_AF_CTX_FFETCH_LATENCY_PC);
+ rsp->ctx_psh_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_FFETCH_PC);
+ rsp->ctx_psh_lat_pc = rvu_read64(rvu, blkaddr,
+ CPT_AF_CTX_FFETCH_LATENCY_PC);
+ rsp->ctx_err = rvu_read64(rvu, blkaddr, CPT_AF_CTX_ERR);
+ rsp->ctx_enc_id = rvu_read64(rvu, blkaddr, CPT_AF_CTX_ENC_ID);
+ rsp->ctx_flush_timer = rvu_read64(rvu, blkaddr, CPT_AF_CTX_FLUSH_TIMER);
+
+ rsp->rxc_time = rvu_read64(rvu, blkaddr, CPT_AF_RXC_TIME);
+ rsp->rxc_time_cfg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_TIME_CFG);
+ rsp->rxc_active_sts = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ACTIVE_STS);
+ rsp->rxc_zombie_sts = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ZOMBIE_STS);
+ rsp->rxc_dfrg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_DFRG);
+ rsp->x2p_link_cfg0 = rvu_read64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(0));
+ rsp->x2p_link_cfg1 = rvu_read64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(1));
+}
+
+static void get_eng_sts(struct rvu *rvu, struct cpt_sts_rsp *rsp, int blkaddr)
+{
+ u16 max_ses, max_ies, max_aes;
+ u32 e_min = 0, e_max = 0;
+ u64 reg;
+
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
+ max_ses = reg & 0xffff;
+ max_ies = (reg >> 16) & 0xffff;
+ max_aes = (reg >> 32) & 0xffff;
+
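+	/* Engines are laid out as SEs first, then IEs, then AEs, so the
+	 * ranges passed below are [0, max_ses), [max_ses, max_ses + max_ies)
+	 * and [max_ses + max_ies, max_ses + max_ies + max_aes).
+	 */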
+ /* Get AE status */
+ e_min = max_ses + max_ies;
+ e_max = max_ses + max_ies + max_aes;
+ cpt_get_eng_sts(e_min, e_max, rsp, ae);
+ /* Get SE status */
+ e_min = 0;
+ e_max = max_ses;
+ cpt_get_eng_sts(e_min, e_max, rsp, se);
+ /* Get IE status */
+ e_min = max_ses;
+ e_max = max_ses + max_ies;
+ cpt_get_eng_sts(e_min, e_max, rsp, ie);
+}
+
+int rvu_mbox_handler_cpt_sts(struct rvu *rvu, struct cpt_sts_req *req,
+ struct cpt_sts_rsp *rsp)
+{
+ int blkaddr;
+
+ blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ /* This message is accepted only if sent from CPT PF/VF */
+ if (!is_cpt_pf(rvu, req->hdr.pcifunc) &&
+ !is_cpt_vf(rvu, req->hdr.pcifunc))
+ return CPT_AF_ERR_ACCESS_DENIED;
+
+ get_ctx_pc(rvu, rsp, blkaddr);
+
+ /* Get CPT engines status */
+ get_eng_sts(rvu, rsp, blkaddr);
+
+ /* Read CPT instruction PC registers */
+ rsp->inst_req_pc = rvu_read64(rvu, blkaddr, CPT_AF_INST_REQ_PC);
+ rsp->inst_lat_pc = rvu_read64(rvu, blkaddr, CPT_AF_INST_LATENCY_PC);
+ rsp->rd_req_pc = rvu_read64(rvu, blkaddr, CPT_AF_RD_REQ_PC);
+ rsp->rd_lat_pc = rvu_read64(rvu, blkaddr, CPT_AF_RD_LATENCY_PC);
+ rsp->rd_uc_pc = rvu_read64(rvu, blkaddr, CPT_AF_RD_UC_PC);
+ rsp->active_cycles_pc = rvu_read64(rvu, blkaddr,
+ CPT_AF_ACTIVE_CYCLES_PC);
+ rsp->exe_err_info = rvu_read64(rvu, blkaddr, CPT_AF_EXE_ERR_INFO);
+ rsp->cptclk_cnt = rvu_read64(rvu, blkaddr, CPT_AF_CPTCLK_CNT);
+ rsp->diag = rvu_read64(rvu, blkaddr, CPT_AF_DIAG);
+
+ return 0;
+}
+
+#define RXC_ZOMBIE_THRES GENMASK_ULL(59, 48)
+#define RXC_ZOMBIE_LIMIT GENMASK_ULL(43, 32)
+#define RXC_ACTIVE_THRES GENMASK_ULL(27, 16)
+#define RXC_ACTIVE_LIMIT GENMASK_ULL(11, 0)
+#define RXC_ACTIVE_COUNT GENMASK_ULL(60, 48)
+#define RXC_ZOMBIE_COUNT GENMASK_ULL(60, 48)
+
+static void cpt_rxc_time_cfg(struct rvu *rvu, struct cpt_rxc_time_cfg_req *req,
+ int blkaddr, struct cpt_rxc_time_cfg_req *save)
+{
+ u64 dfrg_reg;
+
+ if (save) {
+ /* Save older config */
+ dfrg_reg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_DFRG);
+ save->zombie_thres = FIELD_GET(RXC_ZOMBIE_THRES, dfrg_reg);
+ save->zombie_limit = FIELD_GET(RXC_ZOMBIE_LIMIT, dfrg_reg);
+ save->active_thres = FIELD_GET(RXC_ACTIVE_THRES, dfrg_reg);
+ save->active_limit = FIELD_GET(RXC_ACTIVE_LIMIT, dfrg_reg);
+
+ save->step = rvu_read64(rvu, blkaddr, CPT_AF_RXC_TIME_CFG);
+ }
+
+ dfrg_reg = FIELD_PREP(RXC_ZOMBIE_THRES, req->zombie_thres);
+ dfrg_reg |= FIELD_PREP(RXC_ZOMBIE_LIMIT, req->zombie_limit);
+ dfrg_reg |= FIELD_PREP(RXC_ACTIVE_THRES, req->active_thres);
+ dfrg_reg |= FIELD_PREP(RXC_ACTIVE_LIMIT, req->active_limit);
+
+ rvu_write64(rvu, blkaddr, CPT_AF_RXC_TIME_CFG, req->step);
+ rvu_write64(rvu, blkaddr, CPT_AF_RXC_DFRG, dfrg_reg);
+}
+
+int rvu_mbox_handler_cpt_rxc_time_cfg(struct rvu *rvu,
+ struct cpt_rxc_time_cfg_req *req,
+ struct msg_rsp *rsp)
+{
+ int blkaddr;
+
+ blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ /* This message is accepted only if sent from CPT PF/VF */
+ if (!is_cpt_pf(rvu, req->hdr.pcifunc) &&
+ !is_cpt_vf(rvu, req->hdr.pcifunc))
+ return CPT_AF_ERR_ACCESS_DENIED;
+
+ cpt_rxc_time_cfg(rvu, req, blkaddr, NULL);
+
+ return 0;
+}
+
+int rvu_mbox_handler_cpt_ctx_cache_sync(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ return rvu_cpt_ctx_flush(rvu, req->hdr.pcifunc);
+}
+
+static void cpt_rxc_teardown(struct rvu *rvu, int blkaddr)
+{
+ struct cpt_rxc_time_cfg_req req, prev;
+ int timeout = 2000;
+ u64 reg;
+
+ if (is_rvu_otx2(rvu))
+ return;
+
+ /* Set time limit to minimum values, so that rxc entries will be
+ * flushed out quickly.
+ */
+ req.step = 1;
+ req.zombie_thres = 1;
+ req.zombie_limit = 1;
+ req.active_thres = 1;
+ req.active_limit = 1;
+
+ cpt_rxc_time_cfg(rvu, &req, blkaddr, &prev);
+
+ do {
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ACTIVE_STS);
+ udelay(1);
+ if (FIELD_GET(RXC_ACTIVE_COUNT, reg))
+ timeout--;
+ else
+ break;
+ } while (timeout);
+
+ if (timeout == 0)
+ dev_warn(rvu->dev, "Poll for RXC active count hits hard loop counter\n");
+
+ timeout = 2000;
+ do {
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ZOMBIE_STS);
+ udelay(1);
+ if (FIELD_GET(RXC_ZOMBIE_COUNT, reg))
+ timeout--;
+ else
+ break;
+ } while (timeout);
+
+ if (timeout == 0)
+ dev_warn(rvu->dev, "Poll for RXC zombie count hits hard loop counter\n");
+
+ /* Restore config */
+ cpt_rxc_time_cfg(rvu, &prev, blkaddr, NULL);
+}
+
+#define INPROG_INFLIGHT(reg) ((reg) & 0x1FF)
+#define INPROG_GRB_PARTIAL(reg) ((reg) & BIT_ULL(31))
+#define INPROG_GRB(reg) (((reg) >> 32) & 0xFF)
+#define INPROG_GWB(reg) (((reg) >> 40) & 0xFF)
+
+static void cpt_lf_disable_iqueue(struct rvu *rvu, int blkaddr, int slot)
+{
+ int i = 0, hard_lp_ctr = 100000;
+ u64 inprog, grp_ptr;
+ u16 nq_ptr, dq_ptr;
+
+ /* Disable instructions enqueuing */
+ rvu_write64(rvu, blkaddr, CPT_AF_BAR2_ALIASX(slot, CPT_LF_CTL), 0x0);
+
+ /* Disable executions in the LF's queue */
+ inprog = rvu_read64(rvu, blkaddr,
+ CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG));
+ inprog &= ~BIT_ULL(16);
+ rvu_write64(rvu, blkaddr,
+ CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG), inprog);
+
+ /* Wait for CPT queue to become execution-quiescent */
+ do {
+ inprog = rvu_read64(rvu, blkaddr,
+ CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG));
+ if (INPROG_GRB_PARTIAL(inprog)) {
+ i = 0;
+ hard_lp_ctr--;
+ } else {
+ i++;
+ }
+
+ grp_ptr = rvu_read64(rvu, blkaddr,
+ CPT_AF_BAR2_ALIASX(slot,
+ CPT_LF_Q_GRP_PTR));
+ nq_ptr = (grp_ptr >> 32) & 0x7FFF;
+ dq_ptr = grp_ptr & 0x7FFF;
+
+ } while (hard_lp_ctr && (i < 10) && (nq_ptr != dq_ptr));
+
+ if (hard_lp_ctr == 0)
+ dev_warn(rvu->dev, "CPT FLR hits hard loop counter\n");
+
+ i = 0;
+ hard_lp_ctr = 100000;
+ do {
+ inprog = rvu_read64(rvu, blkaddr,
+ CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG));
+
+ if ((INPROG_INFLIGHT(inprog) == 0) &&
+ (INPROG_GWB(inprog) < 40) &&
+ ((INPROG_GRB(inprog) == 0) ||
+ (INPROG_GRB((inprog)) == 40))) {
+ i++;
+ } else {
+ i = 0;
+ hard_lp_ctr--;
+ }
+ } while (hard_lp_ctr && (i < 10));
+
+ if (hard_lp_ctr == 0)
+ dev_warn(rvu->dev, "CPT FLR hits hard loop counter\n");
+}
+
+int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf, int slot)
+{
+ u64 reg;
+
+ if (is_cpt_pf(rvu, pcifunc) || is_cpt_vf(rvu, pcifunc))
+ cpt_rxc_teardown(rvu, blkaddr);
+
+ /* Enable BAR2 ALIAS for this pcifunc. */
+ reg = BIT_ULL(16) | pcifunc;
+ rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, reg);
+
+ cpt_lf_disable_iqueue(rvu, blkaddr, slot);
+
+ /* Set group drop to help clear out hardware */
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG));
+ reg |= BIT_ULL(17);
+ rvu_write64(rvu, blkaddr, CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG), reg);
+
+ rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, 0);
+
+ return 0;
+}
+
+#define CPT_RES_LEN 16
+#define CPT_SE_IE_EGRP 1ULL
+
+static int cpt_inline_inb_lf_cmd_send(struct rvu *rvu, int blkaddr,
+ int nix_blkaddr)
+{
+ int cpt_pf_num = rvu->cpt_pf_num;
+ struct cpt_inst_lmtst_req *req;
+ dma_addr_t res_daddr;
+ int timeout = 3000;
+ u8 cpt_idx;
+ u64 *inst;
+ u16 *res;
+ int rc;
+
+ res = kzalloc(CPT_RES_LEN, GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+
+ res_daddr = dma_map_single(rvu->dev, res, CPT_RES_LEN,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(rvu->dev, res_daddr)) {
+ dev_err(rvu->dev, "DMA mapping failed for CPT result\n");
+ rc = -EFAULT;
+ goto res_free;
+ }
+ *res = 0xFFFF;
+
+ /* Send mbox message to CPT PF */
+ req = (struct cpt_inst_lmtst_req *)
+ otx2_mbox_alloc_msg_rsp(&rvu->afpf_wq_info.mbox_up,
+ cpt_pf_num, sizeof(*req),
+ sizeof(struct msg_rsp));
+ if (!req) {
+ rc = -ENOMEM;
+ goto res_daddr_unmap;
+ }
+ req->hdr.sig = OTX2_MBOX_REQ_SIG;
+ req->hdr.id = MBOX_MSG_CPT_INST_LMTST;
+
+ inst = req->inst;
+ /* Prepare CPT_INST_S */
+ inst[0] = 0;
+ inst[1] = res_daddr;
+ /* AF PF FUNC */
+ inst[2] = 0;
+ /* Set QORD */
+ inst[3] = 1;
+ inst[4] = 0;
+ inst[5] = 0;
+ inst[6] = 0;
+ /* Set EGRP */
+ inst[7] = CPT_SE_IE_EGRP << 61;
+
+ /* Subtract 1 from the NIX-CPT credit count to preserve
+ * credit counts.
+ */
+ cpt_idx = (blkaddr == BLKADDR_CPT0) ? 0 : 1;
+ rvu_write64(rvu, nix_blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
+ BIT_ULL(22) - 1);
+
+ otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, cpt_pf_num);
+ rc = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, cpt_pf_num);
+ if (rc)
+ dev_warn(rvu->dev, "notification to pf %d failed\n",
+ cpt_pf_num);
+ /* Wait for CPT instruction to be completed */
+ do {
+ mdelay(1);
+ if (*res == 0xFFFF)
+ timeout--;
+ else
+ break;
+ } while (timeout);
+
+ if (timeout == 0)
+ dev_warn(rvu->dev, "Poll for result hits hard loop counter\n");
+
+res_daddr_unmap:
+ dma_unmap_single(rvu->dev, res_daddr, CPT_RES_LEN, DMA_BIDIRECTIONAL);
+res_free:
+ kfree(res);
+
+ return 0;
+}
+
+#define CTX_CAM_PF_FUNC GENMASK_ULL(61, 46)
+#define CTX_CAM_CPTR GENMASK_ULL(45, 0)
+
+int rvu_cpt_ctx_flush(struct rvu *rvu, u16 pcifunc)
+{
+ int nix_blkaddr, blkaddr;
+ u16 max_ctx_entries, i;
+ int slot = 0, num_lfs;
+ u64 reg, cam_data;
+ int rc;
+
+ nix_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (nix_blkaddr < 0)
+ return -EINVAL;
+
+ if (is_rvu_otx2(rvu))
+ return 0;
+
+ blkaddr = (nix_blkaddr == BLKADDR_NIX1) ? BLKADDR_CPT1 : BLKADDR_CPT0;
+
+ /* Submit CPT_INST_S to track when all packets have been
+ * flushed through for the NIX PF FUNC in inline inbound case.
+ */
+ rc = cpt_inline_inb_lf_cmd_send(rvu, blkaddr, nix_blkaddr);
+ if (rc)
+ return rc;
+
+ /* Wait for rxc entries to be flushed out */
+ cpt_rxc_teardown(rvu, blkaddr);
+
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS0);
+ max_ctx_entries = (reg >> 48) & 0xFFF;
+
+ mutex_lock(&rvu->rsrc_lock);
+
+ num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
+ blkaddr);
+ if (num_lfs == 0) {
+ dev_warn(rvu->dev, "CPT LF is not configured\n");
+ goto unlock;
+ }
+
+ /* Enable BAR2 ALIAS for this pcifunc. */
+ reg = BIT_ULL(16) | pcifunc;
+ rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, reg);
+
+ for (i = 0; i < max_ctx_entries; i++) {
+ cam_data = rvu_read64(rvu, blkaddr, CPT_AF_CTX_CAM_DATA(i));
+
+ if ((FIELD_GET(CTX_CAM_PF_FUNC, cam_data) == pcifunc) &&
+ FIELD_GET(CTX_CAM_CPTR, cam_data)) {
+ reg = BIT_ULL(46) | FIELD_GET(CTX_CAM_CPTR, cam_data);
+ rvu_write64(rvu, blkaddr,
+ CPT_AF_BAR2_ALIASX(slot, CPT_LF_CTX_FLUSH),
+ reg);
+ }
+ }
+ rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, 0);
+
+unlock:
+ mutex_unlock(&rvu->rsrc_lock);
+
+ return 0;
+}
+
+int rvu_cpt_init(struct rvu *rvu)
+{
+ /* Retrieve CPT PF number */
+ rvu->cpt_pf_num = get_cpt_pf_num(rvu);
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index 5205796859f6..d71c3e51373a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -1,11 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
-/* Marvell OcteonTx2 RVU Admin Function driver
+/* Marvell RVU Admin Function driver
*
- * Copyright (C) 2019 Marvell International Ltd.
+ * Copyright (C) 2019 Marvell.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifdef CONFIG_DEBUG_FS
@@ -19,6 +16,7 @@
#include "rvu_reg.h"
#include "rvu.h"
#include "cgx.h"
+#include "lmac_common.h"
#include "npc.h"
#define DEBUGFS_DIR_NAME "octeontx2"
@@ -97,7 +95,7 @@ static char *cgx_tx_stats_fields[] = {
[CGX_STAT5] = "Total frames sent on the interface",
[CGX_STAT6] = "Packets sent with an octet count < 64",
[CGX_STAT7] = "Packets sent with an octet count == 64",
- [CGX_STAT8] = "Packets sent with an octet count of 65–127",
+ [CGX_STAT8] = "Packets sent with an octet count of 65-127",
[CGX_STAT9] = "Packets sent with an octet count of 128-255",
[CGX_STAT10] = "Packets sent with an octet count of 256-511",
[CGX_STAT11] = "Packets sent with an octet count of 512-1023",
@@ -109,6 +107,89 @@ static char *cgx_tx_stats_fields[] = {
[CGX_STAT17] = "Control/PAUSE packets sent",
};
+static char *rpm_rx_stats_fields[] = {
+ "Octets of received packets",
+ "Octets of received packets with out error",
+ "Received packets with alignment errors",
+ "Control/PAUSE packets received",
+ "Packets received with Frame too long Errors",
+ "Packets received with a1nrange length Errors",
+ "Received packets",
+ "Packets received with FrameCheckSequenceErrors",
+ "Packets received with VLAN header",
+ "Error packets",
+ "Packets received with unicast DMAC",
+ "Packets received with multicast DMAC",
+ "Packets received with broadcast DMAC",
+ "Dropped packets",
+ "Total frames received on interface",
+ "Packets received with an octet count < 64",
+ "Packets received with an octet count == 64",
+ "Packets received with an octet count of 65-127",
+ "Packets received with an octet count of 128-255",
+ "Packets received with an octet count of 256-511",
+ "Packets received with an octet count of 512-1023",
+ "Packets received with an octet count of 1024-1518",
+ "Packets received with an octet count of > 1518",
+ "Oversized Packets",
+ "Jabber Packets",
+ "Fragmented Packets",
+ "CBFC(class based flow control) pause frames received for class 0",
+ "CBFC pause frames received for class 1",
+ "CBFC pause frames received for class 2",
+ "CBFC pause frames received for class 3",
+ "CBFC pause frames received for class 4",
+ "CBFC pause frames received for class 5",
+ "CBFC pause frames received for class 6",
+ "CBFC pause frames received for class 7",
+ "CBFC pause frames received for class 8",
+ "CBFC pause frames received for class 9",
+ "CBFC pause frames received for class 10",
+ "CBFC pause frames received for class 11",
+ "CBFC pause frames received for class 12",
+ "CBFC pause frames received for class 13",
+ "CBFC pause frames received for class 14",
+ "CBFC pause frames received for class 15",
+ "MAC control packets received",
+};
+
+static char *rpm_tx_stats_fields[] = {
+ "Total octets sent on the interface",
+ "Total octets transmitted OK",
+ "Control/Pause frames sent",
+ "Total frames transmitted OK",
+ "Total frames sent with VLAN header",
+ "Error Packets",
+ "Packets sent to unicast DMAC",
+ "Packets sent to the multicast DMAC",
+ "Packets sent to a broadcast DMAC",
+ "Packets sent with an octet count == 64",
+ "Packets sent with an octet count of 65-127",
+ "Packets sent with an octet count of 128-255",
+ "Packets sent with an octet count of 256-511",
+ "Packets sent with an octet count of 512-1023",
+ "Packets sent with an octet count of 1024-1518",
+ "Packets sent with an octet count of > 1518",
+ "CBFC(class based flow control) pause frames transmitted for class 0",
+ "CBFC pause frames transmitted for class 1",
+ "CBFC pause frames transmitted for class 2",
+ "CBFC pause frames transmitted for class 3",
+ "CBFC pause frames transmitted for class 4",
+ "CBFC pause frames transmitted for class 5",
+ "CBFC pause frames transmitted for class 6",
+ "CBFC pause frames transmitted for class 7",
+ "CBFC pause frames transmitted for class 8",
+ "CBFC pause frames transmitted for class 9",
+ "CBFC pause frames transmitted for class 10",
+ "CBFC pause frames transmitted for class 11",
+ "CBFC pause frames transmitted for class 12",
+ "CBFC pause frames transmitted for class 13",
+ "CBFC pause frames transmitted for class 14",
+ "CBFC pause frames transmitted for class 15",
+ "MAC control packets sent",
+ "Total frames sent on the interface"
+};
+
#define NDC_MAX_BANK(rvu, blk_addr) (rvu_read64(rvu, \
blk_addr, NDC_AF_CONST) & 0xFF)
@@ -139,6 +220,96 @@ static const struct file_operations rvu_dbg_##name##_fops = { \
static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);
+#define LMT_MAPTBL_ENTRY_SIZE 16
+/* Dump LMTST map table */
+static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
+ char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct rvu *rvu = filp->private_data;
+ u64 lmt_addr, val, tbl_base;
+ int pf, vf, num_vfs, hw_vfs;
+ void __iomem *lmt_map_base;
+ int index = 0, off = 0;
+ int bytes_not_copied;
+ int buf_size = 10240;
+ char *buf;
+
+ /* don't allow partial reads */
+ if (*ppos != 0)
+ return 0;
+
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf)
+		return -ENOMEM;
+
+ tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
+
+ lmt_map_base = ioremap_wc(tbl_base, 128 * 1024);
+ if (!lmt_map_base) {
+ dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
+ kfree(buf);
+		return -ENOMEM;
+ }
+
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ "\n\t\t\t\t\tLmtst Map Table Entries");
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ "\n\t\t\t\t\t=======================");
+ off += scnprintf(&buf[off], buf_size - 1 - off, "\nPcifunc\t\t\t");
+ off += scnprintf(&buf[off], buf_size - 1 - off, "Table Index\t\t");
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ "Lmtline Base (word 0)\t\t");
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ "Lmt Map Entry (word 1)");
+ off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
+ for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+ off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d \t\t\t",
+ pf);
+
+ index = pf * rvu->hw->total_vfs * LMT_MAPTBL_ENTRY_SIZE;
+ off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%llx\t\t",
+ (tbl_base + index));
+ lmt_addr = readq(lmt_map_base + index);
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ " 0x%016llx\t\t", lmt_addr);
+ index += 8;
+ val = readq(lmt_map_base + index);
+ off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%016llx\n",
+ val);
+ /* Reading num of VFs per PF */
+ rvu_get_pf_numvfs(rvu, pf, &num_vfs, &hw_vfs);
+ for (vf = 0; vf < num_vfs; vf++) {
+			index = (pf * rvu->hw->total_vfs * LMT_MAPTBL_ENTRY_SIZE) +
+ ((vf + 1) * LMT_MAPTBL_ENTRY_SIZE);
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ "PF%d:VF%d \t\t", pf, vf);
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ " 0x%llx\t\t", (tbl_base + index));
+ lmt_addr = readq(lmt_map_base + index);
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ " 0x%016llx\t\t", lmt_addr);
+ index += 8;
+ val = readq(lmt_map_base + index);
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ " 0x%016llx\n", val);
+ }
+ }
+ off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
+
+ bytes_not_copied = copy_to_user(buffer, buf, off);
+ kfree(buf);
+
+ iounmap(lmt_map_base);
+ if (bytes_not_copied)
+ return -EFAULT;
+
+ *ppos = off;
+ return off;
+}
+
+RVU_DEBUG_FOPS(lmtst_map_table, lmtst_map_table_display, NULL);
+
static void get_lf_str_list(struct rvu_block block, int pcifunc,
char *lfs)
{
@@ -237,6 +408,7 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
kfree(buf);
return -ENOMEM;
}
+
off += scnprintf(&buf[off], buf_size - 1 - off, "%-*s", lf_str_size,
"pcifunc");
for (index = 0; index < BLK_COUNT; index++)
@@ -312,18 +484,59 @@ out:
RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
-static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blktype, int lf,
+static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
+{
+ struct rvu *rvu = filp->private;
+ struct pci_dev *pdev = NULL;
+ struct mac_ops *mac_ops;
+ char cgx[10], lmac[10];
+ struct rvu_pfvf *pfvf;
+ int pf, domain, blkid;
+ u8 cgx_id, lmac_id;
+ u16 pcifunc;
+
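+ /* The lookup below assumes the RVU PF's netdev function is at
+  * PCI domain 2, bus pf + 1, devfn 0 on this silicon; that
+  * assumption is only used to resolve the PCI device name.
+  */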
+ domain = 2;
+ mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
+ /* There can be no CGX devices at all */
+ if (!mac_ops)
+ return 0;
+ seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\n",
+ mac_ops->name);
+ for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+ if (!is_pf_cgxmapped(rvu, pf))
+ continue;
+
+ pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0);
+ if (!pdev)
+ continue;
+
+ cgx[0] = 0;
+ lmac[0] = 0;
+ pcifunc = pf << 10;
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+
+ if (pfvf->nix_blkaddr == BLKADDR_NIX0)
+ blkid = 0;
+ else
+ blkid = 1;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id,
+ &lmac_id);
+ sprintf(cgx, "%s%d", mac_ops->name, cgx_id);
+ sprintf(lmac, "LMAC%d", lmac_id);
+ seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\n",
+ dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac);
+ }
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(rvu_pf_cgx_map, rvu_pf_cgx_map_display, NULL);
+
+static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blkaddr, int lf,
u16 *pcifunc)
{
struct rvu_block *block;
struct rvu_hwinfo *hw;
- int blkaddr;
-
- blkaddr = rvu_get_blkaddr(rvu, blktype, 0);
- if (blkaddr < 0) {
- dev_warn(rvu->dev, "Invalid blktype\n");
- return false;
- }
hw = rvu->hw;
block = &hw->block[blkaddr];
@@ -379,10 +592,12 @@ static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused,
{
void (*print_qsize)(struct seq_file *filp,
struct rvu_pfvf *pfvf) = NULL;
+ struct dentry *current_dir;
struct rvu_pfvf *pfvf;
struct rvu *rvu;
int qsize_id;
u16 pcifunc;
+ int blkaddr;
rvu = filp->private;
switch (blktype) {
@@ -400,7 +615,15 @@ static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused,
return -EINVAL;
}
- if (!rvu_dbg_is_valid_lf(rvu, blktype, qsize_id, &pcifunc))
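+ /* NIX0 and NIX1 share these qsize handlers; the block instance is
+  * derived from the parent debugfs directory name ("nix" vs "nix1").
+  */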
+ if (blktype == BLKTYPE_NPA) {
+ blkaddr = BLKADDR_NPA;
+ } else {
+ current_dir = filp->file->f_path.dentry->d_parent;
+ blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
+ BLKADDR_NIX1 : BLKADDR_NIX0);
+ }
+
+ if (!rvu_dbg_is_valid_lf(rvu, blkaddr, qsize_id, &pcifunc))
return -EINVAL;
pfvf = rvu_get_pfvf(rvu, pcifunc);
@@ -417,6 +640,8 @@ static ssize_t rvu_dbg_qsize_write(struct file *filp,
struct seq_file *seqfile = filp->private_data;
char *cmd_buf, *cmd_buf_tmp, *subtoken;
struct rvu *rvu = seqfile->private;
+ struct dentry *current_dir;
+ int blkaddr;
u16 pcifunc;
int ret, lf;
@@ -438,12 +663,20 @@ static ssize_t rvu_dbg_qsize_write(struct file *filp,
if (cmd_buf)
ret = -EINVAL;
- if (!strncmp(subtoken, "help", 4) || ret < 0) {
+ if (ret < 0 || !strncmp(subtoken, "help", 4)) {
dev_info(rvu->dev, "Use echo <%s-lf > qsize\n", blk_string);
goto qsize_write_done;
}
- if (!rvu_dbg_is_valid_lf(rvu, blktype, lf, &pcifunc)) {
+ if (blktype == BLKTYPE_NPA) {
+ blkaddr = BLKADDR_NPA;
+ } else {
+ current_dir = filp->f_path.dentry->d_parent;
+ blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
+ BLKADDR_NIX1 : BLKADDR_NIX0);
+ }
+
+ if (!rvu_dbg_is_valid_lf(rvu, blkaddr, lf, &pcifunc)) {
ret = -EINVAL;
goto qsize_write_done;
}
@@ -476,6 +709,7 @@ RVU_DEBUG_SEQ_FOPS(npa_qsize, npa_qsize_display, npa_qsize_write);
static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
struct npa_aura_s *aura = &rsp->aura;
+ struct rvu *rvu = m->private;
seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr);
@@ -495,6 +729,9 @@ static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n",
(u64)aura->limit, aura->bp, aura->fc_ena);
+
+ if (!is_rvu_otx2(rvu))
+ seq_printf(m, "W3: fc_be\t\t%d\n", aura->fc_be);
seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n",
aura->fc_up_crossing, aura->fc_stype);
seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits);
@@ -512,12 +749,15 @@ static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx);
seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh);
+ if (!is_rvu_otx2(rvu))
+ seq_printf(m, "W6: fc_msh_dst\t\t%d\n", aura->fc_msh_dst);
}
/* Dumps given NPA Pool's context */
static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
struct npa_pool_s *pool = &rsp->pool;
+ struct rvu *rvu = m->private;
seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base);
@@ -539,6 +779,8 @@ static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
pool->avg_con, pool->fc_ena, pool->fc_stype);
seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n",
pool->fc_hyst_bits, pool->fc_up_crossing);
+ if (!is_rvu_otx2(rvu))
+ seq_printf(m, "W4: fc_be\t\t%d\n", pool->fc_be);
seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time);
seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr);
@@ -552,8 +794,10 @@ static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int);
seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n",
pool->thresh_int_ena, pool->thresh_up);
- seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t\t%d\n",
+ seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t%d\n",
pool->thresh_qint_idx, pool->err_qint_idx);
+ if (!is_rvu_otx2(rvu))
+ seq_printf(m, "W8: fc_msh_dst\t\t%d\n", pool->fc_msh_dst);
}
/* Reads aura/pool's ctx from admin queue */
@@ -586,7 +830,7 @@ static int rvu_dbg_npa_ctx_display(struct seq_file *m, void *unused, int ctype)
return -EINVAL;
}
- if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NPA, npalf, &pcifunc))
+ if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
return -EINVAL;
pfvf = rvu_get_pfvf(rvu, pcifunc);
@@ -644,7 +888,7 @@ static int write_npa_ctx(struct rvu *rvu, bool all,
int max_id = 0;
u16 pcifunc;
- if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NPA, npalf, &pcifunc))
+ if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
return -EINVAL;
pfvf = rvu_get_pfvf(rvu, pcifunc);
@@ -792,9 +1036,17 @@ static void ndc_cache_stats(struct seq_file *s, int blk_addr,
int ctype, int transaction)
{
u64 req, out_req, lat, cant_alloc;
- struct rvu *rvu = s->private;
+ struct nix_hw *nix_hw;
+ struct rvu *rvu;
int port;
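+ /* The NPA NDC debugfs file is registered with 'rvu' as its private
+  * data, while the per-NIX NDC files are registered with their
+  * 'nix_hw' (see rvu_dbg_npa_init/rvu_dbg_nix_init below).
+  */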
+ if (blk_addr == BLKADDR_NDC_NPA0) {
+ rvu = s->private;
+ } else {
+ nix_hw = s->private;
+ rvu = nix_hw->rvu;
+ }
+
for (port = 0; port < NDC_MAX_PORT; port++) {
req = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_REQ_PC
(port, ctype, transaction));
@@ -837,9 +1089,17 @@ RVU_DEBUG_SEQ_FOPS(npa_ndc_cache, npa_ndc_cache_display, NULL);
static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
{
- struct rvu *rvu = s->private;
+ struct nix_hw *nix_hw;
+ struct rvu *rvu;
int bank, max_bank;
+ if (blk_addr == BLKADDR_NDC_NPA0) {
+ rvu = s->private;
+ } else {
+ nix_hw = s->private;
+ rvu = nix_hw->rvu;
+ }
+
max_bank = NDC_MAX_BANK(rvu, blk_addr);
for (bank = 0; bank < max_bank; bank++) {
seq_printf(s, "BANK:%d\n", bank);
@@ -855,16 +1115,30 @@ static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
static int rvu_dbg_nix_ndc_rx_cache_display(struct seq_file *filp, void *unused)
{
- return ndc_blk_cache_stats(filp, NIX0_RX,
- BLKADDR_NDC_NIX0_RX);
+ struct nix_hw *nix_hw = filp->private;
+ int blkaddr = 0;
+ int ndc_idx = 0;
+
+ blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
+ BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
+ ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_RX : NIX0_RX);
+
+ return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
}
RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_cache, nix_ndc_rx_cache_display, NULL);
static int rvu_dbg_nix_ndc_tx_cache_display(struct seq_file *filp, void *unused)
{
- return ndc_blk_cache_stats(filp, NIX0_TX,
- BLKADDR_NDC_NIX0_TX);
+ struct nix_hw *nix_hw = filp->private;
+ int blkaddr = 0;
+ int ndc_idx = 0;
+
+ blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
+ BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
+ ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_TX : NIX0_TX);
+
+ return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
}
RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_cache, nix_ndc_tx_cache_display, NULL);
@@ -880,8 +1154,14 @@ RVU_DEBUG_SEQ_FOPS(npa_ndc_hits_miss, npa_ndc_hits_miss_display, NULL);
static int rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file *filp,
void *unused)
{
- return ndc_blk_hits_miss_stats(filp,
- NPA0_U, BLKADDR_NDC_NIX0_RX);
+ struct nix_hw *nix_hw = filp->private;
+ int ndc_idx = NPA0_U;
+ int blkaddr = 0;
+
+ blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
+ BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
+
+ return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
}
RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_hits_miss, nix_ndc_rx_hits_miss_display, NULL);
@@ -889,16 +1169,92 @@ RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_hits_miss, nix_ndc_rx_hits_miss_display, NULL);
static int rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file *filp,
void *unused)
{
- return ndc_blk_hits_miss_stats(filp,
- NPA0_U, BLKADDR_NDC_NIX0_TX);
+ struct nix_hw *nix_hw = filp->private;
+ int ndc_idx = NPA0_U;
+ int blkaddr = 0;
+
+ blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
+ BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
+
+ return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
}
RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_hits_miss, nix_ndc_tx_hits_miss_display, NULL);
+static void print_nix_cn10k_sq_ctx(struct seq_file *m,
+ struct nix_cn10k_sq_ctx_s *sq_ctx)
+{
+ seq_printf(m, "W0: ena \t\t\t%d\nW0: qint_idx \t\t\t%d\n",
+ sq_ctx->ena, sq_ctx->qint_idx);
+ seq_printf(m, "W0: substream \t\t\t0x%03x\nW0: sdp_mcast \t\t\t%d\n",
+ sq_ctx->substream, sq_ctx->sdp_mcast);
+ seq_printf(m, "W0: cq \t\t\t\t%d\nW0: sqe_way_mask \t\t%d\n\n",
+ sq_ctx->cq, sq_ctx->sqe_way_mask);
+
+ seq_printf(m, "W1: smq \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: xoff\t\t\t%d\n",
+ sq_ctx->smq, sq_ctx->cq_ena, sq_ctx->xoff);
+ seq_printf(m, "W1: sso_ena \t\t\t%d\nW1: smq_rr_weight\t\t%d\n",
+ sq_ctx->sso_ena, sq_ctx->smq_rr_weight);
+ seq_printf(m, "W1: default_chan\t\t%d\nW1: sqb_count\t\t\t%d\n\n",
+ sq_ctx->default_chan, sq_ctx->sqb_count);
+
+ seq_printf(m, "W2: smq_rr_count_lb \t\t%d\n", sq_ctx->smq_rr_count_lb);
+ seq_printf(m, "W2: smq_rr_count_ub \t\t%d\n", sq_ctx->smq_rr_count_ub);
+ seq_printf(m, "W2: sqb_aura \t\t\t%d\nW2: sq_int \t\t\t%d\n",
+ sq_ctx->sqb_aura, sq_ctx->sq_int);
+ seq_printf(m, "W2: sq_int_ena \t\t\t%d\nW2: sqe_stype \t\t\t%d\n",
+ sq_ctx->sq_int_ena, sq_ctx->sqe_stype);
+
+ seq_printf(m, "W3: max_sqe_size\t\t%d\nW3: cq_limit\t\t\t%d\n",
+ sq_ctx->max_sqe_size, sq_ctx->cq_limit);
+ seq_printf(m, "W3: lmt_dis \t\t\t%d\nW3: mnq_dis \t\t\t%d\n",
+ sq_ctx->mnq_dis, sq_ctx->lmt_dis);
+ seq_printf(m, "W3: smq_next_sq\t\t\t%d\nW3: smq_lso_segnum\t\t%d\n",
+ sq_ctx->smq_next_sq, sq_ctx->smq_lso_segnum);
+ seq_printf(m, "W3: tail_offset \t\t%d\nW3: smenq_offset\t\t%d\n",
+ sq_ctx->tail_offset, sq_ctx->smenq_offset);
+ seq_printf(m, "W3: head_offset\t\t\t%d\nW3: smenq_next_sqb_vld\t\t%d\n\n",
+ sq_ctx->head_offset, sq_ctx->smenq_next_sqb_vld);
+
+ seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
+ sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
+ seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
+ seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
+ seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
+ seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
+ sq_ctx->smenq_next_sqb);
+
+ seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);
+
+ seq_printf(m, "W9: vfi_lso_total\t\t%d\n", sq_ctx->vfi_lso_total);
+ seq_printf(m, "W9: vfi_lso_sizem1\t\t%d\nW9: vfi_lso_sb\t\t\t%d\n",
+ sq_ctx->vfi_lso_sizem1, sq_ctx->vfi_lso_sb);
+ seq_printf(m, "W9: vfi_lso_mps\t\t\t%d\nW9: vfi_lso_vlan0_ins_ena\t%d\n",
+ sq_ctx->vfi_lso_mps, sq_ctx->vfi_lso_vlan0_ins_ena);
+ seq_printf(m, "W9: vfi_lso_vlan1_ins_ena\t%d\nW9: vfi_lso_vld \t\t%d\n\n",
+ sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
+
+ seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
+ (u64)sq_ctx->scm_lso_rem);
+ seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
+ seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
+ seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
+ (u64)sq_ctx->dropped_octs);
+ seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
+ (u64)sq_ctx->dropped_pkts);
+}
+
/* Dumps given nix_sq's context */
static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
struct nix_sq_ctx_s *sq_ctx = &rsp->sq;
+ struct nix_hw *nix_hw = m->private;
+ struct rvu *rvu = nix_hw->rvu;
+
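+ /* On CN10K (non-OcteonTx2) silicon the SQ context layout differs,
+  * so reinterpret the response as the CN10K structure and dump it
+  * via the CN10K-specific helper.
+  */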
+ if (!is_rvu_otx2(rvu)) {
+ print_nix_cn10k_sq_ctx(m, (struct nix_cn10k_sq_ctx_s *)sq_ctx);
+ return;
+ }
seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n",
sq_ctx->sqe_way_mask, sq_ctx->cq);
@@ -959,10 +1315,94 @@ static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
(u64)sq_ctx->dropped_pkts);
}
+static void print_nix_cn10k_rq_ctx(struct seq_file *m,
+ struct nix_cn10k_rq_ctx_s *rq_ctx)
+{
+ seq_printf(m, "W0: ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
+ rq_ctx->ena, rq_ctx->sso_ena);
+ seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
+ rq_ctx->ipsech_ena, rq_ctx->ena_wqwd);
+ seq_printf(m, "W0: cq \t\t\t\t%d\nW0: lenerr_dis \t\t\t%d\n",
+ rq_ctx->cq, rq_ctx->lenerr_dis);
+ seq_printf(m, "W0: csum_il4_dis \t\t%d\nW0: csum_ol4_dis \t\t%d\n",
+ rq_ctx->csum_il4_dis, rq_ctx->csum_ol4_dis);
+ seq_printf(m, "W0: len_il4_dis \t\t%d\nW0: len_il3_dis \t\t%d\n",
+ rq_ctx->len_il4_dis, rq_ctx->len_il3_dis);
+ seq_printf(m, "W0: len_ol4_dis \t\t%d\nW0: len_ol3_dis \t\t%d\n",
+ rq_ctx->len_ol4_dis, rq_ctx->len_ol3_dis);
+ seq_printf(m, "W0: wqe_aura \t\t\t%d\n\n", rq_ctx->wqe_aura);
+
+ seq_printf(m, "W1: spb_aura \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
+ rq_ctx->spb_aura, rq_ctx->lpb_aura);
+ seq_printf(m, "W1: spb_aura \t\t\t%d\n", rq_ctx->spb_aura);
+ seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
+ rq_ctx->sso_grp, rq_ctx->sso_tt);
+ seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: wqe_caching \t\t%d\n",
+ rq_ctx->pb_caching, rq_ctx->wqe_caching);
+ seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
+ rq_ctx->xqe_drop_ena, rq_ctx->spb_drop_ena);
+ seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: pb_stashing \t\t%d\n",
+ rq_ctx->lpb_drop_ena, rq_ctx->pb_stashing);
+ seq_printf(m, "W1: ipsecd_drop_ena \t\t%d\nW1: chi_ena \t\t\t%d\n\n",
+ rq_ctx->ipsecd_drop_ena, rq_ctx->chi_ena);
+
+ seq_printf(m, "W2: band_prof_id \t\t%d\n", rq_ctx->band_prof_id);
+ seq_printf(m, "W2: policer_ena \t\t%d\n", rq_ctx->policer_ena);
+ seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n", rq_ctx->spb_sizem1);
+ seq_printf(m, "W2: wqe_skip \t\t\t%d\nW2: sqb_ena \t\t\t%d\n",
+ rq_ctx->wqe_skip, rq_ctx->spb_ena);
+ seq_printf(m, "W2: lpb_size1 \t\t\t%d\nW2: first_skip \t\t\t%d\n",
+ rq_ctx->lpb_sizem1, rq_ctx->first_skip);
+ seq_printf(m, "W2: later_skip\t\t\t%d\nW2: xqe_imm_size\t\t%d\n",
+ rq_ctx->later_skip, rq_ctx->xqe_imm_size);
+ seq_printf(m, "W2: xqe_imm_copy \t\t%d\nW2: xqe_hdr_split \t\t%d\n\n",
+ rq_ctx->xqe_imm_copy, rq_ctx->xqe_hdr_split);
+
+ seq_printf(m, "W3: xqe_drop \t\t\t%d\nW3: xqe_pass \t\t\t%d\n",
+ rq_ctx->xqe_drop, rq_ctx->xqe_pass);
+ seq_printf(m, "W3: wqe_pool_drop \t\t%d\nW3: wqe_pool_pass \t\t%d\n",
+ rq_ctx->wqe_pool_drop, rq_ctx->wqe_pool_pass);
+ seq_printf(m, "W3: spb_pool_drop \t\t%d\nW3: spb_pool_pass \t\t%d\n",
+ rq_ctx->spb_pool_drop, rq_ctx->spb_pool_pass);
+ seq_printf(m, "W3: spb_aura_drop \t\t%d\nW3: spb_aura_pass \t\t%d\n\n",
+ rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
+
+ seq_printf(m, "W4: lpb_aura_drop \t\t%d\nW3: lpb_aura_pass \t\t%d\n",
+ rq_ctx->lpb_aura_pass, rq_ctx->lpb_aura_drop);
+ seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW3: lpb_pool_pass \t\t%d\n",
+ rq_ctx->lpb_pool_drop, rq_ctx->lpb_pool_pass);
+ seq_printf(m, "W4: rq_int \t\t\t%d\nW4: rq_int_ena\t\t\t%d\n",
+ rq_ctx->rq_int, rq_ctx->rq_int_ena);
+ seq_printf(m, "W4: qint_idx \t\t\t%d\n\n", rq_ctx->qint_idx);
+
+ seq_printf(m, "W5: ltag \t\t\t%d\nW5: good_utag \t\t\t%d\n",
+ rq_ctx->ltag, rq_ctx->good_utag);
+ seq_printf(m, "W5: bad_utag \t\t\t%d\nW5: flow_tagw \t\t\t%d\n",
+ rq_ctx->bad_utag, rq_ctx->flow_tagw);
+ seq_printf(m, "W5: ipsec_vwqe \t\t\t%d\nW5: vwqe_ena \t\t\t%d\n",
+ rq_ctx->ipsec_vwqe, rq_ctx->vwqe_ena);
+ seq_printf(m, "W5: vwqe_wait \t\t\t%d\nW5: max_vsize_exp\t\t%d\n",
+ rq_ctx->vwqe_wait, rq_ctx->max_vsize_exp);
+ seq_printf(m, "W5: vwqe_skip \t\t\t%d\n\n", rq_ctx->vwqe_skip);
+
+ seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
+ seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
+ seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
+ seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
+ seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
+}
+
/* Dumps given nix_rq's context */
static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
struct nix_rq_ctx_s *rq_ctx = &rsp->rq;
+ struct nix_hw *nix_hw = m->private;
+ struct rvu *rvu = nix_hw->rvu;
+
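+ /* CN10K uses a different RQ context layout; reinterpret the
+  * response accordingly and dump it via the CN10K helper.
+  */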
+ if (!is_rvu_otx2(rvu)) {
+ print_nix_cn10k_rq_ctx(m, (struct nix_cn10k_rq_ctx_s *)rq_ctx);
+ return;
+ }
seq_printf(m, "W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
rq_ctx->wqe_aura, rq_ctx->substream);
@@ -1057,7 +1497,8 @@ static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp,
{
void (*print_nix_ctx)(struct seq_file *filp,
struct nix_aq_enq_rsp *rsp) = NULL;
- struct rvu *rvu = filp->private;
+ struct nix_hw *nix_hw = filp->private;
+ struct rvu *rvu = nix_hw->rvu;
struct nix_aq_enq_req aq_req;
struct nix_aq_enq_rsp rsp;
char *ctype_string = NULL;
@@ -1089,7 +1530,7 @@ static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp,
return -EINVAL;
}
- if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NIX, nixlf, &pcifunc))
+ if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
return -EINVAL;
pfvf = rvu_get_pfvf(rvu, pcifunc);
@@ -1141,13 +1582,15 @@ static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp,
}
static int write_nix_queue_ctx(struct rvu *rvu, bool all, int nixlf,
- int id, int ctype, char *ctype_string)
+ int id, int ctype, char *ctype_string,
+ struct seq_file *m)
{
+ struct nix_hw *nix_hw = m->private;
struct rvu_pfvf *pfvf;
int max_id = 0;
u16 pcifunc;
- if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NIX, nixlf, &pcifunc))
+ if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
return -EINVAL;
pfvf = rvu_get_pfvf(rvu, pcifunc);
@@ -1207,7 +1650,8 @@ static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp,
int ctype)
{
struct seq_file *m = filp->private_data;
- struct rvu *rvu = m->private;
+ struct nix_hw *nix_hw = m->private;
+ struct rvu *rvu = nix_hw->rvu;
char *cmd_buf, *ctype_string;
int nixlf, id = 0, ret;
bool all = false;
@@ -1243,7 +1687,7 @@ static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp,
goto done;
} else {
ret = write_nix_queue_ctx(rvu, all, nixlf, id, ctype,
- ctype_string);
+ ctype_string, m);
}
done:
kfree(cmd_buf);
@@ -1347,102 +1791,242 @@ static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused)
RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write);
-static void rvu_dbg_nix_init(struct rvu *rvu)
+static ssize_t rvu_dbg_nix_tx_stall_hwissue_display(struct file *filp,
+ char __user *buffer,
+ size_t count, loff_t *ppos)
{
- const struct device *dev = &rvu->pdev->dev;
- struct dentry *pfile;
+ return rvu_nix_get_tx_stall_counters(filp->private_data, buffer, ppos);
+}
- rvu->rvu_dbg.nix = debugfs_create_dir("nix", rvu->rvu_dbg.root);
- if (!rvu->rvu_dbg.nix) {
- dev_err(rvu->dev, "create debugfs dir failed for nix\n");
- return;
- }
+RVU_DEBUG_FOPS(nix_tx_stall_hwissue, nix_tx_stall_hwissue_display, NULL);
- pfile = debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, rvu,
- &rvu_dbg_nix_sq_ctx_fops);
- if (!pfile)
- goto create_failed;
+static void print_band_prof_ctx(struct seq_file *m,
+ struct nix_bandprof_s *prof)
+{
+ char *str;
- pfile = debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, rvu,
- &rvu_dbg_nix_rq_ctx_fops);
- if (!pfile)
- goto create_failed;
+ switch (prof->pc_mode) {
+ case NIX_RX_PC_MODE_VLAN:
+ str = "VLAN";
+ break;
+ case NIX_RX_PC_MODE_DSCP:
+ str = "DSCP";
+ break;
+ case NIX_RX_PC_MODE_GEN:
+ str = "Generic";
+ break;
+ case NIX_RX_PC_MODE_RSVD:
+ str = "Reserved";
+ break;
+ }
+ seq_printf(m, "W0: pc_mode\t\t%s\n", str);
+ str = (prof->icolor == 3) ? "Color blind" :
+ (prof->icolor == 0) ? "Green" :
+ (prof->icolor == 1) ? "Yellow" : "Red";
+ seq_printf(m, "W0: icolor\t\t%s\n", str);
+ seq_printf(m, "W0: tnl_ena\t\t%d\n", prof->tnl_ena);
+ seq_printf(m, "W0: peir_exponent\t%d\n", prof->peir_exponent);
+ seq_printf(m, "W0: pebs_exponent\t%d\n", prof->pebs_exponent);
+ seq_printf(m, "W0: cir_exponent\t%d\n", prof->cir_exponent);
+ seq_printf(m, "W0: cbs_exponent\t%d\n", prof->cbs_exponent);
+ seq_printf(m, "W0: peir_mantissa\t%d\n", prof->peir_mantissa);
+ seq_printf(m, "W0: pebs_mantissa\t%d\n", prof->pebs_mantissa);
+ seq_printf(m, "W0: cir_mantissa\t%d\n", prof->cir_mantissa);
+
+ seq_printf(m, "W1: cbs_mantissa\t%d\n", prof->cbs_mantissa);
+ str = (prof->lmode == 0) ? "byte" : "packet";
+ seq_printf(m, "W1: lmode\t\t%s\n", str);
+ seq_printf(m, "W1: l_select\t\t%d\n", prof->l_sellect);
+ seq_printf(m, "W1: rdiv\t\t%d\n", prof->rdiv);
+ seq_printf(m, "W1: adjust_exponent\t%d\n", prof->adjust_exponent);
+ seq_printf(m, "W1: adjust_mantissa\t%d\n", prof->adjust_mantissa);
+ str = (prof->gc_action == 0) ? "PASS" :
+ (prof->gc_action == 1) ? "DROP" : "RED";
+ seq_printf(m, "W1: gc_action\t\t%s\n", str);
+ str = (prof->yc_action == 0) ? "PASS" :
+ (prof->yc_action == 1) ? "DROP" : "RED";
+ seq_printf(m, "W1: yc_action\t\t%s\n", str);
+ str = (prof->rc_action == 0) ? "PASS" :
+ (prof->rc_action == 1) ? "DROP" : "RED";
+ seq_printf(m, "W1: rc_action\t\t%s\n", str);
+ seq_printf(m, "W1: meter_algo\t\t%d\n", prof->meter_algo);
+ seq_printf(m, "W1: band_prof_id\t%d\n", prof->band_prof_id);
+ seq_printf(m, "W1: hl_en\t\t%d\n", prof->hl_en);
+
+ seq_printf(m, "W2: ts\t\t\t%lld\n", (u64)prof->ts);
+ seq_printf(m, "W3: pe_accum\t\t%d\n", prof->pe_accum);
+ seq_printf(m, "W3: c_accum\t\t%d\n", prof->c_accum);
+ seq_printf(m, "W4: green_pkt_pass\t%lld\n",
+ (u64)prof->green_pkt_pass);
+ seq_printf(m, "W5: yellow_pkt_pass\t%lld\n",
+ (u64)prof->yellow_pkt_pass);
+ seq_printf(m, "W6: red_pkt_pass\t%lld\n", (u64)prof->red_pkt_pass);
+ seq_printf(m, "W7: green_octs_pass\t%lld\n",
+ (u64)prof->green_octs_pass);
+ seq_printf(m, "W8: yellow_octs_pass\t%lld\n",
+ (u64)prof->yellow_octs_pass);
+ seq_printf(m, "W9: red_octs_pass\t%lld\n", (u64)prof->red_octs_pass);
+ seq_printf(m, "W10: green_pkt_drop\t%lld\n",
+ (u64)prof->green_pkt_drop);
+ seq_printf(m, "W11: yellow_pkt_drop\t%lld\n",
+ (u64)prof->yellow_pkt_drop);
+ seq_printf(m, "W12: red_pkt_drop\t%lld\n", (u64)prof->red_pkt_drop);
+ seq_printf(m, "W13: green_octs_drop\t%lld\n",
+ (u64)prof->green_octs_drop);
+ seq_printf(m, "W14: yellow_octs_drop\t%lld\n",
+ (u64)prof->yellow_octs_drop);
+ seq_printf(m, "W15: red_octs_drop\t%lld\n", (u64)prof->red_octs_drop);
+ seq_puts(m, "==============================\n");
+}
- pfile = debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, rvu,
- &rvu_dbg_nix_cq_ctx_fops);
- if (!pfile)
- goto create_failed;
+static int rvu_dbg_nix_band_prof_ctx_display(struct seq_file *m, void *unused)
+{
+ struct nix_hw *nix_hw = m->private;
+ struct nix_cn10k_aq_enq_req aq_req;
+ struct nix_cn10k_aq_enq_rsp aq_rsp;
+ struct rvu *rvu = nix_hw->rvu;
+ struct nix_ipolicer *ipolicer;
+ int layer, prof_idx, idx, rc;
+ u16 pcifunc;
+ char *str;
- pfile = debugfs_create_file("ndc_tx_cache", 0600, rvu->rvu_dbg.nix, rvu,
- &rvu_dbg_nix_ndc_tx_cache_fops);
- if (!pfile)
- goto create_failed;
+ /* Ingress policers do not exist on all platforms */
+ if (!nix_hw->ipolicer)
+ return 0;
- pfile = debugfs_create_file("ndc_rx_cache", 0600, rvu->rvu_dbg.nix, rvu,
- &rvu_dbg_nix_ndc_rx_cache_fops);
- if (!pfile)
- goto create_failed;
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ if (layer == BAND_PROF_INVAL_LAYER)
+ continue;
+ str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
+ (layer == BAND_PROF_MID_LAYER) ? "Mid" : "Top";
- pfile = debugfs_create_file("ndc_tx_hits_miss", 0600, rvu->rvu_dbg.nix,
- rvu, &rvu_dbg_nix_ndc_tx_hits_miss_fops);
- if (!pfile)
- goto create_failed;
+ seq_printf(m, "\n%s bandwidth profiles\n", str);
+ seq_puts(m, "=======================\n");
- pfile = debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix,
- rvu, &rvu_dbg_nix_ndc_rx_hits_miss_fops);
- if (!pfile)
- goto create_failed;
+ ipolicer = &nix_hw->ipolicer[layer];
- pfile = debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu,
- &rvu_dbg_nix_qsize_fops);
- if (!pfile)
- goto create_failed;
+ for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
+ if (is_rsrc_free(&ipolicer->band_prof, idx))
+ continue;
- return;
-create_failed:
- dev_err(dev, "Failed to create debugfs dir/file for NIX\n");
- debugfs_remove_recursive(rvu->rvu_dbg.nix);
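+ /* The AQ bandwidth-profile index encodes the layer in bits
+  * [15:14] above the 14-bit profile number.
+  */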
+ prof_idx = (idx & 0x3FFF) | (layer << 14);
+ rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
+ 0x00, NIX_AQ_CTYPE_BANDPROF,
+ prof_idx);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to fetch context of %s profile %d, err %d\n",
+ __func__, str, idx, rc);
+ return 0;
+ }
+ seq_printf(m, "\n%s bandwidth profile:: %d\n", str, idx);
+ pcifunc = ipolicer->pfvf_map[idx];
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK))
+ seq_printf(m, "Allocated to :: PF %d\n",
+ rvu_get_pf(pcifunc));
+ else
+ seq_printf(m, "Allocated to :: PF %d VF %d\n",
+ rvu_get_pf(pcifunc),
+ (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
+ print_band_prof_ctx(m, &aq_rsp.prof);
+ }
+ }
+ return 0;
}
-static void rvu_dbg_npa_init(struct rvu *rvu)
+RVU_DEBUG_SEQ_FOPS(nix_band_prof_ctx, nix_band_prof_ctx_display, NULL);
+
+static int rvu_dbg_nix_band_prof_rsrc_display(struct seq_file *m, void *unused)
{
- const struct device *dev = &rvu->pdev->dev;
- struct dentry *pfile;
+ struct nix_hw *nix_hw = m->private;
+ struct nix_ipolicer *ipolicer;
+ int layer;
+ char *str;
- rvu->rvu_dbg.npa = debugfs_create_dir("npa", rvu->rvu_dbg.root);
- if (!rvu->rvu_dbg.npa)
- return;
+ /* Ingress policers do not exist on all platforms */
+ if (!nix_hw->ipolicer)
+ return 0;
+
+ seq_puts(m, "\nBandwidth profile resource free count\n");
+ seq_puts(m, "=====================================\n");
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ if (layer == BAND_PROF_INVAL_LAYER)
+ continue;
+ str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
+ (layer == BAND_PROF_MID_LAYER) ? "Mid " : "Top ";
+
+ ipolicer = &nix_hw->ipolicer[layer];
+ seq_printf(m, "%s :: Max: %4d Free: %4d\n", str,
+ ipolicer->band_prof.max,
+ rvu_rsrc_free_count(&ipolicer->band_prof));
+ }
+ seq_puts(m, "=====================================\n");
- pfile = debugfs_create_file("qsize", 0600, rvu->rvu_dbg.npa, rvu,
- &rvu_dbg_npa_qsize_fops);
- if (!pfile)
- goto create_failed;
+ return 0;
+}
- pfile = debugfs_create_file("aura_ctx", 0600, rvu->rvu_dbg.npa, rvu,
- &rvu_dbg_npa_aura_ctx_fops);
- if (!pfile)
- goto create_failed;
+RVU_DEBUG_SEQ_FOPS(nix_band_prof_rsrc, nix_band_prof_rsrc_display, NULL);
- pfile = debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu,
- &rvu_dbg_npa_pool_ctx_fops);
- if (!pfile)
- goto create_failed;
+static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr)
+{
+ struct nix_hw *nix_hw;
- pfile = debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu,
- &rvu_dbg_npa_ndc_cache_fops);
- if (!pfile)
- goto create_failed;
+ if (!is_block_implemented(rvu->hw, blkaddr))
+ return;
- pfile = debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa,
- rvu, &rvu_dbg_npa_ndc_hits_miss_fops);
- if (!pfile)
- goto create_failed;
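+ /* For NIX0 the files go into the existing top-level "nix"
+  * debugfs directory (created elsewhere); a second NIX block
+  * gets its own "nix1" directory.
+  */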
+ if (blkaddr == BLKADDR_NIX0) {
+ nix_hw = &rvu->hw->nix[0];
+ } else {
+ rvu->rvu_dbg.nix = debugfs_create_dir("nix1",
+ rvu->rvu_dbg.root);
+ if (!rvu->rvu_dbg.nix) {
+ dev_err(rvu->dev,
+ "create debugfs dir failed for nix1\n");
+ return;
+ }
+ nix_hw = &rvu->hw->nix[1];
+ }
- return;
+ debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_sq_ctx_fops);
+ debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_rq_ctx_fops);
+ debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_cq_ctx_fops);
+ debugfs_create_file("ndc_tx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_ndc_tx_cache_fops);
+ debugfs_create_file("ndc_rx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_ndc_rx_cache_fops);
+ debugfs_create_file("ndc_tx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_ndc_tx_hits_miss_fops);
+ debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_ndc_rx_hits_miss_fops);
+ debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu,
+ &rvu_dbg_nix_qsize_fops);
+ debugfs_create_file("ingress_policer_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_band_prof_ctx_fops);
+ debugfs_create_file("ingress_policer_rsrc", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_band_prof_rsrc_fops);
+ if (is_rvu_96xx_A0(rvu)) {
+ debugfs_create_file("tx_stall_hwissue", 0600,
+ rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_tx_stall_hwissue_fops);
+ }
+}
-create_failed:
- dev_err(dev, "Failed to create debugfs dir/file for NPA\n");
- debugfs_remove_recursive(rvu->rvu_dbg.npa);
+static void rvu_dbg_npa_init(struct rvu *rvu)
+{
+ rvu->rvu_dbg.npa = debugfs_create_dir("npa", rvu->rvu_dbg.root);
+ debugfs_create_file("qsize", 0600, rvu->rvu_dbg.npa, rvu,
+ &rvu_dbg_npa_qsize_fops);
+ debugfs_create_file("aura_ctx", 0600, rvu->rvu_dbg.npa, rvu,
+ &rvu_dbg_npa_aura_ctx_fops);
+ debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu,
+ &rvu_dbg_npa_pool_ctx_fops);
+ debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu,
+ &rvu_dbg_npa_ndc_cache_fops);
+ debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa, rvu,
+ &rvu_dbg_npa_ndc_hits_miss_fops);
}
#define PRINT_CGX_CUML_NIXRX_STATUS(idx, name) \
@@ -1468,6 +2052,7 @@ create_failed:
static int cgx_print_stats(struct seq_file *s, int lmac_id)
{
struct cgx_link_user_info linfo;
+ struct mac_ops *mac_ops;
void *cgxd = s->private;
u64 ucast, mcast, bcast;
int stat = 0, err = 0;
@@ -1479,6 +2064,11 @@ static int cgx_print_stats(struct seq_file *s, int lmac_id)
if (!rvu)
return -ENODEV;
+ mac_ops = get_mac_ops(cgxd);
+ /* There can be no CGX devices at all */
+ if (!mac_ops)
+ return 0;
+
/* Link status */
seq_puts(s, "\n=======Link Status======\n\n");
err = cgx_get_link_info(cgxd, lmac_id, &linfo);
@@ -1488,7 +2078,8 @@ static int cgx_print_stats(struct seq_file *s, int lmac_id)
linfo.link_up ? "UP" : "DOWN", linfo.speed);
/* Rx stats */
- seq_puts(s, "\n=======NIX RX_STATS(CGX port level)======\n\n");
+ seq_printf(s, "\n=======NIX RX_STATS(%s port level)======\n\n",
+ mac_ops->name);
ucast = PRINT_CGX_CUML_NIXRX_STATUS(RX_UCAST, "rx_ucast_frames");
if (err)
return err;
@@ -1510,7 +2101,8 @@ static int cgx_print_stats(struct seq_file *s, int lmac_id)
return err;
/* Tx stats */
- seq_puts(s, "\n=======NIX TX_STATS(CGX port level)======\n\n");
+ seq_printf(s, "\n=======NIX TX_STATS(%s port level)======\n\n",
+ mac_ops->name);
ucast = PRINT_CGX_CUML_NIXTX_STATUS(TX_UCAST, "tx_ucast_frames");
if (err)
return err;
@@ -1529,33 +2121,43 @@ static int cgx_print_stats(struct seq_file *s, int lmac_id)
return err;
/* Rx stats */
- seq_puts(s, "\n=======CGX RX_STATS======\n\n");
- while (stat < CGX_RX_STATS_COUNT) {
- err = cgx_get_rx_stats(cgxd, lmac_id, stat, &rx_stat);
+ seq_printf(s, "\n=======%s RX_STATS======\n\n", mac_ops->name);
+ while (stat < mac_ops->rx_stats_cnt) {
+ err = mac_ops->mac_get_rx_stats(cgxd, lmac_id, stat, &rx_stat);
if (err)
return err;
- seq_printf(s, "%s: %llu\n", cgx_rx_stats_fields[stat], rx_stat);
+ if (is_rvu_otx2(rvu))
+ seq_printf(s, "%s: %llu\n", cgx_rx_stats_fields[stat],
+ rx_stat);
+ else
+ seq_printf(s, "%s: %llu\n", rpm_rx_stats_fields[stat],
+ rx_stat);
stat++;
}
/* Tx stats */
stat = 0;
- seq_puts(s, "\n=======CGX TX_STATS======\n\n");
- while (stat < CGX_TX_STATS_COUNT) {
- err = cgx_get_tx_stats(cgxd, lmac_id, stat, &tx_stat);
+ seq_printf(s, "\n=======%s TX_STATS======\n\n", mac_ops->name);
+ while (stat < mac_ops->tx_stats_cnt) {
+ err = mac_ops->mac_get_tx_stats(cgxd, lmac_id, stat, &tx_stat);
if (err)
return err;
- seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat], tx_stat);
+
+ if (is_rvu_otx2(rvu))
+ seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat],
+ tx_stat);
+ else
+ seq_printf(s, "%s: %llu\n", rpm_tx_stats_fields[stat],
+ tx_stat);
stat++;
}
return err;
}
-static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
+static int rvu_dbg_derive_lmacid(struct seq_file *filp, int *lmac_id)
{
struct dentry *current_dir;
- int err, lmac_id;
char *buf;
current_dir = filp->file->f_path.dentry->d_parent;
@@ -1563,53 +2165,127 @@ static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
if (!buf)
return -EINVAL;
- err = kstrtoint(buf + 1, 10, &lmac_id);
- if (!err) {
- err = cgx_print_stats(filp, lmac_id);
- if (err)
- return err;
- }
+ return kstrtoint(buf + 1, 10, lmac_id);
+}
+
+static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
+{
+ int lmac_id, err;
+
+ err = rvu_dbg_derive_lmacid(filp, &lmac_id);
+ if (!err)
+ return cgx_print_stats(filp, lmac_id);
+
return err;
}
RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
+static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id)
+{
+ struct pci_dev *pdev = NULL;
+ void *cgxd = s->private;
+ char *bcast, *mcast;
+ u16 index, domain;
+ u8 dmac[ETH_ALEN];
+ struct rvu *rvu;
+ u64 cfg, mac;
+ int pf;
+
+ rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
+ if (!rvu)
+ return -ENODEV;
+
+ pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
+ domain = 2;
+
+ pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0);
+ if (!pdev)
+ return 0;
+
+ cfg = cgx_read_dmac_ctrl(cgxd, lmac_id);
+ bcast = cfg & CGX_DMAC_BCAST_MODE ? "ACCEPT" : "REJECT";
+ mcast = cfg & CGX_DMAC_MCAST_MODE ? "ACCEPT" : "REJECT";
+
+ seq_puts(s,
+ "PCI dev RVUPF BROADCAST MULTICAST FILTER-MODE\n");
+ seq_printf(s, "%s PF%d %9s %9s",
+ dev_name(&pdev->dev), pf, bcast, mcast);
+ if (cfg & CGX_DMAC_CAM_ACCEPT)
+ seq_printf(s, "%12s\n\n", "UNICAST");
+ else
+ seq_printf(s, "%16s\n\n", "PROMISCUOUS");
+
+ seq_puts(s, "\nDMAC-INDEX ADDRESS\n");
+
+ for (index = 0; index < 32; index++) {
+ cfg = cgx_read_dmac_entry(cgxd, index);
+ /* Display enabled dmac entries associated with current lmac */
+ if (lmac_id == FIELD_GET(CGX_DMAC_CAM_ENTRY_LMACID, cfg) &&
+ FIELD_GET(CGX_DMAC_CAM_ADDR_ENABLE, cfg)) {
+ mac = FIELD_GET(CGX_RX_DMAC_ADR_MASK, cfg);
+ u64_to_ether_addr(mac, dmac);
+ seq_printf(s, "%7d %pM\n", index, dmac);
+ }
+ }
+
+ pci_dev_put(pdev);
+ return 0;
+}
+
+static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *filp, void *unused)
+{
+ int err, lmac_id;
+
+ err = rvu_dbg_derive_lmacid(filp, &lmac_id);
+ if (!err)
+ return cgx_print_dmac_flt(filp, lmac_id);
+
+ return err;
+}
+
+RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL);
+
static void rvu_dbg_cgx_init(struct rvu *rvu)
{
- const struct device *dev = &rvu->pdev->dev;
- struct dentry *pfile;
+ struct mac_ops *mac_ops;
+ unsigned long lmac_bmap;
int i, lmac_id;
char dname[20];
void *cgx;
- rvu->rvu_dbg.cgx_root = debugfs_create_dir("cgx", rvu->rvu_dbg.root);
+ if (!cgx_get_cgxcnt_max())
+ return;
+
+ mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
+ if (!mac_ops)
+ return;
+
+ rvu->rvu_dbg.cgx_root = debugfs_create_dir(mac_ops->name,
+ rvu->rvu_dbg.root);
for (i = 0; i < cgx_get_cgxcnt_max(); i++) {
cgx = rvu_cgx_pdata(i, rvu);
if (!cgx)
continue;
+ lmac_bmap = cgx_get_lmac_bmap(cgx);
/* cgx debugfs dir */
- sprintf(dname, "cgx%d", i);
+ sprintf(dname, "%s%d", mac_ops->name, i);
rvu->rvu_dbg.cgx = debugfs_create_dir(dname,
rvu->rvu_dbg.cgx_root);
- for (lmac_id = 0; lmac_id < cgx_get_lmac_cnt(cgx); lmac_id++) {
+ for_each_set_bit(lmac_id, &lmac_bmap, MAX_LMAC_PER_CGX) {
/* lmac debugfs dir */
sprintf(dname, "lmac%d", lmac_id);
rvu->rvu_dbg.lmac =
debugfs_create_dir(dname, rvu->rvu_dbg.cgx);
- pfile = debugfs_create_file("stats", 0600,
- rvu->rvu_dbg.lmac, cgx,
- &rvu_dbg_cgx_stat_fops);
- if (!pfile)
- goto create_failed;
+ debugfs_create_file("stats", 0600, rvu->rvu_dbg.lmac,
+ cgx, &rvu_dbg_cgx_stat_fops);
+ debugfs_create_file("mac_filter", 0600,
+ rvu->rvu_dbg.lmac, cgx,
+ &rvu_dbg_cgx_dmac_flt_fops);
}
}
- return;
-
-create_failed:
- dev_err(dev, "Failed to create debugfs dir/file for CGX\n");
- debugfs_remove_recursive(rvu->rvu_dbg.cgx_root);
}
/* NPC debugfs APIs */
@@ -1620,9 +2296,6 @@ static void rvu_print_npc_mcam_info(struct seq_file *s,
int entry_acnt, entry_ecnt;
int cntr_acnt, cntr_ecnt;
- /* Skip PF0 */
- if (!pcifunc)
- return;
rvu_npc_get_mcam_entry_alloc_info(rvu, pcifunc, blkaddr,
&entry_acnt, &entry_ecnt);
rvu_npc_get_mcam_counter_alloc_info(rvu, pcifunc, blkaddr,
@@ -1653,7 +2326,7 @@ static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued)
struct rvu *rvu = filp->private;
int pf, vf, numvfs, blkaddr;
struct npc_mcam *mcam;
- u16 pcifunc;
+ u16 pcifunc, counters;
u64 cfg;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
@@ -1661,6 +2334,7 @@ static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued)
return -ENODEV;
mcam = &rvu->hw->mcam;
+ counters = rvu->hw->npc_counters;
seq_puts(filp, "\nNPC MCAM info:\n");
/* MCAM keywidth on receive and transmit sides */
@@ -1683,10 +2357,9 @@ static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued)
seq_printf(filp, "\t\t Available \t: %d\n", mcam->bmap_fcnt);
/* MCAM counters */
- cfg = rvu_read64(rvu, blkaddr, NPC_AF_CONST);
- cfg = (cfg >> 48) & 0xFFFF;
- seq_printf(filp, "\n\t\t MCAM counters \t: %lld\n", cfg);
- seq_printf(filp, "\t\t Reserved \t: %lld\n", cfg - mcam->counters.max);
+ seq_printf(filp, "\n\t\t MCAM counters \t: %d\n", counters);
+ seq_printf(filp, "\t\t Reserved \t: %d\n",
+ counters - mcam->counters.max);
seq_printf(filp, "\t\t Available \t: %d\n",
rvu_rsrc_free_count(&mcam->counters));
@@ -1713,6 +2386,989 @@ static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued)
return 0;
}
+static int parse_sso_cmd_buffer(char *cmd_buf, size_t *count,
+ const char __user *buffer, int *ssolf,
+ bool *all)
+{
+ int ret, bytes_not_copied;
+ char *cmd_buf_tmp;
+ char *subtoken;
+
+ bytes_not_copied = copy_from_user(cmd_buf, buffer, *count);
+ if (bytes_not_copied)
+ return -EFAULT;
+
+ cmd_buf[*count] = '\0';
+ cmd_buf_tmp = strchr(cmd_buf, '\n');
+
+ if (cmd_buf_tmp) {
+ *cmd_buf_tmp = '\0';
+ *count = cmd_buf_tmp - cmd_buf + 1;
+ }
+
+ subtoken = strsep(&cmd_buf, " ");
+ if (subtoken && strcmp(subtoken, "all") == 0) {
+ *all = true;
+ } else {
+ ret = subtoken ? kstrtoint(subtoken, 10, ssolf) : -EINVAL;
+ if (ret < 0)
+ return ret;
+ }
+ if (cmd_buf)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void sso_hwgrp_display_iq_list(struct rvu *rvu, int ssolf, u16 idx,
+ u16 tail_idx, u8 queue_type)
+{
+ const char *queue[3] = {"DQ", "CQ", "AQ"};
+ int blkaddr;
+ u64 reg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return;
+
+ pr_info("SSO HWGGRP[%d] [%s] Chain queue head[%d]", ssolf,
+ queue[queue_type], idx);
+ pr_info("SSO HWGGRP[%d] [%s] Chain queue tail[%d]", ssolf,
+ queue[queue_type], tail_idx);
+ pr_info("--------------------------------------------------\n");
+ do {
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_IENTX_TAG(idx));
+ pr_info("SSO HWGGRP[%d] [%s] IE[%d] TAG 0x%llx\n", ssolf,
+ queue[queue_type], idx, reg);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_IENTX_GRP(idx));
+ pr_info("SSO HWGGRP[%d] [%s] IE[%d] GRP 0x%llx\n", ssolf,
+ queue[queue_type], idx, reg);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_IENTX_PENDTAG(idx));
+ pr_info("SSO HWGGRP[%d] [%s] IE[%d] PENDTAG 0x%llx\n", ssolf,
+ queue[queue_type], idx, reg);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_IENTX_LINKS(idx));
+ pr_info("SSO HWGGRP[%d] [%s] IE[%d] LINKS 0x%llx\n", ssolf,
+ queue[queue_type], idx, reg);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_IENTX_QLINKS(idx));
+ pr_info("SSO HWGGRP[%d] [%s] IE[%d] QLINKS 0x%llx\n", ssolf,
+ queue[queue_type], idx, reg);
+ pr_info("--------------------------------------------------\n");
+ if (idx == tail_idx)
+ break;
+ idx = reg & 0x1FFF;
+ } while (idx != 0x1FFF);
+}
+
+static void sso_hwgrp_display_taq_list(struct rvu *rvu, int ssolf, u16 ent_head,
+ u8 wae_head, u8 wae_used, u8 taq_lines)
+{
+ int i, blkaddr;
+ u64 reg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return;
+
+ pr_info("--------------------------------------------------\n");
+ do {
+ for (i = wae_head; i < taq_lines && wae_used; i++) {
+ reg = rvu_read64(rvu, blkaddr,
+ SSO_AF_TAQX_WAEY_TAG(ent_head, i));
+ pr_info("SSO HWGGRP[%d] TAQ[%d] WAE[%d] TAG 0x%llx\n",
+ ssolf, ent_head, i, reg);
+
+ reg = rvu_read64(rvu, blkaddr,
+ SSO_AF_TAQX_WAEY_WQP(ent_head, i));
+ pr_info("SSO HWGGRP[%d] TAQ[%d] WAE[%d] WQP 0x%llx\n",
+ ssolf, ent_head, i, reg);
+ wae_used--;
+ }
+
+ reg = rvu_read64(rvu, blkaddr,
+ SSO_AF_TAQX_LINK(ent_head));
+ pr_info("SSO HWGGRP[%d] TAQ[%d] LINK 0x%llx\n",
+ ssolf, ent_head, reg);
+ ent_head = reg & 0x7FF;
+ pr_info("--------------------------------------------------\n");
+ } while (ent_head && wae_used);
+}
+
+static int read_sso_pc(struct rvu *rvu)
+{
+ int blkaddr;
+ u64 reg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return -ENODEV;
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_ACTIVE_CYCLES0);
+ pr_info("SSO Add-Work active cycles %lld\n", reg);
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_ACTIVE_CYCLES1);
+ pr_info("SSO Get-Work active cycles %lld\n", reg);
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_ACTIVE_CYCLES2);
+ pr_info("SSO Work-Slot active cycles %lld\n", reg);
+ pr_info("\n");
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_NOS_CNT) & 0x1FFF;
+ pr_info("SSO work-queue entries on the no-schedule list %lld\n", reg);
+ pr_info("\n");
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_AW_READ_ARB);
+ pr_info("SSO XAQ reads outstanding %lld\n",
+ (reg >> 24) & 0x3F);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_XAQ_REQ_PC);
+ pr_info("SSO XAQ reads requests %lld\n", reg);
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_XAQ_LATENCY_PC);
+ pr_info("SSO XAQ read latency cycles %lld\n", reg);
+ pr_info("\n");
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_AW_WE);
+ pr_info("SSO IAQ reserved %lld\n",
+ (reg >> 16) & 0x3FFF);
+ pr_info("SSO IAQ total %lld\n", reg & 0x3FFF);
+ pr_info("\n");
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_TAQ_CNT);
+ pr_info("SSO TAQ reserved %lld\n",
+ (reg >> 16) & 0x7FF);
+ pr_info("SSO TAQ total %lld\n", reg & 0x7FF);
+ pr_info("\n");
+
+ return 0;
+}
+
+/* Reads SSO hwgrp performance counters */
+static void read_sso_hwgrp_pc(struct rvu *rvu, int ssolf, bool all)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int blkaddr, max_id;
+ u64 reg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return;
+
+ block = &hw->block[blkaddr];
+ if (ssolf < 0 || ssolf >= block->lf.max) {
+ pr_info("Invalid SSOLF(HWGRP), valid range is 0-%d\n",
+ block->lf.max - 1);
+ return;
+ }
+ max_id = block->lf.max;
+
+ if (all)
+ ssolf = 0;
+ else
+ max_id = ssolf + 1;
+
+ pr_info("==================================================\n");
+ for (; ssolf < max_id; ssolf++) {
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_WS_PC(ssolf));
+ pr_info("SSO HWGGRP[%d] Work-Schedule PC 0x%llx\n", ssolf,
+ reg);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_EXT_PC(ssolf));
+ pr_info("SSO HWGGRP[%d] External Schedule PC 0x%llx\n", ssolf,
+ reg);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_WA_PC(ssolf));
+ pr_info("SSO HWGGRP[%d] Work-Add PC 0x%llx\n", ssolf,
+ reg);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_TS_PC(ssolf));
+ pr_info("SSO HWGGRP[%d] Tag Switch PC 0x%llx\n", ssolf,
+ reg);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_DS_PC(ssolf));
+ pr_info("SSO HWGGRP[%d] Deschedule PC 0x%llx\n", ssolf,
+ reg);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_DQ_PC(ssolf));
+ pr_info("SSO HWGGRP[%d] Work-Descheduled PC 0x%llx\n", ssolf,
+ reg);
+
+ reg = rvu_read64(rvu, blkaddr,
+ SSO_AF_HWGRPX_PAGE_CNT(ssolf));
+ pr_info("SSO HWGGRP[%d] In-use Page Count 0x%llx\n", ssolf,
+ reg);
+ pr_info("==================================================\n");
+ }
+}
+
+/* Reads SSO hwgrp Threshold */
+static void read_sso_hwgrp_thresh(struct rvu *rvu, int ssolf, bool all)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int blkaddr, max_id;
+ u64 reg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return;
+
+ block = &hw->block[blkaddr];
+ if (ssolf < 0 || ssolf >= block->lf.max) {
+ pr_info("Invalid SSOLF(HWGRP), valid range is 0-%d\n",
+ block->lf.max - 1);
+ return;
+ }
+ max_id = block->lf.max;
+
+ if (all)
+ ssolf = 0;
+ else
+ max_id = ssolf + 1;
+
+ pr_info("==================================================\n");
+ for (; ssolf < max_id; ssolf++) {
+ reg = rvu_read64(rvu, blkaddr,
+ SSO_AF_HWGRPX_IAQ_THR(ssolf));
+ pr_info("SSO HWGGRP[%d] IAQ Threshold 0x%llx\n", ssolf,
+ reg);
+
+ reg = rvu_read64(rvu, blkaddr,
+ SSO_AF_HWGRPX_TAQ_THR(ssolf));
+ pr_info("SSO HWGGRP[%d] TAQ Threshold 0x%llx\n", ssolf,
+ reg);
+
+ reg = rvu_read64(rvu, blkaddr,
+ SSO_AF_HWGRPX_XAQ_AURA(ssolf));
+ pr_info("SSO HWGGRP[%d] XAQ Aura 0x%llx\n", ssolf,
+ reg);
+
+ reg = rvu_read64(rvu, blkaddr,
+ SSO_AF_HWGRPX_XAQ_LIMIT(ssolf));
+ pr_info("SSO HWGGRP[%d] XAQ Limit 0x%llx\n", ssolf,
+ reg);
+
+ reg = rvu_read64(rvu, blkaddr,
+ SSO_AF_HWGRPX_IU_ACCNT(ssolf));
+ pr_info("SSO HWGGRP[%d] IU Account Index 0x%llx\n", ssolf,
+ reg);
+
+ reg = rvu_read64(rvu, blkaddr,
+ SSO_AF_IU_ACCNTX_CFG(reg & 0xFF));
+ pr_info("SSO HWGGRP[%d] IU Accounting Cfg 0x%llx\n", ssolf,
+ reg);
+ pr_info("==================================================\n");
+ }
+}
+
+/* Reads SSO hwgrp TAQ list */
+static void read_sso_hwgrp_taq_list(struct rvu *rvu, int ssolf, bool all)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u8 taq_entries, wae_head;
+ struct rvu_block *block;
+ u16 ent_head, cl_used;
+ int blkaddr, max_id;
+ u64 reg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return;
+
+ block = &hw->block[blkaddr];
+ if (ssolf < 0 || ssolf >= block->lf.max) {
+ pr_info("Invalid SSOLF(HWGRP), valid range is 0-%d\n",
+ block->lf.max - 1);
+ return;
+ }
+ max_id = block->lf.max;
+
+ if (all)
+ ssolf = 0;
+ else
+ max_id = ssolf + 1;
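+ /* Bits [55:48] of SSO_AF_CONST are used here as the number of WAEs
+  * per TAQ line when walking the admission queues below.
+  */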
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_CONST);
+ taq_entries = (reg >> 48) & 0xFF;
+ pr_info("==================================================\n");
+ for (; ssolf < max_id; ssolf++) {
+ pr_info("++++++++++++++++++++++++++++++++++++++++++++++++++\n");
+ pr_info("SSO HWGGRP[%d] Transitory Output Admission Queue",
+ ssolf);
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_TOAQX_STATUS(ssolf));
+ pr_info("SSO HWGGRP[%d] TOAQ Status 0x%llx\n", ssolf,
+ reg);
+ ent_head = (reg >> 12) & 0x7FF;
+ cl_used = (reg >> 32) & 0x7FF;
+ if (reg & BIT_ULL(61) && cl_used) {
+ pr_info("SSO HWGGRP[%d] TOAQ CL_USED 0x%x\n",
+ ssolf, cl_used);
+ sso_hwgrp_display_taq_list(rvu, ssolf, ent_head, 0,
+ cl_used * taq_entries,
+ taq_entries);
+ }
+ pr_info("++++++++++++++++++++++++++++++++++++++++++++++++++\n");
+ pr_info("SSO HWGGRP[%d] Transitory Input Admission Queue",
+ ssolf);
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_TIAQX_STATUS(ssolf));
+ pr_info("SSO HWGGRP[%d] TIAQ Status 0x%llx\n", ssolf,
+ reg);
+ wae_head = (reg >> 60) & 0xF;
+ cl_used = (reg >> 32) & 0x7FFF;
+ ent_head = (reg >> 12) & 0x7FF;
+ if (reg & BIT_ULL(61) && cl_used) {
+ pr_info("SSO HWGGRP[%d] TIAQ WAE_USED 0x%x\n",
+ ssolf, cl_used);
+ sso_hwgrp_display_taq_list(rvu, ssolf, ent_head,
+ wae_head, cl_used,
+ taq_entries);
+ }
+ pr_info("++++++++++++++++++++++++++++++++++++++++++++++++++\n");
+ pr_info("==================================================\n");
+ }
+}
+
+/* Reads SSO hwgrp IAQ list */
+static void read_sso_hwgrp_iaq_list(struct rvu *rvu, int ssolf, bool all)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ u16 head_idx, tail_idx;
+ int blkaddr, max_id;
+ u64 reg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return;
+
+ block = &hw->block[blkaddr];
+ if (ssolf < 0 || ssolf >= block->lf.max) {
+ pr_info("Invalid SSOLF(HWGRP), valid range is 0-%d\n",
+ block->lf.max - 1);
+ return;
+ }
+ max_id = block->lf.max;
+
+ if (all)
+ ssolf = 0;
+ else
+ max_id = ssolf + 1;
+ pr_info("==================================================\n");
+ for (; ssolf < max_id; ssolf++) {
+ pr_info("++++++++++++++++++++++++++++++++++++++++++++++++++\n");
+ pr_info("SSO HWGGRP[%d] Deschedule Queue(DQ)\n", ssolf);
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_IPL_DESCHEDX(ssolf));
+ pr_info("SSO HWGGRP[%d] DQ List 0x%llx\n", ssolf,
+ reg);
+ head_idx = (reg >> 13) & 0x1FFF;
+ tail_idx = reg & 0x1FFF;
+ if (reg & (BIT_ULL(26) | BIT_ULL(27)))
+ sso_hwgrp_display_iq_list(rvu, ssolf, head_idx,
+ tail_idx, 0);
+ pr_info("++++++++++++++++++++++++++++++++++++++++++++++++++\n");
+ pr_info("SSO HWGGRP[%d] Conflict Queue(CQ)\n", ssolf);
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_IPL_CONFX(ssolf));
+ pr_info("SSO HWGGRP[%d] CQ List 0x%llx\n", ssolf,
+ reg);
+ head_idx = (reg >> 13) & 0x1FFF;
+ tail_idx = reg & 0x1FFF;
+ if (reg & (BIT_ULL(26) | BIT_ULL(27)))
+ sso_hwgrp_display_iq_list(rvu, ssolf, head_idx,
+ tail_idx, 1);
+ pr_info("++++++++++++++++++++++++++++++++++++++++++++++++++\n");
+ pr_info("SSO HWGGRP[%d] Admission Queue(AQ)\n", ssolf);
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_IPL_IAQX(ssolf));
+ pr_info("SSO HWGGRP[%d] AQ List 0x%llx\n", ssolf,
+ reg);
+ head_idx = (reg >> 13) & 0x1FFF;
+ tail_idx = reg & 0x1FFF;
+ if (reg & (BIT_ULL(26) | BIT_ULL(27)))
+ sso_hwgrp_display_iq_list(rvu, ssolf, head_idx,
+ tail_idx, 2);
+ pr_info("++++++++++++++++++++++++++++++++++++++++++++++++++\n");
+ pr_info("==================================================\n");
+ }
+}
+
+/* Reads SSO hwgrp IENT list */
+static int read_sso_hwgrp_ient_list(struct rvu *rvu)
+{
+ const char *tt_c[4] = {"SSO_TT_ORDERED_", "SSO_TT_ATOMIC__",
+ "SSO_TT_UNTAGGED", "SSO_TT_EMPTY___"};
+ struct rvu_hwinfo *hw = rvu->hw;
+ int max_idx = hw->sso.sso_iue;
+ u64 pendtag, qlinks, links;
+ int len, idx, blkaddr;
+ u64 tag, grp, wqp;
+ char str[300];
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return -ENODEV;
+
+ for (idx = 0; idx < max_idx; idx++) {
+ len = 0;
+ tag = rvu_read64(rvu, blkaddr, SSO_AF_IENTX_TAG(idx));
+ grp = rvu_read64(rvu, blkaddr, SSO_AF_IENTX_GRP(idx));
+ pendtag = rvu_read64(rvu, blkaddr,
+ SSO_AF_IENTX_PENDTAG(idx));
+ links = rvu_read64(rvu, blkaddr, SSO_AF_IENTX_LINKS(idx));
+ qlinks = rvu_read64(rvu, blkaddr,
+ SSO_AF_IENTX_QLINKS(idx));
+ wqp = rvu_read64(rvu, blkaddr, SSO_AF_IENTX_WQP(idx));
+ len = snprintf(str + len, 300,
+ "SSO IENT[%4d] TT [%s] HWGRP [%3lld] ", idx,
+ tt_c[(tag >> 32) & 0x3], (grp >> 48) & 0x1f);
+ len += snprintf(str + len, 300 - len,
+ "TAG [0x%010llx] GRP [0x%016llx] ", tag, grp);
+ len += snprintf(str + len, 300 - len, "PENDTAG [0x%010llx] ",
+ pendtag);
+ len += snprintf(str + len, 300 - len,
+ "LINKS [0x%016llx] QLINKS [0x%010llx] ", links,
+ qlinks);
+ snprintf(str + len, 300 - len, "WQP [0x%016llx]\n", wqp);
+ pr_info("%s", str);
+ }
+
+ return 0;
+}
+
+/* Reads SSO hwgrp free list */
+static int read_sso_hwgrp_free_list(struct rvu *rvu)
+{
+ int blkaddr;
+ u64 reg;
+ u8 idx;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return -ENODEV;
+
+ pr_info("==================================================\n");
+ for (idx = 0; idx < 4; idx++) {
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_IPL_FREEX(idx));
+ pr_info("SSO FREE LIST[%d]\n", idx);
+ pr_info("qnum_head : %lld qnum_tail : %lld\n",
+ (reg >> 58) & 0x3, (reg >> 56) & 0x3);
+ pr_info("queue_cnt : %llx\n", (reg >> 26) & 0x7fff);
+ pr_info("queue_val : %lld queue_head : %4lld queue_tail %4lld\n"
+ , (reg >> 40) & 0x1, (reg >> 13) & 0x1fff,
+ reg & 0x1fff);
+ pr_info("==================================================\n");
+ }
+
+ return 0;
+}
+
+/* Reads SSO HWS (work-slot) arbitration state, guest machine control
+ * and group masks
+ */
+static void read_sso_hws_info(struct rvu *rvu, int ssowlf, bool all)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int blkaddr;
+ int max_id;
+ u64 reg;
+ u8 mask;
+ u8 set;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSOW, 0);
+ if (blkaddr < 0)
+ return;
+
+ block = &hw->block[blkaddr];
+ if (ssowlf < 0 || ssowlf >= block->lf.max) {
+ pr_info("Invalid SSOWLF(HWS), valid range is 0-%d\n",
+ block->lf.max - 1);
+ return;
+ }
+ max_id = block->lf.max;
+
+ if (all)
+ ssowlf = 0;
+ else
+ max_id = ssowlf + 1;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return;
+
+ pr_info("==================================================\n");
+ for (; ssowlf < max_id; ssowlf++) {
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWSX_ARB(ssowlf));
+ pr_info("SSOW HWS[%d] Arbitration State 0x%llx\n", ssowlf,
+ reg);
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWSX_GMCTL(ssowlf));
+ pr_info("SSOW HWS[%d] Guest Machine Control 0x%llx\n", ssowlf,
+ reg);
+ for (set = 0; set < 2; set++)
+ for (mask = 0; mask < 4; mask++) {
+ reg = rvu_read64(rvu, blkaddr,
+ SSO_AF_HWSX_SX_GRPMSKX(ssowlf,
+ set,
+ mask));
+ pr_info(
+ "SSOW HWS[%d] SET[%d] Group Mask[%d] 0x%llx\n",
+ ssowlf, set, mask, reg);
+ }
+ pr_info("==================================================\n");
+ }
+}
+
+typedef void (*sso_dump_cb)(struct rvu *rvu, int ssolf, bool all);
+
+static ssize_t rvu_dbg_sso_cmd_parser(struct file *filp,
+ const char __user *buffer, size_t count,
+ loff_t *ppos, char *lf_type,
+ char *file_nm, sso_dump_cb fn)
+{
+ struct rvu *rvu = filp->private_data;
+ bool all = false;
+ char *cmd_buf;
+ int lf = 0;
+
+ if ((*ppos != 0) || !count)
+ return -EINVAL;
+
+ cmd_buf = kzalloc(count + 1, GFP_KERNEL);
+ if (!cmd_buf)
+ return -ENOMEM;
+
+ if (parse_sso_cmd_buffer(cmd_buf, &count, buffer,
+ &lf, &all) < 0) {
+ pr_info("Usage: echo [<%s>/all] > %s\n", lf_type, file_nm);
+ } else {
+ fn(rvu, lf, all);
+ }
+ kfree(cmd_buf);
+
+ return count;
+}
+
+/* SSO debugfs APIs */
+static ssize_t rvu_dbg_sso_pc_display(struct file *filp,
+ char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return read_sso_pc(filp->private_data);
+}
+
+static ssize_t rvu_dbg_sso_hwgrp_pc_display(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_sso_cmd_parser(filp, buffer, count, ppos, "hwgrp",
+ "sso_hwgrp_pc", read_sso_hwgrp_pc);
+}
+
+static ssize_t rvu_dbg_sso_hwgrp_thresh_display(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_sso_cmd_parser(filp, buffer, count, ppos, "hwgrp",
+ "sso_hwgrp_thresh", read_sso_hwgrp_thresh);
+}
+
+static ssize_t rvu_dbg_sso_hwgrp_taq_wlk_display(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_sso_cmd_parser(filp, buffer, count, ppos, "hwgrp",
+ "sso_hwgrp_taq_wlk", read_sso_hwgrp_taq_list);
+}
+
+static ssize_t rvu_dbg_sso_hwgrp_iaq_wlk_display(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_sso_cmd_parser(filp, buffer, count, ppos, "hwgrp",
+ "sso_hwgrp_iaq_wlk", read_sso_hwgrp_iaq_list);
+}
+
+static ssize_t rvu_dbg_sso_hwgrp_ient_wlk_display(struct file *filp,
+ char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return read_sso_hwgrp_ient_list(filp->private_data);
+}
+
+static ssize_t rvu_dbg_sso_hwgrp_fl_wlk_display(struct file *filp,
+ char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return read_sso_hwgrp_free_list(filp->private_data);
+}
+
+static ssize_t rvu_dbg_sso_hws_info_display(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_sso_cmd_parser(filp, buffer, count, ppos, "hws",
+ "sso_hws_info", read_sso_hws_info);
+}
+
+RVU_DEBUG_FOPS(sso_pc, sso_pc_display, NULL);
+RVU_DEBUG_FOPS(sso_hwgrp_pc, NULL, sso_hwgrp_pc_display);
+RVU_DEBUG_FOPS(sso_hwgrp_thresh, NULL, sso_hwgrp_thresh_display);
+RVU_DEBUG_FOPS(sso_hwgrp_taq_wlk, NULL, sso_hwgrp_taq_wlk_display);
+RVU_DEBUG_FOPS(sso_hwgrp_iaq_wlk, NULL, sso_hwgrp_iaq_wlk_display);
+RVU_DEBUG_FOPS(sso_hwgrp_ient_wlk, sso_hwgrp_ient_wlk_display, NULL);
+RVU_DEBUG_FOPS(sso_hwgrp_fl_wlk, sso_hwgrp_fl_wlk_display, NULL);
+RVU_DEBUG_FOPS(sso_hws_info, NULL, sso_hws_info_display);
+
+static void rvu_dbg_sso_init(struct rvu *rvu)
+{
+ rvu->rvu_dbg.sso = debugfs_create_dir("sso", rvu->rvu_dbg.root);
+ rvu->rvu_dbg.sso_hwgrp = debugfs_create_dir("hwgrp", rvu->rvu_dbg.sso);
+ rvu->rvu_dbg.sso_hws = debugfs_create_dir("hws", rvu->rvu_dbg.sso);
+
+ debugfs_create_file("sso_pc", 0600, rvu->rvu_dbg.sso, rvu,
+ &rvu_dbg_sso_pc_fops);
+
+ debugfs_create_file("sso_hwgrp_pc", 0600, rvu->rvu_dbg.sso_hwgrp,
+ rvu, &rvu_dbg_sso_hwgrp_pc_fops);
+
+ debugfs_create_file("sso_hwgrp_thresh", 0600, rvu->rvu_dbg.sso_hwgrp,
+ rvu, &rvu_dbg_sso_hwgrp_thresh_fops);
+
+ debugfs_create_file("sso_hwgrp_taq_walk", 0600, rvu->rvu_dbg.sso_hwgrp,
+ rvu, &rvu_dbg_sso_hwgrp_taq_wlk_fops);
+
+ debugfs_create_file("sso_hwgrp_iaq_walk", 0600, rvu->rvu_dbg.sso_hwgrp,
+ rvu, &rvu_dbg_sso_hwgrp_iaq_wlk_fops);
+
+ debugfs_create_file("sso_hwgrp_ient_walk", 0600, rvu->rvu_dbg.sso_hwgrp,
+ rvu, &rvu_dbg_sso_hwgrp_ient_wlk_fops);
+
+ debugfs_create_file("sso_hwgrp_free_list_walk", 0600,
+ rvu->rvu_dbg.sso_hwgrp, rvu,
+ &rvu_dbg_sso_hwgrp_fl_wlk_fops);
+
+ debugfs_create_file("sso_hws_info", 0600, rvu->rvu_dbg.sso_hws,
+ rvu, &rvu_dbg_sso_hws_info_fops);
+}
+
+/* CPT debugfs APIs */
+static int parse_cpt_cmd_buffer(char *cmd_buf, size_t *count,
+ const char __user *buffer, char *e_type)
+{
+ int bytes_not_copied;
+ char *cmd_buf_tmp;
+ char *subtoken;
+
+ bytes_not_copied = copy_from_user(cmd_buf, buffer, *count);
+ if (bytes_not_copied)
+ return -EFAULT;
+
+ cmd_buf[*count] = '\0';
+ cmd_buf_tmp = strchr(cmd_buf, '\n');
+
+ if (cmd_buf_tmp) {
+ *cmd_buf_tmp = '\0';
+ *count = cmd_buf_tmp - cmd_buf + 1;
+ }
+
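+ /* Expect a single token naming the engine type: "SE", "IE", "AE" or "all" */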
+ subtoken = strsep(&cmd_buf, " ");
+ if (subtoken)
+ strcpy(e_type, subtoken);
+ else
+ return -EINVAL;
+
+ if (cmd_buf)
+ return -EINVAL;
+
+ if (strcmp(e_type, "SE") && strcmp(e_type, "IE") &&
+ strcmp(e_type, "AE") && strcmp(e_type, "all"))
+ return -EINVAL;
+
+ return 0;
+}
+
+static ssize_t rvu_dbg_cpt_cmd_parser(struct file *filp,
+ const char __user *buffer, size_t count,
+ loff_t *ppos)
+{
+ struct seq_file *s = filp->private_data;
+ struct rvu *rvu = s->private;
+ char *cmd_buf;
+ int ret = 0;
+
+ if ((*ppos != 0) || !count)
+ return -EINVAL;
+
+ cmd_buf = kzalloc(count + 1, GFP_KERNEL);
+ if (!cmd_buf)
+ return -ENOMEM;
+
+ if (parse_cpt_cmd_buffer(cmd_buf, &count, buffer,
+ rvu->rvu_dbg.cpt_ctx.e_type) < 0)
+ ret = -EINVAL;
+
+ kfree(cmd_buf);
+
+ if (ret)
+ return -EINVAL;
+
+ return count;
+}
+
+static ssize_t rvu_dbg_cpt_engines_sts_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_cpt_cmd_parser(filp, buffer, count, ppos);
+}
+
+static int rvu_dbg_cpt_engines_sts_display(struct seq_file *filp, void *unused)
+{
+ u64 busy_sts[2] = {0}, free_sts[2] = {0};
+ struct rvu *rvu = filp->private;
+ u16 max_ses, max_ies, max_aes;
+ u32 e_min = 0, e_max = 0, e;
+ struct dentry *current_dir;
+ int blkaddr;
+ char *e_type;
+ u64 reg;
+
+ current_dir = filp->file->f_path.dentry->d_parent;
+ blkaddr = (!strcmp(current_dir->d_name.name, "cpt1") ?
+ BLKADDR_CPT1 : BLKADDR_CPT0);
+
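+ /* Engine counts from CPT_AF_CONSTANTS1: bits [15:0] SE, [31:16] IE, [47:32] AE */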
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
+ max_ses = reg & 0xffff;
+ max_ies = (reg >> 16) & 0xffff;
+ max_aes = (reg >> 32) & 0xffff;
+
+ e_type = rvu->rvu_dbg.cpt_ctx.e_type;
+
+ if (strcmp(e_type, "SE") == 0) {
+ e_min = 0;
+ e_max = max_ses - 1;
+ } else if (strcmp(e_type, "IE") == 0) {
+ e_min = max_ses;
+ e_max = max_ses + max_ies - 1;
+ } else if (strcmp(e_type, "AE") == 0) {
+ e_min = max_ses + max_ies;
+ e_max = max_ses + max_ies + max_aes - 1;
+ } else if (strcmp(e_type, "all") == 0) {
+ e_min = 0;
+ e_max = max_ses + max_ies + max_aes - 1;
+ } else {
+ return -EINVAL;
+ }
+
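+ /* Accumulate bit 0 of CPT_AF_EXEX_STS as busy status and bit 1 as free
+  * status; engines at or above max_ses land in the second 64-bit word.
+  */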
+ for (e = e_min; e <= e_max; e++) {
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e));
+ if (reg & 0x1) {
+ if (e < max_ses)
+ busy_sts[0] |= 1ULL << e;
+ else if (e >= max_ses)
+ busy_sts[1] |= 1ULL << (e - max_ses);
+ }
+ if (reg & 0x2) {
+ if (e < max_ses)
+ free_sts[0] |= 1ULL << e;
+ else if (e >= max_ses)
+ free_sts[1] |= 1ULL << (e - max_ses);
+ }
+ }
+ seq_printf(filp, "FREE STS : 0x%016llx 0x%016llx\n", free_sts[1],
+ free_sts[0]);
+ seq_printf(filp, "BUSY STS : 0x%016llx 0x%016llx\n", busy_sts[1],
+ busy_sts[0]);
+
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(cpt_engines_sts, cpt_engines_sts_display,
+ cpt_engines_sts_write);
+
+static ssize_t rvu_dbg_cpt_engines_info_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return rvu_dbg_cpt_cmd_parser(filp, buffer, count, ppos);
+}
+
+static int rvu_dbg_cpt_engines_info_display(struct seq_file *filp, void *unused)
+{
+ struct rvu *rvu = filp->private;
+ u16 max_ses, max_ies, max_aes;
+ struct dentry *current_dir;
+ u32 e_min, e_max, e;
+ int blkaddr;
+ char *e_type;
+ u64 reg;
+
+ current_dir = filp->file->f_path.dentry->d_parent;
+ blkaddr = (!strcmp(current_dir->d_name.name, "cpt1") ?
+ BLKADDR_CPT1 : BLKADDR_CPT0);
+
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
+ max_ses = reg & 0xffff;
+ max_ies = (reg >> 16) & 0xffff;
+ max_aes = (reg >> 32) & 0xffff;
+
+ e_type = rvu->rvu_dbg.cpt_ctx.e_type;
+
+ if (strcmp(e_type, "SE") == 0) {
+ e_min = 0;
+ e_max = max_ses - 1;
+ } else if (strcmp(e_type, "IE") == 0) {
+ e_min = max_ses;
+ e_max = max_ses + max_ies - 1;
+ } else if (strcmp(e_type, "AE") == 0) {
+ e_min = max_ses + max_ies;
+ e_max = max_ses + max_ies + max_aes - 1;
+ } else if (strcmp(e_type, "all") == 0) {
+ e_min = 0;
+ e_max = max_ses + max_ies + max_aes - 1;
+ } else {
+ return -EINVAL;
+ }
+
+ seq_puts(filp, "===========================================\n");
+ for (e = e_min; e <= e_max; e++) {
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(e));
+ seq_printf(filp, "CPT Engine[%u] Group Enable 0x%02llx\n", e,
+ reg & 0xff);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_ACTIVE(e));
+ seq_printf(filp, "CPT Engine[%u] Active Info 0x%llx\n", e,
+ reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(e));
+ seq_printf(filp, "CPT Engine[%u] Control 0x%llx\n", e,
+ reg);
+ seq_puts(filp, "===========================================\n");
+ }
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(cpt_engines_info, cpt_engines_info_display,
+ cpt_engines_info_write);
+
+static int rvu_dbg_cpt_lfs_info_display(struct seq_file *filp, void *unused)
+{
+ struct rvu *rvu = filp->private;
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct dentry *current_dir;
+ struct rvu_block *block;
+ int blkaddr;
+ u64 reg;
+ u32 lf;
+
+ current_dir = filp->file->f_path.dentry->d_parent;
+ blkaddr = (!strcmp(current_dir->d_name.name, "cpt1") ?
+ BLKADDR_CPT1 : BLKADDR_CPT0);
+
+ block = &hw->block[blkaddr];
+ if (!block->lf.bmap)
+ return -ENODEV;
+
+ seq_puts(filp, "===========================================\n");
+ for (lf = 0; lf < block->lf.max; lf++) {
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(lf));
+
+ seq_printf(filp, "CPT Lf[%u] CTL 0x%llx\n", lf, reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(lf));
+ seq_printf(filp, "CPT Lf[%u] CTL2 0x%llx\n", lf, reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_PTR_CTL(lf));
+ seq_printf(filp, "CPT Lf[%u] PTR_CTL 0x%llx\n", lf, reg);
+ reg = rvu_read64(rvu, blkaddr, block->lfcfg_reg |
+ (lf << block->lfshift));
+ seq_printf(filp, "CPT Lf[%u] CFG 0x%llx\n", lf, reg);
+ seq_puts(filp, "===========================================\n");
+ }
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(cpt_lfs_info, cpt_lfs_info_display, NULL);
+
+static int rvu_dbg_cpt_err_info_display(struct seq_file *filp, void *unused)
+{
+ struct rvu *rvu = filp->private;
+ struct dentry *current_dir;
+ u64 reg0, reg1;
+ int blkaddr;
+
+ current_dir = filp->file->f_path.dentry->d_parent;
+ blkaddr = (!strcmp(current_dir->d_name.name, "cpt1") ?
+ BLKADDR_CPT1 : BLKADDR_CPT0);
+
+ reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0));
+ reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1));
+ seq_printf(filp, "CPT_AF_FLTX_INT: 0x%llx 0x%llx\n", reg0, reg1);
+ reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(0));
+ reg1 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(1));
+ seq_printf(filp, "CPT_AF_PSNX_EXE: 0x%llx 0x%llx\n", reg0, reg1);
+ reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_LF(0));
+ seq_printf(filp, "CPT_AF_PSNX_LF: 0x%llx\n", reg0);
+ reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RVU_INT);
+ seq_printf(filp, "CPT_AF_RVU_INT: 0x%llx\n", reg0);
+ reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RAS_INT);
+ seq_printf(filp, "CPT_AF_RAS_INT: 0x%llx\n", reg0);
+ reg0 = rvu_read64(rvu, blkaddr, CPT_AF_EXE_ERR_INFO);
+ seq_printf(filp, "CPT_AF_EXE_ERR_INFO: 0x%llx\n", reg0);
+
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(cpt_err_info, cpt_err_info_display, NULL);
+
+static int rvu_dbg_cpt_pc_display(struct seq_file *filp, void *unused)
+{
+ struct dentry *current_dir;
+ struct rvu *rvu;
+ int blkaddr;
+ u64 reg;
+
+ rvu = filp->private;
+
+ current_dir = filp->file->f_path.dentry->d_parent;
+ blkaddr = (!strcmp(current_dir->d_name.name, "cpt1") ?
+ BLKADDR_CPT1 : BLKADDR_CPT0);
+
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_REQ_PC);
+ seq_printf(filp, "CPT instruction requests %llu\n", reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_LATENCY_PC);
+ seq_printf(filp, "CPT instruction latency %llu\n", reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_REQ_PC);
+ seq_printf(filp, "CPT NCB read requests %llu\n", reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_LATENCY_PC);
+ seq_printf(filp, "CPT NCB read latency %llu\n", reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_UC_PC);
+ seq_printf(filp, "CPT read requests caused by UC fills %llu\n", reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_ACTIVE_CYCLES_PC);
+ seq_printf(filp, "CPT active cycles pc %llu\n", reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_CPTCLK_CNT);
+ seq_printf(filp, "CPT clock count pc %llu\n", reg);
+
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(cpt_pc, cpt_pc_display, NULL);
+
+static void rvu_dbg_cpt_init(struct rvu *rvu, int blkaddr)
+{
+ if (!is_block_implemented(rvu->hw, blkaddr))
+ return;
+
+ if (blkaddr == BLKADDR_CPT0) {
+ rvu->rvu_dbg.cpt = debugfs_create_dir("cpt", rvu->rvu_dbg.root);
+ } else {
+ rvu->rvu_dbg.cpt = debugfs_create_dir("cpt1",
+ rvu->rvu_dbg.root);
+ }
+ debugfs_create_file("cpt_pc", 0600, rvu->rvu_dbg.cpt, rvu,
+ &rvu_dbg_cpt_pc_fops);
+ debugfs_create_file("cpt_engines_sts", 0600, rvu->rvu_dbg.cpt, rvu,
+ &rvu_dbg_cpt_engines_sts_fops);
+ debugfs_create_file("cpt_engines_info", 0600, rvu->rvu_dbg.cpt, rvu,
+ &rvu_dbg_cpt_engines_info_fops);
+ debugfs_create_file("cpt_lfs_info", 0600, rvu->rvu_dbg.cpt, rvu,
+ &rvu_dbg_cpt_lfs_info_fops);
+ debugfs_create_file("cpt_err_info", 0600, rvu->rvu_dbg.cpt, rvu,
+ &rvu_dbg_cpt_err_info_fops);
+}
+
RVU_DEBUG_SEQ_FOPS(npc_mcam_info, npc_mcam_info_display, NULL);
static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp,
@@ -1738,57 +3394,255 @@ static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp,
RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL);
-static void rvu_dbg_npc_init(struct rvu *rvu)
+static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
+ struct rvu_npc_mcam_rule *rule)
{
- const struct device *dev = &rvu->pdev->dev;
- struct dentry *pfile;
+ u8 bit;
+
+ for_each_set_bit(bit, (unsigned long *)&rule->features, 64) {
+ seq_printf(s, "\t%s ", npc_get_field_name(bit));
+ switch (bit) {
+ case NPC_DMAC:
+ seq_printf(s, "%pM ", rule->packet.dmac);
+ seq_printf(s, "mask %pM\n", rule->mask.dmac);
+ break;
+ case NPC_SMAC:
+ seq_printf(s, "%pM ", rule->packet.smac);
+ seq_printf(s, "mask %pM\n", rule->mask.smac);
+ break;
+ case NPC_ETYPE:
+ seq_printf(s, "0x%x ", ntohs(rule->packet.etype));
+ seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.etype));
+ break;
+ case NPC_OUTER_VID:
+ case NPC_FDSA_VAL:
+ seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_tci));
+ seq_printf(s, "mask 0x%x\n",
+ ntohs(rule->mask.vlan_tci));
+ break;
+ case NPC_TOS:
+ seq_printf(s, "%d ", rule->packet.tos);
+ seq_printf(s, "mask 0x%x\n", rule->mask.tos);
+ break;
+ case NPC_SIP_IPV4:
+ seq_printf(s, "%pI4 ", &rule->packet.ip4src);
+ seq_printf(s, "mask %pI4\n", &rule->mask.ip4src);
+ break;
+ case NPC_DIP_IPV4:
+ seq_printf(s, "%pI4 ", &rule->packet.ip4dst);
+ seq_printf(s, "mask %pI4\n", &rule->mask.ip4dst);
+ break;
+ case NPC_SIP_IPV6:
+ seq_printf(s, "%pI6 ", rule->packet.ip6src);
+ seq_printf(s, "mask %pI6\n", rule->mask.ip6src);
+ break;
+ case NPC_DIP_IPV6:
+ seq_printf(s, "%pI6 ", rule->packet.ip6dst);
+ seq_printf(s, "mask %pI6\n", rule->mask.ip6dst);
+ break;
+ case NPC_SPORT_TCP:
+ case NPC_SPORT_UDP:
+ case NPC_SPORT_SCTP:
+ seq_printf(s, "%d ", ntohs(rule->packet.sport));
+ seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.sport));
+ break;
+ case NPC_DPORT_TCP:
+ case NPC_DPORT_UDP:
+ case NPC_DPORT_SCTP:
+ seq_printf(s, "%d ", ntohs(rule->packet.dport));
+ seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport));
+ break;
+ default:
+ seq_puts(s, "\n");
+ break;
+ }
+ }
+}
- rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root);
- if (!rvu->rvu_dbg.npc)
- return;
+static void rvu_dbg_npc_mcam_show_action(struct seq_file *s,
+ struct rvu_npc_mcam_rule *rule)
+{
+ if (is_npc_intf_tx(rule->intf)) {
+ switch (rule->tx_action.op) {
+ case NIX_TX_ACTIONOP_DROP:
+ seq_puts(s, "\taction: Drop\n");
+ break;
+ case NIX_TX_ACTIONOP_UCAST_DEFAULT:
+ seq_puts(s, "\taction: Unicast to default channel\n");
+ break;
+ case NIX_TX_ACTIONOP_UCAST_CHAN:
+ seq_printf(s, "\taction: Unicast to channel %d\n",
+ rule->tx_action.index);
+ break;
+ case NIX_TX_ACTIONOP_MCAST:
+ seq_puts(s, "\taction: Multicast\n");
+ break;
+ case NIX_TX_ACTIONOP_DROP_VIOL:
+ seq_puts(s, "\taction: Lockdown Violation Drop\n");
+ break;
+ default:
+ break;
+ }
+ } else {
+ switch (rule->rx_action.op) {
+ case NIX_RX_ACTIONOP_DROP:
+ seq_puts(s, "\taction: Drop\n");
+ break;
+ case NIX_RX_ACTIONOP_UCAST:
+ seq_printf(s, "\taction: Direct to queue %d\n",
+ rule->rx_action.index);
+ break;
+ case NIX_RX_ACTIONOP_RSS:
+ seq_puts(s, "\taction: RSS\n");
+ break;
+ case NIX_RX_ACTIONOP_UCAST_IPSEC:
+ seq_puts(s, "\taction: Unicast ipsec\n");
+ break;
+ case NIX_RX_ACTIONOP_MCAST:
+ seq_puts(s, "\taction: Multicast\n");
+ break;
+ default:
+ break;
+ }
+ }
+}
- pfile = debugfs_create_file("mcam_info", 0444, rvu->rvu_dbg.npc,
- rvu, &rvu_dbg_npc_mcam_info_fops);
- if (!pfile)
- goto create_failed;
+static const char *rvu_dbg_get_intf_name(int intf)
+{
+ switch (intf) {
+ case NIX_INTFX_RX(0):
+ return "NIX0_RX";
+ case NIX_INTFX_RX(1):
+ return "NIX1_RX";
+ case NIX_INTFX_TX(0):
+ return "NIX0_TX";
+ case NIX_INTFX_TX(1):
+ return "NIX1_TX";
+ default:
+ break;
+ }
- pfile = debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc,
- rvu, &rvu_dbg_npc_rx_miss_act_fops);
- if (!pfile)
- goto create_failed;
+ return "unknown";
+}
- return;
+static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
+{
+ struct rvu_npc_mcam_rule *iter;
+ struct rvu *rvu = s->private;
+ struct npc_mcam *mcam;
+ int pf, vf = -1;
+ bool enabled;
+ int blkaddr;
+ u16 target;
+ u64 hits;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return 0;
+
+ mcam = &rvu->hw->mcam;
+
+ mutex_lock(&mcam->lock);
+ list_for_each_entry(iter, &mcam->mcam_rules, list) {
+ pf = (iter->owner >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
+ seq_printf(s, "\n\tInstalled by: PF%d ", pf);
+
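+ /* Non-zero FUNC bits in the owner pcifunc identify a VF (index + 1);
+  * zero means the rule was installed by the PF itself.
+  */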
+ if (iter->owner & RVU_PFVF_FUNC_MASK) {
+ vf = (iter->owner & RVU_PFVF_FUNC_MASK) - 1;
+ seq_printf(s, "VF%d", vf);
+ }
+ seq_puts(s, "\n");
+
+ seq_printf(s, "\tdirection: %s\n", is_npc_intf_rx(iter->intf) ?
+ "RX" : "TX");
+ seq_printf(s, "\tinterface: %s\n",
+ rvu_dbg_get_intf_name(iter->intf));
+ seq_printf(s, "\tmcam entry: %d\n", iter->entry);
+
+ rvu_dbg_npc_mcam_show_flows(s, iter);
+ if (is_npc_intf_rx(iter->intf)) {
+ target = iter->rx_action.pf_func;
+ pf = (target >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
+ seq_printf(s, "\tForward to: PF%d ", pf);
+
+ if (target & RVU_PFVF_FUNC_MASK) {
+ vf = (target & RVU_PFVF_FUNC_MASK) - 1;
+ seq_printf(s, "VF%d", vf);
+ }
+ seq_puts(s, "\n");
+ seq_printf(s, "\tchannel: 0x%x\n", iter->chan);
+ seq_printf(s, "\tchannel_mask: 0x%x\n", iter->chan_mask);
+ }
+
+ rvu_dbg_npc_mcam_show_action(s, iter);
+
+ enabled = is_mcam_entry_enabled(rvu, mcam, blkaddr, iter->entry);
+ seq_printf(s, "\tenabled: %s\n", enabled ? "yes" : "no");
+
+ if (!iter->has_cntr)
+ continue;
+ seq_printf(s, "\tcounter: %d\n", iter->cntr);
+
+ hits = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(iter->cntr));
+ seq_printf(s, "\thits: %lld\n", hits);
+ }
+ mutex_unlock(&mcam->lock);
+
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(npc_mcam_rules, npc_mcam_show_rules, NULL);
+
+static void rvu_dbg_npc_init(struct rvu *rvu)
+{
+ rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root);
+ debugfs_create_file("mcam_info", 0444, rvu->rvu_dbg.npc, rvu,
+ &rvu_dbg_npc_mcam_info_fops);
+ debugfs_create_file("mcam_rules", 0444, rvu->rvu_dbg.npc, rvu,
+ &rvu_dbg_npc_mcam_rules_fops);
+ debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc, rvu,
+ &rvu_dbg_npc_rx_miss_act_fops);
+}
-create_failed:
- dev_err(dev, "Failed to create debugfs dir/file for NPC\n");
- debugfs_remove_recursive(rvu->rvu_dbg.npc);
+static const char *rvu_get_dbg_dir_name(struct rvu *rvu)
+{
+ if (!is_rvu_otx2(rvu))
+ return "cn10k";
+ else
+ return "octeontx2";
}
void rvu_dbg_init(struct rvu *rvu)
{
- struct device *dev = &rvu->pdev->dev;
- struct dentry *pfile;
+ rvu->rvu_dbg.root = debugfs_create_dir(rvu_get_dbg_dir_name(rvu), NULL);
- rvu->rvu_dbg.root = debugfs_create_dir(DEBUGFS_DIR_NAME, NULL);
- if (!rvu->rvu_dbg.root) {
- dev_err(rvu->dev, "%s failed\n", __func__);
- return;
- }
- pfile = debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu,
- &rvu_dbg_rsrc_status_fops);
- if (!pfile)
- goto create_failed;
+ debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu,
+ &rvu_dbg_rsrc_status_fops);
+
+ if (!is_rvu_otx2(rvu))
+ debugfs_create_file("lmtst_map_table", 0444, rvu->rvu_dbg.root,
+ rvu, &rvu_dbg_lmtst_map_table_fops);
+ if (!cgx_get_cgxcnt_max())
+ goto create;
+
+ if (is_rvu_otx2(rvu))
+ debugfs_create_file("rvu_pf_cgx_map", 0444, rvu->rvu_dbg.root,
+ rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
+ else
+ debugfs_create_file("rvu_pf_rpm_map", 0444, rvu->rvu_dbg.root,
+ rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
+
+create:
rvu_dbg_npa_init(rvu);
- rvu_dbg_nix_init(rvu);
+ rvu_dbg_nix_init(rvu, BLKADDR_NIX0);
+ rvu_dbg_nix_init(rvu, BLKADDR_NIX1);
rvu_dbg_cgx_init(rvu);
rvu_dbg_npc_init(rvu);
+ rvu_dbg_sso_init(rvu);
- return;
-
-create_failed:
- dev_err(dev, "Failed to create debugfs dir\n");
- debugfs_remove_recursive(rvu->rvu_dbg.root);
+ rvu_dbg_cpt_init(rvu, BLKADDR_CPT0);
+ rvu_dbg_cpt_init(rvu, BLKADDR_CPT1);
}
void rvu_dbg_exit(struct rvu *rvu)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
new file mode 100644
index 000000000000..eb2da2b77af4
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
@@ -0,0 +1,1817 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function Devlink
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/bitfield.h>
+
+#include "rvu.h"
+#include "rvu_reg.h"
+#include "rvu_struct.h"
+
+#define DRV_NAME "octeontx2-af"
+
+static int rvu_report_pair_start(struct devlink_fmsg *fmsg, const char *name)
+{
+ int err;
+
+ err = devlink_fmsg_pair_nest_start(fmsg, name);
+ if (err)
+ return err;
+
+ return devlink_fmsg_obj_nest_start(fmsg);
+}
+
+static int rvu_report_pair_end(struct devlink_fmsg *fmsg)
+{
+ int err;
+
+ err = devlink_fmsg_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ return devlink_fmsg_pair_nest_end(fmsg);
+}
+
+static bool rvu_common_request_irq(struct rvu *rvu, int offset,
+ const char *name, irq_handler_t fn)
+{
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ int rc;
+
+ sprintf(&rvu->irq_name[offset * NAME_SIZE], "%s", name);
+ rc = request_irq(pci_irq_vector(rvu->pdev, offset), fn, 0,
+ &rvu->irq_name[offset * NAME_SIZE], rvu_dl);
+ if (rc)
+ dev_warn(rvu->dev, "Failed to register %s irq\n", name);
+ else
+ rvu->irq_allocated[offset] = true;
+
+ return rvu->irq_allocated[offset];
+}
+
+static void rvu_nix_intr_work(struct work_struct *work)
+{
+ struct rvu_nix_health_reporters *rvu_nix_health_reporter;
+
+ rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, intr_work);
+ devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_intr_reporter,
+ "NIX_AF_RVU Error",
+ rvu_nix_health_reporter->nix_event_ctx);
+}
+
+static irqreturn_t rvu_nix_af_rvu_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu_nix_event_ctx *nix_event_context;
+ struct rvu_devlink *rvu_dl = rvu_irq;
+ struct rvu *rvu;
+ int blkaddr;
+ u64 intr;
+
+ rvu = rvu_dl->rvu;
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
+ intr = rvu_read64(rvu, blkaddr, NIX_AF_RVU_INT);
+ nix_event_context->nix_af_rvu_int = intr;
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT, intr);
+ rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1C, ~0ULL);
+ queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->intr_work);
+
+ return IRQ_HANDLED;
+}
+
+static void rvu_nix_gen_work(struct work_struct *work)
+{
+ struct rvu_nix_health_reporters *rvu_nix_health_reporter;
+
+ rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, gen_work);
+ devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_gen_reporter,
+ "NIX_AF_GEN Error",
+ rvu_nix_health_reporter->nix_event_ctx);
+}
+
+static irqreturn_t rvu_nix_af_rvu_gen_handler(int irq, void *rvu_irq)
+{
+ struct rvu_nix_event_ctx *nix_event_context;
+ struct rvu_devlink *rvu_dl = rvu_irq;
+ struct rvu *rvu;
+ int blkaddr;
+ u64 intr;
+
+ rvu = rvu_dl->rvu;
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
+ intr = rvu_read64(rvu, blkaddr, NIX_AF_GEN_INT);
+ nix_event_context->nix_af_rvu_gen = intr;
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT, intr);
+ rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1C, ~0ULL);
+ queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->gen_work);
+
+ return IRQ_HANDLED;
+}
+
+static void rvu_nix_err_work(struct work_struct *work)
+{
+ struct rvu_nix_health_reporters *rvu_nix_health_reporter;
+
+ rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, err_work);
+ devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_err_reporter,
+ "NIX_AF_ERR Error",
+ rvu_nix_health_reporter->nix_event_ctx);
+}
+
+static irqreturn_t rvu_nix_af_rvu_err_handler(int irq, void *rvu_irq)
+{
+ struct rvu_nix_event_ctx *nix_event_context;
+ struct rvu_devlink *rvu_dl = rvu_irq;
+ struct rvu *rvu;
+ int blkaddr;
+ u64 intr;
+
+ rvu = rvu_dl->rvu;
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
+ intr = rvu_read64(rvu, blkaddr, NIX_AF_ERR_INT);
+ nix_event_context->nix_af_rvu_err = intr;
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT, intr);
+ rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1C, ~0ULL);
+ queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->err_work);
+
+ return IRQ_HANDLED;
+}
+
+static void rvu_nix_ras_work(struct work_struct *work)
+{
+ struct rvu_nix_health_reporters *rvu_nix_health_reporter;
+
+ rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, ras_work);
+ devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_ras_reporter,
+ "NIX_AF_RAS Error",
+ rvu_nix_health_reporter->nix_event_ctx);
+}
+
+static irqreturn_t rvu_nix_af_rvu_ras_handler(int irq, void *rvu_irq)
+{
+ struct rvu_nix_event_ctx *nix_event_context;
+ struct rvu_devlink *rvu_dl = rvu_irq;
+ struct rvu *rvu;
+ int blkaddr;
+ u64 intr;
+
+ rvu = rvu_dl->rvu;
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
+ intr = rvu_read64(rvu, blkaddr, NIX_AF_RAS);
+ nix_event_context->nix_af_rvu_ras = intr;
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, NIX_AF_RAS, intr);
+ rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1C, ~0ULL);
+ queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->ras_work);
+
+ return IRQ_HANDLED;
+}
+
+static void rvu_nix_unregister_interrupts(struct rvu *rvu)
+{
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ int offs, i, blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return;
+
+ offs = rvu_read64(rvu, blkaddr, NIX_PRIV_AF_INT_CFG) & 0x3ff;
+ if (!offs)
+ return;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1C, ~0ULL);
+ rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1C, ~0ULL);
+ rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1C, ~0ULL);
+ rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1C, ~0ULL);
+
+ if (rvu->irq_allocated[offs + NIX_AF_INT_VEC_RVU]) {
+ free_irq(pci_irq_vector(rvu->pdev, offs + NIX_AF_INT_VEC_RVU),
+ rvu_dl);
+ rvu->irq_allocated[offs + NIX_AF_INT_VEC_RVU] = false;
+ }
+
+ for (i = NIX_AF_INT_VEC_GEN; i < NIX_AF_INT_VEC_CNT; i++)
+ if (rvu->irq_allocated[offs + i]) {
+ free_irq(pci_irq_vector(rvu->pdev, offs + i), rvu_dl);
+ rvu->irq_allocated[offs + i] = false;
+ }
+}
+
+static int rvu_nix_register_interrupts(struct rvu *rvu)
+{
+ int blkaddr, base;
+ bool rc;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ /* Get NIX AF MSIX vectors offset. */
+ base = rvu_read64(rvu, blkaddr, NIX_PRIV_AF_INT_CFG) & 0x3ff;
+ if (!base) {
+ dev_warn(rvu->dev,
+ "Failed to get NIX%d NIX_AF_INT vector offsets\n",
+ blkaddr - BLKADDR_NIX0);
+ return 0;
+ }
+ /* Register and enable NIX_AF_RVU_INT interrupt */
+ rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_RVU,
+ "NIX_AF_RVU_INT",
+ rvu_nix_af_rvu_intr_handler);
+ if (!rc)
+ goto err;
+ rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1S, ~0ULL);
+
+ /* Register and enable NIX_AF_GEN_INT interrupt */
+ rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_GEN,
+ "NIX_AF_GEN_INT",
+ rvu_nix_af_rvu_gen_handler);
+ if (!rc)
+ goto err;
+ rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1S, ~0ULL);
+
+ /* Register and enable NIX_AF_ERR_INT interrupt */
+ rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_AF_ERR,
+ "NIX_AF_ERR_INT",
+ rvu_nix_af_rvu_err_handler);
+ if (!rc)
+ goto err;
+ rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1S, ~0ULL);
+
+ /* Register and enable NIX_AF_RAS interrupt */
+ rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_POISON,
+ "NIX_AF_RAS",
+ rvu_nix_af_rvu_ras_handler);
+ if (!rc)
+ goto err;
+ rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1S, ~0ULL);
+
+ return 0;
+err:
+ rvu_nix_unregister_interrupts(rvu);
+ return rc;
+}
+
+static int rvu_nix_report_show(struct devlink_fmsg *fmsg, void *ctx,
+ enum nix_af_rvu_health health_reporter)
+{
+ struct rvu_nix_event_ctx *nix_event_context;
+ u64 intr_val;
+ int err;
+
+ nix_event_context = ctx;
+ switch (health_reporter) {
+ case NIX_AF_RVU_INTR:
+ intr_val = nix_event_context->nix_af_rvu_int;
+ err = rvu_report_pair_start(fmsg, "NIX_AF_RVU");
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX RVU Interrupt Reg ",
+ nix_event_context->nix_af_rvu_int);
+ if (err)
+ return err;
+ if (intr_val & BIT_ULL(0)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tUnmap Slot Error");
+ if (err)
+ return err;
+ }
+ err = rvu_report_pair_end(fmsg);
+ if (err)
+ return err;
+ break;
+ case NIX_AF_RVU_GEN:
+ intr_val = nix_event_context->nix_af_rvu_gen;
+ err = rvu_report_pair_start(fmsg, "NIX_AF_GENERAL");
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX General Interrupt Reg ",
+ nix_event_context->nix_af_rvu_gen);
+ if (err)
+ return err;
+ if (intr_val & BIT_ULL(0)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tRx multicast pkt drop");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(1)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tRx mirror pkt drop");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(4)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tSMQ flush done");
+ if (err)
+ return err;
+ }
+ err = rvu_report_pair_end(fmsg);
+ if (err)
+ return err;
+ break;
+ case NIX_AF_RVU_ERR:
+ intr_val = nix_event_context->nix_af_rvu_err;
+ err = rvu_report_pair_start(fmsg, "NIX_AF_ERR");
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX Error Interrupt Reg ",
+ nix_event_context->nix_af_rvu_err);
+ if (err)
+ return err;
+ if (intr_val & BIT_ULL(14)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_AQ_INST_S read");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(13)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_AQ_RES_S write");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(12)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tAQ Doorbell Error");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(6)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tRx on unmapped PF_FUNC");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(5)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tRx multicast replication error");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(4)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_RX_MCE_S read");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(3)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFault on multicast WQE read");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(2)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFault on mirror WQE read");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(1)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFault on mirror pkt write");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(0)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFault on multicast pkt write");
+ if (err)
+ return err;
+ }
+ err = rvu_report_pair_end(fmsg);
+ if (err)
+ return err;
+ break;
+ case NIX_AF_RVU_RAS:
+ intr_val = nix_event_context->nix_af_rvu_ras;
+ err = rvu_report_pair_start(fmsg, "NIX_AF_RAS");
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX RAS Interrupt Reg ",
+ nix_event_context->nix_af_rvu_ras);
+ if (err)
+ return err;
+ err = devlink_fmsg_string_put(fmsg, "\n\tPoison Data on:");
+ if (err)
+ return err;
+ if (intr_val & BIT_ULL(34)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_INST_S");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(33)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_RES_S");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(32)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tHW ctx");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(4)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tPacket from mirror buffer");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(3)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tPacket from multicast buffer");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(2)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tWQE read from mirror buffer");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(1)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tWQE read from multicast buffer");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(0)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tNIX_RX_MCE_S read");
+ if (err)
+ return err;
+ }
+ err = rvu_report_pair_end(fmsg);
+ if (err)
+ return err;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rvu_hw_nix_intr_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *ctx, struct netlink_ext_ack *ext_ack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ struct rvu_nix_event_ctx *nix_ctx;
+
+ nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
+
+ return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_INTR) :
+ rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_INTR);
+}
+
+static int rvu_hw_nix_intr_recover(struct devlink_health_reporter *reporter,
+ void *ctx, struct netlink_ext_ack *ext_ack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_nix_event_ctx *nix_event_ctx = ctx;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ if (nix_event_ctx->nix_af_rvu_int)
+ rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1S, ~0ULL);
+
+ return 0;
+}
+
+static int rvu_hw_nix_gen_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *ctx, struct netlink_ext_ack *ext_ack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ struct rvu_nix_event_ctx *nix_ctx;
+
+ nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
+
+ return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_GEN) :
+ rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_GEN);
+}
+
+static int rvu_hw_nix_gen_recover(struct devlink_health_reporter *reporter,
+ void *ctx, struct netlink_ext_ack *ext_ack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_nix_event_ctx *nix_event_ctx = ctx;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ if (nix_event_ctx->nix_af_rvu_gen)
+ rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1S, ~0ULL);
+
+ return 0;
+}
+
+static int rvu_hw_nix_err_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *ctx, struct netlink_ext_ack *ext_ack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ struct rvu_nix_event_ctx *nix_ctx;
+
+ nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
+
+ return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_ERR) :
+ rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_ERR);
+}
+
+static int rvu_hw_nix_err_recover(struct devlink_health_reporter *reporter,
+ void *ctx, struct netlink_ext_ack *ext_ack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_nix_event_ctx *nix_event_ctx = ctx;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ if (nix_event_ctx->nix_af_rvu_err)
+ rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1S, ~0ULL);
+
+ return 0;
+}
+
+static int rvu_hw_nix_ras_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *ctx, struct netlink_ext_ack *ext_ack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ struct rvu_nix_event_ctx *nix_ctx;
+
+ nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
+
+ return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_RAS) :
+ rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_RAS);
+}
+
+static int rvu_hw_nix_ras_recover(struct devlink_health_reporter *reporter,
+ void *ctx, struct netlink_ext_ack *ext_ack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_nix_event_ctx *nix_event_ctx = ctx;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ if (nix_event_ctx->nix_af_rvu_ras)
+ rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1S, ~0ULL);
+
+ return 0;
+}
+
+RVU_REPORTERS(hw_nix_intr);
+RVU_REPORTERS(hw_nix_gen);
+RVU_REPORTERS(hw_nix_err);
+RVU_REPORTERS(hw_nix_ras);
+
+static void rvu_nix_health_reporters_destroy(struct rvu_devlink *rvu_dl);
+
+static int rvu_nix_register_reporters(struct rvu_devlink *rvu_dl)
+{
+ struct rvu_nix_health_reporters *rvu_reporters;
+ struct rvu_nix_event_ctx *nix_event_context;
+ struct rvu *rvu = rvu_dl->rvu;
+
+ rvu_reporters = kzalloc(sizeof(*rvu_reporters), GFP_KERNEL);
+ if (!rvu_reporters)
+ return -ENOMEM;
+
+ rvu_dl->rvu_nix_health_reporter = rvu_reporters;
+ nix_event_context = kzalloc(sizeof(*nix_event_context), GFP_KERNEL);
+ if (!nix_event_context)
+ return -ENOMEM;
+
+ rvu_reporters->nix_event_ctx = nix_event_context;
+ rvu_reporters->rvu_hw_nix_intr_reporter =
+ devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_intr_reporter_ops, 0, rvu);
+ if (IS_ERR(rvu_reporters->rvu_hw_nix_intr_reporter)) {
+ dev_warn(rvu->dev, "Failed to create hw_nix_intr reporter, err=%ld\n",
+ PTR_ERR(rvu_reporters->rvu_hw_nix_intr_reporter));
+ return PTR_ERR(rvu_reporters->rvu_hw_nix_intr_reporter);
+ }
+
+ rvu_reporters->rvu_hw_nix_gen_reporter =
+ devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_gen_reporter_ops, 0, rvu);
+ if (IS_ERR(rvu_reporters->rvu_hw_nix_gen_reporter)) {
+ dev_warn(rvu->dev, "Failed to create hw_nix_gen reporter, err=%ld\n",
+ PTR_ERR(rvu_reporters->rvu_hw_nix_gen_reporter));
+ return PTR_ERR(rvu_reporters->rvu_hw_nix_gen_reporter);
+ }
+
+ rvu_reporters->rvu_hw_nix_err_reporter =
+ devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_err_reporter_ops, 0, rvu);
+ if (IS_ERR(rvu_reporters->rvu_hw_nix_err_reporter)) {
+ dev_warn(rvu->dev, "Failed to create hw_nix_err reporter, err=%ld\n",
+ PTR_ERR(rvu_reporters->rvu_hw_nix_err_reporter));
+ return PTR_ERR(rvu_reporters->rvu_hw_nix_err_reporter);
+ }
+
+ rvu_reporters->rvu_hw_nix_ras_reporter =
+ devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_ras_reporter_ops, 0, rvu);
+ if (IS_ERR(rvu_reporters->rvu_hw_nix_ras_reporter)) {
+ dev_warn(rvu->dev, "Failed to create hw_nix_ras reporter, err=%ld\n",
+ PTR_ERR(rvu_reporters->rvu_hw_nix_ras_reporter));
+ return PTR_ERR(rvu_reporters->rvu_hw_nix_ras_reporter);
+ }
+
+ rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq");
+ if (!rvu_dl->devlink_wq)
+ goto err;
+
+ INIT_WORK(&rvu_reporters->intr_work, rvu_nix_intr_work);
+ INIT_WORK(&rvu_reporters->gen_work, rvu_nix_gen_work);
+ INIT_WORK(&rvu_reporters->err_work, rvu_nix_err_work);
+ INIT_WORK(&rvu_reporters->ras_work, rvu_nix_ras_work);
+
+ return 0;
+err:
+ rvu_nix_health_reporters_destroy(rvu_dl);
+ return -ENOMEM;
+}
+
+static int rvu_nix_health_reporters_create(struct rvu_devlink *rvu_dl)
+{
+ struct rvu *rvu = rvu_dl->rvu;
+ int err;
+
+ err = rvu_nix_register_reporters(rvu_dl);
+ if (err) {
+ dev_warn(rvu->dev, "Failed to create nix reporter, err =%d\n",
+ err);
+ return err;
+ }
+ rvu_nix_register_interrupts(rvu);
+
+ return 0;
+}
+
+static void rvu_nix_health_reporters_destroy(struct rvu_devlink *rvu_dl)
+{
+ struct rvu_nix_health_reporters *nix_reporters;
+ struct rvu *rvu = rvu_dl->rvu;
+
+ nix_reporters = rvu_dl->rvu_nix_health_reporter;
+
+ if (!nix_reporters->rvu_hw_nix_ras_reporter)
+ return;
+ if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_intr_reporter))
+ devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_intr_reporter);
+
+ if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_gen_reporter))
+ devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_gen_reporter);
+
+ if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_err_reporter))
+ devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_err_reporter);
+
+ if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_ras_reporter))
+ devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_ras_reporter);
+
+ rvu_nix_unregister_interrupts(rvu);
+ kfree(rvu_dl->rvu_nix_health_reporter->nix_event_ctx);
+ kfree(rvu_dl->rvu_nix_health_reporter);
+}
+
+static void rvu_npa_intr_work(struct work_struct *work)
+{
+ struct rvu_npa_health_reporters *rvu_npa_health_reporter;
+
+ rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, intr_work);
+ devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_intr_reporter,
+ "NPA_AF_RVU Error",
+ rvu_npa_health_reporter->npa_event_ctx);
+}
+
+static irqreturn_t rvu_npa_af_rvu_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu_npa_event_ctx *npa_event_context;
+ struct rvu_devlink *rvu_dl = rvu_irq;
+ struct rvu *rvu;
+ int blkaddr;
+ u64 intr;
+
+ rvu = rvu_dl->rvu;
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
+ intr = rvu_read64(rvu, blkaddr, NPA_AF_RVU_INT);
+ npa_event_context->npa_af_rvu_int = intr;
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT, intr);
+ rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1C, ~0ULL);
+ queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->intr_work);
+
+ return IRQ_HANDLED;
+}
+
+static void rvu_npa_gen_work(struct work_struct *work)
+{
+ struct rvu_npa_health_reporters *rvu_npa_health_reporter;
+
+ rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, gen_work);
+ devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_gen_reporter,
+ "NPA_AF_GEN Error",
+ rvu_npa_health_reporter->npa_event_ctx);
+}
+
+static irqreturn_t rvu_npa_af_gen_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu_npa_event_ctx *npa_event_context;
+ struct rvu_devlink *rvu_dl = rvu_irq;
+ struct rvu *rvu;
+ int blkaddr;
+ u64 intr;
+
+ rvu = rvu_dl->rvu;
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
+ intr = rvu_read64(rvu, blkaddr, NPA_AF_GEN_INT);
+ npa_event_context->npa_af_rvu_gen = intr;
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT, intr);
+ rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1C, ~0ULL);
+ queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->gen_work);
+
+ return IRQ_HANDLED;
+}
+
+static void rvu_npa_err_work(struct work_struct *work)
+{
+ struct rvu_npa_health_reporters *rvu_npa_health_reporter;
+
+ rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, err_work);
+ devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_err_reporter,
+ "NPA_AF_ERR Error",
+ rvu_npa_health_reporter->npa_event_ctx);
+}
+
+static irqreturn_t rvu_npa_af_err_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu_npa_event_ctx *npa_event_context;
+ struct rvu_devlink *rvu_dl = rvu_irq;
+ struct rvu *rvu;
+ int blkaddr;
+ u64 intr;
+
+ rvu = rvu_dl->rvu;
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+ npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
+ intr = rvu_read64(rvu, blkaddr, NPA_AF_ERR_INT);
+ npa_event_context->npa_af_rvu_err = intr;
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT, intr);
+ rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1C, ~0ULL);
+ queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->err_work);
+
+ return IRQ_HANDLED;
+}
+
+static void rvu_npa_ras_work(struct work_struct *work)
+{
+ struct rvu_npa_health_reporters *rvu_npa_health_reporter;
+
+ rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, ras_work);
+ devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_ras_reporter,
+ "HW NPA_AF_RAS Error reported",
+ rvu_npa_health_reporter->npa_event_ctx);
+}
+
+static irqreturn_t rvu_npa_af_ras_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu_npa_event_ctx *npa_event_context;
+ struct rvu_devlink *rvu_dl = rvu_irq;
+ struct rvu *rvu;
+ int blkaddr;
+ u64 intr;
+
+ rvu = rvu_dl->rvu;
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
+ intr = rvu_read64(rvu, blkaddr, NPA_AF_RAS);
+ npa_event_context->npa_af_rvu_ras = intr;
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, NPA_AF_RAS, intr);
+ rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1C, ~0ULL);
+ queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->ras_work);
+
+ return IRQ_HANDLED;
+}
+
+static void rvu_npa_unregister_interrupts(struct rvu *rvu)
+{
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ int i, offs, blkaddr;
+ u64 reg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return;
+
+ reg = rvu_read64(rvu, blkaddr, NPA_PRIV_AF_INT_CFG);
+ offs = reg & 0x3FF;
+
+ rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1C, ~0ULL);
+ rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1C, ~0ULL);
+ rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1C, ~0ULL);
+ rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1C, ~0ULL);
+
+ for (i = 0; i < NPA_AF_INT_VEC_CNT; i++)
+ if (rvu->irq_allocated[offs + i]) {
+ free_irq(pci_irq_vector(rvu->pdev, offs + i), rvu_dl);
+ rvu->irq_allocated[offs + i] = false;
+ }
+}
+
+static int rvu_npa_register_interrupts(struct rvu *rvu)
+{
+ int blkaddr, base;
+ bool rc;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ /* Get NPA AF MSIX vectors offset. */
+ base = rvu_read64(rvu, blkaddr, NPA_PRIV_AF_INT_CFG) & 0x3ff;
+ if (!base) {
+ dev_warn(rvu->dev,
+ "Failed to get NPA_AF_INT vector offsets\n");
+ return 0;
+ }
+
+ /* Register and enable NPA_AF_RVU_INT interrupt */
+ rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_RVU,
+ "NPA_AF_RVU_INT",
+ rvu_npa_af_rvu_intr_handler);
+ if (!rc)
+ goto err;
+ rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1S, ~0ULL);
+
+ /* Register and enable NPA_AF_GEN_INT interrupt */
+ rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_GEN,
+ "NPA_AF_RVU_GEN",
+ rvu_npa_af_gen_intr_handler);
+ if (!rc)
+ goto err;
+ rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1S, ~0ULL);
+
+ /* Register and enable NPA_AF_ERR_INT interrupt */
+ rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_AF_ERR,
+ "NPA_AF_ERR_INT",
+ rvu_npa_af_err_intr_handler);
+ if (!rc)
+ goto err;
+ rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1S, ~0ULL);
+
+ /* Register and enable NPA_AF_RAS interrupt */
+ rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_POISON,
+ "NPA_AF_RAS",
+ rvu_npa_af_ras_intr_handler);
+ if (!rc)
+ goto err;
+ rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1S, ~0ULL);
+
+ return 0;
+err:
+ rvu_npa_unregister_interrupts(rvu);
+ return rc;
+}
+
+static int rvu_npa_report_show(struct devlink_fmsg *fmsg, void *ctx,
+ enum npa_af_rvu_health health_reporter)
+{
+ struct rvu_npa_event_ctx *npa_event_context;
+ u64 intr_val;
+ unsigned int alloc_dis, free_dis;
+ int err;
+
+ npa_event_context = ctx;
+ switch (health_reporter) {
+ case NPA_AF_RVU_GEN:
+ intr_val = npa_event_context->npa_af_rvu_gen;
+ err = rvu_report_pair_start(fmsg, "NPA_AF_GENERAL");
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA General Interrupt Reg ",
+ npa_event_context->npa_af_rvu_gen);
+ if (err)
+ return err;
+ if (intr_val & BIT_ULL(32)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tUnmap PF Error");
+ if (err)
+ return err;
+ }
+
+ free_dis = FIELD_GET(GENMASK(15, 0), intr_val);
+ if (free_dis & BIT(NPA_INPQ_NIX0_RX)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tNIX0: free disabled RX");
+ if (err)
+ return err;
+ }
+ if (free_dis & BIT(NPA_INPQ_NIX0_TX)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tNIX0:free disabled TX");
+ if (err)
+ return err;
+ }
+ if (free_dis & BIT(NPA_INPQ_NIX1_RX)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tNIX1: free disabled RX");
+ if (err)
+ return err;
+ }
+ if (free_dis & BIT(NPA_INPQ_NIX1_TX)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tNIX1:free disabled TX");
+ if (err)
+ return err;
+ }
+ if (free_dis & BIT(NPA_INPQ_SSO)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for SSO");
+ if (err)
+ return err;
+ }
+ if (free_dis & BIT(NPA_INPQ_TIM)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for TIM");
+ if (err)
+ return err;
+ }
+ if (free_dis & BIT(NPA_INPQ_DPI)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for DPI");
+ if (err)
+ return err;
+ }
+ if (free_dis & BIT(NPA_INPQ_AURA_OP)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for AURA");
+ if (err)
+ return err;
+ }
+
+ alloc_dis = FIELD_GET(GENMASK(31, 16), intr_val);
+ if (alloc_dis & BIT(NPA_INPQ_NIX0_RX)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tNIX0: alloc disabled RX");
+ if (err)
+ return err;
+ }
+ if (alloc_dis & BIT(NPA_INPQ_NIX0_TX)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tNIX0:alloc disabled TX");
+ if (err)
+ return err;
+ }
+ if (alloc_dis & BIT(NPA_INPQ_NIX1_RX)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tNIX1: alloc disabled RX");
+ if (err)
+ return err;
+ }
+ if (alloc_dis & BIT(NPA_INPQ_NIX1_TX)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tNIX1:alloc disabled TX");
+ if (err)
+ return err;
+ }
+ if (alloc_dis & BIT(NPA_INPQ_SSO)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for SSO");
+ if (err)
+ return err;
+ }
+ if (alloc_dis & BIT(NPA_INPQ_TIM)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for TIM");
+ if (err)
+ return err;
+ }
+ if (alloc_dis & BIT(NPA_INPQ_DPI)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for DPI");
+ if (err)
+ return err;
+ }
+ if (alloc_dis & BIT(NPA_INPQ_AURA_OP)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for AURA");
+ if (err)
+ return err;
+ }
+ err = rvu_report_pair_end(fmsg);
+ if (err)
+ return err;
+ break;
+ case NPA_AF_RVU_ERR:
+ err = rvu_report_pair_start(fmsg, "NPA_AF_ERR");
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA Error Interrupt Reg ",
+ npa_event_context->npa_af_rvu_err);
+ if (err)
+ return err;
+
+ if (npa_event_context->npa_af_rvu_err & BIT_ULL(14)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFault on NPA_AQ_INST_S read");
+ if (err)
+ return err;
+ }
+ if (npa_event_context->npa_af_rvu_err & BIT_ULL(13)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFault on NPA_AQ_RES_S write");
+ if (err)
+ return err;
+ }
+ if (npa_event_context->npa_af_rvu_err & BIT_ULL(12)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tAQ Doorbell Error");
+ if (err)
+ return err;
+ }
+ err = rvu_report_pair_end(fmsg);
+ if (err)
+ return err;
+ break;
+ case NPA_AF_RVU_RAS:
+ err = rvu_report_pair_start(fmsg, "NPA_AF_RVU_RAS");
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA RAS Interrupt Reg ",
+ npa_event_context->npa_af_rvu_ras);
+ if (err)
+ return err;
+ if (npa_event_context->npa_af_rvu_ras & BIT_ULL(34)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tPoison data on NPA_AQ_INST_S");
+ if (err)
+ return err;
+ }
+ if (npa_event_context->npa_af_rvu_ras & BIT_ULL(33)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tPoison data on NPA_AQ_RES_S");
+ if (err)
+ return err;
+ }
+ if (npa_event_context->npa_af_rvu_ras & BIT_ULL(32)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tPoison data on HW context");
+ if (err)
+ return err;
+ }
+ err = rvu_report_pair_end(fmsg);
+ if (err)
+ return err;
+ break;
+ case NPA_AF_RVU_INTR:
+ err = rvu_report_pair_start(fmsg, "NPA_AF_RVU");
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA RVU Interrupt Reg ",
+ npa_event_context->npa_af_rvu_int);
+ if (err)
+ return err;
+ if (npa_event_context->npa_af_rvu_int & BIT_ULL(0)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tUnmap Slot Error");
+ if (err)
+ return err;
+ }
+ return rvu_report_pair_end(fmsg);
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rvu_hw_npa_intr_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *ctx, struct netlink_ext_ack *extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ struct rvu_npa_event_ctx *npa_ctx;
+
+ npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
+
+ return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_INTR) :
+ rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_INTR);
+}
+
+static int rvu_hw_npa_intr_recover(struct devlink_health_reporter *reporter,
+ void *ctx, struct netlink_ext_ack *extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_npa_event_ctx *npa_event_ctx = ctx;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ if (npa_event_ctx->npa_af_rvu_int)
+ rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1S, ~0ULL);
+
+ return 0;
+}
+
+static int rvu_hw_npa_gen_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *ctx, struct netlink_ext_ack *extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ struct rvu_npa_event_ctx *npa_ctx;
+
+ npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
+
+ return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_GEN) :
+ rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_GEN);
+}
+
+static int rvu_hw_npa_gen_recover(struct devlink_health_reporter *reporter,
+ void *ctx, struct netlink_ext_ack *extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_npa_event_ctx *npa_event_ctx = ctx;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ if (npa_event_ctx->npa_af_rvu_gen)
+ rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1S, ~0ULL);
+
+ return 0;
+}
+
+static int rvu_hw_npa_err_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *ctx, struct netlink_ext_ack *extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ struct rvu_npa_event_ctx *npa_ctx;
+
+ npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
+
+ return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_ERR) :
+ rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_ERR);
+}
+
+static int rvu_hw_npa_err_recover(struct devlink_health_reporter *reporter,
+ void *ctx, struct netlink_ext_ack *extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_npa_event_ctx *npa_event_ctx = ctx;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ if (npa_event_ctx->npa_af_rvu_err)
+ rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1S, ~0ULL);
+
+ return 0;
+}
+
+static int rvu_hw_npa_ras_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *ctx, struct netlink_ext_ack *extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ struct rvu_npa_event_ctx *npa_ctx;
+
+ npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
+
+ return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_RAS) :
+ rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_RAS);
+}
+
+static int rvu_hw_npa_ras_recover(struct devlink_health_reporter *reporter,
+ void *ctx, struct netlink_ext_ack *extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_npa_event_ctx *npa_event_ctx = ctx;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ if (npa_event_ctx->npa_af_rvu_ras)
+ rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1S, ~0ULL);
+
+ return 0;
+}
+
+RVU_REPORTERS(hw_npa_intr);
+RVU_REPORTERS(hw_npa_gen);
+RVU_REPORTERS(hw_npa_err);
+RVU_REPORTERS(hw_npa_ras);
+
+static void rvu_npa_health_reporters_destroy(struct rvu_devlink *rvu_dl);
+
+static int rvu_npa_register_reporters(struct rvu_devlink *rvu_dl)
+{
+ struct rvu_npa_health_reporters *rvu_reporters;
+ struct rvu_npa_event_ctx *npa_event_context;
+ struct rvu *rvu = rvu_dl->rvu;
+
+ rvu_reporters = kzalloc(sizeof(*rvu_reporters), GFP_KERNEL);
+ if (!rvu_reporters)
+ return -ENOMEM;
+
+ rvu_dl->rvu_npa_health_reporter = rvu_reporters;
+ npa_event_context = kzalloc(sizeof(*npa_event_context), GFP_KERNEL);
+ if (!npa_event_context)
+ return -ENOMEM;
+
+ rvu_reporters->npa_event_ctx = npa_event_context;
+ rvu_reporters->rvu_hw_npa_intr_reporter =
+ devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_intr_reporter_ops, 0, rvu);
+ if (IS_ERR(rvu_reporters->rvu_hw_npa_intr_reporter)) {
+ dev_warn(rvu->dev, "Failed to create hw_npa_intr reporter, err=%ld\n",
+ PTR_ERR(rvu_reporters->rvu_hw_npa_intr_reporter));
+ return PTR_ERR(rvu_reporters->rvu_hw_npa_intr_reporter);
+ }
+
+ rvu_reporters->rvu_hw_npa_gen_reporter =
+ devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_gen_reporter_ops, 0, rvu);
+ if (IS_ERR(rvu_reporters->rvu_hw_npa_gen_reporter)) {
+ dev_warn(rvu->dev, "Failed to create hw_npa_gen reporter, err=%ld\n",
+ PTR_ERR(rvu_reporters->rvu_hw_npa_gen_reporter));
+ return PTR_ERR(rvu_reporters->rvu_hw_npa_gen_reporter);
+ }
+
+ rvu_reporters->rvu_hw_npa_err_reporter =
+ devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_err_reporter_ops, 0, rvu);
+ if (IS_ERR(rvu_reporters->rvu_hw_npa_err_reporter)) {
+ dev_warn(rvu->dev, "Failed to create hw_npa_err reporter, err=%ld\n",
+ PTR_ERR(rvu_reporters->rvu_hw_npa_err_reporter));
+ return PTR_ERR(rvu_reporters->rvu_hw_npa_err_reporter);
+ }
+
+ rvu_reporters->rvu_hw_npa_ras_reporter =
+ devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_ras_reporter_ops, 0, rvu);
+ if (IS_ERR(rvu_reporters->rvu_hw_npa_ras_reporter)) {
+ dev_warn(rvu->dev, "Failed to create hw_npa_ras reporter, err=%ld\n",
+ PTR_ERR(rvu_reporters->rvu_hw_npa_ras_reporter));
+ return PTR_ERR(rvu_reporters->rvu_hw_npa_ras_reporter);
+ }
+
+ rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq");
+ if (!rvu_dl->devlink_wq)
+ goto err;
+
+ INIT_WORK(&rvu_reporters->intr_work, rvu_npa_intr_work);
+ INIT_WORK(&rvu_reporters->err_work, rvu_npa_err_work);
+ INIT_WORK(&rvu_reporters->gen_work, rvu_npa_gen_work);
+ INIT_WORK(&rvu_reporters->ras_work, rvu_npa_ras_work);
+
+ return 0;
+err:
+ rvu_npa_health_reporters_destroy(rvu_dl);
+ return -ENOMEM;
+}
+
+static int rvu_npa_health_reporters_create(struct rvu_devlink *rvu_dl)
+{
+ struct rvu *rvu = rvu_dl->rvu;
+ int err;
+
+ err = rvu_npa_register_reporters(rvu_dl);
+ if (err) {
+ dev_warn(rvu->dev, "Failed to create npa reporter, err =%d\n",
+ err);
+ return err;
+ }
+ rvu_npa_register_interrupts(rvu);
+
+ return 0;
+}
+
+static void rvu_npa_health_reporters_destroy(struct rvu_devlink *rvu_dl)
+{
+ struct rvu_npa_health_reporters *npa_reporters;
+ struct rvu *rvu = rvu_dl->rvu;
+
+ npa_reporters = rvu_dl->rvu_npa_health_reporter;
+
+ if (!npa_reporters->rvu_hw_npa_ras_reporter)
+ return;
+ if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_intr_reporter))
+ devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_intr_reporter);
+
+ if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_gen_reporter))
+ devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_gen_reporter);
+
+ if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_err_reporter))
+ devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_err_reporter);
+
+ if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_ras_reporter))
+ devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_ras_reporter);
+
+ rvu_npa_unregister_interrupts(rvu);
+ kfree(rvu_dl->rvu_npa_health_reporter->npa_event_ctx);
+ kfree(rvu_dl->rvu_npa_health_reporter);
+}
+
+static int rvu_health_reporters_create(struct rvu *rvu)
+{
+ struct rvu_devlink *rvu_dl;
+ int err;
+
+ rvu_dl = rvu->rvu_dl;
+ err = rvu_npa_health_reporters_create(rvu_dl);
+ if (err)
+ return err;
+
+ return rvu_nix_health_reporters_create(rvu_dl);
+}
+
+static void rvu_health_reporters_destroy(struct rvu *rvu)
+{
+ struct rvu_devlink *rvu_dl;
+
+ if (!rvu->rvu_dl)
+ return;
+
+ rvu_dl = rvu->rvu_dl;
+ rvu_npa_health_reporters_destroy(rvu_dl);
+ rvu_nix_health_reporters_destroy(rvu_dl);
+}
+
+enum rvu_af_dl_param_id {
+ RVU_AF_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU,
+ RVU_AF_DEVLINK_PARAM_ID_TIM_CAPTURE_TIMERS,
+ RVU_AF_DEVLINK_PARAM_ID_TIM_CAPTURE_TENNS,
+ RVU_AF_DEVLINK_PARAM_ID_TIM_CAPTURE_GPIOS,
+ RVU_AF_DEVLINK_PARAM_ID_TIM_CAPTURE_GTI,
+ RVU_AF_DEVLINK_PARAM_ID_TIM_CAPTURE_PTP,
+ RVU_AF_DEVLINK_PARAM_ID_TIM_CAPTURE_SYNC,
+ RVU_AF_DEVLINK_PARAM_ID_TIM_CAPTURE_BTS,
+ RVU_AF_DEVLINK_PARAM_ID_TIM_CAPTURE_EXT_GTI,
+ RVU_AF_DEVLINK_PARAM_ID_TIM_ADJUST_TIMERS,
+ RVU_AF_DEVLINK_PARAM_ID_TIM_ADJUST_TENNS,
+ RVU_AF_DEVLINK_PARAM_ID_TIM_ADJUST_GPIOS,
+ RVU_AF_DEVLINK_PARAM_ID_TIM_ADJUST_GTI,
+ RVU_AF_DEVLINK_PARAM_ID_TIM_ADJUST_PTP,
+ RVU_AF_DEVLINK_PARAM_ID_TIM_ADJUST_BTS,
+};
+
+static u64 rvu_af_dl_tim_param_id_to_offset(u32 id)
+{
+ u64 offset = 0;
+
+ switch (id) {
+ case RVU_AF_DEVLINK_PARAM_ID_TIM_CAPTURE_TENNS:
+ offset = TIM_AF_CAPTURE_TENNS;
+ break;
+ case RVU_AF_DEVLINK_PARAM_ID_TIM_CAPTURE_GPIOS:
+ offset = TIM_AF_CAPTURE_GPIOS;
+ break;
+ case RVU_AF_DEVLINK_PARAM_ID_TIM_CAPTURE_GTI:
+ offset = TIM_AF_CAPTURE_GTI;
+ break;
+ case RVU_AF_DEVLINK_PARAM_ID_TIM_CAPTURE_PTP:
+ offset = TIM_AF_CAPTURE_PTP;
+ break;
+ case RVU_AF_DEVLINK_PARAM_ID_TIM_CAPTURE_BTS:
+ offset = TIM_AF_CAPTURE_BTS;
+ break;
+ case RVU_AF_DEVLINK_PARAM_ID_TIM_CAPTURE_EXT_GTI:
+ offset = TIM_AF_CAPTURE_EXT_GTI;
+ break;
+ case RVU_AF_DEVLINK_PARAM_ID_TIM_ADJUST_TENNS:
+ offset = TIM_AF_ADJUST_TENNS;
+ break;
+ case RVU_AF_DEVLINK_PARAM_ID_TIM_ADJUST_GPIOS:
+ offset = TIM_AF_ADJUST_GPIOS;
+ break;
+ case RVU_AF_DEVLINK_PARAM_ID_TIM_ADJUST_GTI:
+ offset = TIM_AF_ADJUST_GTI;
+ break;
+ case RVU_AF_DEVLINK_PARAM_ID_TIM_ADJUST_PTP:
+ offset = TIM_AF_ADJUST_PTP;
+ break;
+ case RVU_AF_DEVLINK_PARAM_ID_TIM_ADJUST_BTS:
+ offset = TIM_AF_ADJUST_BTS;
+ break;
+ }
+
+ return offset;
+}
+
+/* Devlink Params APIs */
+static int rvu_af_dl_dwrr_mtu_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ int dwrr_mtu = val.vu32;
+ struct nix_txsch *txsch;
+ struct nix_hw *nix_hw;
+
+ if (!rvu->hw->cap.nix_common_dwrr_mtu) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Setting DWRR_MTU is not supported on this silicon");
+ return -EOPNOTSUPP;
+ }
+
+ if ((dwrr_mtu > 65536 || !is_power_of_2(dwrr_mtu)) &&
+ (dwrr_mtu != 9728 && dwrr_mtu != 10240)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Invalid, supported MTUs are 0,2,4,8.16,32,64....4K,8K,32K,64K and 9728, 10240");
+ return -EINVAL;
+ }
+
+ nix_hw = get_nix_hw(rvu->hw, BLKADDR_NIX0);
+ if (!nix_hw)
+ return -ENODEV;
+
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
+ if (rvu_rsrc_free_count(&txsch->schq) != txsch->schq.max) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Changing DWRR MTU is not supported when there are active NIXLFs");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Makesure none of the PF/VF interfaces are initialized and retry");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
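+
+/* Worked example of the check above (derived from the condition, not an
+ * exhaustive list): 2048 and 65536 pass as powers of two up to 64K,
+ * 9728 and 10240 pass as the two special jumbo values, while e.g. 1500
+ * is rejected since it is neither a power of two nor a special value.
+ */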
+
+static int rvu_af_dl_dwrr_mtu_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ u64 dwrr_mtu;
+
+ dwrr_mtu = convert_bytes_to_dwrr_mtu(ctx->val.vu32);
+ rvu_write64(rvu, BLKADDR_NIX0, NIX_AF_DWRR_RPM_MTU, dwrr_mtu);
+
+ return 0;
+}
+
+static int rvu_af_dl_dwrr_mtu_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ u64 dwrr_mtu;
+
+ if (!rvu->hw->cap.nix_common_dwrr_mtu)
+ return -EOPNOTSUPP;
+
+ dwrr_mtu = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_DWRR_RPM_MTU);
+ ctx->val.vu32 = convert_dwrr_mtu_to_bytes(dwrr_mtu);
+
+ return 0;
+}
+
+static int rvu_af_dl_tim_capture_timers_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ u64 capt_timers = rvu_read64(rvu, BLKADDR_TIM, TIM_AF_CAPTURE_TIMERS);
+
+ ctx->val.vu8 = (u8)(capt_timers & TIM_AF_CAPTURE_TIMERS_MASK);
+
+ return 0;
+}
+
+static int rvu_af_dl_tim_capture_timers_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+
+ rvu_write64(rvu, BLKADDR_TIM, TIM_AF_CAPTURE_TIMERS, (u64)ctx->val.vu8);
+
+ return 0;
+}
+
+static int rvu_af_dl_tim_capture_timers_validate(struct devlink *devlink,
+ u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ if (val.vu8 > TIM_AF_CAPTURE_TIMERS_MASK) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Invalid value to set tim capture timers");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rvu_af_dl_tim_capture_time_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ u64 time, offset;
+
+ offset = rvu_af_dl_tim_param_id_to_offset(id);
+ time = rvu_read64(rvu, BLKADDR_TIM, offset);
+ snprintf(ctx->val.vstr, sizeof(ctx->val.vstr), "%llu", time);
+
+ return 0;
+}
+
+static int rvu_af_dl_tim_capture_time_set(struct devlink __always_unused *devlink,
+ u32 __always_unused id,
+ struct devlink_param_gset_ctx __always_unused *ctx)
+{
+ return 0;
+}
+
+static int rvu_af_dl_tim_adjust_timers_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ u64 adjust_timer = rvu_read64(rvu, BLKADDR_TIM, TIM_AF_ADJUST_TIMERS);
+
+ if (adjust_timer & TIM_AF_ADJUST_TIMERS_MASK)
+ ctx->val.vbool = true;
+ else
+ ctx->val.vbool = false;
+
+ return 0;
+}
+
+static int rvu_af_dl_tim_adjust_timers_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ u64 adjust_timer = ctx->val.vbool ? BIT_ULL(0) : 0;
+
+ rvu_write64(rvu, BLKADDR_TIM, TIM_AF_ADJUST_TIMERS, adjust_timer);
+
+ return 0;
+}
+
+static int rvu_af_dl_tim_adjust_timer_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ u64 offset, delta;
+
+ offset = rvu_af_dl_tim_param_id_to_offset(id);
+ delta = rvu_read64(rvu, BLKADDR_TIM, offset);
+ snprintf(ctx->val.vstr, sizeof(ctx->val.vstr), "%llu", delta);
+
+ return 0;
+}
+
+static int rvu_af_dl_tim_adjust_timer_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ u64 offset, delta;
+
+ if (kstrtoull(ctx->val.vstr, 10, &delta))
+ return -EINVAL;
+
+ offset = rvu_af_dl_tim_param_id_to_offset(id);
+ rvu_write64(rvu, BLKADDR_TIM, offset, delta);
+
+ return 0;
+}
+
+static int rvu_af_dl_tim_adjust_timer_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ u64 delta;
+
+ if (kstrtoull(val.vstr, 10, &delta)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Invalid value to set tim adjust timer");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct devlink_param rvu_af_dl_params[] = {
+ DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU,
+ "dwrr_mtu", DEVLINK_PARAM_TYPE_U32,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ rvu_af_dl_dwrr_mtu_get, rvu_af_dl_dwrr_mtu_set,
+ rvu_af_dl_dwrr_mtu_validate),
+ DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_TIM_CAPTURE_TIMERS,
+ "tim_capture_timers", DEVLINK_PARAM_TYPE_U8,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ rvu_af_dl_tim_capture_timers_get,
+ rvu_af_dl_tim_capture_timers_set,
+ rvu_af_dl_tim_capture_timers_validate),
+ DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_TIM_CAPTURE_TENNS,
+ "tim_capture_tenns", DEVLINK_PARAM_TYPE_STRING,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ rvu_af_dl_tim_capture_time_get,
+ rvu_af_dl_tim_capture_time_set, NULL),
+ DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_TIM_CAPTURE_GPIOS,
+ "tim_capture_gpios", DEVLINK_PARAM_TYPE_STRING,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ rvu_af_dl_tim_capture_time_get,
+ rvu_af_dl_tim_capture_time_set, NULL),
+ DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_TIM_CAPTURE_GTI,
+ "tim_capture_gti", DEVLINK_PARAM_TYPE_STRING,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ rvu_af_dl_tim_capture_time_get,
+ rvu_af_dl_tim_capture_time_set, NULL),
+ DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_TIM_CAPTURE_PTP,
+ "tim_capture_ptp", DEVLINK_PARAM_TYPE_STRING,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ rvu_af_dl_tim_capture_time_get,
+ rvu_af_dl_tim_capture_time_set, NULL),
+ DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_TIM_CAPTURE_SYNC,
+ "tim_capture_sync", DEVLINK_PARAM_TYPE_STRING,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ rvu_af_dl_tim_capture_time_get,
+ rvu_af_dl_tim_capture_time_set, NULL),
+ DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_TIM_CAPTURE_BTS,
+ "tim_capture_bts", DEVLINK_PARAM_TYPE_STRING,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ rvu_af_dl_tim_capture_time_get,
+ rvu_af_dl_tim_capture_time_set, NULL),
+ DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_TIM_CAPTURE_EXT_GTI,
+ "tim_capture_ext_gti", DEVLINK_PARAM_TYPE_STRING,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ rvu_af_dl_tim_capture_time_get,
+ rvu_af_dl_tim_capture_time_set, NULL),
+ DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_TIM_ADJUST_TIMERS,
+ "tim_adjust_timers", DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ rvu_af_dl_tim_adjust_timers_get,
+ rvu_af_dl_tim_adjust_timers_set, NULL),
+ DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_TIM_ADJUST_TENNS,
+ "tim_adjust_tenns", DEVLINK_PARAM_TYPE_STRING,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ rvu_af_dl_tim_adjust_timer_get,
+ rvu_af_dl_tim_adjust_timer_set,
+ rvu_af_dl_tim_adjust_timer_validate),
+ DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_TIM_ADJUST_GPIOS,
+ "tim_adjust_gpios", DEVLINK_PARAM_TYPE_STRING,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ rvu_af_dl_tim_adjust_timer_get,
+ rvu_af_dl_tim_adjust_timer_set,
+ rvu_af_dl_tim_adjust_timer_validate),
+ DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_TIM_ADJUST_GTI,
+ "tim_adjust_gti", DEVLINK_PARAM_TYPE_STRING,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ rvu_af_dl_tim_adjust_timer_get,
+ rvu_af_dl_tim_adjust_timer_set,
+ rvu_af_dl_tim_adjust_timer_validate),
+ DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_TIM_ADJUST_PTP,
+ "tim_adjust_ptp", DEVLINK_PARAM_TYPE_STRING,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ rvu_af_dl_tim_adjust_timer_get,
+ rvu_af_dl_tim_adjust_timer_set,
+ rvu_af_dl_tim_adjust_timer_validate),
+ DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_TIM_ADJUST_BTS,
+ "tim_adjust_bts", DEVLINK_PARAM_TYPE_STRING,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ rvu_af_dl_tim_adjust_timer_get,
+ rvu_af_dl_tim_adjust_timer_set,
+ rvu_af_dl_tim_adjust_timer_validate),
+};
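+
+/* Usage sketch (assumption: standard iproute2 devlink CLI; the PCI
+ * address is only an example):
+ *   devlink dev param set pci/0002:01:00.0 name dwrr_mtu \
+ *           value 9728 cmode runtime
+ *   devlink dev param show pci/0002:01:00.0 name tim_capture_tenns
+ * All of the parameters above are registered with the runtime cmode
+ * only.
+ */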
+
+/* Devlink switch mode */
+static int rvu_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ struct rvu_switch *rswitch;
+
+ rswitch = &rvu->rswitch;
+ *mode = rswitch->mode;
+
+ return 0;
+}
+
+static int rvu_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
+ struct netlink_ext_ack *extack)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ struct rvu_switch *rswitch;
+
+ rswitch = &rvu->rswitch;
+ switch (mode) {
+ case DEVLINK_ESWITCH_MODE_LEGACY:
+ case DEVLINK_ESWITCH_MODE_SWITCHDEV:
+ if (rswitch->mode == mode)
+ return 0;
+ rswitch->mode = mode;
+ if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
+ rvu_switch_enable(rvu);
+ else
+ rvu_switch_disable(rvu);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
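+
+/* Usage sketch (assumption: standard iproute2 devlink CLI; the PCI
+ * address is only an example):
+ *   devlink dev eswitch set pci/0002:01:00.0 mode switchdev
+ * Moving to switchdev calls rvu_switch_enable(), moving back to legacy
+ * calls rvu_switch_disable(); re-setting the active mode is a no-op.
+ */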
+
+static int rvu_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ return devlink_info_driver_name_put(req, DRV_NAME);
+}
+
+static const struct devlink_ops rvu_devlink_ops = {
+ .info_get = rvu_devlink_info_get,
+ .eswitch_mode_get = rvu_devlink_eswitch_mode_get,
+ .eswitch_mode_set = rvu_devlink_eswitch_mode_set,
+};
+
+int rvu_register_dl(struct rvu *rvu)
+{
+ struct rvu_devlink *rvu_dl;
+ struct devlink *dl;
+ int err;
+
+ dl = devlink_alloc(&rvu_devlink_ops, sizeof(struct rvu_devlink));
+ if (!dl) {
+ dev_warn(rvu->dev, "devlink_alloc failed\n");
+ return -ENOMEM;
+ }
+
+ err = devlink_register(dl, rvu->dev);
+ if (err) {
+ dev_err(rvu->dev, "devlink register failed with error %d\n", err);
+ devlink_free(dl);
+ return err;
+ }
+
+ rvu_dl = devlink_priv(dl);
+ rvu_dl->dl = dl;
+ rvu_dl->rvu = rvu;
+ rvu->rvu_dl = rvu_dl;
+
+ err = rvu_health_reporters_create(rvu);
+ if (err) {
+ dev_err(rvu->dev,
+ "devlink health reporter creation failed with error %d\n", err);
+ goto err_dl_health;
+ }
+
+ err = devlink_params_register(dl, rvu_af_dl_params,
+ ARRAY_SIZE(rvu_af_dl_params));
+ if (err) {
+ dev_err(rvu->dev,
+ "devlink params register failed with error %d", err);
+ goto err_dl_health;
+ }
+
+ devlink_params_publish(dl);
+
+ return 0;
+
+err_dl_health:
+ rvu_health_reporters_destroy(rvu);
+ devlink_unregister(dl);
+ devlink_free(dl);
+ return err;
+}
+
+void rvu_unregister_dl(struct rvu *rvu)
+{
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ struct devlink *dl = rvu_dl->dl;
+
+ if (!dl)
+ return;
+
+ devlink_params_unregister(dl, rvu_af_dl_params,
+ ARRAY_SIZE(rvu_af_dl_params));
+ rvu_health_reporters_destroy(rvu);
+ devlink_unregister(dl);
+ devlink_free(dl);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h
new file mode 100644
index 000000000000..51efe88dce11
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Admin Function Devlink
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef RVU_DEVLINK_H
+#define RVU_DEVLINK_H
+
+#define RVU_REPORTERS(_name) \
+static const struct devlink_health_reporter_ops rvu_ ## _name ## _reporter_ops = { \
+ .name = #_name, \
+ .recover = rvu_ ## _name ## _recover, \
+ .dump = rvu_ ## _name ## _dump, \
+}
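+
+/* For example, RVU_REPORTERS(hw_npa_intr) expands to:
+ *
+ *   static const struct devlink_health_reporter_ops
+ *   rvu_hw_npa_intr_reporter_ops = {
+ *           .name    = "hw_npa_intr",
+ *           .recover = rvu_hw_npa_intr_recover,
+ *           .dump    = rvu_hw_npa_intr_dump,
+ *   };
+ */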
+
+enum npa_af_rvu_health {
+ NPA_AF_RVU_INTR,
+ NPA_AF_RVU_GEN,
+ NPA_AF_RVU_ERR,
+ NPA_AF_RVU_RAS,
+};
+
+struct rvu_npa_event_ctx {
+ u64 npa_af_rvu_int;
+ u64 npa_af_rvu_gen;
+ u64 npa_af_rvu_err;
+ u64 npa_af_rvu_ras;
+};
+
+struct rvu_npa_health_reporters {
+ struct rvu_npa_event_ctx *npa_event_ctx;
+ struct devlink_health_reporter *rvu_hw_npa_intr_reporter;
+ struct work_struct intr_work;
+ struct devlink_health_reporter *rvu_hw_npa_gen_reporter;
+ struct work_struct gen_work;
+ struct devlink_health_reporter *rvu_hw_npa_err_reporter;
+ struct work_struct err_work;
+ struct devlink_health_reporter *rvu_hw_npa_ras_reporter;
+ struct work_struct ras_work;
+};
+
+enum nix_af_rvu_health {
+ NIX_AF_RVU_INTR,
+ NIX_AF_RVU_GEN,
+ NIX_AF_RVU_ERR,
+ NIX_AF_RVU_RAS,
+};
+
+struct rvu_nix_event_ctx {
+ u64 nix_af_rvu_int;
+ u64 nix_af_rvu_gen;
+ u64 nix_af_rvu_err;
+ u64 nix_af_rvu_ras;
+};
+
+struct rvu_nix_health_reporters {
+ struct rvu_nix_event_ctx *nix_event_ctx;
+ struct devlink_health_reporter *rvu_hw_nix_intr_reporter;
+ struct work_struct intr_work;
+ struct devlink_health_reporter *rvu_hw_nix_gen_reporter;
+ struct work_struct gen_work;
+ struct devlink_health_reporter *rvu_hw_nix_err_reporter;
+ struct work_struct err_work;
+ struct devlink_health_reporter *rvu_hw_nix_ras_reporter;
+ struct work_struct ras_work;
+};
+
+struct rvu_devlink {
+ struct devlink *dl;
+ struct rvu *rvu;
+ struct workqueue_struct *devlink_wq;
+ struct rvu_npa_health_reporters *rvu_npa_health_reporter;
+ struct rvu_nix_health_reporters *rvu_nix_health_reporter;
+};
+
+/* Devlink APIs */
+int rvu_register_dl(struct rvu *rvu);
+void rvu_unregister_dl(struct rvu *rvu);
+
+#endif /* RVU_DEVLINK_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_fixes.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_fixes.c
new file mode 100644
index 000000000000..b350dbaf737c
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_fixes.c
@@ -0,0 +1,1009 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2019 Marvell.
+ *
+ */
+
+#include <linux/kthread.h>
+#include <linux/pci.h>
+#include <linux/cpu.h>
+#include <linux/sched/signal.h>
+
+#include "rvu_struct.h"
+#include "rvu_reg.h"
+#include "common.h"
+#include "mbox.h"
+#include "rvu.h"
+#include "cgx.h"
+#include "rvu_fixes.h"
+
+#define OTX2_MAX_CQ_CNT 64
+
+struct nix_tx_stall {
+ struct rvu *rvu;
+ int blkaddr;
+ int smq_count;
+ int tl4_count;
+ int tl3_count;
+ int tl2_count;
+ int sq_count;
+ u16 *smq_tl2_map;
+ u16 *tl4_tl2_map;
+ u16 *tl3_tl2_map;
+ u16 *tl2_tl1_map;
+ u16 *sq_smq_map;
+#define LINK_TYPE_SHIFT 7
+#define EXPR_LINK(map) (map & (1 << LINK_TYPE_SHIFT))
+#define LINK_CHAN_SHIFT 8
+#define LINK_CHAN(map) (map >> LINK_CHAN_SHIFT)
+ u16 *tl2_link_map;
+ u8 *nixlf_tl2_count;
+ u64 *nixlf_poll_count;
+ u64 *nixlf_stall_count;
+ u64 *nlink_credits; /* Normal link credits */
+ u64 poll_cntr;
+ u64 stalled_cntr;
+ int pse_link_bp_level;
+ bool txsch_config_changed;
+ struct mutex txsch_lock; /* To sync Tx SCHQ config update and poll */
+ struct task_struct *poll_thread; /* Tx stall condition polling thread */
+};
+
+/* The transmit stall HW issue's workaround reads loads of registers
+ * at frequent intervals; having a barrier for every register access
+ * would increase the cycles spent in stall detection. Hence the
+ * relaxed counterparts are used.
+ */
+static inline void rvu_wr64(struct rvu *rvu, u64 block, u64 offset, u64 val)
+{
+ writeq_relaxed(val, rvu->afreg_base + ((block << 28) | offset));
+}
+
+static inline u64 rvu_rd64(struct rvu *rvu, u64 block, u64 offset)
+{
+ return readq_relaxed(rvu->afreg_base + ((block << 28) | offset));
+}
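+
+/* Note on the addressing above (derived from the shift, not from the HRM):
+ * each RVU block's AF CSRs occupy their own 1 << 28 byte (256 MB) window
+ * inside BAR0, so "(block << 28) | offset" selects the window and the
+ * register within it.
+ */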
+
+/**
+ * rvu_usleep_interruptible - sleep waiting for signals
+ * @usecs: Time in microseconds to sleep for
+ *
+ * A replica of msleep_interruptible to reduce the tx stall
+ * poll interval.
+ */
+static unsigned long rvu_usleep_interruptible(unsigned int usecs)
+{
+ unsigned long timeout = usecs_to_jiffies(usecs) + 1;
+
+ while (timeout && !signal_pending(current))
+ timeout = schedule_timeout_interruptible(timeout);
+ return jiffies_to_usecs(timeout);
+}
+
+void rvu_nix_txsch_lock(struct nix_hw *nix_hw)
+{
+ struct nix_tx_stall *tx_stall = nix_hw->tx_stall;
+
+ if (tx_stall)
+ mutex_lock(&tx_stall->txsch_lock);
+}
+
+void rvu_nix_txsch_unlock(struct nix_hw *nix_hw)
+{
+ struct nix_tx_stall *tx_stall = nix_hw->tx_stall;
+
+ if (tx_stall)
+ mutex_unlock(&tx_stall->txsch_lock);
+}
+
+void rvu_nix_txsch_config_changed(struct nix_hw *nix_hw)
+{
+ struct nix_tx_stall *tx_stall = nix_hw->tx_stall;
+
+ if (tx_stall)
+ tx_stall->txsch_config_changed = true;
+}
+
+void rvu_nix_update_link_credits(struct rvu *rvu, int blkaddr,
+ int link, u64 ncredits)
+{
+ struct nix_tx_stall *tx_stall;
+ struct nix_hw *nix_hw;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return;
+
+ tx_stall = nix_hw->tx_stall;
+ if (!tx_stall)
+ return;
+
+ tx_stall->nlink_credits[link] = ncredits;
+}
+
+void rvu_nix_update_sq_smq_mapping(struct rvu *rvu, int blkaddr, int nixlf,
+ u16 sq, u16 smq)
+{
+ struct nix_tx_stall *tx_stall;
+ struct nix_hw *nix_hw;
+ int sq_count;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return;
+
+ tx_stall = nix_hw->tx_stall;
+ if (!tx_stall)
+ return;
+
+ sq_count = tx_stall->sq_count;
+
+ rvu_nix_txsch_lock(nix_hw);
+ tx_stall->sq_smq_map[nixlf * sq_count + sq] = smq;
+ rvu_nix_txsch_unlock(nix_hw);
+}
+
+static void rvu_nix_scan_link_credits(struct rvu *rvu, int blkaddr,
+ struct nix_tx_stall *tx_stall)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u64 credits;
+ int link;
+
+ for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++) {
+ credits = rvu_rd64(rvu, blkaddr,
+ NIX_AF_TX_LINKX_NORM_CREDIT(link));
+ tx_stall->nlink_credits[link] = credits;
+ }
+}
+
+static void rvu_nix_scan_tl2_link_mapping(struct rvu *rvu,
+ struct nix_tx_stall *tx_stall,
+ int blkaddr, int tl2, int smq)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int link, chan;
+ u64 link_cfg;
+
+ for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++) {
+ link_cfg = rvu_rd64(rvu, blkaddr,
+ NIX_AF_TL3_TL2X_LINKX_CFG(tl2, link));
+ if (!(link_cfg & BIT_ULL(12)))
+ continue;
+
+ /* Get channel of the LINK to which this TL2 is transmitting */
+ chan = link_cfg & 0x3F;
+ tx_stall->tl2_link_map[tl2] = chan << LINK_CHAN_SHIFT;
+
+ /* Save link info */
+ tx_stall->tl2_link_map[tl2] |= (link & 0x7F);
+
+ /* Workaround assumes TL2 transmits to only one link.
+ * So assume the first link enabled is the only one.
+ */
+ break;
+ }
+}
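+
+/* Encoding example for the map built above: link 2, channel 5 gives
+ * tl2_link_map[tl2] = (5 << LINK_CHAN_SHIFT) | 2 = 0x502, so
+ * LINK_CHAN() recovers channel 5 and (map & 0x7F) recovers link 2.
+ */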
+
+static bool is_sq_allocated(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ int blkaddr, int sq)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ struct admin_queue *aq;
+
+ block = &hw->block[blkaddr];
+ aq = block->aq;
+ spin_lock(&aq->lock);
+ if (test_bit(sq, pfvf->sq_bmap)) {
+ spin_unlock(&aq->lock);
+ return true;
+ }
+ spin_unlock(&aq->lock);
+ return false;
+}
+
+static bool is_schq_allocated(struct rvu *rvu, struct nix_hw *nix_hw,
+ int lvl, int schq)
+{
+ struct nix_txsch *txsch = &nix_hw->txsch[lvl];
+
+ mutex_lock(&rvu->rsrc_lock);
+ if (test_bit(schq, txsch->schq.bmap)) {
+ mutex_unlock(&rvu->rsrc_lock);
+ return true;
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+ return false;
+}
+
+static bool is_sw_xoff_set(struct rvu *rvu, int blkaddr, int lvl, int schq)
+{
+ u64 cfg, swxoff_reg = 0x00;
+
+ switch (lvl) {
+ case NIX_TXSCH_LVL_MDQ:
+ swxoff_reg = NIX_AF_MDQX_SW_XOFF(schq);
+ break;
+ case NIX_TXSCH_LVL_TL4:
+ swxoff_reg = NIX_AF_TL4X_SW_XOFF(schq);
+ break;
+ case NIX_TXSCH_LVL_TL3:
+ swxoff_reg = NIX_AF_TL3X_SW_XOFF(schq);
+ break;
+ case NIX_TXSCH_LVL_TL2:
+ swxoff_reg = NIX_AF_TL2X_SW_XOFF(schq);
+ break;
+ case NIX_TXSCH_LVL_TL1:
+ swxoff_reg = NIX_AF_TL1X_SW_XOFF(schq);
+ break;
+ }
+ if (!swxoff_reg)
+ return false;
+
+ cfg = rvu_rd64(rvu, blkaddr, swxoff_reg);
+ if (cfg & BIT_ULL(0))
+ return true;
+
+ return false;
+}
+
+static void rvu_nix_scan_txsch_hierarchy(struct rvu *rvu,
+ struct nix_hw *nix_hw, int blkaddr)
+{
+ struct nix_tx_stall *tx_stall = nix_hw->tx_stall;
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct nix_txsch *tl2_txsch;
+ struct rvu_block *block;
+ int tl4, tl3, tl2, tl1;
+ int lf, smq, size;
+ u16 pcifunc;
+ u64 cfg;
+
+ /* Clear previous mappings */
+ size = sizeof(u16);
+ memset(tx_stall->smq_tl2_map, U16_MAX, tx_stall->smq_count * size);
+ memset(tx_stall->tl4_tl2_map, U16_MAX, tx_stall->tl4_count * size);
+ memset(tx_stall->tl3_tl2_map, U16_MAX, tx_stall->tl3_count * size);
+ memset(tx_stall->tl2_tl1_map, U16_MAX, tx_stall->tl2_count * size);
+ memset(tx_stall->tl2_link_map, U16_MAX, tx_stall->tl2_count * size);
+
+ for (smq = 0; smq < tx_stall->smq_count; smq++) {
+ /* Skip SMQ if it's not assigned to any */
+ if (!is_schq_allocated(rvu, nix_hw, NIX_TXSCH_LVL_SMQ, smq))
+ continue;
+
+ /* If SW_XOFF is set, ignore the scheduler queue */
+ cfg = rvu_rd64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
+ if (cfg & BIT_ULL(50))
+ continue;
+ if (is_sw_xoff_set(rvu, blkaddr, NIX_TXSCH_LVL_MDQ, smq))
+ continue;
+
+ cfg = rvu_rd64(rvu, blkaddr, NIX_AF_MDQX_PARENT(smq));
+ tl4 = (cfg >> 16) & 0x1FF;
+ if (is_sw_xoff_set(rvu, blkaddr, NIX_TXSCH_LVL_TL4, tl4))
+ continue;
+
+ cfg = rvu_rd64(rvu, blkaddr, NIX_AF_TL4X_PARENT(tl4));
+ tl3 = (cfg >> 16) & 0x1FF;
+ if (is_sw_xoff_set(rvu, blkaddr, NIX_TXSCH_LVL_TL3, tl3))
+ continue;
+
+ cfg = rvu_rd64(rvu, blkaddr, NIX_AF_TL3X_PARENT(tl3));
+ tl2 = (cfg >> 16) & 0x1FF;
+ if (is_sw_xoff_set(rvu, blkaddr, NIX_TXSCH_LVL_TL2, tl2))
+ continue;
+
+ cfg = rvu_rd64(rvu, blkaddr, NIX_AF_TL2X_PARENT(tl2));
+ tl1 = (cfg >> 16) & 0x1FF;
+ if (is_sw_xoff_set(rvu, blkaddr, NIX_TXSCH_LVL_TL1, tl1))
+ continue;
+
+ tx_stall->smq_tl2_map[smq] = tl2;
+ tx_stall->tl4_tl2_map[tl4] = tl2;
+ tx_stall->tl3_tl2_map[tl3] = tl2;
+ tx_stall->tl2_tl1_map[tl2] = tl1;
+ rvu_nix_scan_tl2_link_mapping(rvu, tx_stall, blkaddr, tl2, smq);
+ }
+
+ /* Get count of TL2s attached to each NIXLF */
+ tl2_txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
+ block = &hw->block[blkaddr];
+ memset(tx_stall->nixlf_tl2_count, 0, block->lf.max * sizeof(u8));
+ for (lf = 0; lf < block->lf.max; lf++) {
+ mutex_lock(&rvu->rsrc_lock);
+ if (!test_bit(lf, block->lf.bmap)) {
+ mutex_unlock(&rvu->rsrc_lock);
+ continue;
+ }
+ pcifunc = block->fn_map[lf];
+ mutex_unlock(&rvu->rsrc_lock);
+
+ for (tl2 = 0; tl2 < tx_stall->tl2_count; tl2++) {
+ if (!is_schq_allocated(rvu, nix_hw,
+ NIX_TXSCH_LVL_TL2, tl2))
+ continue;
+ if (pcifunc == TXSCH_MAP_FUNC(tl2_txsch->pfvf_map[tl2]))
+ tx_stall->nixlf_tl2_count[lf]++;
+ }
+ }
+}
+
+#define TX_OCTS 4
+#define RVU_AF_BAR2_SEL (0x9000000ull)
+#define NIX_LF_SQ_OP_OCTS (0xa10)
+
+static bool is_sq_stalled(struct rvu *rvu, struct nix_hw *nix_hw, int smq)
+{
+ struct nix_tx_stall *tx_stall = nix_hw->tx_stall;
+ u64 btx_octs, atx_octs, cfg, incr;
+ int sq_count = tx_stall->sq_count;
+ struct rvu_hwinfo *hw = rvu->hw;
+ int blkaddr = tx_stall->blkaddr;
+ struct nix_txsch *smq_txsch;
+ struct rvu_pfvf *pfvf;
+ atomic64_t *ptr;
+ int nixlf, sq;
+ u16 pcifunc;
+
+ smq_txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
+ pcifunc = TXSCH_MAP_FUNC(smq_txsch->pfvf_map[smq]);
+ nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+ if (nixlf < 0)
+ return false;
+
+ /* If a NIXLF is transmitting pkts via only one TL2, then checking
+ * global NIXLF TX stats is sufficient.
+ */
+ if (tx_stall->nixlf_tl2_count[nixlf] != 1)
+ goto poll_sq_stats;
+
+ tx_stall->nixlf_poll_count[nixlf]++;
+ btx_octs = rvu_rd64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, TX_OCTS));
+ usleep_range(50, 60);
+ atx_octs = rvu_rd64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, TX_OCTS));
+ if (btx_octs == atx_octs) {
+ tx_stall->nixlf_stall_count[nixlf]++;
+ return true;
+ }
+ return false;
+
+poll_sq_stats:
+ if (!tx_stall->nixlf_tl2_count[nixlf])
+ return false;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+
+ /* Enable BAR2 register access from AF BAR2 alias registers */
+ cfg = BIT_ULL(16) | pcifunc;
+ rvu_wr64(rvu, blkaddr, RVU_AF_BAR2_SEL, cfg);
+
+ for (sq = 0; sq < pfvf->sq_ctx->qsize; sq++) {
+ if (!is_sq_allocated(rvu, pfvf, blkaddr, sq))
+ continue;
+
+ rvu_nix_txsch_lock(nix_hw);
+ if (tx_stall->sq_smq_map[nixlf * sq_count + sq] != smq) {
+ rvu_nix_txsch_unlock(nix_hw);
+ continue;
+ }
+ rvu_nix_txsch_unlock(nix_hw);
+
+ incr = (u64)sq << 32;
+ ptr = (__force atomic64_t *)(rvu->afreg_base + ((blkaddr << 28)
+ | RVU_AF_BAR2_ALIASX(nixlf, NIX_LF_SQ_OP_OCTS)));
+
+ btx_octs = atomic64_fetch_add_relaxed(incr, ptr);
+ usleep_range(50, 60);
+ atx_octs = atomic64_fetch_add_relaxed(incr, ptr);
+ /* If at least one SQ is transmitting pkts then the SMQ is
+ * not stalled.
+ */
+ if (btx_octs != atx_octs)
+ return false;
+ }
+ tx_stall->nixlf_stall_count[nixlf]++;
+
+ return true;
+}
+
+static bool rvu_nix_check_smq_stall(struct rvu *rvu, struct nix_hw *nix_hw,
+ int tl2)
+{
+ struct nix_tx_stall *tx_stall = nix_hw->tx_stall;
+ int blkaddr = tx_stall->blkaddr;
+ u64 mdesc_cnt;
+ int smq;
+
+ for (smq = 0; smq < tx_stall->smq_count; smq++) {
+ if (tx_stall->smq_tl2_map[smq] != tl2)
+ continue;
+
+ mdesc_cnt = rvu_rd64(rvu, blkaddr, NIX_AF_SMQX_STATUS(smq));
+ if (!(mdesc_cnt & 0x7F))
+ continue;
+ if (is_sq_stalled(rvu, nix_hw, smq))
+ return true;
+ }
+ return false;
+}
+
+static bool is_cgx_idle(u64 status, u8 link_map)
+{
+ if (EXPR_LINK(link_map))
+ return status & CGXX_CMRX_TX_LMAC_E_IDLE;
+ return status & CGXX_CMRX_TX_LMAC_IDLE;
+}
+
+static bool rvu_cgx_tx_idle(struct rvu *rvu, struct nix_hw *nix_hw,
+ struct nix_txsch *tl2_txsch, int tl2)
+{
+ unsigned long timeout = jiffies + usecs_to_jiffies(20);
+ struct nix_tx_stall *tx_stall = nix_hw->tx_stall;
+ u16 pcifunc, link_map;
+ u8 cgx_id, lmac_id;
+ u64 status;
+ void *cgxd;
+ int pf;
+
+ pcifunc = TXSCH_MAP_FUNC(tl2_txsch->pfvf_map[tl2]);
+ pf = rvu_get_pf(pcifunc);
+ if (!is_pf_cgxmapped(rvu, pf))
+ return false;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ if (!cgxd)
+ return false;
+
+ link_map = tx_stall->tl2_link_map[tl2];
+
+ /* Wait for LMAC TX_IDLE */
+ while (time_before(jiffies, timeout)) {
+ status = cgx_get_lmac_tx_fifo_status(cgxd, lmac_id);
+ if (is_cgx_idle(status, link_map))
+ return true;
+ usleep_range(1, 2);
+ }
+ return false;
+}
+
+static void rvu_nix_restore_tx(struct rvu *rvu, struct nix_hw *nix_hw,
+ int blkaddr, int tl2)
+{
+ struct nix_tx_stall *tx_stall = nix_hw->tx_stall;
+ struct nix_txsch *tl2_txsch;
+ int tl, link;
+
+ link = tx_stall->tl2_link_map[tl2] & 0x7F;
+
+ tx_stall->stalled_cntr++;
+
+ tl2_txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
+ rvu_nix_txsch_lock(nix_hw);
+
+ /* Set SW_XOFF for every TL2 queue which transmits to
+ * the associated link.
+ */
+ for (tl = 0; tl < tx_stall->tl2_count; tl++) {
+ if ((tx_stall->tl2_link_map[tl] & 0x7F) != link)
+ continue;
+ /* The full workaround is implemented assuming a fixed 1:1
+ * TL3:TL2 mapping, i.e. TL3 and TL2 indices can be used
+ * interchangeably. Hence, except in this API, nowhere else
+ * do we check the PSE backpressure level configured in the
+ * NIX_AF_PSE_CHANNEL_LEVEL reg.
+ */
+ if (tx_stall->pse_link_bp_level == NIX_TXSCH_LVL_TL2)
+ rvu_wr64(rvu, blkaddr,
+ NIX_AF_TL2X_SW_XOFF(tl), BIT_ULL(0));
+ else
+ rvu_wr64(rvu, blkaddr,
+ NIX_AF_TL3X_SW_XOFF(tl), BIT_ULL(0));
+ }
+ usleep_range(20, 25);
+
+ /* Wait for LMAC TX_IDLE */
+ if (link < rvu->hw->cgx_links) {
+ if (!rvu_cgx_tx_idle(rvu, nix_hw, tl2_txsch, tl2))
+ goto clear_sw_xoff;
+ }
+
+ /* Restore link credits */
+ rvu_wr64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link),
+ tx_stall->nlink_credits[link]);
+
+ /* Toggle SW_XOFF of every scheduler queue at every level
+ * which points to this TL2.
+ */
+ for (tl = 0; tl < tx_stall->smq_count; tl++) {
+ if (tx_stall->smq_tl2_map[tl] != tl2)
+ continue;
+ rvu_wr64(rvu, blkaddr, NIX_AF_MDQX_SW_XOFF(tl), BIT_ULL(0));
+ rvu_wr64(rvu, blkaddr, NIX_AF_MDQX_SW_XOFF(tl), 0x00);
+ }
+
+ for (tl = 0; tl < tx_stall->tl4_count; tl++) {
+ if (tx_stall->tl4_tl2_map[tl] != tl2)
+ continue;
+ rvu_wr64(rvu, blkaddr, NIX_AF_TL4X_SW_XOFF(tl), BIT_ULL(0));
+ rvu_wr64(rvu, blkaddr, NIX_AF_TL4X_SW_XOFF(tl), 0x00);
+ }
+
+ for (tl = 0; tl < tx_stall->tl3_count; tl++) {
+ if (tx_stall->tl3_tl2_map[tl] != tl2)
+ continue;
+ if (tx_stall->pse_link_bp_level == NIX_TXSCH_LVL_TL2) {
+ rvu_wr64(rvu, blkaddr,
+ NIX_AF_TL3X_SW_XOFF(tl), BIT_ULL(0));
+ rvu_wr64(rvu, blkaddr, NIX_AF_TL3X_SW_XOFF(tl), 0x00);
+ } else {
+ /* TL3 and TL2 indices used by this NIXLF are same */
+ rvu_wr64(rvu, blkaddr,
+ NIX_AF_TL2X_SW_XOFF(tl), BIT_ULL(0));
+ rvu_wr64(rvu, blkaddr, NIX_AF_TL2X_SW_XOFF(tl), 0x00);
+ }
+ }
+
+clear_sw_xoff:
+ /* Clear SW_XOFF of all TL2 queues, which are set above */
+ for (tl = 0; tl < tx_stall->tl2_count; tl++) {
+ if ((tx_stall->tl2_link_map[tl] & 0x7F) != link)
+ continue;
+ if (tx_stall->pse_link_bp_level == NIX_TXSCH_LVL_TL2)
+ rvu_wr64(rvu, blkaddr, NIX_AF_TL2X_SW_XOFF(tl), 0x00);
+ else
+ rvu_wr64(rvu, blkaddr, NIX_AF_TL3X_SW_XOFF(tl), 0x00);
+ }
+ rvu_nix_txsch_unlock(nix_hw);
+}
+
+static bool is_link_backpressured(struct nix_tx_stall *tx_stall,
+ struct nix_hw *nix_hw,
+ int blkaddr, int tl2)
+{
+ struct rvu *rvu = tx_stall->rvu;
+ struct nix_txsch *tl2_txsch;
+ int pkt_cnt, unit_cnt;
+ int link, chan;
+ u64 cfg;
+
+ /* Skip uninitialized ones */
+ if (tx_stall->tl2_link_map[tl2] == U16_MAX)
+ return true;
+
+ link = tx_stall->tl2_link_map[tl2] & 0x7F;
+ chan = LINK_CHAN(tx_stall->tl2_link_map[tl2]);
+
+ cfg = rvu_rd64(rvu, blkaddr, NIX_AF_TX_LINKX_HW_XOFF(link));
+ if (cfg & BIT_ULL(chan))
+ return true;
+
+ /* Skip below checks for LBK links */
+ if (link >= rvu->hw->cgx_links)
+ return false;
+
+ cfg = rvu_rd64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link));
+
+ /* Check if the current credits or pkt count is negative or simply
+ * more than what is configured.
+ */
+ pkt_cnt = (cfg >> 2) & 0x3FF;
+ unit_cnt = (cfg >> 12) & 0xFFFFF;
+ if (pkt_cnt > ((tx_stall->nlink_credits[link] >> 2) & 0x3FF) ||
+ unit_cnt > ((tx_stall->nlink_credits[link] >> 12) & 0xFFFFF)) {
+ return false;
+ }
+
+ tl2_txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
+ if (rvu_cgx_tx_idle(rvu, nix_hw, tl2_txsch, tl2))
+ return false;
+
+ return true;
+}
+
+static int rvu_nix_poll_for_tx_stall(void *arg)
+{
+ struct nix_tx_stall *tx_stall = arg;
+ struct rvu *rvu = tx_stall->rvu;
+ int blkaddr = tx_stall->blkaddr;
+ struct nix_hw *nix_hw;
+ int tl2;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return -EINVAL;
+
+ while (!kthread_should_stop()) {
+ for (tl2 = 0; tl2 < tx_stall->tl2_count; tl2++) {
+ /* Skip TL2 if it's not assigned to any */
+ if (!is_schq_allocated(rvu, nix_hw,
+ NIX_TXSCH_LVL_TL2, tl2))
+ continue;
+
+ tx_stall->poll_cntr++;
+
+ if (tx_stall->txsch_config_changed) {
+ rvu_nix_txsch_lock(nix_hw);
+ rvu_nix_scan_txsch_hierarchy(rvu, nix_hw,
+ blkaddr);
+ tx_stall->txsch_config_changed = false;
+ rvu_nix_txsch_unlock(nix_hw);
+ }
+
+ rvu_nix_txsch_lock(nix_hw);
+ if (is_link_backpressured(tx_stall, nix_hw,
+ blkaddr, tl2)) {
+ rvu_nix_txsch_unlock(nix_hw);
+ continue;
+ }
+ rvu_nix_txsch_unlock(nix_hw);
+
+ if (!rvu_nix_check_smq_stall(rvu, nix_hw, tl2))
+ continue;
+
+ rvu_nix_restore_tx(rvu, nix_hw, blkaddr, tl2);
+ }
+ rvu_usleep_interruptible(250);
+ }
+
+ return 0;
+}
+
+static int rvu_nix_init_tl_map(struct rvu *rvu, struct nix_hw *nix_hw, int lvl)
+{
+ struct nix_tx_stall *tx_stall = nix_hw->tx_stall;
+ struct nix_txsch *txsch;
+ u16 *tl_map;
+
+ txsch = &nix_hw->txsch[lvl];
+ tl_map = devm_kcalloc(rvu->dev, txsch->schq.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!tl_map)
+ return -ENOMEM;
+
+ switch (lvl) {
+ case NIX_TXSCH_LVL_SMQ:
+ tx_stall->smq_count = txsch->schq.max;
+ tx_stall->smq_tl2_map = tl_map;
+ break;
+ case NIX_TXSCH_LVL_TL4:
+ tx_stall->tl4_count = txsch->schq.max;
+ tx_stall->tl4_tl2_map = tl_map;
+ break;
+ case NIX_TXSCH_LVL_TL3:
+ tx_stall->tl3_count = txsch->schq.max;
+ tx_stall->tl3_tl2_map = tl_map;
+ break;
+ case NIX_TXSCH_LVL_TL2:
+ tx_stall->tl2_count = txsch->schq.max;
+ tx_stall->tl2_tl1_map = tl_map;
+ break;
+ }
+ memset(tl_map, U16_MAX, txsch->schq.max * sizeof(u16));
+ return 0;
+}
+
+static int rvu_nix_tx_stall_workaround_init(struct rvu *rvu,
+ struct nix_hw *nix_hw, int blkaddr)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct nix_tx_stall *tx_stall;
+ struct rvu_block *block;
+ int links, err;
+
+ if (!hw->cap.nix_fixed_txschq_mapping)
+ return 0;
+
+ tx_stall = devm_kzalloc(rvu->dev,
+ sizeof(struct nix_tx_stall), GFP_KERNEL);
+ if (!tx_stall)
+ return -ENOMEM;
+
+ tx_stall->blkaddr = blkaddr;
+ tx_stall->rvu = rvu;
+ nix_hw->tx_stall = tx_stall;
+
+ /* Get the level at which link/chan will assert backpressure */
+ if (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL))
+ tx_stall->pse_link_bp_level = NIX_TXSCH_LVL_TL3;
+ else
+ tx_stall->pse_link_bp_level = NIX_TXSCH_LVL_TL2;
+
+ mutex_init(&tx_stall->txsch_lock);
+
+ /* Alloc memory for saving SMQ/TL4/TL3/TL1 to TL2 mapping */
+ err = rvu_nix_init_tl_map(rvu, nix_hw, NIX_TXSCH_LVL_SMQ);
+ if (err)
+ return err;
+ err = rvu_nix_init_tl_map(rvu, nix_hw, NIX_TXSCH_LVL_TL4);
+ if (err)
+ return err;
+ err = rvu_nix_init_tl_map(rvu, nix_hw, NIX_TXSCH_LVL_TL3);
+ if (err)
+ return err;
+ err = rvu_nix_init_tl_map(rvu, nix_hw, NIX_TXSCH_LVL_TL2);
+ if (err)
+ return err;
+
+ block = &hw->block[blkaddr];
+ tx_stall->sq_count = min_t(int, num_online_cpus(), OTX2_MAX_CQ_CNT);
+
+ /* Per-NIXLF SQ to SMQ mapping info */
+ tx_stall->sq_smq_map = devm_kcalloc(rvu->dev,
+ block->lf.max * tx_stall->sq_count,
+ sizeof(u16), GFP_KERNEL);
+ if (!tx_stall->sq_smq_map)
+ return -ENOMEM;
+ memset(tx_stall->sq_smq_map, U16_MAX,
+ block->lf.max * tx_stall->sq_count * sizeof(u16));
+
+ /* TL2 to transmit link mapping info */
+ tx_stall->tl2_link_map = devm_kcalloc(rvu->dev, tx_stall->tl2_count,
+ sizeof(u16), GFP_KERNEL);
+ if (!tx_stall->tl2_link_map)
+ return -ENOMEM;
+ memset(tx_stall->tl2_link_map, U16_MAX,
+ tx_stall->tl2_count * sizeof(u16));
+
+ /* Number of TL2s attached to each NIXLF */
+ tx_stall->nixlf_tl2_count = devm_kcalloc(rvu->dev, block->lf.max,
+ sizeof(u8), GFP_KERNEL);
+ if (!tx_stall->nixlf_tl2_count)
+ return -ENOMEM;
+ memset(tx_stall->nixlf_tl2_count, 0, block->lf.max * sizeof(u8));
+
+ /* Per NIXLF poll and stall counters */
+ tx_stall->nixlf_poll_count = devm_kcalloc(rvu->dev, block->lf.max,
+ sizeof(u64), GFP_KERNEL);
+ if (!tx_stall->nixlf_poll_count)
+ return -ENOMEM;
+ memset(tx_stall->nixlf_poll_count, 0, block->lf.max * sizeof(u64));
+
+ tx_stall->nixlf_stall_count = devm_kcalloc(rvu->dev, block->lf.max,
+ sizeof(u64), GFP_KERNEL);
+ if (!tx_stall->nixlf_stall_count)
+ return -ENOMEM;
+ memset(tx_stall->nixlf_stall_count, 0, block->lf.max * sizeof(u64));
+
+ /* For saving HW link's transmit credits config */
+ links = rvu->hw->cgx_links + rvu->hw->lbk_links;
+ tx_stall->nlink_credits = devm_kcalloc(rvu->dev, links,
+ sizeof(u64), GFP_KERNEL);
+ if (!tx_stall->nlink_credits)
+ return -ENOMEM;
+ rvu_nix_scan_link_credits(rvu, blkaddr, tx_stall);
+
+ tx_stall->poll_thread = kthread_create(rvu_nix_poll_for_tx_stall,
+ (void *)tx_stall,
+ "nix_tx_stall_polling_kthread");
+ if (IS_ERR(tx_stall->poll_thread))
+ return PTR_ERR(tx_stall->poll_thread);
+
+ kthread_bind(tx_stall->poll_thread, cpumask_first(cpu_online_mask));
+ wake_up_process(tx_stall->poll_thread);
+ return 0;
+}
+
+static void rvu_nix_tx_stall_workaround_exit(struct rvu *rvu,
+ struct nix_hw *nix_hw)
+{
+ struct nix_tx_stall *tx_stall = nix_hw->tx_stall;
+
+ if (!tx_stall)
+ return;
+
+ if (tx_stall->poll_thread)
+ kthread_stop(tx_stall->poll_thread);
+ mutex_destroy(&tx_stall->txsch_lock);
+}
+
+ssize_t rvu_nix_get_tx_stall_counters(struct nix_hw *nix_hw,
+ char __user *buffer, loff_t *ppos)
+{
+ struct rvu *rvu = nix_hw->rvu;
+ struct rvu_hwinfo *hw;
+ struct nix_tx_stall *tx_stall;
+ struct rvu_block *block;
+ int blkaddr, len, lf;
+ char kbuf[2000];
+
+ hw = rvu->hw;
+ if (*ppos)
+ return 0;
+
+ blkaddr = nix_hw->blkaddr;
+
+ tx_stall = nix_hw->tx_stall;
+ if (!tx_stall)
+ return -EFAULT;
+
+ len = snprintf(kbuf, sizeof(kbuf), "\n NIX transmit stall stats\n");
+ len += snprintf(kbuf + len, sizeof(kbuf) - len,
+ "\t\tPolled: \t\t%lld\n", tx_stall->poll_cntr);
+ len += snprintf(kbuf + len, sizeof(kbuf) - len,
+ "\t\tTx stall detected: \t%lld\n\n",
+ tx_stall->stalled_cntr);
+
+ block = &hw->block[blkaddr];
+ mutex_lock(&rvu->rsrc_lock);
+ for (lf = 0; lf < block->lf.max; lf++) {
+ if (!test_bit(lf, block->lf.bmap))
+ continue;
+ len += snprintf(kbuf + len, sizeof(kbuf) - len,
+ "\t\tNIXLF%d Polled: %lld \tStalled: %lld\n",
+ lf, tx_stall->nixlf_poll_count[lf],
+ tx_stall->nixlf_stall_count[lf]);
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+
+ if (len > 0) {
+ if (copy_to_user(buffer, kbuf, len))
+ return -EFAULT;
+ }
+
+ *ppos += len;
+ return len;
+}
+
+static void rvu_nix_enable_internal_bp(struct rvu *rvu, int blkaddr)
+{
+ /* An issue exists in A0 silicon whereby a NIX CQ may reach the CQ full
+ * state, followed by a CQ hang on a CQM query response from a stale
+ * CQ context. To avoid such a condition, enable internal backpressure
+ * with BP_TEST registers.
+ */
+ if (is_rvu_96xx_A0(rvu)) {
+ /* Enable internal backpressure on pipe_stg0 */
+ rvu_write64(rvu, blkaddr, NIX_AF_RQM_BP_TEST,
+ BIT_ULL(51) | BIT_ULL(23) | BIT_ULL(22) | 0x100ULL);
+ /* Enable internal backpressure on cqm query request */
+ rvu_write64(rvu, blkaddr, NIX_AF_CQM_BP_TEST,
+ BIT_ULL(43) | BIT_ULL(23) | BIT_ULL(22) | 0x100ULL);
+ }
+}
+
+int rvu_nix_fixes_init(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
+{
+ int err;
+ u64 cfg;
+
+ /* As per a HW errata in 96xx A0 silicon, NIX may corrupt
+ * internal state when conditional clocks are turned off.
+ * Hence enable them.
+ */
+ if (is_rvu_96xx_A0(rvu))
+ rvu_write64(rvu, blkaddr, NIX_AF_CFG,
+ rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x5EULL);
+ if (is_rvu_pre_96xx_C0(rvu))
+ rvu_write64(rvu, blkaddr, NIX_AF_CFG,
+ rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);
+
+ /* Set chan/link to backpressure TL3 instead of TL2 */
+ rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
+
+ /* Disable SQ manager's sticky mode operation (set TM6 = 0, TM11 = 0).
+ * This sticky mode is known to cause SQ stalls when multiple
+ * SQs are mapped to the same SMQ and are transmitting pkts
+ * simultaneously. NIX PSE may deadlock when there are any sticky to
+ * non-sticky transmissions. Hence disable it (TM5 = 0).
+ */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
+ cfg &= ~(BIT_ULL(15) | BIT_ULL(14) | BIT_ULL(23));
+ /* NIX may drop credits when condition clocks are turned off.
+ * Hence enable control flow clk (set TM9 = 1).
+ */
+ cfg |= BIT_ULL(21);
+ rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
+
+ rvu_nix_enable_internal_bp(rvu, blkaddr);
+
+ if (!is_rvu_96xx_A0(rvu))
+ return 0;
+
+ err = rvu_nix_tx_stall_workaround_init(rvu, nix_hw, blkaddr);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+void rvu_nix_fixes_exit(struct rvu *rvu, struct nix_hw *nix_hw)
+{
+ if (!is_rvu_96xx_A0(rvu))
+ return;
+
+ rvu_nix_tx_stall_workaround_exit(rvu, nix_hw);
+}
+
+int rvu_tim_lookup_rsrc(struct rvu *rvu, struct rvu_block *block,
+ u16 pcifunc, int slot)
+{
+ int lf, blkaddr;
+ u64 val;
+
+ /* Due to a HW issue, the LF_CFG_DEBUG register cannot be used to
+ * find PF_FUNC <=> LF mapping, hence scan through LFX_CFG
+ * registers to find mapped LF for a given PF_FUNC.
+ */
+ if (is_rvu_96xx_B0(rvu)) {
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_TIM, pcifunc);
+ if (blkaddr < 0)
+ return TIM_AF_LF_INVALID;
+
+ for (lf = 0; lf < block->lf.max; lf++) {
+ val = rvu_read64(rvu, block->addr, block->lfcfg_reg |
+ (lf << block->lfshift));
+ if ((((val >> 8) & 0xffff) == pcifunc) &&
+ (val & 0xff) == slot)
+ return lf;
+ }
+ return -1;
+ }
+
+ val = ((u64)pcifunc << 24) | (slot << 16) | (1ULL << 13);
+ rvu_write64(rvu, block->addr, block->lookup_reg, val);
+
+ /* Wait for the lookup to finish */
+ while (rvu_read64(rvu, block->addr, block->lookup_reg) & (1ULL << 13))
+ ;
+
+ val = rvu_read64(rvu, block->addr, block->lookup_reg);
+
+ /* Check LF valid bit */
+ if (!(val & (1ULL << 12)))
+ return -1;
+
+ return (val & 0xFFF);
+}
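+
+/* Lookup register protocol used above, as decoded by this function:
+ * request = (pcifunc << 24) | (slot << 16) | trigger bit 13; once
+ * bit 13 clears, bit 12 reports whether a mapped LF was found and
+ * bits 11:0 hold the LF number.
+ */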
+
+int rvu_npc_get_tx_nibble_cfg(struct rvu *rvu, u64 nibble_ena)
+{
+ /* Due to a HW issue in these silicon versions, parse nibble enable
+ * configuration has to be identical for both Rx and Tx interfaces.
+ */
+ if (is_rvu_96xx_B0(rvu))
+ return nibble_ena;
+ return 0;
+}
+
+bool is_parse_nibble_config_valid(struct rvu *rvu,
+ struct npc_mcam_kex *mcam_kex)
+{
+ if (!is_rvu_96xx_B0(rvu))
+ return true;
+
+ /* Due to a HW issue in above silicon versions, parse nibble enable
+ * configuration has to be identical for both Rx and Tx interfaces.
+ */
+ if (mcam_kex->keyx_cfg[NIX_INTF_RX] != mcam_kex->keyx_cfg[NIX_INTF_TX])
+ return false;
+ return true;
+}
+
+void __weak otx2smqvf_xmit(void)
+{
+ /* Nothing to do */
+}
+
+void rvu_smqvf_xmit(struct rvu *rvu)
+{
+ if (is_rvu_95xx_A0(rvu) || is_rvu_96xx_A0(rvu)) {
+ usleep_range(50, 60);
+ otx2smqvf_xmit();
+ }
+}
+
+void rvu_tim_hw_fixes(struct rvu *rvu, int blkaddr)
+{
+ u64 cfg;
+
+ /* Due to wrong clock gating, the TIM expire counter is updated
+ * incorrectly. The workaround is to enable the force clock
+ * (FORCE_CSCLK_ENA = 1).
+ */
+ cfg = rvu_read64(rvu, blkaddr, TIM_AF_FLAGS_REG);
+ cfg |= BIT_ULL(1);
+ rvu_write64(rvu, blkaddr, TIM_AF_FLAGS_REG, cfg);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_fixes.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_fixes.h
new file mode 100644
index 000000000000..16ddf487c4d3
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_fixes.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2019 Marvell.
+ *
+ */
+
+#ifndef RVU_FIXES_H
+#define RVU_FIXES_H
+
+#define RVU_SMQVF_PCIFUNC 17
+
+struct rvu;
+
+void otx2smqvf_xmit(void);
+void rvu_smqvf_xmit(struct rvu *rvu);
+
+#endif /* RVU_FIXES_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index f6a3cf3e6f23..915013fa28d4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -1,11 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
-/* Marvell OcteonTx2 RVU Admin Function driver
+/* Marvell RVU Admin Function driver
*
- * Copyright (C) 2018 Marvell International Ltd.
+ * Copyright (C) 2018 Marvell.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/module.h>
@@ -16,9 +13,23 @@
#include "rvu.h"
#include "npc.h"
#include "cgx.h"
+#include "lmac_common.h"
+#include "rvu_fixes.h"
+static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc);
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
int type, int chan_id);
+static int nix_setup_ipolicers(struct rvu *rvu,
+ struct nix_hw *nix_hw, int blkaddr);
+static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw);
+static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
+ struct nix_hw *nix_hw, u16 pcifunc);
+static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc);
+static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
+ u32 leaf_prof);
+static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
+ int type, bool add);
+static const char *nix_get_ctx_name(int ctype);
enum mc_tbl_sz {
MC_TBL_SZ_256,
@@ -68,6 +79,23 @@ struct mce {
u16 pcifunc;
};
+int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr)
+{
+ int i = 0;
+
+ /*If blkaddr is 0, return the first nix block address*/
+ if (blkaddr == 0)
+ return rvu->nix_blkaddr[blkaddr];
+
+ while (i + 1 < MAX_NIX_BLKS) {
+ if (rvu->nix_blkaddr[i] == blkaddr)
+ return rvu->nix_blkaddr[i + 1];
+ i++;
+ }
+
+ return 0;
+}
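+
+/* Iteration sketch: callers walk all NIX blocks with
+ *
+ *   int blkaddr = 0;
+ *
+ *   blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ *   while (blkaddr) {
+ *           ...
+ *           blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ *   }
+ *
+ * as rvu_get_nixlf_count() below does; a return of 0 ends the walk.
+ */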
+
bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
@@ -81,14 +109,16 @@ bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
int rvu_get_nixlf_count(struct rvu *rvu)
{
+ int blkaddr = 0, max = 0;
struct rvu_block *block;
- int blkaddr;
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
- if (blkaddr < 0)
- return 0;
- block = &rvu->hw->block[blkaddr];
- return block->lf.max;
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ while (blkaddr) {
+ block = &rvu->hw->block[blkaddr];
+ max += block->lf.max;
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ }
+ return max;
}
int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr)
@@ -111,6 +141,22 @@ int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr)
return 0;
}
+int nix_get_struct_ptrs(struct rvu *rvu, u16 pcifunc,
+ struct nix_hw **nix_hw, int *blkaddr)
+{
+ struct rvu_pfvf *pfvf;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ *blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (!pfvf->nixlf || *blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ *nix_hw = get_nix_hw(rvu->hw, *blkaddr);
+ if (!*nix_hw)
+ return NIX_AF_ERR_INVALID_NIXBLK;
+ return 0;
+}
+
static void nix_mce_list_init(struct nix_mce_list *list, int max)
{
INIT_HLIST_HEAD(&list->head);
@@ -130,23 +176,88 @@ static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
return idx;
}
-static inline struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
+struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
{
- if (blkaddr == BLKADDR_NIX0 && hw->nix0)
- return hw->nix0;
-
+ int nix_blkaddr = 0, i = 0;
+ struct rvu *rvu = hw->rvu;
+
+ nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
+ while (nix_blkaddr) {
+ if (blkaddr == nix_blkaddr && hw->nix)
+ return &hw->nix[i];
+ nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
+ i++;
+ }
return NULL;
}
+u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu)
+{
+ dwrr_mtu &= 0x1FULL;
+
+ /* MTU used for DWRR calculation is in power of 2 up until 64K bytes.
+ * Value of 4 is reserved for MTU value of 9728 bytes.
+ * Value of 5 is reserved for MTU value of 10240 bytes.
+ */
+ switch (dwrr_mtu) {
+ case 4:
+ return 9728;
+ case 5:
+ return 10240;
+ default:
+ return BIT_ULL(dwrr_mtu);
+ }
+
+ return 0;
+}
+
+u32 convert_bytes_to_dwrr_mtu(u32 bytes)
+{
+ /* MTU used for DWRR calculation is in power of 2 up until 64K bytes.
+ * Value of 4 is reserved for MTU value of 9728 bytes.
+ * Value of 5 is reserved for MTU value of 10240 bytes.
+ */
+ if (bytes > BIT_ULL(16))
+ return 0;
+
+ switch (bytes) {
+ case 9728:
+ return 4;
+ case 10240:
+ return 5;
+ default:
+ return ilog2(bytes);
+ }
+
+ return 0;
+}
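+
+/* Worked example for the two helpers above: convert_bytes_to_dwrr_mtu(8192)
+ * returns ilog2(8192) = 13 and convert_dwrr_mtu_to_bytes(13) returns
+ * BIT_ULL(13) = 8192; the jumbo sizes 9728 and 10240 round-trip through
+ * the reserved encodings 4 and 5.
+ */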
+
static void nix_rx_sync(struct rvu *rvu, int blkaddr)
{
int err;
- /*Sync all in flight RX packets to LLC/DRAM */
+ /* Sync all in flight RX packets to LLC/DRAM */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
+ err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
+ if (err)
+ dev_err(rvu->dev, "SYNC1: NIX RX software sync failed\n");
+
+ /* SW_SYNC ensures all existing transactions are finished and pkts
+ * are written to LLC/DRAM; queues should be torn down only after a
+ * successful SW_SYNC. Due to a HW errata, in some rare scenarios
+ * an existing transaction might end after the SW_SYNC operation. To
+ * ensure the operation is fully done, do the SW_SYNC twice.
+ */
rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
if (err)
- dev_err(rvu->dev, "NIX RX software sync failed\n");
+ dev_err(rvu->dev, "SYNC2: NIX RX software sync failed\n");
+
+ /* As per a HW errata in 96xx A0 silicon, HW may clear SW_SYNC[ENA]
+ * bit too early. Hence wait for 50us more.
+ */
+ if (is_rvu_96xx_A0(rvu))
+ usleep_range(50, 60);
}
static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
@@ -184,15 +295,21 @@ static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
return true;
}
-static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
+static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf,
+ struct nix_lf_alloc_rsp *rsp, bool loop)
{
- struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ struct rvu_pfvf *parent_pf, *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ u16 req_chan_base, req_chan_end, req_chan_cnt;
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct sdp_node_info *sdp_info;
+ int pkind, pf, vf, lbkid, vfid;
u8 cgx_id, lmac_id;
- int pkind, pf, vf;
+ bool from_vf;
int err;
pf = rvu_get_pf(pcifunc);
- if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
+ if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
+ type != NIX_INTF_TYPE_SDP)
return 0;
switch (type) {
@@ -206,32 +323,106 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
"PF_Func 0x%x: Invalid pkind\n", pcifunc);
return -EINVAL;
}
- pfvf->rx_chan_base = NIX_CHAN_CGX_LMAC_CHX(cgx_id, lmac_id, 0);
+ pfvf->rx_chan_base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0);
pfvf->tx_chan_base = pfvf->rx_chan_base;
pfvf->rx_chan_cnt = 1;
pfvf->tx_chan_cnt = 1;
- cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
- rvu_npc_set_pkind(rvu, pkind, pfvf);
+ rsp->tx_link = cgx_id * hw->lmac_per_cgx + lmac_id;
+
+ if (rvu_cgx_is_pkind_config_permitted(rvu, pcifunc)) {
+ cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
+ pkind);
+ rvu_npc_set_pkind(rvu, pkind, pfvf);
+ }
- /* By default we enable pause frames */
- if ((pcifunc & RVU_PFVF_FUNC_MASK) == 0)
- cgx_lmac_set_pause_frm(rvu_cgx_pdata(cgx_id, rvu),
- lmac_id, true, true);
break;
case NIX_INTF_TYPE_LBK:
vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
+ /* If NIX1 block is present on the silicon then NIXes are
+ * assigned alternatively for lbk interfaces. NIX0 should
+ * send packets on lbk link 1 channels and NIX1 should send
+ * on lbk link 0 channels for the communication between
+ * NIX0 and NIX1.
+ */
+ lbkid = 0;
+ if (rvu->hw->lbk_links > 1)
+ lbkid = vf & 0x1 ? 0 : 1;
+
+ /* By default NIX0 is configured to send packet on lbk link 1
+ * (which corresponds to LBK1), same packet will receive on
+ * NIX1 over lbk link 0. If NIX1 sends packet on lbk link 0
+ * (which corresponds to LBK2) packet will receive on NIX0 lbk
+ * link 1.
+ * But if lbk links for NIX0 and NIX1 are negated, i.e. NIX0
+ * transmits and receives on lbk link 0, which corresponds
+ * to LBK1 block, back to back connectivity between NIX and
+ * LBK can be achieved (which is similar to 96xx)
+ *
+ * RX TX
+ * NIX0 lbk link 1 (LBK2) 1 (LBK1)
+ * NIX0 lbk link 0 (LBK0) 0 (LBK0)
+ * NIX1 lbk link 0 (LBK1) 0 (LBK2)
+ * NIX1 lbk link 1 (LBK3) 1 (LBK3)
+ */
+ if (loop)
+ lbkid = !lbkid;
+
/* Note that AF's VFs work in pairs and talk over consecutive
* loopback channels.Therefore if odd number of AF VFs are
* enabled then the last VF remains with no pair.
*/
- pfvf->rx_chan_base = NIX_CHAN_LBK_CHX(0, vf);
- pfvf->tx_chan_base = vf & 0x1 ? NIX_CHAN_LBK_CHX(0, vf - 1) :
- NIX_CHAN_LBK_CHX(0, vf + 1);
+ pfvf->rx_chan_base = rvu_nix_chan_lbk(rvu, lbkid, vf);
+ pfvf->tx_chan_base = vf & 0x1 ?
+ rvu_nix_chan_lbk(rvu, lbkid, vf - 1) :
+ rvu_nix_chan_lbk(rvu, lbkid, vf + 1);
pfvf->rx_chan_cnt = 1;
pfvf->tx_chan_cnt = 1;
+ rsp->tx_link = hw->cgx_links + lbkid;
+ pfvf->lbkid = lbkid;
+ rvu_npc_set_pkind(rvu, NPC_RX_LBK_PKIND, pfvf);
rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
- pfvf->rx_chan_base, false);
+ pfvf->rx_chan_base,
+ pfvf->rx_chan_cnt);
+
+ break;
+ case NIX_INTF_TYPE_SDP:
+ from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
+ parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
+ sdp_info = parent_pf->sdp_info;
+ if (!sdp_info) {
+ dev_err(rvu->dev, "Invalid sdp_info pointer\n");
+ return -EINVAL;
+ }
+ if (from_vf) {
+ req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn +
+ sdp_info->num_pf_rings;
+ vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
+ for (vfid = 0; vfid < vf; vfid++)
+ req_chan_base += sdp_info->vf_rings[vfid];
+ req_chan_cnt = sdp_info->vf_rings[vf];
+ req_chan_end = req_chan_base + req_chan_cnt - 1;
+ if (req_chan_base < rvu_nix_chan_sdp(rvu, 0) ||
+ req_chan_end > rvu_nix_chan_sdp(rvu, 255)) {
+ dev_err(rvu->dev,
+ "PF_Func 0x%x: Invalid channel base and count\n",
+ pcifunc);
+ return -EINVAL;
+ }
+ } else {
+ req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn;
+ req_chan_cnt = sdp_info->num_pf_rings;
+ }
+
+ pfvf->rx_chan_base = req_chan_base;
+ pfvf->rx_chan_cnt = req_chan_cnt;
+ pfvf->tx_chan_base = pfvf->rx_chan_base;
+ pfvf->tx_chan_cnt = pfvf->rx_chan_cnt;
+
+ rsp->tx_link = hw->cgx_links + hw->lbk_links;
+ rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
+ pfvf->rx_chan_base,
+ pfvf->rx_chan_cnt);
break;
}
@@ -242,16 +433,17 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
pfvf->rx_chan_base, pfvf->mac_addr);
/* Add this PF_FUNC to bcast pkt replication list */
- err = nix_update_bcast_mce_list(rvu, pcifunc, true);
+ err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, true);
if (err) {
dev_err(rvu->dev,
"Bcast list, failed to enable PF_FUNC 0x%x\n",
pcifunc);
return err;
}
-
+ /* Install MCAM rule matching Ethernet broadcast mac address */
rvu_npc_install_bcast_match_entry(rvu, pcifunc,
nixlf, pfvf->rx_chan_base);
+
pfvf->maxlen = NIC_HW_MIN_FRS;
pfvf->minlen = NIC_HW_MIN_FRS;
@@ -265,28 +457,28 @@ static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
pfvf->maxlen = 0;
pfvf->minlen = 0;
- pfvf->rxvlan = false;
/* Remove this PF_FUNC from bcast pkt replication list */
- err = nix_update_bcast_mce_list(rvu, pcifunc, false);
+ err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, false);
if (err) {
dev_err(rvu->dev,
"Bcast list, failed to disable PF_FUNC 0x%x\n",
pcifunc);
}
- /* Free and disable any MCAM entries used by this NIX LF */
- rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
+ /* Disable DMAC filters used */
+ rvu_cgx_disable_dmac_entries(rvu, pcifunc);
}
-int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
- struct nix_bp_cfg_req *req,
- struct msg_rsp *rsp)
+static int nix_bp_disable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct msg_rsp *rsp, bool cpt_link)
{
u16 pcifunc = req->hdr.pcifunc;
struct rvu_pfvf *pfvf;
int blkaddr, pf, type;
u16 chan_base, chan;
+ u16 chan_v;
u64 cfg;
pf = rvu_get_pf(pcifunc);
@@ -294,24 +486,49 @@ int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
return 0;
+ if (cpt_link && !rvu->hw->cpt_links)
+ return 0;
+
pfvf = rvu_get_pfvf(rvu, pcifunc);
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
chan_base = pfvf->rx_chan_base + req->chan_base;
for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
- cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
- rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
+ /* The CPT channel for a given link channel is always
+ * assumed to be the link channel with BIT(11) set.
+ */
+ if (cpt_link)
+ chan_v = chan | BIT(11);
+ else
+ chan_v = chan;
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v));
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v),
cfg & ~BIT_ULL(16));
}
return 0;
}
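/* Tiny sketch (not part of this patch) of the CPT channel derivation used in
 * nix_bp_disable()/nix_bp_enable() above: the CPT channel is assumed to be
 * the link channel with BIT(11) set, e.g. link channel 0x005 -> CPT 0x805.
 */
static inline u16 cpt_chan_sketch(u16 link_chan)
{
	return link_chan | BIT(11);
}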
+int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct msg_rsp *rsp)
+{
+ return nix_bp_disable(rvu, req, rsp, false);
+}
+
+int rvu_mbox_handler_nix_cpt_bp_disable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct msg_rsp *rsp)
+{
+ return nix_bp_disable(rvu, req, rsp, true);
+}
+
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
int type, int chan_id)
{
- int bpid, blkaddr, lmac_chan_cnt;
+ int bpid, blkaddr, lmac_chan_cnt, sdp_chan_cnt, vf;
+ u16 cgx_bpid_cnt, lbk_bpid_cnt, sdp_bpid_cnt;
struct rvu_hwinfo *hw = rvu->hw;
- u16 cgx_bpid_cnt, lbk_bpid_cnt;
struct rvu_pfvf *pfvf;
u8 cgx_id, lmac_id;
u64 cfg;
@@ -323,6 +540,10 @@ static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
+ sdp_chan_cnt = cfg & 0xFFF;
+ sdp_bpid_cnt = hw->sdp_links * sdp_chan_cnt;
+
pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
/* Backpressure IDs range division
@@ -337,7 +558,7 @@ static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
*/
switch (type) {
case NIX_INTF_TYPE_CGX:
- if ((req->chan_base + req->chan_cnt) > 15)
+ if ((req->chan_base + req->chan_cnt) > 16)
return -EINVAL;
rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
/* Assign bpid based on cgx, lmac and chan id */
@@ -351,36 +572,60 @@ static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
break;
case NIX_INTF_TYPE_LBK:
- if ((req->chan_base + req->chan_cnt) > 63)
+ if ((req->chan_base + req->chan_cnt) > 1)
return -EINVAL;
- bpid = cgx_bpid_cnt + req->chan_base;
+ /* Channel number allocation is based on the VF id,
+ * hence the BPID follows a similar scheme.
+ */
+ vf = (req->hdr.pcifunc & RVU_PFVF_FUNC_MASK) - 1;
+
+ bpid = cgx_bpid_cnt + req->chan_base + vf;
if (req->bpid_per_chan)
bpid += chan_id;
if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt))
return -EINVAL;
break;
+ case NIX_INTF_TYPE_SDP:
+ if ((req->chan_base + req->chan_cnt) > 255)
+ return -EINVAL;
+
+ bpid = sdp_bpid_cnt + req->chan_base;
+ if (req->bpid_per_chan)
+ bpid += chan_id;
+
+ if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt + sdp_bpid_cnt))
+ return -EINVAL;
+ break;
default:
return -EINVAL;
}
return bpid;
}
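/* Sketch (not part of this patch) of the BPID space split implied by
 * rvu_nix_get_bpid(): CGX BPIDs occupy [0, cgx_bpid_cnt) and LBK BPIDs start
 * right after them; SDP BPIDs are assumed to follow those two ranges. The
 * helper below mirrors the LBK computation in the code above.
 */
static inline u16 lbk_bpid_sketch(u16 cgx_bpid_cnt, u16 chan_base, u16 vf)
{
	/* Mirrors: bpid = cgx_bpid_cnt + req->chan_base + vf */
	return cgx_bpid_cnt + chan_base + vf;
}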
-int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
- struct nix_bp_cfg_req *req,
- struct nix_bp_cfg_rsp *rsp)
+static int nix_bp_enable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct nix_bp_cfg_rsp *rsp,
+ bool cpt_link)
{
int blkaddr, pf, type, chan_id = 0;
u16 pcifunc = req->hdr.pcifunc;
struct rvu_pfvf *pfvf;
u16 chan_base, chan;
s16 bpid, bpid_base;
+ u16 chan_v;
u64 cfg;
pf = rvu_get_pf(pcifunc);
type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
+ if (is_sdp_pfvf(pcifunc))
+ type = NIX_INTF_TYPE_SDP;
- /* Enable backpressure only for CGX mapped PFs and LBK interface */
- if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
+ /* Enable backpressure only for CGX mapped PFs and LBK/SDP interface */
+ if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
+ type != NIX_INTF_TYPE_SDP)
+ return 0;
+
+ if (cpt_link && !rvu->hw->cpt_links)
return 0;
pfvf = rvu_get_pfvf(rvu, pcifunc);
@@ -396,9 +641,19 @@ int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
return -EINVAL;
}
- cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
- rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
- cfg | (bpid & 0xFF) | BIT_ULL(16));
+ /* The CPT channel for a given link channel is always
+ * assumed to be the link channel with BIT(11) set.
+ */
+
+ if (cpt_link)
+ chan_v = chan | BIT(11);
+ else
+ chan_v = chan;
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v));
+ cfg &= ~GENMASK_ULL(8, 0);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v),
+ cfg | (bpid & GENMASK_ULL(8, 0)) | BIT_ULL(16));
chan_id++;
bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
}
@@ -415,6 +670,20 @@ int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
return 0;
}
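/* Sketch (not part of this patch) of the NIX_AF_RX_CHANX_CFG value built in
 * nix_bp_enable() above: bits [8:0] hold the BPID and BIT(16) is treated as
 * the backpressure-enable bit, matching the masks used in the code.
 */
static inline u64 chan_bp_cfg_sketch(u64 old_cfg, u16 bpid)
{
	old_cfg &= ~GENMASK_ULL(8, 0);
	return old_cfg | (bpid & GENMASK_ULL(8, 0)) | BIT_ULL(16);
}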
+int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct nix_bp_cfg_rsp *rsp)
+{
+ return nix_bp_enable(rvu, req, rsp, false);
+}
+
+int rvu_mbox_handler_nix_cpt_bp_enable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct nix_bp_cfg_rsp *rsp)
+{
+ return nix_bp_enable(rvu, req, rsp, true);
+}
+
static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
u64 format, bool v4, u64 *fidx)
{
@@ -546,9 +815,10 @@ static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
struct rvu_pfvf *pfvf, int nixlf,
int rss_sz, int rss_grps, int hwctx_size,
- u64 way_mask)
+ u64 way_mask, bool tag_lsb_as_adder)
{
int err, grp, num_indices;
+ u64 val;
/* RSS is not requested for this NIXLF */
if (!rss_sz)
@@ -564,10 +834,13 @@ static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
(u64)pfvf->rss_ctx->iova);
/* Config full RSS table size, enable RSS and caching */
- rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf),
- BIT_ULL(36) | BIT_ULL(4) |
- ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE) |
- way_mask << 20);
+ val = BIT_ULL(36) | BIT_ULL(4) | way_mask << 20 |
+ ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE);
+
+ if (tag_lsb_as_adder)
+ val |= BIT_ULL(5);
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf), val);
/* Config RSS group offset and sizes */
for (grp = 0; grp < rss_grps; grp++)
rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
@@ -612,8 +885,9 @@ static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
return 0;
}
-static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
- struct nix_aq_enq_rsp *rsp)
+static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
+ struct nix_aq_enq_req *req,
+ struct nix_aq_enq_rsp *rsp)
{
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
@@ -626,10 +900,7 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
bool ena;
u64 cfg;
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
- if (blkaddr < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
-
+ blkaddr = nix_hw->blkaddr;
block = &hw->block[blkaddr];
aq = block->aq;
if (!aq) {
@@ -640,8 +911,11 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
pfvf = rvu_get_pfvf(rvu, pcifunc);
nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
- /* Skip NIXLF check for broadcast MCE entry init */
- if (!(!rsp && req->ctype == NIX_AQ_CTYPE_MCE)) {
+ /* Skip NIXLF check for broadcast MCE entry and bandwidth profile
+ * operations done by AF itself.
+ */
+ if (!((!rsp && req->ctype == NIX_AQ_CTYPE_MCE) ||
+ (req->ctype == NIX_AQ_CTYPE_BANDPROF && !pcifunc))) {
if (!pfvf->nixlf || nixlf < 0)
return NIX_AF_ERR_AF_LF_INVALID;
}
@@ -669,8 +943,9 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
break;
case NIX_AQ_CTYPE_MCE:
cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);
+
/* Check if index exceeds MCE list length */
- if (!hw->nix0->mcast.mce_ctx ||
+ if (!nix_hw->mcast.mce_ctx ||
(req->qidx >= (256UL << (cfg & 0xF))))
rc = NIX_AF_ERR_AQ_ENQUEUE;
@@ -680,6 +955,11 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
if (rsp)
rc = NIX_AF_ERR_AQ_ENQUEUE;
break;
+ case NIX_AQ_CTYPE_BANDPROF:
+ if (nix_verify_bandprof((struct nix_cn10k_aq_enq_req *)req,
+ nix_hw, pcifunc))
+ rc = NIX_AF_ERR_INVALID_BANDPROF;
+ break;
default:
rc = NIX_AF_ERR_AQ_ENQUEUE;
}
@@ -695,6 +975,8 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
pcifunc, req->sq.smq))
return NIX_AF_ERR_AQ_ENQUEUE;
+ rvu_nix_update_sq_smq_mapping(rvu, blkaddr, nixlf, req->qidx,
+ req->sq.smq);
}
memset(&inst, 0, sizeof(struct nix_aq_inst_s));
@@ -736,6 +1018,9 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
else if (req->ctype == NIX_AQ_CTYPE_MCE)
memcpy(mask, &req->mce_mask,
sizeof(struct nix_rx_mce_s));
+ else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
+ memcpy(mask, &req->prof_mask,
+ sizeof(struct nix_bandprof_s));
fallthrough;
case NIX_AQ_INSTOP_INIT:
if (req->ctype == NIX_AQ_CTYPE_RQ)
@@ -748,6 +1033,8 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
else if (req->ctype == NIX_AQ_CTYPE_MCE)
memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
+ else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
+ memcpy(ctx, &req->prof, sizeof(struct nix_bandprof_s));
break;
case NIX_AQ_INSTOP_NOP:
case NIX_AQ_INSTOP_READ:
@@ -825,6 +1112,9 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
else if (req->ctype == NIX_AQ_CTYPE_MCE)
memcpy(&rsp->mce, ctx,
sizeof(struct nix_rx_mce_s));
+ else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
+ memcpy(&rsp->prof, ctx,
+ sizeof(struct nix_bandprof_s));
}
}
@@ -832,6 +1122,98 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
return 0;
}
+static int rvu_nix_verify_aq_ctx(struct rvu *rvu, struct nix_hw *nix_hw,
+ struct nix_aq_enq_req *req, u8 ctype)
+{
+ struct nix_cn10k_aq_enq_req aq_req;
+ struct nix_cn10k_aq_enq_rsp aq_rsp;
+ int rc, word;
+
+ if (req->ctype != NIX_AQ_CTYPE_CQ)
+ return 0;
+
+ rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
+ req->hdr.pcifunc, ctype, req->qidx);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to fetch %s%d context of PFFUNC 0x%x\n",
+ __func__, nix_get_ctx_name(ctype), req->qidx,
+ req->hdr.pcifunc);
+ return rc;
+ }
+
+ /* Make copy of original context & mask which are required
+ * for resubmission
+ */
+ memcpy(&aq_req.cq_mask, &req->cq_mask, sizeof(struct nix_cq_ctx_s));
+ memcpy(&aq_req.cq, &req->cq, sizeof(struct nix_cq_ctx_s));
+
+ /* exclude fields which HW can update */
+ aq_req.cq_mask.cq_err = 0;
+ aq_req.cq_mask.wrptr = 0;
+ aq_req.cq_mask.tail = 0;
+ aq_req.cq_mask.head = 0;
+ aq_req.cq_mask.avg_level = 0;
+ aq_req.cq_mask.update_time = 0;
+ aq_req.cq_mask.substream = 0;
+
+ /* The context mask (cq_mask) holds the mask value of the fields
+ * which are changed in the AQ WRITE operation,
+ * for example cq.drop = 0xa;
+ * cq_mask.drop = 0xff;
+ * The logic below ANDs cq with cq_mask so that non-updated
+ * fields are masked out for the request and response
+ * comparison.
+ */
+ for (word = 0; word < sizeof(struct nix_cq_ctx_s) / sizeof(u64);
+ word++) {
+ *(u64 *)((u8 *)&aq_rsp.cq + word * 8) &=
+ (*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
+ *(u64 *)((u8 *)&aq_req.cq + word * 8) &=
+ (*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
+ }
+
+ if (memcmp(&aq_req.cq, &aq_rsp.cq, sizeof(struct nix_cq_ctx_s)))
+ return NIX_AF_ERR_AQ_CTX_RETRY_WRITE;
+
+ return 0;
+}
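/* Sketch (not part of this patch) of the masked comparison performed word by
 * word in rvu_nix_verify_aq_ctx() above: only the bits selected by the mask
 * take part in the request/response comparison.
 */
static inline bool ctx_word_matches(u64 requested, u64 readback, u64 mask)
{
	return (requested & mask) == (readback & mask);
}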
+
+static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
+ struct nix_aq_enq_rsp *rsp)
+{
+ struct nix_hw *nix_hw;
+ int err, retries = 5;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
+ if (blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return NIX_AF_ERR_INVALID_NIXBLK;
+
+retry:
+ err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp);
+
+ /* HW errata 'AQ Modification to CQ could be discarded on heavy traffic'.
+ * As a workaround, perform a CQ context read after each AQ write. If the
+ * read shows the AQ write did not take effect, perform the AQ write again.
+ */
+ if (!err && req->op == NIX_AQ_INSTOP_WRITE) {
+ err = rvu_nix_verify_aq_ctx(rvu, nix_hw, req, NIX_AQ_CTYPE_CQ);
+ if (err == NIX_AF_ERR_AQ_CTX_RETRY_WRITE) {
+ if (retries--)
+ goto retry;
+ else
+ return NIX_AF_ERR_CQ_CTX_WRITE_ERR;
+ }
+ }
+
+ return err;
+}
+
static const char *nix_get_ctx_name(int ctype)
{
switch (ctype) {
@@ -947,6 +1329,17 @@ int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
return rvu_nix_aq_enq_inst(rvu, req, rsp);
}
#endif
+EXPORT_SYMBOL(rvu_mbox_handler_nix_aq_enq);
+
+/* CN10K mbox handler */
+int rvu_mbox_handler_nix_cn10k_aq_enq(struct rvu *rvu,
+ struct nix_cn10k_aq_enq_req *req,
+ struct nix_cn10k_aq_enq_rsp *rsp)
+{
+ return rvu_nix_aq_enq_inst(rvu, (struct nix_aq_enq_req *)req,
+ (struct nix_aq_enq_rsp *)rsp);
+}
+EXPORT_SYMBOL(rvu_mbox_handler_nix_cn10k_aq_enq);
int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
struct hwctx_disable_req *req,
@@ -960,10 +1353,10 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
struct nix_lf_alloc_rsp *rsp)
{
int nixlf, qints, hwctx_size, intf, err, rc = 0;
+ struct rvu_pfvf *pfvf, *parent_pf;
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
struct rvu_block *block;
- struct rvu_pfvf *pfvf;
u64 cfg, ctx_cfg;
int blkaddr;
@@ -973,6 +1366,7 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
if (req->way_mask)
req->way_mask &= 0xFFFF;
+ parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
pfvf = rvu_get_pfvf(rvu, pcifunc);
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (!pfvf->nixlf || blkaddr < 0)
@@ -1076,7 +1470,8 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
/* Initialize receive side scaling (RSS) */
hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz,
- req->rss_grps, hwctx_size, req->way_mask);
+ req->rss_grps, hwctx_size, req->way_mask,
+ !!(req->flags & NIX_LF_RSS_TAG_LSB_AS_ADDER));
if (err)
goto free_mem;
@@ -1130,17 +1525,32 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);
/* Configure pkind for TX parse config */
- cfg = NPC_TX_DEF_PKIND;
- rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);
+ if (rvu_cgx_is_pkind_config_permitted(rvu, pcifunc)) {
+ cfg = NPC_TX_DEF_PKIND;
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);
+ }
intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
- err = nix_interface_init(rvu, pcifunc, intf, nixlf);
+ if (is_sdp_pfvf(pcifunc))
+ intf = NIX_INTF_TYPE_SDP;
+
+ err = nix_interface_init(rvu, pcifunc, intf, nixlf, rsp,
+ !!(req->flags & NIX_LF_LBK_BLK_SEL));
if (err)
goto free_mem;
/* Disable NPC entries as NIXLF's contexts are not initialized yet */
rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
+ /* Configure RX VTAG Type 7 (strip) for vf vlan */
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, NIX_AF_LFX_RX_VTAG_TYPE7),
+ VTAGSIZE_T4 | VTAG_STRIP);
+ /* Configure RX VTAG Type 6 (strip) for fdsa */
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, NIX_AF_LFX_RX_VTAG_TYPE6),
+ VTAGSIZE_T4 | VTAG_STRIP | VTAG_CAPTURE);
+
goto exit;
free_mem:
@@ -1168,10 +1578,15 @@ exit:
cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
rsp->qints = ((cfg >> 12) & 0xFFF);
rsp->cints = ((cfg >> 24) & 0xFFF);
+ rsp->hw_rx_tstamp_en = parent_pf->hw_rx_tstamp_en;
+ rsp->cgx_links = hw->cgx_links;
+ rsp->lbk_links = hw->lbk_links;
+ rsp->sdp_links = hw->sdp_links;
+
return rc;
}
-int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req,
struct msg_rsp *rsp)
{
struct rvu_hwinfo *hw = rvu->hw;
@@ -1190,6 +1605,15 @@ int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct msg_req *req,
if (nixlf < 0)
return NIX_AF_ERR_AF_LF_INVALID;
+ if (req->flags & NIX_LF_DISABLE_FLOWS)
+ rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
+ else
+ rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
+
+ /* Free any tx vtag def entries used by this NIX LF */
+ if (!(req->flags & NIX_LF_DONT_FREE_TX_VTAG))
+ nix_free_tx_vtag_entries(rvu, pcifunc);
+
nix_interface_deinit(rvu, pcifunc, nixlf);
/* Reset this NIX LF */
@@ -1222,7 +1646,7 @@ int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
- return -EINVAL;
+ return NIX_AF_ERR_INVALID_NIXBLK;
cfg = (((u32)req->offset & 0x7) << 16) |
(((u32)req->y_mask & 0xF) << 12) |
@@ -1240,12 +1664,104 @@ int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
return 0;
}
+/* Handle shaper update specially for a few silicon revisions */
+static bool
+handle_txschq_shaper_update(struct rvu *rvu, int blkaddr, int nixlf,
+ int lvl, u64 reg, u64 regval)
+{
+ u64 regbase, oldval, sw_xoff = 0;
+ u64 dbgval, md_debug0 = 0;
+ unsigned long poll_tmo;
+ bool rate_reg = 0;
+ u32 schq;
+
+ regbase = reg & 0xFFFF;
+ schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
+
+ /* Check for rate register */
+ switch (lvl) {
+ case NIX_TXSCH_LVL_TL1:
+ md_debug0 = NIX_AF_TL1X_MD_DEBUG0(schq);
+ sw_xoff = NIX_AF_TL1X_SW_XOFF(schq);
+
+ rate_reg = !!(regbase == NIX_AF_TL1X_CIR(0));
+ break;
+ case NIX_TXSCH_LVL_TL2:
+ md_debug0 = NIX_AF_TL2X_MD_DEBUG0(schq);
+ sw_xoff = NIX_AF_TL2X_SW_XOFF(schq);
+
+ rate_reg = (regbase == NIX_AF_TL2X_CIR(0) ||
+ regbase == NIX_AF_TL2X_PIR(0));
+ break;
+ case NIX_TXSCH_LVL_TL3:
+ md_debug0 = NIX_AF_TL3X_MD_DEBUG0(schq);
+ sw_xoff = NIX_AF_TL3X_SW_XOFF(schq);
+
+ rate_reg = (regbase == NIX_AF_TL3X_CIR(0) ||
+ regbase == NIX_AF_TL3X_PIR(0));
+ break;
+ case NIX_TXSCH_LVL_TL4:
+ md_debug0 = NIX_AF_TL4X_MD_DEBUG0(schq);
+ sw_xoff = NIX_AF_TL4X_SW_XOFF(schq);
+
+ rate_reg = (regbase == NIX_AF_TL4X_CIR(0) ||
+ regbase == NIX_AF_TL4X_PIR(0));
+ break;
+ case NIX_TXSCH_LVL_MDQ:
+ sw_xoff = NIX_AF_MDQX_SW_XOFF(schq);
+ rate_reg = (regbase == NIX_AF_MDQX_CIR(0) ||
+ regbase == NIX_AF_MDQX_PIR(0));
+ break;
+ }
+
+ if (!rate_reg)
+ return false;
+
+ /* Nothing special to do when state is not toggled */
+ oldval = rvu_read64(rvu, blkaddr, reg);
+ if ((oldval & 0x1) == (regval & 0x1)) {
+ rvu_write64(rvu, blkaddr, reg, regval);
+ return true;
+ }
+
+ /* PIR/CIR disable */
+ if (!(regval & 0x1)) {
+ rvu_write64(rvu, blkaddr, sw_xoff, 1);
+ rvu_write64(rvu, blkaddr, reg, 0);
+ udelay(4);
+ rvu_write64(rvu, blkaddr, sw_xoff, 0);
+ return true;
+ }
+
+ /* PIR/CIR enable */
+ rvu_write64(rvu, blkaddr, sw_xoff, 1);
+ if (md_debug0) {
+ poll_tmo = jiffies + usecs_to_jiffies(10000);
+ /* Wait until VLD(bit32) == 1 or C_CON(bit48) == 0 */
+ do {
+ if (time_after(jiffies, poll_tmo)) {
+ dev_err(rvu->dev,
+ "NIXLF%d: TLX%u(lvl %u) CIR/PIR enable failed\n",
+ nixlf, schq, lvl);
+ goto exit;
+ }
+ usleep_range(1, 5);
+ dbgval = rvu_read64(rvu, blkaddr, md_debug0);
+ } while (!(dbgval & BIT_ULL(32)) && (dbgval & BIT_ULL(48)));
+ }
+ rvu_write64(rvu, blkaddr, reg, regval);
+exit:
+ rvu_write64(rvu, blkaddr, sw_xoff, 0);
+ return true;
+}
+
/* Disable shaping of pkts by a scheduler queue
* at a given scheduler level.
*/
static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
- int lvl, int schq)
+ int nixlf, int lvl, int schq)
{
+ struct rvu_hwinfo *hw = rvu->hw;
u64 cir_reg = 0, pir_reg = 0;
u64 cfg;
@@ -1266,6 +1782,21 @@ static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
cir_reg = NIX_AF_TL4X_CIR(schq);
pir_reg = NIX_AF_TL4X_PIR(schq);
break;
+ case NIX_TXSCH_LVL_MDQ:
+ cir_reg = NIX_AF_MDQX_CIR(schq);
+ pir_reg = NIX_AF_MDQX_PIR(schq);
+ break;
+ }
+
+ /* Shaper state toggle needs wait/poll */
+ if (hw->cap.nix_shaper_toggle_wait) {
+ if (cir_reg)
+ handle_txschq_shaper_update(rvu, blkaddr, nixlf,
+ lvl, cir_reg, 0);
+ if (pir_reg)
+ handle_txschq_shaper_update(rvu, blkaddr, nixlf,
+ lvl, pir_reg, 0);
+ return;
}
if (!cir_reg)
@@ -1283,6 +1814,7 @@ static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
int lvl, int schq)
{
struct rvu_hwinfo *hw = rvu->hw;
+ int link_level;
int link;
if (lvl >= hw->cap.nix_tx_aggr_lvl)
@@ -1292,7 +1824,9 @@ static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
if (lvl == NIX_TXSCH_LVL_TL4)
rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);
- if (lvl != NIX_TXSCH_LVL_TL2)
+ link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
+ NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
+ if (lvl != link_level)
return;
/* Reset TL2's CGX or LBK link config */
@@ -1301,6 +1835,40 @@ static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
}
+static void nix_clear_tx_xoff(struct rvu *rvu, int blkaddr,
+ int lvl, int schq)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u64 reg;
+
+ /* Skip this if shaping is not supported */
+ if (!hw->cap.nix_shaping)
+ return;
+
+ /* Clear level specific SW_XOFF */
+ switch (lvl) {
+ case NIX_TXSCH_LVL_TL1:
+ reg = NIX_AF_TL1X_SW_XOFF(schq);
+ break;
+ case NIX_TXSCH_LVL_TL2:
+ reg = NIX_AF_TL2X_SW_XOFF(schq);
+ break;
+ case NIX_TXSCH_LVL_TL3:
+ reg = NIX_AF_TL3X_SW_XOFF(schq);
+ break;
+ case NIX_TXSCH_LVL_TL4:
+ reg = NIX_AF_TL4X_SW_XOFF(schq);
+ break;
+ case NIX_TXSCH_LVL_MDQ:
+ reg = NIX_AF_MDQX_SW_XOFF(schq);
+ break;
+ default:
+ return;
+ }
+
+ rvu_write64(rvu, blkaddr, reg, 0x0);
+}
+
static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc)
{
struct rvu_hwinfo *hw = rvu->hw;
@@ -1374,7 +1942,8 @@ static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc,
free_cnt = rvu_rsrc_free_count(&txsch->schq);
}
- if (free_cnt < req_schq || req_schq > MAX_TXSCHQ_PER_FUNC)
+ if (free_cnt < req_schq || req->schq[lvl] > MAX_TXSCHQ_PER_FUNC ||
+ req->schq_contig[lvl] > MAX_TXSCHQ_PER_FUNC)
return NIX_AF_ERR_TLX_ALLOC_FAIL;
/* If contiguous queues are needed, check for availability */
@@ -1478,22 +2047,29 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
int link, blkaddr, rc = 0;
int lvl, idx, start, end;
struct nix_txsch *txsch;
- struct rvu_pfvf *pfvf;
struct nix_hw *nix_hw;
u32 *pfvf_map;
+ int nixlf;
u16 schq;
- pfvf = rvu_get_pfvf(rvu, pcifunc);
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
- if (!pfvf->nixlf || blkaddr < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
+ rc = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
+ if (rc)
+ return rc;
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
- return -EINVAL;
+ return NIX_AF_ERR_INVALID_NIXBLK;
mutex_lock(&rvu->rsrc_lock);
+ /* Check if request can be accommodated as per limits set by admin */
+ if (!hw->cap.nix_fixed_txschq_mapping &&
+ rvu_check_txsch_policy(rvu, req, pcifunc)) {
+ dev_err(rvu->dev, "Func 0x%x: TXSCH policy check failed\n",
+ pcifunc);
+ goto err;
+ }
+
/* Check if the request is valid as per HW capabilities
 * and can be accommodated.
*/
@@ -1535,7 +2111,7 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
NIX_TXSCHQ_CFG_DONE))
pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
- nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
+ nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
}
for (idx = 0; idx < req->schq[lvl]; idx++) {
@@ -1544,7 +2120,7 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
NIX_TXSCHQ_CFG_DONE))
pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
- nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
+ nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
}
}
@@ -1561,8 +2137,8 @@ exit:
return rc;
}
-static void nix_smq_flush(struct rvu *rvu, int blkaddr,
- int smq, u16 pcifunc, int nixlf)
+static int nix_smq_flush(struct rvu *rvu, int blkaddr,
+ int smq, u16 pcifunc, int nixlf)
{
int pf = rvu_get_pf(pcifunc);
u8 cgx_id = 0, lmac_id = 0;
@@ -1572,8 +2148,8 @@ static void nix_smq_flush(struct rvu *rvu, int blkaddr,
/* enable cgx tx if disabled */
if (is_pf_cgxmapped(rvu, pf)) {
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
- restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu),
- lmac_id, true);
+ restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu),
+ lmac_id, true);
}
cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
@@ -1586,6 +2162,8 @@ static void nix_smq_flush(struct rvu *rvu, int blkaddr,
*/
rvu_cgx_enadis_rx_bp(rvu, pf, false);
+ rvu_smqvf_xmit(rvu);
+
/* Wait for flush to complete */
err = rvu_poll_reg(rvu, blkaddr,
NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
@@ -1596,7 +2174,8 @@ static void nix_smq_flush(struct rvu *rvu, int blkaddr,
rvu_cgx_enadis_rx_bp(rvu, pf, true);
/* restore cgx tx state */
if (restore_tx_en)
- cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
+ rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
+ return err;
}
static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
@@ -1605,6 +2184,7 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
struct rvu_hwinfo *hw = rvu->hw;
struct nix_txsch *txsch;
struct nix_hw *nix_hw;
+ u16 map_func;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (blkaddr < 0)
@@ -1612,25 +2192,42 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
- return -EINVAL;
+ return NIX_AF_ERR_INVALID_NIXBLK;
nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
if (nixlf < 0)
return NIX_AF_ERR_AF_LF_INVALID;
- /* Disable TL2/3 queue links before SMQ flush*/
+ /* Disable TL2/3 queue links and all XOFFs before SMQ flush */
mutex_lock(&rvu->rsrc_lock);
- for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
- if (lvl != NIX_TXSCH_LVL_TL2 && lvl != NIX_TXSCH_LVL_TL4)
+ for (lvl = NIX_TXSCH_LVL_MDQ; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ txsch = &nix_hw->txsch[lvl];
+
+ if (lvl >= hw->cap.nix_tx_aggr_lvl)
continue;
- txsch = &nix_hw->txsch[lvl];
for (schq = 0; schq < txsch->schq.max; schq++) {
if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
continue;
nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
+ nix_clear_tx_xoff(rvu, blkaddr, lvl, schq);
}
}
+ nix_clear_tx_xoff(rvu, blkaddr, NIX_TXSCH_LVL_TL1,
+ nix_get_tx_link(rvu, pcifunc));
+
+ /* On PF cleanup, clear the cfg done flag as the
+ * PF may have changed the default config.
+ */
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK)) {
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL1];
+ schq = nix_get_tx_link(rvu, pcifunc);
+ /* Do not clear pcifunc in txsch->pfvf_map[schq] because
+ * VF might be using this TL1 queue
+ */
+ map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
+ txsch->pfvf_map[schq] = TXSCH_SET_FLAG(map_func, 0x0);
+ }
/* Flush SMQs */
txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
@@ -1658,9 +2255,7 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
}
mutex_unlock(&rvu->rsrc_lock);
- /* Sync cached info for this LF in NDC-TX to LLC/DRAM */
- rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
- err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true);
+ err = rvu_ndc_sync(rvu, blkaddr, nixlf, NIX_AF_NDC_TX_SYNC);
if (err)
dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);
@@ -1676,6 +2271,7 @@ static int nix_txschq_free_one(struct rvu *rvu,
struct nix_txsch *txsch;
struct nix_hw *nix_hw;
u32 *pfvf_map;
+ int rc;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (blkaddr < 0)
@@ -1683,7 +2279,7 @@ static int nix_txschq_free_one(struct rvu *rvu,
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
- return -EINVAL;
+ return NIX_AF_ERR_INVALID_NIXBLK;
nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
if (nixlf < 0)
@@ -1700,15 +2296,24 @@ static int nix_txschq_free_one(struct rvu *rvu,
mutex_lock(&rvu->rsrc_lock);
if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) {
- mutex_unlock(&rvu->rsrc_lock);
+ rc = NIX_AF_ERR_TLX_INVALID;
goto err;
}
+ /* Clear SW_XOFF of this resource only.
+ * For the SMQ level, all path XOFFs
+ * need to be cleared by the user.
+ */
+ nix_clear_tx_xoff(rvu, blkaddr, lvl, schq);
+
/* Flush if it is an SMQ. The onus of disabling
 * TL2/3 queue links before the SMQ flush is on the user.
*/
- if (lvl == NIX_TXSCH_LVL_SMQ)
- nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
+ if (lvl == NIX_TXSCH_LVL_SMQ &&
+ nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf)) {
+ rc = NIX_AF_SMQ_FLUSH_FAILED;
+ goto err;
+ }
/* Free the resource */
rvu_free_rsrc(&txsch->schq, schq);
@@ -1716,7 +2321,8 @@ static int nix_txschq_free_one(struct rvu *rvu,
mutex_unlock(&rvu->rsrc_lock);
return 0;
err:
- return NIX_AF_ERR_TLX_INVALID;
+ mutex_unlock(&rvu->rsrc_lock);
+ return rc;
}
int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
@@ -1799,6 +2405,11 @@ static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg)
regbase == NIX_AF_TL4X_PIR(0))
return false;
break;
+ case NIX_TXSCH_LVL_MDQ:
+ if (regbase == NIX_AF_MDQX_CIR(0) ||
+ regbase == NIX_AF_MDQX_PIR(0))
+ return false;
+ break;
}
return true;
}
@@ -1816,19 +2427,88 @@ static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
return;
rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq),
(TXSCH_TL1_DFLT_RR_PRIO << 1));
- rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
- TXSCH_TL1_DFLT_RR_QTM);
+
+ /* On OcteonTx2 the config was in bytes; on newer silicons
+ * it is changed to a weight.
+ */
+ if (!rvu->hw->cap.nix_common_dwrr_mtu)
+ rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
+ TXSCH_TL1_DFLT_RR_QTM);
+ else
+ rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
+ CN10K_MAX_DWRR_WEIGHT);
+
rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00);
pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE);
}
+/* Register offset - [15:0]
+ * Scheduler Queue number - [25:16]
+ */
+#define NIX_TX_SCHQ_MASK GENMASK_ULL(25, 0)
+
+static int nix_txschq_cfg_read(struct rvu *rvu, struct nix_hw *nix_hw,
+ int blkaddr, struct nix_txschq_config *req,
+ struct nix_txschq_config *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ int idx, schq;
+ u64 reg;
+
+ rvu_nix_txsch_lock(nix_hw);
+ for (idx = 0; idx < req->num_regs; idx++) {
+ reg = req->reg[idx];
+ reg &= NIX_TX_SCHQ_MASK;
+ schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
+ if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, req->lvl, reg) ||
+ !is_valid_txschq(rvu, blkaddr, req->lvl, pcifunc, schq)) {
+ rvu_nix_txsch_unlock(nix_hw);
+ return NIX_AF_INVAL_TXSCHQ_CFG;
+ }
+ rsp->regval[idx] = rvu_read64(rvu, blkaddr, reg);
+ }
+ rsp->lvl = req->lvl;
+ rsp->num_regs = req->num_regs;
+ rvu_nix_txsch_unlock(nix_hw);
+ return 0;
+}
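/* Sketch (not part of this patch) of unpacking the mbox 'reg' field per the
 * layout documented above NIX_TX_SCHQ_MASK: bits [15:0] carry the register
 * offset and bits [25:16] the scheduler queue index.
 */
static inline void txschq_reg_decode_sketch(u64 reg, u16 *regbase, u16 *schq)
{
	*regbase = reg & 0xFFFF;
	*schq = (reg >> 16) & 0x3FF;
}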
+
+static void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr,
+ u16 pcifunc, struct nix_txsch *txsch)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int lbk_link_start, lbk_links;
+ u8 pf = rvu_get_pf(pcifunc);
+ int schq;
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return;
+
+ lbk_link_start = hw->cgx_links;
+
+ for (schq = 0; schq < txsch->schq.max; schq++) {
+ if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
+ continue;
+ /* Enable all LBK links with channel 63 by default so that
+ * packets can be sent to LBK with an NPC TX MCAM rule
+ */
+ lbk_links = hw->lbk_links;
+ while (lbk_links--)
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TL3_TL2X_LINKX_CFG(schq,
+ lbk_link_start +
+ lbk_links),
+ BIT_ULL(12) | RVU_SWITCH_LBK_CHAN);
+ }
+}
+
int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
struct nix_txschq_config *req,
- struct msg_rsp *rsp)
+ struct nix_txschq_config *rsp)
{
+ u64 reg, val, regval, schq_regbase, val_mask;
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
- u64 reg, regval, schq_regbase;
struct nix_txsch *txsch;
struct nix_hw *nix_hw;
int blkaddr, idx, err;
@@ -1845,7 +2525,10 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
- return -EINVAL;
+ return NIX_AF_ERR_INVALID_NIXBLK;
+
+ if (req->read)
+ return nix_txschq_cfg_read(rvu, nix_hw, blkaddr, req, rsp);
txsch = &nix_hw->txsch[req->lvl];
pfvf_map = txsch->pfvf_map;
@@ -1859,19 +2542,33 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
return 0;
}
+ rvu_nix_txsch_lock(nix_hw);
for (idx = 0; idx < req->num_regs; idx++) {
reg = req->reg[idx];
+ reg &= NIX_TX_SCHQ_MASK;
regval = req->regval[idx];
schq_regbase = reg & 0xFFFF;
+ val_mask = req->regval_mask[idx];
if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr,
- txsch->lvl, reg, regval))
+ txsch->lvl, reg, regval)) {
+ rvu_nix_txsch_unlock(nix_hw);
return NIX_AF_INVAL_TXSCHQ_CFG;
+ }
/* Check if shaping and coloring is supported */
if (!is_txschq_shaping_valid(hw, req->lvl, reg))
continue;
+ val = rvu_read64(rvu, blkaddr, reg);
+ regval = (val & val_mask) | (regval & ~val_mask);
+
+ /* Handle shaping state toggle specially */
+ if (hw->cap.nix_shaper_toggle_wait &&
+ handle_txschq_shaper_update(rvu, blkaddr, nixlf,
+ req->lvl, reg, regval))
+ continue;
+
/* Replace PF/VF visible NIXLF slot with HW NIXLF id */
if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
@@ -1910,6 +2607,10 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
rvu_write64(rvu, blkaddr, reg, regval);
}
+ rvu_nix_tx_tl2_cfg(rvu, blkaddr, pcifunc,
+ &nix_hw->txsch[NIX_TXSCH_LVL_TL2]);
+ rvu_nix_txsch_config_changed(nix_hw);
+ rvu_nix_txsch_unlock(nix_hw);
return 0;
}
@@ -1918,9 +2619,14 @@ static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
{
u64 regval = req->vtag_size;
- if (req->rx.vtag_type > 7 || req->vtag_size > VTAGSIZE_T8)
+ if (req->rx.vtag_type > NIX_AF_LFX_RX_VTAG_TYPE7 ||
+ req->vtag_size > VTAGSIZE_T8)
return -EINVAL;
+ /* RX VTAG Types 7 and 6 are reserved for VF VLAN & FDSA tag strip */
+ if (req->rx.vtag_type >= NIX_AF_LFX_RX_VTAG_TYPE6)
+ return NIX_AF_ERR_RX_VTAG_INUSE;
+
if (req->rx.capture_vtag)
regval |= BIT_ULL(5);
if (req->rx.strip_vtag)
@@ -1931,9 +2637,169 @@ static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
return 0;
}
+static int nix_tx_vtag_free(struct rvu *rvu, int blkaddr,
+ u16 pcifunc, int index)
+{
+ struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ struct nix_txvlan *vlan;
+
+ if (!nix_hw)
+ return NIX_AF_ERR_INVALID_NIXBLK;
+
+ vlan = &nix_hw->txvlan;
+ if (vlan->entry2pfvf_map[index] != pcifunc)
+ return NIX_AF_ERR_PARAM;
+
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TX_VTAG_DEFX_DATA(index), 0x0ull);
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TX_VTAG_DEFX_CTL(index), 0x0ull);
+
+ vlan->entry2pfvf_map[index] = 0;
+ rvu_free_rsrc(&vlan->rsrc, index);
+
+ return 0;
+}
+
+static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc)
+{
+ struct nix_txvlan *vlan;
+ struct nix_hw *nix_hw;
+ int index, blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return;
+
+ vlan = &nix_hw->txvlan;
+
+ mutex_lock(&vlan->rsrc_lock);
+ /* Scan all the entries and free the ones mapped to 'pcifunc' */
+ for (index = 0; index < vlan->rsrc.max; index++) {
+ if (vlan->entry2pfvf_map[index] == pcifunc)
+ nix_tx_vtag_free(rvu, blkaddr, pcifunc, index);
+ }
+ mutex_unlock(&vlan->rsrc_lock);
+}
+
+static int nix_tx_vtag_alloc(struct rvu *rvu, int blkaddr,
+ u64 vtag, u8 size)
+{
+ struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ struct nix_txvlan *vlan;
+ u64 regval;
+ int index;
+
+ if (!nix_hw)
+ return NIX_AF_ERR_INVALID_NIXBLK;
+
+ vlan = &nix_hw->txvlan;
+
+ mutex_lock(&vlan->rsrc_lock);
+
+ index = rvu_alloc_rsrc(&vlan->rsrc);
+ if (index < 0) {
+ mutex_unlock(&vlan->rsrc_lock);
+ return index;
+ }
+
+ mutex_unlock(&vlan->rsrc_lock);
+
+ regval = size ? vtag : vtag << 32;
+
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TX_VTAG_DEFX_DATA(index), regval);
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TX_VTAG_DEFX_CTL(index), size);
+
+ return index;
+}
+
+static int nix_tx_vtag_decfg(struct rvu *rvu, int blkaddr,
+ struct nix_vtag_config *req)
+{
+ struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ u16 pcifunc = req->hdr.pcifunc;
+ int idx0 = req->tx.vtag0_idx;
+ int idx1 = req->tx.vtag1_idx;
+ struct nix_txvlan *vlan;
+ int err = 0;
+
+ if (!nix_hw)
+ return NIX_AF_ERR_INVALID_NIXBLK;
+
+ vlan = &nix_hw->txvlan;
+ if (req->tx.free_vtag0 && req->tx.free_vtag1)
+ if (vlan->entry2pfvf_map[idx0] != pcifunc ||
+ vlan->entry2pfvf_map[idx1] != pcifunc)
+ return NIX_AF_ERR_PARAM;
+
+ mutex_lock(&vlan->rsrc_lock);
+
+ if (req->tx.free_vtag0) {
+ err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx0);
+ if (err)
+ goto exit;
+ }
+
+ if (req->tx.free_vtag1)
+ err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx1);
+
+exit:
+ mutex_unlock(&vlan->rsrc_lock);
+ return err;
+}
+
+static int nix_tx_vtag_cfg(struct rvu *rvu, int blkaddr,
+ struct nix_vtag_config *req,
+ struct nix_vtag_config_rsp *rsp)
+{
+ struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ struct nix_txvlan *vlan;
+ u16 pcifunc = req->hdr.pcifunc;
+
+ if (!nix_hw)
+ return NIX_AF_ERR_INVALID_NIXBLK;
+
+ vlan = &nix_hw->txvlan;
+ if (req->tx.cfg_vtag0) {
+ rsp->vtag0_idx =
+ nix_tx_vtag_alloc(rvu, blkaddr,
+ req->tx.vtag0, req->vtag_size);
+
+ if (rsp->vtag0_idx < 0)
+ return NIX_AF_ERR_TX_VTAG_NOSPC;
+
+ vlan->entry2pfvf_map[rsp->vtag0_idx] = pcifunc;
+ }
+
+ if (req->tx.cfg_vtag1) {
+ rsp->vtag1_idx =
+ nix_tx_vtag_alloc(rvu, blkaddr,
+ req->tx.vtag1, req->vtag_size);
+
+ if (rsp->vtag1_idx < 0)
+ goto err_free;
+
+ vlan->entry2pfvf_map[rsp->vtag1_idx] = pcifunc;
+ }
+
+ return 0;
+
+err_free:
+ if (req->tx.cfg_vtag0)
+ nix_tx_vtag_free(rvu, blkaddr, pcifunc, rsp->vtag0_idx);
+
+ return NIX_AF_ERR_TX_VTAG_NOSPC;
+}
+
int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
struct nix_vtag_config *req,
- struct msg_rsp *rsp)
+ struct nix_vtag_config_rsp *rsp)
{
u16 pcifunc = req->hdr.pcifunc;
int blkaddr, nixlf, err;
@@ -1943,19 +2809,28 @@ int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
return err;
if (req->cfg_type) {
+ /* rx vtag configuration */
err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
if (err)
return NIX_AF_ERR_PARAM;
} else {
- /* TODO: handle tx vtag configuration */
- return 0;
+ /* tx vtag configuration */
+ if ((req->tx.cfg_vtag0 || req->tx.cfg_vtag1) &&
+ (req->tx.free_vtag0 || req->tx.free_vtag1))
+ return NIX_AF_ERR_PARAM;
+
+ if (req->tx.cfg_vtag0 || req->tx.cfg_vtag1)
+ return nix_tx_vtag_cfg(rvu, blkaddr, req, rsp);
+
+ if (req->tx.free_vtag0 || req->tx.free_vtag1)
+ return nix_tx_vtag_decfg(rvu, blkaddr, req);
}
return 0;
}
-static int nix_setup_mce(struct rvu *rvu, int mce, u8 op,
- u16 pcifunc, int next, bool eol)
+static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
+ int mce, u8 op, u16 pcifunc, int next, bool eol)
{
struct nix_aq_enq_req aq_req;
int err;
@@ -1965,8 +2840,8 @@ static int nix_setup_mce(struct rvu *rvu, int mce, u8 op,
aq_req.op = op;
aq_req.qidx = mce;
- /* Forward bcast pkts to RQ0, RSS not needed */
- aq_req.mce.op = 0;
+ /* Use RSS with RSS index 0 */
+ aq_req.mce.op = 1;
aq_req.mce.index = 0;
aq_req.mce.eol = eol;
aq_req.mce.pf_func = pcifunc;
@@ -1975,7 +2850,7 @@ static int nix_setup_mce(struct rvu *rvu, int mce, u8 op,
/* All fields valid */
*(u64 *)(&aq_req.mce_mask) = ~0ULL;
- err = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
+ err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL);
if (err) {
dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
@@ -1984,8 +2859,8 @@ static int nix_setup_mce(struct rvu *rvu, int mce, u8 op,
return 0;
}
-static int nix_update_mce_list(struct nix_mce_list *mce_list,
- u16 pcifunc, bool add)
+static int nix_update_mce_list_entry(struct nix_mce_list *mce_list,
+ u16 pcifunc, bool add)
{
struct mce *mce, *tail = NULL;
bool delete = false;
@@ -1996,6 +2871,9 @@ static int nix_update_mce_list(struct nix_mce_list *mce_list,
if (mce->pcifunc == pcifunc && !add) {
delete = true;
break;
+ } else if (mce->pcifunc == pcifunc && add) {
+ /* entry already exists */
+ return 0;
}
tail = mce;
}
@@ -2023,36 +2901,23 @@ static int nix_update_mce_list(struct nix_mce_list *mce_list,
return 0;
}
-int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
+int nix_update_mce_list(struct rvu *rvu, u16 pcifunc,
+ struct nix_mce_list *mce_list,
+ int mce_idx, int mcam_index, bool add)
{
- int err = 0, idx, next_idx, last_idx;
- struct nix_mce_list *mce_list;
+ int err = 0, idx, next_idx, last_idx, blkaddr, npc_blkaddr;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
struct nix_mcast *mcast;
struct nix_hw *nix_hw;
- struct rvu_pfvf *pfvf;
struct mce *mce;
- int blkaddr;
-
- /* Broadcast pkt replication is not needed for AF's VFs, hence skip */
- if (is_afvf(pcifunc))
- return 0;
-
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
- if (blkaddr < 0)
- return 0;
-
- nix_hw = get_nix_hw(rvu->hw, blkaddr);
- if (!nix_hw)
- return 0;
- mcast = &nix_hw->mcast;
+ if (!mce_list)
+ return -EINVAL;
/* Get this PF/VF func's MCE index */
- pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
- idx = pfvf->bcast_mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);
+ idx = mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);
- mce_list = &pfvf->bcast_mce_list;
- if (idx > (pfvf->bcast_mce_idx + mce_list->max)) {
+ if (idx > (mce_idx + mce_list->max)) {
dev_err(rvu->dev,
"%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
__func__, idx, mce_list->max,
@@ -2060,20 +2925,26 @@ int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
return -EINVAL;
}
+ err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return err;
+
+ mcast = &nix_hw->mcast;
mutex_lock(&mcast->mce_lock);
- err = nix_update_mce_list(mce_list, pcifunc, add);
+ err = nix_update_mce_list_entry(mce_list, pcifunc, add);
if (err)
goto end;
/* Disable MCAM entry in NPC */
if (!mce_list->count) {
- rvu_npc_enable_bcast_entry(rvu, pcifunc, false);
+ npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, mcam_index, false);
goto end;
}
/* Dump the updated list to HW */
- idx = pfvf->bcast_mce_idx;
+ idx = mce_idx;
last_idx = idx + mce_list->count - 1;
hlist_for_each_entry(mce, &mce_list->head, node) {
if (idx > last_idx)
@@ -2081,9 +2952,9 @@ int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
next_idx = idx + 1;
/* EOL should be set in last MCE */
- err = nix_setup_mce(rvu, idx, NIX_AQ_INSTOP_WRITE,
- mce->pcifunc, next_idx,
- (next_idx > last_idx) ? true : false);
+ err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE,
+ mce->pcifunc, next_idx,
+ (next_idx > last_idx) ? true : false);
if (err)
goto end;
idx++;
@@ -2094,7 +2965,76 @@ end:
return err;
}
-static int nix_setup_bcast_tables(struct rvu *rvu, struct nix_hw *nix_hw)
+void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type,
+ struct nix_mce_list **mce_list, int *mce_idx)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_pfvf *pfvf;
+
+ if (!hw->cap.nix_rx_multicast ||
+ !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc & ~RVU_PFVF_FUNC_MASK))) {
+ *mce_list = NULL;
+ *mce_idx = 0;
+ return;
+ }
+
+ /* Get this PF/VF func's MCE index */
+ pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
+
+ if (type == NIXLF_BCAST_ENTRY) {
+ *mce_list = &pfvf->bcast_mce_list;
+ *mce_idx = pfvf->bcast_mce_idx;
+ } else if (type == NIXLF_ALLMULTI_ENTRY) {
+ *mce_list = &pfvf->mcast_mce_list;
+ *mce_idx = pfvf->mcast_mce_idx;
+ } else if (type == NIXLF_PROMISC_ENTRY) {
+ *mce_list = &pfvf->promisc_mce_list;
+ *mce_idx = pfvf->promisc_mce_idx;
+ } else {
+ *mce_list = NULL;
+ *mce_idx = 0;
+ }
+}
+
+static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
+ int type, bool add)
+{
+ int err = 0, nixlf, blkaddr, mcam_index, mce_idx;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct nix_mce_list *mce_list;
+ int pf;
+
+ /* skip multicast pkt replication for AF's VFs & SDP links */
+ if (is_afvf(pcifunc) || is_sdp_pfvf(pcifunc))
+ return 0;
+
+ if (!hw->cap.nix_rx_multicast)
+ return 0;
+
+ pf = rvu_get_pf(pcifunc);
+ if (!is_pf_cgxmapped(rvu, pf))
+ return 0;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return -EINVAL;
+
+ nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+ if (nixlf < 0)
+ return -EINVAL;
+
+ nix_get_mce_list(rvu, pcifunc, type, &mce_list, &mce_idx);
+
+ mcam_index = npc_get_nixlf_mcam_index(mcam,
+ pcifunc & ~RVU_PFVF_FUNC_MASK,
+ nixlf, type);
+ err = nix_update_mce_list(rvu, pcifunc, mce_list,
+ mce_idx, mcam_index, add);
+ return err;
+}
+
+static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw)
{
struct nix_mcast *mcast = &nix_hw->mcast;
int err, pf, numvfs, idx;
@@ -2112,11 +3052,23 @@ static int nix_setup_bcast_tables(struct rvu *rvu, struct nix_hw *nix_hw)
numvfs = (cfg >> 12) & 0xFF;
pfvf = &rvu->pf[pf];
- /* Save the start MCE */
- pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
+ /* This NIX0/1 block mapped to PF ? */
+ if (pfvf->nix_blkaddr != nix_hw->blkaddr)
+ continue;
+
+ /* save start idx of broadcast mce list */
+ pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);
+ /* save start idx of multicast mce list */
+ pfvf->mcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
+ nix_mce_list_init(&pfvf->mcast_mce_list, numvfs + 1);
+
+ /* save the start idx of promisc mce list */
+ pfvf->promisc_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
+ nix_mce_list_init(&pfvf->promisc_mce_list, numvfs + 1);
+
for (idx = 0; idx < (numvfs + 1); idx++) {
/* idx-0 is for PF, followed by VFs */
pcifunc = (pf << RVU_PFVF_PF_SHIFT);
@@ -2126,9 +3078,26 @@ static int nix_setup_bcast_tables(struct rvu *rvu, struct nix_hw *nix_hw)
* Will be updated when a NIXLF is attached/detached to
* these PF/VFs.
*/
- err = nix_setup_mce(rvu, pfvf->bcast_mce_idx + idx,
- NIX_AQ_INSTOP_INIT,
- pcifunc, 0, true);
+ err = nix_blk_setup_mce(rvu, nix_hw,
+ pfvf->bcast_mce_idx + idx,
+ NIX_AQ_INSTOP_INIT,
+ pcifunc, 0, true);
+ if (err)
+ return err;
+
+ /* add dummy entries to multicast mce list */
+ err = nix_blk_setup_mce(rvu, nix_hw,
+ pfvf->mcast_mce_idx + idx,
+ NIX_AQ_INSTOP_INIT,
+ pcifunc, 0, true);
+ if (err)
+ return err;
+
+ /* add dummy entries to promisc mce list */
+ err = nix_blk_setup_mce(rvu, nix_hw,
+ pfvf->promisc_mce_idx + idx,
+ NIX_AQ_INSTOP_INIT,
+ pcifunc, 0, true);
if (err)
return err;
}
@@ -2177,7 +3146,32 @@ static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
mutex_init(&mcast->mce_lock);
- return nix_setup_bcast_tables(rvu, nix_hw);
+ return nix_setup_mce_tables(rvu, nix_hw);
+}
+
+static int nix_setup_txvlan(struct rvu *rvu, struct nix_hw *nix_hw)
+{
+ struct nix_txvlan *vlan = &nix_hw->txvlan;
+ int err;
+
+ /* Allocate resource bitmap for tx vtag def registers */
+ vlan->rsrc.max = NIX_TX_VTAG_DEF_MAX;
+ err = rvu_alloc_bitmap(&vlan->rsrc);
+ if (err)
+ return -ENOMEM;
+
+ /* Alloc memory for saving entry to RVU PFFUNC allocation mapping */
+ vlan->entry2pfvf_map = devm_kcalloc(rvu->dev, vlan->rsrc.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!vlan->entry2pfvf_map)
+ goto free_mem;
+
+ mutex_init(&vlan->rsrc_lock);
+ return 0;
+
+free_mem:
+ kfree(vlan->rsrc.bmap);
+ return -ENOMEM;
}
static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
@@ -2225,6 +3219,15 @@ static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
for (schq = 0; schq < txsch->schq.max; schq++)
txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
}
+
+ /* Setup a default value of 8192 as DWRR MTU */
+ if (rvu->hw->cap.nix_common_dwrr_mtu) {
+ rvu_write64(rvu, blkaddr, NIX_AF_DWRR_RPM_MTU,
+ convert_bytes_to_dwrr_mtu(8192));
+ rvu_write64(rvu, blkaddr, NIX_AF_DWRR_SDP_MTU,
+ convert_bytes_to_dwrr_mtu(8192));
+ }
+
return 0;
}
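/* Sketch (assumption, not part of this patch): the DWRR MTU registers are
 * assumed here to take a power-of-two exponent, so the 8192-byte default
 * above would be written as 13. convert_bytes_to_dwrr_mtu() is the real
 * helper; this is only an illustrative stand-in.
 */
static inline int dwrr_mtu_exp_sketch(u32 bytes)
{
	return fls(bytes) - 1;		/* fls(8192) - 1 == 13 */
}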
@@ -2279,6 +3282,63 @@ static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw,
return 0;
}
+static void rvu_get_lbk_link_max_frs(struct rvu *rvu, u16 *max_mtu)
+{
+ /* CN10K supports LBK FIFO size 72 KB */
+ if (rvu->hw->lbk_bufsize == 0x12000)
+ *max_mtu = CN10K_LBK_LINK_MAX_FRS;
+ else
+ *max_mtu = NIC_HW_MAX_FRS;
+}
+
+static void rvu_get_lmac_link_max_frs(struct rvu *rvu, u16 *max_mtu)
+{
+ /* RPM supports FIFO len 128 KB */
+ if (rvu_cgx_get_fifolen(rvu) == 0x20000)
+ *max_mtu = CN10K_LMAC_LINK_MAX_FRS;
+ else
+ *max_mtu = NIC_HW_MAX_FRS;
+}
+
+int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req,
+ struct nix_hw_info *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ u64 dwrr_mtu;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ rsp->vwqe_delay = 0;
+ if (!is_rvu_otx2(rvu))
+ rsp->vwqe_delay = rvu_read64(rvu, blkaddr, NIX_AF_VWQE_TIMER) &
+ GENMASK_ULL(9, 0);
+
+ if (is_afvf(pcifunc))
+ rvu_get_lbk_link_max_frs(rvu, &rsp->max_mtu);
+ else
+ rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu);
+
+ rsp->min_mtu = NIC_HW_MIN_FRS;
+
+ if (!rvu->hw->cap.nix_common_dwrr_mtu) {
+ /* Return '1' on OTx2 */
+ rsp->rpm_dwrr_mtu = 1;
+ rsp->sdp_dwrr_mtu = 1;
+ return 0;
+ }
+
+ dwrr_mtu = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_DWRR_RPM_MTU);
+ rsp->rpm_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);
+
+ dwrr_mtu = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_DWRR_SDP_MTU);
+ rsp->sdp_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);
+
+ return 0;
+}
+
int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
@@ -2324,6 +3384,8 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
struct nix_rx_flowkey_alg *field;
struct nix_rx_flowkey_alg tmp;
u32 key_type, valid_key;
+ int l4_key_offset = 0;
+ u32 l3_l4_src_dst;
if (!alg)
return -EINVAL;
@@ -2350,6 +3412,15 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
* group_member - Enabled when protocol is part of a group.
*/
+ /* The last 4 bits (31:28) are reserved to specify SRC, DST
+ * selection for L3, L4, i.e. IPV[4,6]_SRC, IPV[4,6]_DST,
+ * [TCP,UDP,SCTP]_SRC, [TCP,UDP,SCTP]_DST
+ * 31 => L3_SRC, 30 => L3_DST, 29 => L4_SRC, 28 => L4_DST
+ */
+ l3_l4_src_dst = flow_cfg;
+ /* Reset these 4 bits, so that they won't be part of the key */
+ flow_cfg &= NIX_FLOW_KEY_TYPE_L3_L4_MASK;
+
keyoff_marker = 0; max_key_off = 0; group_member = 0;
nr_field = 0; key_off = 0; field_marker = 1;
field = &tmp; max_bit_pos = fls(flow_cfg);
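/* Sketch (not part of this patch): composing a flowkey config that hashes
 * only the IPv4 source address and TCP source port, using the SRC/DST
 * selector bits (31:28) described above. The flag names are the ones already
 * used in this function; the combination itself is just an example.
 */
static u32 flowkey_v4_tcp_src_only_sketch(void)
{
	return NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_TCP |
	       NIX_FLOW_KEY_TYPE_L3_SRC_ONLY | NIX_FLOW_KEY_TYPE_L4_SRC_ONLY;
}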
@@ -2370,6 +3441,13 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
/* This should be set to 1, when SEL_CHAN is set */
field->bytesm1 = 1;
break;
+ case NIX_FLOW_KEY_TYPE_IPV4_PROTO:
+ field->lid = NPC_LID_LC;
+ field->hdr_offset = 9; /* offset */
+ field->bytesm1 = 0; /* 1 byte */
+ field->ltype_match = NPC_LT_LC_IP;
+ field->ltype_mask = 0xF;
+ break;
case NIX_FLOW_KEY_TYPE_IPV4:
case NIX_FLOW_KEY_TYPE_INNR_IPV4:
field->lid = NPC_LID_LC;
@@ -2380,6 +3458,22 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
}
field->hdr_offset = 12; /* SIP offset */
field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
+
+ /* Only SIP */
+ if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_SRC_ONLY)
+ field->bytesm1 = 3; /* SIP, 4 bytes */
+
+ if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_DST_ONLY) {
+ /* Both SIP + DIP */
+ if (field->bytesm1 == 3) {
+ field->bytesm1 = 7; /* SIP + DIP, 8B */
+ } else {
+ /* Only DIP */
+ field->hdr_offset = 16; /* DIP off */
+ field->bytesm1 = 3; /* DIP, 4 bytes */
+ }
+ }
+
field->ltype_mask = 0xF; /* Match only IPv4 */
keyoff_marker = false;
break;
@@ -2393,6 +3487,22 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
}
field->hdr_offset = 8; /* SIP offset */
field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
+
+ /* Only SIP */
+ if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_SRC_ONLY)
+ field->bytesm1 = 15; /* SIP, 16 bytes */
+
+ if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_DST_ONLY) {
+ /* Both SIP + DIP */
+ if (field->bytesm1 == 15) {
+ /* SIP + DIP, 32 bytes */
+ field->bytesm1 = 31;
+ } else {
+ /* Only DIP */
+ field->hdr_offset = 24; /* DIP off */
+ field->bytesm1 = 15; /* DIP,16 bytes */
+ }
+ }
field->ltype_mask = 0xF; /* Match only IPv6 */
break;
case NIX_FLOW_KEY_TYPE_TCP:
@@ -2408,6 +3518,21 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
field->lid = NPC_LID_LH;
field->bytesm1 = 3; /* Sport + Dport, 4 bytes */
+ if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L4_SRC_ONLY)
+ field->bytesm1 = 1; /* SRC, 2 bytes */
+
+ if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L4_DST_ONLY) {
+ /* Both SRC + DST */
+ if (field->bytesm1 == 1) {
+ /* SRC + DST, 4 bytes */
+ field->bytesm1 = 3;
+ } else {
+ /* Only DST port */
+ field->hdr_offset = 2; /* DST off */
+ field->bytesm1 = 1; /* DST, 2 bytes */
+ }
+ }
+
/* Enum values for NPC_LID_LD and NPC_LID_LG are same,
* so no need to change the ltype_match, just change
* the lid for inner protocols
@@ -2449,6 +3574,12 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
field_marker = false;
keyoff_marker = false;
}
+
+ /* TCP/UDP/SCTP and ESP/AH fall at the same offset, so
+ * remember the TCP key offset within the 40 byte hash key.
+ */
+ if (key_type == NIX_FLOW_KEY_TYPE_TCP)
+ l4_key_offset = key_off;
break;
case NIX_FLOW_KEY_TYPE_NVGRE:
field->lid = NPC_LID_LD;
@@ -2512,6 +3643,13 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
field->ltype_match = NPC_LT_LE_GTPU;
field->ltype_mask = 0xF;
break;
+ case NIX_FLOW_KEY_TYPE_CH_LEN_90B:
+ field->lid = NPC_LID_LA;
+ field->hdr_offset = 24;
+ field->bytesm1 = 1; /* 2 Bytes*/
+ field->ltype_match = NPC_LT_LA_CUSTOM_L2_90B_ETHER;
+ field->ltype_mask = 0xF;
+ break;
case NIX_FLOW_KEY_TYPE_VLAN:
field->lid = NPC_LID_LB;
field->hdr_offset = 2; /* Skip TPID (2-bytes) */
@@ -2520,11 +3658,38 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
field->ltype_mask = 0xF;
field->fn_mask = 1; /* Mask out the first nibble */
break;
+ case NIX_FLOW_KEY_TYPE_CUSTOM0:
+ field->lid = NPC_LID_LC;
+ field->hdr_offset = 6;
+ field->bytesm1 = 1; /* 2 Bytes*/
+ field->ltype_match = NPC_LT_LC_CUSTOM0;
+ field->ltype_mask = 0xF;
+ break;
+ case NIX_FLOW_KEY_TYPE_AH:
+ case NIX_FLOW_KEY_TYPE_ESP:
+ field->hdr_offset = 0;
+ field->bytesm1 = 7; /* SPI + sequence number */
+ field->ltype_mask = 0xF;
+ field->lid = NPC_LID_LE;
+ field->ltype_match = NPC_LT_LE_ESP;
+ if (key_type == NIX_FLOW_KEY_TYPE_AH) {
+ field->lid = NPC_LID_LD;
+ field->ltype_match = NPC_LT_LD_AH;
+ field->hdr_offset = 4;
+ keyoff_marker = false;
+ }
+ break;
}
field->ena = 1;
/* Found a valid flow key type */
if (valid_key) {
+ /* Use the key offset of TCP/UDP/SCTP fields
+ * for ESP/AH fields.
+ */
+ if (key_type == NIX_FLOW_KEY_TYPE_ESP ||
+ key_type == NIX_FLOW_KEY_TYPE_AH)
+ key_off = l4_key_offset;
field->key_offset = key_off;
memcpy(&alg[nr_field], field, sizeof(*field));
max_key_off = max(max_key_off, field->bytesm1 + 1);
@@ -2555,7 +3720,7 @@ static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg)
hw = get_nix_hw(rvu->hw, blkaddr);
if (!hw)
- return -EINVAL;
+ return NIX_AF_ERR_INVALID_NIXBLK;
/* No room to add a new flow hash algorithm */
if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX)
@@ -2595,7 +3760,7 @@ int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
- return -EINVAL;
+ return NIX_AF_ERR_INVALID_NIXBLK;
alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg);
/* Failed to get algo index from the existing list, reserve a new one */
@@ -2684,6 +3849,7 @@ int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
struct nix_set_mac_addr *req,
struct msg_rsp *rsp)
{
+ bool from_vf = !!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK);
u16 pcifunc = req->hdr.pcifunc;
int blkaddr, nixlf, err;
struct rvu_pfvf *pfvf;
@@ -2694,12 +3860,23 @@ int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
pfvf = rvu_get_pfvf(rvu, pcifunc);
+ /* untrusted VF can't overwrite admin(PF) changes */
+ if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
+ (from_vf && test_bit(PF_SET_VF_MAC, &pfvf->flags))) {
+ dev_warn(rvu->dev,
+ "MAC address set by admin(PF) cannot be overwritten by untrusted VF");
+ return -EPERM;
+ }
+
ether_addr_copy(pfvf->mac_addr, req->mac_addr);
rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
pfvf->rx_chan_base, req->mac_addr);
- rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
+ if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf)
+ ether_addr_copy(pfvf->default_mac, req->mac_addr);
+
+ rvu_switch_update_rules(rvu, pcifunc);
return 0;
}
@@ -2724,31 +3901,74 @@ int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu,
int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
struct msg_rsp *rsp)
{
- bool allmulti = false, disable_promisc = false;
+ bool allmulti, promisc, nix_rx_multicast;
u16 pcifunc = req->hdr.pcifunc;
- int blkaddr, nixlf, err;
struct rvu_pfvf *pfvf;
+ int nixlf, err;
- err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ promisc = req->mode & NIX_RX_MODE_PROMISC ? true : false;
+ allmulti = req->mode & NIX_RX_MODE_ALLMULTI ? true : false;
+ pfvf->use_mce_list = req->mode & NIX_RX_MODE_USE_MCE ? true : false;
+
+ nix_rx_multicast = rvu->hw->cap.nix_rx_multicast & pfvf->use_mce_list;
+
+ if (is_vf(pcifunc) && !nix_rx_multicast &&
+ (promisc || allmulti)) {
+ dev_warn_ratelimited(rvu->dev,
+ "VF promisc/multicast not supported\n");
+ return 0;
+ }
+
+ /* untrusted VF can't configure promisc/allmulti */
+ if (is_vf(pcifunc) && !test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
+ (promisc || allmulti))
+ return 0;
+
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
if (err)
return err;
- pfvf = rvu_get_pfvf(rvu, pcifunc);
+ if (nix_rx_multicast) {
+ /* add/del this PF_FUNC to/from mcast pkt replication list */
+ err = nix_update_mce_rule(rvu, pcifunc, NIXLF_ALLMULTI_ENTRY,
+ allmulti);
+ if (err) {
+ dev_err(rvu->dev,
+ "Failed to update pcifunc 0x%x to multicast list\n",
+ pcifunc);
+ return err;
+ }
- if (req->mode & NIX_RX_MODE_PROMISC)
- allmulti = false;
- else if (req->mode & NIX_RX_MODE_ALLMULTI)
- allmulti = true;
- else
- disable_promisc = true;
+ /* add/del this PF_FUNC to/from promisc pkt replication list */
+ err = nix_update_mce_rule(rvu, pcifunc, NIXLF_PROMISC_ENTRY,
+ promisc);
+ if (err) {
+ dev_err(rvu->dev,
+ "Failed to update pcifunc 0x%x to promisc list\n",
+ pcifunc);
+ return err;
+ }
+ }
- if (disable_promisc)
- rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf);
- else
- rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
- pfvf->rx_chan_base, allmulti);
+ /* install/uninstall allmulti entry */
+ if (allmulti) {
+ rvu_npc_install_allmulti_entry(rvu, pcifunc, nixlf,
+ pfvf->rx_chan_base);
+ } else {
+ if (!nix_rx_multicast)
+ rvu_npc_enable_allmulti_entry(rvu, pcifunc, nixlf, false);
+ }
- rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
+ /* install/uninstall promisc entry */
+ if (promisc) {
+ rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
+ pfvf->rx_chan_base,
+ pfvf->rx_chan_cnt);
+ } else {
+ if (!nix_rx_multicast)
+ rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf, false);
+ }
return 0;
}
@@ -2798,6 +4018,80 @@ static void nix_find_link_frs(struct rvu *rvu,
req->minlen = minlen;
}
+static int
+nix_config_link_credits(struct rvu *rvu, int blkaddr, int link,
+ u16 pcifunc, u64 tx_credits)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int pf = rvu_get_pf(pcifunc);
+ u8 cgx_id = 0, lmac_id = 0;
+ unsigned long poll_tmo;
+ bool restore_tx_en = 0;
+ struct nix_hw *nix_hw;
+ u64 cfg, sw_xoff = 0;
+ u32 schq = 0;
+ u32 credits;
+ int rc;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return NIX_AF_ERR_INVALID_NIXBLK;
+
+ if (tx_credits == nix_hw->tx_credits[link])
+ return 0;
+
+ /* Enable CGX Tx if it is disabled, so that credits can return */
+ if (is_pf_cgxmapped(rvu, pf)) {
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu),
+ lmac_id, true);
+ }
+
+ rvu_nix_txsch_lock(nix_hw);
+ mutex_lock(&rvu->rsrc_lock);
+ /* Disable new traffic to link */
+ if (hw->cap.nix_shaping) {
+ schq = nix_get_tx_link(rvu, pcifunc);
+ sw_xoff = rvu_read64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq));
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TL1X_SW_XOFF(schq), BIT_ULL(0));
+ }
+
+ rc = NIX_AF_ERR_LINK_CREDITS;
+ poll_tmo = jiffies + usecs_to_jiffies(200000);
+ /* Wait for credits to return */
+ do {
+ if (time_after(jiffies, poll_tmo))
+ goto exit;
+ usleep_range(100, 200);
+
+ cfg = rvu_read64(rvu, blkaddr,
+ NIX_AF_TX_LINKX_NORM_CREDIT(link));
+ credits = (cfg >> 12) & 0xFFFFFULL;
+ } while (credits != nix_hw->tx_credits[link]);
+
+ cfg &= ~(0xFFFFFULL << 12);
+ cfg |= (tx_credits << 12);
+ rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
+ rc = 0;
+
+ nix_hw->tx_credits[link] = tx_credits;
+ rvu_nix_update_link_credits(rvu, blkaddr, link, cfg);
+
+exit:
+ /* Enable traffic back */
+ if (hw->cap.nix_shaping && !sw_xoff)
+ rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq), 0);
+
+ /* Restore state of cgx tx */
+ if (restore_tx_en)
+ rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
+
+ mutex_unlock(&rvu->rsrc_lock);
+ rvu_nix_txsch_unlock(nix_hw);
+ return rc;
+}
+
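
nix_config_link_credits() above quiesces the link, then polls the credit counter with a hard 200 ms deadline before rewriting the credit field. A user-space sketch of that poll-with-deadline pattern (the hardware read is simulated; the helper names are illustrative only, not the driver's API):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    /* Simulated credit counter that "drains back" over time. */
    static uint64_t read_credit_count(void)
    {
        static uint64_t credits;

        if (credits < 512)
            credits += 64;      /* pretend in-flight packets free credits */
        return credits;
    }

    static uint64_t now_us(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
    }

    /* Wait up to timeout_us for all credits to return, like the 200 ms loop above. */
    static bool wait_credits(uint64_t expected, uint64_t timeout_us)
    {
        uint64_t deadline = now_us() + timeout_us;

        while (read_credit_count() != expected) {
            if (now_us() > deadline)
                return false;   /* caller would report NIX_AF_ERR_LINK_CREDITS */
            usleep(100);        /* mirrors usleep_range(100, 200) */
        }
        return true;
    }

    int main(void)
    {
        printf("credits %s\n", wait_credits(512, 200000) ? "returned" : "timed out");
        return 0;
    }
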
int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
struct msg_rsp *rsp)
{
@@ -2808,7 +4102,9 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
struct nix_txsch *txsch;
u64 cfg, lmac_fifo_len;
struct nix_hw *nix_hw;
+ struct rvu_pfvf *pfvf;
u8 cgx = 0, lmac = 0;
+ u16 max_mtu;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (blkaddr < 0)
@@ -2816,9 +4112,14 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
- return -EINVAL;
+ return NIX_AF_ERR_INVALID_NIXBLK;
+
+ if (is_afvf(pcifunc))
+ rvu_get_lbk_link_max_frs(rvu, &max_mtu);
+ else
+ rvu_get_lmac_link_max_frs(rvu, &max_mtu);
- if (!req->sdp_link && req->maxlen > NIC_HW_MAX_FRS)
+ if (!req->sdp_link && req->maxlen > max_mtu)
return NIX_AF_ERR_FRS_INVALID;
if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
@@ -2858,7 +4159,8 @@ rx_frscfg:
link = (cgx * hw->lmac_per_cgx) + lmac;
} else if (pf == 0) {
/* For VFs of PF0 ingress is LBK port, so config LBK link */
- link = hw->cgx_links;
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ link = hw->cgx_links + pfvf->lbkid;
}
if (link < 0)
@@ -2878,71 +4180,10 @@ linkcfg:
/* Update transmit credits for CGX links */
lmac_fifo_len =
- CGX_FIFO_LEN / cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
- cfg = rvu_read64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link));
- cfg &= ~(0xFFFFFULL << 12);
- cfg |= ((lmac_fifo_len - req->maxlen) / 16) << 12;
- rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
- return 0;
-}
-
-int rvu_mbox_handler_nix_rxvlan_alloc(struct rvu *rvu, struct msg_req *req,
- struct msg_rsp *rsp)
-{
- struct npc_mcam_alloc_entry_req alloc_req = { };
- struct npc_mcam_alloc_entry_rsp alloc_rsp = { };
- struct npc_mcam_free_entry_req free_req = { };
- u16 pcifunc = req->hdr.pcifunc;
- int blkaddr, nixlf, err;
- struct rvu_pfvf *pfvf;
-
- /* LBK VFs do not have separate MCAM UCAST entry hence
- * skip allocating rxvlan for them
- */
- if (is_afvf(pcifunc))
- return 0;
-
- pfvf = rvu_get_pfvf(rvu, pcifunc);
- if (pfvf->rxvlan)
- return 0;
-
- /* alloc new mcam entry */
- alloc_req.hdr.pcifunc = pcifunc;
- alloc_req.count = 1;
-
- err = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &alloc_req,
- &alloc_rsp);
- if (err)
- return err;
-
- /* update entry to enable rxvlan offload */
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
- if (blkaddr < 0) {
- err = NIX_AF_ERR_AF_LF_INVALID;
- goto free_entry;
- }
-
- nixlf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr], pcifunc, 0);
- if (nixlf < 0) {
- err = NIX_AF_ERR_AF_LF_INVALID;
- goto free_entry;
- }
-
- pfvf->rxvlan_index = alloc_rsp.entry_list[0];
- /* all it means is that rxvlan_index is valid */
- pfvf->rxvlan = true;
-
- err = rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
- if (err)
- goto free_entry;
-
- return 0;
-free_entry:
- free_req.hdr.pcifunc = pcifunc;
- free_req.entry = alloc_rsp.entry_list[0];
- rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, rsp);
- pfvf->rxvlan = false;
- return err;
+ rvu_cgx_get_fifolen(rvu) /
+ cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
+ return nix_config_link_credits(rvu, blkaddr, link, pcifunc,
+ (lmac_fifo_len - req->maxlen) / 16);
}
int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
@@ -2967,6 +4208,11 @@ int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
else
cfg &= ~BIT_ULL(40);
+ if (req->len_verify & NIX_RX_DROP_RE)
+ cfg |= BIT_ULL(32);
+ else
+ cfg &= ~BIT_ULL(32);
+
if (req->csum_verify & BIT(0))
cfg |= BIT_ULL(37);
else
@@ -2977,11 +4223,25 @@ int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
return 0;
}
-static void nix_link_config(struct rvu *rvu, int blkaddr)
+static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs)
+{
+ /* CN10K supports a 72KB FIFO and a max packet size of 64KB */
+ if (rvu->hw->lbk_bufsize == 0x12000)
+ return (rvu->hw->lbk_bufsize - lbk_max_frs) / 16;
+ else
+ return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */
+}
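
A quick check of the arithmetic in rvu_get_lbk_link_credits() (the max frame size used here is an assumption for illustration): with the 72 KB CN10K LBK FIFO and a frame size close to 64 KB, the credit count works out to roughly 512, while older silicon keeps the fixed 1600.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t lbk_bufsize = 0x12000;   /* 72 KB LBK FIFO on CN10K */
        uint32_t lbk_max_frs = 0xFFFF;    /* assumed max LBK frame size */

        /* One credit per 16 bytes of FIFO left after one max-size frame. */
        printf("credits = %u\n", (lbk_bufsize - lbk_max_frs) / 16);   /* 512 */
        return 0;
    }
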
+
+static void nix_link_config(struct rvu *rvu, int blkaddr,
+ struct nix_hw *nix_hw)
{
struct rvu_hwinfo *hw = rvu->hw;
int cgx, lmac_cnt, slink, link;
- u64 tx_credits;
+ u16 lbk_max_frs, lmac_max_frs;
+ u64 tx_credits, cfg;
+
+ rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs);
+ rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs);
/* Set default min/max packet lengths allowed on NIX Rx links.
*
@@ -2989,37 +4249,56 @@ static void nix_link_config(struct rvu *rvu, int blkaddr)
* as undersize and report them to SW as error pkts, hence
* setting it to 40 bytes.
*/
- for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++) {
+ for (link = 0; link < hw->cgx_links; link++) {
rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
- NIC_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
+ ((u64)lmac_max_frs << 16) | NIC_HW_MIN_FRS);
}
+ for (link = hw->cgx_links; link < hw->cgx_links + hw->lbk_links; link++) {
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
+ ((u64)lbk_max_frs << 16) | NIC_HW_MIN_FRS);
+ }
if (hw->sdp_links) {
link = hw->cgx_links + hw->lbk_links;
rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
}
+ /* Set the CPT link, i.e. the second pass config */
+ if (hw->cpt_links) {
+ link = hw->cgx_links + hw->lbk_links + hw->sdp_links;
+ /* Use the LBK link's default min/max packet lengths here,
+ * since the LBK range is the widest.
+ */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
+ ((u64)lbk_max_frs << 16) | NIC_HW_MIN_FRS);
+ }
+
/* Set credits for Tx links assuming max packet length allowed.
* This will be reconfigured based on MTU set for PF/VF.
*/
for (cgx = 0; cgx < hw->cgx; cgx++) {
lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
- tx_credits = ((CGX_FIFO_LEN / lmac_cnt) - NIC_HW_MAX_FRS) / 16;
+ /* Skip when cgx is not available or lmac cnt is zero */
+ if (lmac_cnt <= 0)
+ continue;
+ tx_credits = ((rvu_cgx_get_fifolen(rvu) / lmac_cnt) -
+ lmac_max_frs) / 16;
/* Enable credits and set credit pkt count to max allowed */
- tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
+ cfg = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
slink = cgx * hw->lmac_per_cgx;
for (link = slink; link < (slink + lmac_cnt); link++) {
+ nix_hw->tx_credits[link] = tx_credits;
rvu_write64(rvu, blkaddr,
- NIX_AF_TX_LINKX_NORM_CREDIT(link),
- tx_credits);
+ NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
}
}
/* Set Tx credits for LBK link */
slink = hw->cgx_links;
for (link = slink; link < (slink + hw->lbk_links); link++) {
- tx_credits = 1000; /* 10 * max LBK datarate = 10 * 100Gbps */
+ tx_credits = rvu_get_lbk_link_credits(rvu, lbk_max_frs);
+ nix_hw->tx_credits[link] = tx_credits;
/* Enable credits and set credit pkt count to max allowed */
tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
rvu_write64(rvu, blkaddr,
@@ -3113,17 +4392,37 @@ static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
return 0;
}
-int rvu_nix_init(struct rvu *rvu)
+static void rvu_nix_setup_capabilities(struct rvu *rvu, int blkaddr)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u64 hw_const;
+
+ hw_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
+
+ /* On OcteonTx2 the DWRR quantum is configured directly into each of
+ * the transmit scheduler queues, and PF/VF drivers are free to
+ * configure any value up to 2^24.
+ * On CN10K the HW is different: the quantum configured at the scheduler
+ * queues is a weight, and SW must set up a base DWRR MTU
+ * at NIX_AF_DWRR_RPM_MTU / NIX_AF_DWRR_SDP_MTU. HW then computes
+ * 'DWRR MTU * weight' to get the quantum.
+ *
+ * Check if HW uses a common MTU for all DWRR quantum configs.
+ * On OcteonTx2 this register field is '0'.
+ */
+ if (((hw_const >> 56) & 0x10) == 0x10)
+ hw->cap.nix_common_dwrr_mtu = true;
+}
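
To make the comment in rvu_nix_setup_capabilities() concrete, here is a tiny worked example (the MTU and weight values are assumed): OcteonTx2 writes the quantum itself into each scheduler queue, whereas CN10K writes a per-queue weight and derives the quantum from the common base DWRR MTU.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t dwrr_base_mtu = 1500;   /* assumed NIX_AF_DWRR_RPM_MTU value */
        uint32_t weight = 4;             /* assumed per-queue weight on CN10K */

        uint32_t otx2_quantum = 6000;    /* programmed directly, up to 2^24 */
        uint32_t cn10k_quantum = dwrr_base_mtu * weight;   /* derived by HW */

        printf("otx2 %u, cn10k %u\n", otx2_quantum, cn10k_quantum);
        return 0;
    }
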
+
+static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
{
const struct npc_lt_def_cfg *ltdefs;
struct rvu_hwinfo *hw = rvu->hw;
+ int blkaddr = nix_hw->blkaddr;
struct rvu_block *block;
- int blkaddr, err;
+ int err;
u64 cfg;
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
- if (blkaddr < 0)
- return 0;
block = &hw->block[blkaddr];
if (is_rvu_96xx_B0(rvu)) {
@@ -3152,13 +4451,8 @@ int rvu_nix_init(struct rvu *rvu)
if (err)
return err;
- /* Set num of links of each type */
- cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
- hw->cgx = (cfg >> 12) & 0xF;
- hw->lmac_per_cgx = (cfg >> 8) & 0xF;
- hw->cgx_links = hw->cgx * hw->lmac_per_cgx;
- hw->lbk_links = 1;
- hw->sdp_links = 1;
+ /* Setup capabilities of the NIX block */
+ rvu_nix_setup_capabilities(rvu, blkaddr);
/* Initialize admin queue */
err = nix_aq_init(rvu, block);
@@ -3168,26 +4462,41 @@ int rvu_nix_init(struct rvu *rvu)
/* Restore CINT timer delay to HW reset values */
rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
- if (blkaddr == BLKADDR_NIX0) {
- hw->nix0 = devm_kzalloc(rvu->dev,
- sizeof(struct nix_hw), GFP_KERNEL);
- if (!hw->nix0)
- return -ENOMEM;
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SEB_CFG);
- err = nix_setup_txschq(rvu, hw->nix0, blkaddr);
+ /* For better performance use NDC TX instead of NDC RX for SQ's SQEs */
+ cfg |= 1ULL;
+ if (!is_rvu_otx2(rvu))
+ cfg |= NIX_PTP_1STEP_EN;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_SEB_CFG, cfg);
+
+ if (!is_rvu_otx2(rvu))
+ rvu_nix_block_cn10k_init(rvu, nix_hw);
+
+ if (is_block_implemented(hw, blkaddr)) {
+ err = nix_setup_txschq(rvu, nix_hw, blkaddr);
if (err)
return err;
- err = nix_af_mark_format_setup(rvu, hw->nix0, blkaddr);
+ err = nix_setup_ipolicers(rvu, nix_hw, blkaddr);
if (err)
return err;
- err = nix_setup_mcast(rvu, hw->nix0, blkaddr);
+ err = nix_af_mark_format_setup(rvu, nix_hw, blkaddr);
+ if (err)
+ return err;
+
+ err = nix_setup_mcast(rvu, nix_hw, blkaddr);
+ if (err)
+ return err;
+
+ err = nix_setup_txvlan(rvu, nix_hw);
if (err)
return err;
/* Configure segmentation offload formats */
- nix_setup_lso(rvu, hw->nix0, blkaddr);
+ nix_setup_lso(rvu, nix_hw, blkaddr);
/* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info.
* This helps HW protocol checker to identify headers
@@ -3227,49 +4536,155 @@ int rvu_nix_init(struct rvu *rvu)
(ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) |
ltdefs->rx_isctp.ltype_mask);
+ if (!is_rvu_otx2(rvu)) {
+ /* Enable APAD calculation for other protocols
+ * matching APAD0 and APAD1 lt def registers.
+ */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD0,
+ (ltdefs->rx_apad0.valid << 11) |
+ (ltdefs->rx_apad0.lid << 8) |
+ (ltdefs->rx_apad0.ltype_match << 4) |
+ ltdefs->rx_apad0.ltype_mask);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD1,
+ (ltdefs->rx_apad1.valid << 11) |
+ (ltdefs->rx_apad1.lid << 8) |
+ (ltdefs->rx_apad1.ltype_match << 4) |
+ ltdefs->rx_apad1.ltype_mask);
+
+ /* The receive ethertype definition register defines the layer
+ * information in NPC_RESULT_S used to identify the Ethertype
+ * location in the L2 header. Used for Ethertype overwriting
+ * in the inline IPsec flow.
+ */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(0),
+ (ltdefs->rx_et[0].offset << 12) |
+ (ltdefs->rx_et[0].valid << 11) |
+ (ltdefs->rx_et[0].lid << 8) |
+ (ltdefs->rx_et[0].ltype_match << 4) |
+ ltdefs->rx_et[0].ltype_mask);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(1),
+ (ltdefs->rx_et[1].offset << 12) |
+ (ltdefs->rx_et[1].valid << 11) |
+ (ltdefs->rx_et[1].lid << 8) |
+ (ltdefs->rx_et[1].ltype_match << 4) |
+ ltdefs->rx_et[1].ltype_mask);
+ }
+
err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
if (err)
return err;
+ nix_hw->tx_credits = kcalloc(hw->cgx_links + hw->lbk_links,
+ sizeof(u64), GFP_KERNEL);
+ if (!nix_hw->tx_credits)
+ return -ENOMEM;
+
/* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
- nix_link_config(rvu, blkaddr);
+ nix_link_config(rvu, blkaddr, nix_hw);
/* Enable Channel backpressure */
rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0));
+
+ err = rvu_nix_fixes_init(rvu, nix_hw, blkaddr);
+ if (err)
+ return err;
+
+ if (is_block_implemented(rvu->hw, BLKADDR_CPT0)) {
+ /* Config IPSec headers identification */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IPSECX(0),
+ (ltdefs->rx_ipsec[0].lid << 8) |
+ (ltdefs->rx_ipsec[0].ltype_match << 4) |
+ ltdefs->rx_ipsec[0].ltype_mask);
+
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IPSECX(1),
+ (ltdefs->rx_ipsec[1].spi_offset << 12) |
+ (ltdefs->rx_ipsec[1].lid << 8) |
+ (ltdefs->rx_ipsec[1].ltype_match << 4) |
+ ltdefs->rx_ipsec[1].ltype_mask);
+ }
}
+
return 0;
}
-void rvu_nix_freemem(struct rvu *rvu)
+int rvu_nix_init(struct rvu *rvu)
{
struct rvu_hwinfo *hw = rvu->hw;
- struct rvu_block *block;
+ struct nix_hw *nix_hw;
+ int blkaddr = 0, err;
+ int i = 0;
+
+ hw->nix = devm_kcalloc(rvu->dev, MAX_NIX_BLKS, sizeof(struct nix_hw),
+ GFP_KERNEL);
+ if (!hw->nix)
+ return -ENOMEM;
+
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ while (blkaddr) {
+ nix_hw = &hw->nix[i];
+ nix_hw->rvu = rvu;
+ nix_hw->blkaddr = blkaddr;
+ err = rvu_nix_block_init(rvu, nix_hw);
+ if (err)
+ return err;
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ i++;
+ }
+
+ return 0;
+}
+
+static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr,
+ struct rvu_block *block)
+{
struct nix_txsch *txsch;
struct nix_mcast *mcast;
+ struct nix_txvlan *vlan;
struct nix_hw *nix_hw;
- int blkaddr, lvl;
-
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
- if (blkaddr < 0)
- return;
+ int lvl;
- block = &hw->block[blkaddr];
rvu_aq_free(rvu, block->aq);
- if (blkaddr == BLKADDR_NIX0) {
+ if (is_block_implemented(rvu->hw, blkaddr)) {
nix_hw = get_nix_hw(rvu->hw, blkaddr);
- if (!nix_hw)
+ if (!nix_hw) {
+ dev_err(rvu->dev, "Unable to free %s memory\n",
+ block->name);
return;
+ }
for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
txsch = &nix_hw->txsch[lvl];
kfree(txsch->schq.bmap);
}
+ kfree(nix_hw->tx_credits);
+
+ nix_ipolicer_freemem(rvu, nix_hw);
+
+ vlan = &nix_hw->txvlan;
+ kfree(vlan->rsrc.bmap);
+ mutex_destroy(&vlan->rsrc_lock);
+
mcast = &nix_hw->mcast;
qmem_free(rvu->dev, mcast->mce_ctx);
qmem_free(rvu->dev, mcast->mcast_buf);
mutex_destroy(&mcast->mce_lock);
+ rvu_nix_fixes_exit(rvu, nix_hw);
+ }
+}
+
+void rvu_nix_freemem(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int blkaddr = 0;
+
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ while (blkaddr) {
+ block = &hw->block[blkaddr];
+ rvu_nix_block_freemem(rvu, blkaddr, block);
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
}
}
@@ -3277,6 +4692,7 @@ int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
int nixlf, err;
err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
@@ -3285,6 +4701,13 @@ int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);
+ npc_mcam_enable_flows(rvu, pcifunc);
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ set_bit(NIXLF_INITIALIZED, &pfvf->flags);
+
+ rvu_switch_update_rules(rvu, pcifunc);
+
return rvu_cgx_start_stop_io(rvu, pcifunc, true);
}
@@ -3292,30 +4715,44 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
int nixlf, err;
err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
if (err)
return err;
- rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
+ rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
return rvu_cgx_start_stop_io(rvu, pcifunc, false);
}
+#define RX_SA_BASE GENMASK_ULL(52, 7)
+
void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
struct hwctx_disable_req ctx_req;
+ int pf = rvu_get_pf(pcifunc);
+ u8 cgx_id, lmac_id;
+ u64 sa_base;
+ void *cgxd;
int err;
ctx_req.hdr.pcifunc = pcifunc;
/* Cleanup NPC MCAM entries, free Tx scheduler queues being used */
+ rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
+ rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
nix_interface_deinit(rvu, pcifunc, nixlf);
nix_rx_sync(rvu, blkaddr);
nix_txschq_free(rvu, pcifunc);
+ clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
+
rvu_cgx_start_stop_io(rvu, pcifunc, false);
if (pfvf->sq_ctx) {
@@ -3339,7 +4776,48 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
dev_err(rvu->dev, "CQ ctx disable failed\n");
}
+ /* Disable the CGX and NPC config done for PTP */
+ if (pfvf->hw_rx_tstamp_en) {
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ cgx_lmac_ptp_config(cgxd, lmac_id, false);
+ /* Undo NPC config done for PTP */
+ if (npc_config_ts_kpuaction(rvu, pf, pcifunc, false))
+ dev_err(rvu->dev, "NPC config for PTP failed\n");
+ pfvf->hw_rx_tstamp_en = false;
+ }
+
+ /* reset HW config done for Switch headers */
+ rvu_npc_set_parse_mode(rvu, pcifunc, OTX2_PRIV_FLAGS_DEFAULT,
+ (PKIND_TX | PKIND_RX), 0, 0, 0, 0);
+
+ /* reset priority flow control config */
+ rvu_cgx_prio_flow_ctrl_cfg(rvu, pcifunc, 0, 0, 0);
+
+ /* reset 802.3x flow control config */
+ rvu_cgx_cfg_pause_frm(rvu, pcifunc, 0, 0);
+
nix_ctx_free(rvu, pfvf);
+
+ nix_free_all_bandprof(rvu, pcifunc);
+
+ sa_base = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(nixlf));
+ if (FIELD_GET(RX_SA_BASE, sa_base)) {
+ err = rvu_cpt_ctx_flush(rvu, pcifunc);
+ if (err)
+ dev_err(rvu->dev,
+ "CPT ctx flush failed with error: %d\n", err);
+ }
+
+ if (is_block_implemented(rvu->hw, BLKADDR_CPT0)) {
+ /* reset the configuration related to inline ipsec */
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(nixlf),
+ 0x0);
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(nixlf),
+ 0x0);
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(nixlf),
+ 0x0);
+ }
}
#define NIX_AF_LFX_TX_CFG_PTP_EN BIT_ULL(32)
@@ -3348,10 +4826,18 @@ static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
{
struct rvu_hwinfo *hw = rvu->hw;
struct rvu_block *block;
- int blkaddr;
+ int blkaddr, pf;
int nixlf;
u64 cfg;
+ pf = rvu_get_pf(pcifunc);
+ if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
+ return 0;
+
+ /* Silicon does not support enabling timestamping in HiGig2 mode */
+ if (rvu_cgx_is_higig2_enabled(rvu, rvu_get_pf(pcifunc)))
+ return NIX_AF_ERR_PTP_CONFIG_FAIL;
+
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (blkaddr < 0)
return NIX_AF_ERR_AF_LF_INVALID;
@@ -3402,7 +4888,7 @@ int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
- return -EINVAL;
+ return NIX_AF_ERR_INVALID_NIXBLK;
/* Find existing matching LSO format, if any */
for (idx = 0; idx < nix_hw->lso.in_use; idx++) {
@@ -3435,3 +4921,797 @@ int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
return 0;
}
+
+int rvu_mbox_handler_nix_set_vlan_tpid(struct rvu *rvu,
+ struct nix_set_vlan_tpid *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ int nixlf, err, blkaddr;
+ u64 cfg;
+
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
+ if (err)
+ return err;
+
+ if (req->vlan_type != NIX_VLAN_TYPE_OUTER &&
+ req->vlan_type != NIX_VLAN_TYPE_INNER)
+ return NIX_AF_ERR_PARAM;
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf));
+
+ if (req->vlan_type == NIX_VLAN_TYPE_OUTER)
+ cfg = (cfg & ~GENMASK_ULL(15, 0)) | req->tpid;
+ else
+ cfg = (cfg & ~GENMASK_ULL(31, 16)) | ((u64)req->tpid << 16);
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
+ return 0;
+}
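
The handler above is a read-modify-write of one 16-bit TPID field inside the 64-bit LF TX config register: the outer TPID lives in bits 15:0 and the inner TPID in bits 31:16. A stand-alone sketch of the same masking (helper name and TPID values are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* Replace the outer (bits 15:0) or inner (bits 31:16) TPID field. */
    static uint64_t set_tpid(uint64_t cfg, uint16_t tpid, int outer)
    {
        if (outer)
            return (cfg & ~0xFFFFULL) | tpid;
        return (cfg & ~0xFFFF0000ULL) | ((uint64_t)tpid << 16);
    }

    int main(void)
    {
        uint64_t cfg = 0;

        cfg = set_tpid(cfg, 0x88A8, 1);   /* outer: e.g. 802.1ad S-tag */
        cfg = set_tpid(cfg, 0x8100, 0);   /* inner: e.g. 802.1Q C-tag */
        printf("cfg = 0x%llx\n", (unsigned long long)cfg);
        return 0;
    }
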
+
+#define IPSEC_GEN_CFG_EGRP GENMASK_ULL(50, 48)
+#define IPSEC_GEN_CFG_OPCODE GENMASK_ULL(47, 32)
+#define IPSEC_GEN_CFG_PARAM1 GENMASK_ULL(31, 16)
+#define IPSEC_GEN_CFG_PARAM2 GENMASK_ULL(15, 0)
+
+#define CPT_INST_QSEL_BLOCK GENMASK_ULL(28, 24)
+#define CPT_INST_QSEL_PF_FUNC GENMASK_ULL(23, 8)
+#define CPT_INST_QSEL_SLOT GENMASK_ULL(7, 0)
+
+static void nix_inline_ipsec_cfg(struct rvu *rvu, struct nix_inline_ipsec_cfg *req,
+ int blkaddr)
+{
+ u8 cpt_idx, cpt_blkaddr;
+ u64 val = 0;
+
+ cpt_idx = (blkaddr == BLKADDR_NIX0) ? 0 : 1;
+ if (req->enable) {
+ /* Enable context prefetching */
+ if (!is_rvu_otx2(rvu))
+ val |= BIT_ULL(51);
+
+ /* Set OPCODE and EGRP */
+ val |= FIELD_PREP(IPSEC_GEN_CFG_EGRP, req->gen_cfg.egrp);
+ val |= FIELD_PREP(IPSEC_GEN_CFG_OPCODE, req->gen_cfg.opcode);
+ val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM1, req->gen_cfg.param1);
+ val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM2, req->gen_cfg.param2);
+
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, val);
+
+ /* Set CPT queue for inline IPSec */
+ val = FIELD_PREP(CPT_INST_QSEL_SLOT, req->inst_qsel.cpt_slot);
+ val |= FIELD_PREP(CPT_INST_QSEL_PF_FUNC,
+ req->inst_qsel.cpt_pf_func);
+
+ if (!is_rvu_otx2(rvu)) {
+ cpt_blkaddr = (cpt_idx == 0) ? BLKADDR_CPT0 :
+ BLKADDR_CPT1;
+ val |= FIELD_PREP(CPT_INST_QSEL_BLOCK, cpt_blkaddr);
+ }
+
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx),
+ val);
+
+ /* Set CPT credit */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
+ req->cpt_credit);
+ } else {
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, 0x0);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx),
+ 0x0);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
+ 0x3FFFFF);
+ }
+}
+
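
nix_inline_ipsec_cfg() above assembles NIX_AF_RX_IPSEC_GEN_CFG with FIELD_PREP against the GENMASK-defined fields (EGRP in bits 50:48, OPCODE in 47:32, PARAM1 in 31:16, PARAM2 in 15:0). Outside the kernel the same packing is just shifts and masks; a minimal sketch with an illustrative helper name:

    #include <stdint.h>
    #include <stdio.h>

    /* Pack the general-config fields into one 64-bit register value. */
    static uint64_t pack_ipsec_gen_cfg(uint8_t egrp, uint16_t opcode,
                                       uint16_t param1, uint16_t param2)
    {
        return ((uint64_t)(egrp & 0x7) << 48) |   /* EGRP,   bits 50:48 */
               ((uint64_t)opcode << 32)       |   /* OPCODE, bits 47:32 */
               ((uint64_t)param1 << 16)       |   /* PARAM1, bits 31:16 */
               param2;                            /* PARAM2, bits 15:0  */
    }

    int main(void)
    {
        printf("0x%llx\n",
               (unsigned long long)pack_ipsec_gen_cfg(1, 0x1234, 16, 0));
        return 0;
    }
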
+int rvu_mbox_handler_nix_inline_ipsec_cfg(struct rvu *rvu,
+ struct nix_inline_ipsec_cfg *req,
+ struct msg_rsp *rsp)
+{
+ if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
+ return 0;
+
+ nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX0);
+ if (is_block_implemented(rvu->hw, BLKADDR_CPT1))
+ nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX1);
+
+ return 0;
+}
+
+int rvu_mbox_handler_nix_inline_ipsec_lf_cfg(struct rvu *rvu,
+                                             struct nix_inline_ipsec_lf_cfg *req,
+                                             struct msg_rsp *rsp)
+{
+ int lf, blkaddr, err;
+ u64 val;
+
+ if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
+ return 0;
+
+ err = nix_get_nixlf(rvu, req->hdr.pcifunc, &lf, &blkaddr);
+ if (err)
+ return err;
+
+ if (req->enable) {
+ /* Set TT, TAG_CONST, SA_POW2_SIZE and LENM1_MAX */
+ val = (u64)req->ipsec_cfg0.tt << 44 |
+ (u64)req->ipsec_cfg0.tag_const << 20 |
+ (u64)req->ipsec_cfg0.sa_pow2_size << 16 |
+ req->ipsec_cfg0.lenm1_max;
+
+ if (blkaddr == BLKADDR_NIX1)
+ val |= BIT_ULL(46);
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), val);
+
+ /* Set SA_IDX_W and SA_IDX_MAX */
+ val = (u64)req->ipsec_cfg1.sa_idx_w << 32 |
+ req->ipsec_cfg1.sa_idx_max;
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), val);
+
+ /* Set SA base address */
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf),
+ req->sa_base_addr);
+ } else {
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), 0x0);
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), 0x0);
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf),
+ 0x0);
+ }
+
+ return 0;
+}
+
+void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc)
+{
+ bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
+
+ /* overwrite vf mac address with default_mac */
+ if (from_vf)
+ ether_addr_copy(pfvf->mac_addr, pfvf->default_mac);
+}
+
+bool rvu_nix_is_ptp_tx_enabled(struct rvu *rvu, u16 pcifunc)
+{
+ int blkaddr, nixlf, err;
+ u64 cfg;
+
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
+ if (err)
+ return false;
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf));
+ return (cfg & BIT_ULL(32));
+}
+
+/* NIX ingress policers or bandwidth profiles APIs */
+static void nix_config_rx_pkt_policer_precolor(struct rvu *rvu, int blkaddr)
+{
+ struct npc_lt_def_cfg defs, *ltdefs;
+
+ ltdefs = &defs;
+ memcpy(ltdefs, rvu->kpu.lt_def, sizeof(struct npc_lt_def_cfg));
+
+ /* Extract the PCP and DEI fields from the outer VLAN at byte offset
+ * 2 from the start of LB_PTR (i.e. the TAG).
+ * VLAN0 is the outer VLAN and VLAN1 is the inner VLAN. Inner VLAN
+ * fields are considered only when 'Tunnel enable' is set in the profile.
+ */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN0_PCP_DEI,
+ (2UL << 12) | (ltdefs->ovlan.lid << 8) |
+ (ltdefs->ovlan.ltype_match << 4) |
+ ltdefs->ovlan.ltype_mask);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN1_PCP_DEI,
+ (2UL << 12) | (ltdefs->ivlan.lid << 8) |
+ (ltdefs->ivlan.ltype_match << 4) |
+ ltdefs->ivlan.ltype_mask);
+
+ /* DSCP field in outer and tunneled IPv4 packets */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4_DSCP,
+ (1UL << 12) | (ltdefs->rx_oip4.lid << 8) |
+ (ltdefs->rx_oip4.ltype_match << 4) |
+ ltdefs->rx_oip4.ltype_mask);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4_DSCP,
+ (1UL << 12) | (ltdefs->rx_iip4.lid << 8) |
+ (ltdefs->rx_iip4.ltype_match << 4) |
+ ltdefs->rx_iip4.ltype_mask);
+
+ /* DSCP field (traffic class) in outer and tunneled IPv6 packets */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6_DSCP,
+ (1UL << 11) | (ltdefs->rx_oip6.lid << 8) |
+ (ltdefs->rx_oip6.ltype_match << 4) |
+ ltdefs->rx_oip6.ltype_mask);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6_DSCP,
+ (1UL << 11) | (ltdefs->rx_iip6.lid << 8) |
+ (ltdefs->rx_iip6.ltype_match << 4) |
+ ltdefs->rx_iip6.ltype_mask);
+}
+
+static int nix_init_policer_context(struct rvu *rvu, struct nix_hw *nix_hw,
+ int layer, int prof_idx)
+{
+ struct nix_cn10k_aq_enq_req aq_req;
+ int rc;
+
+ memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
+
+ aq_req.qidx = (prof_idx & 0x3FFF) | (layer << 14);
+ aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
+ aq_req.op = NIX_AQ_INSTOP_INIT;
+
+ /* Context is all zeros, submit to AQ */
+ rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
+ (struct nix_aq_enq_req *)&aq_req, NULL);
+ if (rc)
+ dev_err(rvu->dev, "Failed to INIT bandwidth profile layer %d profile %d\n",
+ layer, prof_idx);
+ return rc;
+}
+
+static int nix_setup_ipolicers(struct rvu *rvu,
+ struct nix_hw *nix_hw, int blkaddr)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct nix_ipolicer *ipolicer;
+ int err, layer, prof_idx;
+ u64 cfg;
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
+ if (!(cfg & BIT_ULL(61))) {
+ hw->cap.ipolicer = false;
+ return 0;
+ }
+
+ hw->cap.ipolicer = true;
+ nix_hw->ipolicer = devm_kcalloc(rvu->dev, BAND_PROF_NUM_LAYERS,
+ sizeof(*ipolicer), GFP_KERNEL);
+ if (!nix_hw->ipolicer)
+ return -ENOMEM;
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_PL_CONST);
+
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ ipolicer = &nix_hw->ipolicer[layer];
+ switch (layer) {
+ case BAND_PROF_LEAF_LAYER:
+ ipolicer->band_prof.max = cfg & 0XFFFF;
+ break;
+ case BAND_PROF_MID_LAYER:
+ ipolicer->band_prof.max = (cfg >> 16) & 0XFFFF;
+ break;
+ case BAND_PROF_TOP_LAYER:
+ ipolicer->band_prof.max = (cfg >> 32) & 0XFFFF;
+ break;
+ }
+
+ if (!ipolicer->band_prof.max)
+ continue;
+
+ err = rvu_alloc_bitmap(&ipolicer->band_prof);
+ if (err)
+ return err;
+
+ ipolicer->pfvf_map = devm_kcalloc(rvu->dev,
+ ipolicer->band_prof.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!ipolicer->pfvf_map)
+ return -ENOMEM;
+
+ ipolicer->match_id = devm_kcalloc(rvu->dev,
+ ipolicer->band_prof.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!ipolicer->match_id)
+ return -ENOMEM;
+
+ for (prof_idx = 0;
+ prof_idx < ipolicer->band_prof.max; prof_idx++) {
+ /* Set AF as current owner for INIT ops to succeed */
+ ipolicer->pfvf_map[prof_idx] = 0x00;
+
+ /* There is no enable bit in the profile context,
+ * hence no context disable either. INIT them here
+ * so that later on a PF/VF only has to do a WRITE to
+ * set up policer rates and config.
+ */
+ err = nix_init_policer_context(rvu, nix_hw,
+ layer, prof_idx);
+ if (err)
+ return err;
+ }
+
+ /* Allocate memory for maintaining ref_counts for MID level
+ * profiles; this is needed when aggregating leaf layer
+ * profiles.
+ */
+ if (layer != BAND_PROF_MID_LAYER)
+ continue;
+
+ ipolicer->ref_count = devm_kcalloc(rvu->dev,
+ ipolicer->band_prof.max,
+ sizeof(u16), GFP_KERNEL);
+ }
+
+ /* Set policer timeunit to 2us, i.e. (19 + 1) * 100 nsec = 2us */
+ rvu_write64(rvu, blkaddr, NIX_AF_PL_TS, 19);
+
+ nix_config_rx_pkt_policer_precolor(rvu, blkaddr);
+
+ return 0;
+}
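
nix_setup_ipolicers() sizes the three policer layers from a single constant register: the leaf profile count sits in bits 15:0, mid in 31:16 and top in 47:32. A small decode sketch with a made-up register value:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Made-up NIX_AF_PL_CONST value: 32 top, 64 mid, 600 leaf profiles. */
        uint64_t pl_const = ((uint64_t)32 << 32) | (64ULL << 16) | 600;

        printf("leaf %llu, mid %llu, top %llu\n",
               (unsigned long long)(pl_const & 0xFFFF),
               (unsigned long long)((pl_const >> 16) & 0xFFFF),
               (unsigned long long)((pl_const >> 32) & 0xFFFF));
        return 0;
    }
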
+
+static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw)
+{
+ struct nix_ipolicer *ipolicer;
+ int layer;
+
+ if (!rvu->hw->cap.ipolicer)
+ return;
+
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ ipolicer = &nix_hw->ipolicer[layer];
+
+ if (!ipolicer->band_prof.max)
+ continue;
+
+ kfree(ipolicer->band_prof.bmap);
+ }
+}
+
+static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
+ struct nix_hw *nix_hw, u16 pcifunc)
+{
+ struct nix_ipolicer *ipolicer;
+ int layer, hi_layer, prof_idx;
+
+ /* Bits [15:14] in profile index represent layer */
+ layer = (req->qidx >> 14) & 0x03;
+ prof_idx = req->qidx & 0x3FFF;
+
+ ipolicer = &nix_hw->ipolicer[layer];
+ if (prof_idx >= ipolicer->band_prof.max)
+ return -EINVAL;
+
+ /* Check if the profile is allocated to the requesting PCIFUNC or not
+ * with the exception of AF. AF is allowed to read and update contexts.
+ */
+ if (pcifunc && ipolicer->pfvf_map[prof_idx] != pcifunc)
+ return -EINVAL;
+
+ /* If this profile is linked to higher layer profile then check
+ * if that profile is also allocated to the requesting PCIFUNC
+ * or not.
+ */
+ if (!req->prof.hl_en)
+ return 0;
+
+ /* Leaf layer profile can link only to mid layer and
+ * mid layer to top layer.
+ */
+ if (layer == BAND_PROF_LEAF_LAYER)
+ hi_layer = BAND_PROF_MID_LAYER;
+ else if (layer == BAND_PROF_MID_LAYER)
+ hi_layer = BAND_PROF_TOP_LAYER;
+ else
+ return -EINVAL;
+
+ ipolicer = &nix_hw->ipolicer[hi_layer];
+ prof_idx = req->prof.band_prof_id;
+ if (prof_idx >= ipolicer->band_prof.max ||
+ ipolicer->pfvf_map[prof_idx] != pcifunc)
+ return -EINVAL;
+
+ return 0;
+}
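
As nix_verify_bandprof() shows, the AQ queue index for bandwidth profiles packs the layer into bits 15:14 and the profile index into bits 13:0 (the same encoding nix_init_policer_context() uses when submitting INIT). A tiny encode/decode check for reference:

    #include <assert.h>
    #include <stdint.h>

    static uint16_t bandprof_qidx(uint8_t layer, uint16_t prof_idx)
    {
        return (uint16_t)((prof_idx & 0x3FFF) | ((layer & 0x3) << 14));
    }

    int main(void)
    {
        uint16_t qidx = bandprof_qidx(1 /* mid layer */, 42);

        assert(((qidx >> 14) & 0x3) == 1);   /* layer recovered */
        assert((qidx & 0x3FFF) == 42);       /* profile index recovered */
        return 0;
    }
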
+
+int rvu_mbox_handler_nix_bandprof_alloc(struct rvu *rvu,
+ struct nix_bandprof_alloc_req *req,
+ struct nix_bandprof_alloc_rsp *rsp)
+{
+ int blkaddr, layer, prof, idx, err;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct nix_ipolicer *ipolicer;
+ struct nix_hw *nix_hw;
+
+ if (!rvu->hw->cap.ipolicer)
+ return NIX_AF_ERR_IPOLICER_NOTSUPP;
+
+ err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return err;
+
+ mutex_lock(&rvu->rsrc_lock);
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ if (layer == BAND_PROF_INVAL_LAYER)
+ continue;
+ if (!req->prof_count[layer])
+ continue;
+
+ ipolicer = &nix_hw->ipolicer[layer];
+ for (idx = 0; idx < req->prof_count[layer]; idx++) {
+ /* Allocate a max of 'MAX_BANDPROF_PER_PFFUNC' profiles */
+ if (idx == MAX_BANDPROF_PER_PFFUNC)
+ break;
+
+ prof = rvu_alloc_rsrc(&ipolicer->band_prof);
+ if (prof < 0)
+ break;
+ rsp->prof_count[layer]++;
+ rsp->prof_idx[layer][idx] = prof;
+ ipolicer->pfvf_map[prof] = pcifunc;
+ }
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+ return 0;
+}
+
+static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc)
+{
+ int blkaddr, layer, prof_idx, err;
+ struct nix_ipolicer *ipolicer;
+ struct nix_hw *nix_hw;
+
+ if (!rvu->hw->cap.ipolicer)
+ return NIX_AF_ERR_IPOLICER_NOTSUPP;
+
+ err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return err;
+
+ mutex_lock(&rvu->rsrc_lock);
+ /* Free all the profiles allocated to the PCIFUNC */
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ if (layer == BAND_PROF_INVAL_LAYER)
+ continue;
+ ipolicer = &nix_hw->ipolicer[layer];
+
+ for (prof_idx = 0; prof_idx < ipolicer->band_prof.max; prof_idx++) {
+ if (ipolicer->pfvf_map[prof_idx] != pcifunc)
+ continue;
+
+ /* Clear ratelimit aggregation, if any */
+ if (layer == BAND_PROF_LEAF_LAYER &&
+ ipolicer->match_id[prof_idx])
+ nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);
+
+ ipolicer->pfvf_map[prof_idx] = 0x00;
+ ipolicer->match_id[prof_idx] = 0;
+ rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
+ }
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+ return 0;
+}
+
+int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu,
+ struct nix_bandprof_free_req *req,
+ struct msg_rsp *rsp)
+{
+ int blkaddr, layer, prof_idx, idx, err;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct nix_ipolicer *ipolicer;
+ struct nix_hw *nix_hw;
+
+ if (req->free_all)
+ return nix_free_all_bandprof(rvu, pcifunc);
+
+ if (!rvu->hw->cap.ipolicer)
+ return NIX_AF_ERR_IPOLICER_NOTSUPP;
+
+ err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return err;
+
+ mutex_lock(&rvu->rsrc_lock);
+ /* Free the requested profile indices */
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ if (layer == BAND_PROF_INVAL_LAYER)
+ continue;
+ if (!req->prof_count[layer])
+ continue;
+
+ ipolicer = &nix_hw->ipolicer[layer];
+ for (idx = 0; idx < req->prof_count[layer]; idx++) {
+ prof_idx = req->prof_idx[layer][idx];
+ if (prof_idx >= ipolicer->band_prof.max ||
+ ipolicer->pfvf_map[prof_idx] != pcifunc)
+ continue;
+
+ /* Clear ratelimit aggregation, if any */
+ if (layer == BAND_PROF_LEAF_LAYER &&
+ ipolicer->match_id[prof_idx])
+ nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);
+
+ ipolicer->pfvf_map[prof_idx] = 0x00;
+ ipolicer->match_id[prof_idx] = 0;
+ rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
+ if (idx == MAX_BANDPROF_PER_PFFUNC)
+ break;
+ }
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+ return 0;
+}
+
+int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw,
+ struct nix_cn10k_aq_enq_req *aq_req,
+ struct nix_cn10k_aq_enq_rsp *aq_rsp,
+ u16 pcifunc, u8 ctype, u32 qidx)
+{
+ memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
+ aq_req->hdr.pcifunc = pcifunc;
+ aq_req->ctype = ctype;
+ aq_req->op = NIX_AQ_INSTOP_READ;
+ aq_req->qidx = qidx;
+
+ return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
+ (struct nix_aq_enq_req *)aq_req,
+ (struct nix_aq_enq_rsp *)aq_rsp);
+}
+
+static int nix_ipolicer_map_leaf_midprofs(struct rvu *rvu,
+ struct nix_hw *nix_hw,
+ struct nix_cn10k_aq_enq_req *aq_req,
+ struct nix_cn10k_aq_enq_rsp *aq_rsp,
+ u32 leaf_prof, u16 mid_prof)
+{
+ memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
+ aq_req->hdr.pcifunc = 0x00;
+ aq_req->ctype = NIX_AQ_CTYPE_BANDPROF;
+ aq_req->op = NIX_AQ_INSTOP_WRITE;
+ aq_req->qidx = leaf_prof;
+
+ aq_req->prof.band_prof_id = mid_prof;
+ aq_req->prof_mask.band_prof_id = GENMASK(6, 0);
+ aq_req->prof.hl_en = 1;
+ aq_req->prof_mask.hl_en = 1;
+
+ return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
+ (struct nix_aq_enq_req *)aq_req,
+ (struct nix_aq_enq_rsp *)aq_rsp);
+}
+
+int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc,
+ u16 rq_idx, u16 match_id)
+{
+ int leaf_prof, mid_prof, leaf_match;
+ struct nix_cn10k_aq_enq_req aq_req;
+ struct nix_cn10k_aq_enq_rsp aq_rsp;
+ struct nix_ipolicer *ipolicer;
+ struct nix_hw *nix_hw;
+ int blkaddr, idx, rc;
+
+ if (!rvu->hw->cap.ipolicer)
+ return 0;
+
+ rc = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
+ if (rc)
+ return rc;
+
+ /* Fetch the RQ's context to see if policing is enabled */
+ rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, pcifunc,
+ NIX_AQ_CTYPE_RQ, rq_idx);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to fetch RQ%d context of PFFUNC 0x%x\n",
+ __func__, rq_idx, pcifunc);
+ return rc;
+ }
+
+ if (!aq_rsp.rq.policer_ena)
+ return 0;
+
+ /* Get the bandwidth profile ID mapped to this RQ */
+ leaf_prof = aq_rsp.rq.band_prof_id;
+
+ ipolicer = &nix_hw->ipolicer[BAND_PROF_LEAF_LAYER];
+ ipolicer->match_id[leaf_prof] = match_id;
+
+ /* Check if any other leaf profile is marked with same match_id */
+ for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
+ if (idx == leaf_prof)
+ continue;
+ if (ipolicer->match_id[idx] != match_id)
+ continue;
+
+ leaf_match = idx;
+ break;
+ }
+
+ if (idx == ipolicer->band_prof.max)
+ return 0;
+
+ /* Fetch the matching profile's context to check if it's already
+ * mapped to a mid level profile.
+ */
+ rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
+ NIX_AQ_CTYPE_BANDPROF, leaf_match);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to fetch context of leaf profile %d\n",
+ __func__, leaf_match);
+ return rc;
+ }
+
+ ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
+ if (aq_rsp.prof.hl_en) {
+ /* Get the mid layer profile index and map leaf_prof to it
+ * as well, so that flows steered to different RQs but
+ * marked with the same match_id are rate limited in an
+ * aggregate fashion.
+ */
+ mid_prof = aq_rsp.prof.band_prof_id;
+ rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
+ &aq_req, &aq_rsp,
+ leaf_prof, mid_prof);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to map leaf(%d) and mid(%d) profiles\n",
+ __func__, leaf_prof, mid_prof);
+ goto exit;
+ }
+
+ mutex_lock(&rvu->rsrc_lock);
+ ipolicer->ref_count[mid_prof]++;
+ mutex_unlock(&rvu->rsrc_lock);
+ goto exit;
+ }
+
+ /* Allocate a mid layer profile and
+ * map both 'leaf_prof' and 'leaf_match' profiles to it.
+ */
+ mutex_lock(&rvu->rsrc_lock);
+ mid_prof = rvu_alloc_rsrc(&ipolicer->band_prof);
+ if (mid_prof < 0) {
+ dev_err(rvu->dev,
+ "%s: Unable to allocate mid layer profile\n", __func__);
+ mutex_unlock(&rvu->rsrc_lock);
+ goto exit;
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+ ipolicer->pfvf_map[mid_prof] = 0x00;
+ ipolicer->ref_count[mid_prof] = 0;
+
+ /* Initialize mid layer profile same as 'leaf_prof' */
+ rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
+ NIX_AQ_CTYPE_BANDPROF, leaf_prof);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to fetch context of leaf profile %d\n",
+ __func__, leaf_prof);
+ goto exit;
+ }
+
+ memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
+ aq_req.hdr.pcifunc = 0x00;
+ aq_req.qidx = (mid_prof & 0x3FFF) | (BAND_PROF_MID_LAYER << 14);
+ aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
+ aq_req.op = NIX_AQ_INSTOP_WRITE;
+ memcpy(&aq_req.prof, &aq_rsp.prof, sizeof(struct nix_bandprof_s));
+ memset((char *)&aq_req.prof_mask, 0xff, sizeof(struct nix_bandprof_s));
+ /* Clear higher layer enable bit in the mid profile, just in case */
+ aq_req.prof.hl_en = 0;
+ aq_req.prof_mask.hl_en = 1;
+
+ rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
+ (struct nix_aq_enq_req *)&aq_req, NULL);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to INIT context of mid layer profile %d\n",
+ __func__, mid_prof);
+ goto exit;
+ }
+
+ /* Map both leaf profiles to this mid layer profile */
+ rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
+ &aq_req, &aq_rsp,
+ leaf_prof, mid_prof);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to map leaf(%d) and mid(%d) profiles\n",
+ __func__, leaf_prof, mid_prof);
+ goto exit;
+ }
+
+ mutex_lock(&rvu->rsrc_lock);
+ ipolicer->ref_count[mid_prof]++;
+ mutex_unlock(&rvu->rsrc_lock);
+
+ rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
+ &aq_req, &aq_rsp,
+ leaf_match, mid_prof);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to map leaf(%d) and mid(%d) profiles\n",
+ __func__, leaf_match, mid_prof);
+ ipolicer->ref_count[mid_prof]--;
+ goto exit;
+ }
+
+ mutex_lock(&rvu->rsrc_lock);
+ ipolicer->ref_count[mid_prof]++;
+ mutex_unlock(&rvu->rsrc_lock);
+
+exit:
+ return rc;
+}
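
Stripped of the AQ plumbing and locking, rvu_nix_setup_ratelimit_aggr() implements a simple rule: leaf profiles sharing a match_id must hang off the same mid-layer profile, allocated lazily on the first matching pair. A much-simplified in-memory sketch of that decision (types and array sizes are illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    struct leaf { uint16_t match_id; int mid; };   /* mid < 0: not yet linked */

    /* Link 'idx' to the mid profile shared by leaves with the same match_id,
     * creating a new mid profile when this is the first matching pair.
     */
    static int aggregate(struct leaf *leafs, int nleaf, int idx, int *next_mid)
    {
        for (int i = 0; i < nleaf; i++) {
            if (i == idx || leafs[i].match_id != leafs[idx].match_id)
                continue;
            if (leafs[i].mid < 0)
                leafs[i].mid = (*next_mid)++;   /* first pair: allocate mid */
            leafs[idx].mid = leafs[i].mid;      /* share the mid profile */
            return leafs[idx].mid;
        }
        return -1;   /* no other leaf with this match_id yet */
    }

    int main(void)
    {
        struct leaf leafs[3] = { {7, -1}, {9, -1}, {7, -1} };
        int next_mid = 0;

        printf("leaf2 -> mid %d\n", aggregate(leafs, 3, 2, &next_mid));   /* 0 */
        return 0;
    }
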
+
+/* Called with the rsrc_lock mutex held */
+static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
+ u32 leaf_prof)
+{
+ struct nix_cn10k_aq_enq_req aq_req;
+ struct nix_cn10k_aq_enq_rsp aq_rsp;
+ struct nix_ipolicer *ipolicer;
+ u16 mid_prof;
+ int rc;
+
+ mutex_unlock(&rvu->rsrc_lock);
+
+ rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
+ NIX_AQ_CTYPE_BANDPROF, leaf_prof);
+
+ mutex_lock(&rvu->rsrc_lock);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to fetch context of leaf profile %d\n",
+ __func__, leaf_prof);
+ return;
+ }
+
+ if (!aq_rsp.prof.hl_en)
+ return;
+
+ mid_prof = aq_rsp.prof.band_prof_id;
+ ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
+ ipolicer->ref_count[mid_prof]--;
+ /* If ref_count is zero, free mid layer profile */
+ if (!ipolicer->ref_count[mid_prof]) {
+ ipolicer->pfvf_map[mid_prof] = 0x00;
+ rvu_free_rsrc(&ipolicer->band_prof, mid_prof);
+ }
+}
+
+int rvu_mbox_handler_nix_bandprof_get_hwinfo(struct rvu *rvu, struct msg_req *req,
+ struct nix_bandprof_get_hwinfo_rsp *rsp)
+{
+ struct nix_ipolicer *ipolicer;
+ int blkaddr, layer, err;
+ struct nix_hw *nix_hw;
+ u64 tu;
+
+ if (!rvu->hw->cap.ipolicer)
+ return NIX_AF_ERR_IPOLICER_NOTSUPP;
+
+ err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return err;
+
+ /* Return number of bandwidth profiles free at each layer */
+ mutex_lock(&rvu->rsrc_lock);
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ if (layer == BAND_PROF_INVAL_LAYER)
+ continue;
+
+ ipolicer = &nix_hw->ipolicer[layer];
+ rsp->prof_count[layer] = rvu_rsrc_free_count(&ipolicer->band_prof);
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+
+ /* Set the policer timeunit in nanosec */
+ tu = rvu_read64(rvu, blkaddr, NIX_AF_PL_TS) & GENMASK_ULL(9, 0);
+ rsp->policer_timeunit = (tu + 1) * 100;
+
+ return 0;
+}
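
With the reset value of 19 that nix_setup_ipolicers() writes to NIX_AF_PL_TS, the conversion above gives (19 + 1) * 100 = 2000 ns, i.e. the 2 us policer time unit; a one-line check:

    #include <stdio.h>

    int main(void)
    {
        unsigned int ts = 19;   /* NIX_AF_PL_TS, bits 9:0 */

        printf("policer timeunit = %u ns\n", (ts + 1) * 100);   /* 2000 ns = 2 us */
        return 0;
    }
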
+
+int rvu_mbox_handler_nix_rx_sw_sync(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
+ if (blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nix_rx_sync(rvu, blkaddr);
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
index 67471cb2b129..9d764f5abad3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
@@ -1,11 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
-/* Marvell OcteonTx2 RVU Admin Function driver
+/* Marvell RVU Admin Function driver
*
- * Copyright (C) 2018 Marvell International Ltd.
+ * Copyright (C) 2018 Marvell.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/module.h>
@@ -292,6 +289,7 @@ int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
return rvu_npa_aq_enq_inst(rvu, req, rsp);
}
#endif
+EXPORT_SYMBOL(rvu_mbox_handler_npa_aq_enq);
int rvu_mbox_handler_npa_hwctx_disable(struct rvu *rvu,
struct hwctx_disable_req *req,
@@ -419,6 +417,10 @@ exit:
rsp->stack_pg_ptrs = (cfg >> 8) & 0xFF;
rsp->stack_pg_bytes = cfg & 0xFF;
rsp->qints = (cfg >> 28) & 0xFFF;
+ if (!is_rvu_otx2(rvu)) {
+ cfg = rvu_read64(rvu, block->addr, NPA_AF_BATCH_CTL);
+ rsp->cache_lines = (cfg >> 1) & 0x3F;
+ }
return rc;
}
@@ -478,6 +480,13 @@ static int npa_aq_init(struct rvu *rvu, struct rvu_block *block)
#endif
rvu_write64(rvu, block->addr, NPA_AF_NDC_CFG, cfg);
+ /* For CN10K NPA BATCH DMA set 35 cache lines */
+ if (!is_rvu_otx2(rvu)) {
+ cfg = rvu_read64(rvu, block->addr, NPA_AF_BATCH_CTL);
+ cfg &= ~0x7EULL;
+ cfg |= BIT_ULL(6) | BIT_ULL(2) | BIT_ULL(1);
+ rvu_write64(rvu, block->addr, NPA_AF_BATCH_CTL, cfg);
+ }
/* Result structure can be followed by Aura/Pool context at
* RES + 128bytes and a write mask at RES + 256 bytes, depending on
* operation type. Alloc sufficient result memory for all operations.
@@ -497,18 +506,14 @@ static int npa_aq_init(struct rvu *rvu, struct rvu_block *block)
int rvu_npa_init(struct rvu *rvu)
{
struct rvu_hwinfo *hw = rvu->hw;
- int blkaddr, err;
+ int blkaddr;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
if (blkaddr < 0)
return 0;
/* Initialize admin queue */
- err = npa_aq_init(rvu, &hw->block[blkaddr]);
- if (err)
- return err;
-
- return 0;
+ return npa_aq_init(rvu, &hw->block[blkaddr]);
}
void rvu_npa_freemem(struct rvu *rvu)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index 6fa9358e6db4..97fb12db0192 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -1,16 +1,15 @@
// SPDX-License-Identifier: GPL-2.0
-/* Marvell OcteonTx2 RVU Admin Function driver
+/* Marvell RVU Admin Function driver
*
- * Copyright (C) 2018 Marvell International Ltd.
+ * Copyright (C) 2018 Marvell.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/firmware.h>
+#include <linux/stddef.h>
#include "rvu_struct.h"
#include "rvu_reg.h"
@@ -19,15 +18,14 @@
#include "cgx.h"
#include "npc_profile.h"
-#define RSVD_MCAM_ENTRIES_PER_PF 2 /* Bcast & Promisc */
+#define RSVD_MCAM_ENTRIES_PER_PF 3 /* Broadcast, Promisc and AllMulticast */
#define RSVD_MCAM_ENTRIES_PER_NIXLF 1 /* Ucast for LFs */
-#define NIXLF_UCAST_ENTRY 0
-#define NIXLF_BCAST_ENTRY 1
-#define NIXLF_PROMISC_ENTRY 2
+#define NPC_HW_TSTAMP_OFFSET 8ULL
+#define NPC_KEX_PF_FUNC_MASK 0xFFFFULL
+#define ALIGN_8B_CEIL(__a) (((__a) + 7) & (-8))
#define NPC_PARSE_RESULT_DMAC_OFFSET 8
-#define NPC_HW_TSTAMP_OFFSET 8ULL
static const char def_pfl_name[] = "default";
@@ -36,6 +34,45 @@ static void npc_mcam_free_all_entries(struct rvu *rvu, struct npc_mcam *mcam,
static void npc_mcam_free_all_counters(struct rvu *rvu, struct npc_mcam *mcam,
u16 pcifunc);
+bool is_npc_intf_tx(u8 intf)
+{
+ return !!(intf & 0x1);
+}
+
+bool is_npc_intf_rx(u8 intf)
+{
+ return !(intf & 0x1);
+}
+
+bool is_npc_interface_valid(struct rvu *rvu, u8 intf)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+
+ return intf < hw->npc_intfs;
+}
+
+static int npc_mcam_verify_pf_func(struct rvu *rvu,
+ struct mcam_entry *entry_data, u8 intf,
+ u16 pcifunc)
+{
+ u16 pf_func, pf_func_mask;
+
+ if (is_npc_intf_rx(intf))
+ return 0;
+
+ pf_func_mask = (entry_data->kw_mask[0] >> 32) &
+ NPC_KEX_PF_FUNC_MASK;
+ pf_func = (entry_data->kw[0] >> 32) & NPC_KEX_PF_FUNC_MASK;
+
+ pf_func = be16_to_cpu((__force __be16)pf_func);
+ if (pf_func_mask != NPC_KEX_PF_FUNC_MASK ||
+ ((pf_func & ~RVU_PFVF_FUNC_MASK) !=
+ (pcifunc & ~RVU_PFVF_FUNC_MASK)))
+ return -EINVAL;
+
+ return 0;
+}
+
void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf)
{
int blkaddr;
@@ -94,8 +131,33 @@ int npc_config_ts_kpuaction(struct rvu *rvu, int pf, u16 pcifunc, bool enable)
return 0;
}
-static int npc_get_nixlf_mcam_index(struct npc_mcam *mcam,
- u16 pcifunc, int nixlf, int type)
+static int npc_get_ucast_mcam_index(struct npc_mcam *mcam, u16 pcifunc,
+ int nixlf)
+{
+ struct rvu_hwinfo *hw = container_of(mcam, struct rvu_hwinfo, mcam);
+ struct rvu *rvu = hw->rvu;
+ int blkaddr = 0, max = 0;
+ struct rvu_block *block;
+ struct rvu_pfvf *pfvf;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ /* Given a PF/VF and NIX LF number calculate the unicast mcam
+ * entry index based on the NIX block assigned to the PF/VF.
+ */
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ while (blkaddr) {
+ if (pfvf->nix_blkaddr == blkaddr)
+ break;
+ block = &rvu->hw->block[blkaddr];
+ max += block->lf.max;
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ }
+
+ return mcam->nixlf_offset + (max + nixlf) * RSVD_MCAM_ENTRIES_PER_NIXLF;
+}
+
+int npc_get_nixlf_mcam_index(struct npc_mcam *mcam,
+ u16 pcifunc, int nixlf, int type)
{
int pf = rvu_get_pf(pcifunc);
int index;
@@ -110,14 +172,16 @@ static int npc_get_nixlf_mcam_index(struct npc_mcam *mcam,
*/
if (type == NIXLF_BCAST_ENTRY)
return index;
- else if (type == NIXLF_PROMISC_ENTRY)
+ else if (type == NIXLF_ALLMULTI_ENTRY)
return index + 1;
+ else if (type == NIXLF_PROMISC_ENTRY)
+ return index + 2;
}
- return (mcam->nixlf_offset + (nixlf * RSVD_MCAM_ENTRIES_PER_NIXLF));
+ return npc_get_ucast_mcam_index(mcam, pcifunc, nixlf);
}
-static int npc_get_bank(struct npc_mcam *mcam, int index)
+int npc_get_bank(struct npc_mcam *mcam, int index)
{
int bank = index / mcam->banksize;
@@ -128,8 +192,8 @@ static int npc_get_bank(struct npc_mcam *mcam, int index)
return bank;
}
-static bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam,
- int blkaddr, int index)
+bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index)
{
int bank = npc_get_bank(mcam, index);
u64 cfg;
@@ -139,8 +203,8 @@ static bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam,
return (cfg & 1);
}
-static void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
- int blkaddr, int index, bool enable)
+void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index, bool enable)
{
int bank = npc_get_bank(mcam, index);
int actbank = bank;
@@ -257,12 +321,121 @@ static void npc_get_keyword(struct mcam_entry *entry, int idx,
*cam0 = ~*cam1 & kw_mask;
}
+static void npc_fill_entryword(struct mcam_entry *entry, int idx,
+ u64 cam0, u64 cam1)
+{
+ /* Similar to npc_get_keyword, but fills mcam_entry structure from
+ * CAM registers.
+ */
+ switch (idx) {
+ case 0:
+ entry->kw[0] = cam1;
+ entry->kw_mask[0] = cam1 ^ cam0;
+ break;
+ case 1:
+ entry->kw[1] = cam1;
+ entry->kw_mask[1] = cam1 ^ cam0;
+ break;
+ case 2:
+ entry->kw[1] |= (cam1 & CAM_MASK(16)) << 48;
+ entry->kw[2] = (cam1 >> 16) & CAM_MASK(48);
+ entry->kw_mask[1] |= ((cam1 ^ cam0) & CAM_MASK(16)) << 48;
+ entry->kw_mask[2] = ((cam1 ^ cam0) >> 16) & CAM_MASK(48);
+ break;
+ case 3:
+ entry->kw[2] |= (cam1 & CAM_MASK(16)) << 48;
+ entry->kw[3] = (cam1 >> 16) & CAM_MASK(32);
+ entry->kw_mask[2] |= ((cam1 ^ cam0) & CAM_MASK(16)) << 48;
+ entry->kw_mask[3] = ((cam1 ^ cam0) >> 16) & CAM_MASK(32);
+ break;
+ case 4:
+ entry->kw[3] |= (cam1 & CAM_MASK(32)) << 32;
+ entry->kw[4] = (cam1 >> 32) & CAM_MASK(32);
+ entry->kw_mask[3] |= ((cam1 ^ cam0) & CAM_MASK(32)) << 32;
+ entry->kw_mask[4] = ((cam1 ^ cam0) >> 32) & CAM_MASK(32);
+ break;
+ case 5:
+ entry->kw[4] |= (cam1 & CAM_MASK(32)) << 32;
+ entry->kw[5] = (cam1 >> 32) & CAM_MASK(16);
+ entry->kw_mask[4] |= ((cam1 ^ cam0) & CAM_MASK(32)) << 32;
+ entry->kw_mask[5] = ((cam1 ^ cam0) >> 32) & CAM_MASK(16);
+ break;
+ case 6:
+ entry->kw[5] |= (cam1 & CAM_MASK(48)) << 16;
+ entry->kw[6] = (cam1 >> 48) & CAM_MASK(16);
+ entry->kw_mask[5] |= ((cam1 ^ cam0) & CAM_MASK(48)) << 16;
+ entry->kw_mask[6] = ((cam1 ^ cam0) >> 48) & CAM_MASK(16);
+ break;
+ case 7:
+ entry->kw[6] |= (cam1 & CAM_MASK(48)) << 16;
+ entry->kw_mask[6] |= ((cam1 ^ cam0) & CAM_MASK(48)) << 16;
+ break;
+ }
+}
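
npc_fill_entryword() is the inverse of npc_get_keyword(): for a key k and mask m, the TCAM's two planes hold CAM1 = k & m and CAM0 = ~CAM1 & m, so the key comes back as CAM1 and the mask as CAM1 ^ CAM0. A tiny round-trip check of that identity:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t kw = 0x123456789ABCULL, mask = 0xFFFF00FF00FFULL;

        /* Encode, as in npc_get_keyword(). */
        uint64_t cam1 = kw & mask;
        uint64_t cam0 = ~cam1 & mask;

        /* Decode, as in npc_fill_entryword(). */
        assert((cam1 ^ cam0) == mask);     /* mask recovered */
        assert(cam1 == (kw & mask));       /* key recovered (don't-care bits zeroed) */
        return 0;
    }
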
+
+static u64 npc_get_default_entry_action(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, u16 pf_func)
+{
+ int bank, nixlf, index;
+
+ /* get ucast entry rule entry index */
+ nix_get_nixlf(rvu, pf_func, &nixlf, NULL);
+ index = npc_get_nixlf_mcam_index(mcam, pf_func, nixlf,
+ NIXLF_UCAST_ENTRY);
+ bank = npc_get_bank(mcam, index);
+ index &= (mcam->banksize - 1);
+
+ return rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(index, bank));
+}
+
+static void npc_fixup_vf_rule(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index, struct mcam_entry *entry,
+ bool *enable)
+{
+ struct rvu_npc_mcam_rule *rule;
+ u16 owner, target_func;
+ struct rvu_pfvf *pfvf;
+ u64 rx_action;
+
+ owner = mcam->entry2pfvf_map[index];
+ target_func = (entry->action >> 4) & 0xffff;
+ /* do nothing when target is LBK/PF or owner is not PF */
+ if (is_pffunc_af(owner) || is_afvf(target_func) ||
+ (owner & RVU_PFVF_FUNC_MASK) ||
+ !(target_func & RVU_PFVF_FUNC_MASK))
+ return;
+
+ /* save entry2target_pffunc */
+ pfvf = rvu_get_pfvf(rvu, target_func);
+ mcam->entry2target_pffunc[index] = target_func;
+
+ /* don't enable rule when nixlf not attached or initialized */
+ if (!(is_nixlf_attached(rvu, target_func) &&
+ test_bit(NIXLF_INITIALIZED, &pfvf->flags)))
+ *enable = false;
+
+ /* fixup is not needed for rules added by the user (ntuple filters) */
+ list_for_each_entry(rule, &mcam->mcam_rules, list) {
+ if (rule->entry == index)
+ return;
+ }
+
+ /* copy VF default entry action to the VF mcam entry */
+ rx_action = npc_get_default_entry_action(rvu, mcam, blkaddr,
+ target_func);
+ if (rx_action)
+ entry->action = rx_action;
+}
+
static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, int index, u8 intf,
struct mcam_entry *entry, bool enable)
{
int bank = npc_get_bank(mcam, index);
int kw = 0, actbank, actindex;
+ u8 tx_intf_mask = ~intf & 0x3;
+ u8 tx_intf = intf;
u64 cam0, cam1;
actbank = bank; /* Save bank id, to set action later on */
@@ -283,12 +456,21 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
*/
for (; bank < (actbank + mcam->banks_per_entry); bank++, kw = kw + 2) {
/* Interface should be set in all banks */
+ if (is_npc_intf_tx(intf)) {
+ /* Last bit must be set and rest don't care
+ * for TX interfaces
+ */
+ tx_intf_mask = 0x1;
+ tx_intf = intf & tx_intf_mask;
+ tx_intf_mask = ~tx_intf & tx_intf_mask;
+ }
+
rvu_write64(rvu, blkaddr,
NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 1),
- intf);
+ tx_intf);
rvu_write64(rvu, blkaddr,
NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 0),
- ~intf & 0x3);
+ tx_intf_mask);
/* Set the match key */
npc_get_keyword(entry, kw, &cam0, &cam1);
@@ -304,6 +486,10 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 0), cam0);
}
+ /* PF installing VF rule */
+ if (is_npc_intf_rx(intf) && actindex < mcam->bmap_entries)
+ npc_fixup_vf_rule(rvu, mcam, blkaddr, actindex, entry, &enable);
+
/* Set 'action' */
rvu_write64(rvu, blkaddr,
NPC_AF_MCAMEX_BANKX_ACTION(index, actbank), entry->action);
@@ -317,6 +503,42 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, true);
}
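
For reference, the interface CAM encoding produced by the TX special case above: RX entries compare the full 2-bit interface value, while TX entries only require the last (TX) bit to be set and treat the rest as don't care. A standalone sketch with arbitrarily chosen interface numbers.

#include <stdint.h>
#include <stdio.h>

static void intf_cam(uint8_t intf, int is_tx, uint8_t *cam1, uint8_t *cam0)
{
	if (is_tx) {
		uint8_t mask = 0x1;		/* only the last (TX) bit is compared */
		uint8_t val  = intf & mask;

		*cam1 = val;
		*cam0 = ~val & mask;
	} else {
		*cam1 = intf;			/* full 2-bit interface match */
		*cam0 = ~intf & 0x3;
	}
}

int main(void)
{
	uint8_t cam1, cam0;

	intf_cam(0, 0, &cam1, &cam0);	/* an RX interface */
	printf("RX0: CAM1=%u CAM0=%u\n", cam1, cam0);

	intf_cam(3, 1, &cam1, &cam0);	/* a TX interface, e.g. 3 */
	printf("TX3: CAM1=%u CAM0=%u\n", cam1, cam0);
	return 0;
}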
+void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, u16 src,
+ struct mcam_entry *entry, u8 *intf, u8 *ena)
+{
+ int sbank = npc_get_bank(mcam, src);
+ int bank, kw = 0;
+ u64 cam0, cam1;
+
+ src &= (mcam->banksize - 1);
+ bank = sbank;
+
+ for (; bank < (sbank + mcam->banks_per_entry); bank++, kw = kw + 2) {
+ cam1 = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W0(src, bank, 1));
+ cam0 = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W0(src, bank, 0));
+ npc_fill_entryword(entry, kw, cam0, cam1);
+
+ cam1 = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W1(src, bank, 1));
+ cam0 = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W1(src, bank, 0));
+ npc_fill_entryword(entry, kw + 1, cam0, cam1);
+ }
+
+ entry->action = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(src, sbank));
+ entry->vtag_action =
+ rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_TAG_ACT(src, sbank));
+ *intf = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_INTF(src, sbank, 1)) & 3;
+ *ena = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CFG(src, sbank)) & 1;
+}
+
static void npc_copy_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, u16 src, u16 dest)
{
@@ -371,34 +593,23 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, u64 chan, u8 *mac_addr)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ struct npc_install_flow_req req = { 0 };
+ struct npc_install_flow_rsp rsp = { 0 };
struct npc_mcam *mcam = &rvu->hw->mcam;
- struct mcam_entry entry = { {0} };
struct nix_rx_action action;
- int blkaddr, index, kwi;
- u64 mac = 0;
+ int blkaddr, index;
- /* AF's VFs work in promiscuous mode */
- if (is_afvf(pcifunc))
+	/* AF's VFs and SDP VFs work in promiscuous mode */
+ if (is_afvf(pcifunc) || is_sdp_vf(pcifunc))
return;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr < 0)
return;
- for (index = ETH_ALEN - 1; index >= 0; index--)
- mac |= ((u64)*mac_addr++) << (8 * index);
-
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_UCAST_ENTRY);
- /* Match ingress channel and DMAC */
- entry.kw[0] = chan;
- entry.kw_mask[0] = 0xFFFULL;
-
- kwi = NPC_PARSE_RESULT_DMAC_OFFSET / sizeof(u64);
- entry.kw[kwi] = mac;
- entry.kw_mask[kwi] = BIT_ULL(48) - 1;
-
/* Don't change the action if entry is already enabled
* Otherwise RSS action may get overwritten.
*/
@@ -411,32 +622,36 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
action.pf_func = pcifunc;
}
- entry.action = *(u64 *)&action;
- npc_config_mcam_entry(rvu, mcam, blkaddr, index,
- NIX_INTF_RX, &entry, true);
+ req.default_rule = 1;
+ ether_addr_copy(req.packet.dmac, mac_addr);
+ eth_broadcast_addr((u8 *)&req.mask.dmac);
+ req.features = BIT_ULL(NPC_DMAC);
+ req.channel = chan;
+ req.chan_mask = 0xFFFU;
+ req.intf = pfvf->nix_rx_intf;
+ req.op = action.op;
+ req.hdr.pcifunc = 0; /* AF is requester */
+ req.vf = action.pf_func;
+ req.index = action.index;
+ req.match_id = action.match_id;
+ req.flow_key_alg = action.flow_key_alg;
- /* add VLAN matching, setup action and save entry back for later */
- entry.kw[0] |= (NPC_LT_LB_STAG_QINQ | NPC_LT_LB_CTAG) << 20;
- entry.kw_mask[0] |= (NPC_LT_LB_STAG_QINQ & NPC_LT_LB_CTAG) << 20;
-
- entry.vtag_action = VTAG0_VALID_BIT |
- FIELD_PREP(VTAG0_TYPE_MASK, 0) |
- FIELD_PREP(VTAG0_LID_MASK, NPC_LID_LA) |
- FIELD_PREP(VTAG0_RELPTR_MASK, 12);
-
- memcpy(&pfvf->entry, &entry, sizeof(entry));
+ rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
}
void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
- int nixlf, u64 chan, bool allmulti)
+ int nixlf, u64 chan, u8 chan_cnt)
{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
struct npc_mcam *mcam = &rvu->hw->mcam;
- int blkaddr, ucast_idx, index, kwi;
- struct mcam_entry entry = { {0} };
- struct nix_rx_action action = { };
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct npc_install_flow_req req = { 0 };
+ struct npc_install_flow_rsp rsp = { 0 };
+ int blkaddr, ucast_idx, index;
+ struct nix_rx_action action;
+ u64 relaxed_mask;
- /* Only PF or AF VF can add a promiscuous entry */
- if ((pcifunc & RVU_PFVF_FUNC_MASK) && !is_afvf(pcifunc))
+ if (!hw->cap.nix_rx_multicast && is_cgx_vf(rvu, pcifunc))
return;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
@@ -445,39 +660,71 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_PROMISC_ENTRY);
-
- entry.kw[0] = chan;
- entry.kw_mask[0] = 0xFFFULL;
-
- if (allmulti) {
- kwi = NPC_KEXOF_DMAC / sizeof(u64);
- entry.kw[kwi] = BIT_ULL(40); /* LSB bit of 1st byte in DMAC */
- entry.kw_mask[kwi] = BIT_ULL(40);
- }
-
- ucast_idx = npc_get_nixlf_mcam_index(mcam, pcifunc,
- nixlf, NIXLF_UCAST_ENTRY);
+ if (is_cgx_vf(rvu, pcifunc))
+ index = npc_get_nixlf_mcam_index(mcam,
+ pcifunc & ~RVU_PFVF_FUNC_MASK,
+ nixlf, NIXLF_PROMISC_ENTRY);
/* If the corresponding PF's ucast action is RSS,
* use the same action for promisc also
*/
+ ucast_idx = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_UCAST_ENTRY);
if (is_mcam_entry_enabled(rvu, mcam, blkaddr, ucast_idx))
*(u64 *)&action = npc_get_mcam_action(rvu, mcam,
- blkaddr, ucast_idx);
+ blkaddr, ucast_idx);
if (action.op != NIX_RX_ACTIONOP_RSS) {
*(u64 *)&action = 0x00;
action.op = NIX_RX_ACTIONOP_UCAST;
- action.pf_func = pcifunc;
}
- entry.action = *(u64 *)&action;
- npc_config_mcam_entry(rvu, mcam, blkaddr, index,
- NIX_INTF_RX, &entry, true);
+ /* RX_ACTION set to MCAST for CGX PF's */
+ if (hw->cap.nix_rx_multicast && pfvf->use_mce_list &&
+ is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) {
+ *(u64 *)&action = 0x00;
+ action.op = NIX_RX_ACTIONOP_MCAST;
+ pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
+ action.index = pfvf->promisc_mce_idx;
+ }
+
+	/* On cn10k the upper two bits of the channel number are the
+	 * CPT channel number. By masking out these bits in the MCAM
+	 * entry, the same entry used for NIX also allows packets
+	 * received from CPT to be parsed.
+	 */
+	if (!is_rvu_otx2(rvu))
+		req.chan_mask = NIX_CHAN_CPT_X2P_MASK;
+	else
+		req.chan_mask = 0xFFFU;
+
+ if (chan_cnt > 1) {
+ if (!is_power_of_2(chan_cnt)) {
+ dev_err(rvu->dev,
+ "%s: channel count more than 1, must be power of 2\n", __func__);
+ return;
+ }
+ relaxed_mask = GENMASK_ULL(BITS_PER_LONG_LONG - 1,
+ ilog2(chan_cnt));
+ req.chan_mask &= relaxed_mask;
+ }
+
+ req.channel = chan;
+ req.intf = pfvf->nix_rx_intf;
+ req.entry = index;
+ req.op = action.op;
+ req.hdr.pcifunc = 0; /* AF is requester */
+ req.vf = pcifunc;
+ req.index = action.index;
+ req.match_id = action.match_id;
+ req.flow_key_alg = action.flow_key_alg;
+
+ rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
}
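
A worked example of the relaxed channel mask computed above: for a power-of-two chan_cnt, the low ilog2(chan_cnt) bits of the 12-bit channel mask are cleared so a single MCAM entry covers the whole aligned channel range. Userspace sketch with GENMASK_ULL()/ilog2() open-coded; the channel count is made up.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t chan_mask = 0xFFFULL;	/* exact 12-bit channel match by default */
	unsigned int chan_cnt = 4;	/* must be a power of two, as checked above */

	/* GENMASK_ULL(63, ilog2(chan_cnt)) for a power-of-two count */
	unsigned int lsb = __builtin_ctz(chan_cnt);	/* ilog2() */
	uint64_t relaxed = ~0ULL << lsb;		/* bits [63:lsb] set */

	chan_mask &= relaxed;
	printf("chan_cnt=%u -> chan_mask=0x%03llx (low %u bits ignored)\n",
	       chan_cnt, (unsigned long long)chan_mask, lsb);
	return 0;
}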
-static void npc_enadis_promisc_entry(struct rvu *rvu, u16 pcifunc,
- int nixlf, bool enable)
+void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc,
+ int nixlf, bool enable)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
int blkaddr, index;
@@ -486,33 +733,22 @@ static void npc_enadis_promisc_entry(struct rvu *rvu, u16 pcifunc,
if (blkaddr < 0)
return;
- /* Only PF's have a promiscuous entry */
- if (pcifunc & RVU_PFVF_FUNC_MASK)
- return;
+ /* Get 'pcifunc' of PF device */
+ pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_PROMISC_ENTRY);
npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
}
-void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf)
-{
- npc_enadis_promisc_entry(rvu, pcifunc, nixlf, false);
-}
-
-void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf)
-{
- npc_enadis_promisc_entry(rvu, pcifunc, nixlf, true);
-}
-
void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, u64 chan)
{
+ struct rvu_pfvf *pfvf;
+ struct npc_install_flow_req req = { 0 };
+ struct npc_install_flow_rsp rsp = { 0 };
struct npc_mcam *mcam = &rvu->hw->mcam;
- struct mcam_entry entry = { {0} };
struct rvu_hwinfo *hw = rvu->hw;
- struct nix_rx_action action;
- struct rvu_pfvf *pfvf;
int blkaddr, index;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
@@ -526,44 +762,137 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
/* If pkt replication is not supported,
* then only PF is allowed to add a bcast match entry.
*/
- if (!hw->cap.nix_rx_multicast && pcifunc & RVU_PFVF_FUNC_MASK)
+ if (!hw->cap.nix_rx_multicast && is_vf(pcifunc))
return;
/* Get 'pcifunc' of PF device */
pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_BCAST_ENTRY);
- /* Match ingress channel */
- entry.kw[0] = chan;
- entry.kw_mask[0] = 0xfffull;
-
- /* Match broadcast MAC address.
- * DMAC is extracted at 0th bit of PARSE_KEX::KW1
- */
- entry.kw[1] = 0xffffffffffffull;
- entry.kw_mask[1] = 0xffffffffffffull;
-
- *(u64 *)&action = 0x00;
if (!hw->cap.nix_rx_multicast) {
/* Early silicon doesn't support pkt replication,
* so install entry with UCAST action, so that PF
* receives all broadcast packets.
*/
+ req.op = NIX_RX_ACTIONOP_UCAST;
+ } else {
+ req.op = NIX_RX_ACTIONOP_MCAST;
+ req.index = pfvf->bcast_mce_idx;
+ }
+
+ eth_broadcast_addr((u8 *)&req.packet.dmac);
+ eth_broadcast_addr((u8 *)&req.mask.dmac);
+ req.features = BIT_ULL(NPC_DMAC);
+ req.channel = chan;
+ req.chan_mask = 0xFFFU;
+ req.intf = pfvf->nix_rx_intf;
+ req.entry = index;
+ req.hdr.pcifunc = 0; /* AF is requester */
+ req.vf = pcifunc;
+
+ rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
+}
+
+void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
+ bool enable)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int blkaddr, index;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ /* Get 'pcifunc' of PF device */
+ pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
+
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf,
+ NIXLF_BCAST_ENTRY);
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
+}
+
+void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
+ u64 chan)
+{
+ struct npc_install_flow_req req = { 0 };
+ struct npc_install_flow_rsp rsp = { 0 };
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_hwinfo *hw = rvu->hw;
+ int blkaddr, ucast_idx, index;
+ u8 mac_addr[ETH_ALEN] = { 0 };
+ struct nix_rx_action action;
+ struct rvu_pfvf *pfvf;
+ u16 vf_func;
+
+ /* Only CGX PF/VF can add allmulticast entry */
+ if (is_afvf(pcifunc) && is_sdp_vf(pcifunc))
+ return;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ /* Get 'pcifunc' of PF device */
+ vf_func = pcifunc & RVU_PFVF_FUNC_MASK;
+ pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_ALLMULTI_ENTRY);
+
+ /* If the corresponding PF's ucast action is RSS,
+ * use the same action for multicast entry also
+ */
+ ucast_idx = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_UCAST_ENTRY);
+ if (is_mcam_entry_enabled(rvu, mcam, blkaddr, ucast_idx))
+ *(u64 *)&action = npc_get_mcam_action(rvu, mcam,
+ blkaddr, ucast_idx);
+
+ if (action.op != NIX_RX_ACTIONOP_RSS) {
+ *(u64 *)&action = 0x00;
action.op = NIX_RX_ACTIONOP_UCAST;
action.pf_func = pcifunc;
- } else {
- pfvf = rvu_get_pfvf(rvu, pcifunc);
- action.index = pfvf->bcast_mce_idx;
+ }
+
+ /* RX_ACTION set to MCAST for CGX PF's */
+ if (hw->cap.nix_rx_multicast && pfvf->use_mce_list) {
+ *(u64 *)&action = 0x00;
action.op = NIX_RX_ACTIONOP_MCAST;
+ action.index = pfvf->mcast_mce_idx;
}
- entry.action = *(u64 *)&action;
- npc_config_mcam_entry(rvu, mcam, blkaddr, index,
- NIX_INTF_RX, &entry, true);
+ mac_addr[0] = 0x01; /* LSB bit of 1st byte in DMAC */
+ ether_addr_copy(req.packet.dmac, mac_addr);
+ ether_addr_copy(req.mask.dmac, mac_addr);
+ req.features = BIT_ULL(NPC_DMAC);
+
+	/* On cn10k the upper two bits of the channel number are the
+	 * CPT channel number. By masking out these bits in the MCAM
+	 * entry, the same entry used for NIX also allows packets
+	 * received from CPT to be parsed.
+	 */
+ if (!is_rvu_otx2(rvu))
+ req.chan_mask = NIX_CHAN_CPT_X2P_MASK;
+ else
+ req.chan_mask = 0xFFFU;
+
+ req.channel = chan;
+ req.intf = pfvf->nix_rx_intf;
+ req.entry = index;
+ req.op = action.op;
+ req.hdr.pcifunc = 0; /* AF is requester */
+ req.vf = pcifunc | vf_func;
+ req.index = action.index;
+ req.match_id = action.match_id;
+ req.flow_key_alg = action.flow_key_alg;
+
+ rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
}
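
The allmulti entry above matches only the multicast bit: DMAC byte 0, bit 0, with the mask restricted to that single bit. A tiny sketch showing that such a match accepts any multicast DMAC and rejects unicast ones; the addresses are invented.

#include <stdint.h>
#include <stdio.h>

static int matches_allmulti(const uint8_t dmac[6])
{
	const uint8_t match[6] = { 0x01, 0, 0, 0, 0, 0 };	/* packet.dmac above */
	const uint8_t mask[6]  = { 0x01, 0, 0, 0, 0, 0 };	/* mask.dmac above */

	for (int i = 0; i < 6; i++)
		if ((dmac[i] & mask[i]) != (match[i] & mask[i]))
			return 0;
	return 1;
}

int main(void)
{
	const uint8_t mcast[6] = { 0x01, 0x00, 0x5e, 0x01, 0x02, 0x03 };
	const uint8_t ucast[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };

	printf("multicast matches: %d\n", matches_allmulti(mcast));	/* 1 */
	printf("unicast matches:   %d\n", matches_allmulti(ucast));	/* 0 */
	return 0;
}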
-void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, bool enable)
+void rvu_npc_enable_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
+ bool enable)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
int blkaddr, index;
@@ -575,16 +904,62 @@ void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, bool enable)
/* Get 'pcifunc' of PF device */
pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
- index = npc_get_nixlf_mcam_index(mcam, pcifunc, 0, NIXLF_BCAST_ENTRY);
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf,
+ NIXLF_ALLMULTI_ENTRY);
npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
}
+static void npc_update_vf_flow_entry(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, u16 pcifunc, u64 rx_action)
+{
+ int actindex, index, bank, entry;
+ struct rvu_npc_mcam_rule *rule;
+ bool enable, update;
+
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK))
+ return;
+
+ mutex_lock(&mcam->lock);
+ for (index = 0; index < mcam->bmap_entries; index++) {
+ if (mcam->entry2target_pffunc[index] == pcifunc) {
+ update = true;
+ /* update not needed for the rules added via ntuple filters */
+ list_for_each_entry(rule, &mcam->mcam_rules, list) {
+ if (rule->entry == index)
+ update = false;
+ }
+ if (!update)
+ continue;
+ bank = npc_get_bank(mcam, index);
+ actindex = index;
+ entry = index & (mcam->banksize - 1);
+
+ /* read vf flow entry enable status */
+ enable = is_mcam_entry_enabled(rvu, mcam, blkaddr,
+ actindex);
+ /* disable before mcam entry update */
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex,
+ false);
+ /* update 'action' */
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(entry, bank),
+ rx_action);
+ if (enable)
+ npc_enable_mcam_entry(rvu, mcam, blkaddr,
+ actindex, true);
+ }
+ }
+ mutex_unlock(&mcam->lock);
+}
+
void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
int group, int alg_idx, int mcam_index)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_hwinfo *hw = rvu->hw;
struct nix_rx_action action;
int blkaddr, index, bank;
+ struct rvu_pfvf *pfvf;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr < 0)
@@ -621,13 +996,27 @@ void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
rvu_write64(rvu, blkaddr,
NPC_AF_MCAMEX_BANKX_ACTION(index, bank), *(u64 *)&action);
+	/* Update the VF flow rule action with the VF default entry action.
+	 * Due to a restriction of the dataplane application, the PF adding
+	 * the VF flow rule cannot specify the RX action explicitly.
+	 */
+ if (mcam_index < 0)
+ npc_update_vf_flow_entry(rvu, mcam, blkaddr, pcifunc,
+ *(u64 *)&action);
+
+ /* update the action change in default rule */
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ if (pfvf->def_ucast_rule)
+ pfvf->def_ucast_rule->rx_action = action;
+
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_PROMISC_ENTRY);
/* If PF's promiscuous entry is enabled,
* Set RSS action for that entry as well
*/
- if (is_mcam_entry_enabled(rvu, mcam, blkaddr, index)) {
+ if ((!hw->cap.nix_rx_multicast || !pfvf->use_mce_list) &&
+ is_mcam_entry_enabled(rvu, mcam, blkaddr, index)) {
bank = npc_get_bank(mcam, index);
index &= (mcam->banksize - 1);
@@ -635,16 +1024,49 @@ void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
NPC_AF_MCAMEX_BANKX_ACTION(index, bank),
*(u64 *)&action);
}
+}
+
+void npc_enadis_default_mce_entry(struct rvu *rvu, u16 pcifunc,
+ int nixlf, int type, bool enable)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct nix_mce_list *mce_list;
+ int index, blkaddr, mce_idx;
+ struct rvu_pfvf *pfvf;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc & ~RVU_PFVF_FUNC_MASK,
+ nixlf, type);
- rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
+	/* Toggle the MCAM entry directly when pkt replication is unsupported */
+ if (!hw->cap.nix_rx_multicast && !is_vf(pcifunc)) {
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
+ return;
+ }
+
+	/* Return in case the MCE list is not enabled */
+ pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
+ if (hw->cap.nix_rx_multicast && is_vf(pcifunc) &&
+ type != NIXLF_BCAST_ENTRY && !pfvf->use_mce_list)
+ return;
+
+ nix_get_mce_list(rvu, pcifunc, type, &mce_list, &mce_idx);
+
+ nix_update_mce_list(rvu, pcifunc, mce_list,
+ mce_idx, index, enable);
+ if (enable)
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
}
static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc,
int nixlf, bool enable)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
- struct nix_rx_action action;
- int index, bank, blkaddr;
+ int index, blkaddr;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr < 0)
@@ -655,56 +1077,47 @@ static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc,
nixlf, NIXLF_UCAST_ENTRY);
npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
- /* For PF, ena/dis promisc and bcast MCAM match entries.
- * For VFs add/delete from bcast list when RX multicast
- * feature is present.
+	/* Nothing to do for VFs on platforms where pkt replication
+	 * is not supported
*/
- if (pcifunc & RVU_PFVF_FUNC_MASK && !rvu->hw->cap.nix_rx_multicast)
+ if ((pcifunc & RVU_PFVF_FUNC_MASK) && !rvu->hw->cap.nix_rx_multicast)
return;
- /* For bcast, enable/disable only if it's action is not
- * packet replication, incase if action is replication
- * then this PF/VF's nixlf is removed from bcast replication
- * list.
- */
- index = npc_get_nixlf_mcam_index(mcam, pcifunc & ~RVU_PFVF_FUNC_MASK,
- nixlf, NIXLF_BCAST_ENTRY);
- bank = npc_get_bank(mcam, index);
- *(u64 *)&action = rvu_read64(rvu, blkaddr,
- NPC_AF_MCAMEX_BANKX_ACTION(index & (mcam->banksize - 1), bank));
-
- /* VFs will not have BCAST entry */
- if (action.op != NIX_RX_ACTIONOP_MCAST &&
- !(pcifunc & RVU_PFVF_FUNC_MASK)) {
- npc_enable_mcam_entry(rvu, mcam,
- blkaddr, index, enable);
- } else {
- nix_update_bcast_mce_list(rvu, pcifunc, enable);
- /* Enable PF's BCAST entry for packet replication */
- rvu_npc_enable_bcast_entry(rvu, pcifunc, enable);
- }
-
- if (enable)
- rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf);
- else
- rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf);
-
- rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
+ /* add/delete pf_func to broadcast MCE list */
+ npc_enadis_default_mce_entry(rvu, pcifunc, nixlf,
+ NIXLF_BCAST_ENTRY, enable);
}
void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
{
+ if (nixlf < 0)
+ return;
+
npc_enadis_default_entries(rvu, pcifunc, nixlf, false);
+
+ /* Delete multicast and promisc MCAM entries */
+ npc_enadis_default_mce_entry(rvu, pcifunc, nixlf,
+ NIXLF_ALLMULTI_ENTRY, false);
+ npc_enadis_default_mce_entry(rvu, pcifunc, nixlf,
+ NIXLF_PROMISC_ENTRY, false);
}
void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
{
+ if (nixlf < 0)
+ return;
+
+	/* Enable only the broadcast match entry. Promisc/allmulti are
+	 * enabled in the set_rx_mode mbox handler.
+	 */
npc_enadis_default_entries(rvu, pcifunc, nixlf, true);
}
void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_npc_mcam_rule *rule, *tmp;
int blkaddr;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
@@ -713,12 +1126,56 @@ void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
mutex_lock(&mcam->lock);
- /* Disable and free all MCAM entries mapped to this 'pcifunc' */
+ /* Disable MCAM entries directing traffic to this 'pcifunc' */
+ list_for_each_entry_safe(rule, tmp, &mcam->mcam_rules, list) {
+ if (is_npc_intf_rx(rule->intf) &&
+ rule->rx_action.pf_func == pcifunc &&
+ rule->rx_action.op != NIX_RX_ACTIONOP_MCAST) {
+ npc_enable_mcam_entry(rvu, mcam, blkaddr,
+ rule->entry, false);
+ rule->enable = false;
+ /* Indicate that default rule is disabled */
+ if (rule->default_rule) {
+ pfvf->def_ucast_rule = NULL;
+ list_del(&rule->list);
+ kfree(rule);
+ }
+ }
+ }
+
+ mutex_unlock(&mcam->lock);
+
+ npc_mcam_disable_flows(rvu, pcifunc);
+
+ rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
+}
+
+void rvu_npc_free_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_npc_mcam_rule *rule, *tmp;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ mutex_lock(&mcam->lock);
+
+ /* Free all MCAM entries owned by this 'pcifunc' */
npc_mcam_free_all_entries(rvu, mcam, blkaddr, pcifunc);
- /* Free all MCAM counters mapped to this 'pcifunc' */
+ /* Free all MCAM counters owned by this 'pcifunc' */
npc_mcam_free_all_counters(rvu, mcam, pcifunc);
+ /* Delete MCAM entries owned by this 'pcifunc' */
+ list_for_each_entry_safe(rule, tmp, &mcam->mcam_rules, list) {
+ if (rule->owner == pcifunc && !rule->default_rule) {
+ list_del(&rule->list);
+ kfree(rule);
+ }
+ }
+
mutex_unlock(&mcam->lock);
rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
@@ -732,47 +1189,104 @@ void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
rvu_write64(rvu, blkaddr, \
NPC_AF_INTFX_LDATAX_FLAGSX_CFG(intf, ld, flags), cfg)
-static void npc_program_mkex_profile(struct rvu *rvu, int blkaddr,
- const struct npc_mcam_kex *mkex)
+static void npc_program_mkex_rx(struct rvu *rvu, int blkaddr,
+ const struct npc_mcam_kex *mkex, u8 intf)
{
int lid, lt, ld, fl;
- rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX),
- mkex->keyx_cfg[NIX_INTF_RX]);
- rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX),
- mkex->keyx_cfg[NIX_INTF_TX]);
+ if (is_npc_intf_tx(intf))
+ return;
- for (ld = 0; ld < NPC_MAX_LD; ld++)
- rvu_write64(rvu, blkaddr, NPC_AF_KEX_LDATAX_FLAGS_CFG(ld),
- mkex->kex_ld_flags[ld]);
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf),
+ mkex->keyx_cfg[NIX_INTF_RX]);
+ /* Program LDATA */
for (lid = 0; lid < NPC_MAX_LID; lid++) {
for (lt = 0; lt < NPC_MAX_LT; lt++) {
- for (ld = 0; ld < NPC_MAX_LD; ld++) {
- SET_KEX_LD(NIX_INTF_RX, lid, lt, ld,
+ for (ld = 0; ld < NPC_MAX_LD; ld++)
+ SET_KEX_LD(intf, lid, lt, ld,
mkex->intf_lid_lt_ld[NIX_INTF_RX]
[lid][lt][ld]);
-
- SET_KEX_LD(NIX_INTF_TX, lid, lt, ld,
- mkex->intf_lid_lt_ld[NIX_INTF_TX]
- [lid][lt][ld]);
- }
}
}
-
+ /* Program LFLAGS */
for (ld = 0; ld < NPC_MAX_LD; ld++) {
- for (fl = 0; fl < NPC_MAX_LFL; fl++) {
- SET_KEX_LDFLAGS(NIX_INTF_RX, ld, fl,
+ for (fl = 0; fl < NPC_MAX_LFL; fl++)
+ SET_KEX_LDFLAGS(intf, ld, fl,
mkex->intf_ld_flags[NIX_INTF_RX]
[ld][fl]);
+ }
+}
+
+static void npc_program_mkex_tx(struct rvu *rvu, int blkaddr,
+ const struct npc_mcam_kex *mkex, u8 intf)
+{
+ int lid, lt, ld, fl;
+
+ if (is_npc_intf_rx(intf))
+ return;
+
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf),
+ mkex->keyx_cfg[NIX_INTF_TX]);
- SET_KEX_LDFLAGS(NIX_INTF_TX, ld, fl,
+ /* Program LDATA */
+ for (lid = 0; lid < NPC_MAX_LID; lid++) {
+ for (lt = 0; lt < NPC_MAX_LT; lt++) {
+ for (ld = 0; ld < NPC_MAX_LD; ld++)
+ SET_KEX_LD(intf, lid, lt, ld,
+ mkex->intf_lid_lt_ld[NIX_INTF_TX]
+ [lid][lt][ld]);
+ }
+ }
+ /* Program LFLAGS */
+ for (ld = 0; ld < NPC_MAX_LD; ld++) {
+ for (fl = 0; fl < NPC_MAX_LFL; fl++)
+ SET_KEX_LDFLAGS(intf, ld, fl,
mkex->intf_ld_flags[NIX_INTF_TX]
[ld][fl]);
- }
}
}
+static void npc_program_mkex_profile(struct rvu *rvu, int blkaddr,
+ const struct npc_mcam_kex *mkex)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u8 intf;
+ int ld;
+
+ for (ld = 0; ld < NPC_MAX_LD; ld++)
+ rvu_write64(rvu, blkaddr, NPC_AF_KEX_LDATAX_FLAGS_CFG(ld),
+ mkex->kex_ld_flags[ld]);
+
+ for (intf = 0; intf < hw->npc_intfs; intf++) {
+ npc_program_mkex_rx(rvu, blkaddr, mkex, intf);
+ npc_program_mkex_tx(rvu, blkaddr, mkex, intf);
+ }
+}
+
+static int npc_fwdb_prfl_img_map(struct rvu *rvu, void __iomem **prfl_img_addr,
+ u64 *size)
+{
+ u64 prfl_addr, prfl_sz;
+
+ if (!rvu->fwdata)
+ return -EINVAL;
+
+ prfl_addr = rvu->fwdata->mcam_addr;
+ prfl_sz = rvu->fwdata->mcam_sz;
+
+ if (!prfl_addr || !prfl_sz)
+ return -EINVAL;
+
+ *prfl_img_addr = ioremap_wc(prfl_addr, prfl_sz);
+ if (!(*prfl_img_addr))
+ return -ENOMEM;
+
+ *size = prfl_sz;
+
+ return 0;
+}
+
#define MKEX_END_SIGN 0xdeadbeef
static void npc_load_mkex_profile(struct rvu *rvu, int blkaddr,
@@ -781,36 +1295,31 @@ static void npc_load_mkex_profile(struct rvu *rvu, int blkaddr,
struct device *dev = &rvu->pdev->dev;
struct npc_mcam_kex *mcam_kex;
void *mkex_prfl_addr = NULL;
- u64 prfl_addr, prfl_sz;
+ u64 prfl_sz;
+ int ret;
+ /* Order of precedence (high to low):
+ * 1. Via mkex_profile, loaded from ATF.
+ * 2. Built-in KEX profile from npc_mkex_default.
+ */
/* If user not selected mkex profile */
- if (!strncmp(mkex_profile, def_pfl_name, MKEX_NAME_LEN))
- goto program_mkex;
-
- if (!rvu->fwdata)
- goto program_mkex;
- prfl_addr = rvu->fwdata->mcam_addr;
- prfl_sz = rvu->fwdata->mcam_sz;
-
- if (!prfl_addr || !prfl_sz)
+ if (rvu->kpu_fwdata_sz ||
+ !strncmp(mkex_profile, def_pfl_name, MKEX_NAME_LEN))
goto program_mkex;
- mkex_prfl_addr = memremap(prfl_addr, prfl_sz, MEMREMAP_WC);
- if (!mkex_prfl_addr)
+ /* Setting up the mapping for mkex profile image */
+ ret = npc_fwdb_prfl_img_map(rvu, &mkex_prfl_addr, &prfl_sz);
+ if (ret < 0)
goto program_mkex;
- mcam_kex = (struct npc_mcam_kex *)mkex_prfl_addr;
+ mcam_kex = (struct npc_mcam_kex __force *)mkex_prfl_addr;
while (((s64)prfl_sz > 0) && (mcam_kex->mkex_sign != MKEX_END_SIGN)) {
/* Compare with mkex mod_param name string */
if (mcam_kex->mkex_sign == MKEX_SIGN &&
!strncmp(mcam_kex->name, mkex_profile, MKEX_NAME_LEN)) {
- /* Due to an errata (35786) in A0/B0 pass silicon,
- * parse nibble enable configuration has to be
- * identical for both Rx and Tx interfaces.
- */
- if (!is_rvu_96xx_B0(rvu) ||
- mcam_kex->keyx_cfg[NIX_INTF_RX] == mcam_kex->keyx_cfg[NIX_INTF_TX])
+ /* If profile is valid, switch to it. */
+ if (is_parse_nibble_config_valid(rvu, mcam_kex))
rvu->kpu.mkex = mcam_kex;
goto program_mkex;
}
@@ -902,6 +1411,7 @@ static void npc_program_kpu_profile(struct rvu *rvu, int blkaddr, int kpu,
const struct npc_kpu_profile *profile)
{
int entry, num_entries, max_entries;
+ u64 entry_mask;
if (profile->cam_entries != profile->action_entries) {
dev_err(rvu->dev,
@@ -909,7 +1419,7 @@ static void npc_program_kpu_profile(struct rvu *rvu, int blkaddr, int kpu,
kpu, profile->cam_entries, profile->action_entries);
}
- max_entries = rvu_read64(rvu, blkaddr, NPC_AF_CONST1) & 0xFFF;
+ max_entries = rvu->hw->npc_kpu_entries;
/* Program CAM match entries for previous KPU extracted data */
num_entries = min_t(int, profile->cam_entries, max_entries);
@@ -925,8 +1435,12 @@ static void npc_program_kpu_profile(struct rvu *rvu, int blkaddr, int kpu,
/* Enable all programmed entries */
num_entries = min_t(int, profile->action_entries, profile->cam_entries);
+ entry_mask = enable_mask(num_entries);
+ /* Disable first KPU_MAX_CST_ENT entries for built-in profile */
+ if (!rvu->kpu.custom)
+ entry_mask |= GENMASK_ULL(KPU_MAX_CST_ENT - 1, 0);
rvu_write64(rvu, blkaddr,
- NPC_AF_KPUX_ENTRY_DISX(kpu, 0), enable_mask(num_entries));
+ NPC_AF_KPUX_ENTRY_DISX(kpu, 0), entry_mask);
if (num_entries > 64) {
rvu_write64(rvu, blkaddr,
NPC_AF_KPUX_ENTRY_DISX(kpu, 1),
@@ -939,6 +1453,7 @@ static void npc_program_kpu_profile(struct rvu *rvu, int blkaddr, int kpu,
static int npc_prepare_default_kpu(struct npc_kpu_profile_adapter *profile)
{
+ profile->custom = 0;
profile->name = def_pfl_name;
profile->version = NPC_KPU_PROFILE_VER;
profile->ikpu = ikpu_action_entries;
@@ -951,10 +1466,253 @@ static int npc_prepare_default_kpu(struct npc_kpu_profile_adapter *profile)
return 0;
}
+static int npc_apply_custom_kpu(struct rvu *rvu,
+ struct npc_kpu_profile_adapter *profile)
+{
+ size_t hdr_sz = sizeof(struct npc_kpu_profile_fwdata), offset = 0;
+ struct npc_kpu_profile_fwdata *fw = rvu->kpu_fwdata;
+ struct npc_kpu_profile_action *action;
+ struct npc_kpu_profile_cam *cam;
+ struct npc_kpu_fwdata *fw_kpu;
+ int entries;
+ u16 kpu, entry;
+
+ if (rvu->kpu_fwdata_sz < hdr_sz) {
+ dev_warn(rvu->dev, "Invalid KPU profile size\n");
+ return -EINVAL;
+ }
+ if (le64_to_cpu(fw->signature) != KPU_SIGN) {
+ dev_warn(rvu->dev, "Invalid KPU profile signature %llx\n",
+ fw->signature);
+ return -EINVAL;
+ }
+ profile->custom = 1;
+ profile->name = fw->name;
+ profile->version = le64_to_cpu(fw->version);
+ profile->mkex = &fw->mkex;
+
+	/* Verify that a known profile structure version is used */
+ if (NPC_KPU_VER_MAJ(profile->version) >
+ NPC_KPU_VER_MAJ(NPC_KPU_PROFILE_VER)) {
+ dev_warn(rvu->dev, "Not supported Major version: %d > %d\n",
+ NPC_KPU_VER_MAJ(profile->version),
+ NPC_KPU_VER_MAJ(NPC_KPU_PROFILE_VER));
+ return -EINVAL;
+ }
+ /* Verify if profile is aligned with the required kernel changes */
+ if (NPC_KPU_VER_MIN(profile->version) <
+ NPC_KPU_VER_MIN(NPC_KPU_PROFILE_VER)) {
+ dev_warn(rvu->dev,
+ "Invalid KPU profile version: %d.%d.%d expected version <= %d.%d.%d\n",
+ NPC_KPU_VER_MAJ(profile->version),
+ NPC_KPU_VER_MIN(profile->version),
+ NPC_KPU_VER_PATCH(profile->version),
+ NPC_KPU_VER_MAJ(NPC_KPU_PROFILE_VER),
+ NPC_KPU_VER_MIN(NPC_KPU_PROFILE_VER),
+ NPC_KPU_VER_PATCH(NPC_KPU_PROFILE_VER));
+ return -EINVAL;
+ }
+ /* Verify if profile fits the HW */
+ if (fw->kpus > profile->kpus) {
+ dev_warn(rvu->dev, "Not enough KPUs: %d > %ld\n", fw->kpus,
+ profile->kpus);
+ return -EINVAL;
+ }
+ /* Update adapter structure and ensure endianness where needed. */
+ profile->lt_def = &fw->lt_def;
+
+ for (kpu = 0; kpu < fw->kpus; kpu++) {
+ fw_kpu = (struct npc_kpu_fwdata *)(fw->data + offset);
+ if (fw_kpu->entries > KPU_MAX_CST_ENT)
+ dev_warn(rvu->dev,
+ "Too many custom entries on KPU%d: %d > %d\n",
+ kpu, fw_kpu->entries, KPU_MAX_CST_ENT);
+ entries = min(fw_kpu->entries, KPU_MAX_CST_ENT);
+ cam = (struct npc_kpu_profile_cam *)fw_kpu->data;
+ offset += sizeof(*fw_kpu) + fw_kpu->entries * sizeof(*cam);
+ action = (struct npc_kpu_profile_action *)(fw->data + offset);
+ offset += fw_kpu->entries * sizeof(*action);
+ if (rvu->kpu_fwdata_sz < hdr_sz + offset) {
+ dev_warn(rvu->dev,
+ "Profile size mismatch on KPU%i parsing.\n",
+ kpu + 1);
+ return -EINVAL;
+ }
+ /* Fix endianness and update */
+ for (entry = 0; entry < entries; entry++) {
+ cam[entry].dp0 = le16_to_cpu(cam[entry].dp0);
+ cam[entry].dp0_mask = le16_to_cpu(cam[entry].dp0_mask);
+ cam[entry].dp1 = le16_to_cpu(cam[entry].dp1);
+ cam[entry].dp1_mask = le16_to_cpu(cam[entry].dp1_mask);
+ cam[entry].dp2 = le16_to_cpu(cam[entry].dp2);
+ cam[entry].dp2_mask = le16_to_cpu(cam[entry].dp2_mask);
+ profile->kpu[kpu].cam[entry] = cam[entry];
+ profile->kpu[kpu].action[entry] = action[entry];
+ }
+ }
+
+ return 0;
+}
+
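
A hedged sketch of the offset arithmetic npc_apply_custom_kpu() uses to walk the firmware blob: each KPU contributes a small header followed by 'entries' CAM records and then 'entries' action records. The record layouts below are stand-ins chosen only to show the offset math, not the real npc_kpu_fwdata definitions.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in record layouts; sizes are illustrative, not the real fwdata structs */
struct demo_kpu_hdr { uint16_t entries; uint16_t rsvd[3]; };
struct demo_cam     { uint16_t dp0, dp0_mask, dp1, dp1_mask, dp2, dp2_mask; };
struct demo_action  { uint32_t word0, word1; };

int main(void)
{
	uint8_t blob[1024] = { 0 };
	uint16_t counts[2] = { 2, 3 };
	size_t offset = 0;

	/* Lay out two fake per-KPU sections: header, CAM array, action array */
	for (int k = 0; k < 2; k++) {
		struct demo_kpu_hdr hdr = { .entries = counts[k] };

		memcpy(blob + offset, &hdr, sizeof(hdr));
		offset += sizeof(hdr) + counts[k] * sizeof(struct demo_cam)
			  + counts[k] * sizeof(struct demo_action);
	}

	/* Walk the blob back with the same offset arithmetic as the parser */
	offset = 0;
	for (int k = 0; k < 2; k++) {
		struct demo_kpu_hdr hdr;
		size_t cam_off, act_off;

		memcpy(&hdr, blob + offset, sizeof(hdr));	/* avoid alignment pitfalls */
		cam_off = offset + sizeof(hdr);
		act_off = cam_off + hdr.entries * sizeof(struct demo_cam);
		offset  = act_off + hdr.entries * sizeof(struct demo_action);
		printf("KPU%d: %u entries, cam@%zu actions@%zu next@%zu\n",
		       k, hdr.entries, cam_off, act_off, offset);
	}
	return 0;
}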
+static int npc_load_kpu_prfl_img(struct rvu *rvu, void __iomem *prfl_addr,
+ u64 prfl_sz, const char *kpu_profile)
+{
+ struct npc_kpu_profile_fwdata *kpu_data = NULL;
+ int rc = -EINVAL;
+
+ kpu_data = (struct npc_kpu_profile_fwdata __force *)prfl_addr;
+ if (le64_to_cpu(kpu_data->signature) == KPU_SIGN &&
+ !strncmp(kpu_data->name, kpu_profile, KPU_NAME_LEN)) {
+ dev_info(rvu->dev, "Loading KPU profile from firmware db: %s\n",
+ kpu_profile);
+ rvu->kpu_fwdata = kpu_data;
+ rvu->kpu_fwdata_sz = prfl_sz;
+ rvu->kpu_prfl_addr = prfl_addr;
+ rc = 0;
+ }
+
+ return rc;
+}
+
+static int npc_fwdb_detect_load_prfl_img(struct rvu *rvu, uint64_t prfl_sz,
+ const char *kpu_profile)
+{
+ struct npc_coalesced_kpu_prfl *img_data = NULL;
+ int i = 0, rc = -EINVAL;
+ void __iomem *kpu_prfl_addr;
+ u16 offset;
+
+ img_data = (struct npc_coalesced_kpu_prfl __force *)rvu->kpu_prfl_addr;
+ if (le64_to_cpu(img_data->signature) == KPU_SIGN &&
+ !strncmp(img_data->name, kpu_profile, KPU_NAME_LEN)) {
+ /* Loaded profile is a single KPU profile. */
+ rc = npc_load_kpu_prfl_img(rvu, rvu->kpu_prfl_addr,
+ prfl_sz, kpu_profile);
+ goto done;
+ }
+
+	/* Otherwise the image may be coalesced; compute offset of the first KPU profile. */
+ offset = offsetof(struct npc_coalesced_kpu_prfl, prfl_sz) +
+ (img_data->num_prfl * sizeof(uint16_t));
+	/* Search the coalesced image for the requested profile. */
+ while (i < img_data->num_prfl) {
+		/* Profile image offsets are rounded up to the next multiple of 8. */
+ offset = ALIGN_8B_CEIL(offset);
+ kpu_prfl_addr = (void __iomem *)((uintptr_t)rvu->kpu_prfl_addr +
+ offset);
+ rc = npc_load_kpu_prfl_img(rvu, kpu_prfl_addr,
+ img_data->prfl_sz[i], kpu_profile);
+ if (!rc)
+ break;
+		/* Compute the next profile image offset from this profile's size. */
+ offset += img_data->prfl_sz[i];
+ i++;
+ }
+done:
+ return rc;
+}
+
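
A small illustration of the coalesced-image walk above: the first profile follows the coalesced header, and each profile image starts at the previous end offset rounded up to the next multiple of 8, which is assumed to be what ALIGN_8B_CEIL() does. The header size and profile sizes below are made up.

#include <stdint.h>
#include <stdio.h>

#define ALIGN_8B_CEIL(x)	(((x) + 7ULL) & ~7ULL)	/* assumed behaviour */

int main(void)
{
	uint16_t prfl_sz[3] = { 1000, 1500, 700 };	/* per-profile sizes, made up */
	unsigned int num_prfl = 3;
	uint64_t offset = 38;	/* end of the coalesced header, made-up value */

	for (unsigned int i = 0; i < num_prfl; i++) {
		offset = ALIGN_8B_CEIL(offset);	/* profile images start 8-byte aligned */
		printf("profile %u at offset %llu, size %u\n",
		       i, (unsigned long long)offset, prfl_sz[i]);
		offset += prfl_sz[i];
	}
	return 0;
}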
+static int npc_load_kpu_profile_fwdb(struct rvu *rvu, const char *kpu_profile)
+{
+ int ret = -EINVAL;
+ u64 prfl_sz;
+
+ /* Setting up the mapping for NPC profile image */
+ ret = npc_fwdb_prfl_img_map(rvu, &rvu->kpu_prfl_addr, &prfl_sz);
+ if (ret < 0)
+ goto done;
+
+ /* Detect if profile is coalesced or single KPU profile and load */
+ ret = npc_fwdb_detect_load_prfl_img(rvu, prfl_sz, kpu_profile);
+ if (ret == 0)
+ goto done;
+
+ /* Cleaning up if KPU profile image from fwdata is not valid. */
+ if (rvu->kpu_prfl_addr) {
+ iounmap(rvu->kpu_prfl_addr);
+ rvu->kpu_prfl_addr = NULL;
+ rvu->kpu_fwdata_sz = 0;
+ rvu->kpu_fwdata = NULL;
+ }
+
+done:
+ return ret;
+}
+
static void npc_load_kpu_profile(struct rvu *rvu)
{
struct npc_kpu_profile_adapter *profile = &rvu->kpu;
+ const char *kpu_profile = rvu->kpu_pfl_name;
+ const struct firmware *fw = NULL;
+ bool retry_fwdb = false;
+
+	/* If the user did not specify a custom profile */
+ if (!strncmp(kpu_profile, def_pfl_name, KPU_NAME_LEN))
+ goto revert_to_default;
+ /* First prepare default KPU, then we'll customize top entries. */
+ npc_prepare_default_kpu(profile);
+
+	/* Order of precedence for loading the NPC profile (high to low):
+	 * 1. Firmware binary in the filesystem.
+	 * 2. Firmware database method.
+	 * 3. Default KPU profile.
+	 */
+ if (!request_firmware(&fw, kpu_profile, rvu->dev)) {
+ dev_info(rvu->dev, "Loading KPU profile from firmware: %s\n",
+ kpu_profile);
+ rvu->kpu_fwdata = kzalloc(fw->size, GFP_KERNEL);
+ if (rvu->kpu_fwdata) {
+ memcpy(rvu->kpu_fwdata, fw->data, fw->size);
+ rvu->kpu_fwdata_sz = fw->size;
+ }
+ release_firmware(fw);
+ retry_fwdb = true;
+ goto program_kpu;
+ }
+
+load_image_fwdb:
+ /* Loading the KPU profile using firmware database */
+ if (npc_load_kpu_profile_fwdb(rvu, kpu_profile))
+ goto revert_to_default;
+
+program_kpu:
+ /* Apply profile customization if firmware was loaded. */
+ if (!rvu->kpu_fwdata_sz || npc_apply_custom_kpu(rvu, profile)) {
+		/* If the image from the firmware filesystem failed to load or
+		 * is invalid, retry with the firmware database method.
+		 */
+ if (rvu->kpu_fwdata || rvu->kpu_fwdata_sz) {
+ /* Loading image from firmware database failed. */
+ if (rvu->kpu_prfl_addr) {
+ iounmap(rvu->kpu_prfl_addr);
+ rvu->kpu_prfl_addr = NULL;
+ } else {
+ kfree(rvu->kpu_fwdata);
+ }
+ rvu->kpu_fwdata = NULL;
+ rvu->kpu_fwdata_sz = 0;
+ if (retry_fwdb) {
+ retry_fwdb = false;
+ goto load_image_fwdb;
+ }
+ }
+ dev_warn(rvu->dev,
+ "Can't load KPU profile %s. Using default.\n",
+ kpu_profile);
+ kfree(rvu->kpu_fwdata);
+ rvu->kpu_fwdata = NULL;
+ goto revert_to_default;
+ }
+
+ dev_info(rvu->dev, "Using custom profile '%s', version %d.%d.%d\n",
+ profile->name, NPC_KPU_VER_MAJ(profile->version),
+ NPC_KPU_VER_MIN(profile->version),
+ NPC_KPU_VER_PATCH(profile->version));
+
+ return;
+
+revert_to_default:
npc_prepare_default_kpu(profile);
}
@@ -962,10 +1720,6 @@ static void npc_parser_profile_init(struct rvu *rvu, int blkaddr)
{
struct rvu_hwinfo *hw = rvu->hw;
int num_pkinds, num_kpus, idx;
- struct npc_pkind *pkind;
-
- /* Get HW limits */
- hw->npc_kpus = (rvu_read64(rvu, blkaddr, NPC_AF_CONST) >> 8) & 0x1F;
/* Disable all KPUs and their entries */
for (idx = 0; idx < hw->npc_kpus; idx++) {
@@ -983,9 +1737,8 @@ static void npc_parser_profile_init(struct rvu *rvu, int blkaddr)
* Check HW max count to avoid configuring junk or
* writing to unsupported CSR addresses.
*/
- pkind = &hw->pkind;
num_pkinds = rvu->kpu.pkinds;
- num_pkinds = min_t(int, pkind->rsrc.max, num_pkinds);
+ num_pkinds = min_t(int, hw->npc_pkinds, num_pkinds);
for (idx = 0; idx < num_pkinds; idx++)
npc_config_kpuaction(rvu, blkaddr, &rvu->kpu.ikpu[idx], 0, idx, true);
@@ -1003,14 +1756,10 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
int nixlf_count = rvu_get_nixlf_count(rvu);
struct npc_mcam *mcam = &rvu->hw->mcam;
int rsvd, err;
+ u16 index;
+ int cntr;
u64 cfg;
- /* Get HW limits */
- cfg = rvu_read64(rvu, blkaddr, NPC_AF_CONST);
- mcam->banks = (cfg >> 44) & 0xF;
- mcam->banksize = (cfg >> 28) & 0xFFFF;
- mcam->counters.max = (cfg >> 48) & 0xFFFF;
-
/* Actual number of MCAM entries vary by entry size */
cfg = (rvu_read64(rvu, blkaddr,
NPC_AF_INTFX_KEX_CFG(0)) >> 32) & 0x07;
@@ -1077,12 +1826,6 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
mcam->hprio_count = mcam->lprio_count;
mcam->hprio_end = mcam->hprio_count;
- /* Reserve last counter for MCAM RX miss action which is set to
- * drop pkt. This way we will know how many pkts didn't match
- * any MCAM entry.
- */
- mcam->counters.max--;
- mcam->rx_miss_act_cntr = mcam->counters.max;
/* Allocate bitmap for managing MCAM counters and memory
* for saving counter to RVU PFFUNC allocation mapping.
@@ -1109,6 +1852,20 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
if (!mcam->cntr_refcnt)
goto free_mem;
+ /* Alloc memory for saving target device of mcam rule */
+ mcam->entry2target_pffunc = devm_kcalloc(rvu->dev, mcam->total_entries,
+ sizeof(u16), GFP_KERNEL);
+ if (!mcam->entry2target_pffunc)
+ goto free_mem;
+
+ for (index = 0; index < mcam->bmap_entries; index++) {
+ mcam->entry2pfvf_map[index] = NPC_MCAM_INVALID_MAP;
+ mcam->entry2cntr_map[index] = NPC_MCAM_INVALID_MAP;
+ }
+
+ for (cntr = 0; cntr < mcam->counters.max; cntr++)
+ mcam->cntr2pfvf_map[cntr] = NPC_MCAM_INVALID_MAP;
+
mutex_init(&mcam->lock);
return 0;
@@ -1118,12 +1875,125 @@ free_mem:
return -ENOMEM;
}
+static void rvu_npc_hw_init(struct rvu *rvu, int blkaddr)
+{
+ struct npc_pkind *pkind = &rvu->hw->pkind;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u64 npc_const, npc_const1;
+ u64 npc_const2 = 0;
+
+ npc_const = rvu_read64(rvu, blkaddr, NPC_AF_CONST);
+ npc_const1 = rvu_read64(rvu, blkaddr, NPC_AF_CONST1);
+ if (npc_const1 & BIT_ULL(63))
+ npc_const2 = rvu_read64(rvu, blkaddr, NPC_AF_CONST2);
+
+ pkind->rsrc.max = NPC_UNRESERVED_PKIND_COUNT;
+ hw->npc_pkinds = (npc_const1 >> 12) & 0xFFULL;
+ hw->npc_kpu_entries = npc_const1 & 0xFFFULL;
+ hw->npc_kpus = (npc_const >> 8) & 0x1FULL;
+ hw->npc_intfs = npc_const & 0xFULL;
+ hw->npc_counters = (npc_const >> 48) & 0xFFFFULL;
+
+ mcam->banks = (npc_const >> 44) & 0xFULL;
+ mcam->banksize = (npc_const >> 28) & 0xFFFFULL;
+ hw->npc_stat_ena = BIT_ULL(9);
+ /* Extended set */
+ if (npc_const2) {
+ hw->npc_ext_set = true;
+		/* 96xx supports only match_stats, with npc_counters
+		 * reflected in the NPC_AF_CONST reg; STAT_SEL and ENA
+		 * are at bit positions [8:0] and 9.
+		 * 98xx has both match_stat and ext, with npc_counters
+		 * reflected in NPC_AF_CONST2; STAT_SEL_EXT is added at
+		 * bit positions [14:12].
+		 * cn10k supports only ext, hence npc_counters in
+		 * NPC_AF_CONST is 0 and npc_counters are reflected in
+		 * NPC_AF_CONST2; STAT_SEL grows from [8:0] to [11:0]
+		 * and the ENA bit moves to bit 63.
+		 */
+ if (!hw->npc_counters)
+ hw->npc_stat_ena = BIT_ULL(63);
+ hw->npc_counters = (npc_const2 >> 16) & 0xFFFFULL;
+ mcam->banksize = npc_const2 & 0xFFFFULL;
+ }
+
+ mcam->counters.max = hw->npc_counters;
+}
+
+static void rvu_npc_setup_interfaces(struct rvu *rvu, int blkaddr)
+{
+ struct npc_mcam_kex *mkex = rvu->kpu.mkex;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u64 nibble_ena, rx_kex, tx_kex;
+ u8 intf;
+
+ /* Reserve last counter for MCAM RX miss action which is set to
+ * drop packet. This way we will know how many pkts didn't match
+ * any MCAM entry.
+ */
+ mcam->counters.max--;
+ mcam->rx_miss_act_cntr = mcam->counters.max;
+
+ rx_kex = mkex->keyx_cfg[NIX_INTF_RX];
+ tx_kex = mkex->keyx_cfg[NIX_INTF_TX];
+ nibble_ena = FIELD_GET(NPC_PARSE_NIBBLE, rx_kex);
+
+ nibble_ena = rvu_npc_get_tx_nibble_cfg(rvu, nibble_ena);
+ if (nibble_ena) {
+ tx_kex &= ~NPC_PARSE_NIBBLE;
+ tx_kex |= FIELD_PREP(NPC_PARSE_NIBBLE, nibble_ena);
+ mkex->keyx_cfg[NIX_INTF_TX] = tx_kex;
+ }
+
+ /* Configure RX interfaces */
+ for (intf = 0; intf < hw->npc_intfs; intf++) {
+ if (is_npc_intf_tx(intf))
+ continue;
+
+ /* Set RX MCAM search key size. LA..LE (ltype only) + Channel */
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf),
+ rx_kex);
+
+ /* If MCAM lookup doesn't result in a match, drop the received
+ * packet. And map this action to a counter to count dropped
+ * packets.
+ */
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_INTFX_MISS_ACT(intf), NIX_RX_ACTIONOP_DROP);
+
+ /* NPC_AF_INTFX_MISS_STAT_ACT[14:12] - counter[11:9]
+ * NPC_AF_INTFX_MISS_STAT_ACT[8:0] - counter[8:0]
+ */
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_INTFX_MISS_STAT_ACT(intf),
+ ((mcam->rx_miss_act_cntr >> 9) << 12) |
+ hw->npc_stat_ena | mcam->rx_miss_act_cntr);
+ }
+
+ /* Configure TX interfaces */
+ for (intf = 0; intf < hw->npc_intfs; intf++) {
+ if (is_npc_intf_rx(intf))
+ continue;
+
+ /* Extract Ltypes LID_LA to LID_LE */
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf),
+ tx_kex);
+
+ /* Set TX miss action to UCAST_DEFAULT i.e
+ * transmit the packet on NIX LF SQ's default channel.
+ */
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_INTFX_MISS_ACT(intf),
+ NIX_TX_ACTIONOP_UCAST_DEFAULT);
+ }
+}
+
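
The STAT_ACT writes above split a wide counter index across two register fields, counter[8:0] into reg[8:0] and counter[11:9] into reg[14:12], with the enable bit position depending on the silicon (bit 9 on 96xx/98xx, bit 63 on cn10k, per rvu_npc_hw_init()). A standalone encode/decode sketch; the counter value is made up and the encode helper masks the low field explicitly.

#include <stdint.h>
#include <stdio.h>

static uint64_t stat_act_encode(uint16_t cntr, uint64_t ena_bit)
{
	/* counter[8:0] -> reg[8:0], counter[11:9] -> reg[14:12] */
	return ((uint64_t)(cntr >> 9) << 12) | ena_bit | (cntr & 0x1FF);
}

static uint16_t stat_act_decode(uint64_t reg)
{
	return (((reg >> 12) & 0x7) << 9) | (reg & 0x1FF);
}

int main(void)
{
	uint16_t cntr = 0x5AB;		/* 11-bit counter index, made up */
	uint64_t ena  = 1ULL << 9;	/* 96xx-style enable bit */
	uint64_t reg  = stat_act_encode(cntr, ena);

	printf("cntr=0x%03x -> reg=0x%llx -> decoded 0x%03x\n",
	       cntr, (unsigned long long)reg, stat_act_decode(reg));
	return 0;
}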
int rvu_npc_init(struct rvu *rvu)
{
struct npc_kpu_profile_adapter *kpu = &rvu->kpu;
struct npc_pkind *pkind = &rvu->hw->pkind;
struct npc_mcam *mcam = &rvu->hw->mcam;
- u64 cfg, nibble_ena, rx_kex, tx_kex;
int blkaddr, entry, bank, err;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
@@ -1132,20 +2002,22 @@ int rvu_npc_init(struct rvu *rvu)
return -ENODEV;
}
+ rvu_npc_hw_init(rvu, blkaddr);
+
/* First disable all MCAM entries, to stop traffic towards NIXLFs */
- cfg = rvu_read64(rvu, blkaddr, NPC_AF_CONST);
- for (bank = 0; bank < ((cfg >> 44) & 0xF); bank++) {
- for (entry = 0; entry < ((cfg >> 28) & 0xFFFF); entry++)
+ for (bank = 0; bank < mcam->banks; bank++) {
+ for (entry = 0; entry < mcam->banksize; entry++)
rvu_write64(rvu, blkaddr,
NPC_AF_MCAMEX_BANKX_CFG(entry, bank), 0);
}
- /* Allocate resource bimap for pkind*/
- pkind->rsrc.max = (rvu_read64(rvu, blkaddr,
- NPC_AF_CONST1) >> 12) & 0xFF;
err = rvu_alloc_bitmap(&pkind->rsrc);
if (err)
return err;
+ /* Reserve PKIND#0 for LBKs. Power reset value of LBK_CH_PKIND is '0',
+ * no need to configure PKIND for all LBKs separately.
+ */
+ rvu_alloc_rsrc(&pkind->rsrc);
/* Allocate mem for pkind to PF and channel mapping info */
pkind->pfchan_map = devm_kcalloc(rvu->dev, pkind->rsrc.max,
@@ -1181,42 +2053,21 @@ int rvu_npc_init(struct rvu *rvu)
((u64)NPC_EC_OIP4_CSUM << 32) | (NPC_EC_IIP4_CSUM << 24) |
BIT_ULL(7) | BIT_ULL(6) | BIT_ULL(2) | BIT_ULL(1));
- /* Set RX and TX side MCAM search key size.
- * LA..LD (ltype only) + Channel
- */
- rx_kex = npc_mkex_default.keyx_cfg[NIX_INTF_RX];
- tx_kex = npc_mkex_default.keyx_cfg[NIX_INTF_TX];
- nibble_ena = FIELD_GET(NPC_PARSE_NIBBLE, rx_kex);
- rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX), rx_kex);
- /* Due to an errata (35786) in A0 pass silicon, parse nibble enable
- * configuration has to be identical for both Rx and Tx interfaces.
- */
- if (is_rvu_96xx_B0(rvu)) {
- tx_kex &= ~NPC_PARSE_NIBBLE;
- tx_kex |= FIELD_PREP(NPC_PARSE_NIBBLE, nibble_ena);
- }
- rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX), tx_kex);
-
- err = npc_mcam_rsrcs_init(rvu, blkaddr);
- if (err)
- return err;
+ rvu_npc_setup_interfaces(rvu, blkaddr);
/* Configure MKEX profile */
npc_load_mkex_profile(rvu, blkaddr, rvu->mkex_pfl_name);
- /* Set TX miss action to UCAST_DEFAULT i.e
- * transmit the packet on NIX LF SQ's default channel.
- */
- rvu_write64(rvu, blkaddr, NPC_AF_INTFX_MISS_ACT(NIX_INTF_TX),
- NIX_TX_ACTIONOP_UCAST_DEFAULT);
+ err = npc_mcam_rsrcs_init(rvu, blkaddr);
+ if (err)
+ return err;
- /* If MCAM lookup doesn't result in a match, drop the received packet.
- * And map this action to a counter to count dropped pkts.
- */
- rvu_write64(rvu, blkaddr, NPC_AF_INTFX_MISS_ACT(NIX_INTF_RX),
- NIX_RX_ACTIONOP_DROP);
- rvu_write64(rvu, blkaddr, NPC_AF_INTFX_MISS_STAT_ACT(NIX_INTF_RX),
- BIT_ULL(9) | mcam->rx_miss_act_cntr);
+ err = npc_flow_steering_init(rvu, blkaddr);
+ if (err) {
+ dev_err(rvu->dev,
+ "Incorrect mkex profile loaded using default mkex\n");
+ npc_load_mkex_profile(rvu, blkaddr, def_pfl_name);
+ }
return 0;
}
@@ -1228,6 +2079,10 @@ void rvu_npc_freemem(struct rvu *rvu)
kfree(pkind->rsrc.bmap);
kfree(mcam->counters.bmap);
+ if (rvu->kpu_prfl_addr)
+ iounmap(rvu->kpu_prfl_addr);
+ else
+ kfree(rvu->kpu_fwdata);
mutex_destroy(&mcam->lock);
}
@@ -1272,6 +2127,9 @@ void rvu_npc_get_mcam_counter_alloc_info(struct rvu *rvu, u16 pcifunc,
static int npc_mcam_verify_entry(struct npc_mcam *mcam,
u16 pcifunc, int entry)
{
+	/* Entries installed by AF are always considered valid */
+ if (is_pffunc_af(pcifunc))
+ return 0;
/* Verify if entry is valid and if it is indeed
* allocated to the requesting PFFUNC.
*/
@@ -1303,7 +2161,8 @@ static void npc_map_mcam_entry_and_cntr(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, u16 entry, u16 cntr)
{
u16 index = entry & (mcam->banksize - 1);
- u16 bank = npc_get_bank(mcam, entry);
+ u32 bank = npc_get_bank(mcam, entry);
+ struct rvu_hwinfo *hw = rvu->hw;
/* Set mapping and increment counter's refcnt */
mcam->entry2cntr_map[entry] = cntr;
@@ -1311,7 +2170,7 @@ static void npc_map_mcam_entry_and_cntr(struct rvu *rvu, struct npc_mcam *mcam,
/* Enable stats */
rvu_write64(rvu, blkaddr,
NPC_AF_MCAMEX_BANKX_STAT_ACT(index, bank),
- BIT_ULL(9) | cntr);
+ ((cntr >> 9) << 12) | hw->npc_stat_ena | cntr);
}
static void npc_unmap_mcam_entry_and_cntr(struct rvu *rvu,
@@ -1381,6 +2240,7 @@ static void npc_mcam_free_all_entries(struct rvu *rvu, struct npc_mcam *mcam,
npc_unmap_mcam_entry_and_cntr(rvu, mcam,
blkaddr, index,
cntr);
+ mcam->entry2target_pffunc[index] = 0x0;
}
}
}
@@ -1566,6 +2426,17 @@ static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc,
goto alloc;
}
+	/* For a VF, the base MCAM match rule is set by its PF, and all
+	 * further MCAM rules installed by the VF on its own are
+	 * concatenated with that base rule. Hence PF entries should be
+	 * at a lower priority than VF entries; otherwise the base rule
+	 * always hits and the rules installed by the VF are of no use.
+	 * So if the request is from a PF and is NOT a priority
+	 * allocation request, allocate low priority entries.
+	 */
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK))
+ goto lprio_alloc;
+
/* Find out the search range for non-priority allocation request
*
* Get MCAM free entry count in middle zone.
@@ -1591,6 +2462,7 @@ static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc,
/* Not enough free entries, search all entries in reverse,
* so that low priority ones will get used up.
*/
+lprio_alloc:
reverse = true;
start = 0;
end = mcam->bmap_entries;
@@ -1716,8 +2588,11 @@ int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu,
rsp->free_count = 0;
/* Check if ref_entry is within range */
- if (req->priority && req->ref_entry >= mcam->bmap_entries)
+ if (req->priority && req->ref_entry >= mcam->bmap_entries) {
+ dev_err(rvu->dev, "%s: reference entry %d is out of range\n",
+ __func__, req->ref_entry);
return NPC_MCAM_INVALID_REQ;
+ }
/* ref_entry can't be '0' if requested priority is high.
* Can't be last entry if requested priority is low.
@@ -1730,11 +2605,15 @@ int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu,
/* Since list of allocated indices needs to be sent to requester,
* max number of non-contiguous entries per mbox msg is limited.
*/
- if (!req->contig && req->count > NPC_MAX_NONCONTIG_ENTRIES)
+ if (!req->contig && req->count > NPC_MAX_NONCONTIG_ENTRIES) {
+ dev_err(rvu->dev,
+			"%s: %d non-contiguous MCAM entries requested, more than the max (%d) allowed\n",
+ __func__, req->count, NPC_MAX_NONCONTIG_ENTRIES);
return NPC_MCAM_INVALID_REQ;
+ }
/* Alloc request from PFFUNC with no NIXLF attached should be denied */
- if (!is_nixlf_attached(rvu, pcifunc))
+ if (!is_pffunc_af(pcifunc) && !is_nixlf_attached(rvu, pcifunc))
return NPC_MCAM_ALLOC_DENIED;
return npc_mcam_alloc_entries(mcam, pcifunc, req, rsp);
@@ -1754,7 +2633,7 @@ int rvu_mbox_handler_npc_mcam_free_entry(struct rvu *rvu,
return NPC_MCAM_INVALID_REQ;
/* Free request from PFFUNC with no NIXLF attached, ignore */
- if (!is_nixlf_attached(rvu, pcifunc))
+ if (!is_pffunc_af(pcifunc) && !is_nixlf_attached(rvu, pcifunc))
return NPC_MCAM_INVALID_REQ;
mutex_lock(&mcam->lock);
@@ -1766,7 +2645,8 @@ int rvu_mbox_handler_npc_mcam_free_entry(struct rvu *rvu,
if (rc)
goto exit;
- mcam->entry2pfvf_map[req->entry] = 0;
+ mcam->entry2pfvf_map[req->entry] = NPC_MCAM_INVALID_MAP;
+ mcam->entry2target_pffunc[req->entry] = 0x0;
npc_mcam_clear_bit(mcam, req->entry);
npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, false);
@@ -1786,13 +2666,39 @@ exit:
return rc;
}
+int rvu_mbox_handler_npc_mcam_read_entry(struct rvu *rvu,
+ struct npc_mcam_read_entry_req *req,
+ struct npc_mcam_read_entry_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr, rc;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+ rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
+ if (!rc) {
+ npc_read_mcam_entry(rvu, mcam, blkaddr, req->entry,
+ &rsp->entry_data,
+ &rsp->intf, &rsp->enable);
+ }
+
+ mutex_unlock(&mcam->lock);
+ return rc;
+}
+
int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu,
struct npc_mcam_write_entry_req *req,
struct msg_rsp *rsp)
{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
struct npc_mcam *mcam = &rvu->hw->mcam;
u16 pcifunc = req->hdr.pcifunc;
int blkaddr, rc;
+ u8 nix_intf;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr < 0)
@@ -1809,12 +2715,27 @@ int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu,
goto exit;
}
- if (req->intf != NIX_INTF_RX && req->intf != NIX_INTF_TX) {
+ if (!is_npc_interface_valid(rvu, req->intf)) {
rc = NPC_MCAM_INVALID_REQ;
goto exit;
}
- npc_config_mcam_entry(rvu, mcam, blkaddr, req->entry, req->intf,
+ if (is_npc_intf_tx(req->intf))
+ nix_intf = pfvf->nix_tx_intf;
+ else
+ nix_intf = pfvf->nix_rx_intf;
+
+ if (!is_pffunc_af(pcifunc) &&
+ npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf, pcifunc)) {
+ rc = NPC_MCAM_INVALID_REQ;
+ goto exit;
+ }
+
+ /* For AF installed rules, the nix_intf should be set to target NIX */
+ if (is_pffunc_af(req->hdr.pcifunc))
+ nix_intf = req->intf;
+
+ npc_config_mcam_entry(rvu, mcam, blkaddr, req->entry, nix_intf,
&req->entry_data, req->enable_entry);
if (req->set_cntr)
@@ -1956,7 +2877,7 @@ int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu,
return NPC_MCAM_INVALID_REQ;
/* If the request is from a PFFUNC with no NIXLF attached, ignore */
- if (!is_nixlf_attached(rvu, pcifunc))
+ if (!is_pffunc_af(pcifunc) && !is_nixlf_attached(rvu, pcifunc))
return NPC_MCAM_INVALID_REQ;
/* Since list of allocated counter IDs needs to be sent to requester,
@@ -2143,6 +3064,7 @@ int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu,
struct npc_mcam_alloc_and_write_entry_req *req,
struct npc_mcam_alloc_and_write_entry_rsp *rsp)
{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
struct npc_mcam_alloc_counter_req cntr_req;
struct npc_mcam_alloc_counter_rsp cntr_rsp;
struct npc_mcam_alloc_entry_req entry_req;
@@ -2151,12 +3073,17 @@ int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu,
u16 entry = NPC_MCAM_ENTRY_INVALID;
u16 cntr = NPC_MCAM_ENTRY_INVALID;
int blkaddr, rc;
+ u8 nix_intf;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr < 0)
return NPC_MCAM_INVALID_REQ;
- if (req->intf != NIX_INTF_RX && req->intf != NIX_INTF_TX)
+ if (!is_npc_interface_valid(rvu, req->intf))
+ return NPC_MCAM_INVALID_REQ;
+
+ if (npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf,
+ req->hdr.pcifunc))
return NPC_MCAM_INVALID_REQ;
/* Try to allocate a MCAM entry */
@@ -2188,7 +3115,7 @@ int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu,
if (rc) {
/* Free allocated MCAM entry */
mutex_lock(&mcam->lock);
- mcam->entry2pfvf_map[entry] = 0;
+ mcam->entry2pfvf_map[entry] = NPC_MCAM_INVALID_MAP;
npc_mcam_clear_bit(mcam, entry);
mutex_unlock(&mcam->lock);
return rc;
@@ -2198,7 +3125,13 @@ int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu,
write_entry:
mutex_lock(&mcam->lock);
- npc_config_mcam_entry(rvu, mcam, blkaddr, entry, req->intf,
+
+ if (is_npc_intf_tx(req->intf))
+ nix_intf = pfvf->nix_tx_intf;
+ else
+ nix_intf = pfvf->nix_rx_intf;
+
+ npc_config_mcam_entry(rvu, mcam, blkaddr, entry, nix_intf,
&req->entry_data, req->enable_entry);
if (req->alloc_cntr)
@@ -2257,26 +3190,208 @@ int rvu_mbox_handler_npc_get_kex_cfg(struct rvu *rvu, struct msg_req *req,
return 0;
}
-int rvu_npc_update_rxvlan(struct rvu *rvu, u16 pcifunc, int nixlf)
+static int
+npc_set_var_len_offset_pkind(struct rvu *rvu, u16 pcifunc, u64 pkind,
+ u8 var_len_off, u8 var_len_off_mask, u8 shift_dir)
+{
+ struct npc_kpu_action0 *act0;
+ u8 shift_count = 0;
+ int blkaddr;
+ u64 val;
+
+ if (!var_len_off_mask)
+ return -EINVAL;
+
+ if (var_len_off_mask != 0xff) {
+ if (shift_dir)
+ shift_count = __ffs(var_len_off_mask);
+ else
+ shift_count = (8 - __fls(var_len_off_mask));
+ }
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, pcifunc);
+ if (blkaddr < 0) {
+ dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
+ return -EINVAL;
+ }
+ val = rvu_read64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind));
+ act0 = (struct npc_kpu_action0 *)&val;
+ act0->var_len_shift = shift_count;
+ act0->var_len_right = shift_dir;
+ act0->var_len_mask = var_len_off_mask;
+ act0->var_len_offset = var_len_off;
+ rvu_write64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind), val);
+ return 0;
+}
+
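
A worked example of the shift_count arithmetic in npc_set_var_len_offset_pkind(): when shifting right the count is the index of the lowest set bit of the mask, otherwise it is 8 minus the index of the highest set bit (kernel __ffs()/__fls() are 0-based). Userspace sketch with GCC builtins standing in for the kernel helpers; the mask value is made up and must be non-zero.

#include <stdint.h>
#include <stdio.h>

static unsigned int shift_count(uint8_t mask, int shift_right)
{
	/* mask must be non-zero; the driver rejects 0 with -EINVAL */
	if (mask == 0xff)
		return 0;				/* full mask: no shift needed */
	if (shift_right)
		return __builtin_ctz(mask);		/* kernel __ffs(): lowest set bit */
	return 8 - (31 - __builtin_clz(mask));		/* 8 - __fls(): highest set bit */
}

int main(void)
{
	uint8_t mask = 0x3C;	/* bits 2..5 of the variable length offset are used */

	printf("mask=0x%02x shift_right=%u shift_left=%u\n",
	       mask, shift_count(mask, 1), shift_count(mask, 0));
	return 0;
}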
+int
+rvu_npc_set_parse_mode(struct rvu *rvu, u16 pcifunc, u64 mode, u8 dir,
+ u64 pkind, u8 var_len_off, u8 var_len_off_mask,
+ u8 shift_dir)
+
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ int blkaddr, nixlf, rc, intf_mode;
+ int pf = rvu_get_pf(pcifunc);
+ bool enable_higig2 = false;
+ u64 rxpkind, txpkind;
+ u8 cgx_id, lmac_id;
+
+ /* use default pkind to disable edsa/higig */
+ rxpkind = rvu_npc_get_pkind(rvu, pf);
+ txpkind = NPC_TX_DEF_PKIND;
+ intf_mode = NPC_INTF_MODE_DEF;
+
+ if (mode & OTX2_PRIV_FLAGS_EDSA) {
+ rxpkind = NPC_RX_EDSA_PKIND;
+ intf_mode = NPC_INTF_MODE_EDSA;
+ } else if (mode & OTX2_PRIV_FLAGS_FDSA) {
+ rxpkind = NPC_RX_EDSA_PKIND;
+ intf_mode = NPC_INTF_MODE_FDSA;
+ } else if (mode & OTX2_PRIV_FLAGS_HIGIG) {
+ /* Silicon does not support enabling higig in time stamp mode */
+ if (pfvf->hw_rx_tstamp_en ||
+ rvu_nix_is_ptp_tx_enabled(rvu, pcifunc))
+ return NPC_AF_ERR_HIGIG_CONFIG_FAIL;
+
+ if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_HIGIG2))
+ return NPC_AF_ERR_HIGIG_NOT_SUPPORTED;
+
+ rxpkind = NPC_RX_HIGIG_PKIND;
+ txpkind = NPC_TX_HIGIG_PKIND;
+ intf_mode = NPC_INTF_MODE_HIGIG;
+ enable_higig2 = true;
+ } else if (mode & OTX2_PRIV_FLAGS_CUSTOM) {
+ if (pkind == NPC_RX_CUSTOM_PRE_L2_PKIND) {
+ rc = npc_set_var_len_offset_pkind(rvu, pcifunc, pkind,
+ var_len_off,
+ var_len_off_mask,
+ shift_dir);
+ if (rc)
+ return rc;
+ }
+ rxpkind = pkind;
+ txpkind = pkind;
+ }
+
+ if (dir & PKIND_RX) {
+		/* Rx pkind set request is valid only for CGX mapped PFs */
+ if (!is_cgx_config_permitted(rvu, pcifunc))
+ return 0;
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ rc = cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu),
+ lmac_id, rxpkind);
+ if (rc)
+ return rc;
+ }
+
+ if (dir & PKIND_TX) {
+		/* Tx pkind set request is valid only if a NIXLF is attached */
+ rc = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
+ if (rc)
+ return rc;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf),
+ txpkind);
+ }
+
+ if (enable_higig2 ^ rvu_cgx_is_higig2_enabled(rvu, pf))
+ rvu_cgx_enadis_higig2(rvu, pf, enable_higig2);
+
+ pfvf->intf_mode = intf_mode;
+ return 0;
+}
+
+int rvu_mbox_handler_npc_set_pkind(struct rvu *rvu,
+ struct npc_set_pkind *req,
+ struct msg_rsp *rsp)
+{
+ return rvu_npc_set_parse_mode(rvu, req->hdr.pcifunc, req->mode,
+ req->dir, req->pkind, req->var_len_off,
+ req->var_len_off_mask, req->shift_dir);
+}
+
+int rvu_mbox_handler_npc_read_base_steer_rule(struct rvu *rvu,
+ struct msg_req *req,
+ struct npc_mcam_read_base_rule_rsp *rsp)
+{
struct npc_mcam *mcam = &rvu->hw->mcam;
- int blkaddr, index;
- bool enable;
+ int index, blkaddr, nixlf, rc = 0;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
+ u8 intf, enable;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
+ return NPC_MCAM_INVALID_REQ;
- if (!pfvf->rxvlan)
- return 0;
+ /* Return the channel number in case of PF */
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK)) {
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ rsp->entry.kw[0] = pfvf->rx_chan_base;
+ rsp->entry.kw_mask[0] = 0xFFFULL;
+ goto out;
+ }
+
+ /* Find the pkt steering rule installed by PF to this VF */
+ mutex_lock(&mcam->lock);
+ for (index = 0; index < mcam->bmap_entries; index++) {
+ if (mcam->entry2target_pffunc[index] == pcifunc)
+ goto read_entry;
+ }
+ rc = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
+ if (rc < 0) {
+ mutex_unlock(&mcam->lock);
+ goto out;
+ }
+ /* Read the default ucast entry if there is no pkt steering rule */
index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf,
NIXLF_UCAST_ENTRY);
- pfvf->entry.action = npc_get_mcam_action(rvu, mcam, blkaddr, index);
- enable = is_mcam_entry_enabled(rvu, mcam, blkaddr, index);
- npc_config_mcam_entry(rvu, mcam, blkaddr, pfvf->rxvlan_index,
- NIX_INTF_RX, &pfvf->entry, enable);
+read_entry:
+ /* Read the mcam entry */
+ npc_read_mcam_entry(rvu, mcam, blkaddr, index, &rsp->entry, &intf,
+ &enable);
+ mutex_unlock(&mcam->lock);
+out:
+ return rc;
+}
+
+int rvu_mbox_handler_npc_mcam_entry_stats(struct rvu *rvu,
+ struct npc_mcam_get_stats_req *req,
+ struct npc_mcam_get_stats_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 index, cntr;
+ int blkaddr;
+ u64 regval;
+ u32 bank;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+
+ index = req->entry & (mcam->banksize - 1);
+ bank = npc_get_bank(mcam, req->entry);
+
+ /* read MCAM entry STAT_ACT register */
+ regval = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_STAT_ACT(index, bank));
+
+ if (!(regval & rvu->hw->npc_stat_ena)) {
+ rsp->stat_ena = 0;
+ mutex_unlock(&mcam->lock);
+ return 0;
+ }
+
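+	/* Low 9 bits of STAT_ACT select the match counter read below */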
+ cntr = regval & 0x1FF;
+
+ rsp->stat_ena = 1;
+ rsp->stat = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(cntr));
+ rsp->stat &= BIT_ULL(48) - 1;
+
+ mutex_unlock(&mcam->lock);
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
new file mode 100644
index 000000000000..0ad83405aacd
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -0,0 +1,1434 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2020 Marvell.
+ */
+
+#include <linux/bitfield.h>
+
+#include "rvu_struct.h"
+#include "rvu_reg.h"
+#include "rvu.h"
+#include "npc.h"
+
+#define NPC_BYTESM GENMASK_ULL(19, 16)
+#define NPC_HDR_OFFSET GENMASK_ULL(15, 8)
+#define NPC_KEY_OFFSET GENMASK_ULL(5, 0)
+#define NPC_LDATA_EN BIT_ULL(7)
+
+static const char * const npc_flow_names[] = {
+ [NPC_DMAC] = "dmac",
+ [NPC_SMAC] = "smac",
+ [NPC_ETYPE] = "ether type",
+ [NPC_VLAN_ETYPE_CTAG] = "vlan ether type ctag",
+ [NPC_VLAN_ETYPE_STAG] = "vlan ether type stag",
+ [NPC_OUTER_VID] = "outer vlan id",
+ [NPC_TOS] = "tos",
+ [NPC_SIP_IPV4] = "ipv4 source ip",
+ [NPC_DIP_IPV4] = "ipv4 destination ip",
+ [NPC_SIP_IPV6] = "ipv6 source ip",
+ [NPC_DIP_IPV6] = "ipv6 destination ip",
+ [NPC_IPPROTO_TCP] = "ip proto tcp",
+ [NPC_IPPROTO_UDP] = "ip proto udp",
+ [NPC_IPPROTO_SCTP] = "ip proto sctp",
+ [NPC_IPPROTO_ICMP] = "ip proto icmp",
+ [NPC_IPPROTO_ICMP6] = "ip proto icmp6",
+ [NPC_IPPROTO_AH] = "ip proto AH",
+ [NPC_IPPROTO_ESP] = "ip proto ESP",
+ [NPC_SPORT_TCP] = "tcp source port",
+ [NPC_DPORT_TCP] = "tcp destination port",
+ [NPC_SPORT_UDP] = "udp source port",
+ [NPC_DPORT_UDP] = "udp destination port",
+ [NPC_SPORT_SCTP] = "sctp source port",
+ [NPC_DPORT_SCTP] = "sctp destination port",
+ [NPC_FDSA_VAL] = "FDSA tag value ",
+ [NPC_UNKNOWN] = "unknown",
+};
+
+const char *npc_get_field_name(u8 hdr)
+{
+ if (hdr >= ARRAY_SIZE(npc_flow_names))
+ return npc_flow_names[NPC_UNKNOWN];
+
+ return npc_flow_names[hdr];
+}
+
+/* Compute keyword masks and figure out the number of keywords a field
+ * spans in the key.
+ */
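+/* For example (illustrative values), a 16-bit field placed at bit offset 56
+ * of kw[start_kwi] spans two keywords: 8 bits in kw[start_kwi] and the
+ * remaining 8 bits in kw[start_kwi + 1], so nr_kws ends up as 2.
+ */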
+static void npc_set_kw_masks(struct npc_mcam *mcam, u8 type,
+ u8 nr_bits, int start_kwi, int offset, u8 intf)
+{
+ struct npc_key_field *field = &mcam->rx_key_fields[type];
+ u8 bits_in_kw;
+ int max_kwi;
+
+ if (mcam->banks_per_entry == 1)
+ max_kwi = 1; /* NPC_MCAM_KEY_X1 */
+ else if (mcam->banks_per_entry == 2)
+ max_kwi = 3; /* NPC_MCAM_KEY_X2 */
+ else
+ max_kwi = 6; /* NPC_MCAM_KEY_X4 */
+
+ if (is_npc_intf_tx(intf))
+ field = &mcam->tx_key_fields[type];
+
+ if (offset + nr_bits <= 64) {
+ /* one KW only */
+ if (start_kwi > max_kwi)
+ return;
+ field->kw_mask[start_kwi] |= GENMASK_ULL(nr_bits - 1, 0)
+ << offset;
+ field->nr_kws = 1;
+ } else if (offset + nr_bits > 64 &&
+ offset + nr_bits <= 128) {
+ /* two KWs */
+ if (start_kwi + 1 > max_kwi)
+ return;
+ /* first KW mask */
+ bits_in_kw = 64 - offset;
+ field->kw_mask[start_kwi] |= GENMASK_ULL(bits_in_kw - 1, 0)
+ << offset;
+ /* second KW mask i.e. mask for rest of bits */
+ bits_in_kw = nr_bits + offset - 64;
+ field->kw_mask[start_kwi + 1] |= GENMASK_ULL(bits_in_kw - 1, 0);
+ field->nr_kws = 2;
+ } else {
+ /* three KWs */
+ if (start_kwi + 2 > max_kwi)
+ return;
+ /* first KW mask */
+ bits_in_kw = 64 - offset;
+ field->kw_mask[start_kwi] |= GENMASK_ULL(bits_in_kw - 1, 0)
+ << offset;
+ /* second KW mask */
+ field->kw_mask[start_kwi + 1] = ~0ULL;
+ /* third KW mask i.e. mask for rest of bits */
+ bits_in_kw = nr_bits + offset - 128;
+ field->kw_mask[start_kwi + 2] |= GENMASK_ULL(bits_in_kw - 1, 0);
+ field->nr_kws = 3;
+ }
+}
+
+/* Helper function to figure out whether field exists in the key */
+static bool npc_is_field_present(struct rvu *rvu, enum key_fields type, u8 intf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct npc_key_field *input;
+
+ input = &mcam->rx_key_fields[type];
+ if (is_npc_intf_tx(intf))
+ input = &mcam->tx_key_fields[type];
+
+ return input->nr_kws > 0;
+}
+
+static bool npc_is_same(struct npc_key_field *input,
+ struct npc_key_field *field)
+{
+ return memcmp(&input->layer_mdata, &field->layer_mdata,
+ sizeof(struct npc_layer_mdata)) == 0;
+}
+
+static void npc_set_layer_mdata(struct npc_mcam *mcam, enum key_fields type,
+ u64 cfg, u8 lid, u8 lt, u8 intf)
+{
+ struct npc_key_field *input = &mcam->rx_key_fields[type];
+
+ if (is_npc_intf_tx(intf))
+ input = &mcam->tx_key_fields[type];
+
+ input->layer_mdata.hdr = FIELD_GET(NPC_HDR_OFFSET, cfg);
+ input->layer_mdata.key = FIELD_GET(NPC_KEY_OFFSET, cfg);
+ input->layer_mdata.len = FIELD_GET(NPC_BYTESM, cfg) + 1;
+ input->layer_mdata.ltype = lt;
+ input->layer_mdata.lid = lid;
+}
+
+static bool npc_check_overlap_fields(struct npc_key_field *input1,
+ struct npc_key_field *input2)
+{
+ int kwi;
+
+	/* Fields with the same layer id but different ltypes are mutually
+	 * exclusive, hence they are allowed to overlap
+	 */
+ if (input1->layer_mdata.lid == input2->layer_mdata.lid &&
+ input1->layer_mdata.ltype != input2->layer_mdata.ltype)
+ return false;
+
+ for (kwi = 0; kwi < NPC_MAX_KWS_IN_KEY; kwi++) {
+ if (input1->kw_mask[kwi] & input2->kw_mask[kwi])
+ return true;
+ }
+
+ return false;
+}
+
+/* Helper function to check whether a given field overlaps with any other
+ * field in the key. Due to limitations on key size and the key extraction
+ * profile in use, higher layers can overwrite a lower layer's header fields.
+ * Hence overlap needs to be checked.
+ */
+static bool npc_check_overlap(struct rvu *rvu, int blkaddr,
+ enum key_fields type, u8 start_lid, u8 intf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct npc_key_field *dummy, *input;
+ int start_kwi, offset;
+ u8 nr_bits, lid, lt, ld;
+ u64 cfg;
+
+ dummy = &mcam->rx_key_fields[NPC_UNKNOWN];
+ input = &mcam->rx_key_fields[type];
+
+ if (is_npc_intf_tx(intf)) {
+ dummy = &mcam->tx_key_fields[NPC_UNKNOWN];
+ input = &mcam->tx_key_fields[type];
+ }
+
+ for (lid = start_lid; lid < NPC_MAX_LID; lid++) {
+ for (lt = 0; lt < NPC_MAX_LT; lt++) {
+ for (ld = 0; ld < NPC_MAX_LD; ld++) {
+ cfg = rvu_read64(rvu, blkaddr,
+ NPC_AF_INTFX_LIDX_LTX_LDX_CFG
+ (intf, lid, lt, ld));
+ if (!FIELD_GET(NPC_LDATA_EN, cfg))
+ continue;
+ memset(dummy, 0, sizeof(struct npc_key_field));
+ npc_set_layer_mdata(mcam, NPC_UNKNOWN, cfg,
+ lid, lt, intf);
+ /* exclude input */
+ if (npc_is_same(input, dummy))
+ continue;
+ start_kwi = dummy->layer_mdata.key / 8;
+ offset = (dummy->layer_mdata.key * 8) % 64;
+ nr_bits = dummy->layer_mdata.len * 8;
+ /* form KW masks */
+ npc_set_kw_masks(mcam, NPC_UNKNOWN, nr_bits,
+ start_kwi, offset, intf);
+				/* check whether any of the input field's bits
+				 * fall within any other field's bits.
+				 */
+ if (npc_check_overlap_fields(dummy, input))
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+static bool npc_check_field(struct rvu *rvu, int blkaddr, enum key_fields type,
+ u8 intf)
+{
+ if (!npc_is_field_present(rvu, type, intf) ||
+ npc_check_overlap(rvu, blkaddr, type, 0, intf))
+ return false;
+ return true;
+}
+
+static void npc_scan_parse_result(struct npc_mcam *mcam, u8 bit_number,
+ u8 key_nibble, u8 intf)
+{
+ u8 offset = (key_nibble * 4) % 64; /* offset within key word */
+ u8 kwi = (key_nibble * 4) / 64; /* which word in key */
+ u8 nr_bits = 4; /* bits in a nibble */
+ u8 type;
+
+ switch (bit_number) {
+ case 0 ... 2:
+ type = NPC_CHAN;
+ break;
+ case 3:
+ type = NPC_ERRLEV;
+ break;
+ case 4 ... 5:
+ type = NPC_ERRCODE;
+ break;
+ case 6:
+ type = NPC_LXMB;
+ break;
+ /* check for LTYPE only as of now */
+ case 9:
+ type = NPC_LA;
+ break;
+ case 12:
+ type = NPC_LB;
+ break;
+ case 15:
+ type = NPC_LC;
+ break;
+ case 18:
+ type = NPC_LD;
+ break;
+ case 21:
+ type = NPC_LE;
+ break;
+ case 24:
+ type = NPC_LF;
+ break;
+ case 27:
+ type = NPC_LG;
+ break;
+ case 30:
+ type = NPC_LH;
+ break;
+ default:
+ return;
+ }
+ npc_set_kw_masks(mcam, type, nr_bits, kwi, offset, intf);
+}
+
+static void npc_handle_multi_layer_fields(struct rvu *rvu, int blkaddr, u8 intf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct npc_key_field *key_fields;
+ /* Ether type can come from three layers
+ * (ethernet, single tagged, double tagged)
+ */
+ struct npc_key_field *etype_ether;
+ struct npc_key_field *etype_tag1;
+ struct npc_key_field *etype_tag2;
+ /* Outer VLAN TCI can come from two layers
+ * (single tagged, double tagged)
+ */
+ struct npc_key_field *vlan_tag1;
+ struct npc_key_field *vlan_tag2;
+ u64 *features;
+ u8 start_lid;
+ int i;
+
+ key_fields = mcam->rx_key_fields;
+ features = &mcam->rx_features;
+
+ if (is_npc_intf_tx(intf)) {
+ key_fields = mcam->tx_key_fields;
+ features = &mcam->tx_features;
+ }
+
+	/* Handle header fields which can come from multiple layers, like
+	 * etype and outer vlan tci. These fields should have the same position
+	 * in the key; otherwise installing a mcam rule needs more than one
+	 * entry, which complicates mcam space management.
+	 */
+ etype_ether = &key_fields[NPC_ETYPE_ETHER];
+ etype_tag1 = &key_fields[NPC_ETYPE_TAG1];
+ etype_tag2 = &key_fields[NPC_ETYPE_TAG2];
+ vlan_tag1 = &key_fields[NPC_VLAN_TAG1];
+ vlan_tag2 = &key_fields[NPC_VLAN_TAG2];
+
+ /* if key profile programmed does not extract Ethertype at all */
+ if (!etype_ether->nr_kws && !etype_tag1->nr_kws && !etype_tag2->nr_kws)
+ goto vlan_tci;
+
+ /* if key profile programmed extracts Ethertype from one layer */
+ if (etype_ether->nr_kws && !etype_tag1->nr_kws && !etype_tag2->nr_kws)
+ key_fields[NPC_ETYPE] = *etype_ether;
+ if (!etype_ether->nr_kws && etype_tag1->nr_kws && !etype_tag2->nr_kws)
+ key_fields[NPC_ETYPE] = *etype_tag1;
+ if (!etype_ether->nr_kws && !etype_tag1->nr_kws && etype_tag2->nr_kws)
+ key_fields[NPC_ETYPE] = *etype_tag2;
+
+ /* if key profile programmed extracts Ethertype from multiple layers */
+ if (etype_ether->nr_kws && etype_tag1->nr_kws) {
+ for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
+ if (etype_ether->kw_mask[i] != etype_tag1->kw_mask[i])
+ goto vlan_tci;
+ }
+ key_fields[NPC_ETYPE] = *etype_tag1;
+ }
+ if (etype_ether->nr_kws && etype_tag2->nr_kws) {
+ for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
+ if (etype_ether->kw_mask[i] != etype_tag2->kw_mask[i])
+ goto vlan_tci;
+ }
+ key_fields[NPC_ETYPE] = *etype_tag2;
+ }
+ if (etype_tag1->nr_kws && etype_tag2->nr_kws) {
+ for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
+ if (etype_tag1->kw_mask[i] != etype_tag2->kw_mask[i])
+ goto vlan_tci;
+ }
+ key_fields[NPC_ETYPE] = *etype_tag2;
+ }
+
+ /* check none of higher layers overwrite Ethertype */
+ start_lid = key_fields[NPC_ETYPE].layer_mdata.lid + 1;
+ if (npc_check_overlap(rvu, blkaddr, NPC_ETYPE, start_lid, intf))
+ goto vlan_tci;
+ *features |= BIT_ULL(NPC_ETYPE);
+vlan_tci:
+ /* if key profile does not extract outer vlan tci at all */
+ if (!vlan_tag1->nr_kws && !vlan_tag2->nr_kws)
+ goto done;
+
+ /* if key profile extracts outer vlan tci from one layer */
+ if (vlan_tag1->nr_kws && !vlan_tag2->nr_kws)
+ key_fields[NPC_OUTER_VID] = *vlan_tag1;
+ if (!vlan_tag1->nr_kws && vlan_tag2->nr_kws)
+ key_fields[NPC_OUTER_VID] = *vlan_tag2;
+
+ /* if key profile extracts outer vlan tci from multiple layers */
+ if (vlan_tag1->nr_kws && vlan_tag2->nr_kws) {
+ for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
+ if (vlan_tag1->kw_mask[i] != vlan_tag2->kw_mask[i])
+ goto done;
+ }
+ key_fields[NPC_OUTER_VID] = *vlan_tag2;
+ }
+ /* check none of higher layers overwrite outer vlan tci */
+ start_lid = key_fields[NPC_OUTER_VID].layer_mdata.lid + 1;
+ if (npc_check_overlap(rvu, blkaddr, NPC_OUTER_VID, start_lid, intf))
+ goto done;
+ *features |= BIT_ULL(NPC_OUTER_VID);
+done:
+ return;
+}
+
+static void npc_scan_ldata(struct rvu *rvu, int blkaddr, u8 lid,
+ u8 lt, u64 cfg, u8 intf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u8 hdr, key, nr_bytes, bit_offset;
+ u8 la_ltype, la_start;
+ /* starting KW index and starting bit position */
+ int start_kwi, offset;
+
+ nr_bytes = FIELD_GET(NPC_BYTESM, cfg) + 1;
+ hdr = FIELD_GET(NPC_HDR_OFFSET, cfg);
+ key = FIELD_GET(NPC_KEY_OFFSET, cfg);
+ start_kwi = key / 8;
+ offset = (key * 8) % 64;
+
+	/* For Tx, Layer A has NIX_INST_HDR_S (8 bytes) preceding the
+	 * ethernet header.
+	 */
+ if (is_npc_intf_tx(intf)) {
+ la_ltype = NPC_LT_LA_IH_NIX_ETHER;
+ la_start = 8;
+ } else {
+ la_ltype = NPC_LT_LA_ETHER;
+ la_start = 0;
+ }
+
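+/* NPC_SCAN_HDR(): if (lid, lt) match and the configured extraction window
+ * [hdr, hdr + nr_bytes) fully covers the header field [hstart, hstart + hlen),
+ * record the field's layer metadata and compute its keyword masks. bit_offset
+ * is the distance, in bits, from the end of the field to the end of the
+ * extraction window.
+ */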
+#define NPC_SCAN_HDR(name, hlid, hlt, hstart, hlen) \
+do { \
+ if (lid == (hlid) && lt == (hlt)) { \
+ if ((hstart) >= hdr && \
+ ((hstart) + (hlen)) <= (hdr + nr_bytes)) { \
+ bit_offset = (hdr + nr_bytes - (hstart) - (hlen)) * 8; \
+ npc_set_layer_mdata(mcam, (name), cfg, lid, lt, intf); \
+ npc_set_kw_masks(mcam, (name), (hlen) * 8, \
+ start_kwi, offset + bit_offset, intf);\
+ } \
+ } \
+} while (0)
+
+	/* List LID, LTYPE, start offset from layer and length (in bytes) of
+	 * packet header fields below.
+	 * Example: Source IP is 4 bytes and starts at the 12th byte of the
+	 * IP header.
+	 */
+ NPC_SCAN_HDR(NPC_TOS, NPC_LID_LC, NPC_LT_LC_IP, 1, 1);
+ NPC_SCAN_HDR(NPC_SIP_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 12, 4);
+ NPC_SCAN_HDR(NPC_DIP_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 16, 4);
+ NPC_SCAN_HDR(NPC_SIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 8, 16);
+ NPC_SCAN_HDR(NPC_DIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 24, 16);
+ NPC_SCAN_HDR(NPC_SPORT_UDP, NPC_LID_LD, NPC_LT_LD_UDP, 0, 2);
+ NPC_SCAN_HDR(NPC_DPORT_UDP, NPC_LID_LD, NPC_LT_LD_UDP, 2, 2);
+ NPC_SCAN_HDR(NPC_SPORT_TCP, NPC_LID_LD, NPC_LT_LD_TCP, 0, 2);
+ NPC_SCAN_HDR(NPC_DPORT_TCP, NPC_LID_LD, NPC_LT_LD_TCP, 2, 2);
+ NPC_SCAN_HDR(NPC_SPORT_SCTP, NPC_LID_LD, NPC_LT_LD_SCTP, 0, 2);
+ NPC_SCAN_HDR(NPC_DPORT_SCTP, NPC_LID_LD, NPC_LT_LD_SCTP, 2, 2);
+ NPC_SCAN_HDR(NPC_ETYPE_ETHER, NPC_LID_LA, NPC_LT_LA_ETHER, 12, 2);
+ NPC_SCAN_HDR(NPC_ETYPE_TAG1, NPC_LID_LB, NPC_LT_LB_CTAG, 4, 2);
+ NPC_SCAN_HDR(NPC_ETYPE_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 8, 2);
+ NPC_SCAN_HDR(NPC_VLAN_TAG1, NPC_LID_LB, NPC_LT_LB_CTAG, 2, 2);
+ NPC_SCAN_HDR(NPC_VLAN_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 2, 2);
+ NPC_SCAN_HDR(NPC_FDSA_VAL, NPC_LID_LB, NPC_LT_LB_FDSA, 1, 1);
+ NPC_SCAN_HDR(NPC_DMAC, NPC_LID_LA, la_ltype, la_start, 6);
+ NPC_SCAN_HDR(NPC_SMAC, NPC_LID_LA, la_ltype, la_start, 6);
+ /* PF_FUNC is 2 bytes at 0th byte of NPC_LT_LA_IH_NIX_ETHER */
+ NPC_SCAN_HDR(NPC_PF_FUNC, NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, 0, 2);
+}
+
+static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u64 *features = &mcam->rx_features;
+ u64 tcp_udp_sctp;
+ int hdr;
+
+ if (is_npc_intf_tx(intf))
+ features = &mcam->tx_features;
+
+ for (hdr = NPC_DMAC; hdr < NPC_HEADER_FIELDS_MAX; hdr++) {
+ if (npc_check_field(rvu, blkaddr, hdr, intf))
+ *features |= BIT_ULL(hdr);
+ }
+
+ tcp_udp_sctp = BIT_ULL(NPC_SPORT_TCP) | BIT_ULL(NPC_SPORT_UDP) |
+ BIT_ULL(NPC_DPORT_TCP) | BIT_ULL(NPC_DPORT_UDP) |
+ BIT_ULL(NPC_SPORT_SCTP) | BIT_ULL(NPC_DPORT_SCTP);
+
+	/* For tcp/udp/sctp, the corresponding layer type should be in the key */
+ if (*features & tcp_udp_sctp) {
+ if (!npc_check_field(rvu, blkaddr, NPC_LD, intf))
+ *features &= ~tcp_udp_sctp;
+ else
+ *features |= BIT_ULL(NPC_IPPROTO_TCP) |
+ BIT_ULL(NPC_IPPROTO_UDP) |
+ BIT_ULL(NPC_IPPROTO_SCTP);
+ }
+
+	/* For AH/ICMP/ICMPv6, check if the corresponding layer type is present in the key */
+ if (npc_check_field(rvu, blkaddr, NPC_LD, intf)) {
+ *features |= BIT_ULL(NPC_IPPROTO_AH);
+ *features |= BIT_ULL(NPC_IPPROTO_ICMP);
+ *features |= BIT_ULL(NPC_IPPROTO_ICMP6);
+ }
+
+ /* for ESP, check if corresponding layer type is present in the key */
+ if (npc_check_field(rvu, blkaddr, NPC_LE, intf))
+ *features |= BIT_ULL(NPC_IPPROTO_ESP);
+
+	/* For vlan, the corresponding layer type should be in the key */
+ if (*features & BIT_ULL(NPC_OUTER_VID) ||
+ *features & BIT_ULL(NPC_FDSA_VAL))
+ if (!npc_check_field(rvu, blkaddr, NPC_LB, intf)) {
+ *features &= ~BIT_ULL(NPC_OUTER_VID);
+ *features &= ~BIT_ULL(NPC_FDSA_VAL);
+ }
+
+	/* For vlan ethertypes, the corresponding layer type should be in the key */
+ if (npc_check_field(rvu, blkaddr, NPC_LB, intf))
+ *features |= BIT_ULL(NPC_VLAN_ETYPE_CTAG) |
+ BIT_ULL(NPC_VLAN_ETYPE_STAG);
+}
+
+/* Scan the key extraction profile and record how the fields of interest
+ * fill the key structure. Also verify that Channel and DMAC exist in the
+ * key and are not overwritten by other header fields.
+ */
+static int npc_scan_kex(struct rvu *rvu, int blkaddr, u8 intf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u8 lid, lt, ld, bitnr;
+ u8 key_nibble = 0;
+ u64 cfg;
+
+ /* Scan and note how parse result is going to be in key.
+ * A bit set in PARSE_NIBBLE_ENA corresponds to a nibble from
+ * parse result in the key. The enabled nibbles from parse result
+ * will be concatenated in key.
+ */
+ cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf));
+ cfg &= NPC_PARSE_NIBBLE;
+ for_each_set_bit(bitnr, (unsigned long *)&cfg, 31) {
+ npc_scan_parse_result(mcam, bitnr, key_nibble, intf);
+ key_nibble++;
+ }
+
+ /* Scan and note how layer data is going to be in key */
+ for (lid = 0; lid < NPC_MAX_LID; lid++) {
+ for (lt = 0; lt < NPC_MAX_LT; lt++) {
+ for (ld = 0; ld < NPC_MAX_LD; ld++) {
+ cfg = rvu_read64(rvu, blkaddr,
+ NPC_AF_INTFX_LIDX_LTX_LDX_CFG
+ (intf, lid, lt, ld));
+ if (!FIELD_GET(NPC_LDATA_EN, cfg))
+ continue;
+ npc_scan_ldata(rvu, blkaddr, lid, lt, cfg,
+ intf);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int npc_scan_verify_kex(struct rvu *rvu, int blkaddr)
+{
+ int err;
+
+ err = npc_scan_kex(rvu, blkaddr, NIX_INTF_RX);
+ if (err)
+ return err;
+
+ err = npc_scan_kex(rvu, blkaddr, NIX_INTF_TX);
+ if (err)
+ return err;
+
+ /* Channel is mandatory */
+ if (!npc_is_field_present(rvu, NPC_CHAN, NIX_INTF_RX)) {
+ dev_err(rvu->dev, "Channel not present in Key\n");
+ return -EINVAL;
+ }
+ /* check that none of the fields overwrite channel */
+ if (npc_check_overlap(rvu, blkaddr, NPC_CHAN, 0, NIX_INTF_RX)) {
+ dev_err(rvu->dev, "Channel cannot be overwritten\n");
+ return -EINVAL;
+ }
+ /* DMAC should be present in key for unicast filter to work */
+ if (!npc_is_field_present(rvu, NPC_DMAC, NIX_INTF_RX)) {
+ dev_err(rvu->dev, "DMAC not present in Key\n");
+ return -EINVAL;
+ }
+ /* check that none of the fields overwrite DMAC */
+ if (npc_check_overlap(rvu, blkaddr, NPC_DMAC, 0, NIX_INTF_RX)) {
+ dev_err(rvu->dev, "DMAC cannot be overwritten\n");
+ return -EINVAL;
+ }
+
+ npc_set_features(rvu, blkaddr, NIX_INTF_TX);
+ npc_set_features(rvu, blkaddr, NIX_INTF_RX);
+ npc_handle_multi_layer_fields(rvu, blkaddr, NIX_INTF_TX);
+ npc_handle_multi_layer_fields(rvu, blkaddr, NIX_INTF_RX);
+
+ return 0;
+}
+
+int npc_flow_steering_init(struct rvu *rvu, int blkaddr)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+
+ INIT_LIST_HEAD(&mcam->mcam_rules);
+
+ return npc_scan_verify_kex(rvu, blkaddr);
+}
+
+static int npc_check_unsupported_flows(struct rvu *rvu, u64 features, u8 intf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u64 *mcam_features = &mcam->rx_features;
+ u64 unsupported;
+ u8 bit;
+
+ if (is_npc_intf_tx(intf))
+ mcam_features = &mcam->tx_features;
+
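+	/* A feature requested here but absent from the KEX-derived feature
+	 * set is unsupported.
+	 */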
+ unsupported = (*mcam_features ^ features) & ~(*mcam_features);
+ if (unsupported) {
+ dev_info(rvu->dev, "Unsupported flow(s):\n");
+ for_each_set_bit(bit, (unsigned long *)&unsupported, 64)
+ dev_info(rvu->dev, "%s ", npc_get_field_name(bit));
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+/* npc_update_entry - Based on the masks generated during key scanning,
+ * update the given entry with the value and mask for the field of interest.
+ * At most 16 bytes of a packet header can be extracted by HW, hence lo and
+ * hi are sufficient. When the field is 8 bytes or less, hi should be 0 for
+ * both value and mask.
+ *
+ * If an exact match of the value is required then the mask should be all
+ * 1's. If any bits in the mask are 0 then the corresponding bits in the
+ * value are don't care.
+ */
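+/* Illustrative usage (example values only): an exact match on a 2-byte
+ * ethertype would use val_lo = ethertype, val_hi = 0, mask_lo = 0xffff and
+ * mask_hi = 0; see how NPC_WRITE_FLOW() feeds this helper further below.
+ */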
+static void npc_update_entry(struct rvu *rvu, enum key_fields type,
+ struct mcam_entry *entry, u64 val_lo,
+ u64 val_hi, u64 mask_lo, u64 mask_hi, u8 intf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct mcam_entry dummy = { {0} };
+ struct npc_key_field *field;
+ u64 kw1, kw2, kw3;
+ u8 shift;
+ int i;
+
+ field = &mcam->rx_key_fields[type];
+ if (is_npc_intf_tx(intf))
+ field = &mcam->tx_key_fields[type];
+
+ if (!field->nr_kws)
+ return;
+
+ for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
+ if (!field->kw_mask[i])
+ continue;
+ /* place key value in kw[x] */
+ shift = __ffs64(field->kw_mask[i]);
+ /* update entry value */
+ kw1 = (val_lo << shift) & field->kw_mask[i];
+ dummy.kw[i] = kw1;
+ /* update entry mask */
+ kw1 = (mask_lo << shift) & field->kw_mask[i];
+ dummy.kw_mask[i] = kw1;
+
+ if (field->nr_kws == 1)
+ break;
+ /* place remaining bits of key value in kw[x + 1] */
+ if (field->nr_kws == 2) {
+ /* update entry value */
+ kw2 = shift ? val_lo >> (64 - shift) : 0;
+ kw2 |= (val_hi << shift);
+ kw2 &= field->kw_mask[i + 1];
+ dummy.kw[i + 1] = kw2;
+ /* update entry mask */
+ kw2 = shift ? mask_lo >> (64 - shift) : 0;
+ kw2 |= (mask_hi << shift);
+ kw2 &= field->kw_mask[i + 1];
+ dummy.kw_mask[i + 1] = kw2;
+ break;
+ }
+ /* place remaining bits of key value in kw[x + 1], kw[x + 2] */
+ if (field->nr_kws == 3) {
+ /* update entry value */
+ kw2 = shift ? val_lo >> (64 - shift) : 0;
+ kw2 |= (val_hi << shift);
+ kw2 &= field->kw_mask[i + 1];
+ kw3 = shift ? val_hi >> (64 - shift) : 0;
+ kw3 &= field->kw_mask[i + 2];
+ dummy.kw[i + 1] = kw2;
+ dummy.kw[i + 2] = kw3;
+ /* update entry mask */
+ kw2 = shift ? mask_lo >> (64 - shift) : 0;
+ kw2 |= (mask_hi << shift);
+ kw2 &= field->kw_mask[i + 1];
+ kw3 = shift ? mask_hi >> (64 - shift) : 0;
+ kw3 &= field->kw_mask[i + 2];
+ dummy.kw_mask[i + 1] = kw2;
+ dummy.kw_mask[i + 2] = kw3;
+ break;
+ }
+ }
+	/* dummy now holds the values and masks for the given key field;
+	 * clear the input entry and update it with those.
+	 */
+ for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
+ if (!field->kw_mask[i])
+ continue;
+ entry->kw[i] &= ~field->kw_mask[i];
+ entry->kw_mask[i] &= ~field->kw_mask[i];
+
+ entry->kw[i] |= dummy.kw[i];
+ entry->kw_mask[i] |= dummy.kw_mask[i];
+ }
+}
+
+#define IPV6_WORDS 4
+
+static void npc_update_ipv6_flow(struct rvu *rvu, struct mcam_entry *entry,
+ u64 features, struct flow_msg *pkt,
+ struct flow_msg *mask,
+ struct rvu_npc_mcam_rule *output, u8 intf)
+{
+ u32 src_ip[IPV6_WORDS], src_ip_mask[IPV6_WORDS];
+ u32 dst_ip[IPV6_WORDS], dst_ip_mask[IPV6_WORDS];
+ struct flow_msg *opkt = &output->packet;
+ struct flow_msg *omask = &output->mask;
+ u64 mask_lo, mask_hi;
+ u64 val_lo, val_hi;
+
+ /* For an ipv6 address fe80::2c68:63ff:fe5e:2d0a the packet
+	 * values to be programmed in MCAM should be as below:
+ * val_high: 0xfe80000000000000
+ * val_low: 0x2c6863fffe5e2d0a
+ */
+ if (features & BIT_ULL(NPC_SIP_IPV6)) {
+ be32_to_cpu_array(src_ip_mask, mask->ip6src, IPV6_WORDS);
+ be32_to_cpu_array(src_ip, pkt->ip6src, IPV6_WORDS);
+
+ mask_hi = (u64)src_ip_mask[0] << 32 | src_ip_mask[1];
+ mask_lo = (u64)src_ip_mask[2] << 32 | src_ip_mask[3];
+ val_hi = (u64)src_ip[0] << 32 | src_ip[1];
+ val_lo = (u64)src_ip[2] << 32 | src_ip[3];
+
+ npc_update_entry(rvu, NPC_SIP_IPV6, entry, val_lo, val_hi,
+ mask_lo, mask_hi, intf);
+ memcpy(opkt->ip6src, pkt->ip6src, sizeof(opkt->ip6src));
+ memcpy(omask->ip6src, mask->ip6src, sizeof(omask->ip6src));
+ }
+ if (features & BIT_ULL(NPC_DIP_IPV6)) {
+ be32_to_cpu_array(dst_ip_mask, mask->ip6dst, IPV6_WORDS);
+ be32_to_cpu_array(dst_ip, pkt->ip6dst, IPV6_WORDS);
+
+ mask_hi = (u64)dst_ip_mask[0] << 32 | dst_ip_mask[1];
+ mask_lo = (u64)dst_ip_mask[2] << 32 | dst_ip_mask[3];
+ val_hi = (u64)dst_ip[0] << 32 | dst_ip[1];
+ val_lo = (u64)dst_ip[2] << 32 | dst_ip[3];
+
+ npc_update_entry(rvu, NPC_DIP_IPV6, entry, val_lo, val_hi,
+ mask_lo, mask_hi, intf);
+ memcpy(opkt->ip6dst, pkt->ip6dst, sizeof(opkt->ip6dst));
+ memcpy(omask->ip6dst, mask->ip6dst, sizeof(omask->ip6dst));
+ }
+}
+
+static void npc_update_vlan_features(struct rvu *rvu, struct mcam_entry *entry,
+ u64 features, u8 intf)
+{
+ bool ctag = !!(features & BIT_ULL(NPC_VLAN_ETYPE_CTAG));
+ bool stag = !!(features & BIT_ULL(NPC_VLAN_ETYPE_STAG));
+ bool vid = !!(features & BIT_ULL(NPC_OUTER_VID));
+
+ /* If only VLAN id is given then always match outer VLAN id */
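+	/* The value below ORs both LB ltypes while the mask ANDs them, so
+	 * only the bits common to CTAG and STAG_QINQ are compared and either
+	 * tag type matches.
+	 */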
+ if (vid && !ctag && !stag) {
+ npc_update_entry(rvu, NPC_LB, entry,
+ NPC_LT_LB_STAG_QINQ | NPC_LT_LB_CTAG, 0,
+ NPC_LT_LB_STAG_QINQ & NPC_LT_LB_CTAG, 0, intf);
+ return;
+ }
+ if (ctag)
+ npc_update_entry(rvu, NPC_LB, entry, NPC_LT_LB_CTAG, 0,
+ ~0ULL, 0, intf);
+ if (stag)
+ npc_update_entry(rvu, NPC_LB, entry, NPC_LT_LB_STAG_QINQ, 0,
+ ~0ULL, 0, intf);
+}
+
+static void npc_update_flow(struct rvu *rvu, struct mcam_entry *entry,
+ u64 features, struct flow_msg *pkt,
+ struct flow_msg *mask,
+ struct rvu_npc_mcam_rule *output, u8 intf)
+{
+ u64 dmac_mask = ether_addr_to_u64(mask->dmac);
+ u64 smac_mask = ether_addr_to_u64(mask->smac);
+ u64 dmac_val = ether_addr_to_u64(pkt->dmac);
+ u64 smac_val = ether_addr_to_u64(pkt->smac);
+ struct flow_msg *opkt = &output->packet;
+ struct flow_msg *omask = &output->mask;
+
+ if (!features)
+ return;
+
+ /* For tcp/udp/sctp LTYPE should be present in entry */
+ if (features & BIT_ULL(NPC_IPPROTO_TCP))
+ npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_TCP,
+ 0, ~0ULL, 0, intf);
+ if (features & BIT_ULL(NPC_IPPROTO_UDP))
+ npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_UDP,
+ 0, ~0ULL, 0, intf);
+ if (features & BIT_ULL(NPC_IPPROTO_SCTP))
+ npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_SCTP,
+ 0, ~0ULL, 0, intf);
+ if (features & BIT_ULL(NPC_IPPROTO_ICMP))
+ npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_ICMP,
+ 0, ~0ULL, 0, intf);
+ if (features & BIT_ULL(NPC_IPPROTO_ICMP6))
+ npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_ICMP6,
+ 0, ~0ULL, 0, intf);
+ if (features & BIT_ULL(NPC_FDSA_VAL))
+ npc_update_entry(rvu, NPC_LB, entry, NPC_LT_LB_FDSA,
+ 0, ~0ULL, 0, intf);
+
+ /* For AH, LTYPE should be present in entry */
+ if (features & BIT_ULL(NPC_IPPROTO_AH))
+ npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_AH,
+ 0, ~0ULL, 0, intf);
+ /* For ESP, LTYPE should be present in entry */
+ if (features & BIT_ULL(NPC_IPPROTO_ESP))
+ npc_update_entry(rvu, NPC_LE, entry, NPC_LT_LE_ESP,
+ 0, ~0ULL, 0, intf);
+
+#define NPC_WRITE_FLOW(field, member, val_lo, val_hi, mask_lo, mask_hi) \
+do { \
+ if (features & BIT_ULL((field))) { \
+ npc_update_entry(rvu, (field), entry, (val_lo), (val_hi), \
+ (mask_lo), (mask_hi), intf); \
+ memcpy(&opkt->member, &pkt->member, sizeof(pkt->member)); \
+ memcpy(&omask->member, &mask->member, sizeof(mask->member)); \
+ } \
+} while (0)
+
+ NPC_WRITE_FLOW(NPC_DMAC, dmac, dmac_val, 0, dmac_mask, 0);
+ NPC_WRITE_FLOW(NPC_SMAC, smac, smac_val, 0, smac_mask, 0);
+ NPC_WRITE_FLOW(NPC_ETYPE, etype, ntohs(pkt->etype), 0,
+ ntohs(mask->etype), 0);
+ NPC_WRITE_FLOW(NPC_TOS, tos, pkt->tos, 0, mask->tos, 0);
+ NPC_WRITE_FLOW(NPC_SIP_IPV4, ip4src, ntohl(pkt->ip4src), 0,
+ ntohl(mask->ip4src), 0);
+ NPC_WRITE_FLOW(NPC_DIP_IPV4, ip4dst, ntohl(pkt->ip4dst), 0,
+ ntohl(mask->ip4dst), 0);
+ NPC_WRITE_FLOW(NPC_SPORT_TCP, sport, ntohs(pkt->sport), 0,
+ ntohs(mask->sport), 0);
+ NPC_WRITE_FLOW(NPC_SPORT_UDP, sport, ntohs(pkt->sport), 0,
+ ntohs(mask->sport), 0);
+ NPC_WRITE_FLOW(NPC_DPORT_TCP, dport, ntohs(pkt->dport), 0,
+ ntohs(mask->dport), 0);
+ NPC_WRITE_FLOW(NPC_DPORT_UDP, dport, ntohs(pkt->dport), 0,
+ ntohs(mask->dport), 0);
+ NPC_WRITE_FLOW(NPC_SPORT_SCTP, sport, ntohs(pkt->sport), 0,
+ ntohs(mask->sport), 0);
+ NPC_WRITE_FLOW(NPC_DPORT_SCTP, dport, ntohs(pkt->dport), 0,
+ ntohs(mask->dport), 0);
+
+ NPC_WRITE_FLOW(NPC_OUTER_VID, vlan_tci, ntohs(pkt->vlan_tci), 0,
+ ntohs(mask->vlan_tci), 0);
+ NPC_WRITE_FLOW(NPC_FDSA_VAL, vlan_tci, ntohs(pkt->vlan_tci), 0,
+ ntohs(mask->vlan_tci), 0);
+
+ npc_update_ipv6_flow(rvu, entry, features, pkt, mask, output, intf);
+ npc_update_vlan_features(rvu, entry, features, intf);
+}
+
+static struct rvu_npc_mcam_rule *rvu_mcam_find_rule(struct npc_mcam *mcam,
+ u16 entry)
+{
+ struct rvu_npc_mcam_rule *iter;
+
+ mutex_lock(&mcam->lock);
+ list_for_each_entry(iter, &mcam->mcam_rules, list) {
+ if (iter->entry == entry) {
+ mutex_unlock(&mcam->lock);
+ return iter;
+ }
+ }
+ mutex_unlock(&mcam->lock);
+
+ return NULL;
+}
+
+static void rvu_mcam_add_rule(struct npc_mcam *mcam,
+ struct rvu_npc_mcam_rule *rule)
+{
+ struct list_head *head = &mcam->mcam_rules;
+ struct rvu_npc_mcam_rule *iter;
+
+ mutex_lock(&mcam->lock);
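+	/* Keep the rule list sorted by MCAM entry index */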
+ list_for_each_entry(iter, &mcam->mcam_rules, list) {
+ if (iter->entry > rule->entry)
+ break;
+ head = &iter->list;
+ }
+
+ list_add(&rule->list, head);
+ mutex_unlock(&mcam->lock);
+}
+
+static void rvu_mcam_remove_counter_from_rule(struct rvu *rvu, u16 pcifunc,
+ struct rvu_npc_mcam_rule *rule)
+{
+ struct npc_mcam_oper_counter_req free_req = { 0 };
+ struct msg_rsp free_rsp;
+
+ if (!rule->has_cntr)
+ return;
+
+ free_req.hdr.pcifunc = pcifunc;
+ free_req.cntr = rule->cntr;
+
+ rvu_mbox_handler_npc_mcam_free_counter(rvu, &free_req, &free_rsp);
+ rule->has_cntr = false;
+}
+
+static void rvu_mcam_add_counter_to_rule(struct rvu *rvu, u16 pcifunc,
+ struct rvu_npc_mcam_rule *rule,
+ struct npc_install_flow_rsp *rsp)
+{
+ struct npc_mcam_alloc_counter_req cntr_req = { 0 };
+ struct npc_mcam_alloc_counter_rsp cntr_rsp = { 0 };
+ int err;
+
+ cntr_req.hdr.pcifunc = pcifunc;
+ cntr_req.contig = true;
+ cntr_req.count = 1;
+
+	/* Try to allocate a counter to track the stats of this rule. If a
+	 * counter cannot be allocated then proceed without one, since
+	 * counters are scarcer than entries.
+	 */
+ err = rvu_mbox_handler_npc_mcam_alloc_counter(rvu, &cntr_req,
+ &cntr_rsp);
+ if (!err && cntr_rsp.count) {
+ rule->cntr = cntr_rsp.cntr;
+ rule->has_cntr = true;
+ rsp->counter = rule->cntr;
+ } else {
+ rsp->counter = err;
+ }
+}
+
+static void npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ struct mcam_entry *entry,
+ struct npc_install_flow_req *req,
+ u16 target, bool pf_set_vfs_mac)
+{
+ struct rvu_switch *rswitch = &rvu->rswitch;
+ struct nix_rx_action action;
+
+ if (rswitch->mode == DEVLINK_ESWITCH_MODE_SWITCHDEV && pf_set_vfs_mac)
+		req->chan_mask = 0x0; /* Don't care about the channel */
+
+ npc_update_entry(rvu, NPC_CHAN, entry, req->channel, 0, req->chan_mask,
+ 0, NIX_INTF_RX);
+
+ *(u64 *)&action = 0x00;
+ action.pf_func = target;
+ action.op = req->op;
+ action.index = req->index;
+ action.match_id = req->match_id;
+ action.flow_key_alg = req->flow_key_alg;
+
+ if (req->op == NIX_RX_ACTION_DEFAULT && pfvf->def_ucast_rule)
+ action = pfvf->def_ucast_rule->rx_action;
+
+ entry->action = *(u64 *)&action;
+
+ /* VTAG0 starts at 0th byte of LID_B.
+ * VTAG1 starts at 4th byte of LID_B.
+ */
+ entry->vtag_action = FIELD_PREP(RX_VTAG0_VALID_BIT, req->vtag0_valid) |
+ FIELD_PREP(RX_VTAG0_TYPE_MASK, req->vtag0_type) |
+ FIELD_PREP(RX_VTAG0_LID_MASK, NPC_LID_LB) |
+ FIELD_PREP(RX_VTAG0_RELPTR_MASK, 0) |
+ FIELD_PREP(RX_VTAG1_VALID_BIT, req->vtag1_valid) |
+ FIELD_PREP(RX_VTAG1_TYPE_MASK, req->vtag1_type) |
+ FIELD_PREP(RX_VTAG1_LID_MASK, NPC_LID_LB) |
+ FIELD_PREP(RX_VTAG1_RELPTR_MASK, 4);
+}
+
+static void npc_update_tx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ struct mcam_entry *entry,
+ struct npc_install_flow_req *req, u16 target)
+{
+ struct nix_tx_action action;
+ u64 mask = ~0ULL;
+
+ /* If AF is installing then do not care about
+ * PF_FUNC in Send Descriptor
+ */
+ if (is_pffunc_af(req->hdr.pcifunc))
+ mask = 0;
+
+ npc_update_entry(rvu, NPC_PF_FUNC, entry, (__force u16)htons(target),
+ 0, mask, 0, NIX_INTF_TX);
+
+ *(u64 *)&action = 0x00;
+ action.op = req->op;
+ action.index = req->index;
+ action.match_id = req->match_id;
+
+ entry->action = *(u64 *)&action;
+
+ /* VTAG0 starts at 0th byte of LID_B.
+ * VTAG1 starts at 4th byte of LID_B.
+ */
+ entry->vtag_action = FIELD_PREP(TX_VTAG0_DEF_MASK, req->vtag0_def) |
+ FIELD_PREP(TX_VTAG0_OP_MASK, req->vtag0_op) |
+ FIELD_PREP(TX_VTAG0_LID_MASK, NPC_LID_LA) |
+ FIELD_PREP(TX_VTAG0_RELPTR_MASK, 20) |
+ FIELD_PREP(TX_VTAG1_DEF_MASK, req->vtag1_def) |
+ FIELD_PREP(TX_VTAG1_OP_MASK, req->vtag1_op) |
+ FIELD_PREP(TX_VTAG1_LID_MASK, NPC_LID_LA) |
+ FIELD_PREP(TX_VTAG1_RELPTR_MASK, 24);
+}
+
+static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target,
+ int nixlf, struct rvu_pfvf *pfvf,
+ struct npc_install_flow_req *req,
+ struct npc_install_flow_rsp *rsp, bool enable,
+ bool pf_set_vfs_mac)
+{
+ struct rvu_npc_mcam_rule *def_ucast_rule = pfvf->def_ucast_rule;
+ u64 features, installed_features, missing_features = 0;
+ struct npc_mcam_write_entry_req write_req = { 0 };
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_npc_mcam_rule dummy = { 0 };
+ struct rvu_npc_mcam_rule *rule;
+ u16 owner = req->hdr.pcifunc;
+ struct msg_rsp write_rsp;
+ struct mcam_entry *entry;
+ int entry_index, err;
+ bool new = false;
+
+ installed_features = req->features;
+ features = req->features;
+ entry = &write_req.entry_data;
+ entry_index = req->entry;
+
+ npc_update_flow(rvu, entry, features, &req->packet, &req->mask, &dummy,
+ req->intf);
+
+ if (is_npc_intf_rx(req->intf))
+ npc_update_rx_entry(rvu, pfvf, entry, req, target, pf_set_vfs_mac);
+ else
+ npc_update_tx_entry(rvu, pfvf, entry, req, target);
+
+ /* Default unicast rules do not exist for TX */
+ if (is_npc_intf_tx(req->intf))
+ goto find_rule;
+
+ if (req->default_rule) {
+ entry_index = npc_get_nixlf_mcam_index(mcam, target, nixlf,
+ NIXLF_UCAST_ENTRY);
+ enable = is_mcam_entry_enabled(rvu, mcam, blkaddr, entry_index);
+ }
+
+ /* update mcam entry with default unicast rule attributes */
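+	/* missing_features = feature bits set in the default unicast rule but
+	 * not in this request; they are merged into the entry below.
+	 */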
+ if (def_ucast_rule && (req->default_rule && req->append)) {
+ missing_features = (def_ucast_rule->features ^ features) &
+ def_ucast_rule->features;
+ if (missing_features)
+ npc_update_flow(rvu, entry, missing_features,
+ &def_ucast_rule->packet,
+ &def_ucast_rule->mask,
+ &dummy, req->intf);
+ installed_features = req->features | missing_features;
+ }
+find_rule:
+ rule = rvu_mcam_find_rule(mcam, entry_index);
+ if (!rule) {
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+ new = true;
+ }
+
+ /* allocate new counter if rule has no counter */
+ if (!req->default_rule && req->set_cntr && !rule->has_cntr)
+ rvu_mcam_add_counter_to_rule(rvu, owner, rule, rsp);
+
+ /* if user wants to delete an existing counter for a rule then
+ * free the counter
+ */
+ if (!req->set_cntr && rule->has_cntr)
+ rvu_mcam_remove_counter_from_rule(rvu, owner, rule);
+
+ write_req.hdr.pcifunc = owner;
+
+ /* AF owns the default rules so change the owner just to relax
+ * the checks in rvu_mbox_handler_npc_mcam_write_entry
+ */
+ if (req->default_rule)
+ write_req.hdr.pcifunc = 0;
+
+ write_req.entry = entry_index;
+ write_req.intf = req->intf;
+ write_req.enable_entry = (u8)enable;
+ /* if counter is available then clear and use it */
+ if (req->set_cntr && rule->has_cntr) {
+ rvu_write64(rvu, blkaddr, NPC_AF_MATCH_STATX(rule->cntr), 0x00);
+ write_req.set_cntr = 1;
+ write_req.cntr = rule->cntr;
+ }
+
+ /* update rule */
+ memcpy(&rule->packet, &dummy.packet, sizeof(rule->packet));
+ memcpy(&rule->mask, &dummy.mask, sizeof(rule->mask));
+ rule->entry = entry_index;
+ memcpy(&rule->rx_action, &entry->action, sizeof(struct nix_rx_action));
+ if (is_npc_intf_tx(req->intf))
+ memcpy(&rule->tx_action, &entry->action,
+ sizeof(struct nix_tx_action));
+ rule->vtag_action = entry->vtag_action;
+ rule->features = installed_features;
+ rule->default_rule = req->default_rule;
+ rule->owner = owner;
+ rule->enable = enable;
+ rule->chan_mask = write_req.entry_data.kw_mask[0] & NPC_KEX_CHAN_MASK;
+ rule->chan = write_req.entry_data.kw[0] & NPC_KEX_CHAN_MASK;
+ rule->chan &= rule->chan_mask;
+ if (is_npc_intf_tx(req->intf))
+ rule->intf = pfvf->nix_tx_intf;
+ else
+ rule->intf = pfvf->nix_rx_intf;
+
+ if (new)
+ rvu_mcam_add_rule(mcam, rule);
+ if (req->default_rule)
+ pfvf->def_ucast_rule = rule;
+
+ /* write to mcam entry registers */
+ err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &write_req,
+ &write_rsp);
+ if (err) {
+ rvu_mcam_remove_counter_from_rule(rvu, owner, rule);
+ if (new) {
+ list_del(&rule->list);
+ kfree(rule);
+ }
+ return err;
+ }
+
+ /* VF's MAC address is being changed via PF */
+ if (pf_set_vfs_mac) {
+ ether_addr_copy(pfvf->default_mac, req->packet.dmac);
+ ether_addr_copy(pfvf->mac_addr, req->packet.dmac);
+ set_bit(PF_SET_VF_MAC, &pfvf->flags);
+ }
+
+ if (test_bit(PF_SET_VF_CFG, &pfvf->flags) &&
+ req->vtag0_type == NIX_AF_LFX_RX_VTAG_TYPE7)
+ rule->vfvlan_cfg = true;
+
+ if (is_npc_intf_rx(req->intf) && req->match_id &&
+ (req->op == NIX_RX_ACTIONOP_UCAST || req->op == NIX_RX_ACTIONOP_RSS))
+ return rvu_nix_setup_ratelimit_aggr(rvu, req->hdr.pcifunc,
+ req->index, req->match_id);
+ return 0;
+}
+
+int rvu_mbox_handler_npc_install_flow(struct rvu *rvu,
+ struct npc_install_flow_req *req,
+ struct npc_install_flow_rsp *rsp)
+{
+ bool from_vf = !!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK);
+ struct rvu_switch *rswitch = &rvu->rswitch;
+ int blkaddr, nixlf, err;
+ struct rvu_pfvf *pfvf;
+ bool pf_set_vfs_mac = false;
+ bool enable = true;
+ u16 target;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0) {
+ dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
+ return NPC_MCAM_INVALID_REQ;
+ }
+
+ if (!is_npc_interface_valid(rvu, req->intf))
+ return NPC_FLOW_INTF_INVALID;
+
+ if (from_vf && req->default_rule)
+ return NPC_FLOW_VF_PERM_DENIED;
+
+	/* Each PF/VF's info is maintained in struct rvu_pfvf.
+	 * The rvu_pfvf for the target PF/VF needs to be retrieved,
+	 * hence modify pcifunc accordingly.
+	 */
+
+ /* AF installing for a PF/VF */
+ if (!req->hdr.pcifunc)
+ target = req->vf;
+ /* PF installing for its VF */
+ else if (!from_vf && req->vf) {
+ target = (req->hdr.pcifunc & ~RVU_PFVF_FUNC_MASK) | req->vf;
+ pf_set_vfs_mac = req->default_rule &&
+ (req->features & BIT_ULL(NPC_DMAC));
+ }
+ /* msg received from PF/VF */
+ else
+ target = req->hdr.pcifunc;
+
+	/* Ignore chan_mask if the PF func is not AF; revisit later */
+ if (!is_pffunc_af(req->hdr.pcifunc))
+ req->chan_mask = 0xFFF;
+
+ err = npc_check_unsupported_flows(rvu, req->features, req->intf);
+ if (err)
+ return NPC_FLOW_NOT_SUPPORTED;
+
+ pfvf = rvu_get_pfvf(rvu, target);
+
+ /* PF installing for its VF */
+ if (req->hdr.pcifunc && !from_vf && req->vf)
+ set_bit(PF_SET_VF_CFG, &pfvf->flags);
+
+ /* update req destination mac addr */
+ if ((req->features & BIT_ULL(NPC_DMAC)) && is_npc_intf_rx(req->intf) &&
+ is_zero_ether_addr(req->packet.dmac)) {
+ ether_addr_copy(req->packet.dmac, pfvf->mac_addr);
+ eth_broadcast_addr((u8 *)&req->mask.dmac);
+ }
+
+	/* For TX rules, proceed whether or not a NIXLF is attached */
+ err = nix_get_nixlf(rvu, target, &nixlf, NULL);
+ if (err && is_npc_intf_rx(req->intf) && !pf_set_vfs_mac)
+ return NPC_FLOW_NO_NIXLF;
+
+ /* don't enable rule when nixlf not attached or initialized */
+ if (!(is_nixlf_attached(rvu, target) &&
+ test_bit(NIXLF_INITIALIZED, &pfvf->flags)))
+ enable = false;
+
+	/* Packets reaching NPC in the Tx path imply that a NIXLF is
+	 * properly set up and transmitting.
+	 * Hence rules can be enabled for Tx.
+	 */
+ if (is_npc_intf_tx(req->intf))
+ enable = true;
+
+ /* Do not allow requests from uninitialized VFs */
+ if (from_vf && !enable)
+ return NPC_FLOW_VF_NOT_INIT;
+
+ /* PF sets VF mac & VF NIXLF is not attached, update the mac addr */
+ if (pf_set_vfs_mac && !enable) {
+ ether_addr_copy(pfvf->default_mac, req->packet.dmac);
+ ether_addr_copy(pfvf->mac_addr, req->packet.dmac);
+ set_bit(PF_SET_VF_MAC, &pfvf->flags);
+ return 0;
+ }
+
+ mutex_lock(&rswitch->switch_lock);
+ err = npc_install_flow(rvu, blkaddr, target, nixlf, pfvf,
+ req, rsp, enable, pf_set_vfs_mac);
+ mutex_unlock(&rswitch->switch_lock);
+
+ return err;
+}
+
+static int npc_delete_flow(struct rvu *rvu, struct rvu_npc_mcam_rule *rule,
+ u16 pcifunc)
+{
+ struct npc_mcam_ena_dis_entry_req dis_req = { 0 };
+ struct msg_rsp dis_rsp;
+
+ if (rule->default_rule)
+ return 0;
+
+ if (rule->has_cntr)
+ rvu_mcam_remove_counter_from_rule(rvu, pcifunc, rule);
+
+ dis_req.hdr.pcifunc = pcifunc;
+ dis_req.entry = rule->entry;
+
+ list_del(&rule->list);
+ kfree(rule);
+
+ return rvu_mbox_handler_npc_mcam_dis_entry(rvu, &dis_req, &dis_rsp);
+}
+
+int rvu_mbox_handler_npc_delete_flow(struct rvu *rvu,
+ struct npc_delete_flow_req *req,
+ struct msg_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_npc_mcam_rule *iter, *tmp;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct list_head del_list;
+
+ INIT_LIST_HEAD(&del_list);
+
+ mutex_lock(&mcam->lock);
+ list_for_each_entry_safe(iter, tmp, &mcam->mcam_rules, list) {
+ if (iter->owner == pcifunc) {
+ /* All rules */
+ if (req->all) {
+ list_move_tail(&iter->list, &del_list);
+ /* Range of rules */
+ } else if (req->end && iter->entry >= req->start &&
+ iter->entry <= req->end) {
+ list_move_tail(&iter->list, &del_list);
+ /* single rule */
+ } else if (req->entry == iter->entry) {
+ list_move_tail(&iter->list, &del_list);
+ break;
+ }
+ }
+ }
+ mutex_unlock(&mcam->lock);
+
+ list_for_each_entry_safe(iter, tmp, &del_list, list) {
+ u16 entry = iter->entry;
+
+ /* clear the mcam entry target pcifunc */
+ mcam->entry2target_pffunc[entry] = 0x0;
+ if (npc_delete_flow(rvu, iter, pcifunc))
+ dev_err(rvu->dev, "rule deletion failed for entry:%u",
+ entry);
+ }
+
+ return 0;
+}
+
+static int npc_update_dmac_value(struct rvu *rvu, int npcblkaddr,
+ struct rvu_npc_mcam_rule *rule,
+ struct rvu_pfvf *pfvf)
+{
+ struct npc_mcam_write_entry_req write_req = { 0 };
+ struct mcam_entry *entry = &write_req.entry_data;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct msg_rsp rsp;
+ u8 intf, enable;
+ int err;
+
+ ether_addr_copy(rule->packet.dmac, pfvf->mac_addr);
+
+ npc_read_mcam_entry(rvu, mcam, npcblkaddr, rule->entry,
+ entry, &intf, &enable);
+
+ npc_update_entry(rvu, NPC_DMAC, entry,
+ ether_addr_to_u64(pfvf->mac_addr), 0,
+ 0xffffffffffffull, 0, intf);
+
+ write_req.hdr.pcifunc = rule->owner;
+ write_req.entry = rule->entry;
+ write_req.intf = pfvf->nix_rx_intf;
+
+ mutex_unlock(&mcam->lock);
+ err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &write_req, &rsp);
+ mutex_lock(&mcam->lock);
+
+ return err;
+}
+
+void npc_mcam_enable_flows(struct rvu *rvu, u16 target)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, target);
+ struct rvu_npc_mcam_rule *def_ucast_rule;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_npc_mcam_rule *rule;
+ int blkaddr, bank, index;
+ u64 def_action;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ def_ucast_rule = pfvf->def_ucast_rule;
+
+ mutex_lock(&mcam->lock);
+ list_for_each_entry(rule, &mcam->mcam_rules, list) {
+ if (is_npc_intf_rx(rule->intf) &&
+ rule->rx_action.pf_func == target && !rule->enable) {
+ if (rule->default_rule) {
+ npc_enable_mcam_entry(rvu, mcam, blkaddr,
+ rule->entry, true);
+ rule->enable = true;
+ continue;
+ }
+
+ if (rule->vfvlan_cfg)
+ npc_update_dmac_value(rvu, blkaddr, rule, pfvf);
+
+ if (rule->rx_action.op == NIX_RX_ACTION_DEFAULT) {
+ if (!def_ucast_rule)
+ continue;
+ /* Use default unicast entry action */
+ rule->rx_action = def_ucast_rule->rx_action;
+ def_action = *(u64 *)&def_ucast_rule->rx_action;
+ bank = npc_get_bank(mcam, rule->entry);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION
+ (rule->entry, bank), def_action);
+ }
+
+ npc_enable_mcam_entry(rvu, mcam, blkaddr,
+ rule->entry, true);
+ rule->enable = true;
+ }
+ }
+
+ /* Enable MCAM entries installed by PF with target as VF pcifunc */
+ for (index = 0; index < mcam->bmap_entries; index++) {
+ if (mcam->entry2target_pffunc[index] == target)
+ npc_enable_mcam_entry(rvu, mcam, blkaddr,
+ index, true);
+ }
+ mutex_unlock(&mcam->lock);
+}
+
+void npc_mcam_disable_flows(struct rvu *rvu, u16 target)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int blkaddr, index;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ mutex_lock(&mcam->lock);
+ /* Disable MCAM entries installed by PF with target as VF pcifunc */
+ for (index = 0; index < mcam->bmap_entries; index++) {
+ if (mcam->entry2target_pffunc[index] == target)
+ npc_enable_mcam_entry(rvu, mcam, blkaddr,
+ index, false);
+ }
+ mutex_unlock(&mcam->lock);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_ree.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_ree.c
new file mode 100644
index 000000000000..6b0d86582243
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_ree.c
@@ -0,0 +1,1242 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include "rvu.h"
+#include "rvu_reg.h"
+
+/* Maximum number of REE blocks */
+#define MAX_REE_BLKS 2
+
+/* Graph maximum number of entries, each of 8B */
+#define REE_GRAPH_CNT (16 * 1024 * 1024)
+
+/* Prefix block size is 1K entries of 16B each;
+ * the maximum number of blocks for a single ROF is 128
+ */
+#define REE_PREFIX_PTR_LEN 1024
+#define REE_PREFIX_CNT (128 * 1024)
+
+/* Rule DB entries are held in memory */
+#define REE_RULE_DB_ALLOC_SIZE (4 * 1024 * 1024)
+#define REE_RULE_DB_ALLOC_SHIFT 22
+#define REE_RULE_DB_BLOCK_CNT 64
+
+/* Rule DB incremental */
+#define REE_RULE_DBI_SIZE (16 * 6)
+
+/* Administrative instruction queue size */
+#define REE_AQ_SIZE 128
+
+static const char *ree_irq_name[MAX_REE_BLKS][REE_AF_INT_VEC_CNT] = {
+ { "REE0_AF_RAS", "REE0_AF_RVU", "REE0_AF_DONE", "REE0_AF_AQ" },
+ { "REE1_AF_RAS", "REE1_AF_RVU", "REE1_AF_DONE", "REE1_AF_AQ" },
+};
+
+enum ree_cmp_ops {
+	REE_CMP_EQ,	/* Equal to data */
+ REE_CMP_GEQ, /* Equal or greater than data */
+ REE_CMP_LEQ, /* Equal or less than data */
+ REE_CMP_KEY_FIELDS_MAX,
+};
+
+enum ree_rof_types {
+ REE_ROF_TYPE_0 = 0, /* Legacy */
+ REE_ROF_TYPE_1 = 1, /* Check CSR EQ */
+ REE_ROF_TYPE_2 = 2, /* Check CSR GEQ */
+ REE_ROF_TYPE_3 = 3, /* Check CSR LEQ */
+ REE_ROF_TYPE_4 = 4, /* Not relevant */
+ REE_ROF_TYPE_5 = 5, /* Check CSR checksum only for internal memory */
+ REE_ROF_TYPE_6 = 6, /* Internal memory */
+ REE_ROF_TYPE_7 = 7, /* External memory */
+};
+
+struct ree_rule_db_entry {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 addr : 32;
+ u64 pad : 24;
+ u64 type : 8;
+#else
+ u64 type : 8;
+ u64 pad : 24;
+ u64 addr : 32;
+#endif
+ u64 value;
+};
+
+static void ree_reex_enable(struct rvu *rvu, struct rvu_block *block)
+{
+ u64 reg;
+
+ /* Set GO bit */
+ reg = rvu_read64(rvu, block->addr, REE_AF_REEXM_CTRL);
+ reg |= REE_AF_REEXM_CTRL_GO;
+ rvu_write64(rvu, block->addr, REE_AF_REEXM_CTRL, reg);
+}
+
+static void ree_reex_force_clock(struct rvu *rvu, struct rvu_block *block,
+ bool force_on)
+{
+ u64 reg;
+
+ /* Force ON or OFF for SCLK / RXPCLK */
+ reg = rvu_read64(rvu, block->addr, REE_AF_CMD_CTL);
+ if (force_on)
+ reg = reg | REE_AF_FORCE_CCLK | REE_AF_FORCE_CSCLK;
+ else
+ reg = reg & ~(REE_AF_FORCE_CCLK | REE_AF_FORCE_CSCLK);
+ rvu_write64(rvu, block->addr, REE_AF_CMD_CTL, reg);
+}
+
+static int ree_graceful_disable_control(struct rvu *rvu,
+ struct rvu_block *block, bool apply)
+{
+ u64 val, mask;
+ int err;
+
+ /* Graceful Disable is available on all queues 0..35
+ * 0 = Queue is not gracefully-disabled (apply is false)
+ * 1 = Queue was gracefully-disabled (apply is true)
+ */
+ mask = GENMASK(35, 0);
+
+ /* Check what is graceful disable status */
+ val = rvu_read64(rvu, block->addr, REE_AF_GRACEFUL_DIS_STATUS) & mask;
+	if (apply && val)
+		return REE_AF_ERR_Q_IS_GRACEFUL_DIS;
+	else if (!apply && !val)
+ return REE_AF_ERR_Q_NOT_GRACEFUL_DIS;
+
+ /* Apply Graceful Enable or Disable on all queues 0..35 */
+ if (apply)
+ val = GENMASK(35, 0);
+ else
+ val = 0;
+
+ rvu_write64(rvu, block->addr, REE_AF_GRACEFUL_DIS_CTL, val);
+
+	/* Poll until graceful disable is applied (or cleared) on all queues;
+	 * this might take time
+	 */
+ err = rvu_poll_reg(rvu, block->addr, REE_AF_GRACEFUL_DIS_STATUS, mask,
+ !apply);
+ if (err) {
+ dev_err(rvu->dev, "REE graceful disable control failed");
+ return err;
+ }
+ return 0;
+}
+
+static int ree_reex_programming(struct rvu *rvu, struct rvu_block *block,
+ u8 incremental)
+{
+ int err;
+
+ if (!incremental) {
+ /* REEX Set & Clear MAIN_CSR init */
+ rvu_write64(rvu, block->addr, REE_AF_REEXM_CTRL,
+ REE_AF_REEXM_CTRL_INIT);
+ rvu_write64(rvu, block->addr, REE_AF_REEXM_CTRL, 0x0);
+
+ /* REEX Poll MAIN_CSR INIT_DONE */
+ err = rvu_poll_reg(rvu, block->addr, REE_AF_REEXM_STATUS,
+ REE_AF_REEXM_STATUS_INIT_DONE, false);
+ if (err) {
+ dev_err(rvu->dev, "REE poll reexm status failed");
+ return err;
+ }
+
+ /* REEX Set Mem Init Mode */
+ rvu_write64(rvu, block->addr, REE_AF_REEXR_CTRL,
+ (REE_AF_REEXR_CTRL_INIT |
+ REE_AF_REEXR_CTRL_MODE_IM_L1_L2));
+
+ /* REEX Set & Clear Mem Init */
+ rvu_write64(rvu, block->addr, REE_AF_REEXR_CTRL,
+ REE_AF_REEXR_CTRL_MODE_IM_L1_L2);
+
+ /* REEX Poll all RTRU DONE 3 bits */
+ err = rvu_poll_reg(rvu, block->addr, REE_AF_REEXR_STATUS,
+ (REE_AF_REEXR_STATUS_IM_INIT_DONE |
+ REE_AF_REEXR_STATUS_L1_CACHE_INIT_DONE |
+ REE_AF_REEXR_STATUS_L2_CACHE_INIT_DONE),
+ false);
+ if (err) {
+ dev_err(rvu->dev, "REE for cache done failed");
+ return err;
+ }
+ } else {
+ /* REEX Set Mem Init Mode */
+ rvu_write64(rvu, block->addr, REE_AF_REEXR_CTRL,
+ (REE_AF_REEXR_CTRL_INIT |
+ REE_AF_REEXR_CTRL_MODE_L1_L2));
+
+ /* REEX Set & Clear Mem Init */
+ rvu_write64(rvu, block->addr, REE_AF_REEXR_CTRL,
+ REE_AF_REEXR_CTRL_MODE_L1_L2);
+
+ /* REEX Poll all RTRU DONE 2 bits */
+ err = rvu_poll_reg(rvu, block->addr, REE_AF_REEXR_STATUS,
+ (REE_AF_REEXR_STATUS_L1_CACHE_INIT_DONE |
+ REE_AF_REEXR_STATUS_L2_CACHE_INIT_DONE),
+ false);
+ if (err) {
+ dev_err(rvu->dev, "REE cache & init done failed");
+ return err;
+ }
+ }
+
+ /* Before 1st time en-queue, set REEX RTRU.GO bit to 1 */
+ rvu_write64(rvu, block->addr, REE_AF_REEXR_CTRL, REE_AF_REEXR_CTRL_GO);
+ return 0;
+}
+
+static int ree_afaq_done_ack(struct rvu *rvu, struct rvu_block *block,
+ bool poll)
+{
+ u64 val;
+ int err;
+
+	/* Poll on the Done count until it is 1 to see that the last
+	 * instruction has completed. Then write this value to DONE_ACK to
+	 * decrement the Done count.
+	 * Note that no interrupts are used for these counters.
+	 */
+ if (poll) {
+ err = rvu_poll_reg(rvu, block->addr, REE_AF_AQ_DONE,
+ 0x1, false);
+ if (err) {
+ dev_err(rvu->dev, "REE AFAQ done failed");
+ return err;
+ }
+ }
+ val = rvu_read64(rvu, block->addr, REE_AF_AQ_DONE);
+ if (val)
+ rvu_write64(rvu, block->addr, REE_AF_AQ_DONE_ACK, val);
+ return 0;
+}
+
+static void ree_aq_inst_enq(struct rvu *rvu, struct rvu_block *block,
+ struct ree_rsrc *ree, dma_addr_t head, u32 size,
+ int doneint)
+{
+ struct admin_queue *aq = block->aq;
+ struct ree_af_aq_inst_s inst;
+
+ /* Fill instruction */
+ memset(&inst, 0, sizeof(struct ree_af_aq_inst_s));
+ inst.length = size;
+ inst.rof_ptr_addr = (u64)head;
+ inst.doneint = doneint;
+ /* Copy instruction to AF AQ head */
+ memcpy(aq->inst->base + (ree->aq_head * aq->inst->entry_sz),
+ &inst, aq->inst->entry_sz);
+ /* Sync into memory */
+ wmb();
+ /* SW triggers HW AQ.DOORBELL */
+ rvu_write64(rvu, block->addr, REE_AF_AQ_DOORBELL, 1);
+ /* Move Head to next cell in AF AQ.
+ * HW CSR gives only AF AQ tail address
+ */
+ ree->aq_head++;
+ if (ree->aq_head >= aq->inst->qsize)
+ ree->aq_head = 0;
+}
+
+static int ree_reex_memory_alloc(struct rvu *rvu, struct rvu_block *block,
+ struct ree_rsrc *ree, int db_len,
+ int is_incremental)
+{
+ int alloc_len, err, i;
+
+	/* Allocate 128MB of graph memory. This is an IOVA base address
+	 * for the memory image of regular expression graphs.
+	 * Software fills this memory with graph instructions (type 7)
+	 * and HW uses it as external memory for graph search.
+	 */
+ if (!ree->graph_ctx) {
+ err = qmem_alloc(rvu->dev, &ree->graph_ctx, REE_GRAPH_CNT,
+ sizeof(u64));
+ if (err)
+ return err;
+ /* Update Graph address in DRAM */
+ rvu_write64(rvu, block->addr, REE_AF_EM_BASE,
+ (u64)ree->graph_ctx->iova);
+ }
+
+ /* If not incremental programming, clear Graph Memory
+ * before programming
+ */
+ if (!is_incremental)
+ memset(ree->graph_ctx->base, 0, REE_GRAPH_CNT * sizeof(u64));
+
+	/* Allocate buffers to hold ROF data. Each buffer holds a maximum of
+	 * 16384 bytes, i.e. a block of 1K instructions. These blocks are
+	 * pointed to by REE_AF_AQ_INST_S:ROF_PTR_ADDR. Multiple blocks are
+	 * allocated for concurrent work with HW.
+	 */
+ if (!ree->prefix_ctx) {
+ err = qmem_alloc(rvu->dev, &ree->prefix_ctx, REE_PREFIX_CNT,
+ sizeof(struct ree_rof_s));
+ if (err) {
+ qmem_free(rvu->dev, ree->graph_ctx);
+ ree->graph_ctx = NULL;
+ return err;
+ }
+ }
+
+	/* Allocate memory to hold incremental programming checksum reference
+	 * data, which will later be retrieved by the application via mbox.
+	 */
+ if (!ree->ruledbi) {
+ ree->ruledbi = kmalloc_array(REE_RULE_DBI_SIZE, sizeof(void *),
+ GFP_KERNEL);
+ if (!ree->ruledbi) {
+ qmem_free(rvu->dev, ree->graph_ctx);
+ ree->graph_ctx = NULL;
+ qmem_free(rvu->dev, ree->prefix_ctx);
+ ree->prefix_ctx = NULL;
+ return REE_AF_ERR_RULE_DBI_ALLOC_FAILED;
+ }
+ }
+	/* Allocate memory to hold ROF instructions. ROF instructions are
+	 * passed from the application via multiple mbox messages. Once the
+	 * last instruction is passed, they are programmed to REE.
+	 * ROF instructions are kept in memory for future retrieval by the
+	 * application in order to allow incremental programming.
+	 */
+ if (!ree->ruledb) {
+ ree->ruledb = kmalloc_array(REE_RULE_DB_BLOCK_CNT,
+ sizeof(void *), GFP_KERNEL);
+ if (!ree->ruledb) {
+ qmem_free(rvu->dev, ree->graph_ctx);
+ ree->graph_ctx = NULL;
+ qmem_free(rvu->dev, ree->prefix_ctx);
+ ree->prefix_ctx = NULL;
+ kfree(ree->ruledbi);
+ ree->ruledbi = NULL;
+ return REE_AF_ERR_RULE_DB_ALLOC_FAILED;
+ }
+ ree->ruledb_blocks = 0;
+ }
+ alloc_len = ree->ruledb_blocks * REE_RULE_DB_ALLOC_SIZE;
+ while (alloc_len < db_len) {
+ if (ree->ruledb_blocks >= REE_RULE_DB_BLOCK_CNT) {
+ /* No need to free memory here since this is just an
+ * indication that the rule DB is too big.
+ * Unlike the previous allocations, which happen only once,
+ * this allocation can happen over time if larger
+ * ROF files are sent
+ */
+ return REE_AF_ERR_RULE_DB_TOO_BIG;
+ }
+ ree->ruledb[ree->ruledb_blocks] =
+ kmalloc(REE_RULE_DB_ALLOC_SIZE, GFP_KERNEL);
+ if (!ree->ruledb[ree->ruledb_blocks]) {
+ for (i = 0; i < ree->ruledb_blocks; i++)
+ kfree(ree->ruledb[i]);
+ qmem_free(rvu->dev, ree->graph_ctx);
+ ree->graph_ctx = NULL;
+ qmem_free(rvu->dev, ree->prefix_ctx);
+ ree->prefix_ctx = NULL;
+ kfree(ree->ruledbi);
+ ree->ruledbi = NULL;
+ kfree(ree->ruledb);
+ ree->ruledb = NULL;
+ return REE_AF_ERR_RULE_DB_BLOCK_ALLOC_FAILED;
+ }
+ ree->ruledb_blocks += 1;
+ alloc_len += REE_RULE_DB_ALLOC_SIZE;
+ }
+
+ return 0;
+}
+
+static
+int ree_reex_cksum_compare(struct rvu *rvu, int blkaddr,
+ struct ree_rule_db_entry **rule_db,
+ int *rule_db_len, enum ree_cmp_ops cmp)
+{
+ u64 offset;
+ u64 reg;
+
+ /* ROF instructions have 3 fields: type, address and data.
+ * Instructions of type 1,2,3 and 5 are compared against CSR values.
+ * The address of the CSR is calculated from the instruction address.
+ * The CSR value is compared against instruction data.
+ * REE AF REEX comparison registers are in 2 sections: main and rtru.
+ * Main CSR base address is 0x8000, rtru CSR base address is 0x8200.
+ * Instruction address bits 16 to 18 indicate the block from which one
+ * takes the base address: main is 0x0000, RTRU is 0x0001.
+ * The low 5 bits indicate the offset, which is multiplied by 8.
+ * The address is calculated as follows:
+ * - Base address is 0x8000
+ * - bits 16 to 18 are multiplied by 0x200
+ * - Low 5 bits are multiplied by 8
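+ * For example (given the layout described above), an instruction
+ * address of 0x10005 maps to 0x8000 + 1 * 0x200 + 5 * 8 = 0x8228.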
+ */
+ offset = REE_AF_REEX_CSR_BLOCK_BASE_ADDR +
+ ((((*rule_db)->addr & REE_AF_REEX_CSR_BLOCK_ID_MASK) >>
+ REE_AF_REEX_CSR_BLOCK_ID_SHIFT) *
+ REE_AF_REEX_CSR_BLOCK_ID) +
+ (((*rule_db)->addr & REE_AF_REEX_CSR_INDEX_MASK) *
+ REE_AF_REEX_CSR_INDEX);
+ reg = rvu_read64(rvu, blkaddr, offset);
+ switch (cmp) {
+ case REE_CMP_EQ:
+ if (reg != (*rule_db)->value) {
+ dev_err(rvu->dev, "REE addr %llx data %llx neq %llx",
+ offset, reg, (*rule_db)->value);
+ return REE_AF_ERR_RULE_DB_EQ_BAD_VALUE;
+ }
+ break;
+ case REE_CMP_GEQ:
+ if (reg < (*rule_db)->value) {
+ dev_err(rvu->dev, "REE addr %llx data %llx ngeq %llx",
+ offset, reg, (*rule_db)->value);
+ return REE_AF_ERR_RULE_DB_GEQ_BAD_VALUE;
+ }
+ break;
+ case REE_CMP_LEQ:
+ if (reg > (*rule_db)->value) {
+ dev_err(rvu->dev, "REE addr %llx data %llx nleq %llx",
+ offset, reg, (*rule_db)->value);
+ return REE_AF_ERR_RULE_DB_LEQ_BAD_VALUE;
+ }
+ break;
+ default:
+ dev_err(rvu->dev, "REE addr %llx data %llx default %llx",
+ offset, reg, (*rule_db)->value);
+ return REE_AF_ERR_RULE_UNKNOWN_VALUE;
+ }
+
+ (*rule_db)++;
+ *rule_db_len -= sizeof(struct ree_rule_db_entry);
+ return 0;
+}
+
+static
+void ree_reex_prefix_write(void **prefix_ptr,
+ struct ree_rule_db_entry **rule_db,
+ int *rule_db_len, u32 *count,
+ u32 *db_block_len)
+{
+ struct ree_rof_s rof_entry;
+
+ while ((*rule_db)->type == REE_ROF_TYPE_6) {
+ rof_entry.typ = (*rule_db)->type;
+ rof_entry.addr = (*rule_db)->addr;
+ rof_entry.data = (*rule_db)->value;
+ memcpy((*prefix_ptr), (void *)(&rof_entry),
+ sizeof(struct ree_rof_s));
+ /* AF AQ prefix block to copy to */
+ (*prefix_ptr) += sizeof(struct ree_rof_s);
+ /* Location in ROF DB that was parsed by now */
+ (*rule_db)++;
+ /* Length of ROF DB left to handle */
+ (*rule_db_len) -= sizeof(struct ree_rule_db_entry);
+ /* Number of type 6 rows that were parsed */
+ (*count)++;
+ /* Go over current block only */
+ (*db_block_len)--;
+ if (*db_block_len == 0)
+ break;
+ }
+}
+
+static
+int ree_reex_graph_write(struct ree_rsrc *ree,
+ struct ree_rule_db_entry **rule_db, int *rule_db_len,
+ u32 *db_block_len)
+{
+ u32 offset;
+
+ while ((*rule_db)->type == REE_ROF_TYPE_7) {
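+ /* The low 24 address bits index 64-bit words in graph memory;
+ * convert them to a byte offset
+ */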
+ offset = ((*rule_db)->addr & 0xFFFFFF) << 3;
+ if (offset >= REE_GRAPH_CNT * 8)
+ return REE_AF_ERR_GRAPH_ADDRESS_TOO_BIG;
+ memcpy(ree->graph_ctx->base + offset,
+ &(*rule_db)->value, sizeof((*rule_db)->value));
+ (*rule_db)++;
+ *rule_db_len -= sizeof(struct ree_rule_db_entry);
+ /* Go over current block only */
+ (*db_block_len)--;
+ if (*db_block_len == 0)
+ break;
+ }
+ return 0;
+}
+
+static
+int ree_rof_data_validation(struct rvu *rvu, int blkaddr,
+ struct ree_rsrc *ree, int *db_block,
+ struct ree_rule_db_entry **rule_db_ptr,
+ int *rule_db_len, u32 *db_block_len)
+{
+ int err;
+
+ /* Parse ROF data */
+ while (*rule_db_len > 0) {
+ switch ((*rule_db_ptr)->type) {
+ case REE_ROF_TYPE_1:
+ err = ree_reex_cksum_compare(rvu, blkaddr, rule_db_ptr,
+ rule_db_len, REE_CMP_EQ);
+ if (err < 0)
+ return err;
+ break;
+ case REE_ROF_TYPE_2:
+ err = ree_reex_cksum_compare(rvu, blkaddr, rule_db_ptr,
+ rule_db_len, REE_CMP_GEQ);
+ if (err < 0)
+ return err;
+ break;
+ case REE_ROF_TYPE_3:
+ err = ree_reex_cksum_compare(rvu, blkaddr, rule_db_ptr,
+ rule_db_len, REE_CMP_LEQ);
+ if (err < 0)
+ return err;
+ break;
+ case REE_ROF_TYPE_4:
+ /* Type 4 handles internal memory */
+ (*rule_db_ptr)++;
+ (*rule_db_len) -= sizeof(struct ree_rule_db_entry);
+ break;
+ case REE_ROF_TYPE_5:
+ err = ree_reex_cksum_compare(rvu, blkaddr, rule_db_ptr,
+ rule_db_len, REE_CMP_EQ);
+ if (err < 0)
+ return err;
+ break;
+ case REE_ROF_TYPE_6:
+ case REE_ROF_TYPE_7:
+ return 0;
+ default:
+ /* Other types not supported */
+ (*rule_db_ptr)++;
+ *rule_db_len -= sizeof(struct ree_rule_db_entry);
+ return REE_AF_ERR_BAD_RULE_TYPE;
+ }
+ (*db_block_len)--;
+ /* If the rule DB is larger than 4M, the parser has to
+ * move on to the next 4M db block
+ */
+ if (*db_block_len == 0) {
+ (*db_block)++;
+ *rule_db_ptr = ree->ruledb[(*db_block)];
+ *db_block_len = (REE_RULE_DB_ALLOC_SIZE >> 4);
+ }
+ }
+ return 0;
+}
+
+static
+int ree_rof_data_enq(struct rvu *rvu, struct rvu_block *block,
+ struct ree_rsrc *ree,
+ struct ree_rule_db_entry **rule_db_ptr,
+ int *rule_db_len, int *db_block, u32 *db_block_len)
+{
+ void *prefix_ptr = ree->prefix_ctx->base;
+ u32 size, num_of_entries = 0;
+ dma_addr_t head;
+ int err;
+
+ /* Parse ROF data */
+ while (*rule_db_len > 0) {
+ switch ((*rule_db_ptr)->type) {
+ case REE_ROF_TYPE_1:
+ case REE_ROF_TYPE_2:
+ case REE_ROF_TYPE_3:
+ case REE_ROF_TYPE_4:
+ case REE_ROF_TYPE_5:
+ break;
+ case REE_ROF_TYPE_6:
+ ree_reex_prefix_write(&prefix_ptr, rule_db_ptr,
+ rule_db_len, &num_of_entries,
+ db_block_len);
+ break;
+ case REE_ROF_TYPE_7:
+ err = ree_reex_graph_write(ree, rule_db_ptr,
+ rule_db_len, db_block_len);
+ if (err)
+ return err;
+ break;
+ default:
+ /* Other types not supported */
+ return REE_AF_ERR_BAD_RULE_TYPE;
+ }
+ /* If the rule DB is larger than 4M, the parser has to
+ * move on to the next 4M db block
+ */
+ if (*db_block_len == 0) {
+ (*db_block)++;
+ *rule_db_ptr = ree->ruledb[(*db_block)];
+ *db_block_len = (REE_RULE_DB_ALLOC_SIZE >> 4);
+ }
+ /* If there is no more prefix or graph data,
+ * enqueue the prefix data and continue with data validation
+ */
+ if (((*rule_db_ptr)->type != REE_ROF_TYPE_6) &&
+ ((*rule_db_ptr)->type != REE_ROF_TYPE_7))
+ break;
+ }
+
+ /* Each block is filled with up to 1K instructions.
+ * Enqueue all available blocks to the AF AQ
+ */
+ head = ree->prefix_ctx->iova;
+ while (num_of_entries > 0) {
+ if (num_of_entries > REE_PREFIX_PTR_LEN) {
+ size = REE_PREFIX_PTR_LEN * sizeof(struct ree_rof_s);
+ ree_aq_inst_enq(rvu, block, ree, head, size, false);
+ head += REE_PREFIX_PTR_LEN * sizeof(struct ree_rof_s);
+ num_of_entries -= REE_PREFIX_PTR_LEN;
+ } else {
+ /* Last chunk of instructions to handle */
+ size = num_of_entries * sizeof(struct ree_rof_s);
+ ree_aq_inst_enq(rvu, block, ree, head, size, true);
+ num_of_entries = 0;
+ }
+ }
+ /* Verify completion of type 6 */
+ return ree_afaq_done_ack(rvu, block, true);
+}
+
+static
+int ree_rule_db_prog(struct rvu *rvu, struct rvu_block *block,
+ struct ree_rsrc *ree, int inc)
+{
+ /* db_block_len holds the number of ROF instructions in a memory block */
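+ /* Each ROF db entry is assumed to be 16 bytes, hence the ">> 4" below */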
+ u32 db_block_len = (REE_RULE_DB_ALLOC_SIZE >> 4);
+ struct ree_rule_db_entry *rule_db_ptr;
+ int rule_db_len, ret = 0, db_block = 0;
+ u64 reg;
+
+ /* Stop fetching new instructions while programming */
+ ret = ree_graceful_disable_control(rvu, block, true);
+ if (ret)
+ return ret;
+
+ /* Force Clock ON
+ * Force bits should be set throughout REEX programming, whether full
+ * or incremental
+ */
+ ree_reex_force_clock(rvu, block, true);
+
+ /* Ack AF AQ done count
+ * In case a previous programming run timed out before receiving a
+ * done indication, acknowledge all outstanding done counts from that
+ * run before the programming process starts
+ */
+ ret = ree_afaq_done_ack(rvu, block, false);
+ if (ret)
+ goto err;
+
+ /* Reinitialize REEX block for programming */
+ ret = ree_reex_programming(rvu, block, inc);
+ if (ret)
+ goto err;
+
+ /* Parse ROF data - validation part */
+ rule_db_len = ree->ruledb_len;
+ rule_db_ptr = (struct ree_rule_db_entry *)ree->ruledb[db_block];
+ ret = ree_rof_data_validation(rvu, block->addr, ree, &db_block,
+ &rule_db_ptr, &rule_db_len,
+ &db_block_len);
+ if (ret)
+ goto err;
+
+ /* Parse ROF data - data part */
+ ret = ree_rof_data_enq(rvu, block, ree, &rule_db_ptr, &rule_db_len,
+ &db_block, &db_block_len);
+ if (ret)
+ goto err;
+ /* Parse ROF data - validation part */
+ ret = ree_rof_data_validation(rvu, block->addr, ree, &db_block,
+ &rule_db_ptr, &rule_db_len,
+ &db_block_len);
+ if (ret)
+ goto err;
+
+ /* REEX Programming DONE: clear GO bit */
+ reg = rvu_read64(rvu, block->addr, REE_AF_REEXR_CTRL);
+ reg = reg & ~(REE_AF_REEXR_CTRL_GO);
+ rvu_write64(rvu, block->addr, REE_AF_REEXR_CTRL, reg);
+
+ ree_reex_enable(rvu, block);
+
+err:
+ /* Force Clock OFF */
+ ree_reex_force_clock(rvu, block, false);
+
+ /* Resume fetching instructions */
+ ree_graceful_disable_control(rvu, block, false);
+
+ return ret;
+}
+
+int rvu_mbox_handler_ree_rule_db_prog(struct rvu *rvu,
+ struct ree_rule_db_prog_req_msg *req,
+ struct msg_rsp *rsp)
+{
+ int blkaddr, db_block = 0, blkid = 0, err;
+ struct rvu_block *block;
+ struct ree_rsrc *ree;
+
+ blkaddr = req->blkaddr;
+ if (!is_block_implemented(rvu->hw, blkaddr))
+ return REE_AF_ERR_BLOCK_NOT_IMPLEMENTED;
+ if (blkaddr == BLKADDR_REE1)
+ blkid = 1;
+
+ block = &rvu->hw->block[blkaddr];
+ ree = &rvu->hw->ree[blkid];
+
+ /* If this is the first block of ROF */
+ if (!req->offset) {
+ if (req->total_len >
+ REE_RULE_DB_ALLOC_SIZE * REE_RULE_DB_BLOCK_CNT)
+ return REE_AF_ERR_RULE_DB_TOO_BIG;
+
+ /* Initialize Programming memory */
+ err = ree_reex_memory_alloc(rvu, block, ree, req->total_len,
+ req->is_incremental);
+ if (err)
+ return err;
+ /* Programming overwrites the existing rule db.
+ * Full (non-incremental) programming also invalidates the rule dbi
+ */
+ ree->ruledb_len = 0;
+ if (!req->is_incremental)
+ ree->ruledbi_len = 0;
+ }
+
+ /* Copy rof data from mbox to ruledb.
+ * Rule db is later used for programming
+ */
+ if (ree->ruledb_len + req->len >
+ ree->ruledb_blocks * REE_RULE_DB_ALLOC_SIZE)
+ return REE_AF_ERR_RULE_DB_WRONG_LENGTH;
+ if (ree->ruledb_len != req->offset)
+ return REE_AF_ERR_RULE_DB_WRONG_OFFSET;
+ /* All messages should be of block size, apart from the last one */
+ if (req->len < REE_RULE_DB_REQ_BLOCK_SIZE && !req->is_last)
+ return REE_AF_ERR_RULE_DB_SHOULD_FILL_REQUEST;
+ /* Each mbox message is 32KB and each ruledb block is 4096KB.
+ * A single mbox message shouldn't span db blocks
+ */
+ db_block = ree->ruledb_len >> REE_RULE_DB_ALLOC_SHIFT;
+ if (db_block >= ree->ruledb_blocks)
+ return REE_AF_ERR_RULE_DB_BLOCK_TOO_BIG;
+ memcpy((void *)((u64)ree->ruledb[db_block] + ree->ruledb_len -
+ db_block * REE_RULE_DB_ALLOC_SIZE), req->rule_db, req->len);
+ ree->ruledb_len += req->len;
+ /* The ROF file is sent in chunks;
+ * wait for the last chunk before starting to program
+ */
+ if (!req->is_last)
+ return 0;
+
+ if (req->total_len != ree->ruledb_len)
+ return REE_AF_ERR_RULE_DB_PARTIAL;
+
+ if (!req->is_incremental || req->is_dbi) {
+ err = ree_rule_db_prog(rvu, block, ree, req->is_incremental);
+ if (err)
+ return err;
+ }
+
+ if (req->is_dbi) {
+ memcpy(ree->ruledbi,
+ ree->ruledb[db_block] +
+ req->total_len - REE_RULE_DBI_SIZE,
+ REE_RULE_DBI_SIZE);
+ ree->ruledbi_len = REE_RULE_DBI_SIZE;
+ }
+
+ return 0;
+}
+
+int
+rvu_mbox_handler_ree_rule_db_get(struct rvu *rvu,
+ struct ree_rule_db_get_req_msg *req,
+ struct ree_rule_db_get_rsp_msg *rsp)
+{
+ int blkaddr, len, blkid = 0, db_block;
+ struct ree_rsrc *ree;
+
+ blkaddr = req->blkaddr;
+ if (!is_block_implemented(rvu->hw, blkaddr))
+ return REE_AF_ERR_BLOCK_NOT_IMPLEMENTED;
+ if (blkaddr == BLKADDR_REE1)
+ blkid = 1;
+ ree = &rvu->hw->ree[blkid];
+
+ /* In case no programming or incremental programming was done yet */
+ if ((req->is_dbi && ree->ruledbi_len == 0) ||
+ (!req->is_dbi && ree->ruledb_len == 0)) {
+ rsp->len = 0;
+ return 0;
+ }
+
+ /* The ROF file is retrieved in chunks.
+ * Verify that the offset is inside the db range
+ */
+ if (req->is_dbi) {
+ if (ree->ruledbi_len < req->offset)
+ return REE_AF_ERR_RULE_DB_INC_OFFSET_TOO_BIG;
+ len = ree->ruledbi_len - req->offset;
+ } else {
+ if (ree->ruledb_len < req->offset)
+ return REE_AF_ERR_RULE_DB_OFFSET_TOO_BIG;
+ len = ree->ruledb_len - req->offset;
+ }
+
+ /* Check if this is the last chunk of db */
+ if (len < REE_RULE_DB_RSP_BLOCK_SIZE) {
+ rsp->is_last = true;
+ rsp->len = len;
+ } else {
+ rsp->is_last = false;
+ rsp->len = REE_RULE_DB_RSP_BLOCK_SIZE;
+ }
+
+ /* Copy DB chunk to response */
+ if (req->is_dbi) {
+ memcpy(rsp->rule_db, ree->ruledbi + req->offset, rsp->len);
+ } else {
+ db_block = req->offset >> REE_RULE_DB_ALLOC_SHIFT;
+ memcpy(rsp->rule_db, ree->ruledb[db_block] + req->offset -
+ db_block * REE_RULE_DB_ALLOC_SIZE, rsp->len);
+ }
+
+ return 0;
+}
+
+int
+rvu_mbox_handler_ree_rule_db_len_get(struct rvu *rvu, struct ree_req_msg *req,
+ struct ree_rule_db_len_rsp_msg *rsp)
+{
+ int blkaddr, blkid = 0;
+
+ blkaddr = req->blkaddr;
+ if (!is_block_implemented(rvu->hw, blkaddr))
+ return REE_AF_ERR_BLOCK_NOT_IMPLEMENTED;
+ if (blkaddr == BLKADDR_REE1)
+ blkid = 1;
+ rsp->len = rvu->hw->ree[blkid].ruledb_len;
+ rsp->inc_len = rvu->hw->ree[blkid].ruledbi_len;
+ return 0;
+}
+
+int rvu_mbox_handler_ree_config_lf(struct rvu *rvu,
+ struct ree_lf_req_msg *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ int lf, blkaddr, num_lfs;
+ struct rvu_block *block;
+ u64 val;
+
+ blkaddr = req->blkaddr;
+ if (!is_block_implemented(rvu->hw, blkaddr))
+ return REE_AF_ERR_BLOCK_NOT_IMPLEMENTED;
+ block = &rvu->hw->block[blkaddr];
+
+ /* Need to translate the REE LF slot to a global LF number.
+ * VFs use local numbering from 0 to number of LFs - 1
+ */
+ lf = rvu_get_lf(rvu, block, pcifunc, req->lf);
+ if (lf < 0)
+ return REE_AF_ERR_LF_INVALID;
+
+ num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, req->hdr.pcifunc),
+ blkaddr);
+ if (lf >= num_lfs)
+ return REE_AF_ERR_LF_NO_MORE_RESOURCES;
+
+ /* LF instruction buffer size and priority are configured by AF.
+ * Priority value can be 0 or 1
+ */
+ if (req->pri > 1)
+ return REE_AF_ERR_LF_WRONG_PRIORITY;
+ if (req->size > REE_AF_QUE_SBUF_CTL_MAX_SIZE)
+ return REE_AF_ERR_LF_SIZE_TOO_BIG;
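+ /* Pack the instruction buffer size (above SIZE_SHIFT) and the
+ * priority (bit 0) into REE_AF_QUE_SBUF_CTL for this LF
+ */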
+ val = req->size;
+ val = val << REE_AF_QUE_SBUF_CTL_SIZE_SHIFT;
+ val += req->pri;
+ rvu_write64(rvu, blkaddr, REE_AF_QUE_SBUF_CTL(lf), val);
+
+ return 0;
+}
+
+int rvu_mbox_handler_ree_rd_wr_register(struct rvu *rvu,
+ struct ree_rd_wr_reg_msg *req,
+ struct ree_rd_wr_reg_msg *rsp)
+{
+ int blkaddr;
+
+ blkaddr = req->blkaddr;
+ if (!is_block_implemented(rvu->hw, blkaddr))
+ return REE_AF_ERR_BLOCK_NOT_IMPLEMENTED;
+ rsp->reg_offset = req->reg_offset;
+ rsp->ret_val = req->ret_val;
+ rsp->is_write = req->is_write;
+
+ switch (req->reg_offset) {
+ case REE_AF_REEXM_MAX_MATCH:
+ break;
+
+ default:
+ /* Access to register denied */
+ return REE_AF_ERR_ACCESS_DENIED;
+ }
+
+ if (req->is_write)
+ rvu_write64(rvu, blkaddr, req->reg_offset, req->val);
+ else
+ rsp->val = rvu_read64(rvu, blkaddr, req->reg_offset);
+
+ return 0;
+}
+
+static int ree_aq_inst_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
+ int qsize, int inst_size, int res_size)
+{
+ struct admin_queue *aq;
+ int err;
+
+ *ad_queue = devm_kzalloc(rvu->dev, sizeof(*aq), GFP_KERNEL);
+ if (!*ad_queue)
+ return -ENOMEM;
+ aq = *ad_queue;
+
+ /* Allocate memory for instructions, i.e. the AQ */
+ err = qmem_alloc(rvu->dev, &aq->inst, qsize, inst_size);
+ if (err) {
+ devm_kfree(rvu->dev, aq);
+ return err;
+ }
+
+ /* The REE AF AQ has no result queue, and the lock is not used */
+ aq->res = NULL;
+
+ return 0;
+}
+
+static irqreturn_t rvu_ree_af_ras_intr_handler(int irq, void *ptr)
+{
+ struct rvu_block *block = ptr;
+ struct rvu *rvu = block->rvu;
+ int blkaddr = block->addr;
+ u64 intr;
+
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ intr = rvu_read64(block->rvu, blkaddr, REE_AF_RAS);
+ if (intr & REE_AF_RAS_DAT_PSN)
+ dev_err_ratelimited(rvu->dev, "REE: Poison received on an NCB data response\n");
+ if (intr & REE_AF_RAS_LD_CMD_PSN)
+ dev_err_ratelimited(rvu->dev, "REE: Poison received on an NCB instruction response\n");
+ if (intr & REE_AF_RAS_LD_REEX_PSN)
+ dev_err_ratelimited(rvu->dev, "REE: Poison received on a REEX response\n");
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, REE_AF_RAS, intr);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rvu_ree_af_rvu_intr_handler(int irq, void *ptr)
+{
+ struct rvu_block *block = ptr;
+ struct rvu *rvu = block->rvu;
+ int blkaddr = block->addr;
+ u64 intr;
+
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ intr = rvu_read64(rvu, blkaddr, REE_AF_RVU_INT);
+ if (intr & REE_AF_RVU_INT_UNMAPPED_SLOT)
+ dev_err_ratelimited(rvu->dev, "REE: Unmapped slot error\n");
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, REE_AF_RVU_INT, intr);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rvu_ree_af_aq_intr_handler(int irq, void *ptr)
+{
+ struct rvu_block *block = ptr;
+ struct rvu *rvu = block->rvu;
+ int blkaddr = block->addr;
+ u64 intr;
+
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ intr = rvu_read64(rvu, blkaddr, REE_AF_AQ_INT);
+
+ if (intr & REE_AF_AQ_INT_DOVF)
+ dev_err_ratelimited(rvu->dev, "REE: DOORBELL overflow\n");
+ if (intr & REE_AF_AQ_INT_IRDE)
+ dev_err_ratelimited(rvu->dev, "REE: Instruction NCB read response error\n");
+ if (intr & REE_AF_AQ_INT_PRDE)
+ dev_err_ratelimited(rvu->dev, "REE: Payload NCB read response error\n");
+ if (intr & REE_AF_AQ_INT_PLLE)
+ dev_err_ratelimited(rvu->dev, "REE: Payload length error\n");
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, REE_AF_AQ_INT, intr);
+ return IRQ_HANDLED;
+}
+
+static void rvu_ree_unregister_interrupts_block(struct rvu *rvu, int blkaddr)
+{
+ int i, offs;
+ struct rvu_block *block;
+ struct rvu_hwinfo *hw = rvu->hw;
+
+ if (!is_block_implemented(hw, blkaddr))
+ return;
+ block = &hw->block[blkaddr];
+
+ offs = rvu_read64(rvu, blkaddr, REE_PRIV_AF_INT_CFG) & 0x7FF;
+ if (!offs) {
+ dev_warn(rvu->dev,
+ "Failed to get REE_AF_INT vector offsets");
+ return;
+ }
+
+ /* Disable all REE AF interrupts */
+ rvu_write64(rvu, blkaddr, REE_AF_RAS_ENA_W1C, ~0ULL);
+ rvu_write64(rvu, blkaddr, REE_AF_RVU_INT_ENA_W1C, ~0ULL);
+ rvu_write64(rvu, blkaddr, REE_AF_AQ_DONE_INT_ENA_W1C, ~0ULL);
+ rvu_write64(rvu, blkaddr, REE_AF_AQ_INT_ENA_W1C, ~0ULL);
+
+ for (i = 0; i < REE_AF_INT_VEC_CNT; i++)
+ if (rvu->irq_allocated[offs + i]) {
+ free_irq(pci_irq_vector(rvu->pdev, offs + i), block);
+ rvu->irq_allocated[offs + i] = false;
+ }
+}
+
+void rvu_ree_unregister_interrupts(struct rvu *rvu)
+{
+ rvu_ree_unregister_interrupts_block(rvu, BLKADDR_REE0);
+ rvu_ree_unregister_interrupts_block(rvu, BLKADDR_REE1);
+}
+
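+/* Register a REE AF interrupt handler and track the allocation.
+ * Returns non-zero on success; callers treat a zero return as failure.
+ */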
+static int rvu_ree_af_request_irq(struct rvu_block *block,
+ int offset, irq_handler_t handler,
+ const char *name)
+{
+ int ret = 0;
+ struct rvu *rvu = block->rvu;
+
+ WARN_ON(rvu->irq_allocated[offset]);
+ rvu->irq_allocated[offset] = false;
+ sprintf(&rvu->irq_name[offset * NAME_SIZE], "%s", name);
+ ret = request_irq(pci_irq_vector(rvu->pdev, offset), handler, 0,
+ &rvu->irq_name[offset * NAME_SIZE], block);
+ if (ret)
+ dev_warn(block->rvu->dev, "Failed to register %s irq\n", name);
+ else
+ rvu->irq_allocated[offset] = true;
+
+ return rvu->irq_allocated[offset];
+}
+
+static int rvu_ree_register_interrupts_block(struct rvu *rvu, int blkaddr,
+ int blkid)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int offs, ret = 0;
+
+ if (!is_block_implemented(rvu->hw, blkaddr))
+ return 0;
+
+ block = &hw->block[blkaddr];
+
+ /* Read interrupt vector */
+ offs = rvu_read64(rvu, blkaddr, REE_PRIV_AF_INT_CFG) & 0x7FF;
+ if (!offs) {
+ dev_warn(rvu->dev,
+ "Failed to get REE_AF_INT vector offsets");
+ return 0;
+ }
+
+ /* Register and enable RAS interrupt */
+ ret = rvu_ree_af_request_irq(block, offs + REE_AF_INT_VEC_RAS,
+ rvu_ree_af_ras_intr_handler,
+ ree_irq_name[blkid][REE_AF_INT_VEC_RAS]);
+ if (!ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, REE_AF_RAS_ENA_W1S, ~0ULL);
+
+ /* Register and enable RVU interrupt */
+ ret = rvu_ree_af_request_irq(block, offs + REE_AF_INT_VEC_RVU,
+ rvu_ree_af_rvu_intr_handler,
+ ree_irq_name[blkid][REE_AF_INT_VEC_RVU]);
+ if (!ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, REE_AF_RVU_INT_ENA_W1S, ~0ULL);
+
+ /* QUE DONE */
+ /* Interrupt for QUE DONE is not required, software is polling
+ * DONE count to get indication that all instructions are completed
+ */
+
+ /* Register and enable AQ interrupt */
+ ret = rvu_ree_af_request_irq(block, offs + REE_AF_INT_VEC_AQ,
+ rvu_ree_af_aq_intr_handler,
+ ree_irq_name[blkid][REE_AF_INT_VEC_AQ]);
+ if (!ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, REE_AF_AQ_INT_ENA_W1S, ~0ULL);
+
+ return 0;
+err:
+ rvu_ree_unregister_interrupts(rvu);
+ return ret;
+}
+
+int rvu_ree_register_interrupts(struct rvu *rvu)
+{
+ int ret;
+
+ ret = rvu_ree_register_interrupts_block(rvu, BLKADDR_REE0, 0);
+ if (ret)
+ return ret;
+
+ return rvu_ree_register_interrupts_block(rvu, BLKADDR_REE1, 1);
+}
+
+static int rvu_ree_init_block(struct rvu *rvu, int blkaddr)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int ret = 0, blkid = 0;
+ struct ree_rsrc *ree;
+ u64 val;
+
+ if (!is_block_implemented(rvu->hw, blkaddr))
+ return 0;
+
+ block = &hw->block[blkaddr];
+ if (blkaddr == BLKADDR_REE1)
+ blkid = 1;
+ ree = &rvu->hw->ree[blkid];
+
+ /* Administrative instruction queue allocation */
+ ret = ree_aq_inst_alloc(rvu, &block->aq,
+ REE_AQ_SIZE,
+ sizeof(struct ree_af_aq_inst_s),
+ 0);
+ if (ret)
+ return ret;
+
+ /* Administrative instruction queue address */
+ rvu_write64(rvu, block->addr, REE_AF_AQ_SBUF_ADDR,
+ (u64)block->aq->inst->iova);
+
+ /* Move the head to the start only when a new AQ is allocated and
+ * configured. Otherwise the head wraps around
+ */
+ ree->aq_head = 0;
+
+ /* Administrative queue instruction buffer size, in units of 128B
+ * (8 * REE_AF_AQ_INST_S)
+ */
+ val = REE_AQ_SIZE >> 3;
+ rvu_write64(rvu, block->addr, REE_AF_AQ_SBUF_CTL,
+ (val << REE_AF_AQ_SBUF_CTL_SIZE_SHIFT));
+
+ /* Enable instruction queue */
+ rvu_write64(rvu, block->addr, REE_AF_AQ_ENA, 0x1);
+
+ /* Force Clock ON
+ * Force bits should be set throughout the REEX Initialization
+ */
+ ree_reex_force_clock(rvu, block, true);
+
+ /* REEX MAIN_CSR configuration */
+ rvu_write64(rvu, block->addr, REE_AF_REEXM_MAX_MATCH,
+ REE_AF_REEXM_MAX_MATCH_MAX);
+ rvu_write64(rvu, block->addr, REE_AF_REEXM_MAX_PRE_CNT,
+ REE_AF_REEXM_MAX_PRE_CNT_COUNT);
+ rvu_write64(rvu, block->addr, REE_AF_REEXM_MAX_PTHREAD_CNT,
+ REE_AF_REEXM_MAX_PTHREAD_COUNT);
+ rvu_write64(rvu, block->addr, REE_AF_REEXM_MAX_LATENCY_CNT,
+ REE_AF_REEXM_MAX_LATENCY_COUNT);
+
+ /* REEX Set & Clear MAIN_CSR init */
+ rvu_write64(rvu, block->addr, REE_AF_REEXM_CTRL, 0x1);
+ rvu_write64(rvu, block->addr, REE_AF_REEXM_CTRL, 0x0);
+
+ /* REEX Poll MAIN_CSR INIT_DONE */
+ ret = rvu_poll_reg(rvu, block->addr, REE_AF_REEXM_STATUS,
+ BIT_ULL(0), false);
+ if (ret) {
+ dev_err(rvu->dev, "REE reexm poll for init done failed");
+ goto err;
+ }
+
+err:
+ /* Force Clock OFF */
+ ree_reex_force_clock(rvu, block, false);
+
+ return ret;
+}
+
+int rvu_ree_init(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int err;
+
+ hw->ree = devm_kcalloc(rvu->dev, MAX_REE_BLKS, sizeof(struct ree_rsrc),
+ GFP_KERNEL);
+ if (!hw->ree)
+ return -ENOMEM;
+
+ err = rvu_ree_init_block(rvu, BLKADDR_REE0);
+ if (err)
+ return err;
+ return rvu_ree_init_block(rvu, BLKADDR_REE1);
+}
+
+static void rvu_ree_freemem_block(struct rvu *rvu, int blkaddr, int blkid)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ struct ree_rsrc *ree;
+ int i = 0;
+
+ if (!is_block_implemented(rvu->hw, blkaddr))
+ return;
+
+ block = &hw->block[blkaddr];
+ ree = &hw->ree[blkid];
+
+ rvu_aq_free(rvu, block->aq);
+ if (ree->graph_ctx)
+ qmem_free(rvu->dev, ree->graph_ctx);
+ if (ree->prefix_ctx)
+ qmem_free(rvu->dev, ree->prefix_ctx);
+ if (ree->ruledb) {
+ for (i = 0; i < ree->ruledb_blocks; i++)
+ kfree(ree->ruledb[i]);
+ kfree(ree->ruledb);
+ }
+ kfree(ree->ruledbi);
+}
+
+void rvu_ree_freemem(struct rvu *rvu)
+{
+ rvu_ree_freemem_block(rvu, BLKADDR_REE0, 0);
+ rvu_ree_freemem_block(rvu, BLKADDR_REE1, 1);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c
index 9d7c135c7965..b3150f053291 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c
@@ -1,11 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
-/* Marvell OcteonTx2 RVU Admin Function driver
+/* Marvell RVU Admin Function driver
*
- * Copyright (C) 2018 Marvell International Ltd.
+ * Copyright (C) 2018 Marvell.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/module.h>
@@ -33,9 +30,9 @@ static struct hw_reg_map txsch_reg_map[NIX_TXSCH_LVL_CNT] = {
{NIX_TXSCH_LVL_SMQ, 2, 0xFFFF, {{0x0700, 0x0708}, {0x1400, 0x14C8} } },
{NIX_TXSCH_LVL_TL4, 3, 0xFFFF, {{0x0B00, 0x0B08}, {0x0B10, 0x0B18},
{0x1200, 0x12E0} } },
- {NIX_TXSCH_LVL_TL3, 3, 0xFFFF, {{0x1000, 0x10E0}, {0x1600, 0x1608},
- {0x1610, 0x1618} } },
- {NIX_TXSCH_LVL_TL2, 2, 0xFFFF, {{0x0E00, 0x0EE0}, {0x1700, 0x1768} } },
+ {NIX_TXSCH_LVL_TL3, 4, 0xFFFF, {{0x1000, 0x10E0}, {0x1600, 0x1608},
+ {0x1610, 0x1618}, {0x1700, 0x17B0} } },
+ {NIX_TXSCH_LVL_TL2, 2, 0xFFFF, {{0x0E00, 0x0EE0}, {0x1700, 0x17B0} } },
{NIX_TXSCH_LVL_TL1, 1, 0xFFFF, {{0x0C00, 0x0D98} } },
};
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
index 7ca599b973c0..4689041bbdcf 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -1,11 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Marvell OcteonTx2 RVU Admin Function driver
+/* Marvell RVU Admin Function driver
*
- * Copyright (C) 2018 Marvell International Ltd.
+ * Copyright (C) 2018 Marvell.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef RVU_REG_H
@@ -44,6 +41,18 @@
#define RVU_AF_PFME_INT_W1S (0x28c8)
#define RVU_AF_PFME_INT_ENA_W1S (0x28d0)
#define RVU_AF_PFME_INT_ENA_W1C (0x28d8)
+#define RVU_AF_PFX_BAR4_ADDR(a) (0x5000 | (a) << 4)
+#define RVU_AF_PFX_BAR4_CFG(a) (0x5200 | (a) << 4)
+#define RVU_AF_PFX_VF_BAR4_ADDR(a) (0x5400 | (a) << 4)
+#define RVU_AF_PFX_VF_BAR4_CFG(a) (0x5600 | (a) << 4)
+#define RVU_AF_PFX_LMTLINE_ADDR(a) (0x5800 | (a) << 4)
+#define RVU_AF_SMMU_ADDR_REQ (0x6000)
+#define RVU_AF_SMMU_TXN_REQ (0x6008)
+#define RVU_AF_SMMU_ADDR_RSP_STS (0x6010)
+#define RVU_AF_SMMU_ADDR_TLN (0x6018)
+#define RVU_AF_SMMU_TLN_FLIT0 (0x6020)
+
+#define RVU_AF_BAR2_ALIASX(a, b) (0x9100000ull | (a) << 12 | (b))
/* Admin function's privileged PF/VF registers */
#define RVU_PRIV_CONST (0x8000000)
@@ -54,20 +63,22 @@
#define RVU_PRIV_PFX_MSIX_CFG(a) (0x8000110 | (a) << 16)
#define RVU_PRIV_PFX_ID_CFG(a) (0x8000120 | (a) << 16)
#define RVU_PRIV_PFX_INT_CFG(a) (0x8000200 | (a) << 16)
-#define RVU_PRIV_PFX_NIX0_CFG (0x8000300)
+#define RVU_PRIV_PFX_NIXX_CFG(a) (0x8000300 | (a) << 3)
#define RVU_PRIV_PFX_NPA_CFG (0x8000310)
#define RVU_PRIV_PFX_SSO_CFG (0x8000320)
#define RVU_PRIV_PFX_SSOW_CFG (0x8000330)
#define RVU_PRIV_PFX_TIM_CFG (0x8000340)
-#define RVU_PRIV_PFX_CPT0_CFG (0x8000350)
+#define RVU_PRIV_PFX_CPTX_CFG(a) (0x8000350 | (a) << 3)
#define RVU_PRIV_BLOCK_TYPEX_REV(a) (0x8000400 | (a) << 3)
#define RVU_PRIV_HWVFX_INT_CFG(a) (0x8001280 | (a) << 16)
-#define RVU_PRIV_HWVFX_NIX0_CFG (0x8001300)
+#define RVU_PRIV_HWVFX_NIXX_CFG(a) (0x8001300 | (a) << 3)
#define RVU_PRIV_HWVFX_NPA_CFG (0x8001310)
#define RVU_PRIV_HWVFX_SSO_CFG (0x8001320)
#define RVU_PRIV_HWVFX_SSOW_CFG (0x8001330)
#define RVU_PRIV_HWVFX_TIM_CFG (0x8001340)
-#define RVU_PRIV_HWVFX_CPT0_CFG (0x8001350)
+#define RVU_PRIV_HWVFX_CPTX_CFG(a) (0x8001350 | (a) << 3)
+#define RVU_PRIV_PFX_REEX_CFG(a) (0x8000360 | (a) << 3)
+#define RVU_PRIV_HWVFX_REEX_CFG(a) (0x8001360 | (a) << 3)
/* RVU PF registers */
#define RVU_PF_VFX_PFVF_MBOX0 (0x00000)
@@ -100,6 +111,8 @@
#define RVU_PF_MSIX_VECX_ADDR(a) (0x000 | (a) << 4)
#define RVU_PF_MSIX_VECX_CTL(a) (0x008 | (a) << 4)
#define RVU_PF_MSIX_PBAX(a) (0xF0000 | (a) << 3)
+#define RVU_PF_VF_MBOX_ADDR (0xC40)
+#define RVU_PF_LMTLINE_ADDR (0xC48)
/* RVU VF registers */
#define RVU_VF_VFPF_MBOX0 (0x00000)
@@ -112,6 +125,7 @@
#define NPA_AF_LF_RST (0x0020)
#define NPA_AF_GEN_CFG (0x0030)
#define NPA_AF_NDC_CFG (0x0040)
+#define NPA_AF_NDC_SYNC (0x0050)
#define NPA_AF_INP_CTL (0x00D0)
#define NPA_AF_ACTIVE_CYCLES_PC (0x00F0)
#define NPA_AF_AVG_DELAY (0x0100)
@@ -144,6 +158,7 @@
#define NPA_AF_AQ_DONE_INT_W1S (0x0688)
#define NPA_AF_AQ_DONE_ENA_W1S (0x0690)
#define NPA_AF_AQ_DONE_ENA_W1C (0x0698)
+#define NPA_AF_BATCH_CTL (0x06a0)
#define NPA_AF_LFX_AURAS_CFG(a) (0x4000 | (a) << 18)
#define NPA_AF_LFX_LOC_AURAS_BASE(a) (0x4010 | (a) << 18)
#define NPA_AF_LFX_QINTS_CFG(a) (0x4100 | (a) << 18)
@@ -153,6 +168,9 @@
#define NPA_PRIV_LFX_INT_CFG (0x10020)
#define NPA_AF_RVU_LF_CFG_DEBUG (0x10030)
+#define NPA_AF_BAR2_SEL (0x9000000ull)
+#define NPA_AF_BAR2_ALIASX(a, b) RVU_AF_BAR2_ALIASX(a, b)
+
/* NIX block's admin function registers */
#define NIX_AF_CFG (0x0000)
#define NIX_AF_STATUS (0x0010)
@@ -164,6 +182,7 @@
#define NIX_AF_SQ_CONST (0x0040)
#define NIX_AF_CQ_CONST (0x0048)
#define NIX_AF_RQ_CONST (0x0050)
+#define NIX_AF_PL_CONST (0x0058)
#define NIX_AF_PSE_CONST (0x0060)
#define NIX_AF_TL1_CONST (0x0070)
#define NIX_AF_TL2_CONST (0x0078)
@@ -174,9 +193,11 @@
#define NIX_AF_LSO_CFG (0x00A8)
#define NIX_AF_BLK_RST (0x00B0)
#define NIX_AF_TX_TSTMP_CFG (0x00C0)
+#define NIX_AF_PL_TS (0x00C8)
#define NIX_AF_RX_CFG (0x00D0)
#define NIX_AF_AVG_DELAY (0x00E0)
#define NIX_AF_CINT_DELAY (0x00F0)
+#define NIX_AF_VWQE_TIMER (0x00F8)
#define NIX_AF_RX_MCAST_BASE (0x0100)
#define NIX_AF_RX_MCAST_CFG (0x0110)
#define NIX_AF_RX_MCAST_BUF_BASE (0x0120)
@@ -201,21 +222,31 @@
#define NIX_AF_RVU_INT_ENA_W1S (0x01D0)
#define NIX_AF_RVU_INT_ENA_W1C (0x01D8)
#define NIX_AF_TCP_TIMER (0x01E0)
-#define NIX_AF_RX_WQE_TAG_CTL (0x01F0)
+#define NIX_AF_RX_DEF_ET(a) (0x01F0ull | (uint64_t)(a) << 3)
#define NIX_AF_RX_DEF_OL2 (0x0200)
#define NIX_AF_RX_DEF_OIP4 (0x0210)
#define NIX_AF_RX_DEF_IIP4 (0x0220)
+#define NIX_AF_RX_DEF_VLAN0_PCP_DEI (0x0228)
#define NIX_AF_RX_DEF_OIP6 (0x0230)
+#define NIX_AF_RX_DEF_VLAN1_PCP_DEI (0x0238)
#define NIX_AF_RX_DEF_IIP6 (0x0240)
#define NIX_AF_RX_DEF_OTCP (0x0250)
#define NIX_AF_RX_DEF_ITCP (0x0260)
#define NIX_AF_RX_DEF_OUDP (0x0270)
#define NIX_AF_RX_DEF_IUDP (0x0280)
#define NIX_AF_RX_DEF_OSCTP (0x0290)
+#define NIX_AF_RX_DEF_CST_APAD0 (0x0298)
#define NIX_AF_RX_DEF_ISCTP (0x02A0)
-#define NIX_AF_RX_DEF_IPSECX (0x02B0)
+#define NIX_AF_RX_DEF_CST_APAD1 (0x02A8)
+#define NIX_AF_RX_DEF_IPSECX(a) (0x02B0ull | (uint64_t)(a) << 3)
+#define NIX_AF_RX_DEF_IIP4_DSCP (0x02E0)
+#define NIX_AF_RX_DEF_OIP4_DSCP (0x02E8)
+#define NIX_AF_RX_DEF_IIP6_DSCP (0x02F0)
+#define NIX_AF_RX_DEF_OIP6_DSCP (0x02F8)
#define NIX_AF_RX_IPSEC_GEN_CFG (0x0300)
-#define NIX_AF_RX_CPTX_INST_ADDR (0x0310)
+#define NIX_AF_RX_CPTX_INST_QSEL(a) (0x0320ull | (uint64_t)(a) << 3)
+#define NIX_AF_RX_CPTX_CREDIT(a) (0x0360ull | (uint64_t)(a) << 3)
+#define NIX_AF_NDC_RX_SYNC (0x03E0)
#define NIX_AF_NDC_TX_SYNC (0x03F0)
#define NIX_AF_AQ_CFG (0x0400)
#define NIX_AF_AQ_BASE (0x0410)
@@ -239,20 +270,22 @@
#define NIX_AF_SEB_ECO (0x0600)
#define NIX_AF_SEB_TEST_BP (0x0610)
#define NIX_AF_NORM_TX_FIFO_STATUS (0x0620)
-#define NIX_AF_EXPR_TX_FIFO_STATUS (0x0630)
#define NIX_AF_SDP_TX_FIFO_STATUS (0x0640)
#define NIX_AF_TX_NPC_CAPTURE_CONFIG (0x0660)
#define NIX_AF_TX_NPC_CAPTURE_INFO (0x0670)
+#define NIX_AF_SEB_CFG (0x05F0)
+#define NIX_PTP_1STEP_EN BIT_ULL(2)
#define NIX_AF_DEBUG_NPC_RESP_DATAX(a) (0x680 | (a) << 3)
#define NIX_AF_SMQX_CFG(a) (0x700 | (a) << 16)
+#define NIX_AF_SMQX_STATUS(a) (0x730 | (a) << 16)
#define NIX_AF_SQM_DBG_CTL_STATUS (0x750)
+#define NIX_AF_DWRR_SDP_MTU (0x790)
+#define NIX_AF_DWRR_RPM_MTU (0x7A0)
#define NIX_AF_PSE_CHANNEL_LEVEL (0x800)
#define NIX_AF_PSE_SHAPER_CFG (0x810)
-#define NIX_AF_TX_EXPR_CREDIT (0x830)
#define NIX_AF_MARK_FORMATX_CTL(a) (0x900 | (a) << 18)
#define NIX_AF_TX_LINKX_NORM_CREDIT(a) (0xA00 | (a) << 16)
-#define NIX_AF_TX_LINKX_EXPR_CREDIT(a) (0xA10 | (a) << 16)
#define NIX_AF_TX_LINKX_SW_XOFF(a) (0xA20 | (a) << 16)
#define NIX_AF_TX_LINKX_HW_XOFF(a) (0xA30 | (a) << 16)
#define NIX_AF_SDP_LINK_CREDIT (0xa40)
@@ -386,7 +419,7 @@
#define NIX_AF_LFX_RX_IPSEC_CFG0(a) (0x4140 | (a) << 17)
#define NIX_AF_LFX_RX_IPSEC_CFG1(a) (0x4148 | (a) << 17)
#define NIX_AF_LFX_RX_IPSEC_DYNO_CFG(a) (0x4150 | (a) << 17)
-#define NIX_AF_LFX_RX_IPSEC_DYNO_BASE(a) (0x4158 | (a) << 17)
+#define NIX_AF_LFX_RX_IPSEC_DYNO_BASE(a)(0x4158 | (a) << 17)
#define NIX_AF_LFX_RX_IPSEC_SA_BASE(a) (0x4170 | (a) << 17)
#define NIX_AF_LFX_TX_STATUS(a) (0x4180 | (a) << 17)
#define NIX_AF_LFX_RX_VTAG_TYPEX(a, b) (0x4200 | (a) << 17 | (b) << 3)
@@ -399,20 +432,188 @@
#define NIX_AF_RX_NPC_MIRROR_RCV (0x4720)
#define NIX_AF_RX_NPC_MIRROR_DROP (0x4730)
#define NIX_AF_RX_ACTIVE_CYCLES_PCX(a) (0x4800 | (a) << 16)
+#define NIX_AF_RQM_BP_TEST (0x4880)
+#define NIX_AF_CQM_BP_TEST (0x48c0)
+#define NIX_AF_LINKX_CFG(a) (0x4010 | (a) << 17)
#define NIX_PRIV_AF_INT_CFG (0x8000000)
#define NIX_PRIV_LFX_CFG (0x8000010)
#define NIX_PRIV_LFX_INT_CFG (0x8000020)
#define NIX_AF_RVU_LF_CFG_DEBUG (0x8000030)
+#define NIX_AF_LF_CFG_SHIFT 17
+#define NIX_AF_LF_SSO_PF_FUNC_SHIFT 16
+
+#define NIX_AF_LINKX_BASE_MASK GENMASK_ULL(11, 0)
+#define NIX_AF_LINKX_RANGE_MASK GENMASK_ULL(19, 16)
+
/* SSO */
#define SSO_AF_CONST (0x1000)
#define SSO_AF_CONST1 (0x1008)
-#define SSO_AF_BLK_RST (0x10f8)
+#define SSO_AF_WQ_INT_PC (0x1020)
+#define SSO_AF_NOS_CNT (0x1050)
+#define SSO_AF_AW_WE (0x1080)
+#define SSO_AF_WS_CFG (0x1088)
#define SSO_AF_LF_HWGRP_RST (0x10e0)
+#define SSO_AF_AW_CFG (0x10f0)
+#define SSO_AF_BLK_RST (0x10f8)
+#define SSO_AF_ACTIVE_CYCLES0 (0x1100)
+#define SSO_AF_ACTIVE_CYCLES1 (0x1108)
+#define SSO_AF_ACTIVE_CYCLES2 (0x1110)
+#define SSO_AF_ERR0 (0x1220)
+#define SSO_AF_ERR0_W1S (0x1228)
+#define SSO_AF_ERR0_ENA_W1C (0x1230)
+#define SSO_AF_ERR0_ENA_W1S (0x1238)
+#define SSO_AF_ERR2 (0x1260)
+#define SSO_AF_ERR2_W1S (0x1268)
+#define SSO_AF_ERR2_ENA_W1C (0x1270)
+#define SSO_AF_ERR2_ENA_W1S (0x1278)
+#define SSO_AF_UNMAP_INFO (0x12f0)
+#define SSO_AF_UNMAP_INFO2 (0x1300)
+#define SSO_AF_UNMAP_INFO3 (0x1310)
+#define SSO_AF_RAS (0x1420)
+#define SSO_AF_RAS_W1S (0x1430)
+#define SSO_AF_RAS_ENA_W1C (0x1460)
+#define SSO_AF_RAS_ENA_W1S (0x1470)
+#define SSO_PRIV_AF_INT_CFG (0x3000)
+#define SSO_AF_AW_ADD (0x2080)
+#define SSO_AF_AW_READ_ARB (0x2090)
+#define SSO_AF_XAQ_REQ_PC (0x20B0)
+#define SSO_AF_XAQ_LATENCY_PC (0x20B8)
+#define SSO_AF_TAQ_CNT (0x20c0)
+#define SSO_AF_TAQ_ADD (0x20e0)
+#define SSO_AF_POISONX(a) (0x2100 | (a) << 3)
+#define SSO_AF_POISONX_W1S(a) (0x2200 | (a) << 3)
#define SSO_AF_RVU_LF_CFG_DEBUG (0x3800)
#define SSO_PRIV_LFX_HWGRP_CFG (0x10000)
#define SSO_PRIV_LFX_HWGRP_INT_CFG (0x20000)
+#define SSO_AF_XAQX_GMCTL(a) (0xe0000 | (a) << 3)
+#define SSO_AF_XAQX_HEAD_PTR(a) (0x80000 | (a) << 3)
+#define SSO_AF_XAQX_TAIL_PTR(a) (0x90000 | (a) << 3)
+#define SSO_AF_XAQX_HEAD_NEXT(a) (0xa0000 | (a) << 3)
+#define SSO_AF_XAQX_TAIL_NEXT(a) (0xb0000 | (a) << 3)
+#define SSO_AF_TOAQX_STATUS(a) (0xd0000 | (a) << 3)
+#define SSO_AF_TIAQX_STATUS(a) (0xc0000 | (a) << 3)
+#define SSO_AF_HWGRPX_IAQ_THR(a) (0x200000 | (a) << 12)
+#define SSO_AF_HWGRPX_TAQ_THR(a) (0x200010 | (a) << 12)
+#define SSO_AF_HWGRPX_PRI(a) (0x200020 | (a) << 12)
+#define SSO_AF_HWGRPX_WS_PC(a) (0x200050 | (a) << 12)
+#define SSO_AF_HWGRPX_EXT_PC(a) (0x200060 | (a) << 12)
+#define SSO_AF_HWGRPX_WA_PC(a) (0x200070 | (a) << 12)
+#define SSO_AF_HWGRPX_TS_PC(a) (0x200080 | (a) << 12)
+#define SSO_AF_HWGRPX_DS_PC(a) (0x200090 | (a) << 12)
+#define SSO_AF_HWGRPX_DQ_PC(a) (0x2000A0 | (a) << 12)
+#define SSO_AF_HWGRPX_LS_PC(a) (0x2000C0 | (a) << 12)
+#define SSO_AF_HWGRPX_PAGE_CNT(a) (0x200100 | (a) << 12)
+#define SSO_AF_IU_ACCNTX_CFG(a) (0x50000 | (a) << 3)
+#define SSO_AF_IU_ACCNTX_RST(a) (0x60000 | (a) << 3)
+#define SSO_AF_HWGRPX_AW_STATUS(a) (0x200110 | (a) << 12)
+#define SSO_AF_HWGRPX_AW_CFG(a) (0x200120 | (a) << 12)
+#define SSO_AF_HWGRPX_AW_TAGSPACE(a) (0x200130 | (a) << 12)
+#define SSO_AF_HWGRPX_XAQ_AURA(a) (0x200140 | (a) << 12)
+#define SSO_AF_HWGRPX_XAQ_LIMIT(a) (0x200220 | (a) << 12)
+#define SSO_AF_HWGRPX_IU_ACCNT(a) (0x200230 | (a) << 12)
+#define SSO_AF_HWSX_ARB(a) (0x400100 | (a) << 12)
+#define SSO_AF_HWSX_INV(a) (0x400180 | (a) << 12)
+#define SSO_AF_HWSX_GMCTL(a) (0x400200 | (a) << 12)
+#define SSO_AF_HWSX_LSW_CFG(a) (0x400300 | (a) << 12)
+#define SSO_AF_HWSX_SX_GRPMSKX(a, b, c) \
+ (0x400400 | (a) << 12 | (b) << 5 | (c) << 3)
+#define SSO_AF_TAQX_LINK(a) (0xc00000 | (a) << 3)
+#define SSO_AF_TAQX_WAEY_TAG(a, b) (0xe00000 | (a) << 8 | (b) << 4)
+#define SSO_AF_TAQX_WAEY_WQP(a, b) (0xe00008 | (a) << 8 | (b) << 4)
+#define SSO_AF_IPL_FREEX(a) (0x800000 | (a) << 3)
+#define SSO_AF_IPL_IAQX(a) (0x840000 | (a) << 3)
+#define SSO_AF_IPL_DESCHEDX(a) (0x860000 | (a) << 3)
+#define SSO_AF_IPL_CONFX(a) (0x880000 | (a) << 3)
+#define SSO_AF_IENTX_TAG(a) (0xa00000 | (a) << 3)
+#define SSO_AF_IENTX_GRP(a) (0xa20000 | (a) << 3)
+#define SSO_AF_IENTX_PENDTAG(a) (0xa40000 | (a) << 3)
+#define SSO_AF_IENTX_LINKS(a) (0xa60000 | (a) << 3)
+#define SSO_AF_IENTX_QLINKS(a) (0xa80000 | (a) << 3)
+#define SSO_AF_IENTX_WQP(a) (0xaa0000 | (a) << 3)
+#define SSO_AF_XAQDIS_DIGESTX(a) (0x901000 | (a) << 3)
+#define SSO_AF_FLR_AQ_DIGESTX(a) (0x901200 | (a) << 3)
+#define SSO_AF_QCTLDIS_DIGESTX(a) (0x900E00 | (a) << 3)
+#define SSO_AF_WQP0_DIGESTX(a) (0x900A00 | (a) << 3)
+#define SSO_AF_NPA_DIGESTX(a) (0x900000 | (a) << 3)
+#define SSO_AF_BFP_DIGESTX(a) (0x900200 | (a) << 3)
+#define SSO_AF_BFPN_DIGESTX(a) (0x900400 | (a) << 3)
+#define SSO_AF_GRPDIS_DIGESTX(a) (0x900600 | (a) << 3)
+
+#define SSO_AF_CONST1_NO_NSCHED BIT_ULL(34)
+#define SSO_AF_CONST1_LSW_PRESENT BIT_ULL(36)
+#define SSO_AF_CONST1_PRF_PRESENT BIT_ULL(37)
+#define SSO_AF_IAQ_FREE_CNT_MASK 0x3FFFull
+#define SSO_AF_IAQ_RSVD_FREE_MASK 0x3FFFull
+#define SSO_AF_IAQ_RSVD_FREE_SHIFT 16
+#define SSO_AF_IAQ_FREE_CNT_MAX SSO_AF_IAQ_FREE_CNT_MASK
+#define SSO_AF_AW_ADD_RSVD_FREE_MASK 0x3FFFull
+#define SSO_AF_AW_ADD_RSVD_FREE_SHIFT 16
+#define SSO_HWGRP_IAQ_MAX_THR_MASK 0x3FFFull
+#define SSO_HWGRP_IAQ_RSVD_THR_MASK 0x3FFFull
+#define SSO_HWGRP_IAQ_MAX_THR_SHIFT 32
+#define SSO_HWGRP_IAQ_RSVD_THR 0x2
+#define SSO_HWGRP_IAQ_GRP_CNT_SHIFT 48
+#define SSO_HWGRP_IAQ_GRP_CNT_MASK 0x3FFFull
+#define SSO_AF_HWGRPX_IUEX_NOSCHED(a, b)\
+ (((((b) >> 48) & 0x3FF) == (a)) && ((b) & BIT_ULL(60)))
+#define SSO_AF_HWGRP_PAGE_CNT_MASK (BIT_ULL(32) - 1)
+#define SSO_HWGRP_IAQ_MAX_THR_STRM_PERF 0xD0
+#define SSO_AF_HWGRP_IU_ACCNT_MAX_THR 0x7FFFull
+
+#define SSO_AF_TAQ_FREE_CNT_MASK 0x7FFull
+#define SSO_AF_TAQ_RSVD_FREE_MASK 0x7FFull
+#define SSO_AF_TAQ_RSVD_FREE_SHIFT 16
+#define SSO_AF_TAQ_FREE_CNT_MAX SSO_AF_TAQ_FREE_CNT_MASK
+#define SSO_AF_TAQ_ADD_RSVD_FREE_MASK 0x1FFFull
+#define SSO_AF_TAQ_ADD_RSVD_FREE_SHIFT 16
+#define SSO_HWGRP_TAQ_MAX_THR_MASK 0x7FFull
+#define SSO_HWGRP_TAQ_RSVD_THR_MASK 0x7FFull
+#define SSO_HWGRP_TAQ_MAX_THR_SHIFT 32
+#define SSO_HWGRP_TAQ_RSVD_THR 0x3
+#define SSO_AF_ERR0_MASK 0xFFEull
+#define SSO_AF_ERR2_MASK 0xF001F000ull
+#define SSO_HWGRP_TAQ_MAX_THR_STRM_PERF 0x10
+
+#define SSO_HWGRP_PRI_MASK 0x7ull
+#define SSO_HWGRP_PRI_AFF_MASK 0xFull
+#define SSO_HWGRP_PRI_AFF_SHIFT 8
+#define SSO_HWGRP_PRI_WGT_MASK 0x3Full
+#define SSO_HWGRP_PRI_WGT_SHIFT 16
+#define SSO_HWGRP_PRI_WGT_LEFT_MASK 0x3Full
+#define SSO_HWGRP_PRI_WGT_LEFT_SHIFT 24
+
+#define SSO_HWGRP_AW_CFG_RWEN BIT_ULL(0)
+#define SSO_HWGRP_AW_CFG_LDWB BIT_ULL(1)
+#define SSO_HWGRP_AW_CFG_LDT BIT_ULL(2)
+#define SSO_HWGRP_AW_CFG_STT BIT_ULL(3)
+#define SSO_HWGRP_AW_CFG_XAQ_BYP_DIS BIT_ULL(4)
+#define SSO_HWGRP_AW_CFG_XAQ_ALLOC_DIS BIT_ULL(6)
+
+#define SSO_HWGRP_AW_STS_TPTR_VLD BIT_ULL(8)
+#define SSO_HWGRP_AW_STS_NPA_FETCH BIT_ULL(9)
+#define SSO_HWGRP_AW_STS_TPTR_NEXT_VLD BIT_ULL(10)
+#define SSO_HWGRP_AW_STS_XAQ_BUFSC_MASK 0x7ull
+#define SSO_HWGRP_AW_STS_INIT_STS 0x18ull
+
+#define SSO_LF_GGRP_OP_ADD_WORK1 (0x8ull)
+#define SSO_LF_GGRP_QCTL (0x20ull)
+#define SSO_LF_GGRP_INT (0x100ull)
+#define SSO_LF_GGRP_INT_ENA_W1S (0x110ull)
+#define SSO_LF_GGRP_INT_ENA_W1C (0x118ull)
+#define SSO_LF_GGRP_INT_THR (0x140ull)
+#define SSO_LF_GGRP_INT_CNT (0x180ull)
+#define SSO_LF_GGRP_XAQ_CNT (0x1b0ull)
+#define SSO_LF_GGRP_AQ_CNT (0x1c0ull)
+#define SSO_LF_GGRP_AQ_THR (0x1e0ull)
+#define SSO_LF_GGRP_MISC_CNT (0x200ull)
+
+#define SSO_LF_GGRP_INT_MASK (0x7)
+#define SSO_LF_GGRP_AQ_THR_MASK (BIT_ULL(33) - 1)
+#define SSO_LF_GGRP_XAQ_CNT_MASK (BIT_ULL(33) - 1)
+#define SSO_LF_GGRP_INT_CNT_MASK (0x3FFF3FFF0000ull)
/* SSOW */
#define SSOW_AF_RVU_LF_HWS_CFG_DEBUG (0x0010)
@@ -420,6 +621,34 @@
#define SSOW_PRIV_LFX_HWS_CFG (0x1000)
#define SSOW_PRIV_LFX_HWS_INT_CFG (0x2000)
+#define SSOW_LF_GWS_PENDSTATE (0x50ull)
+#define SSOW_LF_GWS_NW_TIM (0x70ull)
+#define SSOW_LF_GWS_INT (0x100ull)
+#define SSOW_LF_GWS_INT_ENA_W1C (0x118ull)
+#define SSOW_LF_GWS_TAG (0x200ull)
+#define SSOW_LF_GWS_WQP (0x210ull)
+#define SSOW_LF_GWS_PRF_TAG (0x400ull)
+#define SSOW_LF_GWS_OP_GET_WORK (0x600ull)
+#define SSOW_LF_GWS_OP_SWTAG_FLUSH (0x800ull)
+#define SSOW_LF_GWS_OP_DESCHED (0x880ull)
+#define SSOW_LF_GWS_OP_CLR_NSCHED0 (0xA00ull)
+#define SSOW_LF_GWS_OP_GWC_INVAL (0xe00ull)
+
+#define SSO_TT_EMPTY (0x3)
+#define SSOW_LF_GWS_INT_MASK (0x7FF)
+#define SSOW_LF_GWS_MAX_NW_TIM (BIT_ULL(10) - 1)
+#define SSOW_LF_GWS_OP_GET_WORK_WAIT BIT_ULL(16)
+#define SSOW_LF_GWS_OP_GET_WORK_GROUPED BIT_ULL(18)
+#define SSOW_LF_GWS_TAG_PEND_DESCHED BIT_ULL(58)
+#define SSOW_LF_GWS_TAG_PEND_SWITCH BIT_ULL(62)
+#define SSOW_LF_GWS_TAG_PEND_GET_WORK BIT_ULL(63)
+
+#define SSOW_AF_BAR2_SEL (0x9000000ull)
+#define SSOW_AF_BAR2_ALIASX(a, b) RVU_AF_BAR2_ALIASX(a, b)
+
+#define SSO_AF_BAR2_SEL (0x9000000ull)
+#define SSO_AF_BAR2_ALIASX(a, b) RVU_AF_BAR2_ALIASX(a, b)
+
/* TIM */
#define TIM_AF_CONST (0x90)
#define TIM_PRIV_LFX_CFG (0x20000)
@@ -427,17 +656,125 @@
#define TIM_AF_RVU_LF_CFG_DEBUG (0x30000)
#define TIM_AF_BLK_RST (0x10)
#define TIM_AF_LF_RST (0x20)
+#define TIM_AF_RINGX_GMCTL(a) (0x2000 | (a) << 3)
+#define TIM_AF_RINGX_CTL0(a) (0x4000 | (a) << 3)
+#define TIM_AF_RINGX_CTL1(a) (0x6000 | (a) << 3)
+#define TIM_AF_RINGX_CTL2(a) (0x8000 | (a) << 3)
+#define TIM_AF_FLAGS_REG (0x80)
+#define TIM_AF_FLAGS_REG_ENA_TIM BIT_ULL(0)
+#define TIM_AF_RINGX_CTL1_ENA BIT_ULL(47)
+#define TIM_AF_RINGX_CTL1_RCF_BUSY BIT_ULL(50)
+#define TIM_AF_ADJUST_TENNS (0x160)
+#define TIM_AF_ADJUST_GPIOS (0x170)
+#define TIM_AF_ADJUST_GTI (0x180)
+#define TIM_AF_ADJUST_PTP (0x190)
+#define TIM_AF_ADJUST_BTS (0x1B0)
+#define TIM_AF_ADJUST_TIMERS (0x1C0)
+#define TIM_AF_ADJUST_TIMERS_MASK BIT_ULL(0)
+#define TIM_AF_CAPTURE_TENNS (0x1D0)
+#define TIM_AF_CAPTURE_GPIOS (0x1E0)
+#define TIM_AF_CAPTURE_GTI (0x1F0)
+#define TIM_AF_CAPTURE_PTP (0x200)
+#define TIM_AF_CAPTURE_BTS (0x220)
+#define TIM_AF_CAPTURE_EXT_GTI (0x240)
+#define TIM_AF_CAPTURE_TIMERS (0x250)
+#define TIM_AF_CAPTURE_TIMERS_MASK GENMASK_ULL(1, 0)
+#define TIM_AF_RING_GMCTL_SHIFT 3
+#define TIM_AF_RING_SSO_PF_FUNC_SHIFT 0
+#define TIM_AF_FLAGS_REG_GPIO_EDGE_MASK GENMASK_ULL(6, 5)
/* CPT */
-#define CPT_AF_CONSTANTS0 (0x0000)
-#define CPT_PRIV_LFX_CFG (0x41000)
-#define CPT_PRIV_LFX_INT_CFG (0x43000)
-#define CPT_AF_RVU_LF_CFG_DEBUG (0x45000)
-#define CPT_AF_LF_RST (0x44000)
-#define CPT_AF_BLK_RST (0x46000)
+#define CPT_AF_CONSTANTS0 (0x0ull)
+#define CPT_AF_CONSTANTS1 (0x1000ull)
+#define CPT_AF_DIAG (0x3000ull)
+#define CPT_AF_ECO (0x4000ull)
+#define CPT_AF_FLTX_INT(a) (0xa000ull | (u64)(a) << 3)
+#define CPT_AF_FLTX_INT_W1S(a) (0xb000ull | (u64)(a) << 3)
+#define CPT_AF_FLTX_INT_ENA_W1C(a) (0xc000ull | (u64)(a) << 3)
+#define CPT_AF_FLTX_INT_ENA_W1S(a) (0xd000ull | (u64)(a) << 3)
+#define CPT_AF_PSNX_EXE(a) (0xe000ull | (u64)(a) << 3)
+#define CPT_AF_PSNX_EXE_W1S(a) (0xf000ull | (u64)(a) << 3)
+#define CPT_AF_PSNX_LF(a) (0x10000ull | (u64)(a) << 3)
+#define CPT_AF_PSNX_LF_W1S(a) (0x11000ull | (u64)(a) << 3)
+#define CPT_AF_EXEX_CTL2(a) (0x12000ull | (u64)(a) << 3)
+#define CPT_AF_EXEX_STS(a) (0x13000ull | (u64)(a) << 3)
+#define CPT_AF_EXE_ERR_INFO (0x14000ull)
+#define CPT_AF_EXEX_ACTIVE(a) (0x16000ull | (u64)(a) << 3)
+#define CPT_AF_INST_REQ_PC (0x17000ull)
+#define CPT_AF_INST_LATENCY_PC (0x18000ull)
+#define CPT_AF_RD_REQ_PC (0x19000ull)
+#define CPT_AF_RD_LATENCY_PC (0x1a000ull)
+#define CPT_AF_RD_UC_PC (0x1b000ull)
+#define CPT_AF_ACTIVE_CYCLES_PC (0x1c000ull)
+#define CPT_AF_EXE_DBG_CTL (0x1d000ull)
+#define CPT_AF_EXE_DBG_DATA (0x1e000ull)
+#define CPT_AF_EXE_REQ_TIMER (0x1f000ull)
+#define CPT_AF_EXEX_CTL(a) (0x20000ull | (u64)(a) << 3)
+#define CPT_AF_EXE_PERF_CTL (0x21000ull)
+#define CPT_AF_EXE_DBG_CNTX(a) (0x22000ull | (u64)(a) << 3)
+#define CPT_AF_EXE_PERF_EVENT_CNT (0x23000ull)
+#define CPT_AF_EXE_EPCI_INBX_CNT(a) (0x24000ull | (u64)(a) << 3)
+#define CPT_AF_EXE_EPCI_OUTBX_CNT(a) (0x25000ull | (u64)(a) << 3)
+#define CPT_AF_EXEX_UCODE_BASE(a) (0x26000ull | (u64)(a) << 3)
+#define CPT_AF_LFX_CTL(a) (0x27000ull | (u64)(a) << 3)
+#define CPT_AF_LFX_CTL2(a) (0x29000ull | (u64)(a) << 3)
+#define CPT_AF_CPTCLK_CNT (0x2a000ull)
+#define CPT_AF_PF_FUNC (0x2b000ull)
+#define CPT_AF_LFX_PTR_CTL(a) (0x2c000ull | (u64)(a) << 3)
+#define CPT_AF_GRPX_THR(a) (0x2d000ull | (u64)(a) << 3)
+#define CPT_AF_CTL (0x2e000ull)
+#define CPT_AF_XEX_THR(a) (0x2f000ull | (u64)(a) << 3)
+#define CPT_PRIV_LFX_CFG (0x41000ull)
+#define CPT_PRIV_AF_INT_CFG (0x42000ull)
+#define CPT_PRIV_LFX_INT_CFG (0x43000ull)
+#define CPT_AF_LF_RST (0x44000ull)
+#define CPT_AF_RVU_LF_CFG_DEBUG (0x45000ull)
+#define CPT_AF_BLK_RST (0x46000ull)
+#define CPT_AF_RVU_INT (0x47000ull)
+#define CPT_AF_RVU_INT_W1S (0x47008ull)
+#define CPT_AF_RVU_INT_ENA_W1S (0x47010ull)
+#define CPT_AF_RVU_INT_ENA_W1C (0x47018ull)
+#define CPT_AF_RAS_INT (0x47020ull)
+#define CPT_AF_RAS_INT_W1S (0x47028ull)
+#define CPT_AF_RAS_INT_ENA_W1S (0x47030ull)
+#define CPT_AF_RAS_INT_ENA_W1C (0x47038ull)
+#define CPT_AF_CTX_FLUSH_TIMER (0x48000ull)
+#define CPT_AF_CTX_ERR (0x48008ull)
+#define CPT_AF_CTX_ENC_ID (0x48010ull)
+#define CPT_AF_CTX_MIS_PC (0x49400ull)
+#define CPT_AF_CTX_HIT_PC (0x49408ull)
+#define CPT_AF_CTX_AOP_PC (0x49410ull)
+#define CPT_AF_CTX_AOP_LATENCY_PC (0x49418ull)
+#define CPT_AF_CTX_IFETCH_PC (0x49420ull)
+#define CPT_AF_CTX_IFETCH_LATENCY_PC (0x49428ull)
+#define CPT_AF_CTX_FFETCH_PC (0x49430ull)
+#define CPT_AF_CTX_FFETCH_LATENCY_PC (0x49438ull)
+#define CPT_AF_CTX_WBACK_PC (0x49440ull)
+#define CPT_AF_CTX_WBACK_LATENCY_PC (0x49448ull)
+#define CPT_AF_CTX_PSH_PC (0x49450ull)
+#define CPT_AF_CTX_PSH_LATENCY_PC (0x49458ull)
+#define CPT_AF_CTX_CAM_DATA(a) (0x49800ull | (u64)(a) << 3)
+#define CPT_AF_RXC_TIME (0x50010ull)
+#define CPT_AF_RXC_TIME_CFG (0x50018ull)
+#define CPT_AF_RXC_DFRG (0x50020ull)
+#define CPT_AF_RXC_ACTIVE_STS (0x50028ull)
+#define CPT_AF_RXC_ZOMBIE_STS (0x50030ull)
+#define CPT_AF_X2PX_LINK_CFG(a) (0x51000ull | (u64)(a) << 3)
+
+#define CPT_AF_BAR2_SEL 0x9000000
+#define CPT_AF_BAR2_ALIASX(a, b) RVU_AF_BAR2_ALIASX(a, b)
#define NPC_AF_BLK_RST (0x00040)
+#define CPT_AF_LF_CTL2_SHIFT 3
+#define CPT_AF_LF_SSO_PF_FUNC_SHIFT 32
+
+#define CPT_LF_CTL 0x10
+#define CPT_LF_INPROG 0x40
+#define CPT_LF_Q_GRP_PTR 0x120
+#define CPT_LF_CTX_FLUSH 0x510
+
/* NPC */
#define NPC_AF_CFG (0x00000)
#define NPC_AF_ACTIVE_PC (0x00010)
@@ -446,6 +783,8 @@
#define NPC_AF_BLK_RST (0x00040)
#define NPC_AF_MCAM_SCRUB_CTL (0x000a0)
#define NPC_AF_KCAM_SCRUB_CTL (0x000b0)
+#define NPC_AF_CONST2 (0x00100)
+#define NPC_AF_CONST3 (0x00110)
#define NPC_AF_KPUX_CFG(a) (0x00500 | (a) << 3)
#define NPC_AF_PCK_CFG (0x00600)
#define NPC_AF_PCK_DEF_OL2 (0x00610)
@@ -469,20 +808,7 @@
(0x900000 | (a) << 16 | (b) << 12 | (c) << 5 | (d) << 3)
#define NPC_AF_INTFX_LDATAX_FLAGSX_CFG(a, b, c) \
(0x980000 | (a) << 16 | (b) << 12 | (c) << 3)
-#define NPC_AF_MCAMEX_BANKX_CAMX_INTF(a, b, c) \
- (0x1000000ull | (a) << 10 | (b) << 6 | (c) << 3)
-#define NPC_AF_MCAMEX_BANKX_CAMX_W0(a, b, c) \
- (0x1000010ull | (a) << 10 | (b) << 6 | (c) << 3)
-#define NPC_AF_MCAMEX_BANKX_CAMX_W1(a, b, c) \
- (0x1000020ull | (a) << 10 | (b) << 6 | (c) << 3)
-#define NPC_AF_MCAMEX_BANKX_CFG(a, b) (0x1800000ull | (a) << 8 | (b) << 4)
-#define NPC_AF_MCAMEX_BANKX_STAT_ACT(a, b) \
- (0x1880000 | (a) << 8 | (b) << 4)
-#define NPC_AF_MATCH_STATX(a) (0x1880008 | (a) << 8)
#define NPC_AF_INTFX_MISS_STAT_ACT(a) (0x1880040 + (a) * 0x8)
-#define NPC_AF_MCAMEX_BANKX_ACTION(a, b) (0x1900000ull | (a) << 8 | (b) << 4)
-#define NPC_AF_MCAMEX_BANKX_TAG_ACT(a, b) \
- (0x1900008 | (a) << 8 | (b) << 4)
#define NPC_AF_INTFX_MISS_ACT(a) (0x1a00000 | (a) << 4)
#define NPC_AF_INTFX_MISS_TAG_ACT(a) (0x1b00008 | (a) << 4)
#define NPC_AF_MCAM_BANKX_HITX(a, b) (0x1c80000 | (a) << 8 | (b) << 4)
@@ -499,6 +825,147 @@
#define NPC_AF_DBG_DATAX(a) (0x3001400 | (a) << 4)
#define NPC_AF_DBG_RESULTX(a) (0x3001800 | (a) << 4)
+#define NPC_AF_MCAMEX_BANKX_CAMX_INTF(a, b, c) ({ \
+ u64 offset; \
+ \
+ offset = (0x1000000ull | (a) << 10 | (b) << 6 | (c) << 3); \
+ if (rvu->hw->npc_ext_set) \
+ offset = (0x8000000ull | (a) << 8 | (b) << 22 | (c) << 3); \
+ offset; })
+
+#define NPC_AF_MCAMEX_BANKX_CAMX_W0(a, b, c) ({ \
+ u64 offset; \
+ \
+ offset = (0x1000010ull | (a) << 10 | (b) << 6 | (c) << 3); \
+ if (rvu->hw->npc_ext_set) \
+ offset = (0x8000010ull | (a) << 8 | (b) << 22 | (c) << 3); \
+ offset; })
+
+#define NPC_AF_MCAMEX_BANKX_CAMX_W1(a, b, c) ({ \
+ u64 offset; \
+ \
+ offset = (0x1000020ull | (a) << 10 | (b) << 6 | (c) << 3); \
+ if (rvu->hw->npc_ext_set) \
+ offset = (0x8000020ull | (a) << 8 | (b) << 22 | (c) << 3); \
+ offset; })
+
+#define NPC_AF_MCAMEX_BANKX_CFG(a, b) ({ \
+ u64 offset; \
+ \
+ offset = (0x1800000ull | (a) << 8 | (b) << 4); \
+ if (rvu->hw->npc_ext_set) \
+ offset = (0x8000038ull | (a) << 8 | (b) << 22); \
+ offset; })
+
+#define NPC_AF_MCAMEX_BANKX_ACTION(a, b) ({ \
+ u64 offset; \
+ \
+ offset = (0x1900000ull | (a) << 8 | (b) << 4); \
+ if (rvu->hw->npc_ext_set) \
+ offset = (0x8000040ull | (a) << 8 | (b) << 22); \
+ offset; })
+
+#define NPC_AF_MCAMEX_BANKX_TAG_ACT(a, b) ({ \
+ u64 offset; \
+ \
+ offset = (0x1900008ull | (a) << 8 | (b) << 4); \
+ if (rvu->hw->npc_ext_set) \
+ offset = (0x8000048ull | (a) << 8 | (b) << 22); \
+ offset; })
+
+#define NPC_AF_MCAMEX_BANKX_STAT_ACT(a, b) ({ \
+ u64 offset; \
+ \
+ offset = (0x1880000ull | (a) << 8 | (b) << 4); \
+ if (rvu->hw->npc_ext_set) \
+ offset = (0x8000050ull | (a) << 8 | (b) << 22); \
+ offset; })
+
+#define NPC_AF_MATCH_STATX(a) ({ \
+ u64 offset; \
+ \
+ offset = (0x1880008ull | (a) << 8); \
+ if (rvu->hw->npc_ext_set) \
+ offset = (0x8000078ull | (a) << 8); \
+ offset; })
+
+
+/* REE */
+#define REE_AF_CMD_CTL (0x00ull)
+#define REE_AF_CONSTANTS (0x0A0ull)
+#define REE_AF_AQ_SBUF_CTL (0x100ull)
+#define REE_AF_AQ_SBUF_ADDR (0x110ull)
+#define REE_AF_AQ_DONE (0x128ull)
+#define REE_AF_AQ_DONE_ACK (0x130ull)
+#define REE_AF_AQ_DONE_INT (0x150ull)
+#define REE_AF_AQ_DONE_INT_ENA_W1S (0x168ull)
+#define REE_AF_AQ_DONE_INT_ENA_W1C (0x170ull)
+#define REE_AF_AQ_ENA (0x180ull)
+#define REE_AF_AQ_DOORBELL (0x200ull)
+#define REE_AF_PF_FUNC (0x210ull)
+#define REE_AF_EM_BASE (0x300ull)
+#define REE_AF_RAS (0x980ull)
+#define REE_AF_RAS_ENA_W1C (0x990ull)
+#define REE_AF_RAS_ENA_W1S (0x998ull)
+#define REE_AF_QUE_SBUF_CTL(a) (0x1200ull | (a) << 3)
+#define REE_PRIV_AF_INT_CFG (0x4000ull)
+#define REE_AF_REEXM_STATUS (0x8050ull)
+#define REE_AF_REEXM_CTRL (0x80C0ull)
+#define REE_AF_REEXM_MAX_MATCH (0x80C8ull)
+#define REE_AF_REEXM_MAX_PRE_CNT (0x80D0ull)
+#define REE_AF_REEXM_MAX_PTHREAD_CNT (0x80D8ull)
+#define REE_AF_REEXM_MAX_LATENCY_CNT (0x80E0ull)
+#define REE_AF_REEXR_STATUS (0x8250ull)
+#define REE_AF_REEXR_CTRL (0x82C0ull)
+#define REE_PRIV_LFX_CFG (0x41000ull)
+#define REE_PRIV_LFX_INT_CFG (0x42000ull)
+#define REE_AF_LF_RST (0x43000ull)
+#define REE_AF_RVU_LF_CFG_DEBUG (0x44000ull)
+#define REE_AF_BLK_RST (0x45000ull)
+#define REE_AF_RVU_INT (0x46000ull)
+#define REE_AF_RVU_INT_ENA_W1S (0x46010ull)
+#define REE_AF_RVU_INT_ENA_W1C (0x46018ull)
+#define REE_AF_AQ_INT (0x46020ull)
+#define REE_AF_AQ_INT_ENA_W1S (0x46030ull)
+#define REE_AF_AQ_INT_ENA_W1C (0x46038ull)
+#define REE_AF_GRACEFUL_DIS_CTL (0x46100ull)
+#define REE_AF_GRACEFUL_DIS_STATUS (0x46110ull)
+
+#define REE_AF_FORCE_CSCLK BIT_ULL(1)
+#define REE_AF_FORCE_CCLK BIT_ULL(2)
+#define REE_AF_RAS_DAT_PSN BIT_ULL(0)
+#define REE_AF_RAS_LD_CMD_PSN BIT_ULL(1)
+#define REE_AF_RAS_LD_REEX_PSN BIT_ULL(2)
+#define REE_AF_RVU_INT_UNMAPPED_SLOT BIT_ULL(0)
+#define REE_AF_AQ_INT_DOVF BIT_ULL(0)
+#define REE_AF_AQ_INT_IRDE BIT_ULL(1)
+#define REE_AF_AQ_INT_PRDE BIT_ULL(2)
+#define REE_AF_AQ_INT_PLLE BIT_ULL(3)
+#define REE_AF_REEXM_CTRL_INIT BIT_ULL(0)
+#define REE_AF_REEXM_CTRL_GO BIT_ULL(3)
+#define REE_AF_REEXM_STATUS_INIT_DONE BIT_ULL(0)
+#define REE_AF_REEXR_CTRL_INIT BIT_ULL(0)
+#define REE_AF_REEXR_CTRL_GO BIT_ULL(1)
+#define REE_AF_REEXR_CTRL_MODE_IM_L1_L2 BIT_ULL(4)
+#define REE_AF_REEXR_CTRL_MODE_L1_L2 BIT_ULL(5)
+
+#define REE_AF_AQ_SBUF_CTL_SIZE_SHIFT 32
+#define REE_AF_REEXM_MAX_MATCH_MAX 0xFEull
+#define REE_AF_REEXM_MAX_PRE_CNT_COUNT 0x3F0ull
+#define REE_AF_REEXM_MAX_PTHREAD_COUNT 0xFFFFull
+#define REE_AF_REEXM_MAX_LATENCY_COUNT 0xFFFFull
+#define REE_AF_QUE_SBUF_CTL_SIZE_SHIFT 32
+#define REE_AF_REEX_CSR_BLOCK_BASE_ADDR (0x8000ull)
+#define REE_AF_REEX_CSR_BLOCK_ID (0x200ull)
+#define REE_AF_REEX_CSR_BLOCK_ID_MASK GENMASK_ULL(18, 16)
+#define REE_AF_REEX_CSR_BLOCK_ID_SHIFT 16
+#define REE_AF_REEX_CSR_INDEX 8
+#define REE_AF_REEX_CSR_INDEX_MASK GENMASK_ULL(4, 0)
+#define REE_AF_QUE_SBUF_CTL_MAX_SIZE GENMASK_ULL((50 - 32), 0)
+#define REE_AF_REEXR_STATUS_IM_INIT_DONE BIT_ULL(4)
+#define REE_AF_REEXR_STATUS_L1_CACHE_INIT_DONE BIT_ULL(5)
+#define REE_AF_REEXR_STATUS_L2_CACHE_INIT_DONE BIT_ULL(6)
+
/* NDC */
#define NDC_AF_CONST (0x00000)
#define NDC_AF_CLK_EN (0x00020)
@@ -525,4 +992,26 @@
(0x00F00 | (a) << 5 | (b) << 4)
#define NDC_AF_BANKX_HIT_PC(a) (0x01000 | (a) << 3)
#define NDC_AF_BANKX_MISS_PC(a) (0x01100 | (a) << 3)
+
+/* LBK */
+#define LBK_CONST (0x10ull)
+#define LBK_LINK_CFG_P2X (0x400ull)
+#define LBK_LINK_CFG_X2P (0x408ull)
+#define LBK_CONST_CHANS GENMASK_ULL(47, 32)
+#define LBK_CONST_DST GENMASK_ULL(31, 28)
+#define LBK_CONST_SRC GENMASK_ULL(27, 24)
+#define LBK_CONST_BUF_SIZE GENMASK_ULL(23, 0)
+#define LBK_LINK_CFG_RANGE_MASK GENMASK_ULL(19, 16)
+#define LBK_LINK_CFG_ID_MASK GENMASK_ULL(11, 6)
+#define LBK_LINK_CFG_BASE_MASK GENMASK_ULL(5, 0)
+
+/* APR */
+#define APR_AF_LMT_CFG (0x000ull)
+#define APR_AF_LMT_MAP_BASE (0x008ull)
+#define APR_AF_LMT_CTL (0x010ull)
+
+#define APR_LMT_MAP_ENT_DIS_SCH_CMP_SHIFT 23
+#define APR_LMT_MAP_ENT_SCH_ENA_SHIFT 22
+#define APR_LMT_MAP_ENT_DIS_LINE_PREF_SHIFT 21
+
#endif /* RVU_REG_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c
new file mode 100644
index 000000000000..b04fb226f708
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2021 Marvell.
+ *
+ */
+
+#include <linux/pci.h>
+#include "rvu.h"
+
+/* SDP PF device id */
+#define PCI_DEVID_OTX2_SDP_PF 0xA0F6
+
+/* Maximum SDP blocks in a chip */
+#define MAX_SDP 2
+
+/* SDP PF number */
+static int sdp_pf_num[MAX_SDP] = {-1, -1};
+
+bool is_sdp_pfvf(u16 pcifunc)
+{
+ u16 pf = rvu_get_pf(pcifunc);
+ u32 i;
+
+ for (i = 0; i < MAX_SDP; i++) {
+ if (pf == sdp_pf_num[i])
+ return true;
+ }
+
+ return false;
+}
+
+bool is_sdp_pf(u16 pcifunc)
+{
+ return (is_sdp_pfvf(pcifunc) &&
+ !(pcifunc & RVU_PFVF_FUNC_MASK));
+}
+
+bool is_sdp_vf(u16 pcifunc)
+{
+ return (is_sdp_pfvf(pcifunc) &&
+ !!(pcifunc & RVU_PFVF_FUNC_MASK));
+}
+
+int rvu_sdp_init(struct rvu *rvu)
+{
+ struct pci_dev *pdev = NULL;
+ struct rvu_pfvf *pfvf;
+ u32 i = 0;
+
+ while ((i < MAX_SDP) && (pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OTX2_SDP_PF,
+ pdev)) != NULL) {
+ /* The RVU PF number is one less than bus number */
+ sdp_pf_num[i] = pdev->bus->number - 1;
+ pfvf = &rvu->pf[sdp_pf_num[i]];
+
+ pfvf->sdp_info = devm_kzalloc(rvu->dev,
+ sizeof(struct sdp_node_info),
+ GFP_KERNEL);
+ if (!pfvf->sdp_info)
+ return -ENOMEM;
+
+ dev_info(rvu->dev, "SDP PF number:%d\n", sdp_pf_num[i]);
+
+ put_device(&pdev->dev);
+ i++;
+ }
+
+ return 0;
+}
+
+int
+rvu_mbox_handler_set_sdp_chan_info(struct rvu *rvu,
+ struct sdp_chan_info_msg *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+
+ memcpy(pfvf->sdp_info, &req->info, sizeof(struct sdp_node_info));
+ dev_info(rvu->dev, "AF: SDP%d max_vfs %d num_pf_rings %d pf_srn %d\n",
+ req->info.node_id, req->info.max_vfs, req->info.num_pf_rings,
+ req->info.pf_srn);
+ return 0;
+}
+
+int
+rvu_mbox_handler_get_sdp_chan_info(struct rvu *rvu, struct msg_req *req,
+ struct sdp_get_chan_info_msg *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int blkaddr;
+
+ if (!hw->cap.programmable_chans) {
+ rsp->chan_base = NIX_CHAN_SDP_CH_START;
+ rsp->num_chan = NIX_CHAN_SDP_NUM_CHANS;
+ } else {
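+ /* With programmable channels, the low 12 bits of NIX_AF_CONST1 are
+ * taken as the SDP channel count.
+ */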
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ rsp->chan_base = hw->sdp_chan_base;
+ rsp->num_chan = rvu_read64(rvu, blkaddr, NIX_AF_CONST1) & 0xFFFUL;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_sso.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sso.c
new file mode 100644
index 000000000000..f3685901aaa1
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sso.c
@@ -0,0 +1,1661 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell.
+ *
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/types.h>
+
+#include "rvu_struct.h"
+
+#include "rvu_reg.h"
+#include "rvu.h"
+
+#define NPA_LF_AURA_OP_FREE0 0x20
+#define NPA_LF_AURA_OP_CNT 0x30
+
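+/* Helpers for issuing SSO/NPA register ops through the AF BAR2 alias
+ * window. On arm64 these map to the STP and LDADD instructions; non-arm64
+ * builds get simple, non-atomic fallbacks.
+ */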
+#if defined(CONFIG_ARM64)
+#define rvu_sso_store_pair(val0, val1, addr) ({ \
+ __asm__ volatile("stp %x[x0], %x[x1], [%x[p1]]" \
+ : \
+ : \
+ [x0]"r"(val0), [x1]"r"(val1), [p1]"r"(addr)); \
+ })
+
+#define rvu_sso_ldadd(result, incr, ptr) ({ \
+ __asm__ volatile(".cpu generic+lse\n" \
+ "ldadd %x[i], %x[r], [%[b]]" \
+ : [r] "=r" (result), "+m" (*ptr) \
+ : [i] "r" (incr), [b] "r" (ptr) \
+ : "memory"); \
+ })
+#else
+#define rvu_sso_store_pair(val0, val1, addr) \
+ do { \
+ u64 *addr1 = (void *)addr; \
+ *addr1 = val0; \
+ *(u64 *)(((u8 *)addr1) + 8) = val1; \
+ } while (0)
+
+#define rvu_sso_ldadd(result, incr, ptr) \
+ do { \
+ /* Non-atomic fallback for non-arm64 builds. */ \
+ (result) = *(u64 *)(ptr); \
+ *(u64 *)(ptr) += (incr); \
+ } while (0)
+#endif
+
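+/* Dump each 64-LF-wide digest register of the given type and write the
+ * value back to acknowledge the reported bits.
+ */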
+#define SSO_AF_INT_DIGEST_PRNT(reg) \
+ for (i = 0; i < block->lf.max / 64; i++) { \
+ reg0 = rvu_read64(rvu, blkaddr, reg##X(i)); \
+ dev_err_ratelimited(rvu->dev, #reg "(%d) : 0x%llx", i, \
+ reg0); \
+ rvu_write64(rvu, blkaddr, reg##X(i), reg0); \
+ }
+
+void rvu_sso_hwgrp_config_thresh(struct rvu *rvu, int blkaddr, int lf)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u64 add, grp_thr, grp_rsvd;
+ u64 reg;
+
+ /* Configure IAQ Thresholds */
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_IAQ_THR(lf));
+ grp_rsvd = reg & SSO_HWGRP_IAQ_RSVD_THR_MASK;
+ add = hw->sso.iaq_rsvd - grp_rsvd;
+
+ grp_thr = hw->sso.iaq_rsvd & SSO_HWGRP_IAQ_RSVD_THR_MASK;
+ grp_thr |= ((hw->sso.iaq_max & SSO_HWGRP_IAQ_MAX_THR_MASK) <<
+ SSO_HWGRP_IAQ_MAX_THR_SHIFT);
+
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_IAQ_THR(lf), grp_thr);
+
+ if (add)
+ rvu_write64(rvu, blkaddr, SSO_AF_AW_ADD,
+ (add & SSO_AF_AW_ADD_RSVD_FREE_MASK) <<
+ SSO_AF_AW_ADD_RSVD_FREE_SHIFT);
+
+ /* Configure TAQ Thresholds */
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_TAQ_THR(lf));
+ grp_rsvd = reg & SSO_HWGRP_TAQ_RSVD_THR_MASK;
+ add = hw->sso.taq_rsvd - grp_rsvd;
+
+ grp_thr = hw->sso.taq_rsvd & SSO_HWGRP_TAQ_RSVD_THR_MASK;
+ grp_thr |= ((hw->sso.taq_max & SSO_HWGRP_TAQ_MAX_THR_MASK) <<
+ SSO_HWGRP_TAQ_MAX_THR_SHIFT);
+
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_TAQ_THR(lf), grp_thr);
+
+ if (add)
+ rvu_write64(rvu, blkaddr, SSO_AF_TAQ_ADD,
+ (add & SSO_AF_TAQ_RSVD_FREE_MASK) <<
+ SSO_AF_TAQ_ADD_RSVD_FREE_SHIFT);
+}
+
+static void rvu_sso_enable_aw_src(struct rvu *rvu, int lf_cnt, int sub_blkaddr,
+ u64 addr, int *lf_arr, u16 pcifunc, u8 shift,
+ u8 addr_off)
+{
+ u64 reg;
+ int lf;
+
+ for (lf = 0; lf < lf_cnt; lf++) {
+ reg = rvu_read64(rvu, sub_blkaddr, addr |
+ lf_arr[lf] << addr_off);
+
+ reg |= ((u64)pcifunc << shift);
+ rvu_write64(rvu, sub_blkaddr, addr |
+ lf_arr[lf] << addr_off, reg);
+ }
+}
+
+static int rvu_sso_disable_aw_src(struct rvu *rvu, int **lf_arr,
+ int sub_blkaddr, u8 shift, u8 addr_off,
+ u16 pcifunc, u64 addr)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int lf_cnt = 0, lf;
+ u64 reg;
+
+ if (sub_blkaddr >= 0) {
+ block = &hw->block[sub_blkaddr];
+ *lf_arr = kmalloc(block->lf.max * sizeof(int), GFP_KERNEL);
+ if (!*lf_arr)
+ return 0;
+
+ for (lf = 0; lf < block->lf.max; lf++) {
+ reg = rvu_read64(rvu, sub_blkaddr,
+ addr | lf << addr_off);
+ if (((reg >> shift) & 0xFFFFul) != pcifunc)
+ continue;
+
+ reg &= ~(0xFFFFul << shift);
+ rvu_write64(rvu, sub_blkaddr, addr | lf << addr_off,
+ reg);
+ (*lf_arr)[lf_cnt] = lf;
+ lf_cnt++;
+ }
+ }
+
+ return lf_cnt;
+}
+
+static void rvu_sso_ggrp_taq_flush(struct rvu *rvu, u16 pcifunc, int lf,
+ int slot, int ssow_lf, u64 blkaddr,
+ u64 ssow_blkaddr)
+{
+ int nix_lf_cnt, cpt_lf_cnt, tim_lf_cnt;
+ int *nix_lf, *cpt_lf, *tim_lf;
+ u64 reg, val;
+
+ /* Disable add work. */
+ rvu_write64(rvu, blkaddr, SSO_AF_BAR2_ALIASX(slot, SSO_LF_GGRP_QCTL),
+ 0);
+
+ /* Disable all sources of work. */
+ nix_lf = NULL;
+ nix_lf_cnt = rvu_sso_disable_aw_src(rvu, &nix_lf,
+ rvu_get_blkaddr(rvu, BLKTYPE_NIX,
+ pcifunc),
+ NIX_AF_LF_SSO_PF_FUNC_SHIFT,
+ NIX_AF_LF_CFG_SHIFT, pcifunc,
+ NIX_AF_LFX_CFG(0));
+
+ cpt_lf = NULL;
+ cpt_lf_cnt = rvu_sso_disable_aw_src(rvu, &cpt_lf,
+ rvu_get_blkaddr(rvu, BLKTYPE_CPT,
+ 0),
+ CPT_AF_LF_SSO_PF_FUNC_SHIFT,
+ CPT_AF_LF_CTL2_SHIFT, pcifunc,
+ CPT_AF_LFX_CTL2(0));
+
+ tim_lf = NULL;
+ tim_lf_cnt = rvu_sso_disable_aw_src(rvu, &tim_lf,
+ rvu_get_blkaddr(rvu, BLKTYPE_TIM,
+ 0),
+ TIM_AF_RING_SSO_PF_FUNC_SHIFT,
+ TIM_AF_RING_GMCTL_SHIFT, pcifunc,
+ TIM_AF_RINGX_GMCTL(0));
+
+ /* ZIP and DPI blocks not yet implemented. */
+
+ /* Enable add work. */
+ rvu_write64(rvu, blkaddr, SSO_AF_BAR2_ALIASX(slot, SSO_LF_GGRP_QCTL),
+ 0x1);
+
+ /* Make sure that all the in-flights are complete before invalidating. */
+ mb();
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_OP_GWC_INVAL), 0x0);
+ /* Prepare WS for GW operations. */
+ do {
+ reg = rvu_read64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_TAG));
+ } while (reg & BIT_ULL(63));
+
+ if (reg & BIT_ULL(62))
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_OP_DESCHED),
+ 0x0);
+ else if (((reg >> 32) & SSO_TT_EMPTY) != SSO_TT_EMPTY)
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_OP_SWTAG_FLUSH),
+ 0x0);
+
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_OP_GWC_INVAL), 0x0);
+ /* Drain TAQ. */
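+ /* Build the GET_WORK request data: the GGRP slot plus what appear to
+ * be the grouped and wait flags (bits 18 and 16), matching the named
+ * SSOW_LF_GWS_OP_GET_WORK_GROUPED/WAIT values used further below.
+ */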
+ val = slot;
+ val |= BIT_ULL(18);
+ val |= BIT_ULL(16);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_TAQ_THR(lf));
+ while ((reg >> 48) & 0x7FF) {
+ rvu_write64(rvu, blkaddr,
+ SSO_AF_BAR2_ALIASX(slot, SSO_LF_GGRP_OP_ADD_WORK1),
+ 0x1 << 3);
+get_work:
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_OP_GET_WORK),
+ val);
+ do {
+ reg = rvu_read64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0,
+ SSOW_LF_GWS_TAG));
+ } while (reg & BIT_ULL(63));
+
+ if (!rvu_read64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_WQP)))
+ goto get_work;
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_TAQ_THR(lf));
+ }
+
+ reg = rvu_read64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_TAG));
+ if (((reg >> 32) & SSO_TT_EMPTY) != SSO_TT_EMPTY)
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_OP_SWTAG_FLUSH),
+ 0x0);
+
+ /* Disable add work. */
+ rvu_write64(rvu, blkaddr, SSO_AF_BAR2_ALIASX(slot, SSO_LF_GGRP_QCTL),
+ 0x0);
+
+ /* Restore all sources of work. */
+ rvu_sso_enable_aw_src(rvu, nix_lf_cnt, rvu_get_blkaddr(rvu, BLKTYPE_NIX,
+ pcifunc),
+ NIX_AF_LFX_CFG(0), nix_lf, pcifunc,
+ NIX_AF_LF_SSO_PF_FUNC_SHIFT,
+ NIX_AF_LF_CFG_SHIFT);
+ rvu_sso_enable_aw_src(rvu, cpt_lf_cnt, rvu_get_blkaddr(rvu, BLKTYPE_CPT,
+ 0),
+ CPT_AF_LFX_CTL2(0), cpt_lf, pcifunc,
+ CPT_AF_LF_SSO_PF_FUNC_SHIFT,
+ CPT_AF_LF_CTL2_SHIFT);
+ rvu_sso_enable_aw_src(rvu, tim_lf_cnt, rvu_get_blkaddr(rvu, BLKTYPE_TIM,
+ 0),
+ TIM_AF_RINGX_GMCTL(0), tim_lf, pcifunc,
+ TIM_AF_RING_SSO_PF_FUNC_SHIFT,
+ TIM_AF_RING_GMCTL_SHIFT);
+
+ kfree(nix_lf);
+ kfree(cpt_lf);
+ kfree(tim_lf);
+}
+
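+/* Walk the SSO in-unit entries and clear any that are marked no-scheduled
+ * for this HWGRP via the GWS CLR_NSCHED operation.
+ */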
+static void rvu_sso_clean_nscheduled(struct rvu *rvu, int lf)
+{
+ struct sso_rsrc *sso = &rvu->hw->sso;
+ int blkaddr, ssow_blkaddr, iue;
+ u64 wqp, reg, op_clr_nsched;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ ssow_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSOW, 0);
+ op_clr_nsched = (ssow_blkaddr << 28) |
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_OP_CLR_NSCHED0);
+ for (iue = 0; iue < sso->sso_iue; iue++) {
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_IENTX_GRP(iue));
+ if (SSO_AF_HWGRPX_IUEX_NOSCHED(lf, reg)) {
+ wqp = rvu_read64(rvu, blkaddr, SSO_AF_IENTX_WQP(iue));
+ rvu_sso_store_pair(wqp, iue,
+ rvu->afreg_base + op_clr_nsched);
+ }
+ }
+}
+
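+/* Flush any prefetched GET_WORK state held by a HWS slot: wait for the
+ * pending prefetch to complete and, if a non-EMPTY tag is held, consume
+ * and flush it.
+ */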
+static void rvu_ssow_clean_prefetch(struct rvu *rvu, int slot)
+{
+ int ssow_blkaddr, err;
+ u64 val, reg;
+
+ ssow_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSOW, 0);
+
+ /* Make sure that all the in-flights are complete before invalidating. */
+ mb();
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(slot, SSOW_LF_GWS_OP_GWC_INVAL), 0x0);
+
+ err = rvu_poll_reg(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(slot, SSOW_LF_GWS_PRF_TAG),
+ SSOW_LF_GWS_TAG_PEND_GET_WORK, true);
+ if (err)
+ dev_warn(rvu->dev,
+ "SSOW_LF_GWS_PRF_TAG[PEND_GET_WORK] not cleared\n");
+
+ reg = rvu_read64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(slot, SSOW_LF_GWS_PRF_TAG));
+ if (((reg >> 32) & SSO_TT_EMPTY) != SSO_TT_EMPTY) {
+ val = 0x0;
+ val |= SSOW_LF_GWS_OP_GET_WORK_WAIT;
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(slot, SSOW_LF_GWS_OP_GET_WORK),
+ val);
+ err = rvu_poll_reg(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(slot,
+ SSOW_LF_GWS_TAG),
+ SSOW_LF_GWS_TAG_PEND_GET_WORK, true);
+ if (err)
+ dev_warn(rvu->dev,
+ "SSOW_LF_GWS_PENDSTATE[PEND_GET_WORK] not cleared\n");
+
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(slot,
+ SSOW_LF_GWS_OP_SWTAG_FLUSH),
+ 0x0);
+ }
+}
+
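+/* Drain all pending work from an SSO HWGRP LF before teardown, following
+ * the HRM 14.13.4 sequence: quiesce the HWS attached in slot 0, stop new
+ * add-work, clear no-scheduled entries and pull the remaining work with
+ * grouped GET_WORK operations until the AQ/DS/CQ counters reach zero.
+ */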
+void rvu_sso_lf_drain_queues(struct rvu *rvu, u16 pcifunc, int lf, int slot)
+{
+ bool has_prefetch, has_nsched, has_lsw;
+ int ssow_lf, blkaddr, ssow_blkaddr;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u64 aq_cnt, ds_cnt, cq_ds_cnt;
+ u64 reg, val;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return;
+
+ /* Read hardware capabilities */
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_CONST1);
+ has_lsw = !!(reg & SSO_AF_CONST1_LSW_PRESENT);
+ has_nsched = !(reg & SSO_AF_CONST1_NO_NSCHED);
+ has_prefetch = !!(reg & SSO_AF_CONST1_PRF_PRESENT);
+
+ ssow_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSOW, 0);
+ if (ssow_blkaddr < 0)
+ return;
+ /* Check if an SSOW LF is attached in slot 0; if not, no HWS are attached. */
+ ssow_lf = rvu_get_lf(rvu, &hw->block[ssow_blkaddr], pcifunc, 0);
+ if (ssow_lf < 0)
+ return;
+
+ /* Enable BAR2 ALIAS for this pcifunc. */
+ reg = BIT_ULL(16) | pcifunc;
+ rvu_write64(rvu, blkaddr, SSO_AF_BAR2_SEL, reg);
+ rvu_write64(rvu, ssow_blkaddr, SSOW_AF_BAR2_SEL, reg);
+
+ /* Ignore all interrupts */
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_INT_ENA_W1C),
+ SSOW_LF_GWS_INT_MASK);
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_INT),
+ SSOW_LF_GWS_INT_MASK);
+
+ if (has_lsw)
+ rvu_write64(rvu, blkaddr, SSO_AF_HWSX_LSW_CFG(ssow_lf), 0x0);
+
+ /* Make sure that all the in-flights are complete before invalidating. */
+ mb();
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_OP_GWC_INVAL), 0x0);
+ /* Prepare WS for GW operations. */
+ rvu_poll_reg(rvu, ssow_blkaddr, SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_TAG),
+ SSOW_LF_GWS_TAG_PEND_GET_WORK, true);
+
+ reg = rvu_read64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_TAG));
+ if (reg & SSOW_LF_GWS_TAG_PEND_SWITCH)
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_OP_DESCHED), 0);
+ else if (((reg >> 32) & SSO_TT_EMPTY) != SSO_TT_EMPTY)
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_OP_SWTAG_FLUSH),
+ 0);
+
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_OP_GWC_INVAL), 0);
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_NW_TIM),
+ SSOW_LF_GWS_MAX_NW_TIM);
+
+ if (has_prefetch)
+ rvu_ssow_clean_prefetch(rvu, 0);
+
+ /* Disable add work. */
+ rvu_write64(rvu, blkaddr, SSO_AF_BAR2_ALIASX(slot, SSO_LF_GGRP_QCTL),
+ 0x0);
+
+ /* HRM 14.13.4 (4) */
+ /* Clean up no-scheduled IENT entries and let the work flow. */
+ if (has_nsched)
+ rvu_sso_clean_nscheduled(rvu, lf);
+
+ /* HRM 14.13.4 (6) */
+ /* Drain all the work using grouped gw. */
+ aq_cnt = rvu_read64(rvu, blkaddr,
+ SSO_AF_BAR2_ALIASX(slot, SSO_LF_GGRP_AQ_CNT));
+ ds_cnt = rvu_read64(rvu, blkaddr,
+ SSO_AF_BAR2_ALIASX(slot, SSO_LF_GGRP_MISC_CNT));
+ cq_ds_cnt = rvu_read64(rvu, blkaddr,
+ SSO_AF_BAR2_ALIASX(slot, SSO_LF_GGRP_INT_CNT));
+ cq_ds_cnt &= SSO_LF_GGRP_INT_CNT_MASK;
+
+ val = slot; /* GGRP ID */
+ val |= SSOW_LF_GWS_OP_GET_WORK_GROUPED;
+ val |= SSOW_LF_GWS_OP_GET_WORK_WAIT;
+
+ while (aq_cnt || cq_ds_cnt || ds_cnt) {
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_OP_GET_WORK),
+ val);
+ do {
+ reg = rvu_read64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0,
+ SSOW_LF_GWS_TAG));
+ } while (reg & SSOW_LF_GWS_TAG_PEND_GET_WORK);
+ if (((reg >> 32) & SSO_TT_EMPTY) != SSO_TT_EMPTY)
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0,
+ SSOW_LF_GWS_OP_SWTAG_FLUSH),
+ 0x0);
+ aq_cnt = rvu_read64(rvu, blkaddr,
+ SSO_AF_BAR2_ALIASX(slot, SSO_LF_GGRP_AQ_CNT)
+ );
+ ds_cnt = rvu_read64(rvu, blkaddr,
+ SSO_AF_BAR2_ALIASX(slot,
+ SSO_LF_GGRP_MISC_CNT));
+ cq_ds_cnt = rvu_read64(rvu, blkaddr,
+ SSO_AF_BAR2_ALIASX(slot,
+ SSO_LF_GGRP_INT_CNT));
+ /* Extract cq and ds count */
+ cq_ds_cnt &= SSO_LF_GGRP_INT_CNT_MASK;
+ }
+
+ /* Due to Errata 35432, SSO doesn't release the partially consumed
+ * TAQ buffer used by a HWGRP when that HWGRP is reset. Drain it
+ * manually with this SW routine.
+ */
+ if (is_rvu_96xx_B0(rvu))
+ rvu_sso_ggrp_taq_flush(rvu, pcifunc, lf, slot, ssow_lf, blkaddr,
+ ssow_blkaddr);
+
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_NW_TIM), 0x0);
+
+ /* HRM 14.13.4 (7) */
+ reg = rvu_read64(rvu, blkaddr,
+ SSO_AF_BAR2_ALIASX(slot, SSO_LF_GGRP_XAQ_CNT))
+ & SSO_LF_GGRP_XAQ_CNT_MASK;
+ if (reg != 0)
+ dev_warn(rvu->dev,
+ "SSO_LF[%d]_GGRP_XAQ_CNT is %lld expected 0", lf, reg);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_PAGE_CNT(lf))
+ & SSO_AF_HWGRP_PAGE_CNT_MASK;
+ if (reg != 0)
+ dev_warn(rvu->dev,
+ "SSO_AF_HWGRP[%d]_PAGE_CNT is %lld expected 0", lf,
+ reg);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_IAQ_THR(lf))
+ >> SSO_HWGRP_IAQ_GRP_CNT_SHIFT;
+ reg &= SSO_HWGRP_IAQ_GRP_CNT_MASK;
+ if (reg != 0)
+ dev_warn(rvu->dev,
+ "SSO_AF_HWGRP[%d]_IAQ_THR is %lld expected 0", lf,
+ reg);
+ rvu_write64(rvu, blkaddr, SSO_AF_HWSX_INV(ssow_lf), 0x1);
+
+ rvu_write64(rvu, blkaddr, SSO_AF_BAR2_SEL, 0);
+ rvu_write64(rvu, ssow_blkaddr, SSOW_AF_BAR2_SEL, 0);
+}
+
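+/* Reset the AF-side state of an SSO HWGRP LF: clear its interrupts and
+ * pending errors, restore default priority and thresholds, and return any
+ * extra reserved IAQ/TAQ entries to the free pools.
+ */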
+int rvu_sso_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot)
+{
+ u64 reg, add;
+ bool has_lsw;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ /* Read hardware capabilities */
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_CONST1);
+ has_lsw = !!(reg & SSO_AF_CONST1_LSW_PRESENT);
+
+ /* Enable BAR2 ALIAS for this pcifunc. */
+ reg = BIT_ULL(16) | pcifunc;
+ rvu_write64(rvu, blkaddr, SSO_AF_BAR2_SEL, reg);
+
+ rvu_write64(rvu, blkaddr,
+ SSO_AF_BAR2_ALIASX(slot, SSO_LF_GGRP_INT_THR), 0x0);
+ rvu_write64(rvu, blkaddr,
+ SSO_AF_BAR2_ALIASX(slot, SSO_LF_GGRP_AQ_THR),
+ SSO_LF_GGRP_AQ_THR_MASK);
+
+ rvu_write64(rvu, blkaddr,
+ SSO_AF_BAR2_ALIASX(slot, SSO_LF_GGRP_INT),
+ SSO_LF_GGRP_INT_MASK);
+ rvu_write64(rvu, blkaddr,
+ SSO_AF_BAR2_ALIASX(slot, SSO_LF_GGRP_INT_ENA_W1C),
+ SSO_LF_GGRP_INT_MASK);
+
+ rvu_write64(rvu, blkaddr, SSO_AF_BAR2_SEL, 0x0);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_UNMAP_INFO);
+ if ((reg & 0xFFF) == pcifunc)
+ rvu_write64(rvu, blkaddr, SSO_AF_ERR0, SSO_AF_ERR0_MASK);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_UNMAP_INFO2);
+ if ((reg & 0xFFF) == pcifunc)
+ rvu_write64(rvu, blkaddr, SSO_AF_ERR2, SSO_AF_ERR2_MASK);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_UNMAP_INFO3);
+ if ((reg & 0xFFF) == pcifunc)
+ rvu_write64(rvu, blkaddr, SSO_AF_ERR2, SSO_AF_ERR2_MASK);
+
+ rvu_write64(rvu, blkaddr, SSO_AF_POISONX(lf / 64), lf % 64);
+ rvu_write64(rvu, blkaddr, SSO_AF_IU_ACCNTX_RST(lf), 0x1);
+
+ rvu_write64(rvu, blkaddr, SSO_AF_ERR0, ~0ULL);
+ /* Re-enable error reporting once we're finished */
+ rvu_write64(rvu, blkaddr, SSO_AF_ERR0_ENA_W1S, ~0ULL);
+
+ /* HRM 14.13.4 (13) */
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_AW_STATUS(lf), 0x0);
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_AW_CFG(lf),
+ SSO_HWGRP_AW_CFG_LDWB | SSO_HWGRP_AW_CFG_LDT |
+ SSO_HWGRP_AW_CFG_STT);
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_XAQ_AURA(lf), 0x0);
+ rvu_write64(rvu, blkaddr, SSO_AF_XAQX_GMCTL(lf), 0x0);
+ reg = (SSO_HWGRP_PRI_AFF_MASK << SSO_HWGRP_PRI_AFF_SHIFT) |
+ (SSO_HWGRP_PRI_WGT_MASK << SSO_HWGRP_PRI_WGT_SHIFT) |
+ (0x1 << SSO_HWGRP_PRI_WGT_SHIFT);
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_PRI(lf), reg);
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_WS_PC(lf), 0x0);
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_EXT_PC(lf), 0x0);
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_WA_PC(lf), 0x0);
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_TS_PC(lf), 0x0);
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_DS_PC(lf), 0x0);
+ if (has_lsw)
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_LS_PC(lf), 0x0);
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_XAQ_LIMIT(lf), 0x0);
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_IU_ACCNT(lf), 0x0);
+
+ /* The delta between the current and default thresholds
+ * needs to be returned to the SSO.
+ */
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_IAQ_THR(lf)) &
+ SSO_HWGRP_IAQ_RSVD_THR_MASK;
+ add = SSO_HWGRP_IAQ_RSVD_THR - reg;
+ reg = (SSO_HWGRP_IAQ_MAX_THR_MASK << SSO_HWGRP_IAQ_MAX_THR_SHIFT) |
+ SSO_HWGRP_IAQ_RSVD_THR;
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_IAQ_THR(lf), reg);
+
+ if (add)
+ rvu_write64(rvu, blkaddr, SSO_AF_AW_ADD,
+ (add & SSO_AF_AW_ADD_RSVD_FREE_MASK) <<
+ SSO_AF_AW_ADD_RSVD_FREE_SHIFT);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_TAQ_THR(lf)) &
+ SSO_HWGRP_TAQ_RSVD_THR_MASK;
+ add = SSO_HWGRP_TAQ_RSVD_THR - reg;
+ reg = (SSO_HWGRP_TAQ_MAX_THR_MASK << SSO_HWGRP_TAQ_MAX_THR_SHIFT) |
+ SSO_HWGRP_TAQ_RSVD_THR;
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_TAQ_THR(lf), reg);
+ if (add)
+ rvu_write64(rvu, blkaddr, SSO_AF_TAQ_ADD,
+ (add & SSO_AF_TAQ_RSVD_FREE_MASK) <<
+ SSO_AF_TAQ_ADD_RSVD_FREE_SHIFT);
+
+ rvu_write64(rvu, blkaddr, SSO_AF_XAQX_HEAD_PTR(lf), 0x0);
+ rvu_write64(rvu, blkaddr, SSO_AF_XAQX_TAIL_PTR(lf), 0x0);
+ rvu_write64(rvu, blkaddr, SSO_AF_XAQX_HEAD_NEXT(lf), 0x0);
+ rvu_write64(rvu, blkaddr, SSO_AF_XAQX_TAIL_NEXT(lf), 0x0);
+
+ return 0;
+}
+
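+/* Quiesce and reset an SSOW HWS LF: wait for pending switch/desched
+ * operations, flush any held tag, invalidate the GWS cache and clear the
+ * hardware group membership masks.
+ */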
+int rvu_ssow_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot)
+{
+ struct sso_rsrc *sso = &rvu->hw->sso;
+ bool has_prefetch, has_lsw;
+ int blkaddr, ssow_blkaddr;
+ u64 reg, grpmsk;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return SSOW_AF_ERR_LF_INVALID;
+
+ ssow_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSOW, 0);
+ if (ssow_blkaddr < 0)
+ return SSOW_AF_ERR_LF_INVALID;
+
+ /* Read hardware capabilities */
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_CONST1);
+ has_lsw = !!(reg & SSO_AF_CONST1_LSW_PRESENT);
+ has_prefetch = !!(reg & SSO_AF_CONST1_PRF_PRESENT);
+
+ /* Enable BAR2 alias access. */
+ reg = BIT_ULL(16) | pcifunc;
+ rvu_write64(rvu, ssow_blkaddr, SSOW_AF_BAR2_SEL, reg);
+
+ /* Ignore all interrupts */
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_INT_ENA_W1C),
+ SSOW_LF_GWS_INT_MASK);
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_INT),
+ SSOW_LF_GWS_INT_MASK);
+
+ if (has_lsw)
+ rvu_write64(rvu, blkaddr, SSO_AF_HWSX_LSW_CFG(lf), 0x0);
+
+ /* Make sure that all the in-flights are complete before invalidating. */
+ mb();
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(slot, SSOW_LF_GWS_OP_GWC_INVAL), 0x0);
+ /* HRM 14.13.4 (3) */
+ /* Wait till waitw/desched completes. */
+ rvu_poll_reg(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(slot, SSOW_LF_GWS_PENDSTATE),
+ SSOW_LF_GWS_TAG_PEND_GET_WORK |
+ SSOW_LF_GWS_TAG_PEND_DESCHED, true);
+
+ reg = rvu_read64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(slot, SSOW_LF_GWS_TAG));
+ /* Switch Tag Pending */
+ if (reg & SSOW_LF_GWS_TAG_PEND_SWITCH)
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(slot, SSOW_LF_GWS_OP_DESCHED),
+ 0x0);
+ /* Tag Type != EMPTY use swtag_flush to release tag-chain. */
+ else if (((reg >> 32) & SSO_TT_EMPTY) != SSO_TT_EMPTY)
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(slot,
+ SSOW_LF_GWS_OP_SWTAG_FLUSH),
+ 0x0);
+
+ /* Wait for desched to complete. */
+ rvu_poll_reg(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(slot, SSOW_LF_GWS_PENDSTATE),
+ SSOW_LF_GWS_TAG_PEND_DESCHED, true);
+
+ if (has_prefetch)
+ rvu_ssow_clean_prefetch(rvu, slot);
+
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_NW_TIM), 0x0);
+ rvu_write64(rvu, ssow_blkaddr,
+ SSOW_AF_BAR2_ALIASX(0, SSOW_LF_GWS_OP_GWC_INVAL), 0x0);
+
+ /* set SAI_INVAL bit */
+ rvu_write64(rvu, blkaddr, SSO_AF_HWSX_INV(lf), 0x1);
+ rvu_write64(rvu, blkaddr, SSO_AF_HWSX_ARB(lf), 0x0);
+ rvu_write64(rvu, blkaddr, SSO_AF_HWSX_GMCTL(lf), 0x0);
+
+ /* Unset the HWS Hardware Group Mask. */
+ for (grpmsk = 0; grpmsk < (sso->sso_hwgrps / 64); grpmsk++) {
+ rvu_write64(rvu, blkaddr,
+ SSO_AF_HWSX_SX_GRPMSKX(lf, 0, grpmsk),
+ 0x0);
+ rvu_write64(rvu, blkaddr,
+ SSO_AF_HWSX_SX_GRPMSKX(lf, 1, grpmsk),
+ 0x0);
+ }
+
+ rvu_write64(rvu, ssow_blkaddr, SSOW_AF_BAR2_SEL, 0x0);
+
+ return 0;
+}
+
+int rvu_sso_poll_aura_cnt(struct rvu *rvu, int npa_blkaddr, int aura)
+{
+ unsigned long timeout = jiffies + usecs_to_jiffies(20000);
+ bool twice = false;
+ u64 __iomem *addr;
+ u64 res, wdata;
+
+ wdata = (u64)aura << 44;
+ addr = rvu->afreg_base + ((npa_blkaddr << 28) |
+ NPA_AF_BAR2_ALIASX(0, NPA_LF_AURA_OP_CNT));
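+ /* Poll the aura's buffer count with an atomic load-and-add on
+ * NPA_LF_AURA_OP_CNT through the BAR2 alias; the aura id is encoded
+ * in the upper bits of the data and the count comes back in the low
+ * 36 bits of the result.
+ */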
+again:
+ rvu_sso_ldadd(res, wdata, addr);
+ if (res & BIT_ULL(42))
+ return 0;
+ if (!(res & 0xFFFFFFFFF))
+ return 0;
+ if (time_before(jiffies, timeout)) {
+ usleep_range(1, 5);
+ goto again;
+ }
+ /* If the CPU was scheduled out before the 'time_before' check above
+ * and resumed only after jiffies crossed the timeout, check one more
+ * time whether HW finished the operation in the meantime.
+ */
+ if (!twice) {
+ twice = true;
+ goto again;
+ }
+ return -EBUSY;
+}
+
+void rvu_sso_deinit_xaq_aura(struct rvu *rvu, int blkaddr, int npa_blkaddr,
+ int aura, int lf)
+{
+ void *free_addr;
+ u64 reg;
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_CONST1);
+ free_addr = rvu->afreg_base + ((npa_blkaddr << 28) |
+ NPA_AF_BAR2_ALIASX(0, NPA_LF_AURA_OP_FREE0));
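+ /* Stop XAQ allocation and buffer refills for this HWGRP, wait for
+ * outstanding NPA fetches to drain, then hand any valid tail buffers
+ * back to the NPA aura through NPA_LF_AURA_OP_FREE0.
+ */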
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_AW_CFG(lf));
+ reg &= ~SSO_HWGRP_AW_CFG_RWEN;
+ reg |= SSO_HWGRP_AW_CFG_XAQ_ALLOC_DIS;
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_AW_CFG(lf), reg);
+
+ rvu_poll_reg(rvu, blkaddr, SSO_AF_HWGRPX_AW_STATUS(lf),
+ SSO_HWGRP_AW_STS_XAQ_BUFSC_MASK, true);
+ rvu_poll_reg(rvu, blkaddr, SSO_AF_HWGRPX_AW_STATUS(lf),
+ SSO_HWGRP_AW_STS_NPA_FETCH, true);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_AW_STATUS(lf));
+ if (reg & SSO_HWGRP_AW_STS_TPTR_NEXT_VLD) {
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_XAQX_TAIL_NEXT(lf));
+ reg &= ~0x7F;
+ if (npa_blkaddr && reg)
+ rvu_sso_store_pair(reg, (u64)aura, free_addr);
+
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_AW_STATUS(lf),
+ SSO_HWGRP_AW_STS_TPTR_NEXT_VLD);
+
+ rvu_write64(rvu, blkaddr, SSO_AF_XAQX_TAIL_NEXT(lf), 0x0);
+ }
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_AW_STATUS(lf));
+ if (reg & SSO_HWGRP_AW_STS_TPTR_VLD) {
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_XAQX_TAIL_PTR(lf));
+ reg &= ~0x7F;
+ if (npa_blkaddr && reg)
+ rvu_sso_store_pair(reg, (u64)aura, free_addr);
+
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_AW_STATUS(lf),
+ SSO_HWGRP_AW_STS_TPTR_VLD);
+
+ rvu_write64(rvu, blkaddr, SSO_AF_XAQX_TAIL_PTR(lf), 0x0);
+ }
+}
+
+int rvu_sso_cleanup_xaq_aura(struct rvu *rvu, u16 pcifunc, int nb_hwgrps)
+{
+ int hwgrp, lf, blkaddr, npa_blkaddr, npa_pcifunc, aura, err;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u64 reg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, pcifunc);
+ if (blkaddr < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ lf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+ if (lf < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_XAQX_GMCTL(lf));
+ npa_pcifunc = reg & 0xFFFF;
+ npa_blkaddr = 0;
+
+ if (npa_pcifunc) {
+ npa_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, npa_pcifunc);
+ if (npa_blkaddr < 0)
+ return SSO_AF_INVAL_NPA_PF_FUNC;
+
+ reg = BIT_ULL(16) | npa_pcifunc;
+ rvu_write64(rvu, npa_blkaddr, NPA_AF_BAR2_SEL, reg);
+ aura = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_XAQ_AURA(lf));
+ }
+
+ for (hwgrp = 0; hwgrp < nb_hwgrps; hwgrp++) {
+ lf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, hwgrp);
+ if (lf < 0) {
+ err = SSO_AF_ERR_LF_INVALID;
+ goto fail;
+ }
+
+ rvu_sso_deinit_xaq_aura(rvu, blkaddr, npa_blkaddr, aura, lf);
+ /* disable XAQ */
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_AW_CFG(lf),
+ SSO_HWGRP_AW_CFG_LDWB | SSO_HWGRP_AW_CFG_LDT |
+ SSO_HWGRP_AW_CFG_STT);
+ }
+
+ if (npa_pcifunc) {
+ err = rvu_sso_poll_aura_cnt(rvu, npa_blkaddr, aura);
+ if (err)
+ dev_err(rvu->dev, "[%d]Failed to free XAQs to aura[%d]\n",
+ __LINE__, aura);
+ }
+
+ for (hwgrp = 0; hwgrp < nb_hwgrps; hwgrp++) {
+ lf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, hwgrp);
+ if (lf < 0)
+ continue;
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_XAQ_AURA(lf), 0);
+ rvu_write64(rvu, blkaddr, SSO_AF_XAQX_GMCTL(lf), 0);
+ }
+ err = 0;
+fail:
+ if (npa_pcifunc)
+ rvu_write64(rvu, npa_blkaddr, NPA_AF_BAR2_SEL, 0x0);
+ return err;
+}
+
+int rvu_mbox_handler_sso_hw_release_xaq_aura(struct rvu *rvu,
+ struct sso_release_xaq *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+
+ return rvu_sso_cleanup_xaq_aura(rvu, pcifunc, req->hwgrps);
+}
+
+int rvu_mbox_handler_sso_hw_setconfig(struct rvu *rvu,
+ struct sso_hw_setconfig *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int hwgrp, lf, err, blkaddr;
+ u32 npa_aura_id;
+ u64 reg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, pcifunc);
+ if (blkaddr < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ npa_aura_id = req->npa_aura_id;
+
+ /* Check if requested 'SSOLF <=> NPALF' mapping is valid */
+ if (req->npa_pf_func) {
+ /* If default, use 'this' SSOLF's PFFUNC */
+ if (req->npa_pf_func == RVU_DEFAULT_PF_FUNC)
+ req->npa_pf_func = pcifunc;
+ if (!is_pffunc_map_valid(rvu, req->npa_pf_func, BLKTYPE_NPA))
+ return SSO_AF_INVAL_NPA_PF_FUNC;
+ }
+
+ err = rvu_sso_cleanup_xaq_aura(rvu, pcifunc, req->hwgrps);
+ if (err < 0)
+ return err;
+
+ /* Initialize XAQ ring */
+ for (hwgrp = 0; hwgrp < req->hwgrps; hwgrp++) {
+ lf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, hwgrp);
+ if (lf < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_XAQ_AURA(lf),
+ npa_aura_id);
+ rvu_write64(rvu, blkaddr, SSO_AF_XAQX_GMCTL(lf),
+ req->npa_pf_func);
+
+ /* enable XAQ */
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_AW_CFG(lf), 0xF);
+
+ /* Wait for ggrp to ack. */
+ err = rvu_poll_reg(rvu, blkaddr,
+ SSO_AF_HWGRPX_AW_STATUS(lf),
+ SSO_HWGRP_AW_STS_INIT_STS, false);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_AW_STATUS(lf));
+ if (err || (reg & BIT_ULL(4)) || !(reg & BIT_ULL(8))) {
+ dev_warn(rvu->dev, "SSO_HWGRP(%d) XAQ NPA pointer initialization failed",
+ lf);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+int rvu_mbox_handler_sso_grp_set_priority(struct rvu *rvu,
+ struct sso_grp_priority *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int lf, blkaddr;
+ u64 regval;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, pcifunc);
+ if (blkaddr < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ regval = (((u64)(req->weight & SSO_HWGRP_PRI_WGT_MASK)
+ << SSO_HWGRP_PRI_WGT_SHIFT) |
+ ((u64)(req->affinity & SSO_HWGRP_PRI_AFF_MASK)
+ << SSO_HWGRP_PRI_AFF_SHIFT) |
+ (req->priority & SSO_HWGRP_PRI_MASK));
+
+ lf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, req->grp);
+ if (lf < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_PRI(lf), regval);
+
+ return 0;
+}
+
+int rvu_mbox_handler_sso_grp_get_priority(struct rvu *rvu,
+ struct sso_info_req *req,
+ struct sso_grp_priority *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int lf, blkaddr;
+ u64 regval;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, pcifunc);
+ if (blkaddr < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ lf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, req->grp);
+ if (lf < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ regval = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_PRI(lf));
+
+ rsp->weight = (regval >> SSO_HWGRP_PRI_WGT_SHIFT)
+ & SSO_HWGRP_PRI_WGT_MASK;
+ rsp->affinity = (regval >> SSO_HWGRP_PRI_AFF_SHIFT)
+ & SSO_HWGRP_PRI_AFF_MASK;
+ rsp->priority = regval & SSO_HWGRP_PRI_MASK;
+
+ return 0;
+}
+
+int rvu_mbox_handler_sso_grp_qos_config(struct rvu *rvu,
+ struct sso_grp_qos_cfg *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ u64 regval, grp_rsvd;
+ int lf, blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, pcifunc);
+ if (blkaddr < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ lf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, req->grp);
+ if (lf < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ /* Reject if the GGRP has already been active (work-add counter is non-zero). */
+ regval = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_WA_PC(lf));
+ if (regval)
+ return SSO_AF_ERR_GRP_EBUSY;
+
+ /* Configure XAQ threshold */
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_XAQ_LIMIT(lf), req->xaq_limit);
+
+ /* Configure TAQ threshold */
+ regval = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_TAQ_THR(lf));
+ grp_rsvd = regval & SSO_HWGRP_TAQ_RSVD_THR_MASK;
+ if (req->taq_thr < grp_rsvd)
+ req->taq_thr = grp_rsvd;
+
+ regval = req->taq_thr & SSO_HWGRP_TAQ_MAX_THR_MASK;
+ regval = (regval << SSO_HWGRP_TAQ_MAX_THR_SHIFT) | grp_rsvd;
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_TAQ_THR(lf), regval);
+
+ /* Configure IAQ threshold */
+ regval = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_IAQ_THR(lf));
+ grp_rsvd = regval & SSO_HWGRP_IAQ_RSVD_THR_MASK;
+ if (req->iaq_thr < grp_rsvd + 4)
+ req->iaq_thr = grp_rsvd + 4;
+
+ regval = req->iaq_thr & SSO_HWGRP_IAQ_MAX_THR_MASK;
+ regval = (regval << SSO_HWGRP_IAQ_MAX_THR_SHIFT) | grp_rsvd;
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_IAQ_THR(lf), regval);
+
+ return 0;
+}
+
+int rvu_mbox_handler_sso_grp_get_stats(struct rvu *rvu,
+ struct sso_info_req *req,
+ struct sso_grp_stats *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int lf, blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, pcifunc);
+ if (blkaddr < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ lf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, req->grp);
+ if (lf < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ rsp->ws_pc = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_WS_PC(lf));
+ rsp->ext_pc = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_EXT_PC(lf));
+ rsp->wa_pc = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_WA_PC(lf));
+ rsp->ts_pc = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_TS_PC(lf));
+ rsp->ds_pc = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_DS_PC(lf));
+ rsp->dq_pc = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_DQ_PC(lf));
+ rsp->aw_status = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_AW_STATUS(lf));
+ rsp->page_cnt = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_PAGE_CNT(lf));
+
+ return 0;
+}
+
+int rvu_mbox_handler_sso_hws_get_stats(struct rvu *rvu,
+ struct sso_info_req *req,
+ struct sso_hws_stats *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int lf, blkaddr, ssow_blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, pcifunc);
+ if (blkaddr < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ ssow_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSOW, pcifunc);
+ if (ssow_blkaddr < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ lf = rvu_get_lf(rvu, &hw->block[ssow_blkaddr], pcifunc, req->hws);
+ if (lf < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ rsp->arbitration = rvu_read64(rvu, blkaddr, SSO_AF_HWSX_ARB(lf));
+
+ return 0;
+}
+
+int rvu_mbox_handler_sso_lf_alloc(struct rvu *rvu, struct sso_lf_alloc_req *req,
+ struct sso_lf_alloc_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int ssolf, uniq_ident, rc = 0;
+ struct rvu_pfvf *pfvf;
+ int hwgrp, blkaddr;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, pcifunc);
+ if (pfvf->sso <= 0 || blkaddr < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ if (!pfvf->sso_uniq_ident) {
+ uniq_ident = rvu_alloc_rsrc(&hw->sso.pfvf_ident);
+ if (uniq_ident < 0) {
+ rc = SSO_AF_ERR_AF_LF_ALLOC;
+ goto exit;
+ }
+ pfvf->sso_uniq_ident = uniq_ident;
+ } else {
+ uniq_ident = pfvf->sso_uniq_ident;
+ }
+
+ /* Set the threshold for the In-Unit Accounting Index */
+ rvu_write64(rvu, blkaddr, SSO_AF_IU_ACCNTX_CFG(uniq_ident),
+ SSO_AF_HWGRP_IU_ACCNT_MAX_THR << 16);
+
+ for (hwgrp = 0; hwgrp < req->hwgrps; hwgrp++) {
+ ssolf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, hwgrp);
+ if (ssolf < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ /* All groups assigned to a single SR-IOV function must be
+ * assigned the same unique in-unit accounting index.
+ */
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_IU_ACCNT(ssolf),
+ 0x10000 | uniq_ident);
+
+ /* Assign unique tagspace */
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_AW_TAGSPACE(ssolf),
+ uniq_ident);
+ }
+
+exit:
+ rsp->xaq_buf_size = hw->sso.sso_xaq_buf_size;
+ rsp->xaq_wq_entries = hw->sso.sso_xaq_num_works;
+ rsp->in_unit_entries = hw->sso.sso_iue;
+ rsp->hwgrps = hw->sso.sso_hwgrps;
+ return rc;
+}
+
+int rvu_mbox_handler_sso_lf_free(struct rvu *rvu, struct sso_lf_free_req *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int hwgrp, lf, err, blkaddr;
+ struct rvu_pfvf *pfvf;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, pcifunc);
+ if (blkaddr < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ for (hwgrp = 0; hwgrp < req->hwgrps; hwgrp++) {
+ lf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, hwgrp);
+ if (lf < 0)
+ continue;
+ rvu_sso_lf_drain_queues(rvu, pcifunc, lf, hwgrp);
+ }
+ rvu_sso_cleanup_xaq_aura(rvu, pcifunc, req->hwgrps);
+
+ /* Perform reset of SSO HW GRPs */
+ for (hwgrp = 0; hwgrp < req->hwgrps; hwgrp++) {
+ lf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, hwgrp);
+ if (lf < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ err = rvu_sso_lf_teardown(rvu, pcifunc, lf, hwgrp);
+ if (err)
+ return err;
+
+ /* Reset this SSO LF */
+ err = rvu_lf_reset(rvu, &hw->block[blkaddr], lf);
+ if (err)
+ dev_err(rvu->dev, "SSO%d free: failed to reset\n", lf);
+ /* Reset the IAQ and TAQ thresholds */
+ rvu_sso_hwgrp_config_thresh(rvu, blkaddr, lf);
+ }
+
+ if (pfvf->sso_uniq_ident) {
+ rvu_free_rsrc(&hw->sso.pfvf_ident, pfvf->sso_uniq_ident);
+ pfvf->sso_uniq_ident = 0;
+ }
+
+ return 0;
+}
+
+int rvu_mbox_handler_sso_ws_cache_inv(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ int num_lfs, ssowlf, hws, blkaddr;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_block *block;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSOW, pcifunc);
+ if (blkaddr < 0)
+ return SSOW_AF_ERR_LF_INVALID;
+
+ block = &hw->block[blkaddr];
+
+ num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
+ block->addr);
+ if (!num_lfs)
+ return SSOW_AF_ERR_LF_INVALID;
+
+ /* SSO HWS invalidate registers are part of SSO AF */
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, pcifunc);
+ if (blkaddr < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ for (hws = 0; hws < num_lfs; hws++) {
+ ssowlf = rvu_get_lf(rvu, block, pcifunc, hws);
+ if (ssowlf < 0)
+ return SSOW_AF_ERR_LF_INVALID;
+
+ /* Reset this SSO LF GWS cache */
+ rvu_write64(rvu, blkaddr, SSO_AF_HWSX_INV(ssowlf), 1);
+ }
+
+ return 0;
+}
+
+int rvu_mbox_handler_ssow_chng_mship(struct rvu *rvu,
+ struct ssow_chng_mship *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int ssolf, ssowlf, hwgrp;
+ u8 pos, bit;
+ int blkaddr;
+ u64 reg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSOW, pcifunc);
+ if (blkaddr < 0)
+ return SSOW_AF_ERR_LF_INVALID;
+
+ ssowlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, req->hws);
+ if (ssowlf < 0)
+ return SSO_AF_ERR_PARAM;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, pcifunc);
+ if (blkaddr < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ for (hwgrp = 0; hwgrp < req->nb_hwgrps; hwgrp++) {
+ ssolf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc,
+ req->hwgrps[hwgrp]);
+ if (ssolf < 0)
+ return SSO_AF_ERR_PARAM;
+
+ if (req->set > 1)
+ return SSO_AF_ERR_PARAM;
+ pos = ssolf / 64;
+ bit = ssolf % 64;
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWSX_SX_GRPMSKX(ssowlf,
+ req->set,
+ pos));
+ if (req->enable)
+ reg |= BIT_ULL(bit);
+ else
+ reg &= ~BIT_ULL(bit);
+
+ rvu_write64(rvu, blkaddr, SSO_AF_HWSX_SX_GRPMSKX(ssowlf,
+ req->set,
+ pos), reg);
+ }
+
+ return 0;
+}
+
+int rvu_mbox_handler_ssow_lf_alloc(struct rvu *rvu,
+ struct ssow_lf_alloc_req *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ if (pfvf->ssow <= 0)
+ return SSOW_AF_ERR_LF_INVALID;
+
+ return 0;
+}
+
+int rvu_mbox_handler_ssow_lf_free(struct rvu *rvu,
+ struct ssow_lf_free_req *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int ssowlf, hws, err, blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSOW, pcifunc);
+ if (blkaddr < 0)
+ return SSOW_AF_ERR_LF_INVALID;
+
+ for (hws = 0; hws < req->hws; hws++) {
+ ssowlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, hws);
+ if (ssowlf < 0)
+ return SSOW_AF_ERR_LF_INVALID;
+
+ err = rvu_ssow_lf_teardown(rvu, pcifunc, ssowlf, hws);
+ if (err)
+ return err;
+
+ /* Reset this SSO LF */
+ err = rvu_lf_reset(rvu, &hw->block[blkaddr], ssowlf);
+ if (err)
+ dev_err(rvu->dev, "SSOW%d free: failed to reset\n",
+ ssowlf);
+ }
+
+ return 0;
+}
+
+int rvu_mbox_handler_ssow_config_lsw(struct rvu *rvu,
+ struct ssow_config_lsw *req,
+ struct msg_rsp *rsp)
+{
+ int num_lfs, ssowlf, hws, blkaddr;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_block *block;
+ bool has_lsw;
+ u64 val;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSOW, pcifunc);
+ if (blkaddr < 0)
+ return SSOW_AF_ERR_LF_INVALID;
+
+ block = &hw->block[blkaddr];
+
+ num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
+ block->addr);
+ if (!num_lfs)
+ return SSOW_AF_ERR_LF_INVALID;
+
+ /* SSO HWS LSW config registers are part of SSO AF */
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, pcifunc);
+ if (blkaddr < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ val = rvu_read64(rvu, blkaddr, SSO_AF_CONST1);
+ has_lsw = !!(val & SSO_AF_CONST1_LSW_PRESENT);
+
+ if (!has_lsw || req->lsw_mode > SSOW_LSW_GW_IMM ||
+ req->wqe_release > SSOW_WQE_REL_IMM)
+ return SSOW_AF_ERR_INVALID_CFG;
+
+ for (hws = 0; hws < num_lfs; hws++) {
+ ssowlf = rvu_get_lf(rvu, block, pcifunc, hws);
+ if (ssowlf < 0)
+ return SSOW_AF_ERR_LF_INVALID;
+ val = req->wqe_release << 2;
+ val |= req->lsw_mode;
+ rvu_write64(rvu, blkaddr, SSO_AF_HWSX_LSW_CFG(ssowlf), val);
+ }
+
+ return 0;
+}
+
+static int rvu_sso_do_register_interrupt(struct rvu *rvu, int irq_offs,
+ irq_handler_t handler,
+ const char *name)
+{
+ int ret = 0;
+
+ ret = request_irq(pci_irq_vector(rvu->pdev, irq_offs), handler, 0,
+ name, rvu);
+ if (ret) {
+ dev_err(rvu->dev, "SSOAF: %s irq registration failed", name);
+ goto err;
+ }
+
+ WARN_ON(rvu->irq_allocated[irq_offs]);
+ rvu->irq_allocated[irq_offs] = true;
+err:
+ return ret;
+}
+
+static irqreturn_t rvu_sso_af_err0_intr_handler(int irq, void *ptr)
+{
+ struct rvu *rvu = (struct rvu *)ptr;
+ struct rvu_block *block;
+ int i, blkaddr;
+ u64 reg, reg0;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ block = &rvu->hw->block[blkaddr];
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_ERR0);
+ dev_err_ratelimited(rvu->dev, "Received SSO_AF_ERR0 irq : 0x%llx", reg);
+
+ if (reg & BIT_ULL(15)) {
+ dev_err_ratelimited(rvu->dev, "Received Bad-fill-packet NCB error");
+ SSO_AF_INT_DIGEST_PRNT(SSO_AF_POISON)
+ }
+
+ if (reg & BIT_ULL(14)) {
+ dev_err_ratelimited(rvu->dev, "An FLR was initiated, but SSO_LF_GGRP_AQ_CNT[AQ_CNT] != 0");
+ SSO_AF_INT_DIGEST_PRNT(SSO_AF_FLR_AQ_DIGEST)
+ }
+
+ if (reg & BIT_ULL(13)) {
+ dev_err_ratelimited(rvu->dev, "Add work dropped due to XAQ pointers not yet initialized.");
+ SSO_AF_INT_DIGEST_PRNT(SSO_AF_XAQDIS_DIGEST)
+ }
+
+ if (reg & (0xF << 9)) {
+ dev_err_ratelimited(rvu->dev, "PF_FUNC mapping error.");
+ dev_err_ratelimited(rvu->dev, "SSO_AF_UNMAP_INFO : 0x%llx",
+ rvu_read64(rvu, blkaddr, SSO_AF_UNMAP_INFO));
+ }
+
+ if (reg & BIT_ULL(8)) {
+ dev_err_ratelimited(rvu->dev, "Add work dropped due to QTL being disabled, 0x0");
+ SSO_AF_INT_DIGEST_PRNT(SSO_AF_QCTLDIS_DIGEST)
+ }
+
+ if (reg & BIT_ULL(7)) {
+ dev_err_ratelimited(rvu->dev, "Add work dropped due to WQP being 0x0");
+ SSO_AF_INT_DIGEST_PRNT(SSO_AF_WQP0_DIGEST)
+ }
+
+ if (reg & BIT_ULL(6))
+ dev_err_ratelimited(rvu->dev, "Add work dropped due to 64 bit write");
+
+ if (reg & BIT_ULL(5))
+ dev_err_ratelimited(rvu->dev, "Set when received add work with tag type is specified as EMPTY");
+
+ if (reg & BIT_ULL(4)) {
+ dev_err_ratelimited(rvu->dev, "Add work to disabled hardware group. An ADDWQ was received and dropped to a hardware group with SSO_AF_HWGRP(0..255)_IAQ_THR[RSVD_THR] = 0.");
+ SSO_AF_INT_DIGEST_PRNT(SSO_AF_GRPDIS_DIGEST)
+ }
+
+ if (reg & BIT_ULL(3)) {
+ dev_err_ratelimited(rvu->dev, "Bad-fill-packet NCB error");
+ SSO_AF_INT_DIGEST_PRNT(SSO_AF_BFPN_DIGEST)
+ }
+
+ if (reg & BIT_ULL(2)) {
+ dev_err_ratelimited(rvu->dev, "Bad-fill-packet error.");
+ SSO_AF_INT_DIGEST_PRNT(SSO_AF_BFP_DIGEST)
+ }
+
+ if (reg & BIT_ULL(1)) {
+ dev_err_ratelimited(rvu->dev, "The NPA returned an error indication");
+ SSO_AF_INT_DIGEST_PRNT(SSO_AF_NPA_DIGEST)
+ }
+
+ rvu_write64(rvu, blkaddr, SSO_AF_ERR0, reg);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rvu_sso_af_err2_intr_handler(int irq, void *ptr)
+{
+ struct rvu *rvu = (struct rvu *)ptr;
+ int blkaddr;
+ u64 reg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_ERR2);
+ dev_err_ratelimited(rvu->dev, "received SSO_AF_ERR2 irq : 0x%llx", reg);
+ rvu_write64(rvu, blkaddr, SSO_AF_ERR2, reg);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rvu_sso_af_ras_intr_handler(int irq, void *ptr)
+{
+ struct rvu *rvu = (struct rvu *)ptr;
+ struct rvu_block *block;
+ int i, blkaddr;
+ u64 reg, reg0;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ block = &rvu->hw->block[blkaddr];
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_RAS);
+ dev_err_ratelimited(rvu->dev, "received SSO_AF_RAS irq : 0x%llx", reg);
+ rvu_write64(rvu, blkaddr, SSO_AF_RAS, reg);
+ SSO_AF_INT_DIGEST_PRNT(SSO_AF_POISON)
+
+ return IRQ_HANDLED;
+}
+
+void rvu_sso_unregister_interrupts(struct rvu *rvu)
+{
+ int i, blkaddr, offs;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return;
+
+ offs = rvu_read64(rvu, blkaddr, SSO_PRIV_AF_INT_CFG) & 0x7FF;
+ if (!offs)
+ return;
+
+ rvu_write64(rvu, blkaddr, SSO_AF_RAS_ENA_W1C, ~0ULL);
+ rvu_write64(rvu, blkaddr, SSO_AF_ERR2_ENA_W1C, ~0ULL);
+ rvu_write64(rvu, blkaddr, SSO_AF_ERR0_ENA_W1C, ~0ULL);
+
+ for (i = 0; i < SSO_AF_INT_VEC_CNT; i++)
+ if (rvu->irq_allocated[offs + i]) {
+ free_irq(pci_irq_vector(rvu->pdev, offs + i), rvu);
+ rvu->irq_allocated[offs + i] = false;
+ }
+}
+
+int rvu_sso_register_interrupts(struct rvu *rvu)
+{
+ int blkaddr, offs, ret = 0;
+
+ if (!is_block_implemented(rvu->hw, BLKADDR_SSO))
+ return 0;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ offs = rvu_read64(rvu, blkaddr, SSO_PRIV_AF_INT_CFG) & 0x7FF;
+ if (!offs) {
+ dev_warn(rvu->dev,
+ "Failed to get SSO_AF_INT vector offsets\n");
+ return 0;
+ }
+
+ ret = rvu_sso_do_register_interrupt(rvu, offs + SSO_AF_INT_VEC_ERR0,
+ rvu_sso_af_err0_intr_handler,
+ "SSO_AF_ERR0");
+ if (ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, SSO_AF_ERR0_ENA_W1S, ~0ULL);
+
+ ret = rvu_sso_do_register_interrupt(rvu, offs + SSO_AF_INT_VEC_ERR2,
+ rvu_sso_af_err2_intr_handler,
+ "SSO_AF_ERR2");
+ if (ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, SSO_AF_ERR2_ENA_W1S, ~0ULL);
+
+ ret = rvu_sso_do_register_interrupt(rvu, offs + SSO_AF_INT_VEC_RAS,
+ rvu_sso_af_ras_intr_handler,
+ "SSO_AF_RAS");
+ if (ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, SSO_AF_RAS_ENA_W1S, ~0ULL);
+
+ return 0;
+err:
+ rvu_sso_unregister_interrupts(rvu);
+ return ret;
+}
+
+int rvu_sso_init(struct rvu *rvu)
+{
+ u64 iaq_free_cnt, iaq_rsvd, iaq_max, iaq_rsvd_cnt = 0;
+ u64 taq_free_cnt, taq_rsvd, taq_max, taq_rsvd_cnt = 0;
+ struct sso_rsrc *sso = &rvu->hw->sso;
+ int blkaddr, hwgrp, grpmsk, hws, err;
+ u64 reg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, 0);
+ if (blkaddr < 0)
+ return 0;
+
+ if (!is_rvu_otx2(rvu))
+ rvu_sso_block_cn10k_init(rvu, blkaddr);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_CONST);
+ /* number of SSO hardware work slots */
+ sso->sso_hws = (reg >> 56) & 0xFF;
+ /* number of SSO hardware groups */
+ sso->sso_hwgrps = (reg & 0xFFFF);
+ /* number of SSO In-Unit entries */
+ sso->sso_iue = (reg >> 16) & 0xFFFF;
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_CONST1);
+ /* number of work entries in external admission queue (XAQ) */
+ sso->sso_xaq_num_works = (reg >> 16) & 0xFFFF;
+ /* number of bytes in a XAQ buffer */
+ sso->sso_xaq_buf_size = (reg & 0xFFFF);
+
+ /* Configure IAQ entries */
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_AW_WE);
+ iaq_free_cnt = reg & SSO_AF_IAQ_FREE_CNT_MASK;
+
+ /* Give out half of buffers fairly, rest left floating */
+ iaq_rsvd = iaq_free_cnt / sso->sso_hwgrps / 2;
+
+ /* Enforce the minimum required by hardware */
+ if (iaq_rsvd < SSO_HWGRP_IAQ_RSVD_THR)
+ iaq_rsvd = SSO_HWGRP_IAQ_RSVD_THR;
+ /* To ensure full streaming performance, this should be at least 208. */
+ iaq_max = iaq_rsvd + SSO_HWGRP_IAQ_MAX_THR_STRM_PERF;
+
+ if (iaq_max >= (SSO_AF_IAQ_FREE_CNT_MAX + 1))
+ iaq_max = SSO_AF_IAQ_FREE_CNT_MAX;
+
+ /* Configure TAQ entries */
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_TAQ_CNT);
+ taq_free_cnt = reg & SSO_AF_TAQ_FREE_CNT_MASK;
+
+ /* Give out half of buffers fairly, rest left floating */
+ taq_rsvd = taq_free_cnt / sso->sso_hwgrps / 2;
+
+ /* Enforce the minimum required by hardware */
+ if (taq_rsvd < SSO_HWGRP_TAQ_RSVD_THR)
+ taq_rsvd = SSO_HWGRP_TAQ_RSVD_THR;
+ /* To ensure full streaming performance, this should be at least 16. */
+ taq_max = taq_rsvd + SSO_HWGRP_TAQ_MAX_THR_STRM_PERF;
+
+ if (taq_max >= (SSO_AF_TAQ_FREE_CNT_MAX + 1))
+ taq_max = SSO_AF_TAQ_FREE_CNT_MAX;
+
+ /* Save thresholds to reprogram HWGRPs on reset */
+ sso->iaq_rsvd = iaq_rsvd;
+ sso->iaq_max = iaq_max;
+ sso->taq_rsvd = taq_rsvd;
+ sso->taq_max = taq_max;
+
+ for (hwgrp = 0; hwgrp < sso->sso_hwgrps; hwgrp++) {
+ rvu_sso_hwgrp_config_thresh(rvu, blkaddr, hwgrp);
+ iaq_rsvd_cnt += iaq_rsvd;
+ taq_rsvd_cnt += taq_rsvd;
+ }
+
+ /* Verify SSO_AF_AW_WE[RSVD_FREE] and TAQ_CNT[RSVD_FREE] are greater
+ * than or equal to the sums of the IAQ[RSVD_THR] and TAQ[RSVD_THR]
+ * fields respectively.
+ */
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_AW_WE);
+ reg = (reg >> SSO_AF_IAQ_RSVD_FREE_SHIFT) & SSO_AF_IAQ_RSVD_FREE_MASK;
+ if (reg < iaq_rsvd_cnt) {
+ dev_warn(rvu->dev, "WARN: Wrong IAQ resource calculations %llx vs %llx\n",
+ reg, iaq_rsvd_cnt);
+ rvu_write64(rvu, blkaddr, SSO_AF_AW_WE,
+ (iaq_rsvd_cnt & SSO_AF_IAQ_RSVD_FREE_MASK) <<
+ SSO_AF_IAQ_RSVD_FREE_SHIFT);
+ }
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_TAQ_CNT);
+ reg = (reg >> SSO_AF_TAQ_RSVD_FREE_SHIFT) & SSO_AF_TAQ_RSVD_FREE_MASK;
+ if (reg < taq_rsvd_cnt) {
+ dev_warn(rvu->dev, "WARN: Wrong TAQ resource calculations %llx vs %llx\n",
+ reg, taq_rsvd_cnt);
+ rvu_write64(rvu, blkaddr, SSO_AF_TAQ_CNT,
+ (taq_rsvd_cnt & SSO_AF_TAQ_RSVD_FREE_MASK) <<
+ SSO_AF_TAQ_RSVD_FREE_SHIFT);
+ }
+
+ /* Unset the HWS Hardware Group Mask.
+ * The hardware group mask should be set by PF/VF
+ * using SSOW_LF_GWS_GRPMSK_CHG based on the LF allocations.
+ */
+ for (grpmsk = 0; grpmsk < (sso->sso_hwgrps / 64); grpmsk++) {
+ for (hws = 0; hws < sso->sso_hws; hws++) {
+ rvu_write64(rvu, blkaddr,
+ SSO_AF_HWSX_SX_GRPMSKX(hws, 0, grpmsk),
+ 0x0);
+ rvu_write64(rvu, blkaddr,
+ SSO_AF_HWSX_SX_GRPMSKX(hws, 1, grpmsk),
+ 0x0);
+ }
+ }
+
+ /* Allocate SSO_AF_CONST::HWS + 1 identifiers, as the total number of
+ * PFs/VFs is limited by the number of HWS available.
+ */
+ sso->pfvf_ident.max = sso->sso_hws + 1;
+ err = rvu_alloc_bitmap(&sso->pfvf_ident);
+ if (err)
+ return err;
+
+ /* Reserve one bit so that identifier starts from 1 */
+ rvu_alloc_rsrc(&sso->pfvf_ident);
+
+ /* Enable the SSO work-queue interrupt time counter with a default period of 10us */
+ rvu_write64(rvu, blkaddr, SSO_AF_WQ_INT_PC, 0x28UL << 8);
+
+ return 0;
+}
+
+void rvu_sso_freemem(struct rvu *rvu)
+{
+ struct sso_rsrc *sso = &rvu->hw->sso;
+
+ kfree(sso->pfvf_ident.bmap);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
index a3ecb5de9000..d4ce01b46010 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
@@ -1,11 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Marvell OcteonTx2 RVU Admin Function driver
+/* Marvell RVU Admin Function driver
*
- * Copyright (C) 2018 Marvell International Ltd.
+ * Copyright (C) 2018 Marvell.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef RVU_STRUCT_H
@@ -14,24 +11,31 @@
/* RVU Block revision IDs */
#define RVU_BLK_RVUM_REVID 0x01
+#define RVU_MULTI_BLK_VER 0x7ULL
+
/* RVU Block Address Enumeration */
enum rvu_block_addr_e {
- BLKADDR_RVUM = 0x0ULL,
- BLKADDR_LMT = 0x1ULL,
- BLKADDR_MSIX = 0x2ULL,
- BLKADDR_NPA = 0x3ULL,
- BLKADDR_NIX0 = 0x4ULL,
- BLKADDR_NIX1 = 0x5ULL,
- BLKADDR_NPC = 0x6ULL,
- BLKADDR_SSO = 0x7ULL,
- BLKADDR_SSOW = 0x8ULL,
- BLKADDR_TIM = 0x9ULL,
- BLKADDR_CPT0 = 0xaULL,
- BLKADDR_CPT1 = 0xbULL,
- BLKADDR_NDC_NIX0_RX = 0xcULL,
- BLKADDR_NDC_NIX0_TX = 0xdULL,
- BLKADDR_NDC_NPA0 = 0xeULL,
- BLK_COUNT = 0xfULL,
+ BLKADDR_RVUM = 0x0ULL,
+ BLKADDR_LMT = 0x1ULL,
+ BLKADDR_MSIX = 0x2ULL,
+ BLKADDR_NPA = 0x3ULL,
+ BLKADDR_NIX0 = 0x4ULL,
+ BLKADDR_NIX1 = 0x5ULL,
+ BLKADDR_NPC = 0x6ULL,
+ BLKADDR_SSO = 0x7ULL,
+ BLKADDR_SSOW = 0x8ULL,
+ BLKADDR_TIM = 0x9ULL,
+ BLKADDR_CPT0 = 0xaULL,
+ BLKADDR_CPT1 = 0xbULL,
+ BLKADDR_NDC_NIX0_RX = 0xcULL,
+ BLKADDR_NDC_NIX0_TX = 0xdULL,
+ BLKADDR_NDC_NPA0 = 0xeULL,
+ BLKADDR_NDC_NIX1_RX = 0x10ULL,
+ BLKADDR_NDC_NIX1_TX = 0x11ULL,
+ BLKADDR_REE0 = 0x14ULL,
+ BLKADDR_REE1 = 0x15ULL,
+ BLKADDR_APR = 0x16ULL,
+ BLK_COUNT = 0x17ULL,
};
/* RVU Block Type Enumeration */
@@ -47,7 +51,8 @@ enum rvu_block_type_e {
BLKTYPE_TIM = 0x8,
BLKTYPE_CPT = 0x9,
BLKTYPE_NDC = 0xa,
- BLKTYPE_MAX = 0xa,
+ BLKTYPE_REE = 0xe,
+ BLKTYPE_MAX = 0xe,
};
/* RVU Admin function Interrupt Vector Enumeration */
@@ -60,6 +65,60 @@ enum rvu_af_int_vec_e {
RVU_AF_INT_VEC_CNT = 0x5,
};
+/* SSO Admin function Interrupt Vector Enumeration */
+enum sso_af_int_vec_e {
+ SSO_AF_INT_VEC_ERR0 = 0x0,
+ SSO_AF_INT_VEC_ERR2 = 0x1,
+ SSO_AF_INT_VEC_RAS = 0x2,
+ SSO_AF_INT_VEC_CNT = 0x3,
+};
+
+/* CPT Admin function Interrupt Vector Enumeration */
+enum cpt_af_int_vec_e {
+ CPT_AF_INT_VEC_FLT0 = 0x0,
+ CPT_AF_INT_VEC_FLT1 = 0x1,
+ CPT_AF_INT_VEC_RVU = 0x2,
+ CPT_AF_INT_VEC_RAS = 0x3,
+ CPT_AF_INT_VEC_CNT = 0x4,
+};
+
+enum cpt_10k_af_int_vec_e {
+ CPT_10K_AF_INT_VEC_FLT0 = 0x0,
+ CPT_10K_AF_INT_VEC_FLT1 = 0x1,
+ CPT_10K_AF_INT_VEC_FLT2 = 0x2,
+ CPT_10K_AF_INT_VEC_RVU = 0x3,
+ CPT_10K_AF_INT_VEC_RAS = 0x4,
+ CPT_10K_AF_INT_VEC_CNT = 0x5,
+};
+/* REE Admin function Interrupt Vector Enumeration */
+enum ree_af_int_vec_e {
+ REE_AF_INT_VEC_RAS = 0x0,
+ REE_AF_INT_VEC_RVU = 0x1,
+ REE_AF_INT_VEC_QUE_DONE = 0x2,
+ REE_AF_INT_VEC_AQ = 0x3,
+ REE_AF_INT_VEC_CNT = 0x4,
+};
+
+/* NPA Admin function Interrupt Vector Enumeration */
+enum npa_af_int_vec_e {
+ NPA_AF_INT_VEC_RVU = 0x0,
+ NPA_AF_INT_VEC_GEN = 0x1,
+ NPA_AF_INT_VEC_AQ_DONE = 0x2,
+ NPA_AF_INT_VEC_AF_ERR = 0x3,
+ NPA_AF_INT_VEC_POISON = 0x4,
+ NPA_AF_INT_VEC_CNT = 0x5,
+};
+
+/* NIX Admin function Interrupt Vector Enumeration */
+enum nix_af_int_vec_e {
+ NIX_AF_INT_VEC_RVU = 0x0,
+ NIX_AF_INT_VEC_GEN = 0x1,
+ NIX_AF_INT_VEC_AQ_DONE = 0x2,
+ NIX_AF_INT_VEC_AF_ERR = 0x3,
+ NIX_AF_INT_VEC_POISON = 0x4,
+ NIX_AF_INT_VEC_CNT = 0x5,
+};
+
/**
* RVU PF Interrupt Vector Enumeration
*/
@@ -100,65 +159,44 @@ enum npa_aq_instop {
NPA_AQ_INSTOP_UNLOCK = 0x5,
};
+/* ALLOC/FREE input queues Enumeration from coprocessors */
+enum npa_inpq {
+ NPA_INPQ_NIX0_RX = 0x0,
+ NPA_INPQ_NIX0_TX = 0x1,
+ NPA_INPQ_NIX1_RX = 0x2,
+ NPA_INPQ_NIX1_TX = 0x3,
+ NPA_INPQ_SSO = 0x4,
+ NPA_INPQ_TIM = 0x5,
+ NPA_INPQ_DPI = 0x6,
+ NPA_INPQ_AURA_OP = 0xe,
+ NPA_INPQ_INTERNAL_RSV = 0xf,
+};
+
/* NPA admin queue instruction structure */
struct npa_aq_inst_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 doneint : 1; /* W0 */
- u64 reserved_44_62 : 19;
- u64 cindex : 20;
- u64 reserved_17_23 : 7;
- u64 lf : 9;
- u64 ctype : 4;
- u64 op : 4;
-#else
- u64 op : 4;
+ u64 op : 4; /* W0 */
u64 ctype : 4;
u64 lf : 9;
u64 reserved_17_23 : 7;
u64 cindex : 20;
u64 reserved_44_62 : 19;
u64 doneint : 1;
-#endif
u64 res_addr; /* W1 */
};
/* NPA admin queue result structure */
struct npa_aq_res_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_17_63 : 47; /* W0 */
- u64 doneint : 1;
- u64 compcode : 8;
- u64 ctype : 4;
- u64 op : 4;
-#else
- u64 op : 4;
+ u64 op : 4; /* W0 */
u64 ctype : 4;
u64 compcode : 8;
u64 doneint : 1;
u64 reserved_17_63 : 47;
-#endif
u64 reserved_64_127; /* W1 */
};
struct npa_aura_s {
u64 pool_addr; /* W0 */
-#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */
- u64 avg_level : 8;
- u64 reserved_118_119 : 2;
- u64 shift : 6;
- u64 aura_drop : 8;
- u64 reserved_98_103 : 6;
- u64 bp_ena : 2;
- u64 aura_drop_ena : 1;
- u64 pool_drop_ena : 1;
- u64 reserved_93 : 1;
- u64 avg_con : 9;
- u64 pool_way_mask : 16;
- u64 pool_caching : 1;
- u64 reserved_65 : 2;
- u64 ena : 1;
-#else
- u64 ena : 1;
+ u64 ena : 1; /* W1 */
u64 reserved_65 : 2;
u64 pool_caching : 1;
u64 pool_way_mask : 16;
@@ -172,59 +210,24 @@ struct npa_aura_s {
u64 shift : 6;
u64 reserved_118_119 : 2;
u64 avg_level : 8;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W2 */
- u64 reserved_189_191 : 3;
- u64 nix1_bpid : 9;
- u64 reserved_177_179 : 3;
- u64 nix0_bpid : 9;
- u64 reserved_164_167 : 4;
- u64 count : 36;
-#else
- u64 count : 36;
+ u64 count : 36; /* W2 */
u64 reserved_164_167 : 4;
u64 nix0_bpid : 9;
u64 reserved_177_179 : 3;
u64 nix1_bpid : 9;
u64 reserved_189_191 : 3;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W3 */
- u64 reserved_252_255 : 4;
- u64 fc_hyst_bits : 4;
- u64 fc_stype : 2;
- u64 fc_up_crossing : 1;
- u64 fc_ena : 1;
- u64 reserved_240_243 : 4;
- u64 bp : 8;
- u64 reserved_228_231 : 4;
- u64 limit : 36;
-#else
- u64 limit : 36;
+ u64 limit : 36; /* W3 */
u64 reserved_228_231 : 4;
u64 bp : 8;
- u64 reserved_240_243 : 4;
+ u64 reserved_241_243 : 3;
+ u64 fc_be : 1;
u64 fc_ena : 1;
u64 fc_up_crossing : 1;
u64 fc_stype : 2;
u64 fc_hyst_bits : 4;
u64 reserved_252_255 : 4;
-#endif
u64 fc_addr; /* W4 */
-#if defined(__BIG_ENDIAN_BITFIELD) /* W5 */
- u64 reserved_379_383 : 5;
- u64 err_qint_idx : 7;
- u64 reserved_371 : 1;
- u64 thresh_qint_idx : 7;
- u64 reserved_363 : 1;
- u64 thresh_up : 1;
- u64 thresh_int_ena : 1;
- u64 thresh_int : 1;
- u64 err_int_ena : 8;
- u64 err_int : 8;
- u64 update_time : 16;
- u64 pool_drop : 8;
-#else
- u64 pool_drop : 8;
+ u64 pool_drop : 8; /* W5 */
u64 update_time : 16;
u64 err_int : 8;
u64 err_int_ena : 8;
@@ -236,31 +239,15 @@ struct npa_aura_s {
u64 reserved_371 : 1;
u64 err_qint_idx : 7;
u64 reserved_379_383 : 5;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W6 */
- u64 reserved_420_447 : 28;
- u64 thresh : 36;
-#else
- u64 thresh : 36;
- u64 reserved_420_447 : 28;
-#endif
+ u64 thresh : 36; /* W6 */
+ u64 rsvd_423_420 : 4;
+ u64 fc_msh_dst : 11;
+ u64 reserved_435_447 : 13;
u64 reserved_448_511; /* W7 */
};
struct npa_pool_s {
u64 stack_base; /* W0 */
-#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */
- u64 reserved_115_127 : 13;
- u64 buf_size : 11;
- u64 reserved_100_103 : 4;
- u64 buf_offset : 12;
- u64 stack_way_mask : 16;
- u64 reserved_70_71 : 3;
- u64 stack_caching : 1;
- u64 reserved_66_67 : 2;
- u64 nat_align : 1;
- u64 ena : 1;
-#else
u64 ena : 1;
u64 nat_align : 1;
u64 reserved_66_67 : 2;
@@ -271,36 +258,10 @@ struct npa_pool_s {
u64 reserved_100_103 : 4;
u64 buf_size : 11;
u64 reserved_115_127 : 13;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W2 */
- u64 stack_pages : 32;
- u64 stack_max_pages : 32;
-#else
u64 stack_max_pages : 32;
u64 stack_pages : 32;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W3 */
- u64 reserved_240_255 : 16;
- u64 op_pc : 48;
-#else
u64 op_pc : 48;
u64 reserved_240_255 : 16;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W4 */
- u64 reserved_316_319 : 4;
- u64 update_time : 16;
- u64 reserved_297_299 : 3;
- u64 fc_up_crossing : 1;
- u64 fc_hyst_bits : 4;
- u64 fc_stype : 2;
- u64 fc_ena : 1;
- u64 avg_con : 9;
- u64 avg_level : 8;
- u64 reserved_270_271 : 2;
- u64 shift : 6;
- u64 reserved_260_263 : 4;
- u64 stack_offset : 4;
-#else
u64 stack_offset : 4;
u64 reserved_260_263 : 4;
u64 shift : 6;
@@ -311,26 +272,13 @@ struct npa_pool_s {
u64 fc_stype : 2;
u64 fc_hyst_bits : 4;
u64 fc_up_crossing : 1;
- u64 reserved_297_299 : 3;
+ u64 fc_be : 1;
+ u64 reserved_298_299 : 2;
u64 update_time : 16;
u64 reserved_316_319 : 4;
-#endif
u64 fc_addr; /* W5 */
u64 ptr_start; /* W6 */
u64 ptr_end; /* W7 */
-#if defined(__BIG_ENDIAN_BITFIELD) /* W8 */
- u64 reserved_571_575 : 5;
- u64 err_qint_idx : 7;
- u64 reserved_563 : 1;
- u64 thresh_qint_idx : 7;
- u64 reserved_555 : 1;
- u64 thresh_up : 1;
- u64 thresh_int_ena : 1;
- u64 thresh_int : 1;
- u64 err_int_ena : 8;
- u64 err_int : 8;
- u64 reserved_512_535 : 24;
-#else
u64 reserved_512_535 : 24;
u64 err_int : 8;
u64 err_int_ena : 8;
@@ -342,14 +290,10 @@ struct npa_pool_s {
u64 reserved_563 : 1;
u64 err_qint_idx : 7;
u64 reserved_571_575 : 5;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W9 */
- u64 reserved_612_639 : 28;
- u64 thresh : 36;
-#else
u64 thresh : 36;
- u64 reserved_612_639 : 28;
-#endif
+ u64 rsvd_615_612 : 4;
+ u64 fc_msh_dst : 11;
+ u64 reserved_627_639 : 13;
u64 reserved_640_703; /* W10 */
u64 reserved_704_767; /* W11 */
u64 reserved_768_831; /* W12 */
@@ -377,6 +321,7 @@ enum nix_aq_ctype {
NIX_AQ_CTYPE_MCE = 0x3,
NIX_AQ_CTYPE_RSS = 0x4,
NIX_AQ_CTYPE_DYNO = 0x5,
+ NIX_AQ_CTYPE_BANDPROF = 0x6,
};
/* NIX admin queue instruction opcodes */
@@ -391,59 +336,29 @@ enum nix_aq_instop {
/* NIX admin queue instruction structure */
struct nix_aq_inst_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 doneint : 1; /* W0 */
- u64 reserved_44_62 : 19;
- u64 cindex : 20;
- u64 reserved_15_23 : 9;
- u64 lf : 7;
- u64 ctype : 4;
- u64 op : 4;
-#else
u64 op : 4;
u64 ctype : 4;
- u64 lf : 7;
- u64 reserved_15_23 : 9;
+ u64 lf : 9;
+ u64 reserved_17_23 : 7;
u64 cindex : 20;
u64 reserved_44_62 : 19;
u64 doneint : 1;
-#endif
u64 res_addr; /* W1 */
};
/* NIX admin queue result structure */
struct nix_aq_res_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_17_63 : 47; /* W0 */
- u64 doneint : 1;
- u64 compcode : 8;
- u64 ctype : 4;
- u64 op : 4;
-#else
u64 op : 4;
u64 ctype : 4;
u64 compcode : 8;
u64 doneint : 1;
u64 reserved_17_63 : 47;
-#endif
u64 reserved_64_127; /* W1 */
};
/* NIX Completion queue context structure */
struct nix_cq_ctx_s {
u64 base;
-#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */
- u64 wrptr : 20;
- u64 avg_con : 9;
- u64 cint_idx : 7;
- u64 cq_err : 1;
- u64 qint_idx : 7;
- u64 rsvd_81_83 : 3;
- u64 bpid : 9;
- u64 rsvd_69_71 : 3;
- u64 bp_ena : 1;
- u64 rsvd_64_67 : 4;
-#else
u64 rsvd_64_67 : 4;
u64 bp_ena : 1;
u64 rsvd_69_71 : 3;
@@ -454,31 +369,10 @@ struct nix_cq_ctx_s {
u64 cint_idx : 7;
u64 avg_con : 9;
u64 wrptr : 20;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W2 */
- u64 update_time : 16;
- u64 avg_level : 8;
- u64 head : 20;
- u64 tail : 20;
-#else
u64 tail : 20;
u64 head : 20;
u64 avg_level : 8;
u64 update_time : 16;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W3 */
- u64 cq_err_int_ena : 8;
- u64 cq_err_int : 8;
- u64 qsize : 4;
- u64 rsvd_233_235 : 3;
- u64 caching : 1;
- u64 substream : 20;
- u64 rsvd_210_211 : 2;
- u64 ena : 1;
- u64 drop_ena : 1;
- u64 drop : 8;
- u64 bp : 8;
-#else
u64 bp : 8;
u64 drop : 8;
u64 drop_ena : 1;
@@ -490,20 +384,161 @@ struct nix_cq_ctx_s {
u64 qsize : 4;
u64 cq_err_int : 8;
u64 cq_err_int_ena : 8;
-#endif
+};
+
+/* CN10K NIX Receive queue context structure */
+struct nix_cn10k_rq_ctx_s {
+ u64 ena : 1;
+ u64 sso_ena : 1;
+ u64 ipsech_ena : 1;
+ u64 ena_wqwd : 1;
+ u64 cq : 20;
+ u64 rsvd_36_24 : 13;
+ u64 lenerr_dis : 1;
+ u64 csum_il4_dis : 1;
+ u64 csum_ol4_dis : 1;
+ u64 len_il4_dis : 1;
+ u64 len_il3_dis : 1;
+ u64 len_ol4_dis : 1;
+ u64 len_ol3_dis : 1;
+ u64 wqe_aura : 20;
+ u64 spb_aura : 20;
+ u64 lpb_aura : 20;
+ u64 sso_grp : 10;
+ u64 sso_tt : 2;
+ u64 pb_caching : 2;
+ u64 wqe_caching : 1;
+ u64 xqe_drop_ena : 1;
+ u64 spb_drop_ena : 1;
+ u64 lpb_drop_ena : 1;
+ u64 pb_stashing : 1;
+ u64 ipsecd_drop_ena : 1;
+ u64 chi_ena : 1;
+ u64 rsvd_127_125 : 3;
+ u64 band_prof_id : 10; /* W2 */
+ u64 rsvd_138 : 1;
+ u64 policer_ena : 1;
+ u64 spb_sizem1 : 6;
+ u64 wqe_skip : 2;
+ u64 rsvd_150_148 : 3;
+ u64 spb_ena : 1;
+ u64 lpb_sizem1 : 12;
+ u64 first_skip : 7;
+ u64 rsvd_171 : 1;
+ u64 later_skip : 6;
+ u64 xqe_imm_size : 6;
+ u64 rsvd_189_184 : 6;
+ u64 xqe_imm_copy : 1;
+ u64 xqe_hdr_split : 1;
+ u64 xqe_drop : 8; /* W3 */
+ u64 xqe_pass : 8;
+ u64 wqe_pool_drop : 8;
+ u64 wqe_pool_pass : 8;
+ u64 spb_aura_drop : 8;
+ u64 spb_aura_pass : 8;
+ u64 spb_pool_drop : 8;
+ u64 spb_pool_pass : 8;
+ u64 lpb_aura_drop : 8; /* W4 */
+ u64 lpb_aura_pass : 8;
+ u64 lpb_pool_drop : 8;
+ u64 lpb_pool_pass : 8;
+ u64 rsvd_291_288 : 4;
+ u64 rq_int : 8;
+ u64 rq_int_ena : 8;
+ u64 qint_idx : 7;
+ u64 rsvd_319_315 : 5;
+ u64 ltag : 24; /* W5 */
+ u64 good_utag : 8;
+ u64 bad_utag : 8;
+ u64 flow_tagw : 6;
+ u64 ipsec_vwqe : 1;
+ u64 vwqe_ena : 1;
+ u64 vwqe_wait : 8;
+ u64 max_vsize_exp : 4;
+ u64 vwqe_skip : 2;
+ u64 rsvd_383_382 : 2;
+ u64 octs : 48; /* W6 */
+ u64 rsvd_447_432 : 16;
+ u64 pkts : 48; /* W7 */
+ u64 rsvd_511_496 : 16;
+ u64 drop_octs : 48; /* W8 */
+ u64 rsvd_575_560 : 16;
+ u64 drop_pkts : 48; /* W9 */
+ u64 rsvd_639_624 : 16;
+ u64 re_pkts : 48; /* W10 */
+ u64 rsvd_703_688 : 16;
+ u64 rsvd_767_704; /* W11 */
+ u64 rsvd_831_768; /* W12 */
+ u64 rsvd_895_832; /* W13 */
+ u64 rsvd_959_896; /* W14 */
+ u64 rsvd_1023_960; /* W15 */
+};
+
+/* CN10K NIX Send queue context structure */
+struct nix_cn10k_sq_ctx_s {
+ u64 ena : 1;
+ u64 qint_idx : 6;
+ u64 substream : 20;
+ u64 sdp_mcast : 1;
+ u64 cq : 20;
+ u64 sqe_way_mask : 16;
+ u64 smq : 10; /* W1 */
+ u64 cq_ena : 1;
+ u64 xoff : 1;
+ u64 sso_ena : 1;
+ u64 smq_rr_weight : 14;
+ u64 default_chan : 12;
+ u64 sqb_count : 16;
+ u64 rsvd_120_119 : 2;
+ u64 smq_rr_count_lb : 7;
+ u64 smq_rr_count_ub : 25; /* W2 */
+ u64 sqb_aura : 20;
+ u64 sq_int : 8;
+ u64 sq_int_ena : 8;
+ u64 sqe_stype : 2;
+ u64 rsvd_191 : 1;
+ u64 max_sqe_size : 2; /* W3 */
+ u64 cq_limit : 8;
+ u64 lmt_dis : 1;
+ u64 mnq_dis : 1;
+ u64 smq_next_sq : 20;
+ u64 smq_lso_segnum : 8;
+ u64 tail_offset : 6;
+ u64 smenq_offset : 6;
+ u64 head_offset : 6;
+ u64 smenq_next_sqb_vld : 1;
+ u64 smq_pend : 1;
+ u64 smq_next_sq_vld : 1;
+ u64 rsvd_255_253 : 3;
+ u64 next_sqb : 64; /* W4 */
+ u64 tail_sqb : 64; /* W5 */
+ u64 smenq_sqb : 64; /* W6 */
+ u64 smenq_next_sqb : 64; /* W7 */
+ u64 head_sqb : 64; /* W8 */
+ u64 rsvd_583_576 : 8; /* W9 */
+ u64 vfi_lso_total : 18;
+ u64 vfi_lso_sizem1 : 3;
+ u64 vfi_lso_sb : 8;
+ u64 vfi_lso_mps : 14;
+ u64 vfi_lso_vlan0_ins_ena : 1;
+ u64 vfi_lso_vlan1_ins_ena : 1;
+ u64 vfi_lso_vld : 1;
+ u64 rsvd_639_630 : 10;
+ u64 scm_lso_rem : 18; /* W10 */
+ u64 rsvd_703_658 : 46;
+ u64 octs : 48; /* W11 */
+ u64 rsvd_767_752 : 16;
+ u64 pkts : 48; /* W12 */
+ u64 rsvd_831_816 : 16;
+ u64 rsvd_895_832 : 64; /* W13 */
+ u64 dropped_octs : 48;
+ u64 rsvd_959_944 : 16;
+ u64 dropped_pkts : 48;
+ u64 rsvd_1023_1008 : 16;
};
/* NIX Receive queue context structure */
struct nix_rq_ctx_s {
-#if defined(__BIG_ENDIAN_BITFIELD) /* W0 */
- u64 wqe_aura : 20;
- u64 substream : 20;
- u64 cq : 20;
- u64 ena_wqwd : 1;
- u64 ipsech_ena : 1;
- u64 sso_ena : 1;
- u64 ena : 1;
-#else
u64 ena : 1;
u64 sso_ena : 1;
u64 ipsech_ena : 1;
@@ -511,19 +546,6 @@ struct nix_rq_ctx_s {
u64 cq : 20;
u64 substream : 20;
u64 wqe_aura : 20;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */
- u64 rsvd_127_122 : 6;
- u64 lpb_drop_ena : 1;
- u64 spb_drop_ena : 1;
- u64 xqe_drop_ena : 1;
- u64 wqe_caching : 1;
- u64 pb_caching : 2;
- u64 sso_tt : 2;
- u64 sso_grp : 10;
- u64 lpb_aura : 20;
- u64 spb_aura : 20;
-#else
u64 spb_aura : 20;
u64 lpb_aura : 20;
u64 sso_grp : 10;
@@ -534,23 +556,7 @@ struct nix_rq_ctx_s {
u64 spb_drop_ena : 1;
u64 lpb_drop_ena : 1;
u64 rsvd_127_122 : 6;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W2 */
- u64 xqe_hdr_split : 1;
- u64 xqe_imm_copy : 1;
- u64 rsvd_189_184 : 6;
- u64 xqe_imm_size : 6;
- u64 later_skip : 6;
- u64 rsvd_171 : 1;
- u64 first_skip : 7;
- u64 lpb_sizem1 : 12;
- u64 spb_ena : 1;
- u64 rsvd_150_148 : 3;
- u64 wqe_skip : 2;
- u64 spb_sizem1 : 6;
- u64 rsvd_139_128 : 12;
-#else
- u64 rsvd_139_128 : 12;
+ u64 rsvd_139_128 : 12; /* W2 */
u64 spb_sizem1 : 6;
u64 wqe_skip : 2;
u64 rsvd_150_148 : 3;
@@ -563,18 +569,7 @@ struct nix_rq_ctx_s {
u64 rsvd_189_184 : 6;
u64 xqe_imm_copy : 1;
u64 xqe_hdr_split : 1;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W3 */
- u64 spb_pool_pass : 8;
- u64 spb_pool_drop : 8;
- u64 spb_aura_pass : 8;
- u64 spb_aura_drop : 8;
- u64 wqe_pool_pass : 8;
- u64 wqe_pool_drop : 8;
- u64 xqe_pass : 8;
- u64 xqe_drop : 8;
-#else
- u64 xqe_drop : 8;
+ u64 xqe_drop : 8; /* W3 */
u64 xqe_pass : 8;
u64 wqe_pool_drop : 8;
u64 wqe_pool_pass : 8;
@@ -582,19 +577,7 @@ struct nix_rq_ctx_s {
u64 spb_aura_pass : 8;
u64 spb_pool_drop : 8;
u64 spb_pool_pass : 8;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W4 */
- u64 rsvd_319_315 : 5;
- u64 qint_idx : 7;
- u64 rq_int_ena : 8;
- u64 rq_int : 8;
- u64 rsvd_291_288 : 4;
- u64 lpb_pool_pass : 8;
- u64 lpb_pool_drop : 8;
- u64 lpb_aura_pass : 8;
- u64 lpb_aura_drop : 8;
-#else
- u64 lpb_aura_drop : 8;
+ u64 lpb_aura_drop : 8; /* W4 */
u64 lpb_aura_pass : 8;
u64 lpb_pool_drop : 8;
u64 lpb_pool_pass : 8;
@@ -603,55 +586,21 @@ struct nix_rq_ctx_s {
u64 rq_int_ena : 8;
u64 qint_idx : 7;
u64 rsvd_319_315 : 5;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W5 */
- u64 rsvd_383_366 : 18;
- u64 flow_tagw : 6;
- u64 bad_utag : 8;
- u64 good_utag : 8;
- u64 ltag : 24;
-#else
- u64 ltag : 24;
+ u64 ltag : 24; /* W5 */
u64 good_utag : 8;
u64 bad_utag : 8;
u64 flow_tagw : 6;
u64 rsvd_383_366 : 18;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W6 */
- u64 rsvd_447_432 : 16;
- u64 octs : 48;
-#else
- u64 octs : 48;
+ u64 octs : 48; /* W6 */
u64 rsvd_447_432 : 16;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W7 */
- u64 rsvd_511_496 : 16;
- u64 pkts : 48;
-#else
- u64 pkts : 48;
+ u64 pkts : 48; /* W7 */
u64 rsvd_511_496 : 16;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W8 */
+ u64 drop_octs : 48; /* W8 */
u64 rsvd_575_560 : 16;
- u64 drop_octs : 48;
-#else
- u64 drop_octs : 48;
- u64 rsvd_575_560 : 16;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W9 */
- u64 rsvd_639_624 : 16;
- u64 drop_pkts : 48;
-#else
- u64 drop_pkts : 48;
+ u64 drop_pkts : 48; /* W9 */
u64 rsvd_639_624 : 16;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W10 */
- u64 rsvd_703_688 : 16;
- u64 re_pkts : 48;
-#else
- u64 re_pkts : 48;
+ u64 re_pkts : 48; /* W10 */
u64 rsvd_703_688 : 16;
-#endif
u64 rsvd_767_704; /* W11 */
u64 rsvd_831_768; /* W12 */
u64 rsvd_895_832; /* W13 */
@@ -674,30 +623,12 @@ enum nix_stype {
/* NIX Send queue context structure */
struct nix_sq_ctx_s {
-#if defined(__BIG_ENDIAN_BITFIELD) /* W0 */
- u64 sqe_way_mask : 16;
- u64 cq : 20;
- u64 sdp_mcast : 1;
- u64 substream : 20;
- u64 qint_idx : 6;
- u64 ena : 1;
-#else
u64 ena : 1;
u64 qint_idx : 6;
u64 substream : 20;
u64 sdp_mcast : 1;
u64 cq : 20;
u64 sqe_way_mask : 16;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */
- u64 sqb_count : 16;
- u64 default_chan : 12;
- u64 smq_rr_quantum : 24;
- u64 sso_ena : 1;
- u64 xoff : 1;
- u64 cq_ena : 1;
- u64 smq : 9;
-#else
u64 smq : 9;
u64 cq_ena : 1;
u64 xoff : 1;
@@ -705,37 +636,12 @@ struct nix_sq_ctx_s {
u64 smq_rr_quantum : 24;
u64 default_chan : 12;
u64 sqb_count : 16;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W2 */
- u64 rsvd_191 : 1;
- u64 sqe_stype : 2;
- u64 sq_int_ena : 8;
- u64 sq_int : 8;
- u64 sqb_aura : 20;
- u64 smq_rr_count : 25;
-#else
u64 smq_rr_count : 25;
u64 sqb_aura : 20;
u64 sq_int : 8;
u64 sq_int_ena : 8;
u64 sqe_stype : 2;
u64 rsvd_191 : 1;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W3 */
- u64 rsvd_255_253 : 3;
- u64 smq_next_sq_vld : 1;
- u64 smq_pend : 1;
- u64 smenq_next_sqb_vld : 1;
- u64 head_offset : 6;
- u64 smenq_offset : 6;
- u64 tail_offset : 6;
- u64 smq_lso_segnum : 8;
- u64 smq_next_sq : 20;
- u64 mnq_dis : 1;
- u64 lmt_dis : 1;
- u64 cq_limit : 8;
- u64 max_sqe_size : 2;
-#else
u64 max_sqe_size : 2;
u64 cq_limit : 8;
u64 lmt_dis : 1;
@@ -749,23 +655,11 @@ struct nix_sq_ctx_s {
u64 smq_pend : 1;
u64 smq_next_sq_vld : 1;
u64 rsvd_255_253 : 3;
-#endif
u64 next_sqb : 64;/* W4 */
u64 tail_sqb : 64;/* W5 */
u64 smenq_sqb : 64;/* W6 */
u64 smenq_next_sqb : 64;/* W7 */
u64 head_sqb : 64;/* W8 */
-#if defined(__BIG_ENDIAN_BITFIELD) /* W9 */
- u64 rsvd_639_630 : 10;
- u64 vfi_lso_vld : 1;
- u64 vfi_lso_vlan1_ins_ena : 1;
- u64 vfi_lso_vlan0_ins_ena : 1;
- u64 vfi_lso_mps : 14;
- u64 vfi_lso_sb : 8;
- u64 vfi_lso_sizem1 : 3;
- u64 vfi_lso_total : 18;
- u64 rsvd_583_576 : 8;
-#else
u64 rsvd_583_576 : 8;
u64 vfi_lso_total : 18;
u64 vfi_lso_sizem1 : 3;
@@ -775,68 +669,28 @@ struct nix_sq_ctx_s {
u64 vfi_lso_vlan1_ins_ena : 1;
u64 vfi_lso_vld : 1;
u64 rsvd_639_630 : 10;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W10 */
- u64 rsvd_703_658 : 46;
- u64 scm_lso_rem : 18;
-#else
u64 scm_lso_rem : 18;
u64 rsvd_703_658 : 46;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W11 */
- u64 rsvd_767_752 : 16;
- u64 octs : 48;
-#else
u64 octs : 48;
u64 rsvd_767_752 : 16;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W12 */
- u64 rsvd_831_816 : 16;
- u64 pkts : 48;
-#else
u64 pkts : 48;
u64 rsvd_831_816 : 16;
-#endif
u64 rsvd_895_832 : 64;/* W13 */
-#if defined(__BIG_ENDIAN_BITFIELD) /* W14 */
- u64 rsvd_959_944 : 16;
- u64 dropped_octs : 48;
-#else
u64 dropped_octs : 48;
u64 rsvd_959_944 : 16;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W15 */
- u64 rsvd_1023_1008 : 16;
- u64 dropped_pkts : 48;
-#else
u64 dropped_pkts : 48;
u64 rsvd_1023_1008 : 16;
-#endif
};
/* NIX Receive side scaling entry structure*/
struct nix_rsse_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- uint32_t reserved_20_31 : 12;
- uint32_t rq : 20;
-#else
uint32_t rq : 20;
uint32_t reserved_20_31 : 12;
-#endif
};
/* NIX receive multicast/mirror entry structure */
struct nix_rx_mce_s {
-#if defined(__BIG_ENDIAN_BITFIELD) /* W0 */
- uint64_t next : 16;
- uint64_t pf_func : 16;
- uint64_t rsvd_31_24 : 8;
- uint64_t index : 20;
- uint64_t eol : 1;
- uint64_t rsvd_2 : 1;
- uint64_t op : 2;
-#else
uint64_t op : 2;
uint64_t rsvd_2 : 1;
uint64_t eol : 1;
@@ -844,7 +698,89 @@ struct nix_rx_mce_s {
uint64_t rsvd_31_24 : 8;
uint64_t pf_func : 16;
uint64_t next : 16;
-#endif
+};
+
+enum nix_band_prof_layers {
+ BAND_PROF_LEAF_LAYER = 0,
+ BAND_PROF_INVAL_LAYER = 1,
+ BAND_PROF_MID_LAYER = 2,
+ BAND_PROF_TOP_LAYER = 3,
+ BAND_PROF_NUM_LAYERS = 4,
+};
+
+enum NIX_RX_BAND_PROF_ACTIONRESULT_E {
+ NIX_RX_BAND_PROF_ACTIONRESULT_PASS = 0x0,
+ NIX_RX_BAND_PROF_ACTIONRESULT_DROP = 0x1,
+ NIX_RX_BAND_PROF_ACTIONRESULT_RED = 0x2,
+};
+
+enum nix_band_prof_pc_mode {
+ NIX_RX_PC_MODE_VLAN = 0,
+ NIX_RX_PC_MODE_DSCP = 1,
+ NIX_RX_PC_MODE_GEN = 2,
+ NIX_RX_PC_MODE_RSVD = 3,
+};
+
+/* NIX ingress policer bandwidth profile structure */
+struct nix_bandprof_s {
+ uint64_t pc_mode : 2; /* W0 */
+ uint64_t icolor : 2;
+ uint64_t tnl_ena : 1;
+ uint64_t reserved_5_7 : 3;
+ uint64_t peir_exponent : 5;
+ uint64_t reserved_13_15 : 3;
+ uint64_t pebs_exponent : 5;
+ uint64_t reserved_21_23 : 3;
+ uint64_t cir_exponent : 5;
+ uint64_t reserved_29_31 : 3;
+ uint64_t cbs_exponent : 5;
+ uint64_t reserved_37_39 : 3;
+ uint64_t peir_mantissa : 8;
+ uint64_t pebs_mantissa : 8;
+ uint64_t cir_mantissa : 8;
+ uint64_t cbs_mantissa : 8; /* W1 */
+ uint64_t lmode : 1;
+ uint64_t l_sellect : 3;
+ uint64_t rdiv : 4;
+ uint64_t adjust_exponent : 5;
+ uint64_t reserved_85_86 : 2;
+ uint64_t adjust_mantissa : 9;
+ uint64_t gc_action : 2;
+ uint64_t yc_action : 2;
+ uint64_t rc_action : 2;
+ uint64_t meter_algo : 2;
+ uint64_t band_prof_id : 7;
+ uint64_t reserved_111_118 : 8;
+ uint64_t hl_en : 1;
+ uint64_t reserved_120_127 : 8;
+ uint64_t ts : 48; /* W2 */
+ uint64_t reserved_176_191 : 16;
+ uint64_t pe_accum : 32; /* W3 */
+ uint64_t c_accum : 32;
+ uint64_t green_pkt_pass : 48; /* W4 */
+ uint64_t reserved_304_319 : 16;
+ uint64_t yellow_pkt_pass : 48; /* W5 */
+ uint64_t reserved_368_383 : 16;
+ uint64_t red_pkt_pass : 48; /* W6 */
+ uint64_t reserved_432_447 : 16;
+ uint64_t green_octs_pass : 48; /* W7 */
+ uint64_t reserved_496_511 : 16;
+ uint64_t yellow_octs_pass : 48; /* W8 */
+ uint64_t reserved_560_575 : 16;
+ uint64_t red_octs_pass : 48; /* W9 */
+ uint64_t reserved_624_639 : 16;
+ uint64_t green_pkt_drop : 48; /* W10 */
+ uint64_t reserved_688_703 : 16;
+ uint64_t yellow_pkt_drop : 48; /* W11 */
+ uint64_t reserved_752_767 : 16;
+ uint64_t red_pkt_drop : 48; /* W12 */
+ uint64_t reserved_816_831 : 16;
+ uint64_t green_octs_drop : 48; /* W13 */
+ uint64_t reserved_880_895 : 16;
+ uint64_t yellow_octs_drop : 48; /* W14 */
+ uint64_t reserved_944_959 : 16;
+ uint64_t red_octs_drop : 48; /* W15 */
+ uint64_t reserved_1008_1023 : 16;
};
enum nix_lsoalg {
@@ -863,15 +799,6 @@ enum nix_txlayer {
};
struct nix_lso_format {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 rsvd_19_63 : 45;
- u64 alg : 3;
- u64 rsvd_14_15 : 2;
- u64 sizem1 : 2;
- u64 rsvd_10_11 : 2;
- u64 layer : 2;
- u64 offset : 8;
-#else
u64 offset : 8;
u64 layer : 2;
u64 rsvd_10_11 : 2;
@@ -879,24 +806,9 @@ struct nix_lso_format {
u64 rsvd_14_15 : 2;
u64 alg : 3;
u64 rsvd_19_63 : 45;
-#endif
};
struct nix_rx_flowkey_alg {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_35_63 :29;
- u64 ltype_match :4;
- u64 ltype_mask :4;
- u64 sel_chan :1;
- u64 ena :1;
- u64 reserved_24_24 :1;
- u64 lid :3;
- u64 bytesm1 :5;
- u64 hdr_offset :8;
- u64 fn_mask :1;
- u64 ln_mask :1;
- u64 key_offset :6;
-#else
u64 key_offset :6;
u64 ln_mask :1;
u64 fn_mask :1;
@@ -909,7 +821,6 @@ struct nix_rx_flowkey_alg {
u64 ltype_mask :4;
u64 ltype_match :4;
u64 reserved_35_63 :29;
-#endif
};
/* NIX VTAG size */
@@ -917,4 +828,36 @@ enum nix_vtag_size {
VTAGSIZE_T4 = 0x0,
VTAGSIZE_T8 = 0x1,
};
+
+enum nix_tx_vtag_op {
+ NOP = 0x0,
+ VTAG_INSERT = 0x1,
+ VTAG_REPLACE = 0x2,
+};
+
+/* NIX RX VTAG actions */
+#define VTAG_STRIP BIT_ULL(4)
+#define VTAG_CAPTURE BIT_ULL(5)
+
+/* REE admin queue instruction structure */
+struct ree_af_aq_inst_s {
+ u64 rof_ptr_addr;
+ u64 reserved_64_64 : 1;
+ u64 nc : 1;
+ u64 reserved_66_66 : 1;
+ u64 doneint : 1;
+ u64 reserved_68_95 : 28;
+ u64 length : 15;
+ u64 reserved_111_127 : 17;
+};
+
+/* REE ROF file entry structure */
+struct ree_rof_s {
+ u64 addr : 24;
+ u64 reserved_24_31 : 8;
+ u64 typ : 8;
+ u64 reserved_40_63 : 24;
+ u64 data;
+};
+
#endif /* RVU_STRUCT_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c
new file mode 100644
index 000000000000..6f2e7944381b
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c
@@ -0,0 +1,259 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2021 Marvell.
+ *
+ */
+
+#include <linux/bitfield.h>
+#include "rvu.h"
+
+static int rvu_switch_install_rx_rule(struct rvu *rvu, u16 pcifunc,
+ u16 chan_mask)
+{
+ struct npc_install_flow_req req = { 0 };
+ struct npc_install_flow_rsp rsp = { 0 };
+ struct rvu_pfvf *pfvf;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ /* If the pcifunc is not initialized then nothing to do.
+ * This same function will be called again via rvu_switch_update_rules
+ * after pcifunc is initialized.
+ */
+ if (!test_bit(NIXLF_INITIALIZED, &pfvf->flags))
+ return 0;
+
+ ether_addr_copy(req.packet.dmac, pfvf->mac_addr);
+ eth_broadcast_addr((u8 *)&req.mask.dmac);
+ req.hdr.pcifunc = 0; /* AF is requester */
+ req.vf = pcifunc;
+ req.features = BIT_ULL(NPC_DMAC);
+ req.channel = pfvf->rx_chan_base;
+ req.chan_mask = chan_mask;
+ req.intf = pfvf->nix_rx_intf;
+ req.op = NIX_RX_ACTION_DEFAULT;
+ req.default_rule = 1;
+
+ return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
+}
+
+static int rvu_switch_install_tx_rule(struct rvu *rvu, u16 pcifunc, u16 entry)
+{
+ struct npc_install_flow_req req = { 0 };
+ struct npc_install_flow_rsp rsp = { 0 };
+ struct rvu_pfvf *pfvf;
+ u8 lbkid;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ /* If the pcifunc is not initialized then nothing to do.
+ * This same function will be called again via rvu_switch_update_rules
+ * after pcifunc is initialized.
+ */
+ if (!test_bit(NIXLF_INITIALIZED, &pfvf->flags))
+ return 0;
+
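+ /* TX rule: match this PF/VF's MAC address as DMAC and unicast the
+ * packet to the LBK channel of its NIX block, so the traffic is
+ * looped back and accepted by the channel-agnostic RX rule of the
+ * destination PF/VF.
+ */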
+ lbkid = pfvf->nix_blkaddr == BLKADDR_NIX0 ? 0 : 1;
+ ether_addr_copy(req.packet.dmac, pfvf->mac_addr);
+ eth_broadcast_addr((u8 *)&req.mask.dmac);
+ req.hdr.pcifunc = 0; /* AF is requester */
+ req.vf = pcifunc;
+ req.entry = entry;
+ req.features = BIT_ULL(NPC_DMAC);
+ req.intf = pfvf->nix_tx_intf;
+ req.op = NIX_TX_ACTIONOP_UCAST_CHAN;
+ req.index = (lbkid << 8) | RVU_SWITCH_LBK_CHAN;
+ req.set_cntr = 1;
+
+ return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
+}
+
+static int rvu_switch_install_rules(struct rvu *rvu)
+{
+ struct rvu_switch *rswitch = &rvu->rswitch;
+ u16 start = rswitch->start_entry;
+ struct rvu_hwinfo *hw = rvu->hw;
+ int pf, vf, numvfs, hwvf;
+ u16 pcifunc, entry = 0;
+ int err;
+
+ for (pf = 1; pf < hw->total_pfs; pf++) {
+ if (!is_pf_cgxmapped(rvu, pf))
+ continue;
+
+ pcifunc = pf << 10;
+ /* rvu_get_nix_blkaddr sets up the corresponding NIX block
+ * address and NIX RX and TX interfaces for a pcifunc.
+ * Generally it is called during attach call of a pcifunc but it
+ * is called here since we are pre-installing rules before
+ * nixlfs are attached
+ */
+ rvu_get_nix_blkaddr(rvu, pcifunc);
+
+ /* MCAM RX rule for a PF/VF already exists as default unicast
+ * rules installed by AF. Hence change the channel in those
+ * rules to ignore channel so that packets with the required
+ * DMAC received from LBK(by other PF/VFs in system) or from
+ * external world (from wire) are accepted.
+ */
+ err = rvu_switch_install_rx_rule(rvu, pcifunc, 0x0);
+ if (err) {
+ dev_err(rvu->dev, "RX rule for PF%d failed(%d)\n",
+ pf, err);
+ return err;
+ }
+
+ err = rvu_switch_install_tx_rule(rvu, pcifunc, start + entry);
+ if (err) {
+ dev_err(rvu->dev, "TX rule for PF%d failed(%d)\n",
+ pf, err);
+ return err;
+ }
+
+ rswitch->entry2pcifunc[entry++] = pcifunc;
+
+ rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
+ for (vf = 0; vf < numvfs; vf++, hwvf++) {
+ pcifunc = pf << 10 | ((vf + 1) & 0x3FF);
+ rvu_get_nix_blkaddr(rvu, pcifunc);
+
+ err = rvu_switch_install_rx_rule(rvu, pcifunc, 0x0);
+ if (err) {
+ dev_err(rvu->dev,
+ "RX rule for PF%dVF%d failed(%d)\n",
+ pf, vf, err);
+ return err;
+ }
+
+ err = rvu_switch_install_tx_rule(rvu, pcifunc,
+ start + entry);
+ if (err) {
+ dev_err(rvu->dev,
+ "TX rule for PF%dVF%d failed(%d)\n",
+ pf, vf, err);
+ return err;
+ }
+
+ rswitch->entry2pcifunc[entry++] = pcifunc;
+ }
+ }
+
+ return 0;
+}
+
+void rvu_switch_enable(struct rvu *rvu)
+{
+ struct npc_mcam_alloc_entry_req alloc_req = { 0 };
+ struct npc_mcam_alloc_entry_rsp alloc_rsp = { 0 };
+ struct npc_delete_flow_req uninstall_req = { 0 };
+ struct npc_mcam_free_entry_req free_req = { 0 };
+ struct rvu_switch *rswitch = &rvu->rswitch;
+ struct msg_rsp rsp;
+ int ret;
+
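+ /* Allocate a contiguous block of MCAM entries, one TX rule slot per
+ * CGX mapped PF and VF.
+ */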
+ alloc_req.contig = true;
+ alloc_req.count = rvu->cgx_mapped_pfs + rvu->cgx_mapped_vfs;
+ ret = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &alloc_req,
+ &alloc_rsp);
+ if (ret) {
+ dev_err(rvu->dev,
+ "Unable to allocate MCAM entries\n");
+ goto exit;
+ }
+
+ if (alloc_rsp.count != alloc_req.count) {
+ dev_err(rvu->dev,
+ "Unable to allocate %d MCAM entries, got %d\n",
+ alloc_req.count, alloc_rsp.count);
+ goto free_entries;
+ }
+
+ rswitch->entry2pcifunc = kcalloc(alloc_req.count, sizeof(u16),
+ GFP_KERNEL);
+ if (!rswitch->entry2pcifunc)
+ goto free_entries;
+
+ rswitch->used_entries = alloc_rsp.count;
+ rswitch->start_entry = alloc_rsp.entry;
+
+ ret = rvu_switch_install_rules(rvu);
+ if (ret)
+ goto uninstall_rules;
+
+ return;
+
+uninstall_rules:
+ uninstall_req.start = rswitch->start_entry;
+ uninstall_req.end = rswitch->start_entry + rswitch->used_entries - 1;
+ rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &rsp);
+ kfree(rswitch->entry2pcifunc);
+free_entries:
+ free_req.all = 1;
+ rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, &rsp);
+exit:
+ return;
+}
+
+void rvu_switch_disable(struct rvu *rvu)
+{
+ struct npc_delete_flow_req uninstall_req = { 0 };
+ struct npc_mcam_free_entry_req free_req = { 0 };
+ struct rvu_switch *rswitch = &rvu->rswitch;
+ struct rvu_hwinfo *hw = rvu->hw;
+ int pf, vf, numvfs, hwvf;
+ struct msg_rsp rsp;
+ u16 pcifunc;
+ int err;
+
+ if (!rswitch->used_entries)
+ return;
+
+ for (pf = 1; pf < hw->total_pfs; pf++) {
+ if (!is_pf_cgxmapped(rvu, pf))
+ continue;
+
+ pcifunc = pf << 10;
+ err = rvu_switch_install_rx_rule(rvu, pcifunc, 0xFFF);
+ if (err)
+ dev_err(rvu->dev,
+ "Reverting RX rule for PF%d failed(%d)\n",
+ pf, err);
+
+ rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
+ for (vf = 0; vf < numvfs; vf++, hwvf++) {
+ pcifunc = pf << 10 | ((vf + 1) & 0x3FF);
+ err = rvu_switch_install_rx_rule(rvu, pcifunc, 0xFFF);
+ if (err)
+ dev_err(rvu->dev,
+ "Reverting RX rule for PF%dVF%d failed(%d)\n",
+ pf, vf, err);
+ }
+ }
+
+ uninstall_req.start = rswitch->start_entry;
+ uninstall_req.end = rswitch->start_entry + rswitch->used_entries - 1;
+ free_req.all = 1;
+ rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &rsp);
+ rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, &rsp);
+ rswitch->used_entries = 0;
+ kfree(rswitch->entry2pcifunc);
+}
+
+void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc)
+{
+ struct rvu_switch *rswitch = &rvu->rswitch;
+ u32 max = rswitch->used_entries;
+ u16 entry;
+
+ if (!rswitch->used_entries)
+ return;
+
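+ /* Find the MCAM entry reserved for this pcifunc and reinstall its
+ * TX and RX rules.
+ */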
+ for (entry = 0; entry < max; entry++) {
+ if (rswitch->entry2pcifunc[entry] == pcifunc)
+ break;
+ }
+
+ if (entry >= max)
+ return;
+
+ rvu_switch_install_tx_rule(rvu, pcifunc, rswitch->start_entry + entry);
+ rvu_switch_install_rx_rule(rvu, pcifunc, 0x0);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_tim.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_tim.c
new file mode 100644
index 000000000000..fa779cc3fe7a
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_tim.c
@@ -0,0 +1,461 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell.
+ *
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/types.h>
+#include <linux/bitfield.h>
+
+#include "rvu_struct.h"
+#include "rvu_reg.h"
+#include "rvu.h"
+
+#define TIM_CHUNKSIZE_MULTIPLE (16)
+#define TIM_CHUNKSIZE_MIN (TIM_CHUNKSIZE_MULTIPLE * 0x2)
+#define TIM_CHUNKSIZE_MAX (TIM_CHUNKSIZE_MULTIPLE * 0x1FFF)
+
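+/* Read the ARM architected timer: cntvct_el0 gives the current counter
+ * value and cntfrq_el0 its frequency. The mrs instructions are only
+ * emitted on ARM64 builds.
+ */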
+static inline u64 get_tenns_tsc(void)
+{
+ u64 tsc = 0;
+
+#if defined(CONFIG_ARM64)
+ asm volatile("mrs %0, cntvct_el0" : "=r" (tsc));
+#endif
+ return tsc;
+}
+
+static inline u64 get_tenns_clk(void)
+{
+ u64 tsc = 0;
+
+#if defined(CONFIG_ARM64)
+ asm volatile("mrs %0, cntfrq_el0" : "=r" (tsc));
+#endif
+ return tsc;
+}
+
+static inline int tim_block_cn10k_init(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int lf;
+
+ hw->tim.ring_intvls = kmalloc_array(hw->block[BLKTYPE_TIM].lf.max,
+ sizeof(enum tim_ring_interval),
+ GFP_KERNEL);
+ if (!hw->tim.ring_intvls)
+ return -ENOMEM;
+
+ for (lf = 0; lf < hw->block[BLKTYPE_TIM].lf.max; lf++)
+ hw->tim.ring_intvls[lf] = TIM_INTERVAL_INVAL;
+ hw->tim.rings_per_intvl[TIM_INTERVAL_1US] = 0;
+ hw->tim.rings_per_intvl[TIM_INTERVAL_10US] = 0;
+ hw->tim.rings_per_intvl[TIM_INTERVAL_1MS] = 0;
+
+ return 0;
+}
+
+static inline void tim_cn10k_clear_intvl(struct rvu *rvu, int lf)
+{
+ struct tim_rsrc *tim = &rvu->hw->tim;
+
+ if (tim->ring_intvls[lf] != TIM_INTERVAL_INVAL) {
+ tim->rings_per_intvl[tim->ring_intvls[lf]]--;
+ tim->ring_intvls[lf] = TIM_INTERVAL_INVAL;
+ }
+}
+
+static inline void tim_cn10k_record_intvl(struct rvu *rvu, int lf,
+ u64 intervalns)
+{
+ struct tim_rsrc *tim = &rvu->hw->tim;
+ enum tim_ring_interval intvl;
+
+ tim_cn10k_clear_intvl(rvu, lf);
+
+ if (intervalns < (u64)1E4)
+ intvl = TIM_INTERVAL_1US;
+ else if (intervalns < (u64)1E6)
+ intvl = TIM_INTERVAL_10US;
+ else
+ intvl = TIM_INTERVAL_1MS;
+
+ tim->ring_intvls[lf] = intvl;
+ tim->rings_per_intvl[tim->ring_intvls[lf]]++;
+}
+
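+/* Compute the minimum supported ring interval. On OcteonTx2 it is a fixed
+ * number of clock cycles per clock source; on CN10K it depends on how many
+ * rings already use the 1us/10us interval classes.
+ */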
+static inline int tim_get_min_intvl(struct rvu *rvu, u8 clocksource,
+ u64 clockfreq, u64 *intvl_ns,
+ u64 *intvl_cyc)
+{
+ struct tim_rsrc *tim = &rvu->hw->tim;
+ int intvl;
+
+ if (is_rvu_otx2(rvu)) {
+ switch (clocksource) {
+ case TIM_CLK_SRCS_TENNS:
+ case TIM_CLK_SRCS_GPIO:
+ intvl = 256;
+ break;
+ case TIM_CLK_SRCS_GTI:
+ case TIM_CLK_SRCS_PTP:
+ intvl = 300;
+ break;
+ default:
+ return TIM_AF_INVALID_CLOCK_SOURCE;
+ }
+
+ *intvl_cyc = (u64)intvl;
+ } else {
+ if (tim->rings_per_intvl[TIM_INTERVAL_1US] < 8)
+ intvl = (u64)1E3;
+ else if (tim->rings_per_intvl[TIM_INTERVAL_10US] < 8)
+ intvl = (u64)1E4;
+ else
+ intvl = (u64)1E6;
+
+ *intvl_cyc = (u64)DIV_ROUND_UP(clockfreq * (intvl), (u64)1E9);
+ }
+
+ *intvl_ns = (u64)DIV_ROUND_UP((*intvl_cyc) * (u64)1E9, clockfreq);
+
+ return 0;
+}
+
+static int rvu_tim_disable_lf(struct rvu *rvu, int lf, int blkaddr)
+{
+ u64 regval;
+
+ regval = rvu_read64(rvu, blkaddr, TIM_AF_RINGX_CTL1(lf));
+ if ((regval & TIM_AF_RINGX_CTL1_ENA) == 0)
+ return TIM_AF_RING_ALREADY_DISABLED;
+
+ /* Clear TIM_AF_RING(0..255)_CTL1[ENA]. */
+ regval = rvu_read64(rvu, blkaddr, TIM_AF_RINGX_CTL1(lf));
+ regval &= ~TIM_AF_RINGX_CTL1_ENA;
+ rvu_write64(rvu, blkaddr, TIM_AF_RINGX_CTL1(lf), regval);
+
+ /*
+ * Poll until the corresponding ring's
+ * TIM_AF_RING(0..255)_CTL1[RCF_BUSY] is clear.
+ */
+ rvu_poll_reg(rvu, blkaddr, TIM_AF_RINGX_CTL1(lf),
+ TIM_AF_RINGX_CTL1_RCF_BUSY, true);
+ if (!is_rvu_otx2(rvu))
+ tim_cn10k_clear_intvl(rvu, lf);
+
+ return 0;
+}
+
+int rvu_mbox_handler_tim_get_min_intvl(struct rvu *rvu,
+ struct tim_intvl_req *req,
+ struct tim_intvl_rsp *rsp)
+{
+ if (!req->clockfreq)
+ return TIM_AF_INVALID_CLOCK_SOURCE;
+
+ return tim_get_min_intvl(rvu, req->clocksource, req->clockfreq,
+ &rsp->intvl_ns, &rsp->intvl_cyc);
+}
+
+int rvu_mbox_handler_tim_lf_alloc(struct rvu *rvu,
+ struct tim_lf_alloc_req *req,
+ struct tim_lf_alloc_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ int lf, blkaddr;
+ u64 regval;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_TIM, pcifunc);
+ if (blkaddr < 0)
+ return TIM_AF_LF_INVALID;
+
+ lf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr], pcifunc, req->ring);
+ if (lf < 0)
+ return TIM_AF_LF_INVALID;
+
+ /* Check if requested 'TIMLF <=> NPALF' mapping is valid */
+ if (req->npa_pf_func) {
+ /* If default, use 'this' TIMLF's PFFUNC */
+ if (req->npa_pf_func == RVU_DEFAULT_PF_FUNC)
+ req->npa_pf_func = pcifunc;
+ if (!is_pffunc_map_valid(rvu, req->npa_pf_func, BLKTYPE_NPA))
+ return TIM_AF_INVAL_NPA_PF_FUNC;
+ }
+
+ /* Check if requested 'TIMLF <=> SSOLF' mapping is valid */
+ if (req->sso_pf_func) {
+ /* If default, use 'this' SSOLF's PFFUNC */
+ if (req->sso_pf_func == RVU_DEFAULT_PF_FUNC)
+ req->sso_pf_func = pcifunc;
+ if (!is_pffunc_map_valid(rvu, req->sso_pf_func, BLKTYPE_SSO))
+ return TIM_AF_INVAL_SSO_PF_FUNC;
+ }
+
+ regval = (((u64)req->npa_pf_func) << 16) |
+ ((u64)req->sso_pf_func);
+ rvu_write64(rvu, blkaddr, TIM_AF_RINGX_GMCTL(lf), regval);
+
+ rsp->tenns_clk = get_tenns_clk();
+
+ return 0;
+}
+
+int rvu_mbox_handler_tim_lf_free(struct rvu *rvu,
+ struct tim_ring_req *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ int lf, blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_TIM, pcifunc);
+ if (blkaddr < 0)
+ return TIM_AF_LF_INVALID;
+
+ lf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr], pcifunc, req->ring);
+ if (lf < 0)
+ return TIM_AF_LF_INVALID;
+
+ rvu_tim_lf_teardown(rvu, pcifunc, lf, req->ring);
+
+ return 0;
+}
+
+int rvu_mbox_handler_tim_config_ring(struct rvu *rvu,
+ struct tim_config_req *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ u64 intvl_cyc, intvl_ns;
+ int lf, blkaddr;
+ u64 regval;
+ int rc;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_TIM, pcifunc);
+ if (blkaddr < 0)
+ return TIM_AF_LF_INVALID;
+
+ lf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr], pcifunc, req->ring);
+ if (lf < 0)
+ return TIM_AF_LF_INVALID;
+
+ /* Check the inputs. */
+ /* bigendian can only be 1 or 0. */
+ if (req->bigendian & ~1)
+ return TIM_AF_INVALID_BIG_ENDIAN_VALUE;
+
+ /* enableperiodic can only be 1 or 0. */
+ if (req->enableperiodic & ~1)
+ return TIM_AF_INVALID_ENABLE_PERIODIC;
+
+ /* enabledontfreebuffer can only be 1 or 0. */
+ if (req->enabledontfreebuffer & ~1)
+ return TIM_AF_INVALID_ENABLE_DONTFREE;
+
+ /*
+ * enabledontfreebuffer needs to be true if enableperiodic
+ * is enabled.
+ */
+ if (req->enableperiodic && !req->enabledontfreebuffer)
+ return TIM_AF_ENA_DONTFRE_NSET_PERIODIC;
+
+ /* bucketsize needs to be between 2 and 1M (1 << 20). */
+ if (req->bucketsize < 2 || req->bucketsize > 1 << 20)
+ return TIM_AF_INVALID_BSIZE;
+
+ if (req->chunksize % TIM_CHUNKSIZE_MULTIPLE)
+ return TIM_AF_CSIZE_NOT_ALIGNED;
+
+ if (req->chunksize < TIM_CHUNKSIZE_MIN)
+ return TIM_AF_CSIZE_TOO_SMALL;
+
+ if (req->chunksize > TIM_CHUNKSIZE_MAX)
+ return TIM_AF_CSIZE_TOO_BIG;
+
+ rc = tim_get_min_intvl(rvu, req->clocksource, req->clockfreq,
+ &intvl_ns, &intvl_cyc);
+ if (rc)
+ return rc;
+
+ if (req->interval < intvl_cyc || req->intervalns < intvl_ns)
+ return TIM_AF_INTERVAL_TOO_SMALL;
+
+ /* Configure edge of GPIO clock source */
+ if (req->clocksource == TIM_CLK_SRCS_GPIO &&
+ req->gpioedge < TIM_GPIO_INVALID) {
+ regval = rvu_read64(rvu, blkaddr, TIM_AF_FLAGS_REG);
+ if (FIELD_GET(TIM_AF_FLAGS_REG_GPIO_EDGE_MASK, regval) ==
+ TIM_GPIO_NO_EDGE && req->gpioedge == TIM_GPIO_NO_EDGE)
+ return TIM_AF_GPIO_CLK_SRC_NOT_ENABLED;
+ if (req->gpioedge != TIM_GPIO_NO_EDGE && req->gpioedge !=
+ FIELD_GET(TIM_AF_FLAGS_REG_GPIO_EDGE_MASK, regval)) {
+ dev_info(rvu->dev,
+ "Change edge of GPIO input to %d from %lld.\n",
+ (int)req->gpioedge,
+ FIELD_GET(TIM_AF_FLAGS_REG_GPIO_EDGE_MASK,
+ regval));
+ regval &= ~TIM_AF_FLAGS_REG_GPIO_EDGE_MASK;
+ regval |= FIELD_PREP(TIM_AF_FLAGS_REG_GPIO_EDGE_MASK,
+ req->gpioedge);
+ rvu_write64(rvu, blkaddr, TIM_AF_FLAGS_REG, regval);
+ }
+ }
+
+ if (!is_rvu_otx2(rvu))
+ tim_cn10k_record_intvl(rvu, lf, req->intervalns);
+
+ /* CTL0 */
+ /* EXPIRE_OFFSET = 0 and is set correctly when enabling. */
+ regval = req->interval;
+ rvu_write64(rvu, blkaddr, TIM_AF_RINGX_CTL0(lf), regval);
+
+ /* CTL1 */
+ regval = (((u64)req->bigendian) << 53) |
+ (1ull << 48) | /* LOCK_EN */
+ (((u64)req->enableperiodic) << 45) |
+ (((u64)(req->enableperiodic ^ 1)) << 44) | /* ENA_LDWB */
+ (((u64)req->enabledontfreebuffer) << 43) |
+ (u64)(req->bucketsize - 1);
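+ /* The clock source field sits at a different bit position on CN10K */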
+ if (is_rvu_otx2(rvu))
+ regval |= (((u64)req->clocksource) << 51);
+ else
+ regval |= (((u64)req->clocksource) << 40);
+
+ rvu_write64(rvu, blkaddr, TIM_AF_RINGX_CTL1(lf), regval);
+
+ /* CTL2 */
+ regval = ((u64)req->chunksize / TIM_CHUNKSIZE_MULTIPLE) << 40;
+ rvu_write64(rvu, blkaddr, TIM_AF_RINGX_CTL2(lf), regval);
+
+ return 0;
+}
+
+int rvu_mbox_handler_tim_enable_ring(struct rvu *rvu,
+ struct tim_ring_req *req,
+ struct tim_enable_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ int lf, blkaddr;
+ u64 regval;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_TIM, pcifunc);
+ if (blkaddr < 0)
+ return TIM_AF_LF_INVALID;
+
+ lf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr], pcifunc, req->ring);
+ if (lf < 0)
+ return TIM_AF_LF_INVALID;
+
+ /* Error out if the ring is already running. */
+ regval = rvu_read64(rvu, blkaddr, TIM_AF_RINGX_CTL1(lf));
+ if (regval & TIM_AF_RINGX_CTL1_ENA)
+ return TIM_AF_RING_STILL_RUNNING;
+
+ /* Enable the ring. */
+ regval = rvu_read64(rvu, blkaddr, TIM_AF_RINGX_CTL1(lf));
+ regval |= TIM_AF_RINGX_CTL1_ENA;
+ rvu_write64(rvu, blkaddr, TIM_AF_RINGX_CTL1(lf), regval);
+
+ rsp->timestarted = get_tenns_tsc();
+ rsp->currentbucket = (regval >> 20) & 0xfffff;
+
+ return 0;
+}
+
+int rvu_mbox_handler_tim_disable_ring(struct rvu *rvu,
+ struct tim_ring_req *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ int lf, blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_TIM, pcifunc);
+ if (blkaddr < 0)
+ return TIM_AF_LF_INVALID;
+
+ lf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr], pcifunc, req->ring);
+ if (lf < 0)
+ return TIM_AF_LF_INVALID;
+
+ return rvu_tim_disable_lf(rvu, lf, blkaddr);
+}
+
+int rvu_tim_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot)
+{
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_TIM, pcifunc);
+ if (blkaddr < 0)
+ return TIM_AF_LF_INVALID;
+
+ /* Ensure TIM ring is disabled prior to clearing the mapping */
+ rvu_tim_disable_lf(rvu, lf, blkaddr);
+
+ rvu_write64(rvu, blkaddr, TIM_AF_RINGX_GMCTL(lf), 0);
+
+ return 0;
+}
+
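+/* Iterate over every TIM LF (ring) in the TIM block */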
+#define FOR_EACH_TIM_LF(lf) \
+for (lf = 0; lf < hw->block[BLKTYPE_TIM].lf.max; lf++)
+
+int rvu_tim_init(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int lf, blkaddr, rc = 0;
+ u8 gpio_edge;
+ u64 regval;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_TIM, 0);
+ if (blkaddr < 0)
+ return 0;
+
+ if (!is_rvu_otx2(rvu))
+ rc = tim_block_cn10k_init(rvu);
+
+ regval = rvu_read64(rvu, blkaddr, TIM_AF_FLAGS_REG);
+
+ /* Disable the TIM block, if not already disabled. */
+ if (regval & TIM_AF_FLAGS_REG_ENA_TIM) {
+ /* Disable each ring(lf). */
+ FOR_EACH_TIM_LF(lf) {
+ regval = rvu_read64(rvu, blkaddr,
+ TIM_AF_RINGX_CTL1(lf));
+ if (!(regval & TIM_AF_RINGX_CTL1_ENA))
+ continue;
+
+ rvu_tim_disable_lf(rvu, lf, blkaddr);
+ }
+
+ /* Disable the TIM block. */
+ regval = rvu_read64(rvu, blkaddr, TIM_AF_FLAGS_REG);
+ regval &= ~TIM_AF_FLAGS_REG_ENA_TIM;
+ rvu_write64(rvu, blkaddr, TIM_AF_FLAGS_REG, regval);
+ }
+
+ /* Reset each LF. */
+ FOR_EACH_TIM_LF(lf) {
+ rvu_lf_reset(rvu, &hw->block[BLKTYPE_TIM], lf);
+ }
+
+ /* Reset the TIM block; getting a clean slate. */
+ rvu_write64(rvu, blkaddr, TIM_AF_BLK_RST, 0x1);
+ rvu_poll_reg(rvu, blkaddr, TIM_AF_BLK_RST, BIT_ULL(63), true);
+
+ gpio_edge = TIM_GPIO_NO_EDGE;
+
+ /* Enable TIM block. */
+ regval = FIELD_PREP(TIM_AF_FLAGS_REG_GPIO_EDGE_MASK, gpio_edge) |
+ BIT_ULL(2) | /* RESET */
+ BIT_ULL(0); /* ENA_TIM */
+ rvu_write64(rvu, blkaddr, TIM_AF_FLAGS_REG, regval);
+
+ if (is_rvu_otx2(rvu))
+ rvu_tim_hw_fixes(rvu, blkaddr);
+
+ return rc;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c
index 56f90cf9c4c0..775fd4c35794 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
-/* Marvell OcteonTx2 RVU Admin Function driver tracepoints
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2020 Marvell.
*
- * Copyright (C) 2020 Marvell International Ltd.
*/
#define CREATE_TRACE_POINTS
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h
index e6609068e81b..6d19dde52189 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h
@@ -1,7 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Marvell OcteonTx2 RVU Admin Function driver tracepoints
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2020 Marvell.
*
- * Copyright (C) 2020 Marvell International Ltd.
*/
#undef TRACE_SYSTEM
@@ -14,6 +15,8 @@
#include <linux/tracepoint.h>
#include <linux/pci.h>
+#include "mbox.h"
+
TRACE_EVENT(otx2_msg_alloc,
TP_PROTO(const struct pci_dev *pdev, u16 id, u64 size),
TP_ARGS(pdev, id, size),
@@ -25,8 +28,8 @@ TRACE_EVENT(otx2_msg_alloc,
__entry->id = id;
__entry->size = size;
),
- TP_printk("[%s] msg:(0x%x) size:%lld\n", __get_str(dev),
- __entry->id, __entry->size)
+ TP_printk("[%s] msg:(%s) size:%lld\n", __get_str(dev),
+ otx2_mbox_id2name(__entry->id), __entry->size)
);
TRACE_EVENT(otx2_msg_send,
@@ -88,8 +91,8 @@ TRACE_EVENT(otx2_msg_process,
__entry->id = id;
__entry->err = err;
),
- TP_printk("[%s] msg:(0x%x) error:%d\n", __get_str(dev),
- __entry->id, __entry->err)
+ TP_printk("[%s] msg:(%s) error:%d\n", __get_str(dev),
+ otx2_mbox_id2name(__entry->id), __entry->err)
);
#endif /* __RVU_TRACE_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_validation.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_validation.c
new file mode 100644
index 000000000000..c9bb290e6072
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_validation.c
@@ -0,0 +1,984 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include "rvu.h"
+
+#define PCI_DEVID_OCTEONTX2_RVU_PF 0xA063
+#define PCI_DEVID_OCTEONTX2_SSO_RVU_PF 0xA0F9
+#define PCI_DEVID_OCTEONTX2_NPA_RVU_PF 0xA0FB
+#define PCI_DEVID_OCTEONTX2_CPT_RVU_PF 0xA0FD
+#define PCI_DEVID_OCTEONTX2_SDP_RVU_PF 0xA0F6
+#define PCI_DEVID_OCTEONTX2_CPT10_RVU_PF 0xA0F2
+
+static u64 quotas_get_sum(struct rvu_quotas *quotas)
+{
+ u64 lf_sum = 0;
+ int i;
+
+ for (i = 0; i < quotas->cnt; i++)
+ lf_sum += quotas->a[i].val;
+
+ return lf_sum;
+}
+
+static ssize_t quota_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ struct rvu_quota *quota;
+ int val;
+
+ quota = container_of(attr, struct rvu_quota, sysfs);
+
+ if (quota->base->lock)
+ mutex_lock(quota->base->lock);
+ val = quota->val;
+ if (quota->base->lock)
+ mutex_unlock(quota->base->lock);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+
+static ssize_t quota_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int old_val, new_val, res = 0;
+ struct rvu_quota *quota;
+ struct rvu_quotas *base;
+ struct device *dev;
+ u64 lf_sum;
+
+ quota = container_of(attr, struct rvu_quota, sysfs);
+ dev = quota->dev;
+ base = quota->base;
+
+ if (kstrtoint(buf, 0, &new_val)) {
+ dev_err(dev, "Invalid %s quota: %s\n", attr->attr.name, buf);
+ return -EIO;
+ }
+ if (new_val < 0) {
+ dev_err(dev, "Invalid %s quota: %d < 0\n", attr->attr.name,
+ new_val);
+ return -EIO;
+ }
+
+ if (new_val > base->max) {
+ dev_err(dev, "Invalid %s quota: %d > %d\n", attr->attr.name,
+ new_val, base->max);
+ return -EIO;
+ }
+
+ if (base->lock)
+ mutex_lock(base->lock);
+ old_val = quota->val;
+
+ if (base->ops.pre_store)
+ res = base->ops.pre_store(quota->ops_arg, quota, new_val);
+
+ if (res != 0) {
+ res = -EIO;
+ goto unlock;
+ }
+
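+ /* Reject the update if it would push the sum of all quotas over
+ * the global max_sum budget.
+ */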
+ lf_sum = quotas_get_sum(quota->base);
+
+ if (lf_sum + new_val - quota->val > base->max_sum) {
+ dev_err(dev,
+ "Not enough resources for %s quota. Used: %lld, Max: %lld\n",
+ attr->attr.name, lf_sum, base->max_sum);
+ res = -EIO;
+ goto unlock;
+ }
+ quota->val = new_val;
+
+ if (base->ops.post_store)
+ base->ops.post_store(quota->ops_arg, quota, old_val);
+
+ res = count;
+
+unlock:
+ if (base->lock)
+ mutex_unlock(base->lock);
+ return res;
+}
+
+static int quota_sysfs_destroy(struct rvu_quota *quota)
+{
+ if (quota == NULL)
+ return -EINVAL;
+ if (quota->sysfs.attr.mode != 0) {
+ sysfs_remove_file(quota->parent, &quota->sysfs.attr);
+ quota->sysfs.attr.mode = 0;
+ }
+ return 0;
+}
+
+static struct rvu_quotas *quotas_alloc(u32 cnt, u32 max, u64 max_sum,
+ int init_val, struct mutex *lock,
+ struct rvu_quota_ops *ops)
+{
+ struct rvu_quotas *quotas;
+ u64 i;
+
+ if (cnt == 0)
+ return NULL;
+
+ quotas = kzalloc(sizeof(struct rvu_quotas) +
+ cnt * sizeof(struct rvu_quota), GFP_KERNEL);
+ if (quotas == NULL)
+ return NULL;
+
+ for (i = 0; i < cnt; i++) {
+ quotas->a[i].base = quotas;
+ quotas->a[i].val = init_val;
+ }
+
+ quotas->cnt = cnt;
+ quotas->max = max;
+ quotas->max_sum = max_sum;
+ if (ops) {
+ quotas->ops.pre_store = ops->pre_store;
+ quotas->ops.post_store = ops->post_store;
+ }
+ quotas->lock = lock;
+
+ return quotas;
+}
+
+static void quotas_free(struct rvu_quotas *quotas)
+{
+ u64 i;
+
+ if (quotas == NULL)
+ return;
+ WARN_ON(quotas->cnt == 0);
+
+ for (i = 0; i < quotas->cnt; i++)
+ quota_sysfs_destroy(&quotas->a[i]);
+
+ kfree(quotas);
+}
+
+static int quota_sysfs_create(const char *name, struct kobject *parent,
+ struct device *log_dev, struct rvu_quota *quota,
+ void *ops_arg)
+{
+ int err;
+
+ if (name == NULL || quota == NULL || log_dev == NULL)
+ return -EINVAL;
+
+ quota->sysfs.show = quota_show;
+ quota->sysfs.store = quota_store;
+ quota->sysfs.attr.name = name;
+ quota->sysfs.attr.mode = 0644;
+ quota->parent = parent;
+ quota->dev = log_dev;
+ quota->ops_arg = ops_arg;
+
+ sysfs_attr_init(&quota->sysfs.attr);
+ err = sysfs_create_file(quota->parent, &quota->sysfs.attr);
+ if (err) {
+ dev_err(quota->dev,
+ "Failed to create '%s' quota sysfs for '%s'\n",
+ name, kobject_name(quota->parent));
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
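+/* Count LFs of @block owned by the PF/VF family selected by @rshift:
+ * RVU_PFVF_PF_SHIFT counts the PF and all of its VFs, 0 counts only the
+ * exact pcifunc.
+ */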
+static int rvu_blk_count_rsrc(struct rvu_block *block, u16 pcifunc, u8 rshift)
+{
+ int count = 0, lf;
+
+ for (lf = 0; lf < block->lf.max; lf++)
+ if ((block->fn_map[lf] >> rshift) == (pcifunc >> rshift) &&
+ block->fn_map[lf] != 0)
+ count++;
+
+ return count;
+}
+
+static int rvu_txsch_count_rsrc(struct rvu *rvu, int lvl, u16 pcifunc,
+ u8 rshift, struct nix_hw *nix_hw)
+{
+ struct nix_txsch *txsch = &nix_hw->txsch[lvl];
+ int count = 0, schq;
+
+ if (lvl == NIX_TXSCH_LVL_TL1)
+ return 0;
+
+ for (schq = 0; schq < txsch->schq.max; schq++) {
+ if (TXSCH_MAP_FLAGS(txsch->pfvf_map[schq]) & NIX_TXSCHQ_FREE)
+ continue;
+ if ((TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) >> rshift) ==
+ (pcifunc >> rshift))
+ count++;
+ }
+
+ return count;
+}
+
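+/* Report the raw free LF/schq counts per block. Used on CN10K, where the
+ * per-PF quota limits applied in the mailbox handler below are not
+ * enforced.
+ */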
+static int free_rsrc_cnt(struct rvu *rvu, struct msg_req *req,
+ struct free_rsrcs_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ struct nix_txsch *txsch;
+ struct nix_hw *nix_hw;
+
+ mutex_lock(&rvu->rsrc_lock);
+
+ block = &hw->block[BLKADDR_NPA];
+ rsp->npa = rvu_rsrc_free_count(&block->lf);
+
+ block = &hw->block[BLKADDR_NIX0];
+ rsp->nix = rvu_rsrc_free_count(&block->lf);
+
+ block = &hw->block[BLKADDR_NIX1];
+ rsp->nix1 = rvu_rsrc_free_count(&block->lf);
+
+ block = &hw->block[BLKADDR_SSO];
+ rsp->sso = rvu_rsrc_free_count(&block->lf);
+
+ block = &hw->block[BLKADDR_SSOW];
+ rsp->ssow = rvu_rsrc_free_count(&block->lf);
+
+ block = &hw->block[BLKADDR_TIM];
+ rsp->tim = rvu_rsrc_free_count(&block->lf);
+
+ block = &hw->block[BLKADDR_CPT0];
+ rsp->cpt = rvu_rsrc_free_count(&block->lf);
+
+ block = &hw->block[BLKADDR_CPT1];
+ rsp->cpt1 = rvu_rsrc_free_count(&block->lf);
+
+ block = &hw->block[BLKADDR_REE0];
+ rsp->ree0 = rvu_rsrc_free_count(&block->lf);
+
+ block = &hw->block[BLKADDR_REE1];
+ rsp->ree1 = rvu_rsrc_free_count(&block->lf);
+
+ if (rvu->hw->cap.nix_fixed_txschq_mapping) {
+ rsp->schq[NIX_TXSCH_LVL_SMQ] = 1;
+ rsp->schq[NIX_TXSCH_LVL_TL4] = 1;
+ rsp->schq[NIX_TXSCH_LVL_TL3] = 1;
+ rsp->schq[NIX_TXSCH_LVL_TL2] = 1;
+ /* NIX1 */
+ if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
+ goto out;
+ rsp->schq_nix1[NIX_TXSCH_LVL_SMQ] = 1;
+ rsp->schq_nix1[NIX_TXSCH_LVL_TL4] = 1;
+ rsp->schq_nix1[NIX_TXSCH_LVL_TL3] = 1;
+ rsp->schq_nix1[NIX_TXSCH_LVL_TL2] = 1;
+ } else {
+ nix_hw = get_nix_hw(hw, BLKADDR_NIX0);
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
+ rsp->schq[NIX_TXSCH_LVL_SMQ] =
+ rvu_rsrc_free_count(&txsch->schq);
+
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL4];
+ rsp->schq[NIX_TXSCH_LVL_TL4] =
+ rvu_rsrc_free_count(&txsch->schq);
+
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL3];
+ rsp->schq[NIX_TXSCH_LVL_TL3] =
+ rvu_rsrc_free_count(&txsch->schq);
+
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
+ rsp->schq[NIX_TXSCH_LVL_TL2] =
+ rvu_rsrc_free_count(&txsch->schq);
+
+ if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
+ goto out;
+
+ nix_hw = get_nix_hw(hw, BLKADDR_NIX1);
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
+ rsp->schq_nix1[NIX_TXSCH_LVL_SMQ] =
+ rvu_rsrc_free_count(&txsch->schq);
+
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL4];
+ rsp->schq_nix1[NIX_TXSCH_LVL_TL4] =
+ rvu_rsrc_free_count(&txsch->schq);
+
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL3];
+ rsp->schq_nix1[NIX_TXSCH_LVL_TL3] =
+ rvu_rsrc_free_count(&txsch->schq);
+
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
+ rsp->schq_nix1[NIX_TXSCH_LVL_TL2] =
+ rvu_rsrc_free_count(&txsch->schq);
+ }
+
+ rsp->schq_nix1[NIX_TXSCH_LVL_TL1] = 1;
+out:
+ rsp->schq[NIX_TXSCH_LVL_TL1] = 1;
+ mutex_unlock(&rvu->rsrc_lock);
+
+ return 0;
+}
+
+int rvu_mbox_handler_free_rsrc_cnt(struct rvu *rvu, struct msg_req *req,
+ struct free_rsrcs_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_block *block;
+ struct nix_txsch *txsch;
+ struct nix_hw *nix_hw;
+ int pf, curlfs;
+
+ if (!is_rvu_otx2(rvu))
+ return free_rsrc_cnt(rvu, req, rsp);
+
+ mutex_lock(&rvu->rsrc_lock);
+ pf = rvu_get_pf(pcifunc);
+
+ block = &hw->block[BLKADDR_NPA];
+ curlfs = rvu_blk_count_rsrc(block, pcifunc, RVU_PFVF_PF_SHIFT);
+ rsp->npa = rvu->pf_limits.npa->a[pf].val - curlfs;
+
+ block = &hw->block[BLKADDR_NIX0];
+ curlfs = rvu_blk_count_rsrc(block, pcifunc, RVU_PFVF_PF_SHIFT);
+ rsp->nix = rvu->pf_limits.nix->a[pf].val - curlfs;
+
+ block = &hw->block[BLKADDR_NIX1];
+ rsp->nix1 = rvu_rsrc_free_count(&block->lf);
+
+ block = &hw->block[BLKADDR_SSO];
+ curlfs = rvu_blk_count_rsrc(block, pcifunc, RVU_PFVF_PF_SHIFT);
+ rsp->sso = rvu->pf_limits.sso->a[pf].val - curlfs;
+
+ block = &hw->block[BLKADDR_SSOW];
+ curlfs = rvu_blk_count_rsrc(block, pcifunc, RVU_PFVF_PF_SHIFT);
+ rsp->ssow = rvu->pf_limits.ssow->a[pf].val - curlfs;
+
+ block = &hw->block[BLKADDR_TIM];
+ curlfs = rvu_blk_count_rsrc(block, pcifunc, RVU_PFVF_PF_SHIFT);
+ rsp->tim = rvu->pf_limits.tim->a[pf].val - curlfs;
+
+ block = &hw->block[BLKADDR_CPT0];
+ rsp->cpt = rvu_rsrc_free_count(&block->lf);
+
+ block = &hw->block[BLKADDR_CPT1];
+ rsp->cpt1 = rvu_rsrc_free_count(&block->lf);
+
+ block = &hw->block[BLKADDR_REE0];
+ rsp->ree0 = rvu_rsrc_free_count(&block->lf);
+
+ block = &hw->block[BLKADDR_REE1];
+ rsp->ree1 = rvu_rsrc_free_count(&block->lf);
+
+ if (rvu->hw->cap.nix_fixed_txschq_mapping) {
+ rsp->schq[NIX_TXSCH_LVL_SMQ] = 1;
+ rsp->schq[NIX_TXSCH_LVL_TL4] = 1;
+ rsp->schq[NIX_TXSCH_LVL_TL3] = 1;
+ rsp->schq[NIX_TXSCH_LVL_TL2] = 1;
+ /* NIX1 */
+ if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
+ goto out;
+ rsp->schq_nix1[NIX_TXSCH_LVL_SMQ] = 1;
+ rsp->schq_nix1[NIX_TXSCH_LVL_TL4] = 1;
+ rsp->schq_nix1[NIX_TXSCH_LVL_TL3] = 1;
+ rsp->schq_nix1[NIX_TXSCH_LVL_TL2] = 1;
+ } else {
+ nix_hw = get_nix_hw(hw, BLKADDR_NIX0);
+ if (!nix_hw)
+ goto err;
+
+ curlfs = rvu_txsch_count_rsrc(rvu, NIX_TXSCH_LVL_SMQ, pcifunc,
+ RVU_PFVF_PF_SHIFT, nix_hw);
+ rsp->schq[NIX_TXSCH_LVL_SMQ] =
+ rvu->pf_limits.smq->a[pf].val - curlfs;
+
+ curlfs = rvu_txsch_count_rsrc(rvu, NIX_TXSCH_LVL_TL4, pcifunc,
+ RVU_PFVF_PF_SHIFT, nix_hw);
+ rsp->schq[NIX_TXSCH_LVL_TL4] =
+ rvu->pf_limits.tl4->a[pf].val - curlfs;
+
+ curlfs = rvu_txsch_count_rsrc(rvu, NIX_TXSCH_LVL_TL3, pcifunc,
+ RVU_PFVF_PF_SHIFT, nix_hw);
+ rsp->schq[NIX_TXSCH_LVL_TL3] =
+ rvu->pf_limits.tl3->a[pf].val - curlfs;
+
+ curlfs = rvu_txsch_count_rsrc(rvu, NIX_TXSCH_LVL_TL2, pcifunc,
+ RVU_PFVF_PF_SHIFT, nix_hw);
+ rsp->schq[NIX_TXSCH_LVL_TL2] =
+ rvu->pf_limits.tl2->a[pf].val - curlfs;
+ /* NIX1 */
+ if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
+ goto out;
+ nix_hw = get_nix_hw(hw, BLKADDR_NIX1);
+ if (!nix_hw)
+ goto err;
+
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
+ rsp->schq_nix1[NIX_TXSCH_LVL_SMQ] =
+ rvu_rsrc_free_count(&txsch->schq);
+
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL4];
+ rsp->schq_nix1[NIX_TXSCH_LVL_TL4] =
+ rvu_rsrc_free_count(&txsch->schq);
+
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL3];
+ rsp->schq_nix1[NIX_TXSCH_LVL_TL3] =
+ rvu_rsrc_free_count(&txsch->schq);
+
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
+ rsp->schq_nix1[NIX_TXSCH_LVL_TL2] =
+ rvu_rsrc_free_count(&txsch->schq);
+ }
+
+ rsp->schq_nix1[NIX_TXSCH_LVL_TL1] = 1;
+out:
+ rsp->schq[NIX_TXSCH_LVL_TL1] = 1;
+err:
+ mutex_unlock(&rvu->rsrc_lock);
+
+ return 0;
+}
+
+int rvu_check_txsch_policy(struct rvu *rvu, struct nix_txsch_alloc_req *req,
+ u16 pcifunc)
+{
+ int lvl, req_schq, pf = rvu_get_pf(pcifunc);
+ int limit, familylfs, delta;
+ struct nix_txsch *txsch;
+ struct nix_hw *nix_hw;
+ int blkaddr;
+
+ if (!is_rvu_otx2(rvu))
+ return 0;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+
+ if (!nix_hw)
+ return -ENODEV;
+
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ txsch = &nix_hw->txsch[lvl];
+ req_schq = req->schq_contig[lvl] + req->schq[lvl];
+
+ switch (lvl) {
+ case NIX_TXSCH_LVL_SMQ:
+ limit = rvu->pf_limits.smq->a[pf].val;
+ break;
+ case NIX_TXSCH_LVL_TL4:
+ limit = rvu->pf_limits.tl4->a[pf].val;
+ break;
+ case NIX_TXSCH_LVL_TL3:
+ limit = rvu->pf_limits.tl3->a[pf].val;
+ break;
+ case NIX_TXSCH_LVL_TL2:
+ limit = rvu->pf_limits.tl2->a[pf].val;
+ break;
+ case NIX_TXSCH_LVL_TL1:
+ if (req_schq > 2)
+ return -ENOSPC;
+ continue;
+ }
+
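+ /* familylfs: schqs already owned by this PF and its VFs;
+ * delta: additional schqs this request needs on top of what the
+ * requesting pcifunc already holds.
+ */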
+ familylfs = rvu_txsch_count_rsrc(rvu, lvl, pcifunc,
+ RVU_PFVF_PF_SHIFT, nix_hw);
+ delta = req_schq - rvu_txsch_count_rsrc(rvu, lvl, pcifunc,
+ 0, nix_hw);
+
+ if ((delta > 0) && /* always allow usage decrease */
+ ((limit < familylfs + delta) ||
+ (delta > rvu_rsrc_free_count(&txsch->schq))))
+ return -ENOSPC;
+ }
+
+ return 0;
+}
+
+int rvu_check_rsrc_policy(struct rvu *rvu, struct rsrc_attach *req,
+ u16 pcifunc)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ int free_lfs, mappedlfs, familylfs, limit, delta;
+ struct rvu_hwinfo *hw = rvu->hw;
+ int pf = rvu_get_pf(pcifunc);
+ struct rvu_block *block;
+
+ if (!is_rvu_otx2(rvu))
+ return 0;
+
+ /* Only one NIX LF can be attached */
+ if (req->nixlf) {
+ block = &hw->block[BLKADDR_NIX0];
+ free_lfs = rvu_rsrc_free_count(&block->lf);
+ limit = rvu->pf_limits.nix->a[pf].val;
+ familylfs = rvu_blk_count_rsrc(block, pcifunc,
+ RVU_PFVF_PF_SHIFT);
+ if (!free_lfs || (limit == familylfs))
+ goto fail;
+ }
+
+ if (req->sso) {
+ block = &hw->block[BLKADDR_SSO];
+ mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
+ free_lfs = rvu_rsrc_free_count(&block->lf);
+ limit = rvu->pf_limits.sso->a[pf].val;
+ familylfs = rvu_blk_count_rsrc(block, pcifunc,
+ RVU_PFVF_PF_SHIFT);
+ /* Check if additional resources are available */
+ delta = req->sso - mappedlfs;
+ if ((delta > 0) && /* always allow usage decrease */
+ ((limit < familylfs + delta) ||
+ (delta > free_lfs)))
+ goto fail;
+ }
+
+ if (req->ssow) {
+ block = &hw->block[BLKADDR_SSOW];
+ mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
+ free_lfs = rvu_rsrc_free_count(&block->lf);
+ limit = rvu->pf_limits.ssow->a[pf].val;
+ familylfs = rvu_blk_count_rsrc(block, pcifunc,
+ RVU_PFVF_PF_SHIFT);
+ /* Check if additional resources are available */
+ delta = req->ssow - mappedlfs;
+ if ((delta > 0) && /* always allow usage decrease */
+ ((limit < familylfs + delta) ||
+ (delta > free_lfs)))
+ goto fail;
+ }
+
+ if (req->timlfs) {
+ block = &hw->block[BLKADDR_TIM];
+ mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
+ free_lfs = rvu_rsrc_free_count(&block->lf);
+ limit = rvu->pf_limits.tim->a[pf].val;
+ familylfs = rvu_blk_count_rsrc(block, pcifunc,
+ RVU_PFVF_PF_SHIFT);
+ /* Check if additional resources are available */
+ delta = req->timlfs - mappedlfs;
+ if ((delta > 0) && /* always allow usage decrease */
+ ((limit < familylfs + delta) ||
+ (delta > free_lfs)))
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ dev_info(rvu->dev, "Request for %s failed\n", block->name);
+ return -ENOSPC;
+}
+
+static int check_mapped_rsrcs(void *arg, struct rvu_quota *quota, int new_val)
+{
+ struct rvu_pfvf *pf = arg;
+ int addr;
+
+ for (addr = 0; addr < BLK_COUNT; addr++) {
+ if (rvu_get_rsrc_mapcount(pf, addr) > 0)
+ return 1;
+ }
+ return 0;
+}
+
+static struct rvu_quota_ops pf_limit_ops = {
+ .pre_store = check_mapped_rsrcs,
+};
+
+static void rvu_set_default_limits(struct rvu *rvu)
+{
+ int i, nvfs, cpt_rvus, npa_rvus, sso_rvus, nix_rvus, nsso, nssow, ntim;
+ int total_cpt_lfs, ncptpf_cptlfs = 0, nssopf_cptlfs = 0;
+ int nnpa, nnix, nsmq = 0, ntl4 = 0, ntl3 = 0, ntl2 = 0;
+ unsigned short devid;
+
+ /* First pass, count the SSO, NIX, CPT and NPA capable PFs. */
+ sso_rvus = 0;
+ nix_rvus = 0;
+ cpt_rvus = 0;
+ npa_rvus = 0;
+ for (i = 0; i < rvu->hw->total_pfs; i++) {
+ if (rvu->pf[i].pdev == NULL)
+ continue;
+ devid = rvu->pf[i].pdev->device;
+ if (devid == PCI_DEVID_OCTEONTX2_SSO_RVU_PF)
+ sso_rvus++;
+ else if (devid == PCI_DEVID_OCTEONTX2_RVU_PF ||
+ devid == PCI_DEVID_OCTEONTX2_RVU_AF ||
+ devid == PCI_DEVID_OCTEONTX2_SDP_RVU_PF)
+ nix_rvus++;
+ else if (devid == PCI_DEVID_OCTEONTX2_CPT_RVU_PF)
+ cpt_rvus++;
+ else if (devid == PCI_DEVID_OCTEONTX2_NPA_RVU_PF)
+ npa_rvus++;
+ }
+ /* Calculate default partitioning. */
+ nsso = rvu->pf_limits.sso->max_sum / sso_rvus;
+ nssow = rvu->pf_limits.ssow->max_sum / sso_rvus;
+ ntim = rvu->pf_limits.tim->max_sum / sso_rvus;
+ total_cpt_lfs = rvu->pf_limits.cpt->max_sum;
+ /* Divide CPT among SSO and CPT PFs since cores shouldn't be shared. */
+ if (total_cpt_lfs) {
+ /* One extra LF needed for inline ipsec inbound configuration */
+ ncptpf_cptlfs = num_online_cpus() + 1;
+ nssopf_cptlfs = (total_cpt_lfs - ncptpf_cptlfs) / sso_rvus;
+ }
+ /* NPA/NIX count depends on the DTS VF config. Allocate until we run out. */
+ nnpa = rvu->pf_limits.npa->max_sum;
+ nnix = rvu->pf_limits.nix->max_sum;
+ if (!rvu->hw->cap.nix_fixed_txschq_mapping) {
+ nsmq = rvu->pf_limits.smq->max_sum / nix_rvus;
+ ntl4 = rvu->pf_limits.tl4->max_sum / nix_rvus;
+ ntl3 = rvu->pf_limits.tl3->max_sum / nix_rvus;
+ ntl2 = rvu->pf_limits.tl2->max_sum / nix_rvus;
+ }
+
+ /* Second pass, set the default limit values. */
+ for (i = 0; i < rvu->hw->total_pfs; i++) {
+ if (rvu->pf[i].pdev == NULL)
+ continue;
+ nvfs = pci_sriov_get_totalvfs(rvu->pf[i].pdev);
+ switch (rvu->pf[i].pdev->device) {
+ case PCI_DEVID_OCTEONTX2_RVU_AF:
+ nnix -= nvfs;
+ nnpa -= nvfs;
+ rvu->pf_limits.nix->a[i].val = nnix > 0 ? nvfs : 0;
+ rvu->pf_limits.npa->a[i].val = nnpa > 0 ? nvfs : 0;
+ if (rvu->hw->cap.nix_fixed_txschq_mapping)
+ break;
+ rvu->pf_limits.smq->a[i].val = nsmq;
+ rvu->pf_limits.tl4->a[i].val = ntl4;
+ rvu->pf_limits.tl3->a[i].val = ntl3;
+ rvu->pf_limits.tl2->a[i].val = ntl2;
+ break;
+ case PCI_DEVID_OCTEONTX2_RVU_PF:
+ nnix -= 1 + nvfs;
+ nnpa -= 1 + nvfs;
+ rvu->pf_limits.nix->a[i].val = nnix > 0 ? 1 + nvfs : 0;
+ rvu->pf_limits.npa->a[i].val = nnpa > 0 ? 1 + nvfs : 0;
+ if (rvu->hw->cap.nix_fixed_txschq_mapping)
+ break;
+ rvu->pf_limits.smq->a[i].val = nsmq;
+ rvu->pf_limits.tl4->a[i].val = ntl4;
+ rvu->pf_limits.tl3->a[i].val = ntl3;
+ rvu->pf_limits.tl2->a[i].val = ntl2;
+ break;
+ case PCI_DEVID_OCTEONTX2_SSO_RVU_PF:
+ nnpa -= 1 + nvfs;
+ rvu->pf_limits.npa->a[i].val = nnpa > 0 ? 1 + nvfs : 0;
+ rvu->pf_limits.sso->a[i].val = nsso;
+ rvu->pf_limits.ssow->a[i].val = nssow;
+ rvu->pf_limits.tim->a[i].val = ntim;
+ rvu->pf_limits.cpt->a[i].val = nssopf_cptlfs;
+ break;
+ case PCI_DEVID_OCTEONTX2_NPA_RVU_PF:
+ nnpa -= 1 + nvfs;
+ rvu->pf_limits.npa->a[i].val = nnpa > 0 ? 1 + nvfs : 0;
+ break;
+ case PCI_DEVID_OCTEONTX2_CPT_RVU_PF:
+ case PCI_DEVID_OCTEONTX2_CPT10_RVU_PF:
+ nnpa -= 1;
+ rvu->pf_limits.npa->a[i].val = nnpa > 0 ? 1 : 0;
+ rvu->pf_limits.cpt->a[i].val = ncptpf_cptlfs;
+ break;
+ case PCI_DEVID_OCTEONTX2_SDP_RVU_PF:
+ nnix -= 1 + nvfs;
+ rvu->pf_limits.nix->a[i].val = nnix > 0 ? 1 + nvfs : 0;
+ if (rvu->hw->cap.nix_fixed_txschq_mapping)
+ break;
+ rvu->pf_limits.smq->a[i].val = nsmq;
+ rvu->pf_limits.tl4->a[i].val = ntl4;
+ rvu->pf_limits.tl3->a[i].val = ntl3;
+ rvu->pf_limits.tl2->a[i].val = ntl2;
+ break;
+ }
+ }
+}
+
+static int rvu_create_limits_sysfs(struct rvu *rvu)
+{
+ struct pci_dev *pdev;
+ struct rvu_pfvf *pf;
+ int i, err = 0;
+
+ for (i = 0; i < rvu->hw->total_pfs; i++) {
+ pf = &rvu->pf[i];
+ if (!pf->pdev)
+ continue;
+ pdev = pf->pdev;
+
+ pf->limits_kobj = kobject_create_and_add("limits",
+ &pdev->dev.kobj);
+ if (!pf->limits_kobj) {
+ err = -ENOMEM;
+ break;
+ }
+
+ if (quota_sysfs_create("sso", pf->limits_kobj, rvu->dev,
+ &rvu->pf_limits.sso->a[i], pf)) {
+ dev_err(rvu->dev,
+ "Failed to allocate quota for sso on %s\n",
+ pci_name(pdev));
+ err = -EFAULT;
+ break;
+ }
+
+ if (quota_sysfs_create("ssow", pf->limits_kobj, rvu->dev,
+ &rvu->pf_limits.ssow->a[i], pf)) {
+ dev_err(rvu->dev,
+ "Failed to allocate quota for ssow, on %s\n",
+ pci_name(pdev));
+ err = -EFAULT;
+ break;
+ }
+
+ if (quota_sysfs_create("tim", pf->limits_kobj, rvu->dev,
+ &rvu->pf_limits.tim->a[i], pf)) {
+ dev_err(rvu->dev,
+ "Failed to allocate quota for tim, on %s\n",
+ pci_name(pdev));
+ err = -EFAULT;
+ break;
+ }
+
+ if (quota_sysfs_create("cpt", pf->limits_kobj, rvu->dev,
+ &rvu->pf_limits.cpt->a[i], pf)) {
+ dev_err(rvu->dev,
+ "Failed to allocate quota for cpt, on %s\n",
+ pci_name(pdev));
+ err = -EFAULT;
+ break;
+ }
+
+ if (quota_sysfs_create("npa", pf->limits_kobj, rvu->dev,
+ &rvu->pf_limits.npa->a[i], pf)) {
+ dev_err(rvu->dev,
+ "Failed to allocate quota for npa, on %s\n",
+ pci_name(pdev));
+ err = -EFAULT;
+ break;
+ }
+
+ if (quota_sysfs_create("nix", pf->limits_kobj, rvu->dev,
+ &rvu->pf_limits.nix->a[i], pf)) {
+ dev_err(rvu->dev,
+ "Failed to allocate quota for nix, on %s\n",
+ pci_name(pdev));
+ err = -EFAULT;
+ break;
+ }
+
+ /* In fixed TXSCHQ case each LF is assigned only 1 queue. */
+ if (rvu->hw->cap.nix_fixed_txschq_mapping)
+ continue;
+
+ if (quota_sysfs_create("smq", pf->limits_kobj, rvu->dev,
+ &rvu->pf_limits.smq->a[i], pf)) {
+ dev_err(rvu->dev, "Failed to allocate quota for smq on %s\n",
+ pci_name(pf->pdev));
+ err = -EFAULT;
+ break;
+ }
+
+ if (quota_sysfs_create("tl4", pf->limits_kobj, rvu->dev,
+ &rvu->pf_limits.tl4->a[i], pf)) {
+ dev_err(rvu->dev, "Failed to allocate quota for tl4 on %s\n",
+ pci_name(pf->pdev));
+ err = -EFAULT;
+ break;
+ }
+
+ if (quota_sysfs_create("tl3", pf->limits_kobj, rvu->dev,
+ &rvu->pf_limits.tl3->a[i], pf)) {
+ dev_err(rvu->dev, "Failed to allocate quota for tl3 on %s\n",
+ pci_name(pf->pdev));
+ err = -EFAULT;
+ break;
+ }
+
+ if (quota_sysfs_create("tl2", pf->limits_kobj, rvu->dev,
+ &rvu->pf_limits.tl2->a[i], pf)) {
+ dev_err(rvu->dev, "Failed to allocate quota for tl2 on %s\n",
+ pci_name(pf->pdev));
+ err = -EFAULT;
+ break;
+ }
+ }
+
+ return err;
+}
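+
+/* The files created above land under each PF's PCI device directory, e.g.
+ * /sys/bus/pci/devices/<DBDF>/limits/{sso,ssow,tim,cpt,npa,nix} plus
+ * smq/tl4/tl3/tl2 when tx schedulers are not fixed-mapped; reading a file
+ * returns the current quota and writing it adjusts the limit (path shown for
+ * illustration - it follows wherever &pdev->dev.kobj lives in sysfs).
+ */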
+
+void rvu_policy_destroy(struct rvu *rvu)
+{
+ struct rvu_pfvf *pf = NULL;
+ int i;
+
+ if (!is_rvu_otx2(rvu))
+ return;
+
+ quotas_free(rvu->pf_limits.sso);
+ quotas_free(rvu->pf_limits.ssow);
+ quotas_free(rvu->pf_limits.npa);
+ quotas_free(rvu->pf_limits.cpt);
+ quotas_free(rvu->pf_limits.tim);
+ quotas_free(rvu->pf_limits.nix);
+
+ rvu->pf_limits.sso = NULL;
+ rvu->pf_limits.ssow = NULL;
+ rvu->pf_limits.npa = NULL;
+ rvu->pf_limits.cpt = NULL;
+ rvu->pf_limits.tim = NULL;
+ rvu->pf_limits.nix = NULL;
+
+ if (!rvu->hw->cap.nix_fixed_txschq_mapping) {
+ quotas_free(rvu->pf_limits.smq);
+ quotas_free(rvu->pf_limits.tl4);
+ quotas_free(rvu->pf_limits.tl3);
+ quotas_free(rvu->pf_limits.tl2);
+
+ rvu->pf_limits.smq = NULL;
+ rvu->pf_limits.tl4 = NULL;
+ rvu->pf_limits.tl3 = NULL;
+ rvu->pf_limits.tl2 = NULL;
+ }
+
+ for (i = 0; i < rvu->hw->total_pfs; i++) {
+ pf = &rvu->pf[i];
+ kobject_del(pf->limits_kobj);
+ }
+}
+
+int rvu_policy_init(struct rvu *rvu)
+{
+ struct nix_hw *nix_hw = get_nix_hw(rvu->hw, BLKADDR_NIX0);
+ struct pci_dev *pdev = rvu->pdev;
+ struct rvu_hwinfo *hw = rvu->hw;
+ int err = -EINVAL, i = 0;
+ u32 max = 0;
+
+ if (!is_rvu_otx2(rvu))
+ return 0;
+
+ if (!nix_hw)
+ goto error;
+
+ max = hw->block[BLKADDR_SSO].lf.max;
+ rvu->pf_limits.sso = quotas_alloc(rvu->hw->total_pfs, max, max,
+ 0, &rvu->rsrc_lock, &pf_limit_ops);
+ if (!rvu->pf_limits.sso) {
+ dev_err(rvu->dev, "Failed to allocate sso limits\n");
+ err = -EFAULT;
+ goto error;
+ }
+
+ max = hw->block[BLKADDR_SSOW].lf.max;
+ rvu->pf_limits.ssow = quotas_alloc(rvu->hw->total_pfs, max, max,
+ 0, &rvu->rsrc_lock, &pf_limit_ops);
+ if (!rvu->pf_limits.ssow) {
+ dev_err(rvu->dev, "Failed to allocate ssow limits\n");
+ err = -EFAULT;
+ goto error;
+ }
+
+ max = hw->block[BLKADDR_TIM].lf.max;
+ rvu->pf_limits.tim = quotas_alloc(rvu->hw->total_pfs, max, max,
+ 0, &rvu->rsrc_lock, &pf_limit_ops);
+ if (!rvu->pf_limits.tim) {
+ dev_err(rvu->dev, "Failed to allocate tim limits\n");
+ err = -EFAULT;
+ goto error;
+ }
+
+ max = hw->block[BLKADDR_CPT0].lf.max;
+ rvu->pf_limits.cpt = quotas_alloc(rvu->hw->total_pfs, max, max,
+ 0, &rvu->rsrc_lock, &pf_limit_ops);
+ if (!rvu->pf_limits.cpt) {
+ dev_err(rvu->dev, "Failed to allocate cpt limits\n");
+ err = -EFAULT;
+ goto error;
+ }
+
+ /* Because the limits also track VFs under a PF, the maximum NPA LF limit
+ * for a single PF has to be max, not 1. The same applies to NIX below.
+ */
+ max = hw->block[BLKADDR_NPA].lf.max;
+ rvu->pf_limits.npa = quotas_alloc(rvu->hw->total_pfs, max, max,
+ 0, &rvu->rsrc_lock, &pf_limit_ops);
+ if (!rvu->pf_limits.npa) {
+ dev_err(rvu->dev, "Failed to allocate npa limits\n");
+ err = -EFAULT;
+ goto error;
+ }
+
+ max = hw->block[BLKADDR_NIX0].lf.max + hw->block[BLKADDR_NIX1].lf.max;
+ rvu->pf_limits.nix = quotas_alloc(rvu->hw->total_pfs, max, max,
+ 0, &rvu->rsrc_lock, &pf_limit_ops);
+ if (!rvu->pf_limits.nix) {
+ dev_err(rvu->dev, "Failed to allocate nix limits\n");
+ err = -EFAULT;
+ goto error;
+ }
+
+ if (rvu->hw->cap.nix_fixed_txschq_mapping)
+ goto skip_txschq_limits;
+
+ max = nix_hw->txsch[NIX_TXSCH_LVL_SMQ].schq.max;
+ rvu->pf_limits.smq = quotas_alloc(hw->total_pfs, max, max, 0,
+ &rvu->rsrc_lock, &pf_limit_ops);
+ if (!rvu->pf_limits.smq) {
+ dev_err(rvu->dev, "Failed to allocate SQM txschq limits\n");
+ err = -EFAULT;
+ goto error;
+ }
+
+ max = nix_hw->txsch[NIX_TXSCH_LVL_TL4].schq.max;
+ rvu->pf_limits.tl4 = quotas_alloc(hw->total_pfs, max, max, 0,
+ &rvu->rsrc_lock, &pf_limit_ops);
+ if (!rvu->pf_limits.tl4) {
+ dev_err(rvu->dev, "Failed to allocate TL4 txschq limits\n");
+ err = -EFAULT;
+ goto error;
+ }
+
+ max = nix_hw->txsch[NIX_TXSCH_LVL_TL3].schq.max;
+ rvu->pf_limits.tl3 = quotas_alloc(hw->total_pfs, max, max, 0,
+ &rvu->rsrc_lock, &pf_limit_ops);
+ if (!rvu->pf_limits.tl3) {
+ dev_err(rvu->dev, "Failed to allocate TL3 txschq limits\n");
+ err = -EFAULT;
+ goto error;
+ }
+
+ max = nix_hw->txsch[NIX_TXSCH_LVL_TL2].schq.max;
+ rvu->pf_limits.tl2 = quotas_alloc(hw->total_pfs, max, max, 0,
+ &rvu->rsrc_lock, &pf_limit_ops);
+ if (!rvu->pf_limits.tl2) {
+ dev_err(rvu->dev, "Failed to allocate TL2 txschq limits\n");
+ err = -EFAULT;
+ goto error;
+ }
+
+skip_txschq_limits:
+ for (i = 0; i < hw->total_pfs; i++)
+ rvu->pf[i].pdev =
+ pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
+ i + 1, 0);
+
+ rvu_set_default_limits(rvu);
+
+ err = rvu_create_limits_sysfs(rvu);
+ if (err) {
+ dev_err(rvu->dev, "Failed to create limits sysfs\n");
+ goto error;
+ }
+
+ return 0;
+
+error:
+ rvu_policy_destroy(rvu);
+ return err;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_validation.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_validation.h
new file mode 100644
index 000000000000..731dde3ab0f3
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_validation.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell.
+ *
+ */
+
+#ifndef RVU_VALIDATION_H
+#define RVU_VALIDATION_H
+
+struct rvu;
+struct rvu_quotas;
+
+struct rvu_quota {
+ struct kobj_attribute sysfs;
+ /* Device to scope logs to */
+ struct device *dev;
+ /* Kobject of the sysfs file */
+ struct kobject *parent;
+ /* Pointer to base structure */
+ struct rvu_quotas *base;
+ /* Argument passed to the quota_ops when this quota is modified */
+ void *ops_arg;
+ /* Value of the quota */
+ int val;
+};
+
+struct rvu_quota_ops {
+ /*
+ * Called before sysfs store(). store() will proceed only if this returns 0.
+ * It is called with struct rvu_quotas::lock taken.
+ */
+ int (*pre_store)(void *arg, struct rvu_quota *quota, int new_val);
+ /* Called after sysfs store(). */
+ void (*post_store)(void *arg, struct rvu_quota *quota, int old_val);
+};
+
+struct rvu_quotas {
+ struct rvu_quota_ops ops;
+ struct mutex *lock; /* lock taken for each sysfs operation */
+ u32 cnt; /* number of elements in arr */
+ u32 max; /* maximum value for a single quota */
+ u64 max_sum; /* maximum sum of all quotas */
+ struct rvu_quota a[]; /* array of quota assignments */
+};
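+
+/* Editor's illustrative sketch, not part of this patch: a minimal
+ * rvu_quota_ops user.  The sysfs store path is expected to call pre_store()
+ * with rvu_quotas::lock held and abort the write on a non-zero return, as
+ * documented above; the hook below just bounds the new value against the
+ * per-quota maximum (all names here are hypothetical).
+ */
+#if 0 /* example only */
+static int example_pre_store(void *arg, struct rvu_quota *quota, int new_val)
+{
+ if (new_val < 0 || new_val > quota->base->max)
+ return -EINVAL;
+ return 0; /* 0 lets the sysfs store() proceed */
+}
+
+static struct rvu_quota_ops example_quota_ops = {
+ .pre_store = example_pre_store,
+};
+#endif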
+
+struct rvu_limits {
+ struct rvu_quotas *sso;
+ struct rvu_quotas *ssow;
+ struct rvu_quotas *tim;
+ struct rvu_quotas *cpt;
+ struct rvu_quotas *npa;
+ struct rvu_quotas *nix;
+ struct rvu_quotas *smq;
+ struct rvu_quotas *tl4;
+ struct rvu_quotas *tl3;
+ struct rvu_quotas *tl2;
+};
+
+int rvu_policy_init(struct rvu *rvu);
+void rvu_policy_destroy(struct rvu *rvu);
+int rvu_check_rsrc_policy(struct rvu *rvu,
+ struct rsrc_attach *req, u16 pcifunc);
+int rvu_check_txsch_policy(struct rvu *rvu, struct nix_txsch_alloc_req *req,
+ u16 pcifunc);
+
+int rvu_mbox_handler_free_rsrc_cnt(struct rvu *rvu, struct msg_req *req,
+ struct free_rsrcs_rsp *rsp);
+#endif /* RVU_VALIDATION_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/Makefile b/drivers/net/ethernet/marvell/octeontx2/bphy/Makefile
new file mode 100644
index 000000000000..a4dfa1b5c9d4
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for Marvell's OcteonTX2 BPHY RFOE netdev driver
+#
+
+obj-$(CONFIG_OCTEONTX2_BPHY_RFOE_NETDEV) += octeontx2_bphy_netdev.o
+
+#EXTRA_CFLAGS += -DDEBUG
+
+octeontx2_bphy_netdev-y := otx2_bphy_main.o otx2_rfoe.o otx2_rfoe_ethtool.o otx2_rfoe_ptp.o \
+ otx2_cpri.o otx2_cpri_ethtool.o otx2_bphy_debugfs.o \
+ cnf10k_rfoe.o cnf10k_rfoe_ethtool.o cnf10k_rfoe_ptp.o
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/bphy_common.h b/drivers/net/ethernet/marvell/octeontx2/bphy/bphy_common.h
new file mode 100644
index 000000000000..41018b33b07a
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/bphy_common.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell BPHY Netdev Driver
+ *
+ * Copyright (C) 2021 Marvell.
+ */
+
+#ifndef _BPHY_COMMON_H_
+#define _BPHY_COMMON_H_
+
+/* BPHY definitions */
+#define OTX2_BPHY_PCI_VENDOR_ID 0x177D
+#define OTX2_BPHY_PCI_DEVICE_ID 0xA089
+
+/* eCPRI ethertype */
+#define ETH_P_ECPRI 0xAEFE
+
+/* max ptp tx requests */
+extern int max_ptp_req;
+
+/* reg base address */
+extern void __iomem *bphy_reg_base;
+extern void __iomem *psm_reg_base;
+extern void __iomem *rfoe_reg_base;
+extern void __iomem *bcn_reg_base;
+extern void __iomem *ptp_reg_base;
+extern void __iomem *cpri_reg_base;
+
+enum port_link_state {
+ LINK_STATE_DOWN,
+ LINK_STATE_UP,
+};
+
+/* iova to kernel virtual addr */
+static inline void *otx2_iova_to_virt(struct iommu_domain *domain, u64 iova)
+{
+ return phys_to_virt(iommu_iova_to_phys(domain, iova));
+}
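+
+/* Editor's illustrative sketch, not part of this patch: typical use of the
+ * helper above.  Job descriptors handed over by BPHY carry IOVAs, so the
+ * netdev side resolves them through the device's IOMMU domain before
+ * touching the memory (the payload layout below is hypothetical).
+ */
+#if 0 /* example only */
+static void example_stage_tx_payload(struct device *dev, u64 payload_iova,
+ const void *src, size_t len)
+{
+ struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
+ void *dst;
+
+ if (!domain)
+ return;
+ dst = otx2_iova_to_virt(domain, payload_iova);
+ memcpy(dst, src, len); /* e.g. copy an skb into the MHAB DMA buffer */
+}
+#endif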
+
+#endif
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/bphy_netdev_comm_if.h b/drivers/net/ethernet/marvell/octeontx2/bphy/bphy_netdev_comm_if.h
new file mode 100644
index 000000000000..9fdeba5be2a3
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/bphy_netdev_comm_if.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell BPHY Netdev Driver
+ *
+ * Copyright (C) 2021 Marvell.
+ */
+
+#ifndef _BPHY_NETDEV_COMM_IF_H_
+#define _BPHY_NETDEV_COMM_IF_H_
+
+/* Max LMAC's per RFOE MHAB */
+#define MAX_LMAC_PER_RFOE 4
+
+/* Max Lanes per CPRI MHAB */
+#define MAX_LANE_PER_CPRI 4
+
+#define MAX_PTP_MSG_PER_LMAC 4 /* 16 Per RFoE */
+#define MAX_OTH_MSG_PER_LMAC 16 /* 64 Per RFoE */
+/* 64 per RFoE; RFoE2 shall have 32 entries */
+#define MAX_OTH_MSG_PER_RFOE (MAX_OTH_MSG_PER_LMAC * MAX_LMAC_PER_RFOE)
+
+/**
+ * @enum bphy_netdev_if_type
+ * @brief BPHY Interface Types
+ *
+ */
+enum bphy_netdev_if_type {
+ IF_TYPE_ETHERNET = 0,
+ IF_TYPE_CPRI = 1,
+ IF_TYPE_NONE = 2,
+ IF_TYPE_MAX,
+};
+
+/**
+ * @enum bphy_netdev_packet_type
+ * @brief Packet types
+ *
+ */
+enum bphy_netdev_packet_type {
+ PACKET_TYPE_PTP = 0,
+ PACKET_TYPE_ECPRI = 1,
+ PACKET_TYPE_OTHER = 2,
+ PACKET_TYPE_MAX,
+};
+
+#endif
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_bphy_hw.h b/drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_bphy_hw.h
new file mode 100644
index 000000000000..9b2a7a02b564
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_bphy_hw.h
@@ -0,0 +1,482 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell CNF10K BPHY Netdev Driver
+ *
+ * Copyright (C) 2021 Marvell.
+ */
+
+#ifndef _CNF10K_BPHY_HW_H_
+#define _CNF10K_BPHY_HW_H_
+
+#include <linux/types.h>
+
+/* PSM register offsets */
+#define PSM_QUEUE_CMD_LO(a) (0x0 + (a) * 0x10)
+#define PSM_QUEUE_CMD_HI(a) (0x8 + (a) * 0x10)
+#define PSM_QUEUE_CFG(a) (0x1000 + (a) * 0x10)
+#define PSM_QUEUE_PTR(a) (0x2000 + (a) * 0x10)
+#define PSM_QUEUE_SPACE(a) (0x3000 + (a) * 0x10)
+#define PSM_QUEUE_TIMEOUT_CFG(a) (0x4000 + (a) * 0x10)
+#define PSM_QUEUE_INFO(a) (0x5000 + (a) * 0x10)
+#define PSM_QUEUE_ENA_W1S(a) (0x10000 + (a) * 0x8)
+#define PSM_QUEUE_ENA_W1C(a) (0x10100 + (a) * 0x8)
+#define PSM_QUEUE_FULL_STS(a) (0x10200 + (a) * 0x8)
+#define PSM_QUEUE_BUSY_STS(a) (0x10300 + (a) * 0x8)
+
+/* BPHY PSM GPINT register offsets */
+#define PSM_INT_GP_SUM_W1C(a) (0x10E0000 + (a) * 0x100)
+#define PSM_INT_GP_SUM_W1S(a) (0x10E0040 + (a) * 0x100)
+#define PSM_INT_GP_ENA_W1C(a) (0x10E0080 + (a) * 0x100)
+#define PSM_INT_GP_ENA_W1S(a) (0x10E00C0 + (a) * 0x100)
+
+/* RFOE MHAB register offsets */
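+/* In the macros below 'a' selects the RFOE MHAB instance (shifted into the
+ * upper address bits) and 'b', where present, picks the per-LMAC/per-slot
+ * copy of the register within that MHAB; this reading is inferred from how
+ * the driver passes rfoe_num and lmac/slot indices into them.
+ */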
+#define RFOEX_RX_CTL(a) (0x0818ULL | \
+ ((unsigned long)(a) << 24))
+#define RFOEX_RX_VLANX_CFG(a, b) (0x0870ULL | \
+ ((unsigned long)(a) << 24) | \
+ ((b) << 3))
+#define RFOEX_RX_INDIRECT_INDEX_OFFSET(a) (0x13F8ULL | \
+ ((unsigned long)(a) << 24))
+#define RFOEX_RX_IND_FTX_CFG(a, b) (0x1400ULL | \
+ (((unsigned long)(a) << 24)) + \
+ ((b) << 3))
+#define RFOEX_RX_IND_MBT_CFG(a) (0x1420ULL | \
+ ((unsigned long)(a) << 24))
+#define RFOEX_RX_IND_MBT_CFG2(a) (0x1428ULL | \
+ ((unsigned long)(a) << 24))
+#define RFOEX_RX_IND_MBT_ADDR(a) (0x1430ULL | \
+ ((unsigned long)(a) << 24))
+#define RFOEX_RX_IND_MBT_SEG_STATE(a) (0x1438ULL | \
+ ((unsigned long)(a) << 24))
+#define RFOEX_RX_IND_VLANX_FWD(a, b) (0x14D0ULL | \
+ ((unsigned long)(a) << 24) | \
+ ((b) << 3))
+#define RFOEX_RX_IND_JDT_CFG0(a) (0x1440ULL | \
+ ((unsigned long)(a) << 24))
+#define RFOEX_RX_IND_JDT_CFG1(a) (0x1448ULL | \
+ ((unsigned long)(a) << 24))
+#define RFOEX_RX_IND_JDT_CFG2(a) (0x1490ULL | \
+ ((unsigned long)(a) << 24))
+#define RFOEX_RX_IND_JDT_PTR(a) (0x1450ULL | \
+ ((unsigned long)(a) << 24))
+#define RFOEX_RX_IND_JDT_STATE(a) (0x1478ULL | \
+ ((unsigned long)(a) << 24))
+#define RFOEX_RX_IND_ECPRI_FT_CFG(a) (0x14C0ULL | \
+ ((unsigned long)(a) << 24))
+#define RFOEX_TX_PTP_TSTMP_W0(a, b) (0x7A0ULL | \
+ (((unsigned long)(a) << 24)) | \
+ ((b) << 3))
+#define RFOEX_TX_PTP_TSTMP_W1(a, b) (0x7C0ULL | \
+ (((unsigned long)(a) << 24)) | \
+ ((b) << 3))
+#define RFOEX_TX_PKT_STAT(a, b) (0x720ULL | \
+ (((unsigned long)(a) << 24)) | \
+ ((b) << 3))
+#define RFOEX_TX_OCTS_STAT(a, b) (0x740ULL | \
+ (((unsigned long)(a) << 24)) | \
+ ((b) << 3))
+#define RFOEX_RX_VLAN_DROP_STAT(a, b) (0x8A0ULL | \
+ (((unsigned long)(a) << 24)) | \
+ ((b) << 3))
+#define RFOEX_RX_RPM_PKT_STAT(a, b) (0x15C0ULL | \
+ (((unsigned long)(a) << 24)) | \
+ ((b) << 3))
+#define RFOEX_RX_RPM_OCTS_STAT(a, b) (0x15E0ULL | \
+ (((unsigned long)(a) << 24)) | \
+ ((b) << 3))
+
+/* BCN register offsets and definitions */
+#define BCN_CAPTURE_CFG 0x400
+#define BCN_CAPTURE_N1_N2 0x410
+#define BCN_CAPTURE_PTP 0x430
+
+/* BCN_CAPTURE_CFG register definitions */
+#define CAPT_EN BIT(0)
+#define CAPT_TRIG_SW (3UL << 8)
+
+/* CPRI register offsets */
+#define CPRIX_RXD_GMII_UL_CBUF_CFG1(a) (0x1000ULL | \
+ ((unsigned long)(a) << 24))
+#define CPRIX_RXD_GMII_UL_CBUF_CFG2(a) (0x1008ULL | \
+ ((unsigned long)(a) << 24))
+#define CPRIX_RXD_GMII_UL_RD_DOORBELL(a) (0x1010ULL | \
+ ((unsigned long)(a) << 24))
+#define CPRIX_RXD_GMII_UL_SW_RD_PTR(a) (0x1018ULL | \
+ ((unsigned long)(a) << 24))
+#define CPRIX_RXD_GMII_UL_NXT_WR_PTR(a) (0x1020ULL | \
+ ((unsigned long)(a) << 24))
+#define CPRIX_RXD_GMII_UL_PKT_COUNT(a) (0x1028ULL | \
+ ((unsigned long)(a) << 24))
+#define CPRIX_TXD_GMII_DL_CBUF_CFG1(a) (0x1100ULL | \
+ ((unsigned long)(a) << 24))
+#define CPRIX_TXD_GMII_DL_CBUF_CFG2(a) (0x1108ULL | \
+ ((unsigned long)(a) << 24))
+#define CPRIX_TXD_GMII_DL_WR_DOORBELL(a) (0x1110ULL | \
+ ((unsigned long)(a) << 24))
+#define CPRIX_TXD_GMII_DL_SW_WR_PTR(a) (0x1118ULL | \
+ ((unsigned long)(a) << 24))
+#define CPRIX_TXD_GMII_DL_NXT_RD_PTR(a) (0x1120ULL | \
+ ((unsigned long)(a) << 24))
+#define CPRIX_ETH_UL_INT(a) (0x280ULL | \
+ ((unsigned long)(a) << 24))
+#define CPRIX_ETH_UL_INT_ENA_W1S(a) (0x288ULL | \
+ ((unsigned long)(a) << 24))
+#define CPRIX_ETH_UL_INT_ENA_W1C(a) (0x290ULL | \
+ ((unsigned long)(a) << 24))
+#define CPRIX_ETH_UL_INT_W1S(a) (0x298ULL | \
+ ((unsigned long)(a) << 24))
+#define CPRIX_ETH_BAD_CRC_CNT(a, b) (0x400ULL | \
+ ((unsigned long)(a) << 24) | \
+ ((unsigned long)(b) << 11))
+#define CPRIX_ETH_UL_ERR_CNT(a, b) (0x408ULL | \
+ ((unsigned long)(a) << 24) | \
+ ((unsigned long)(b) << 11))
+#define CPRIX_ETH_UL_OSIZE_CNT(a, b) (0x410ULL | \
+ ((unsigned long)(a) << 24) | \
+ ((unsigned long)(b) << 11))
+#define CPRIX_ETH_UL_USIZE_CNT(a, b) (0x418ULL | \
+ ((unsigned long)(a) << 24) | \
+ ((unsigned long)(b) << 11))
+#define CPRIX_ETH_UL_FIFO_ORUN_CNT(a, b) (0x420ULL | \
+ ((unsigned long)(a) << 24) | \
+ ((unsigned long)(b) << 11))
+#define CPRIX_ETH_UL_GPKTS_CNT(a, b) (0x428ULL | \
+ ((unsigned long)(a) << 24) | \
+ ((unsigned long)(b) << 11))
+#define CPRIX_ETH_UL_BOCT_CNT(a, b) (0x430ULL | \
+ ((unsigned long)(a) << 24) | \
+ ((unsigned long)(b) << 11))
+#define CPRIX_ETH_UL_GOCT_CNT(a, b) (0x438ULL | \
+ ((unsigned long)(a) << 24) | \
+ ((unsigned long)(b) << 11))
+#define CPRIX_ETH_UL_MALFORMED_CNT(a, b) (0x440ULL | \
+ ((unsigned long)(a) << 24) | \
+ ((unsigned long)(b) << 11))
+#define CPRIX_ETH_DL_GOCTETS_CNT(a, b) (0x450ULL | \
+ ((unsigned long)(a) << 24) | \
+ ((unsigned long)(b) << 11))
+#define CPRIX_ETH_DL_GPKTS_CNT(a, b) (0x458ULL | \
+ ((unsigned long)(a) << 24) | \
+ ((unsigned long)(b) << 11))
+
+/* MHAB Structures */
+struct mhbw_jd_dma_cfg_word_0_s {
+ u64 dma_mode : 3;
+ u64 reserved1 : 1;
+ u64 dswap : 3;
+ u64 cmd_type : 2;
+ u64 reserved2 : 7;
+ u64 chunk_size : 16;
+ u64 block_size : 16;
+ u64 thread_id : 6;
+ u64 reserved3 : 2;
+ u64 group_id : 4;
+ u64 reserved4 : 4;
+};
+
+struct mhbw_jd_dma_cfg_word_1_s {
+ u64 start_addr : 53;
+ u64 reserved1 : 11;
+};
+
+struct rfoex_abx_slotx_configuration {
+ u64 pkt_mode : 2;
+ u64 da_sel : 3;
+ u64 sa_sel : 3;
+ u64 etype_sel : 3;
+ u64 flowid : 8;
+ u64 subtype : 8;
+ u64 reserved1 : 2;
+ u64 sample_mode : 1;
+ u64 sample_width : 5;
+ u64 sample_width_option : 1;
+ u64 sample_width_sat_bypass : 1;
+ u64 orderinfotype : 1;
+ u64 orderinfooffset : 5;
+ u64 antenna : 8;
+ u64 symbol : 8;
+ u64 sos : 1;
+ u64 eos : 1;
+ u64 orderinfo_insert : 1;
+ u64 custom_timestamp_insert : 1;
+ u64 rfoe_mode : 1;
+};
+
+struct rfoex_abx_slotx_configuration1 {
+ u64 rbmap_bytes : 8;
+ u64 reserved1 : 16;
+ u64 hdr_len : 8;
+ u64 presentation_time_offset : 29;
+ u64 reserved2 : 1;
+ u64 sof_mode : 2;
+};
+
+struct rfoex_abx_slotx_configuration2 {
+ u64 vlan_sel : 3;
+ u64 vlan_num : 2;
+ u64 ptp_mode : 1;
+ u64 ecpri_id_insert : 1;
+ u64 ecpri_seq_id_insert : 1;
+ u64 ecpri_rev : 8;
+ u64 ecpri_msgtype : 8;
+ u64 ecpri_id : 16;
+ u64 ecpri_seq_id : 16;
+ u64 cc_mac_sec_en : 1;
+ u64 ptp_ring_id : 2;
+ u64 reserved1 : 5;
+};
+
+struct rfoex_abx_slotx_configuration3 {
+ u64 pkt_len : 16;
+ u64 lmacid : 2;
+ u64 tx_err : 1;
+ u64 reserved : 45;
+};
+
+struct mhab_job_desc_cfg {
+ struct rfoex_abx_slotx_configuration cfg;
+ struct rfoex_abx_slotx_configuration1 cfg1;
+ struct rfoex_abx_slotx_configuration2 cfg2;
+ struct rfoex_abx_slotx_configuration3 cfg3;
+} __packed;
+
+/* PSM Enumerations */
+enum psm_opcode_e {
+ PSM_OP_NOP = 0x0,
+ PSM_OP_ADDJOB = 0x1,
+ PSM_OP_CONTJOB = 0x2,
+ PSM_OP_DJCNT = 0x10,
+ PSM_OP_GPINT = 0x11,
+ PSM_OP_WAIT = 0x12,
+ PSM_OP_ADDWORK = 0x13,
+ PSM_OP_FREE = 0x14,
+ PSM_OP_WRSTS = 0x15,
+ PSM_OP_WRMSG = 0x16,
+ PSM_OP_ADDNOTIF = 0x17,
+ PSM_OP_QRST = 0x20,
+ PSM_OP_QBLK = 0x21,
+ PSM_OP_QRUN = 0x22,
+ PSM_OP_BCAST = 0x3E,
+ PSM_OP_RSP = 0x3F,
+};
+
+/* PSM Structures */
+struct psm_cmd_addjob_s {
+ /* W0 */
+ u64 opcode : 6;
+ u64 rsrc_set : 2;
+ u64 qid : 8;
+ u64 waitcond : 8;
+ u64 jobtag : 16;
+ u64 reserved1 : 8;
+ u64 mabq : 1;
+ u64 reserved2 : 3;
+ u64 tmem : 1;
+ u64 reserved3 : 3;
+ u64 jobtype : 8;
+ /* W1 */
+ u64 jobptr : 53;
+ u64 reserved4 : 8;
+ u64 gm_id : 3;
+};
+
+/* RFOE Enumerations */
+enum rfoe_ecpri_hdr_err_type_e {
+ NONE = 0x0,
+ CONCATENATION = 0x1,
+ ILLEGAL_VERSION = 0x2,
+ ILLEGAL_RSVD = 0x3,
+ PC_ID = 0x4,
+};
+
+enum rfoe_ecpri_pcid_flowid_mode_e {
+ HASH = 0x0,
+ BASE = 0x1,
+ LMAC_TRUNCATE = 0x2,
+ SHIFT = 0x3,
+};
+
+enum rfoe_order_info_type_e {
+ SEQNUM = 0x0,
+ TIMESTAMP = 0x1,
+};
+
+enum rfoe_rx_dir_ctl_pkt_type_e {
+ ROE = 0x0,
+ CHI = 0x1,
+ ALT = 0x2,
+ ECPRI = 0x4,
+ GENERIC = 0x8,
+};
+
+enum rfoe_rx_pswt_e {
+ RSVD5 = 0x0,
+ ROE_BCN_TYPE = 0x1,
+ RSVD6 = 0x2,
+ ECPRI_BCN_TYPE = 0x3,
+};
+
+enum rfoe_rx_pkt_err_e {
+ RE_NONE = 0x0,
+ RE_PARTIAL = 0x1,
+ RE_JABBER = 0x2,
+ RE_FCS = 0x7,
+ RE_FCS_RCV = 0x8,
+ RE_TERMINATE = 0x9,
+ RE_RX_CTL = 0xB,
+ RE_SKIP = 0xC,
+};
+
+enum rfoe_rx_pkt_logger_idx_e {
+ RX_PKT = 0x0,
+ TX_PKT = 0x1,
+};
+
+/* RFOE Structures */
+struct ecpri_hdr_s {
+ u64 seq_id : 16;
+ u64 pc_id : 16;
+ u64 pyld_size : 16;
+ u64 msg_type : 8;
+ u64 concatenation : 1;
+ u64 reserved : 3;
+ u64 version : 4;
+};
+
+struct rfoe_ab_cfg_w3_s {
+ u64 pkt_len : 16;
+ u64 lmac_id : 2;
+ u64 tx_err : 1;
+ u64 reserved : 45;
+};
+
+struct rfoe_psw_s {
+ /* W0 */
+ u64 jd_ptr : 53;
+ u64 jd_ptr_tmem : 1;
+ u64 jd_ptr_type : 1;
+ u64 reserved1 : 1;
+ u64 gm_id : 3;
+ u64 reserved2 : 3;
+ u64 pswt : 2;
+ /* W1 */
+ u64 ethertype : 16;
+ u64 eindex : 5;
+ u64 reserved3 : 3;
+ u64 pkt_len : 16;
+ u64 mcs_err_sts : 8;
+ u64 mac_err_sts : 6;
+ u64 reserved4 : 2;
+ u64 pkt_type : 4;
+ u64 reserved5 : 4;
+ /* W2 */
+ u64 proto_sts_word;
+ /* W3 */
+ u64 rfoe_tstamp;
+ /* W4 */
+ u64 ptp_timestamp;
+ /* W5 */
+ u64 reserved6;
+ /* W6 */
+ u64 reserved7 : 24;
+ u64 dec_error : 8;
+ u64 dec_num_sections : 8;
+ u64 dec_num_syminc : 8;
+ u64 reserved8 : 16;
+ /* W7 */
+ u64 reserved9;
+};
+
+struct rfoe_psw_w0_s {
+ u64 jd_ptr : 53;
+ u64 jd_ptr_tmem : 1;
+ u64 jd_ptr_type : 1;
+ u64 reserved1 : 1;
+ u64 gm_id : 3;
+ u64 reserved2 : 3;
+ u64 pswt : 2;
+};
+
+struct rfoe_psw_w1_s {
+ u64 ethertype : 16;
+ u64 eindex : 5;
+ u64 reserved3 : 3;
+ u64 pkt_len : 16;
+ u64 mcs_err_sts : 8;
+ u64 mac_err_sts : 6;
+ u64 reserved4 : 2;
+ u64 pkt_type : 4;
+ u64 reserved5 : 4;
+};
+
+struct rfoe_psw_w2_ecpri_s {
+ u64 msg_type : 8;
+ u64 pc_id : 16;
+ u64 seq_id : 16;
+ u64 flow_id : 10;
+ u64 lmac_id : 2;
+ u64 rfoe_id : 4;
+ u64 sa_table_index : 7;
+ u64 reserved : 1;
+};
+
+struct rfoe_psw_w2_roe_s {
+ u64 subtype : 8;
+ u64 fd_symbol : 8;
+ u64 fd_antid : 8;
+ u64 reserved1 : 16;
+ u64 flowid : 8;
+ u64 reserved2 : 2;
+ u64 lmac_id : 2;
+ u64 rfoe_id : 4;
+ u64 sa_table_index : 7;
+ u64 reserved3 : 1;
+};
+
+struct rfoe_psw_w3_bcn_s {
+ u64 n2 : 24;
+ u64 n1 : 40;
+};
+
+struct rfoe_psw_w4_s {
+ u64 ptp_timestamp;
+};
+
+struct rfoe_rx_pkt_log_s {
+ u64 timestamp;
+ u64 psw_w2;
+ u64 psw_w1;
+ u64 psw_w0;
+};
+
+struct rfoe_timestamp_s {
+ u32 time_tick : 16;
+ u32 sf : 4;
+ u32 bfn : 12;
+};
+
+struct rfoe_tx_pkt_log_s {
+ u64 timestamp;
+ u64 lmac_id : 2;
+ u64 rfoe_id : 4;
+ u64 jobid : 16;
+ u64 drop : 1;
+ u64 tx_err : 1;
+ u64 reserved : 40;
+};
+
+struct rfoe_tx_ptp_tstmp_s {
+ u64 ptp_timestamp;
+ u64 reserved1 : 2;
+ u64 rfoe_id : 4;
+ u64 jobid : 16;
+ u64 drop : 1;
+ u64 tx_err : 1;
+ u64 reserved2 : 39;
+ u64 valid : 1;
+};
+
+struct rfoe_rx_ind_vlanx_fwd {
+ u64 fwd : 64;
+};
+
+#endif /* _CNF10K_BPHY_HW_H_ */
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_bphy_netdev_comm_if.h b/drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_bphy_netdev_comm_if.h
new file mode 100644
index 000000000000..f9307b1e489e
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_bphy_netdev_comm_if.h
@@ -0,0 +1,296 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell CNF10K BPHY Netdev Driver
+ *
+ * Copyright (C) 2021 Marvell.
+ */
+
+#ifndef _CNF10K_BPHY_NETDEV_COMM_IF_H_
+#define _CNF10K_BPHY_NETDEV_COMM_IF_H_
+
+#include <linux/etherdevice.h>
+#include "bphy_netdev_comm_if.h"
+
+#define BPHY_MAX_RFOE_MHAB 8 /* Max RFOE MHAB instances */
+#define BPHY_MAX_CPRI_MHAB 4 /* Max CPRI MHAB instances */
+
+#define MAX_PTP_RING 4 /* Max ptp rings per lmac */
+
+#define CNF10KB_VERSION 2 /* chip version */
+#define CNF10KA_VERSION 3 /* chip version */
+
+#define CHIP_CNF10KB(v) (((v) == CNF10KB_VERSION) ? 1 : 0)
+#define CHIP_CNF10KA(v) (((v) == CNF10KA_VERSION) ? 1 : 0)
+
+#define CHIP_CNF10K(v) ({ \
+ typeof(v) _v = (v); \
+ (CHIP_CNF10KB(_v) | CHIP_CNF10KA(_v)); \
+})
+
+/**
+ * @enum BPHY_NETDEV_CPRI_RX_GP_INT_e_
+ * @brief GP_INT numbers for CPRI Ethernet packet Rx notification
+ * by BPHY to netdev.
+ *
+ */
+enum bphy_netdev_cpri_rx_gp_int {
+ CNF10K_RX_GP_INT_CPRI0_ETH = 93, //PSM_GPINT93,
+ CNF10K_RX_GP_INT_CPRI1_ETH = 94, //PSM_GPINT94,
+ CNF10K_RX_GP_INT_CPRI2_ETH = 95, //PSM_GPINT95
+};
+
+/**
+ * @enum BPHY_NETDEV_TX_GP_INT_e_
+ * @brief GP_INT numbers for packet notification by netdev to BPHY.
+ *
+ */
+#ifdef CNF10KB
+enum bphy_netdev_tx_gp_int {
+ CNF10K_TX_GP_INT_RFOE0_LMAC0 = 32, //PSM_GPINT32,
+ CNF10K_TX_GP_INT_RFOE0_LMAC1 = 33, //PSM_GPINT33,
+
+ CNF10K_TX_GP_INT_RFOE1_LMAC2 = 34, //PSM_GPINT34,
+ CNF10K_TX_GP_INT_RFOE1_LMAC3 = 35, //PSM_GPINT35,
+
+ CNF10K_TX_GP_INT_RFOE2_LMAC0 = 36, //PSM_GPINT36,
+ CNF10K_TX_GP_INT_RFOE2_LMAC1 = 37, //PSM_GPINT37,
+
+ CNF10K_TX_GP_INT_RFOE3_LMAC2 = 38, //PSM_GPINT38,
+ CNF10K_TX_GP_INT_RFOE3_LMAC3 = 39, //PSM_GPINT39,
+
+ CNF10K_TX_GP_INT_RFOE4_LMAC0 = 40, //PSM_GPINT40,
+ CNF10K_TX_GP_INT_RFOE4_LMAC1 = 41, //PSM_GPINT41
+
+ CNF10K_TX_GP_INT_RFOE5_LMAC0 = 42, //PSM_GPINT42,
+ CNF10K_TX_GP_INT_RFOE5_LMAC1 = 43, //PSM_GPINT43,
+
+ CNF10K_TX_GP_INT_RFOE6_LMAC2 = 44, //PSM_GPINT44,
+ CNF10K_TX_GP_INT_RFOE6_LMAC3 = 45, //PSM_GPINT45,
+};
+#else
+enum bphy_netdev_tx_gp_int {
+ CNF10K_TX_GP_INT_RFOE0_LMAC0 = 32, //PSM_GPINT32,
+ CNF10K_TX_GP_INT_RFOE0_LMAC1 = 33, //PSM_GPINT33,
+ CNF10K_TX_GP_INT_RFOE0_LMAC2 = 34, //PSM_GPINT34,
+ CNF10K_TX_GP_INT_RFOE0_LMAC3 = 35, //PSM_GPINT35,
+
+ CNF10K_TX_GP_INT_RFOE1_LMAC0 = 36, //PSM_GPINT36,
+ CNF10K_TX_GP_INT_RFOE1_LMAC1 = 37, //PSM_GPINT37,
+ CNF10K_TX_GP_INT_RFOE1_LMAC2 = 38, //PSM_GPINT38,
+ CNF10K_TX_GP_INT_RFOE1_LMAC3 = 39, //PSM_GPINT39,
+};
+#endif
+
+/**
+ * @enum BPHY_NETDEV_CNF10K_RX_GP_INT_e_
+ * @brief GP_INT numbers for packet notification by BPHY to netdev.
+ *
+ */
+enum bphy_netdev_rx_gp_int {
+ CNF10K_RX_GP_INT_RFOE0_PTP = 63, //PSM_GPINT63,
+ CNF10K_RX_GP_INT_RFOE0_ECPRI = 62, //PSM_GPINT62,
+ CNF10K_RX_GP_INT_RFOE0_GENERIC = 61, //PSM_GPINT61,
+
+ CNF10K_RX_GP_INT_RFOE1_PTP = 60, //PSM_GPINT60,
+ CNF10K_RX_GP_INT_RFOE1_ECPRI = 59, //PSM_GPINT59,
+ CNF10K_RX_GP_INT_RFOE1_GENERIC = 58, //PSM_GPINT58,
+#ifdef CNF10KB
+ CNF10K_RX_GP_INT_RFOE2_PTP = 57, //PSM_GPINT57,
+ CNF10K_RX_GP_INT_RFOE2_ECPRI = 56, //PSM_GPINT56,
+ CNF10K_RX_GP_INT_RFOE2_GENERIC = 55, //PSM_GPINT55,
+
+ CNF10K_RX_GP_INT_RFOE3_PTP = 54, //PSM_GPINT54,
+ CNF10K_RX_GP_INT_RFOE3_ECPRI = 53, //PSM_GPINT53,
+ CNF10K_RX_GP_INT_RFOE3_GENERIC = 52, //PSM_GPINT52,
+
+ CNF10K_RX_GP_INT_RFOE4_PTP = 51, //PSM_GPINT51,
+ CNF10K_RX_GP_INT_RFOE4_ECPRI = 50, //PSM_GPINT50,
+ CNF10K_RX_GP_INT_RFOE4_GENERIC = 49, //PSM_GPINT49,
+
+ CNF10K_RX_GP_INT_RFOE5_PTP = 48, //PSM_GPINT48,
+ CNF10K_RX_GP_INT_RFOE5_ECPRI = 47, //PSM_GPINT47,
+ CNF10K_RX_GP_INT_RFOE5_GENERIC = 46, //PSM_GPINT46,
+
+ CNF10K_RX_GP_INT_RFOE6_PTP = 66, //PSM_GPINT66,
+ CNF10K_RX_GP_INT_RFOE6_ECPRI = 65, //PSM_GPINT65,
+ CNF10K_RX_GP_INT_RFOE6_GENERIC = 64, //PSM_GPINT64,
+#endif
+};
+
+/**
+ * @struct BPHY_NETDEV_RBUF_INFO_s
+ * @brief Information about the packet ring buffer which shall be used to
+ * send the packets from BPHY to netdev.
+ *
+ */
+struct cnf10k_bphy_ndev_rbuf_info {
+ enum bphy_netdev_packet_type pkt_type;
+ enum bphy_netdev_rx_gp_int gp_int_num;
+ u16 flow_id;
+ u16 mbt_index;
+ /**Maximum number of buffers in the Ring/Pool*/
+ u16 num_bufs;
+ /**MAX Buffer Size configured */
+ u16 buf_size; // TBC: 1536?
+ /**MBT buffer target memory*/
+ u8 mbt_target_mem;
+ /**MBT buffers starting address*/
+ u64 mbt_iova_addr;
+ u16 jdt_index;
+ /**Maximum number of JD buffers in the Ring/Pool*/
+ u16 num_jd;
+ /**MAX JD size configured */
+ u8 jd_size;
+ /**JDT buffer target memory*/
+ u8 jdt_target_mem;
+ /**JDT buffers starting address*/
+ u64 jdt_iova_addr;
+ u64 reserved[4];
+};
+
+/**
+ * @struct BPHY_NETDEV_TX_PSM_CMD_INFO_s
+ * @brief TX PSM command information definition to be shared with
+ * netdev for TX communication.
+ *
+ */
+struct cnf10k_bphy_ndev_tx_psm_cmd_info {
+ enum bphy_netdev_tx_gp_int gp_int_num; // Valid only for PTP messages
+ u64 jd_iova_addr;
+ u64 rd_dma_iova_addr;
+ u64 low_cmd;
+ u64 high_cmd;
+ u64 reserved[4];
+};
+
+/**
+ * @struct BPHY_NETDEV_TX_PTP_RING_INFO_s
+ * @brief TX PTP timestamp ring buffer configuration to be shared
+ * with netdev for reading ptp timestamp.
+ *
+ */
+struct cnf10k_bphy_ndev_tx_ptp_ring_info {
+ u8 is_enable;
+ u8 ring_idx;
+ /**Number of TX PTP timestamp entries in ring */
+ u8 ring_size;
+ /**PTP Ring buffer target memory*/
+ u8 ring_target_mem;
+ /**PTP Ring buffer byte swap mode when TMEM is LLC/DRAM*/
+ u8 dswap;
+ /**Stream ID*/
+ u8 gmid;
+ /**Buffers starting address*/
+ u64 ring_iova_addr;
+ u64 reserved[4];
+};
+
+/**
+ * @struct cnf10k_bphy_netdev_intf_info
+ * @brief LMAC lane number, mac address and status information
+ *
+ */
+struct cnf10k_bphy_ndev_intf_info {
+ u8 rfoe_num;
+ u8 lane_num;
+ /* Source mac address */
+ u8 eth_addr[ETH_ALEN];
+ /* LMAC interface status */
+ u8 status; //0-DOWN, 1-UP
+ /* Configuration valid status; This interface shall be
+ * invalid if this field is set to 0
+ */
+ u8 is_valid;
+ u64 reserved;
+};
+
+/**
+ * @struct BPHY_NETDEV_COMM_IF_s
+ * @brief The communication interface definitions which would be used
+ * by the netdev and BPHY applications.
+ *
+ */
+struct cnf10k_bphy_ndev_comm_if {
+ struct cnf10k_bphy_ndev_intf_info lmac_info;
+ struct cnf10k_bphy_ndev_rbuf_info rbuf_info[PACKET_TYPE_MAX];
+ /** Single array handling both the PTP and OTHER command info. */
+ struct cnf10k_bphy_ndev_tx_psm_cmd_info
+ ptp_pkt_info[MAX_PTP_MSG_PER_LMAC];
+ struct cnf10k_bphy_ndev_tx_ptp_ring_info
+ ptp_ts_ring_info[MAX_PTP_RING];
+ u64 reserved[4];
+};
+
+/**
+ * @struct BPHY_NETDEV_CPRI_IF_s
+ * @brief Communication interface structure definition to be used by BPHY
+ * and NETDEV applications for CPRI Interface.
+ *
+ */
+struct cnf10k_bphy_ndev_cpri_intf_cfg {
+ u8 id; /**< CPRI_ID 0..2 */
+ u8 active_lane_mask; /**< Lane Id mask */
+ u8 ul_gp_int_num; /**< UL GP INT NUM */
+ u8 ul_int_threshold; /**< UL INT THRESHOLD */
+ u8 num_ul_buf; /**< Num UL Buffers */
+ u8 num_dl_buf; /**< Num DL Buffers */
+ u64 ul_circ_buf_iova_addr; /**< UL circular buffer base address */
+ u64 dl_circ_buf_iova_addr; /**< DL circular buffer base address */
+ u8 eth_addr[MAX_LANE_PER_CPRI][ETH_ALEN];
+ u64 reserved[4];
+};
+
+/**
+ * @struct BPHY_NETDEV_RFOE_10x_IF_s
+ * @brief New Communication interface structure definition to be used
+ * by BPHY and NETDEV applications for RFOE Interface.
+ *
+ */
+struct cnf10k_bphy_ndev_rfoe_if {
+ /**< Interface configuration */
+ struct cnf10k_bphy_ndev_comm_if if_cfg[MAX_LMAC_PER_RFOE];
+ /**TX JD cmds to send packets other than PTP;
+ * these are defined per RFoE and all LMACs can share them.
+ */
+ struct cnf10k_bphy_ndev_tx_psm_cmd_info
+ oth_pkt_info[MAX_OTH_MSG_PER_RFOE];
+ /**Packet types for which the RX flows are configured.*/
+ u8 pkt_type_mask;
+ u64 reserved[4];
+};
+
+/* hardware specific information */
+struct bphy_hw_params {
+ u32 chip_ver; /* (version << 4) | revision */
+ u32 reserved[15]; /* reserved for future extension */
+};
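+
+/* Editor's illustrative sketch, not part of this patch: decoding chip_ver.
+ * Per the comment above it packs (version << 4) | revision, so the version
+ * nibble feeds the CHIP_CNF10K*() helpers defined earlier in this header
+ * (the extraction below is inferred from that comment only).
+ */
+#if 0 /* example only */
+static inline bool example_is_cnf10kb(const struct bphy_hw_params *hw)
+{
+ u32 version = hw->chip_ver >> 4; /* upper bits hold the chip version */
+
+ return CHIP_CNF10KB(version);
+}
+#endif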
+
+/**
+ * @struct BPHY_NETDEV_COMM_INTF_CFG_s
+ * @brief ODP-NETDEV communication interface definition structure to
+ * share the RX/TX interfaces information.
+ *
+ */
+struct cnf10k_rfoe_ndev_comm_intf_cfg {
+ /**< BPHY Hardware parameters */
+ struct bphy_hw_params hw_params;
+ /**< RFOE Interface Configuration */
+ struct cnf10k_bphy_ndev_rfoe_if rfoe_if_cfg[BPHY_MAX_RFOE_MHAB];
+ u64 reserved[4];
+};
+
+/**
+ * @struct BPHY_CPRI_NETDEV_COMM_INTF_CFG_s
+ * @brief Main Communication interface structure definition to be used
+ * by BPHY and NETDEV applications for CPRI Interface.
+ *
+ */
+struct cnf10k_bphy_cpri_netdev_comm_intf_cfg {
+ /**< BPHY Hardware parameters */
+ struct bphy_hw_params hw_params;
+ /**< RFOE Interface Configuration */
+ struct cnf10k_bphy_ndev_cpri_intf_cfg cpri_if_cfg[BPHY_MAX_CPRI_MHAB];
+ u64 reserved[4];
+};
+
+#endif //_CNF10K_BPHY_NETDEV_COMM_IF_H_
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_rfoe.c b/drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_rfoe.c
new file mode 100644
index 000000000000..283ee7f51431
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_rfoe.c
@@ -0,0 +1,1427 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell CNF10K BPHY RFOE Netdev Driver
+ *
+ * Copyright (C) 2021 Marvell.
+ */
+
+#include "cnf10k_rfoe.h"
+#include "cnf10k_bphy_hw.h"
+
+/* global driver ctx */
+struct cnf10k_rfoe_drv_ctx cnf10k_rfoe_drv_ctx[CNF10K_RFOE_MAX_INTF];
+
+void cnf10k_bphy_intr_handler(struct otx2_bphy_cdev_priv *cdev_priv, u32 status)
+{
+ struct cnf10k_rfoe_drv_ctx *cnf10k_drv_ctx;
+ struct cnf10k_rfoe_ndev_priv *priv;
+ struct net_device *netdev;
+ int rfoe_num, i;
+ u32 intr_mask;
+
+ /* rx intr processing */
+ for (rfoe_num = 0; rfoe_num < cdev_priv->num_rfoe_mhab; rfoe_num++) {
+ intr_mask = CNF10K_RFOE_RX_INTR_MASK(rfoe_num);
+ if (status & intr_mask)
+ cnf10k_rfoe_rx_napi_schedule(rfoe_num, status);
+ }
+
+ /* tx intr processing */
+ for (i = 0; i < CNF10K_RFOE_MAX_INTF; i++) {
+ cnf10k_drv_ctx = &cnf10k_rfoe_drv_ctx[i];
+ if (cnf10k_drv_ctx->valid) {
+ netdev = cnf10k_drv_ctx->netdev;
+ priv = netdev_priv(netdev);
+ intr_mask = CNF10K_RFOE_TX_PTP_INTR_MASK(priv->rfoe_num,
+ priv->lmac_id,
+ cdev_priv->num_rfoe_lmac);
+ if ((status & intr_mask) && priv->ptp_tx_skb)
+ schedule_work(&priv->ptp_tx_work);
+ }
+ }
+}
+
+void cnf10k_rfoe_disable_intf(int rfoe_num)
+{
+ struct cnf10k_rfoe_drv_ctx *drv_ctx;
+ struct cnf10k_rfoe_ndev_priv *priv;
+ struct net_device *netdev;
+ int idx;
+
+ for (idx = 0; idx < CNF10K_RFOE_MAX_INTF; idx++) {
+ drv_ctx = &cnf10k_rfoe_drv_ctx[idx];
+ if (drv_ctx->rfoe_num == rfoe_num && drv_ctx->valid) {
+ netdev = drv_ctx->netdev;
+ priv = netdev_priv(netdev);
+ priv->if_type = IF_TYPE_NONE;
+ }
+ }
+}
+
+void cnf10k_bphy_rfoe_cleanup(void)
+{
+ struct cnf10k_rfoe_drv_ctx *drv_ctx = NULL;
+ struct cnf10k_rfoe_ndev_priv *priv;
+ struct cnf10k_rx_ft_cfg *ft_cfg;
+ struct net_device *netdev;
+ int i, idx;
+
+ for (i = 0; i < CNF10K_RFOE_MAX_INTF; i++) {
+ drv_ctx = &cnf10k_rfoe_drv_ctx[i];
+ if (drv_ctx->valid) {
+ netdev = drv_ctx->netdev;
+ priv = netdev_priv(netdev);
+ cnf10k_rfoe_ptp_destroy(priv);
+ unregister_netdev(netdev);
+ for (idx = 0; idx < PACKET_TYPE_MAX; idx++) {
+ if (!(priv->pkt_type_mask & (1U << idx)))
+ continue;
+ ft_cfg = &priv->rx_ft_cfg[idx];
+ netif_napi_del(&ft_cfg->napi);
+ }
+ --(priv->rfoe_common->refcnt);
+ if (priv->rfoe_common->refcnt == 0)
+ kfree(priv->rfoe_common);
+ free_netdev(netdev);
+ drv_ctx->valid = 0;
+ }
+ }
+}
+
+/* submit pending ptp tx requests */
+static void cnf10k_rfoe_ptp_submit_work(struct work_struct *work)
+{
+ struct cnf10k_rfoe_ndev_priv *priv = container_of(work,
+ struct cnf10k_rfoe_ndev_priv,
+ ptp_queue_work);
+ struct mhbw_jd_dma_cfg_word_0_s *jd_dma_cfg_word_0;
+ struct mhbw_jd_dma_cfg_word_1_s *jd_dma_cfg_word_1;
+ struct mhab_job_desc_cfg *jd_cfg_ptr;
+ struct rfoe_tx_ptp_tstmp_s *tx_tstmp;
+ struct psm_cmd_addjob_s *psm_cmd_lo;
+ struct tx_job_queue_cfg *job_cfg;
+ struct tx_job_entry *job_entry;
+ struct ptp_tstamp_skb *ts_skb;
+ u16 psm_queue_id, queue_space;
+ struct sk_buff *skb = NULL;
+ struct list_head *head;
+ u64 jd_cfg_ptr_iova;
+ unsigned long flags;
+ u64 regval;
+
+ job_cfg = &priv->tx_ptp_job_cfg;
+
+ spin_lock_irqsave(&job_cfg->lock, flags);
+
+ /* check pending ptp requests */
+ if (list_empty(&priv->ptp_skb_list.list)) {
+ netif_dbg(priv, tx_queued, priv->netdev,
+ "no pending ptp tx requests\n");
+ spin_unlock_irqrestore(&job_cfg->lock, flags);
+ return;
+ }
+
+ /* check psm queue space available */
+ psm_queue_id = job_cfg->psm_queue_id;
+ regval = readq(priv->psm_reg_base + PSM_QUEUE_SPACE(psm_queue_id));
+ queue_space = regval & 0x7FFF;
+ if (queue_space < 1) {
+ netif_dbg(priv, tx_queued, priv->netdev,
+ "ptp tx psm queue %d full\n",
+ psm_queue_id);
+ /* reschedule to check later */
+ spin_unlock_irqrestore(&job_cfg->lock, flags);
+ schedule_work(&priv->ptp_queue_work);
+ return;
+ }
+
+ if (test_and_set_bit_lock(PTP_TX_IN_PROGRESS, &priv->state)) {
+ netif_dbg(priv, tx_queued, priv->netdev, "ptp tx ongoing\n");
+ spin_unlock_irqrestore(&job_cfg->lock, flags);
+ return;
+ }
+
+ head = &priv->ptp_skb_list.list;
+ ts_skb = list_entry(head->next, struct ptp_tstamp_skb, list);
+ skb = ts_skb->skb;
+ list_del(&ts_skb->list);
+ kfree(ts_skb);
+ priv->ptp_skb_list.count--;
+
+ netif_dbg(priv, tx_queued, priv->netdev,
+ "submitting ptp tx skb %pS\n", skb);
+
+ priv->last_tx_ptp_jiffies = jiffies;
+
+ /* ptp timestamp entry is 128-bit in size */
+ tx_tstmp = (struct rfoe_tx_ptp_tstmp_s *)
+ ((u8 *)priv->ptp_ring_cfg.ptp_ring_base +
+ (16 * priv->ptp_ring_cfg.ptp_ring_idx));
+ memset(tx_tstmp, 0, sizeof(struct rfoe_tx_ptp_tstmp_s));
+
+ /* get the tx job entry */
+ job_entry = (struct tx_job_entry *)
+ &job_cfg->job_entries[job_cfg->q_idx];
+
+ netif_dbg(priv, tx_queued, priv->netdev,
+ "rfoe=%d lmac=%d psm_queue=%d tx_job_entry %d job_cmd_lo=0x%llx job_cmd_high=0x%llx jd_iova_addr=0x%llx\n",
+ priv->rfoe_num, priv->lmac_id, psm_queue_id, job_cfg->q_idx,
+ job_entry->job_cmd_lo, job_entry->job_cmd_hi,
+ job_entry->jd_iova_addr);
+
+ priv->ptp_tx_skb = skb;
+ psm_cmd_lo = (struct psm_cmd_addjob_s *)&job_entry->job_cmd_lo;
+ priv->ptp_job_tag = psm_cmd_lo->jobtag;
+
+ /* update length and block size in jd dma cfg word */
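+ /* block_size is in 4-byte words: skb->len is rounded up to a 16-byte
+ * chunk and converted to words here; the RX path converts it back to
+ * bytes with block_size << 2.
+ */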
+ jd_cfg_ptr_iova = *(u64 *)((u8 *)job_entry->jd_ptr + 8);
+ jd_cfg_ptr = otx2_iova_to_virt(priv->iommu_domain, jd_cfg_ptr_iova);
+ jd_cfg_ptr->cfg3.pkt_len = skb->len;
+ jd_dma_cfg_word_0 = (struct mhbw_jd_dma_cfg_word_0_s *)
+ job_entry->rd_dma_ptr;
+ jd_dma_cfg_word_0->block_size = (((skb->len + 15) >> 4) * 4);
+
+ /* copy packet data to rd_dma_ptr start addr */
+ jd_dma_cfg_word_1 = (struct mhbw_jd_dma_cfg_word_1_s *)
+ ((u8 *)job_entry->rd_dma_ptr + 8);
+ memcpy(otx2_iova_to_virt(priv->iommu_domain,
+ jd_dma_cfg_word_1->start_addr),
+ skb->data, skb->len);
+
+ /* make sure that all memory writes are completed */
+ dma_wmb();
+
+ /* submit PSM job */
+ writeq(job_entry->job_cmd_lo,
+ priv->psm_reg_base + PSM_QUEUE_CMD_LO(psm_queue_id));
+ writeq(job_entry->job_cmd_hi,
+ priv->psm_reg_base + PSM_QUEUE_CMD_HI(psm_queue_id));
+
+ /* increment queue index */
+ job_cfg->q_idx++;
+ if (job_cfg->q_idx == job_cfg->num_entries)
+ job_cfg->q_idx = 0;
+
+ spin_unlock_irqrestore(&job_cfg->lock, flags);
+}
+
+#define OTX2_RFOE_PTP_TSTMP_POLL_CNT 100
+
+/* ptp interrupt processing bottom half */
+static void cnf10k_rfoe_ptp_tx_work(struct work_struct *work)
+{
+ struct cnf10k_rfoe_ndev_priv *priv = container_of(work,
+ struct cnf10k_rfoe_ndev_priv,
+ ptp_tx_work);
+ struct rfoe_tx_ptp_tstmp_s *tx_tstmp;
+ struct skb_shared_hwtstamps ts;
+ u64 timestamp;
+ u16 jobid;
+
+ if (!priv->ptp_tx_skb) {
+ netif_err(priv, tx_done, priv->netdev,
+ "ptp tx skb not found, something wrong!\n");
+ goto submit_next_req;
+ }
+
+ /* make sure that all memory writes by rfoe are completed */
+ dma_rmb();
+
+ /* ptp timestamp entry is 128-bit in size */
+ tx_tstmp = (struct rfoe_tx_ptp_tstmp_s *)
+ ((u8 *)priv->ptp_ring_cfg.ptp_ring_base +
+ (16 * priv->ptp_ring_cfg.ptp_ring_idx));
+
+ /* match job id */
+ jobid = tx_tstmp->jobid;
+ if (jobid != priv->ptp_job_tag) {
+ netif_err(priv, tx_done, priv->netdev,
+ "ptp job id doesn't match, job_id=0x%x skb->job_tag=0x%x\n",
+ jobid, priv->ptp_job_tag);
+ priv->stats.tx_hwtstamp_failures++;
+ goto submit_next_req;
+ }
+
+ if (tx_tstmp->drop || tx_tstmp->tx_err) {
+ netif_err(priv, tx_done, priv->netdev,
+ "ptp tx timstamp error\n");
+ priv->stats.tx_hwtstamp_failures++;
+ goto submit_next_req;
+ }
+
+ /* update timestamp value in skb */
+ timestamp = tx_tstmp->ptp_timestamp;
+
+ memset(&ts, 0, sizeof(ts));
+ ts.hwtstamp = ns_to_ktime(timestamp);
+ skb_tstamp_tx(priv->ptp_tx_skb, &ts);
+
+submit_next_req:
+ priv->ptp_ring_cfg.ptp_ring_idx++;
+ if (priv->ptp_ring_cfg.ptp_ring_idx >= priv->ptp_ring_cfg.ptp_ring_size)
+ priv->ptp_ring_cfg.ptp_ring_idx = 0;
+ if (priv->ptp_tx_skb)
+ dev_kfree_skb_any(priv->ptp_tx_skb);
+ priv->ptp_tx_skb = NULL;
+ clear_bit_unlock(PTP_TX_IN_PROGRESS, &priv->state);
+ schedule_work(&priv->ptp_queue_work);
+}
+
+/* psm queue timer callback to check queue space */
+static void cnf10k_rfoe_tx_timer_cb(struct timer_list *t)
+{
+ struct cnf10k_rfoe_ndev_priv *priv =
+ container_of(t, struct cnf10k_rfoe_ndev_priv, tx_timer);
+ u16 psm_queue_id, queue_space;
+ int reschedule = 0;
+ u64 regval;
+
+ /* check psm queue space for both ptp and oth packets */
+ if (netif_queue_stopped(priv->netdev)) {
+ psm_queue_id = priv->tx_ptp_job_cfg.psm_queue_id;
+ // check queue space
+ regval = readq(priv->psm_reg_base +
+ PSM_QUEUE_SPACE(psm_queue_id));
+ queue_space = regval & 0x7FFF;
+ if (queue_space > 1) {
+ netif_wake_queue(priv->netdev);
+ reschedule = 0;
+ } else {
+ reschedule = 1;
+ }
+
+ psm_queue_id = priv->rfoe_common->tx_oth_job_cfg.psm_queue_id;
+ // check queue space
+ regval = readq(priv->psm_reg_base +
+ PSM_QUEUE_SPACE(psm_queue_id));
+ queue_space = regval & 0x7FFF;
+ if (queue_space > 1) {
+ netif_wake_queue(priv->netdev);
+ reschedule = 0;
+ } else {
+ reschedule = 1;
+ }
+ }
+
+ if (reschedule)
+ mod_timer(&priv->tx_timer, jiffies + msecs_to_jiffies(100));
+}
+
+static void cnf10k_rfoe_process_rx_pkt(struct cnf10k_rfoe_ndev_priv *priv,
+ struct cnf10k_rx_ft_cfg *ft_cfg,
+ int mbt_buf_idx)
+{
+ struct otx2_bphy_cdev_priv *cdev_priv = priv->cdev_priv;
+ struct mhbw_jd_dma_cfg_word_0_s *jd_dma_cfg_word_0;
+ u64 tstamp = 0, mbt_state, jdt_iova_addr;
+ struct rfoe_psw_w2_ecpri_s *ecpri_psw_w2;
+ struct rfoe_psw_w2_roe_s *rfoe_psw_w2;
+ struct cnf10k_rfoe_ndev_priv *priv2;
+ struct cnf10k_rfoe_drv_ctx *drv_ctx;
+ int found = 0, idx, len, pkt_type;
+ unsigned int ptp_message_len = 0;
+ struct rfoe_psw_s *psw = NULL;
+ struct net_device *netdev;
+ u8 *buf_ptr, *jdt_ptr;
+ struct sk_buff *skb;
+ u8 lmac_id;
+
+ /* read mbt state */
+ spin_lock(&cdev_priv->mbt_lock);
+ writeq(mbt_buf_idx, (priv->rfoe_reg_base +
+ RFOEX_RX_INDIRECT_INDEX_OFFSET(priv->rfoe_num)));
+ mbt_state = readq(priv->rfoe_reg_base +
+ RFOEX_RX_IND_MBT_SEG_STATE(priv->rfoe_num));
+ spin_unlock(&cdev_priv->mbt_lock);
+
+ if ((mbt_state >> 16 & 0xf) != 0) {
+ pr_err("rx pkt error: mbt_buf_idx=%d, err=%d\n",
+ mbt_buf_idx, (u8)(mbt_state >> 16 & 0xf));
+ return;
+ }
+ if (mbt_state >> 20 & 0x1) {
+ pr_err("rx dma error: mbt_buf_idx=%d\n", mbt_buf_idx);
+ return;
+ }
+
+ buf_ptr = (u8 *)ft_cfg->mbt_virt_addr +
+ (ft_cfg->buf_size * mbt_buf_idx);
+
+ pkt_type = ft_cfg->pkt_type;
+
+ psw = (struct rfoe_psw_s *)buf_ptr;
+ if (psw->mac_err_sts || psw->mcs_err_sts) {
+ net_warn_ratelimited("%s: psw mac_err_sts = 0x%x, mcs_err_sts=0x%x\n",
+ priv->netdev->name,
+ psw->mac_err_sts,
+ psw->mcs_err_sts);
+ return;
+ }
+
+ if (pkt_type != PACKET_TYPE_ECPRI) {
+ /* check that the psw type is correct: */
+ if (unlikely(psw->pkt_type == ECPRI)) {
+ net_warn_ratelimited("%s: pswt is eCPRI for pkt_type = %d\n",
+ priv->netdev->name, pkt_type);
+ return;
+ }
+ jdt_iova_addr = (u64)psw->jd_ptr;
+ rfoe_psw_w2 = (struct rfoe_psw_w2_roe_s *)&psw->proto_sts_word;
+ lmac_id = rfoe_psw_w2->lmac_id;
+ tstamp = psw->ptp_timestamp;
+ } else {
+ /* check that the psw type is correct: */
+ if (unlikely(psw->pkt_type != ECPRI)) {
+ net_warn_ratelimited("%s: pswt is not eCPRI for pkt_type = %d\n",
+ priv->netdev->name, pkt_type);
+ return;
+ }
+ jdt_iova_addr = (u64)psw->jd_ptr;
+ ecpri_psw_w2 = (struct rfoe_psw_w2_ecpri_s *)
+ &psw->proto_sts_word;
+ lmac_id = ecpri_psw_w2->lmac_id;
+ tstamp = psw->ptp_timestamp;
+ }
+
+ netif_dbg(priv, rx_status, priv->netdev,
+ "Rx: rfoe=%d lmac=%d mbt_buf_idx=%d\n",
+ priv->rfoe_num, lmac_id, mbt_buf_idx);
+
+ /* read jd ptr from psw */
+ jdt_ptr = otx2_iova_to_virt(priv->iommu_domain, jdt_iova_addr);
+ jd_dma_cfg_word_0 = (struct mhbw_jd_dma_cfg_word_0_s *)
+ ((u8 *)jdt_ptr + ft_cfg->jd_rd_offset);
+ len = (jd_dma_cfg_word_0->block_size) << 2;
+ netif_dbg(priv, rx_status, priv->netdev, "jd rd_dma len = %d\n", len);
+
+ if (unlikely(netif_msg_pktdata(priv))) {
+ netdev_printk(KERN_DEBUG, priv->netdev, "RX MBUF DATA:");
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 4,
+ buf_ptr, len, true);
+ }
+
+ buf_ptr += (ft_cfg->pkt_offset * 16);
+ len -= (ft_cfg->pkt_offset * 16);
+
+ for (idx = 0; idx < CNF10K_RFOE_MAX_INTF; idx++) {
+ drv_ctx = &cnf10k_rfoe_drv_ctx[idx];
+ if (drv_ctx->valid && drv_ctx->rfoe_num == priv->rfoe_num &&
+ drv_ctx->lmac_id == lmac_id) {
+ found = 1;
+ break;
+ }
+ }
+ if (found) {
+ netdev = cnf10k_rfoe_drv_ctx[idx].netdev;
+ priv2 = netdev_priv(netdev);
+ } else {
+ pr_err("netdev not found, something went wrong!\n");
+ return;
+ }
+
+ /* drop the packet if interface is down */
+ if (unlikely(!netif_carrier_ok(netdev))) {
+ netif_err(priv2, rx_err, netdev,
+ "%s {rfoe%d lmac%d} link down, drop pkt\n",
+ netdev->name, priv2->rfoe_num,
+ priv2->lmac_id);
+ /* update stats */
+ if (pkt_type == PACKET_TYPE_PTP) {
+ priv2->stats.ptp_rx_dropped++;
+ priv2->last_rx_ptp_dropped_jiffies = jiffies;
+ } else if (pkt_type == PACKET_TYPE_ECPRI) {
+ priv2->stats.ecpri_rx_dropped++;
+ priv2->last_rx_dropped_jiffies = jiffies;
+ } else {
+ priv2->stats.rx_dropped++;
+ priv2->last_rx_dropped_jiffies = jiffies;
+ }
+ return;
+ }
+
+ skb = netdev_alloc_skb_ip_align(netdev, len);
+ if (!skb) {
+ netif_err(priv2, rx_err, netdev, "Rx: alloc skb failed\n");
+ return;
+ }
+
+ memcpy(skb->data, buf_ptr, len);
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ /* remove trailing padding for ptp packets */
+ if (skb->protocol == htons(ETH_P_1588)) {
+ ptp_message_len = skb->data[2] << 8 | skb->data[3];
+ skb_trim(skb, ptp_message_len);
+ }
+
+ if (priv2->rx_hw_tstamp_en)
+ skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(tstamp);
+
+ netif_receive_skb(skb);
+
+ /* update stats */
+ if (pkt_type == PACKET_TYPE_PTP) {
+ priv2->stats.ptp_rx_packets++;
+ priv2->last_rx_ptp_jiffies = jiffies;
+ } else if (pkt_type == PACKET_TYPE_ECPRI) {
+ priv2->stats.ecpri_rx_packets++;
+ priv2->last_rx_jiffies = jiffies;
+ } else {
+ priv2->stats.rx_packets++;
+ priv2->last_rx_jiffies = jiffies;
+ }
+ priv2->stats.rx_bytes += skb->len;
+}
+
+static int cnf10k_rfoe_process_rx_flow(struct cnf10k_rfoe_ndev_priv *priv,
+ int pkt_type, int budget)
+{
+ struct otx2_bphy_cdev_priv *cdev_priv = priv->cdev_priv;
+ int count = 0, processed_pkts = 0;
+ struct cnf10k_rx_ft_cfg *ft_cfg;
+ u64 mbt_cfg;
+ u16 nxt_buf;
+ int *mbt_last_idx = &priv->rfoe_common->rx_mbt_last_idx[pkt_type];
+ u16 *prv_nxt_buf = &priv->rfoe_common->nxt_buf[pkt_type];
+
+ ft_cfg = &priv->rx_ft_cfg[pkt_type];
+
+ spin_lock(&cdev_priv->mbt_lock);
+ /* read mbt nxt_buf */
+ writeq(ft_cfg->mbt_idx,
+ priv->rfoe_reg_base +
+ RFOEX_RX_INDIRECT_INDEX_OFFSET(priv->rfoe_num));
+ mbt_cfg = readq(priv->rfoe_reg_base +
+ RFOEX_RX_IND_MBT_CFG(priv->rfoe_num));
+ spin_unlock(&cdev_priv->mbt_lock);
+
+ nxt_buf = (mbt_cfg >> 32) & 0xffff;
+
+ /* no mbt entries to process */
+ if (nxt_buf == *prv_nxt_buf) {
+ netif_dbg(priv, rx_status, priv->netdev,
+ "no rx packets to process, rfoe=%d pkt_type=%d mbt_idx=%d nxt_buf=%d mbt_buf_sw_head=%d\n",
+ priv->rfoe_num, pkt_type, ft_cfg->mbt_idx, nxt_buf,
+ *mbt_last_idx);
+ return 0;
+ }
+
+ *prv_nxt_buf = nxt_buf;
+
+ /* get count of pkts to process, check ring wrap condition */
+ if (*mbt_last_idx > nxt_buf) {
+ count = ft_cfg->num_bufs - *mbt_last_idx;
+ count += nxt_buf;
+ } else {
+ count = nxt_buf - *mbt_last_idx;
+ }
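+ /* e.g. with num_bufs=1024, mbt_buf_sw_head=1020 and nxt_buf=4 the ring
+ * has wrapped, so count = (1024 - 1020) + 4 = 8 packets to process.
+ */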
+
+ netif_dbg(priv, rx_status, priv->netdev,
+ "rfoe=%d pkt_type=%d mbt_idx=%d nxt_buf=%d mbt_buf_sw_head=%d count=%d\n",
+ priv->rfoe_num, pkt_type, ft_cfg->mbt_idx, nxt_buf,
+ *mbt_last_idx, count);
+
+ while (likely((processed_pkts < budget) && (processed_pkts < count))) {
+ cnf10k_rfoe_process_rx_pkt(priv, ft_cfg, *mbt_last_idx);
+
+ (*mbt_last_idx)++;
+ if (*mbt_last_idx == ft_cfg->num_bufs)
+ *mbt_last_idx = 0;
+
+ processed_pkts++;
+ }
+
+ return processed_pkts;
+}
+
+/* napi poll routine */
+static int cnf10k_rfoe_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct cnf10k_rfoe_ndev_priv *priv;
+ struct otx2_bphy_cdev_priv *cdev_priv;
+ int workdone = 0, pkt_type;
+ struct cnf10k_rx_ft_cfg *ft_cfg;
+ u64 intr_en, regval;
+
+ ft_cfg = container_of(napi, struct cnf10k_rx_ft_cfg, napi);
+ priv = ft_cfg->priv;
+ cdev_priv = priv->cdev_priv;
+ pkt_type = ft_cfg->pkt_type;
+
+ /* pkt processing loop */
+ workdone += cnf10k_rfoe_process_rx_flow(priv, pkt_type, budget);
+
+ if (workdone < budget) {
+ napi_complete_done(napi, workdone);
+
+ /* Re enable the Rx interrupts */
+ intr_en = PKT_TYPE_TO_INTR(pkt_type) <<
+ CNF10K_RFOE_RX_INTR_SHIFT(priv->rfoe_num);
+ spin_lock(&cdev_priv->lock);
+ if (priv->rfoe_num < 6) {
+ regval = readq(bphy_reg_base + PSM_INT_GP_ENA_W1S(1));
+ regval |= intr_en;
+ writeq(regval, bphy_reg_base + PSM_INT_GP_ENA_W1S(1));
+ } else {
+ regval = readq(bphy_reg_base + PSM_INT_GP_ENA_W1S(2));
+ regval |= intr_en;
+ writeq(regval, bphy_reg_base + PSM_INT_GP_ENA_W1S(2));
+ }
+ spin_unlock(&cdev_priv->lock);
+ }
+
+ return workdone;
+}
+
+/* Rx GPINT napi schedule api */
+void cnf10k_rfoe_rx_napi_schedule(int rfoe_num, u32 status)
+{
+ enum bphy_netdev_packet_type pkt_type;
+ struct cnf10k_rfoe_drv_ctx *drv_ctx;
+ struct cnf10k_rfoe_ndev_priv *priv;
+ struct cnf10k_rx_ft_cfg *ft_cfg;
+ int intf, bit_idx;
+ u32 intr_sts;
+ u64 regval;
+
+ for (intf = 0; intf < CNF10K_RFOE_MAX_INTF; intf++) {
+ drv_ctx = &cnf10k_rfoe_drv_ctx[intf];
+ /* ignore lmac, one interrupt/pkt_type/rfoe */
+ if (!(drv_ctx->valid && drv_ctx->rfoe_num == rfoe_num))
+ continue;
+ /* check if i/f down, napi disabled */
+ priv = netdev_priv(drv_ctx->netdev);
+ if (test_bit(RFOE_INTF_DOWN, &priv->state))
+ continue;
+ /* check rx pkt type */
+ intr_sts = ((status >> CNF10K_RFOE_RX_INTR_SHIFT(rfoe_num)) &
+ RFOE_RX_INTR_EN);
+ for (bit_idx = 0; bit_idx < PACKET_TYPE_MAX; bit_idx++) {
+ if (!(intr_sts & BIT(bit_idx)))
+ continue;
+ pkt_type = INTR_TO_PKT_TYPE(bit_idx);
+ if (unlikely(!(priv->pkt_type_mask & (1U << pkt_type))))
+ continue;
+ /* clear intr enable bit, re-enable in napi handler */
+ regval = PKT_TYPE_TO_INTR(pkt_type) <<
+ CNF10K_RFOE_RX_INTR_SHIFT(rfoe_num);
+ if (rfoe_num < 6)
+ writeq(regval, bphy_reg_base + PSM_INT_GP_ENA_W1C(1));
+ else
+ writeq(regval, bphy_reg_base + PSM_INT_GP_ENA_W1C(2));
+ /* schedule napi */
+ ft_cfg = &drv_ctx->ft_cfg[pkt_type];
+ napi_schedule(&ft_cfg->napi);
+ }
+ /* napi scheduled per pkt_type, return */
+ return;
+ }
+}
+
+static void cnf10k_rfoe_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct cnf10k_rfoe_ndev_priv *priv = netdev_priv(netdev);
+ struct otx2_rfoe_stats *dev_stats = &priv->stats;
+
+ stats->rx_bytes = dev_stats->rx_bytes;
+ stats->rx_packets = dev_stats->rx_packets +
+ dev_stats->ptp_rx_packets +
+ dev_stats->ecpri_rx_packets;
+ stats->rx_dropped = dev_stats->rx_dropped +
+ dev_stats->ptp_rx_dropped +
+ dev_stats->ecpri_rx_dropped;
+
+ stats->tx_bytes = dev_stats->tx_bytes;
+ stats->tx_packets = dev_stats->tx_packets +
+ dev_stats->ptp_tx_packets +
+ dev_stats->ecpri_tx_packets;
+ stats->tx_dropped = dev_stats->tx_dropped +
+ dev_stats->ptp_tx_dropped +
+ dev_stats->ecpri_tx_dropped;
+}
+
+static int cnf10k_rfoe_config_hwtstamp(struct net_device *netdev,
+ struct ifreq *ifr)
+{
+ struct cnf10k_rfoe_ndev_priv *priv = netdev_priv(netdev);
+ struct hwtstamp_config config;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ /* reserved for future extensions */
+ if (config.flags)
+ return -EINVAL;
+
+	/* PTP HW timestamping is always enabled; only mark the SW flags
+	 * here so that TX PTP requests are submitted to the PTP PSM queue
+	 * and the RX timestamp is copied to the skb
+	 */
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ priv->tx_hw_tstamp_en = 0;
+ break;
+ case HWTSTAMP_TX_ON:
+ priv->tx_hw_tstamp_en = 1;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ priv->rx_hw_tstamp_en = 0;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ priv->rx_hw_tstamp_en = 1;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
+ return -EFAULT;
+
+ return 0;
+}
+
+/* netdev ioctl */
+static int cnf10k_rfoe_ioctl(struct net_device *netdev, struct ifreq *req,
+ int cmd)
+{
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return cnf10k_rfoe_config_hwtstamp(netdev, req);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/* netdev xmit */
+static netdev_tx_t cnf10k_rfoe_eth_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct cnf10k_rfoe_ndev_priv *priv = netdev_priv(netdev);
+ struct mhbw_jd_dma_cfg_word_0_s *jd_dma_cfg_word_0;
+ struct mhbw_jd_dma_cfg_word_1_s *jd_dma_cfg_word_1;
+ struct mhab_job_desc_cfg *jd_cfg_ptr;
+ struct rfoe_tx_ptp_tstmp_s *tx_tstmp;
+ struct psm_cmd_addjob_s *psm_cmd_lo;
+ struct tx_job_queue_cfg *job_cfg;
+ struct tx_job_entry *job_entry;
+ struct ptp_tstamp_skb *ts_skb;
+ int psm_queue_id, queue_space;
+ u64 jd_cfg_ptr_iova, regval;
+ unsigned long flags;
+ struct ethhdr *eth;
+ int pkt_type = 0;
+
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+ if (!priv->tx_hw_tstamp_en) {
+ netif_dbg(priv, tx_queued, priv->netdev,
+ "skb HW timestamp requested but not enabled, this packet will not be timestamped\n");
+ job_cfg = &priv->rfoe_common->tx_oth_job_cfg;
+ pkt_type = PACKET_TYPE_OTHER;
+ } else {
+ job_cfg = &priv->tx_ptp_job_cfg;
+ pkt_type = PACKET_TYPE_PTP;
+ }
+ } else {
+ job_cfg = &priv->rfoe_common->tx_oth_job_cfg;
+ eth = (struct ethhdr *)skb->data;
+		if (eth->h_proto == htons(ETH_P_ECPRI))
+ pkt_type = PACKET_TYPE_ECPRI;
+ else
+ pkt_type = PACKET_TYPE_OTHER;
+ }
+
+ spin_lock_irqsave(&job_cfg->lock, flags);
+
+ if (unlikely(priv->if_type != IF_TYPE_ETHERNET)) {
+ netif_err(priv, tx_queued, netdev,
+ "%s {rfoe%d lmac%d} invalid intf mode, drop pkt\n",
+ netdev->name, priv->rfoe_num, priv->lmac_id);
+ /* update stats */
+ priv->stats.tx_dropped++;
+ priv->last_tx_dropped_jiffies = jiffies;
+ goto exit;
+ }
+
+ if (unlikely(!netif_carrier_ok(netdev))) {
+ netif_err(priv, tx_err, netdev,
+ "%s {rfoe%d lmac%d} link down, drop pkt\n",
+ netdev->name, priv->rfoe_num,
+ priv->lmac_id);
+ /* update stats */
+ if (pkt_type == PACKET_TYPE_ECPRI) {
+ priv->stats.ecpri_tx_dropped++;
+ priv->last_tx_dropped_jiffies = jiffies;
+ } else if (pkt_type == PACKET_TYPE_PTP) {
+ priv->stats.ptp_tx_dropped++;
+ priv->last_tx_ptp_dropped_jiffies = jiffies;
+ } else {
+ priv->stats.tx_dropped++;
+ priv->last_tx_dropped_jiffies = jiffies;
+ }
+
+ goto exit;
+ }
+
+ if (unlikely(!(priv->pkt_type_mask & (1U << pkt_type)))) {
+ netif_err(priv, tx_queued, netdev,
+ "%s {rfoe%d lmac%d} pkt not supported, drop pkt\n",
+ netdev->name, priv->rfoe_num,
+ priv->lmac_id);
+ /* update stats */
+ if (pkt_type == PACKET_TYPE_ECPRI) {
+ priv->stats.ecpri_tx_dropped++;
+ priv->last_tx_dropped_jiffies = jiffies;
+ } else if (pkt_type == PACKET_TYPE_PTP) {
+ priv->stats.ptp_tx_dropped++;
+ priv->last_tx_ptp_dropped_jiffies = jiffies;
+ } else {
+ priv->stats.tx_dropped++;
+ priv->last_tx_dropped_jiffies = jiffies;
+ }
+
+ goto exit;
+ }
+
+ /* get psm queue number */
+ psm_queue_id = job_cfg->psm_queue_id;
+ netif_dbg(priv, tx_queued, priv->netdev,
+ "psm: queue(%d): cfg=0x%llx ptr=0x%llx space=0x%llx\n",
+ psm_queue_id,
+ readq(priv->psm_reg_base + PSM_QUEUE_CFG(psm_queue_id)),
+ readq(priv->psm_reg_base + PSM_QUEUE_PTR(psm_queue_id)),
+ readq(priv->psm_reg_base + PSM_QUEUE_SPACE(psm_queue_id)));
+
+ /* check psm queue space available */
+ regval = readq(priv->psm_reg_base + PSM_QUEUE_SPACE(psm_queue_id));
+ queue_space = regval & 0x7FFF;
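+	/* the low 15 bits of PSM_QUEUE_SPACE are treated as the free
+	 * command-slot count; PTP jobs bypass this check
+	 */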
+ if (queue_space < 1 && pkt_type != PACKET_TYPE_PTP) {
+ netif_err(priv, tx_err, netdev,
+ "no space in psm queue %d, dropping pkt\n",
+ psm_queue_id);
+ netif_stop_queue(netdev);
+ dev_kfree_skb_any(skb);
+ /* update stats */
+ if (pkt_type == PACKET_TYPE_ECPRI)
+ priv->stats.ecpri_tx_dropped++;
+ else
+ priv->stats.tx_dropped++;
+
+ priv->last_tx_dropped_jiffies = jiffies;
+
+ mod_timer(&priv->tx_timer, jiffies + msecs_to_jiffies(100));
+ spin_unlock_irqrestore(&job_cfg->lock, flags);
+ return NETDEV_TX_OK;
+ }
+
+ /* get the tx job entry */
+ job_entry = (struct tx_job_entry *)
+ &job_cfg->job_entries[job_cfg->q_idx];
+
+ netif_dbg(priv, tx_queued, priv->netdev,
+ "rfoe=%d lmac=%d psm_queue=%d tx_job_entry %d job_cmd_lo=0x%llx job_cmd_high=0x%llx jd_iova_addr=0x%llx\n",
+ priv->rfoe_num, priv->lmac_id, psm_queue_id, job_cfg->q_idx,
+ job_entry->job_cmd_lo, job_entry->job_cmd_hi,
+ job_entry->jd_iova_addr);
+
+ /* hw timestamp */
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ priv->tx_hw_tstamp_en) {
+ if (list_empty(&priv->ptp_skb_list.list) &&
+ !test_and_set_bit_lock(PTP_TX_IN_PROGRESS, &priv->state)) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ priv->ptp_tx_skb = skb;
+ psm_cmd_lo = (struct psm_cmd_addjob_s *)
+ &job_entry->job_cmd_lo;
+ priv->ptp_job_tag = psm_cmd_lo->jobtag;
+
+ /* ptp timestamp entry is 128-bit in size */
+ tx_tstmp = (struct rfoe_tx_ptp_tstmp_s *)
+ ((u8 *)priv->ptp_ring_cfg.ptp_ring_base +
+ (16 * priv->ptp_ring_cfg.ptp_ring_idx));
+ memset(tx_tstmp, 0, sizeof(struct rfoe_tx_ptp_tstmp_s));
+ } else {
+ /* check ptp queue count */
+ if (priv->ptp_skb_list.count >= max_ptp_req) {
+ netif_err(priv, tx_err, netdev,
+ "ptp list full, dropping pkt\n");
+ priv->stats.ptp_tx_dropped++;
+ priv->last_tx_ptp_dropped_jiffies = jiffies;
+ goto exit;
+ }
+ /* allocate and add ptp req to queue */
+ ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
+ if (!ts_skb) {
+ priv->stats.ptp_tx_dropped++;
+ priv->last_tx_ptp_dropped_jiffies = jiffies;
+ goto exit;
+ }
+ ts_skb->skb = skb;
+ list_add_tail(&ts_skb->list, &priv->ptp_skb_list.list);
+ priv->ptp_skb_list.count++;
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ priv->stats.ptp_tx_packets++;
+ priv->stats.tx_bytes += skb->len;
+ /* sw timestamp */
+ skb_tx_timestamp(skb);
+ goto exit; /* submit the packet later */
+ }
+ }
+
+ /* sw timestamp */
+ skb_tx_timestamp(skb);
+
+ if (unlikely(netif_msg_pktdata(priv))) {
+ netdev_printk(KERN_DEBUG, priv->netdev, "Tx: skb %pS len=%d\n",
+ skb, skb->len);
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 4,
+ skb->data, skb->len, true);
+ }
+
+ /* update length and block size in jd dma cfg word */
+ jd_cfg_ptr_iova = *(u64 *)((u8 *)job_entry->jd_ptr + 8);
+ jd_cfg_ptr = otx2_iova_to_virt(priv->iommu_domain, jd_cfg_ptr_iova);
+ jd_cfg_ptr->cfg3.pkt_len = skb->len;
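+	/* block_size is in 4-byte words, with the packet length first
+	 * rounded up to a 16-byte boundary
+	 */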
+ jd_dma_cfg_word_0 = (struct mhbw_jd_dma_cfg_word_0_s *)
+ job_entry->rd_dma_ptr;
+ jd_dma_cfg_word_0->block_size = (((skb->len + 15) >> 4) * 4);
+
+ /* update rfoe_mode and lmac id for non-ptp (shared) psm job entry */
+ if (pkt_type != PACKET_TYPE_PTP) {
+ jd_cfg_ptr->cfg3.lmacid = priv->lmac_id & 0x3;
+ if (pkt_type == PACKET_TYPE_ECPRI)
+ jd_cfg_ptr->cfg.rfoe_mode = 1;
+ else
+ jd_cfg_ptr->cfg.rfoe_mode = 0;
+ }
+
+ /* copy packet data to rd_dma_ptr start addr */
+ jd_dma_cfg_word_1 = (struct mhbw_jd_dma_cfg_word_1_s *)
+ ((u8 *)job_entry->rd_dma_ptr + 8);
+ memcpy(otx2_iova_to_virt(priv->iommu_domain,
+ jd_dma_cfg_word_1->start_addr),
+ skb->data, skb->len);
+
+ /* make sure that all memory writes are completed */
+ dma_wmb();
+
+ /* submit PSM job */
+ writeq(job_entry->job_cmd_lo,
+ priv->psm_reg_base + PSM_QUEUE_CMD_LO(psm_queue_id));
+ writeq(job_entry->job_cmd_hi,
+ priv->psm_reg_base + PSM_QUEUE_CMD_HI(psm_queue_id));
+
+ /* update stats */
+ if (pkt_type == PACKET_TYPE_ECPRI) {
+ priv->stats.ecpri_tx_packets++;
+ priv->last_tx_jiffies = jiffies;
+ } else if (pkt_type == PACKET_TYPE_PTP) {
+ priv->stats.ptp_tx_packets++;
+ priv->last_tx_ptp_jiffies = jiffies;
+ } else {
+ priv->stats.tx_packets++;
+ priv->last_tx_jiffies = jiffies;
+ }
+ priv->stats.tx_bytes += skb->len;
+
+ /* increment queue index */
+ job_cfg->q_idx++;
+ if (job_cfg->q_idx == job_cfg->num_entries)
+ job_cfg->q_idx = 0;
+exit:
+ if (!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
+ dev_kfree_skb_any(skb);
+
+ spin_unlock_irqrestore(&job_cfg->lock, flags);
+
+ return NETDEV_TX_OK;
+}
+
+/* netdev open */
+static int cnf10k_rfoe_eth_open(struct net_device *netdev)
+{
+ struct cnf10k_rfoe_ndev_priv *priv = netdev_priv(netdev);
+ int idx;
+
+ for (idx = 0; idx < PACKET_TYPE_MAX; idx++) {
+ if (!(priv->pkt_type_mask & (1U << idx)))
+ continue;
+ napi_enable(&priv->rx_ft_cfg[idx].napi);
+ }
+
+ priv->ptp_tx_skb = NULL;
+
+ netif_carrier_on(netdev);
+ netif_start_queue(netdev);
+
+ clear_bit(RFOE_INTF_DOWN, &priv->state);
+ priv->link_state = 1;
+
+ return 0;
+}
+
+/* netdev close */
+static int cnf10k_rfoe_eth_stop(struct net_device *netdev)
+{
+ struct cnf10k_rfoe_ndev_priv *priv = netdev_priv(netdev);
+ struct ptp_tstamp_skb *ts_skb, *ts_skb2;
+ int idx;
+
+ set_bit(RFOE_INTF_DOWN, &priv->state);
+
+ netif_stop_queue(netdev);
+ netif_carrier_off(netdev);
+ priv->link_state = 0;
+
+ for (idx = 0; idx < PACKET_TYPE_MAX; idx++) {
+ if (!(priv->pkt_type_mask & (1U << idx)))
+ continue;
+ napi_disable(&priv->rx_ft_cfg[idx].napi);
+ }
+
+ del_timer_sync(&priv->tx_timer);
+
+ /* cancel any pending ptp work item in progress */
+ cancel_work_sync(&priv->ptp_tx_work);
+ if (priv->ptp_tx_skb) {
+ dev_kfree_skb_any(priv->ptp_tx_skb);
+ priv->ptp_tx_skb = NULL;
+ clear_bit_unlock(PTP_TX_IN_PROGRESS, &priv->state);
+ }
+
+ /* clear ptp skb list */
+ cancel_work_sync(&priv->ptp_queue_work);
+ list_for_each_entry_safe(ts_skb, ts_skb2,
+ &priv->ptp_skb_list.list, list) {
+ list_del(&ts_skb->list);
+ kfree(ts_skb);
+ }
+ priv->ptp_skb_list.count = 0;
+
+ return 0;
+}
+
+static int cnf10k_rfoe_init(struct net_device *netdev)
+{
+ struct cnf10k_rfoe_ndev_priv *priv = netdev_priv(netdev);
+
+ /* Enable VLAN TPID match */
+ writeq(0x18100, (priv->rfoe_reg_base +
+ RFOEX_RX_VLANX_CFG(priv->rfoe_num, 0)));
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ return 0;
+}
+
+static int cnf10k_rfoe_vlan_rx_configure(struct net_device *netdev, u16 vid,
+ bool forward)
+{
+ struct cnf10k_rfoe_ndev_priv *priv = netdev_priv(netdev);
+ struct otx2_bphy_cdev_priv *cdev_priv = priv->cdev_priv;
+ struct rfoe_rx_ind_vlanx_fwd fwd;
+ unsigned long flags;
+ u64 mask, index;
+
+ if (vid >= VLAN_N_VID) {
+ netdev_err(netdev, "Invalid VLAN ID %d\n", vid);
+ return -EINVAL;
+ }
+
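+	/* the Rx VLAN forward table is a 4096-bit bitmap spread across
+	 * 64-bit RX_IND_VLANX_FWD words: 'index' selects the word via
+	 * the indirect index register, 'mask' the bit within it
+	 */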
+ mask = (0x1ll << (vid & 0x3F));
+ index = (vid >> 6) & 0x3F;
+
+ spin_lock_irqsave(&cdev_priv->mbt_lock, flags);
+
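+	/* the bitmap is shared by all netdevs on this RFOE, so only the
+	 * first add and the last kill for a vid touch the hardware
+	 */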
+ if (forward && priv->rfoe_common->rx_vlan_fwd_refcnt[vid]++)
+ goto out;
+
+ if (!forward && --priv->rfoe_common->rx_vlan_fwd_refcnt[vid])
+ goto out;
+
+ /* read current fwd mask */
+ writeq(index, (priv->rfoe_reg_base +
+ RFOEX_RX_INDIRECT_INDEX_OFFSET(priv->rfoe_num)));
+ fwd.fwd = readq(priv->rfoe_reg_base +
+ RFOEX_RX_IND_VLANX_FWD(priv->rfoe_num, 0));
+
+ if (forward)
+ fwd.fwd |= mask;
+ else
+ fwd.fwd &= ~mask;
+
+ /* write the new fwd mask */
+ writeq(index, (priv->rfoe_reg_base +
+ RFOEX_RX_INDIRECT_INDEX_OFFSET(priv->rfoe_num)));
+ writeq(fwd.fwd, (priv->rfoe_reg_base +
+ RFOEX_RX_IND_VLANX_FWD(priv->rfoe_num, 0)));
+
+out:
+ spin_unlock_irqrestore(&cdev_priv->mbt_lock, flags);
+
+ return 0;
+}
+
+static int cnf10k_rfoe_vlan_rx_add(struct net_device *netdev, __be16 proto,
+ u16 vid)
+{
+ return cnf10k_rfoe_vlan_rx_configure(netdev, vid, true);
+}
+
+static int cnf10k_rfoe_vlan_rx_kill(struct net_device *netdev, __be16 proto,
+ u16 vid)
+{
+ return cnf10k_rfoe_vlan_rx_configure(netdev, vid, false);
+}
+
+static const struct net_device_ops cnf10k_rfoe_netdev_ops = {
+ .ndo_init = cnf10k_rfoe_init,
+ .ndo_open = cnf10k_rfoe_eth_open,
+ .ndo_stop = cnf10k_rfoe_eth_stop,
+ .ndo_start_xmit = cnf10k_rfoe_eth_start_xmit,
+ .ndo_do_ioctl = cnf10k_rfoe_ioctl,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_get_stats64 = cnf10k_rfoe_get_stats64,
+ .ndo_vlan_rx_add_vid = cnf10k_rfoe_vlan_rx_add,
+ .ndo_vlan_rx_kill_vid = cnf10k_rfoe_vlan_rx_kill,
+};
+
+static void cnf10k_rfoe_dump_rx_ft_cfg(struct cnf10k_rfoe_ndev_priv *priv)
+{
+ struct cnf10k_rx_ft_cfg *ft_cfg;
+ int idx;
+
+ for (idx = 0; idx < PACKET_TYPE_MAX; idx++) {
+ if (!(priv->pkt_type_mask & (1U << idx)))
+ continue;
+ ft_cfg = &priv->rx_ft_cfg[idx];
+ pr_debug("rfoe=%d lmac=%d pkttype=%d flowid=%d mbt: idx=%d size=%d nbufs=%d iova=0x%llx jdt: idx=%d size=%d num_jd=%d iova=0x%llx\n",
+ priv->rfoe_num, priv->lmac_id, ft_cfg->pkt_type,
+ ft_cfg->flow_id, ft_cfg->mbt_idx, ft_cfg->buf_size,
+ ft_cfg->num_bufs, ft_cfg->mbt_iova_addr,
+ ft_cfg->jdt_idx, ft_cfg->jd_size, ft_cfg->num_jd,
+ ft_cfg->jdt_iova_addr);
+ }
+}
+
+static void cnf10k_rfoe_fill_rx_ft_cfg(struct cnf10k_rfoe_ndev_priv *priv,
+ struct cnf10k_bphy_ndev_comm_if *if_cfg)
+{
+ struct otx2_bphy_cdev_priv *cdev_priv = priv->cdev_priv;
+ struct cnf10k_bphy_ndev_rbuf_info *rbuf_info;
+ struct cnf10k_rx_ft_cfg *ft_cfg;
+ u64 jdt_cfg0, iova;
+ int idx;
+
+ /* RX flow table configuration */
+ for (idx = 0; idx < PACKET_TYPE_MAX; idx++) {
+ if (!(priv->pkt_type_mask & (1U << idx)))
+ continue;
+ ft_cfg = &priv->rx_ft_cfg[idx];
+ rbuf_info = &if_cfg->rbuf_info[idx];
+ ft_cfg->pkt_type = rbuf_info->pkt_type;
+ ft_cfg->gp_int_num = rbuf_info->gp_int_num;
+ ft_cfg->flow_id = rbuf_info->flow_id;
+ ft_cfg->mbt_idx = rbuf_info->mbt_index;
+ ft_cfg->buf_size = rbuf_info->buf_size * 16;
+ ft_cfg->num_bufs = rbuf_info->num_bufs;
+ ft_cfg->mbt_iova_addr = rbuf_info->mbt_iova_addr;
+ iova = ft_cfg->mbt_iova_addr;
+ ft_cfg->mbt_virt_addr = otx2_iova_to_virt(priv->iommu_domain,
+ iova);
+ ft_cfg->jdt_idx = rbuf_info->jdt_index;
+ ft_cfg->jd_size = rbuf_info->jd_size * 8;
+ ft_cfg->num_jd = rbuf_info->num_jd;
+ ft_cfg->jdt_iova_addr = rbuf_info->jdt_iova_addr;
+ iova = ft_cfg->jdt_iova_addr;
+ ft_cfg->jdt_virt_addr = otx2_iova_to_virt(priv->iommu_domain,
+ iova);
+ spin_lock(&cdev_priv->mbt_lock);
+ writeq(ft_cfg->jdt_idx,
+ (priv->rfoe_reg_base +
+ RFOEX_RX_INDIRECT_INDEX_OFFSET(priv->rfoe_num)));
+ jdt_cfg0 = readq(priv->rfoe_reg_base +
+ RFOEX_RX_IND_JDT_CFG0(priv->rfoe_num));
+ spin_unlock(&cdev_priv->mbt_lock);
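+		/* JDT_CFG0 bits [32:27] hold the JD read offset in 8-byte
+		 * units (converted to bytes below) and bits [56:52] the
+		 * packet offset
+		 */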
+ ft_cfg->jd_rd_offset = ((jdt_cfg0 >> 27) & 0x3f) * 8;
+ ft_cfg->pkt_offset = (u8)((jdt_cfg0 >> 52) & 0x1f);
+ ft_cfg->priv = priv;
+ netif_napi_add(priv->netdev, &ft_cfg->napi,
+ cnf10k_rfoe_napi_poll,
+ NAPI_POLL_WEIGHT);
+ }
+}
+
+static void cnf10k_rfoe_fill_tx_job_entries(struct cnf10k_rfoe_ndev_priv *priv,
+ struct tx_job_queue_cfg *job_cfg,
+ struct cnf10k_bphy_ndev_tx_psm_cmd_info *tx_job,
+ int num_entries)
+{
+ struct tx_job_entry *job_entry;
+ u64 jd_cfg_iova, iova;
+ int i;
+
+ for (i = 0; i < num_entries; i++) {
+ job_entry = &job_cfg->job_entries[i];
+ job_entry->job_cmd_lo = tx_job->low_cmd;
+ job_entry->job_cmd_hi = tx_job->high_cmd;
+ job_entry->jd_iova_addr = tx_job->jd_iova_addr;
+ iova = job_entry->jd_iova_addr;
+ job_entry->jd_ptr = otx2_iova_to_virt(priv->iommu_domain, iova);
+ jd_cfg_iova = *(u64 *)((u8 *)job_entry->jd_ptr + 8);
+ job_entry->jd_cfg_ptr = otx2_iova_to_virt(priv->iommu_domain,
+ jd_cfg_iova);
+ job_entry->rd_dma_iova_addr = tx_job->rd_dma_iova_addr;
+ iova = job_entry->rd_dma_iova_addr;
+ job_entry->rd_dma_ptr = otx2_iova_to_virt(priv->iommu_domain,
+ iova);
+ pr_debug("job_cmd_lo=0x%llx job_cmd_hi=0x%llx jd_iova_addr=0x%llx rd_dma_iova_addr=%llx\n",
+ tx_job->low_cmd, tx_job->high_cmd,
+ tx_job->jd_iova_addr, tx_job->rd_dma_iova_addr);
+ tx_job++;
+ }
+ /* get psm queue id */
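+	/* (bits [15:8] of the low command word, i.e. the qid field of
+	 * psm_cmd_addjob_s)
+	 */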
+ job_entry = &job_cfg->job_entries[0];
+ job_cfg->psm_queue_id = (job_entry->job_cmd_lo >> 8) & 0xff;
+ job_cfg->q_idx = 0;
+ job_cfg->num_entries = num_entries;
+ spin_lock_init(&job_cfg->lock);
+}
+
+int cnf10k_rfoe_parse_and_init_intf(struct otx2_bphy_cdev_priv *cdev,
+ struct cnf10k_rfoe_ndev_comm_intf_cfg *cfg)
+{
+ int i, intf_idx = 0, num_entries, lmac, idx, ret;
+ struct cnf10k_bphy_ndev_tx_psm_cmd_info *tx_info;
+ struct cnf10k_bphy_ndev_tx_ptp_ring_info *info;
+ struct cnf10k_rfoe_drv_ctx *drv_ctx = NULL;
+ struct cnf10k_rfoe_ndev_priv *priv, *priv2;
+ struct cnf10k_bphy_ndev_rfoe_if *rfoe_cfg;
+ struct cnf10k_bphy_ndev_comm_if *if_cfg;
+ struct tx_ptp_ring_cfg *ptp_ring_cfg;
+ struct tx_job_queue_cfg *tx_cfg;
+ struct cnf10k_rx_ft_cfg *ft_cfg;
+ struct net_device *netdev;
+ u8 pkt_type_mask;
+
+ cdev->hw_version = cfg->hw_params.chip_ver;
+ dev_dbg(cdev->dev, "hw_version = 0x%x\n", cfg->hw_params.chip_ver);
+
+ if (CHIP_CNF10KB(cdev->hw_version)) {
+ cdev->num_rfoe_mhab = 7;
+ cdev->num_rfoe_lmac = 2;
+ cdev->tot_rfoe_intf = 14;
+ } else if (CHIP_CNF10KA(cdev->hw_version)) {
+ cdev->num_rfoe_mhab = 2;
+ cdev->num_rfoe_lmac = 4;
+ cdev->tot_rfoe_intf = 8;
+ } else {
+ dev_err(cdev->dev, "unsupported chip version\n");
+ return -EINVAL;
+ }
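+	/* in either case tot_rfoe_intf == num_rfoe_mhab * num_rfoe_lmac */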
+
+ for (i = 0; i < BPHY_MAX_RFOE_MHAB; i++) {
+ priv2 = NULL;
+ rfoe_cfg = &cfg->rfoe_if_cfg[i];
+ pkt_type_mask = rfoe_cfg->pkt_type_mask;
+ for (lmac = 0; lmac < MAX_LMAC_PER_RFOE; lmac++) {
+ if_cfg = &rfoe_cfg->if_cfg[lmac];
+ /* check if lmac is valid */
+ if (!if_cfg->lmac_info.is_valid) {
+ dev_dbg(cdev->dev,
+ "rfoe%d lmac%d invalid, skipping\n",
+ i, lmac);
+ continue;
+ }
+ if (lmac >= cdev->num_rfoe_lmac) {
+ dev_dbg(cdev->dev,
+ "rfoe%d, lmac%d not supported, skipping\n",
+ i, lmac);
+ continue;
+ }
+ netdev = alloc_etherdev(sizeof(*priv));
+ if (!netdev) {
+ dev_err(cdev->dev,
+ "error allocating net device\n");
+ ret = -ENOMEM;
+ goto err_exit;
+ }
+ priv = netdev_priv(netdev);
+ memset(priv, 0, sizeof(*priv));
+ if (!priv2) {
+ priv->rfoe_common =
+ kzalloc(sizeof(struct rfoe_common_cfg),
+ GFP_KERNEL);
+ if (!priv->rfoe_common) {
+ dev_err(cdev->dev, "kzalloc failed\n");
+ free_netdev(netdev);
+ ret = -ENOMEM;
+ goto err_exit;
+ }
+ priv->rfoe_common->refcnt = 1;
+ }
+ spin_lock_init(&priv->lock);
+ priv->netdev = netdev;
+ priv->cdev_priv = cdev;
+ priv->msg_enable = netif_msg_init(-1, 0);
+ spin_lock_init(&priv->stats.lock);
+ priv->rfoe_num = if_cfg->lmac_info.rfoe_num;
+ priv->lmac_id = if_cfg->lmac_info.lane_num;
+ priv->if_type = IF_TYPE_ETHERNET;
+ memcpy(priv->mac_addr, if_cfg->lmac_info.eth_addr,
+ ETH_ALEN);
+ if (is_valid_ether_addr(priv->mac_addr))
+ ether_addr_copy(netdev->dev_addr,
+ priv->mac_addr);
+ else
+ random_ether_addr(netdev->dev_addr);
+ priv->pdev = pci_get_device(OTX2_BPHY_PCI_VENDOR_ID,
+ OTX2_BPHY_PCI_DEVICE_ID,
+ NULL);
+ priv->iommu_domain =
+ iommu_get_domain_for_dev(&priv->pdev->dev);
+ priv->bphy_reg_base = bphy_reg_base;
+ priv->psm_reg_base = psm_reg_base;
+ priv->rfoe_reg_base = rfoe_reg_base;
+ priv->bcn_reg_base = bcn_reg_base;
+ priv->ptp_reg_base = ptp_reg_base;
+
+			/* Initialise PTP TX work items */
+ INIT_WORK(&priv->ptp_tx_work, cnf10k_rfoe_ptp_tx_work);
+ INIT_WORK(&priv->ptp_queue_work,
+ cnf10k_rfoe_ptp_submit_work);
+
+ /* Initialise PTP skb list */
+ INIT_LIST_HEAD(&priv->ptp_skb_list.list);
+ priv->ptp_skb_list.count = 0;
+ timer_setup(&priv->tx_timer,
+ cnf10k_rfoe_tx_timer_cb, 0);
+
+ priv->pkt_type_mask = pkt_type_mask;
+ cnf10k_rfoe_fill_rx_ft_cfg(priv, if_cfg);
+ cnf10k_rfoe_dump_rx_ft_cfg(priv);
+
+ /* TX PTP job configuration */
+ if (priv->pkt_type_mask & (1U << PACKET_TYPE_PTP)) {
+ tx_cfg = &priv->tx_ptp_job_cfg;
+ tx_info = &if_cfg->ptp_pkt_info[0];
+ num_entries = MAX_PTP_MSG_PER_LMAC;
+ cnf10k_rfoe_fill_tx_job_entries(priv, tx_cfg,
+ tx_info,
+ num_entries);
+ /* fill ptp ring info */
+ ptp_ring_cfg = &priv->ptp_ring_cfg;
+ info = &if_cfg->ptp_ts_ring_info[0];
+ ptp_ring_cfg->ptp_ring_base =
+ otx2_iova_to_virt(priv->iommu_domain,
+ info->ring_iova_addr);
+ ptp_ring_cfg->ptp_ring_id = info->ring_idx;
+ ptp_ring_cfg->ptp_ring_size = info->ring_size;
+ ptp_ring_cfg->ptp_ring_idx = 0;
+ }
+
+ /* TX ECPRI/OTH(PTP) job configuration */
+ if (!priv2 &&
+ ((priv->pkt_type_mask &
+ (1U << PACKET_TYPE_OTHER)) ||
+ (priv->pkt_type_mask &
+ (1U << PACKET_TYPE_ECPRI)))) {
+ num_entries = cdev->num_rfoe_lmac *
+ MAX_OTH_MSG_PER_LMAC;
+ tx_cfg = &priv->rfoe_common->tx_oth_job_cfg;
+ tx_info = &rfoe_cfg->oth_pkt_info[0];
+ cnf10k_rfoe_fill_tx_job_entries(priv, tx_cfg,
+ tx_info,
+ num_entries);
+ } else {
+ /* share rfoe_common data */
+ priv->rfoe_common = priv2->rfoe_common;
+ ++(priv->rfoe_common->refcnt);
+ }
+
+ /* keep last (rfoe + lmac) priv structure */
+ if (!priv2)
+ priv2 = priv;
+
+ intf_idx = (i * cdev->num_rfoe_lmac) + lmac;
+ snprintf(netdev->name, sizeof(netdev->name),
+ "rfoe%d", intf_idx);
+ netdev->netdev_ops = &cnf10k_rfoe_netdev_ops;
+ cnf10k_rfoe_set_ethtool_ops(netdev);
+ cnf10k_rfoe_ptp_init(priv);
+ netdev->watchdog_timeo = (15 * HZ);
+ netdev->mtu = 1500U;
+ netdev->min_mtu = ETH_MIN_MTU;
+ netdev->max_mtu = 1500U;
+ ret = register_netdev(netdev);
+ if (ret < 0) {
+ dev_err(cdev->dev,
+ "failed to register net device %s\n",
+ netdev->name);
+ free_netdev(netdev);
+ ret = -ENODEV;
+ goto err_exit;
+ }
+ dev_dbg(cdev->dev, "net device %s registered\n",
+ netdev->name);
+
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+ set_bit(RFOE_INTF_DOWN, &priv->state);
+ priv->link_state = 0;
+
+ /* initialize global ctx */
+ drv_ctx = &cnf10k_rfoe_drv_ctx[intf_idx];
+ drv_ctx->rfoe_num = priv->rfoe_num;
+ drv_ctx->lmac_id = priv->lmac_id;
+ drv_ctx->valid = 1;
+ drv_ctx->netdev = netdev;
+ drv_ctx->ft_cfg = &priv->rx_ft_cfg[0];
+ }
+ }
+
+ return 0;
+
+err_exit:
+ for (i = 0; i < CNF10K_RFOE_MAX_INTF; i++) {
+ drv_ctx = &cnf10k_rfoe_drv_ctx[i];
+ if (drv_ctx->valid) {
+ netdev = drv_ctx->netdev;
+ priv = netdev_priv(netdev);
+ cnf10k_rfoe_ptp_destroy(priv);
+ unregister_netdev(netdev);
+ for (idx = 0; idx < PACKET_TYPE_MAX; idx++) {
+ if (!(priv->pkt_type_mask & (1U << idx)))
+ continue;
+ ft_cfg = &priv->rx_ft_cfg[idx];
+ netif_napi_del(&ft_cfg->napi);
+ }
+ --(priv->rfoe_common->refcnt);
+ if (priv->rfoe_common->refcnt == 0)
+ kfree(priv->rfoe_common);
+ free_netdev(netdev);
+ drv_ctx->valid = 0;
+ }
+ }
+
+ return ret;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_rfoe.h b/drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_rfoe.h
new file mode 100644
index 000000000000..215056a1c7ca
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_rfoe.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell CNF10K BPHY RFOE Netdev Driver
+ *
+ * Copyright (C) 2021 Marvell.
+ */
+
+#ifndef _CNF10K_RFOE_H_
+#define _CNF10K_RFOE_H_
+
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/iommu.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_ether.h>
+#include <linux/net_tstamp.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/if_vlan.h>
+
+#include "rfoe_common.h"
+#include "otx2_bphy.h"
+
+#define DEBUG
+
+#define CNF10K_RFOE_RX_INTR_SHIFT(a) ({ \
+ typeof(a) _a = (a); \
+ ((_a) < 6) ? (32 - ((_a) + 1) * 3) : (((_a) - 6) * 3); \
+})
+#define CNF10K_RFOE_RX_INTR_MASK(a) (RFOE_RX_INTR_EN << \
+ CNF10K_RFOE_RX_INTR_SHIFT(a))
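+/* e.g. rfoe0 maps to bits [31:29] of PSM_INT_GP(1) and rfoe6 to
+ * bits [2:0] of PSM_INT_GP(2): three Rx interrupt bits per RFOE,
+ * one per packet type
+ */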
+#define CNF10K_RFOE_TX_PTP_INTR_MASK(a, b, n) (1UL << ((a) * (n) + (b)))
+
+#define CNF10K_RFOE_MAX_INTF 14
+
+/* global driver context */
+struct cnf10k_rfoe_drv_ctx {
+ u8 rfoe_num;
+ u8 lmac_id;
+ int valid;
+ struct net_device *netdev;
+ struct cnf10k_rx_ft_cfg *ft_cfg;
+ int tx_gpint_bit;
+};
+
+extern struct cnf10k_rfoe_drv_ctx cnf10k_rfoe_drv_ctx[CNF10K_RFOE_MAX_INTF];
+
+/* rx flow table configuration */
+struct cnf10k_rx_ft_cfg {
+ enum bphy_netdev_packet_type pkt_type; /* pkt_type for psw */
+ enum bphy_netdev_rx_gpint gp_int_num;
+ u16 flow_id; /* flow id */
+ u16 mbt_idx; /* mbt index */
+ u16 buf_size; /* mbt buf size */
+ u16 num_bufs; /* mbt num bufs */
+ u64 mbt_iova_addr;
+ void __iomem *mbt_virt_addr;
+ u16 jdt_idx; /* jdt index */
+ u8 jd_size; /* jd size */
+ u16 num_jd; /* num jd's */
+ u64 jdt_iova_addr;
+ void __iomem *jdt_virt_addr;
+ u8 jd_rd_offset; /* jd rd offset */
+ u8 pkt_offset;
+ struct napi_struct napi;
+ struct cnf10k_rfoe_ndev_priv *priv;
+};
+
+struct tx_ptp_ring_cfg {
+ u8 ptp_ring_id;
+ void __iomem *ptp_ring_base;
+ u8 ptp_ring_size;
+ u8 ptp_ring_idx;
+};
+
+/* netdev priv */
+struct cnf10k_rfoe_ndev_priv {
+ u8 rfoe_num;
+ u8 lmac_id;
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+ struct otx2_bphy_cdev_priv *cdev_priv;
+ u32 msg_enable;
+ void __iomem *bphy_reg_base;
+ void __iomem *psm_reg_base;
+ void __iomem *rfoe_reg_base;
+ void __iomem *bcn_reg_base;
+ void __iomem *ptp_reg_base;
+ struct iommu_domain *iommu_domain;
+ struct cnf10k_rx_ft_cfg rx_ft_cfg[PACKET_TYPE_MAX];
+ struct tx_job_queue_cfg tx_ptp_job_cfg;
+ struct tx_ptp_ring_cfg ptp_ring_cfg;
+ struct rfoe_common_cfg *rfoe_common;
+ u8 pkt_type_mask;
+ /* priv lock */
+ spinlock_t lock;
+ int rx_hw_tstamp_en;
+ int tx_hw_tstamp_en;
+ struct sk_buff *ptp_tx_skb;
+ u16 ptp_job_tag;
+ struct timer_list tx_timer;
+ unsigned long state;
+ struct work_struct ptp_tx_work;
+ struct work_struct ptp_queue_work;
+ struct ptp_tx_skb_list ptp_skb_list;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_clock_info;
+ struct otx2_rfoe_stats stats;
+ u8 mac_addr[ETH_ALEN];
+ s32 sec_bcn_offset;
+ int if_type;
+ u8 link_state;
+ unsigned long last_tx_jiffies;
+ unsigned long last_tx_ptp_jiffies;
+ unsigned long last_rx_jiffies;
+ unsigned long last_rx_ptp_jiffies;
+ unsigned long last_tx_dropped_jiffies;
+ unsigned long last_tx_ptp_dropped_jiffies;
+ unsigned long last_rx_dropped_jiffies;
+ unsigned long last_rx_ptp_dropped_jiffies;
+};
+
+void cnf10k_rfoe_rx_napi_schedule(int rfoe_num, u32 status);
+
+int cnf10k_rfoe_parse_and_init_intf(struct otx2_bphy_cdev_priv *cdev,
+ struct cnf10k_rfoe_ndev_comm_intf_cfg *cfg);
+
+void cnf10k_bphy_rfoe_cleanup(void);
+
+void cnf10k_rfoe_disable_intf(int rfoe_num);
+
+/* ethtool */
+void cnf10k_rfoe_set_ethtool_ops(struct net_device *netdev);
+
+/* ptp */
+int cnf10k_rfoe_ptp_init(struct cnf10k_rfoe_ndev_priv *priv);
+void cnf10k_rfoe_ptp_destroy(struct cnf10k_rfoe_ndev_priv *priv);
+
+void cnf10k_bphy_intr_handler(struct otx2_bphy_cdev_priv *cdev_priv,
+ u32 status);
+
+#endif
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_rfoe_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_rfoe_ethtool.c
new file mode 100644
index 000000000000..5d7bbd9fc82f
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_rfoe_ethtool.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell CNF10K BPHY RFOE Netdev Driver
+ *
+ * Copyright (C) 2021 Marvell.
+ */
+
+#include "cnf10k_rfoe.h"
+#include "cnf10k_bphy_hw.h"
+
+static const char ethtool_stat_strings[][ETH_GSTRING_LEN] = {
+ "oth_rx_packets",
+ "ptp_rx_packets",
+ "ecpri_rx_packets",
+ "rx_bytes",
+ "oth_rx_dropped",
+ "ptp_rx_dropped",
+ "ecpri_rx_dropped",
+ "oth_tx_packets",
+ "ptp_tx_packets",
+ "ecpri_tx_packets",
+ "tx_bytes",
+ "oth_tx_dropped",
+ "ptp_tx_dropped",
+ "ecpri_tx_dropped",
+ "ptp_tx_hwtstamp_failures",
+ "EthIfInFrames",
+ "EthIfInOctets",
+ "EthIfOutFrames",
+ "EthIfOutOctets",
+ "EthIfInUnknownVlan",
+};
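+/* the order above mirrors the leading u64 counters of struct
+ * otx2_rfoe_stats, which cnf10k_rfoe_get_ethtool_stats() copies
+ * out verbatim
+ */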
+
+static void cnf10k_rfoe_get_strings(struct net_device *netdev, u32 sset,
+ u8 *data)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ memcpy(data, *ethtool_stat_strings,
+ sizeof(ethtool_stat_strings));
+ break;
+ }
+}
+
+static int cnf10k_rfoe_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(ethtool_stat_strings);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void cnf10k_rfoe_update_lmac_stats(struct cnf10k_rfoe_ndev_priv *priv)
+{
+ struct otx2_rfoe_stats *stats = &priv->stats;
+
+ stats->EthIfInFrames = readq(priv->rfoe_reg_base +
+ RFOEX_RX_RPM_PKT_STAT(priv->rfoe_num,
+ priv->lmac_id));
+ stats->EthIfInOctets = readq(priv->rfoe_reg_base +
+ RFOEX_RX_RPM_OCTS_STAT(priv->rfoe_num,
+ priv->lmac_id));
+ stats->EthIfOutFrames = readq(priv->rfoe_reg_base +
+ RFOEX_TX_PKT_STAT(priv->rfoe_num,
+ priv->lmac_id));
+ stats->EthIfOutOctets = readq(priv->rfoe_reg_base +
+ RFOEX_TX_OCTS_STAT(priv->rfoe_num,
+ priv->lmac_id));
+ stats->EthIfInUnknownVlan =
+ readq(priv->rfoe_reg_base +
+ RFOEX_RX_VLAN_DROP_STAT(priv->rfoe_num,
+ priv->lmac_id));
+}
+
+static void cnf10k_rfoe_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct cnf10k_rfoe_ndev_priv *priv = netdev_priv(netdev);
+
+ cnf10k_rfoe_update_lmac_stats(priv);
+ spin_lock(&priv->stats.lock);
+ memcpy(data, &priv->stats,
+ ARRAY_SIZE(ethtool_stat_strings) * sizeof(u64));
+ spin_unlock(&priv->stats.lock);
+}
+
+static void cnf10k_rfoe_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *p)
+{
+ struct cnf10k_rfoe_ndev_priv *priv = netdev_priv(netdev);
+
+ snprintf(p->driver, sizeof(p->driver), "cnf10k_rfoe {rfoe%d lmac%d}",
+ priv->rfoe_num, priv->lmac_id);
+ strlcpy(p->bus_info, "platform", sizeof(p->bus_info));
+}
+
+static int cnf10k_rfoe_get_ts_info(struct net_device *netdev,
+ struct ethtool_ts_info *info)
+{
+ struct cnf10k_rfoe_ndev_priv *priv = netdev_priv(netdev);
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->phc_index = ptp_clock_index(priv->ptp_clock);
+
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
+static u32 cnf10k_rfoe_get_msglevel(struct net_device *netdev)
+{
+ struct cnf10k_rfoe_ndev_priv *priv = netdev_priv(netdev);
+
+ return priv->msg_enable;
+}
+
+static void cnf10k_rfoe_set_msglevel(struct net_device *netdev, u32 level)
+{
+ struct cnf10k_rfoe_ndev_priv *priv = netdev_priv(netdev);
+
+ priv->msg_enable = level;
+}
+
+static const struct ethtool_ops cnf10k_rfoe_ethtool_ops = {
+ .get_drvinfo = cnf10k_rfoe_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_ts_info = cnf10k_rfoe_get_ts_info,
+ .get_strings = cnf10k_rfoe_get_strings,
+ .get_sset_count = cnf10k_rfoe_get_sset_count,
+ .get_ethtool_stats = cnf10k_rfoe_get_ethtool_stats,
+ .get_msglevel = cnf10k_rfoe_get_msglevel,
+ .set_msglevel = cnf10k_rfoe_set_msglevel,
+};
+
+void cnf10k_rfoe_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &cnf10k_rfoe_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_rfoe_ptp.c b/drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_rfoe_ptp.c
new file mode 100644
index 000000000000..4ea2fc29ee71
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_rfoe_ptp.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell CNF10K BPHY RFOE Netdev Driver
+ *
+ * Copyright (C) 2021 Marvell.
+ */
+
+#include "cnf10k_rfoe.h"
+
+static int cnf10k_rfoe_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
+{
+ return -EOPNOTSUPP;
+}
+
+static int cnf10k_rfoe_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ return -EOPNOTSUPP;
+}
+
+static int cnf10k_rfoe_ptp_gettime(struct ptp_clock_info *ptp_info,
+ struct timespec64 *ts)
+{
+ struct cnf10k_rfoe_ndev_priv *priv = container_of(ptp_info,
+ struct cnf10k_rfoe_ndev_priv,
+ ptp_clock_info);
+ u64 nsec;
+
+ nsec = readq(priv->ptp_reg_base + MIO_PTP_CLOCK_HI);
+ *ts = ns_to_timespec64(nsec);
+
+ return 0;
+}
+
+static int cnf10k_rfoe_ptp_settime(struct ptp_clock_info *ptp_info,
+ const struct timespec64 *ts)
+{
+ return -EOPNOTSUPP;
+}
+
+static int cnf10k_rfoe_ptp_enable(struct ptp_clock_info *ptp_info,
+ struct ptp_clock_request *rq, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+static const struct ptp_clock_info cnf10k_rfoe_ptp_clock_info = {
+ .owner = THIS_MODULE,
+ .max_adj = 1000000000ull,
+ .n_ext_ts = 0,
+ .n_pins = 0,
+ .pps = 0,
+ .adjfine = cnf10k_rfoe_ptp_adjfine,
+ .adjtime = cnf10k_rfoe_ptp_adjtime,
+ .gettime64 = cnf10k_rfoe_ptp_gettime,
+ .settime64 = cnf10k_rfoe_ptp_settime,
+ .enable = cnf10k_rfoe_ptp_enable,
+};
+
+int cnf10k_rfoe_ptp_init(struct cnf10k_rfoe_ndev_priv *priv)
+{
+ int err;
+
+ priv->ptp_clock_info = cnf10k_rfoe_ptp_clock_info;
+ snprintf(priv->ptp_clock_info.name, 16, "%s", priv->netdev->name);
+ priv->ptp_clock = ptp_clock_register(&priv->ptp_clock_info,
+ &priv->pdev->dev);
+	if (IS_ERR_OR_NULL(priv->ptp_clock)) {
+		/* capture the error before clearing the pointer; a NULL
+		 * return (no PTP support) maps to -ENODEV
+		 */
+		err = priv->ptp_clock ? PTR_ERR(priv->ptp_clock) : -ENODEV;
+		priv->ptp_clock = NULL;
+		return err;
+	}
+
+ return 0;
+}
+
+void cnf10k_rfoe_ptp_destroy(struct cnf10k_rfoe_ndev_priv *priv)
+{
+ ptp_clock_unregister(priv->ptp_clock);
+ priv->ptp_clock = NULL;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy.h b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy.h
new file mode 100644
index 000000000000..5cb8a89eef0b
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTx2 BPHY RFOE/CPRI Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _OTX2_BPHY_H_
+#define _OTX2_BPHY_H_
+
+#include <linux/device.h>
+#include <linux/ioctl.h>
+#include <linux/cdev.h>
+#include <linux/io.h>
+#include <linux/iommu.h>
+
+#include "bphy_common.h"
+#include "rfoe_bphy_netdev_comm_if.h"
+#include "cnf10k_bphy_netdev_comm_if.h"
+
+#define DEVICE_NAME "otx2_rfoe"
+#define DRV_NAME "octeontx2-bphy-netdev"
+#define DRV_STRING "Marvell OcteonTX2 BPHY Ethernet Driver"
+
+/* char device ioctl numbers */
+#define OTX2_RFOE_IOCTL_BASE 0xCC /* Temporary */
+#define OTX2_RFOE_IOCTL_ODP_INTF_CFG _IOW(OTX2_RFOE_IOCTL_BASE, 0x01, \
+ struct bphy_netdev_comm_intf_cfg)
+#define OTX2_RFOE_IOCTL_ODP_DEINIT _IO(OTX2_RFOE_IOCTL_BASE, 0x02)
+#define OTX2_RFOE_IOCTL_RX_IND_CFG _IOWR(OTX2_RFOE_IOCTL_BASE, 0x03, \
+ struct otx2_rfoe_rx_ind_cfg)
+#define OTX2_RFOE_IOCTL_PTP_OFFSET _IOW(OTX2_RFOE_IOCTL_BASE, 0x04, \
+ struct ptp_clk_cfg)
+#define OTX2_RFOE_IOCTL_SEC_BCN_OFFSET _IOW(OTX2_RFOE_IOCTL_BASE, 0x05, \
+ struct bcn_sec_offset_cfg)
+#define OTX2_RFOE_IOCTL_MODE_CPRI _IOW(OTX2_RFOE_IOCTL_BASE, 0x06, \
+ int)
+#define OTX2_RFOE_IOCTL_LINK_EVENT _IOW(OTX2_RFOE_IOCTL_BASE, 0x07, \
+ struct otx2_rfoe_link_event)
+#define OTX2_CPRI_IOCTL_LINK_EVENT _IOW(OTX2_RFOE_IOCTL_BASE, 0x08, \
+ struct otx2_cpri_link_event)
+#define OTX2_IOCTL_RFOE_10x_CFG _IOW(OTX2_RFOE_IOCTL_BASE, 0x0A, \
+ uint64_t)
+#define OTX2_IOCTL_CPRI_INTF_CFG _IOW(OTX2_RFOE_IOCTL_BASE, 0x0B, \
+ uint64_t)
+
+//#define ASIM /* ASIM environment */
+
+#define OTX2_BPHY_MHAB_INST 3
+
+/* char driver private data */
+struct otx2_bphy_cdev_priv {
+ struct device *dev;
+ struct cdev cdev;
+ dev_t devt;
+ int is_open;
+ int odp_intf_cfg;
+ int irq;
+ struct mutex mutex_lock; /* mutex */
+ spinlock_t lock; /* irq lock */
+ spinlock_t mbt_lock; /* mbt ind lock */
+ u8 mhab_mode[BPHY_MAX_RFOE_MHAB];
+ /* cnf10k specific information */
+ u32 hw_version;
+ u8 num_rfoe_mhab;
+ u8 num_rfoe_lmac;
+ u8 tot_rfoe_intf;
+ int gpint2_irq;
+};
+
+#endif
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy_debugfs.c
new file mode 100644
index 000000000000..a2d03352c89d
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy_debugfs.c
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 BPHY RFOE/CPRI Ethernet Driver
+ *
+ * Copyright (C) 2021 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+
+#include "otx2_bphy_debugfs.h"
+#include "otx2_bphy.h"
+
+#define OTX2_BPHY_DEBUGFS_MODE 0400
+
+struct otx2_bphy_debugfs_reader_info {
+ atomic_t refcnt;
+ size_t buffer_size;
+ void *priv;
+ otx2_bphy_debugfs_reader reader;
+ struct dentry *entry;
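+	/* must be last: otx2_bphy_debugfs_add_file() allocates
+	 * buffer_size bytes for this member
+	 */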
+ char buffer[1];
+};
+
+static struct dentry *otx2_bphy_debugfs;
+
+static int otx2_bphy_debugfs_open(struct inode *inode, struct file *file);
+
+static int otx2_bphy_debugfs_release(struct inode *inode, struct file *file);
+
+static ssize_t otx2_bphy_debugfs_read(struct file *file, char __user *buffer,
+ size_t count, loff_t *offset);
+
+static const struct file_operations otx2_bphy_debugfs_foper = {
+ .owner = THIS_MODULE,
+ .open = otx2_bphy_debugfs_open,
+ .release = otx2_bphy_debugfs_release,
+ .read = otx2_bphy_debugfs_read,
+};
+
+void __init otx2_bphy_debugfs_init(void)
+{
+ otx2_bphy_debugfs = debugfs_create_dir(DRV_NAME, NULL);
+ if (!otx2_bphy_debugfs)
+ pr_info("%s: debugfs is not enabled\n", DRV_NAME);
+}
+
+void *otx2_bphy_debugfs_add_file(const char *name,
+ size_t buffer_size,
+ void *priv,
+ otx2_bphy_debugfs_reader reader)
+{
+ struct otx2_bphy_debugfs_reader_info *info = NULL;
+ size_t total_size = 0;
+
+ if (!otx2_bphy_debugfs) {
+ pr_info("%s: debugfs not enabled, ignoring %s\n", DRV_NAME,
+ name);
+ goto out;
+ }
+
+ total_size = buffer_size +
+ offsetof(struct otx2_bphy_debugfs_reader_info,
+ buffer);
+
+ info = kzalloc(total_size, GFP_KERNEL);
+
+ if (!info)
+ goto out;
+
+ info->buffer_size = buffer_size;
+ info->priv = priv;
+ info->reader = reader;
+
+ atomic_set(&info->refcnt, 0);
+
+ info->entry = debugfs_create_file(name, OTX2_BPHY_DEBUGFS_MODE,
+ otx2_bphy_debugfs, info,
+ &otx2_bphy_debugfs_foper);
+
+ if (!info->entry) {
+ pr_err("%s: debugfs failed to add file %s\n", DRV_NAME, name);
+ kfree(info);
+ info = NULL;
+ goto out;
+ }
+
+ pr_info("%s: debugfs created successfully for %s\n", DRV_NAME, name);
+
+out:
+ return info;
+}
+
+void otx2_bphy_debugfs_remove_file(void *entry)
+{
+ struct otx2_bphy_debugfs_reader_info *info = entry;
+
+ debugfs_remove(info->entry);
+
+ kfree(info);
+}
+
+void __exit otx2_bphy_debugfs_exit(void)
+{
+ debugfs_remove_recursive(otx2_bphy_debugfs);
+}
+
+static int otx2_bphy_debugfs_open(struct inode *inode, struct file *file)
+{
+ struct otx2_bphy_debugfs_reader_info *info = NULL;
+
+ info = inode->i_private;
+
+ if (!atomic_cmpxchg(&info->refcnt, 0, 1)) {
+ file->private_data = info;
+ return 0;
+ }
+
+ return -EBUSY;
+}
+
+static int otx2_bphy_debugfs_release(struct inode *inode, struct file *file)
+{
+ struct otx2_bphy_debugfs_reader_info *info = NULL;
+
+ info = inode->i_private;
+
+ atomic_cmpxchg(&info->refcnt, 1, 0);
+
+ return 0;
+}
+
+static ssize_t otx2_bphy_debugfs_read(struct file *file, char __user *buffer,
+ size_t count, loff_t *offset)
+{
+ struct otx2_bphy_debugfs_reader_info *info = NULL;
+ ssize_t retval = 0;
+
+ info = file->private_data;
+
+ if (!(*offset))
+ info->reader(&info->buffer[0], info->buffer_size, info->priv);
+
+ if (*offset >= info->buffer_size)
+ goto out;
+
+ if (*offset + count > info->buffer_size)
+ count = info->buffer_size - *offset;
+
+ if (copy_to_user((void __user *)buffer, info->buffer + *offset,
+ count)) {
+ retval = -EFAULT;
+ goto out;
+ }
+
+ *offset += count;
+ retval = count;
+
+out:
+ return retval;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy_debugfs.h b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy_debugfs.h
new file mode 100644
index 000000000000..099290565bfa
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy_debugfs.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTx2 BPHY RFOE/CPRI Ethernet Driver
+ *
+ * Copyright (C) 2021 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+
+#ifndef _OTX2_BPHY_DEBUGFS_H_
+#define _OTX2_BPHY_DEBUGFS_H_
+
+typedef void (*otx2_bphy_debugfs_reader)(char *buffer, size_t buffer_size,
+ void *priv);
+
+void otx2_bphy_debugfs_init(void);
+
+void *otx2_bphy_debugfs_add_file(const char *name,
+ size_t buffer_size,
+ void *priv,
+ otx2_bphy_debugfs_reader reader);
+
+void otx2_bphy_debugfs_remove_file(void *entry);
+
+void otx2_bphy_debugfs_exit(void);
+
+#endif
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy_hw.h b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy_hw.h
new file mode 100644
index 000000000000..48bfd2017ea1
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy_hw.h
@@ -0,0 +1,381 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTx2 BPHY RFOE/CPRI Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _OTX2_BPHY_HW_H_
+#define _OTX2_BPHY_HW_H_
+
+#include <linux/types.h>
+
+/* PSM register offsets */
+#define PSM_QUEUE_CMD_LO(a) (0x0 + (a) * 0x10)
+#define PSM_QUEUE_CMD_HI(a) (0x8 + (a) * 0x10)
+#define PSM_QUEUE_CFG(a) (0x1000 + (a) * 0x10)
+#define PSM_QUEUE_PTR(a) (0x2000 + (a) * 0x10)
+#define PSM_QUEUE_SPACE(a) (0x3000 + (a) * 0x10)
+#define PSM_QUEUE_TIMEOUT_CFG(a) (0x4000 + (a) * 0x10)
+#define PSM_QUEUE_INFO(a) (0x5000 + (a) * 0x10)
+#define PSM_QUEUE_ENA_W1S(a) (0x10000 + (a) * 0x8)
+#define PSM_QUEUE_ENA_W1C(a) (0x10100 + (a) * 0x8)
+#define PSM_QUEUE_FULL_STS(a) (0x10200 + (a) * 0x8)
+#define PSM_QUEUE_BUSY_STS(a) (0x10300 + (a) * 0x8)
+
+/* BPHY PSM GPINT register offsets */
+#define PSM_INT_GP_SUM_W1C(a) (0x10E0000 + (a) * 0x100)
+#define PSM_INT_GP_SUM_W1S(a) (0x10E0040 + (a) * 0x100)
+#define PSM_INT_GP_ENA_W1C(a) (0x10E0080 + (a) * 0x100)
+#define PSM_INT_GP_ENA_W1S(a) (0x10E00C0 + (a) * 0x100)
+
+/* RFOE MHAB register offsets */
+#define RFOEX_RX_CTL(a) (0x0818ULL | \
+ ((unsigned long)(a) << 36))
+#define RFOEX_RX_VLANX_CFG(a, b) (0x0870ULL | \
+ ((unsigned long)(a) << 36) | \
+ ((b) << 3))
+#define RFOEX_RX_INDIRECT_INDEX_OFFSET(a) (0x13F8ULL | \
+ ((unsigned long)(a) << 36))
+#define RFOEX_RX_IND_FTX_CFG(a, b) (0x1400ULL | \
+ (((unsigned long)(a) << 36)) + \
+ ((b) << 3))
+#define RFOEX_RX_IND_MBT_CFG(a) (0x1420ULL | \
+ ((unsigned long)(a) << 36))
+#define RFOEX_RX_IND_MBT_ADDR(a) (0x1428ULL | \
+ ((unsigned long)(a) << 36))
+#define RFOEX_RX_IND_MBT_SEG_STATE(a) (0x1430ULL | \
+ ((unsigned long)(a) << 36))
+#define RFOEX_RX_IND_VLANX_FWD(a, b) (0x14D0ULL | \
+ ((unsigned long)(a) << 36) | \
+ ((b) << 3))
+#define RFOEX_RX_IND_JDT_CFG0(a) (0x1440ULL | \
+ ((unsigned long)(a) << 36))
+#define RFOEX_RX_IND_JDT_CFG1(a) (0x1448ULL | \
+ ((unsigned long)(a) << 36))
+#define RFOEX_RX_IND_JDT_PTR(a) (0x1450ULL | \
+ ((unsigned long)(a) << 36))
+#define RFOEX_RX_IND_JDT_STATE(a) (0x1478ULL | \
+ ((unsigned long)(a) << 36))
+#define RFOEX_RX_IND_ECPRI_FT_CFG(a) (0x14C0ULL | \
+ ((unsigned long)(a) << 36))
+#define RFOEX_TX_PTP_TSTMP_W0(a, b) (0x7A0ULL | \
+ (((unsigned long)(a) << 36)) | \
+ ((b) << 3))
+#define RFOEX_TX_PTP_TSTMP_W1(a, b) (0x7C0ULL | \
+ (((unsigned long)(a) << 36)) | \
+ ((b) << 3))
+#define RFOEX_TX_PKT_STAT(a, b) (0x720ULL | \
+ (((unsigned long)(a) << 36)) | \
+ ((b) << 3))
+#define RFOEX_TX_OCTS_STAT(a, b) (0x740ULL | \
+ (((unsigned long)(a) << 36)) | \
+ ((b) << 3))
+#define RFOEX_RX_VLAN_DROP_STAT(a, b) (0x8A0ULL | \
+ (((unsigned long)(a) << 36)) | \
+ ((b) << 3))
+#define RFOEX_RX_CGX_PKT_STAT(a, b) (0x15C0ULL | \
+ (((unsigned long)(a) << 36)) | \
+ ((b) << 3))
+#define RFOEX_RX_CGX_OCTS_STAT(a, b) (0x15E0ULL | \
+ (((unsigned long)(a) << 36)) | \
+ ((b) << 3))
+
+/* BCN register offsets and definitions */
+#define BCN_CAPTURE_CFG 0x10400
+#define BCN_CAPTURE_N1_N2 0x10410
+#define BCN_CAPTURE_PTP 0x10430
+
+/* BCN_CAPTURE_CFG register definitions */
+#define CAPT_EN BIT(0)
+#define CAPT_TRIG_SW (3UL << 8)
+
+/* CPRI register offsets */
+#define CPRIX_RXD_GMII_UL_CBUF_CFG1(a) (0x1000ULL | \
+ ((unsigned long)(a) << 36))
+#define CPRIX_RXD_GMII_UL_CBUF_CFG2(a) (0x1008ULL | \
+ ((unsigned long)(a) << 36))
+#define CPRIX_RXD_GMII_UL_RD_DOORBELL(a) (0x1010ULL | \
+ ((unsigned long)(a) << 36))
+#define CPRIX_RXD_GMII_UL_SW_RD_PTR(a) (0x1018ULL | \
+ ((unsigned long)(a) << 36))
+#define CPRIX_RXD_GMII_UL_NXT_WR_PTR(a) (0x1020ULL | \
+ ((unsigned long)(a) << 36))
+#define CPRIX_RXD_GMII_UL_PKT_COUNT(a) (0x1028ULL | \
+ ((unsigned long)(a) << 36))
+#define CPRIX_TXD_GMII_DL_CBUF_CFG1(a) (0x1100ULL | \
+ ((unsigned long)(a) << 36))
+#define CPRIX_TXD_GMII_DL_CBUF_CFG2(a) (0x1108ULL | \
+ ((unsigned long)(a) << 36))
+#define CPRIX_TXD_GMII_DL_WR_DOORBELL(a) (0x1110ULL | \
+ ((unsigned long)(a) << 36))
+#define CPRIX_TXD_GMII_DL_SW_WR_PTR(a) (0x1118ULL | \
+ ((unsigned long)(a) << 36))
+#define CPRIX_TXD_GMII_DL_NXT_RD_PTR(a) (0x1120ULL | \
+ ((unsigned long)(a) << 36))
+#define CPRIX_ETH_UL_INT(a) (0x280ULL | \
+ ((unsigned long)(a) << 36))
+#define CPRIX_ETH_UL_INT_ENA_W1S(a) (0x288ULL | \
+ ((unsigned long)(a) << 36))
+#define CPRIX_ETH_UL_INT_ENA_W1C(a) (0x290ULL | \
+ ((unsigned long)(a) << 36))
+#define CPRIX_ETH_UL_INT_W1S(a) (0x298ULL | \
+ ((unsigned long)(a) << 36))
+#define CPRIX_ETH_BAD_CRC_CNT(a, b) (0x400ULL | \
+ ((unsigned long)(a) << 36) | \
+ ((unsigned long)(b) << 11))
+#define CPRIX_ETH_UL_ERR_CNT(a, b) (0x408ULL | \
+ ((unsigned long)(a) << 36) | \
+ ((unsigned long)(b) << 11))
+#define CPRIX_ETH_UL_OSIZE_CNT(a, b) (0x410ULL | \
+ ((unsigned long)(a) << 36) | \
+ ((unsigned long)(b) << 11))
+#define CPRIX_ETH_UL_USIZE_CNT(a, b) (0x418ULL | \
+ ((unsigned long)(a) << 36) | \
+ ((unsigned long)(b) << 11))
+#define CPRIX_ETH_UL_FIFO_ORUN_CNT(a, b) (0x420ULL | \
+ ((unsigned long)(a) << 36) | \
+ ((unsigned long)(b) << 11))
+#define CPRIX_ETH_UL_GPKTS_CNT(a, b) (0x428ULL | \
+ ((unsigned long)(a) << 36) | \
+ ((unsigned long)(b) << 11))
+#define CPRIX_ETH_UL_BOCT_CNT(a, b) (0x430ULL | \
+ ((unsigned long)(a) << 36) | \
+ ((unsigned long)(b) << 11))
+#define CPRIX_ETH_UL_GOCT_CNT(a, b) (0x438ULL | \
+ ((unsigned long)(a) << 36) | \
+ ((unsigned long)(b) << 11))
+#define CPRIX_ETH_DL_GOCTETS_CNT(a, b) (0x440ULL | \
+ ((unsigned long)(a) << 36) | \
+ ((unsigned long)(b) << 11))
+#define CPRIX_ETH_DL_GPKTS_CNT(a, b) (0x448ULL | \
+ ((unsigned long)(a) << 36) | \
+ ((unsigned long)(b) << 11))
+
+/* MHAB definitions */
+struct mhbw_jd_dma_cfg_word_0_s {
+ u64 dma_mode : 3;
+ u64 target_mem : 1;
+ u64 dswap : 3;
+ u64 cmd_type : 2;
+ u64 reserved1 : 7;
+ u64 chunk_size : 16;
+ u64 block_size : 16;
+ u64 thread_id : 6;
+ u64 reserved2 : 2;
+ u64 group_id : 4;
+ u64 reserved3 : 4;
+};
+
+struct mhbw_jd_dma_cfg_word_1_s {
+ u64 start_addr : 53;
+ u64 reserved1 : 11;
+};
+
+/* RFOE definitions */
+enum rfoe_rx_dir_ctl_pkt_type_e {
+ ROE = 0x0,
+ CHI = 0x1,
+ ALT = 0x2,
+ ECPRI = 0x3,
+ GENERIC = 0x8,
+};
+
+enum rfoe_rx_pswt_e {
+ ROE_TYPE = 0x0,
+ ECPRI_TYPE = 0x2,
+};
+
+enum rfoe_rx_pkt_err_e {
+ RE_NONE = 0x0,
+ RE_PARTIAL = 0x1,
+ RE_JABBER = 0x2,
+ RE_FCS = 0x7,
+ RE_FCS_RCV = 0x8,
+ RE_TERMINATE = 0x9,
+ RE_RX_CTL = 0xB,
+ RE_SKIP = 0xC,
+};
+
+enum rfoe_rx_pkt_logger_idx_e {
+ RX_PKT = 0x0,
+ TX_PKT = 0x1,
+};
+
+struct psm_cmd_addjob_s {
+ /* W0 */
+ u64 opcode : 6;
+ u64 rsrc_set : 2;
+ u64 qid : 8;
+ u64 waitcond : 8;
+ u64 jobtag : 16;
+ u64 reserved1 : 8;
+ u64 mabq : 1;
+ u64 reserved2 : 3;
+ u64 tmem : 1;
+ u64 reserved3 : 3;
+ u64 jobtype : 8;
+ /* W1 */
+ u64 jobptr : 53;
+ u64 reserved4 : 11;
+};
+
+struct rfoe_ecpri_psw0_s {
+ /* W0 */
+ u64 jd_ptr : 53;
+ u64 jd_ptr_tmem : 1;
+ u64 reserved1 : 2;
+ u64 src_id : 4;
+ u64 reserved2 : 2;
+ u64 pswt : 2;
+ /* W1 */
+ u64 msg_type : 8;
+ u64 ecpri_id : 16;
+ u64 flow_id : 8;
+ u64 reserved3 : 6;
+ u64 err_sts : 6;
+ u64 reserved4 : 2;
+ u64 seq_id : 16;
+};
+
+struct rfoe_ecpri_psw1_s {
+ /* W0 */
+ u64 ptp_timestamp;
+ /* W1 */
+ u64 ethertype : 16;
+ u64 eindex : 5;
+ u64 reserved1 : 3;
+ u64 dec_error : 8;
+ u64 dec_num_sections : 8;
+ u64 dec_num_syminc : 8;
+ u64 reserved2 : 8;
+ u64 ptype : 4;
+ u64 reserved3 : 4;
+};
+
+struct rfoe_psw0_s {
+ /* W0 */
+ u64 pkt_err_sts : 4;
+ u64 dma_error : 1;
+ u64 jd_ptr : 53;
+ u64 jd_target_mem : 1;
+ u64 orderinfo_status : 1;
+ u64 lmac_id : 2;
+ u64 pswt : 2;
+ /* W1 */
+ u64 roe_subtype : 8;
+ u64 roe_flowid : 8;
+ u64 fd_symbol : 8;
+ u64 fd_antid : 8;
+ u64 rfoe_timestamp : 32;
+};
+
+struct rfoe_psw1_s {
+ /* W0 */
+ u64 ptp_timestamp;
+ /* W1 */
+ u64 ethertype : 16;
+ u64 eindex : 5;
+ u64 reserved1 : 3;
+ u64 dec_error : 8;
+ u64 dec_num_sections : 8;
+ u64 dec_num_syminc : 8;
+ u64 reserved2 : 8;
+ u64 ptype : 4;
+ u64 reserved3 : 4;
+};
+
+struct rfoex_tx_ptp_tstmp_w1 {
+ u64 lmac_id : 2;
+ u64 rfoe_id : 2;
+ u64 jobid : 16;
+ u64 drop : 1;
+ u64 tx_err : 1;
+ u64 reserved1 : 41;
+ u64 valid : 1;
+};
+
+struct rfoex_abx_slotx_configuration {
+ u64 pkt_mode : 2;
+ u64 da_sel : 3;
+ u64 sa_sel : 3;
+ u64 etype_sel : 3;
+ u64 flowid : 8;
+ u64 subtype : 8;
+ u64 lmacid : 2;
+ u64 sample_mode : 1;
+ u64 sample_widt : 5;
+ u64 sample_width_option : 1;
+ u64 sample_width_sat_bypass : 1;
+ u64 orderinfotype : 1;
+ u64 orderinfooffset : 5;
+ u64 antenna : 8;
+ u64 symbol : 8;
+ u64 sos : 1;
+ u64 eos : 1;
+ u64 orderinfo_insert : 1;
+ u64 custom_timestamp_insert : 1;
+ u64 rfoe_mode : 1;
+};
+
+struct rfoex_abx_slotx_configuration1 {
+ u64 rbmap_bytes : 8;
+ u64 pkt_len : 16;
+ u64 hdr_len : 8;
+ u64 presentation_time_offset : 29;
+ u64 reserved1 : 1;
+ u64 sof_mode : 2;
+};
+
+struct rfoex_abx_slotx_configuration2 {
+ u64 vlan_sel : 3;
+ u64 vlan_num : 2;
+ u64 ptp_mode : 1;
+ u64 ecpri_id_insert : 1;
+ u64 ecpri_seq_id_insert : 1;
+ u64 ecpri_rev : 8;
+ u64 ecpri_msgtype : 8;
+ u64 ecpri_id : 16;
+ u64 ecpri_seq_id : 16;
+ u64 reserved1 : 8;
+};
+
+struct rfoe_rx_ind_vlanx_fwd {
+ u64 fwd : 64;
+};
+
+struct mhab_job_desc_cfg {
+ struct rfoex_abx_slotx_configuration cfg;
+ struct rfoex_abx_slotx_configuration1 cfg1;
+ struct rfoex_abx_slotx_configuration2 cfg2;
+} __packed;
+
+/* CPRI definitions */
+struct cpri_pkt_dl_wqe_hdr {
+ u64 lane_id : 2;
+ u64 reserved1 : 2;
+ u64 mhab_id : 2;
+ u64 reserved2 : 2;
+ u64 pkt_length : 11;
+ u64 reserved3 : 45;
+ u64 w1;
+};
+
+struct cpri_pkt_ul_wqe_hdr {
+ u64 lane_id : 2;
+ u64 reserved1 : 2;
+ u64 mhab_id : 2;
+ u64 reserved2 : 2;
+ u64 pkt_length : 11;
+ u64 reserved3 : 5;
+ u64 fcserr : 1;
+ u64 rsp_ferr : 1;
+ u64 rsp_nferr : 1;
+ u64 reserved4 : 37;
+ u64 w1;
+};
+
+#endif /* _OTX2_BPHY_HW_H_ */
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy_main.c b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy_main.c
new file mode 100644
index 000000000000..d0c222aeaa75
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy_main.c
@@ -0,0 +1,887 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 BPHY RFOE/CPRI Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/interrupt.h>
+
+#include "otx2_bphy.h"
+#include "otx2_rfoe.h"
+#include "otx2_cpri.h"
+#include "otx2_bphy_debugfs.h"
+#include "cnf10k_rfoe.h"
+
+MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_DESCRIPTION(DRV_STRING);
+MODULE_LICENSE("GPL v2");
+
+/* max ptp tx requests */
+int max_ptp_req = 16;
+module_param(max_ptp_req, int, 0644);
+MODULE_PARM_DESC(max_ptp_req, "Maximum PTP Tx requests");
+
+/* cdev */
+static struct class *otx2rfoe_class;
+
+/* reg base address */
+void __iomem *bphy_reg_base;
+void __iomem *psm_reg_base;
+void __iomem *rfoe_reg_base;
+void __iomem *bcn_reg_base;
+void __iomem *ptp_reg_base;
+void __iomem *cpri_reg_base;
+
+/* check if cpri block is available */
+#define cpri_available() ((cpri_reg_base) ? 1 : 0)
+
+/* GPINT(2) interrupt handler routine */
+static irqreturn_t cnf10k_gpint2_intr_handler(int irq, void *dev_id)
+{
+ struct otx2_bphy_cdev_priv *cdev_priv;
+ u32 status, intr_mask;
+ int rfoe_num;
+
+ cdev_priv = (struct otx2_bphy_cdev_priv *)dev_id;
+
+ /* clear interrupt status */
+ status = readq(bphy_reg_base + PSM_INT_GP_SUM_W1C(2)) & 0xFFFFFFFF;
+ writeq(status, bphy_reg_base + PSM_INT_GP_SUM_W1C(2));
+
+ pr_debug("gpint2 status = 0x%x\n", status);
+
+ /* rx intr processing */
+ for (rfoe_num = 0; rfoe_num < cdev_priv->num_rfoe_mhab; rfoe_num++) {
+ intr_mask = CNF10K_RFOE_RX_INTR_MASK(rfoe_num);
+ if (status & intr_mask)
+ cnf10k_rfoe_rx_napi_schedule(rfoe_num, status);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* GPINT(1) interrupt handler routine */
+static irqreturn_t otx2_bphy_intr_handler(int irq, void *dev_id)
+{
+ struct otx2_bphy_cdev_priv *cdev_priv;
+ struct otx2_rfoe_drv_ctx *drv_ctx;
+ struct otx2_rfoe_ndev_priv *priv;
+ struct net_device *netdev;
+ int rfoe_num, cpri_num, i;
+ u32 intr_mask, status;
+
+ cdev_priv = (struct otx2_bphy_cdev_priv *)dev_id;
+
+ /* clear interrupt status */
+ status = readq(bphy_reg_base + PSM_INT_GP_SUM_W1C(1)) & 0xFFFFFFFF;
+ writeq(status, bphy_reg_base + PSM_INT_GP_SUM_W1C(1));
+
+ pr_debug("gpint status = 0x%x\n", status);
+
+ /* CNF10K intr processing */
+ if (CHIP_CNF10K(cdev_priv->hw_version)) {
+ cnf10k_bphy_intr_handler(cdev_priv, status);
+ return IRQ_HANDLED;
+ }
+
+ /* CNF95 intr processing */
+ for (rfoe_num = 0; rfoe_num < MAX_RFOE_INTF; rfoe_num++) {
+ intr_mask = RFOE_RX_INTR_MASK(rfoe_num);
+ if (status & intr_mask)
+ otx2_rfoe_rx_napi_schedule(rfoe_num, status);
+ }
+
+ for (cpri_num = 0; cpri_num < OTX2_BPHY_CPRI_MAX_MHAB; cpri_num++) {
+ intr_mask = CPRI_RX_INTR_MASK(cpri_num);
+ if (status & intr_mask) {
+ /* clear UL ETH interrupt */
+ writeq(0x1, cpri_reg_base + CPRIX_ETH_UL_INT(cpri_num));
+ otx2_cpri_rx_napi_schedule(cpri_num, status);
+ }
+ }
+
+ /* tx intr processing */
+ for (i = 0; i < RFOE_MAX_INTF; i++) {
+ drv_ctx = &rfoe_drv_ctx[i];
+ if (drv_ctx->valid) {
+ netdev = drv_ctx->netdev;
+ priv = netdev_priv(netdev);
+ intr_mask = RFOE_TX_PTP_INTR_MASK(priv->rfoe_num,
+ priv->lmac_id);
+ if ((status & intr_mask) && priv->ptp_tx_skb)
+ schedule_work(&priv->ptp_tx_work);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static inline void msix_enable_ctrl(struct pci_dev *dev)
+{
+ u16 control;
+
+ pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
+ control |= PCI_MSIX_FLAGS_ENABLE;
+ pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control);
+}
+
+static long otx2_bphy_cdev_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct otx2_bphy_cdev_priv *cdev = filp->private_data;
+ int ret;
+
+ if (!cdev) {
+ pr_warn("ioctl: device not opened\n");
+ return -EIO;
+ }
+
+ mutex_lock(&cdev->mutex_lock);
+
+ switch (cmd) {
+ case OTX2_RFOE_IOCTL_ODP_INTF_CFG:
+ {
+ struct bphy_netdev_comm_intf_cfg *intf_cfg;
+ struct pci_dev *bphy_pdev;
+ int idx;
+
+ if (cdev->odp_intf_cfg) {
+ dev_info(cdev->dev, "odp interface cfg already done\n");
+ ret = -EBUSY;
+ goto out;
+ }
+
+		intf_cfg = kcalloc(MAX_RFOE_INTF, sizeof(*intf_cfg),
+				   GFP_KERNEL);
+ if (!intf_cfg) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (copy_from_user(intf_cfg, (void __user *)arg,
+ (MAX_RFOE_INTF *
+ sizeof(struct bphy_netdev_comm_intf_cfg)))) {
+ dev_err(cdev->dev, "copy from user fault\n");
+ ret = -EFAULT;
+ goto out;
+ }
+
+ for (idx = 0; idx < OTX2_BPHY_MHAB_INST; idx++)
+ cdev->mhab_mode[idx] = intf_cfg[idx].if_type;
+
+ ret = otx2_rfoe_parse_and_init_intf(cdev, intf_cfg);
+ if (ret < 0) {
+ dev_err(cdev->dev, "odp <-> netdev parse error\n");
+ goto out;
+ }
+
+ if (cpri_available()) {
+ ret = otx2_cpri_parse_and_init_intf(cdev, intf_cfg);
+ if (ret < 0) {
+ dev_err(cdev->dev, "odp <-> netdev parse error\n");
+ goto out;
+ }
+ }
+
+ /* The MSIXEN bit gets cleared when the ODP BPHY driver
+ * resets the BPHY, so re-enable it here in the ioctl.
+ */
+ bphy_pdev = pci_get_device(OTX2_BPHY_PCI_VENDOR_ID,
+ OTX2_BPHY_PCI_DEVICE_ID, NULL);
+ if (!bphy_pdev) {
+ dev_err(cdev->dev, "Couldn't find BPHY PCI device %x\n",
+ OTX2_BPHY_PCI_DEVICE_ID);
+ kfree(intf_cfg);
+ ret = -ENODEV;
+ goto out;
+ }
+ msix_enable_ctrl(bphy_pdev);
+
+ /* Enable CPRI ETH UL INT */
+ for (idx = 0; idx < OTX2_BPHY_CPRI_MAX_MHAB; idx++) {
+ if (intf_cfg[idx].if_type == IF_TYPE_CPRI)
+ writeq(0x1, cpri_reg_base +
+ CPRIX_ETH_UL_INT_ENA_W1S(idx));
+ }
+
+ /* Enable GPINT Rx and Tx interrupts */
+ writeq(0xFFFFFFFF, bphy_reg_base + PSM_INT_GP_ENA_W1S(1));
+
+ cdev->odp_intf_cfg = 1;
+
+ kfree(intf_cfg);
+
+ ret = 0;
+ goto out;
+ }
+ case OTX2_RFOE_IOCTL_ODP_DEINIT:
+ {
+ u32 status;
+
+ /* Disable GPINT Rx and Tx interrupts */
+ writeq(0xFFFFFFFF, bphy_reg_base + PSM_INT_GP_ENA_W1C(1));
+
+ /* clear interrupt status */
+ status = readq(bphy_reg_base + PSM_INT_GP_SUM_W1C(1)) &
+ 0xFFFFFFFF;
+ writeq(status, bphy_reg_base + PSM_INT_GP_SUM_W1C(1));
+
+ otx2_bphy_rfoe_cleanup();
+ if (cpri_available())
+ otx2_bphy_cpri_cleanup();
+
+ cdev->odp_intf_cfg = 0;
+
+ ret = 0;
+ goto out;
+ }
+ case OTX2_RFOE_IOCTL_RX_IND_CFG:
+ {
+ struct otx2_rfoe_rx_ind_cfg cfg;
+ unsigned long flags;
+
+ if (copy_from_user(&cfg, (void __user *)arg,
+ sizeof(struct otx2_rfoe_rx_ind_cfg))) {
+ dev_err(cdev->dev, "copy from user fault\n");
+ ret = -EFAULT;
+ goto out;
+ }
+
+ spin_lock_irqsave(&cdev->mbt_lock, flags);
+ writeq(cfg.rx_ind_idx, (rfoe_reg_base +
+ RFOEX_RX_INDIRECT_INDEX_OFFSET(cfg.rfoe_num)));
+ if (cfg.dir == OTX2_RFOE_RX_IND_READ)
+ cfg.regval = readq(rfoe_reg_base + cfg.regoff);
+ else
+ writeq(cfg.regval, rfoe_reg_base + cfg.regoff);
+ spin_unlock_irqrestore(&cdev->mbt_lock, flags);
+ if (copy_to_user((void __user *)(unsigned long)arg, &cfg,
+ sizeof(struct otx2_rfoe_rx_ind_cfg))) {
+ dev_err(cdev->dev, "copy to user fault\n");
+ ret = -EFAULT;
+ goto out;
+ }
+ ret = 0;
+ goto out;
+ }
+ case OTX2_RFOE_IOCTL_PTP_OFFSET:
+ {
+ u64 bcn_n1, bcn_n2, bcn_n1_ns, bcn_n2_ps, ptp0_ns, regval;
+ struct otx2_rfoe_drv_ctx *drv_ctx = NULL;
+ struct otx2_rfoe_ndev_priv *priv;
+ struct ptp_bcn_off_cfg *ptp_cfg;
+ struct ptp_clk_cfg clk_cfg;
+ struct net_device *netdev;
+ struct ptp_bcn_ref ref;
+ unsigned long expires;
+ int idx;
+
+ if (!cdev->odp_intf_cfg) {
+ dev_info(cdev->dev, "odp interface cfg is not done\n");
+ ret = -EBUSY;
+ goto out;
+ }
+ if (copy_from_user(&clk_cfg, (void __user *)arg,
+ sizeof(struct ptp_clk_cfg))) {
+ dev_err(cdev->dev, "copy from user fault\n");
+ ret = -EFAULT;
+ goto out;
+ }
+ if (!(clk_cfg.clk_freq_ghz && clk_cfg.clk_freq_div)) {
+ dev_err(cdev->dev, "Invalid ptp clk parameters\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ for (idx = 0; idx < RFOE_MAX_INTF; idx++) {
+ drv_ctx = &rfoe_drv_ctx[idx];
+ if (drv_ctx->valid)
+ break;
+ }
+ if (idx >= RFOE_MAX_INTF) {
+ dev_err(cdev->dev, "drv ctx not found\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ netdev = drv_ctx->netdev;
+ priv = netdev_priv(netdev);
+ ptp_cfg = priv->ptp_cfg;
+ ptp_cfg->clk_cfg.clk_freq_ghz = clk_cfg.clk_freq_ghz;
+ ptp_cfg->clk_cfg.clk_freq_div = clk_cfg.clk_freq_div;
+ /* capture ptp and bcn timestamp using BCN_CAPTURE_CFG */
+ writeq((CAPT_EN | CAPT_TRIG_SW),
+ priv->bcn_reg_base + BCN_CAPTURE_CFG);
+ /* poll for capt_en to become 0 */
+ while ((readq(priv->bcn_reg_base + BCN_CAPTURE_CFG) & CAPT_EN))
+ cpu_relax();
+ ptp0_ns = readq(priv->bcn_reg_base + BCN_CAPTURE_PTP);
+ regval = readq(priv->bcn_reg_base + BCN_CAPTURE_N1_N2);
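+ /* BCN_CAPTURE_N1_N2 as decoded below: N1 (10 msec frame count)
+ * in bits [63:24], N2 (0.813802083 nsec ticks) in bits [23:0]
+ */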
+ bcn_n1 = (regval >> 24) & 0xFFFFFFFFFF;
+ bcn_n2 = regval & 0xFFFFFF;
+ /* BCN N1 10 msec counter to nsec */
+ bcn_n1_ns = bcn_n1 * 10 * NSEC_PER_MSEC;
+ bcn_n1_ns += UTC_GPS_EPOCH_DIFF * NSEC_PER_SEC;
+ /* BCN N2 clock period 0.813802083 nsec to pico secs */
+ bcn_n2_ps = (bcn_n2 * 813802083UL) / 1000000;
+ ref.ptp0_ns = ptp0_ns;
+ ref.bcn0_n1_ns = bcn_n1_ns;
+ ref.bcn0_n2_ps = bcn_n2_ps;
+ memcpy(&ptp_cfg->old_ref, &ref, sizeof(struct ptp_bcn_ref));
+ memcpy(&ptp_cfg->new_ref, &ref, sizeof(struct ptp_bcn_ref));
+ ptp_cfg->use_ptp_alg = 1;
+ expires = jiffies + PTP_OFF_RESAMPLE_THRESH * HZ;
+ mod_timer(&ptp_cfg->ptp_timer, expires);
+ ret = 0;
+ goto out;
+ }
+ case OTX2_RFOE_IOCTL_SEC_BCN_OFFSET:
+ {
+ struct otx2_rfoe_drv_ctx *drv_ctx = NULL;
+ struct otx2_rfoe_ndev_priv *priv;
+ struct bcn_sec_offset_cfg cfg;
+ struct net_device *netdev;
+ int idx;
+
+ if (!cdev->odp_intf_cfg) {
+ dev_info(cdev->dev, "odp interface cfg is not done\n");
+ ret = -EBUSY;
+ goto out;
+ }
+ if (copy_from_user(&cfg, (void __user *)arg,
+ sizeof(struct bcn_sec_offset_cfg))) {
+ dev_err(cdev->dev, "copy from user fault\n");
+ ret = -EFAULT;
+ goto out;
+ }
+ for (idx = 0; idx < RFOE_MAX_INTF; idx++) {
+ drv_ctx = &rfoe_drv_ctx[idx];
+ if (drv_ctx->valid &&
+ drv_ctx->rfoe_num == cfg.rfoe_num &&
+ drv_ctx->lmac_id == cfg.lmac_id)
+ break;
+ }
+ if (idx >= RFOE_MAX_INTF) {
+ dev_err(cdev->dev, "drv ctx not found\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ netdev = drv_ctx->netdev;
+ priv = netdev_priv(netdev);
+ priv->sec_bcn_offset = cfg.sec_bcn_offset;
+ ret = 0;
+ goto out;
+ }
+ case OTX2_RFOE_IOCTL_MODE_CPRI:
+ {
+ int id = 0;
+
+ if (!cdev->odp_intf_cfg) {
+ dev_info(cdev->dev, "odp interface cfg is not done\n");
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (copy_from_user(&id, (void __user *)arg, sizeof(int))) {
+ dev_err(cdev->dev, "copy from user fault\n");
+ ret = -EFAULT;
+ goto out;
+ }
+
+ if (cdev->mhab_mode[id] == IF_TYPE_ETHERNET) {
+ otx2_rfoe_disable_intf(id);
+ otx2_cpri_enable_intf(id);
+ cdev->mhab_mode[id] = IF_TYPE_CPRI;
+ }
+
+ ret = 0;
+ goto out;
+ }
+ case OTX2_RFOE_IOCTL_LINK_EVENT:
+ {
+ struct otx2_rfoe_drv_ctx *drv_ctx = NULL;
+ struct otx2_rfoe_link_event cfg;
+ struct net_device *netdev;
+ int idx;
+
+ if (!cdev->odp_intf_cfg) {
+ dev_info(cdev->dev, "odp interface cfg is not done\n");
+ ret = -EBUSY;
+ goto out;
+ }
+ if (copy_from_user(&cfg, (void __user *)arg,
+ sizeof(struct otx2_rfoe_link_event))) {
+ dev_err(cdev->dev, "copy from user fault\n");
+ ret = -EFAULT;
+ goto out;
+ }
+ for (idx = 0; idx < RFOE_MAX_INTF; idx++) {
+ drv_ctx = &rfoe_drv_ctx[idx];
+ if (drv_ctx->valid &&
+ drv_ctx->rfoe_num == cfg.rfoe_num &&
+ drv_ctx->lmac_id == cfg.lmac_id)
+ break;
+ }
+ if (idx >= RFOE_MAX_INTF) {
+ dev_err(cdev->dev, "drv ctx not found\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ netdev = drv_ctx->netdev;
+ otx2_rfoe_set_link_state(netdev, cfg.link_state);
+ ret = 0;
+ goto out;
+ }
+ case OTX2_CPRI_IOCTL_LINK_EVENT:
+ {
+ struct otx2_cpri_drv_ctx *drv_ctx = NULL;
+ struct otx2_cpri_link_event cfg;
+ struct net_device *netdev;
+ int idx;
+
+ if (!cdev->odp_intf_cfg) {
+ dev_info(cdev->dev, "odp interface cfg is not done\n");
+ ret = -EBUSY;
+ goto out;
+ }
+ if (copy_from_user(&cfg, (void __user *)arg,
+ sizeof(struct otx2_cpri_link_event))) {
+ dev_err(cdev->dev, "copy from user fault\n");
+ ret = -EFAULT;
+ goto out;
+ }
+ for (idx = 0; idx < OTX2_BPHY_CPRI_MAX_INTF; idx++) {
+ drv_ctx = &cpri_drv_ctx[idx];
+ if (drv_ctx->valid &&
+ drv_ctx->cpri_num == cfg.cpri_num &&
+ drv_ctx->lmac_id == cfg.lmac_id)
+ break;
+ }
+ if (idx >= OTX2_BPHY_CPRI_MAX_INTF) {
+ dev_err(cdev->dev, "drv ctx not found\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ netdev = drv_ctx->netdev;
+ otx2_cpri_set_link_state(netdev, cfg.link_state);
+ ret = 0;
+ goto out;
+ }
+ case OTX2_IOCTL_RFOE_10x_CFG:
+ {
+ struct cnf10k_rfoe_ndev_comm_intf_cfg *intf_cfg;
+ struct pci_dev *bphy_pdev;
+ int idx;
+
+ if (cdev->odp_intf_cfg) {
+ dev_info(cdev->dev, "odp interface cfg already done\n");
+ ret = -EBUSY;
+ goto out;
+ }
+
+ intf_cfg = kzalloc(BPHY_MAX_RFOE_MHAB * sizeof(*intf_cfg),
+ GFP_KERNEL);
+ if (!intf_cfg) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (copy_from_user(intf_cfg, (void __user *)arg,
+ (BPHY_MAX_RFOE_MHAB *
+ sizeof(*intf_cfg)))) {
+ dev_err(cdev->dev, "copy from user fault\n");
+ kfree(intf_cfg);
+ ret = -EFAULT;
+ goto out;
+ }
+
+ for (idx = 0; idx < BPHY_MAX_RFOE_MHAB; idx++)
+ cdev->mhab_mode[idx] = IF_TYPE_ETHERNET;
+
+ ret = cnf10k_rfoe_parse_and_init_intf(cdev, intf_cfg);
+ if (ret < 0) {
+ dev_err(cdev->dev, "odp <-> netdev parse error\n");
+ kfree(intf_cfg);
+ goto out;
+ }
+
+ /* The MSIXEN bit gets cleared when the ODP BPHY driver
+ * resets the BPHY, so re-enable it here in the ioctl.
+ */
+ bphy_pdev = pci_get_device(OTX2_BPHY_PCI_VENDOR_ID,
+ OTX2_BPHY_PCI_DEVICE_ID, NULL);
+ if (!bphy_pdev) {
+ dev_err(cdev->dev, "Couldn't find BPHY PCI device %x\n",
+ OTX2_BPHY_PCI_DEVICE_ID);
+ kfree(intf_cfg);
+ ret = -ENODEV;
+ goto out;
+ }
+ msix_enable_ctrl(bphy_pdev);
+
+ /* Enable GPINT Rx and Tx interrupts */
+ writeq(0xFFFFFFFF, bphy_reg_base + PSM_INT_GP_ENA_W1S(1));
+ if (cdev->gpint2_irq)
+ writeq(0xFFFFFFFF, bphy_reg_base + PSM_INT_GP_ENA_W1S(2));
+
+ cdev->odp_intf_cfg = 1;
+
+ kfree(intf_cfg);
+
+ ret = 0;
+ goto out;
+ }
+ default:
+ {
+ dev_info(cdev->dev, "ioctl: no match\n");
+ ret = -EINVAL;
+ }
+ }
+
+out:
+ mutex_unlock(&cdev->mutex_lock);
+ return ret;
+}
+
+static int otx2_bphy_cdev_open(struct inode *inode, struct file *filp)
+{
+ struct otx2_bphy_cdev_priv *cdev;
+ int status = 0;
+
+ cdev = container_of(inode->i_cdev, struct otx2_bphy_cdev_priv, cdev);
+
+ mutex_lock(&cdev->mutex_lock);
+
+ if (cdev->is_open) {
+ dev_err(cdev->dev, "failed to open the device\n");
+ status = -EBUSY;
+ goto error;
+ }
+ cdev->is_open = 1;
+ filp->private_data = cdev;
+
+error:
+ mutex_unlock(&cdev->mutex_lock);
+
+ return status;
+}
+
+static int otx2_bphy_cdev_release(struct inode *inode, struct file *filp)
+{
+ struct otx2_bphy_cdev_priv *cdev = filp->private_data;
+ u32 status;
+
+ mutex_lock(&cdev->mutex_lock);
+
+ if (!cdev->odp_intf_cfg)
+ goto cdev_release_exit;
+
+ /* Disable GPINT Rx and Tx interrupts */
+ writeq(0xFFFFFFFF, bphy_reg_base + PSM_INT_GP_ENA_W1C(1));
+ if (cdev->gpint2_irq)
+ writeq(0xFFFFFFFF, bphy_reg_base + PSM_INT_GP_ENA_W1C(2));
+
+ /* clear interrupt status */
+ status = readq(bphy_reg_base + PSM_INT_GP_SUM_W1C(1)) & 0xFFFFFFFF;
+ writeq(status, bphy_reg_base + PSM_INT_GP_SUM_W1C(1));
+ if (cdev->gpint2_irq) {
+ status = readq(bphy_reg_base + PSM_INT_GP_SUM_W1C(2)) &
+ 0xFFFFFFFF;
+ writeq(status, bphy_reg_base + PSM_INT_GP_SUM_W1C(2));
+ }
+
+ otx2_bphy_rfoe_cleanup();
+ if (cpri_available())
+ otx2_bphy_cpri_cleanup();
+
+ cdev->odp_intf_cfg = 0;
+
+cdev_release_exit:
+ cdev->is_open = 0;
+ mutex_unlock(&cdev->mutex_lock);
+
+ return 0;
+}
+
+static const struct file_operations otx2_bphy_cdev_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = otx2_bphy_cdev_ioctl,
+ .open = otx2_bphy_cdev_open,
+ .release = otx2_bphy_cdev_release,
+};
+
+static int otx2_bphy_probe(struct platform_device *pdev)
+{
+ struct otx2_bphy_cdev_priv *cdev_priv;
+ struct pci_dev *bphy_pdev;
+ struct resource *res;
+ int err = 0;
+ dev_t devt;
+
+ /* allocate priv structure */
+ cdev_priv = kzalloc(sizeof(*cdev_priv), GFP_KERNEL);
+ if (!cdev_priv) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /* BPHY is a PCI device and the kernel resets the MSIXEN bit during
+ * enumeration. So enable it back for interrupts to be generated.
+ */
+ bphy_pdev = pci_get_device(OTX2_BPHY_PCI_VENDOR_ID,
+ OTX2_BPHY_PCI_DEVICE_ID, NULL);
+ if (!bphy_pdev) {
+ dev_err(&pdev->dev, "Couldn't find BPHY PCI device %x\n",
+ OTX2_BPHY_PCI_DEVICE_ID);
+ err = -ENODEV;
+ goto free_cdev_priv;
+ }
+ msix_enable_ctrl(bphy_pdev);
+
+ /* bphy registers ioremap */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to get bphy resource\n");
+ err = -ENXIO;
+ goto free_cdev_priv;
+ }
+ bphy_reg_base = ioremap(res->start, resource_size(res));
+ if (!bphy_reg_base) {
+ dev_err(&pdev->dev, "failed to ioremap bphy registers\n");
+ err = -ENOMEM;
+ goto free_cdev_priv;
+ }
+ /* psm registers ioremap */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to get psm resource\n");
+ err = -ENXIO;
+ goto out_unmap_bphy_reg;
+ }
+ psm_reg_base = ioremap(res->start, resource_size(res));
+ if (!psm_reg_base) {
+ dev_err(&pdev->dev, "failed to ioremap psm registers\n");
+ err = -ENOMEM;
+ goto out_unmap_bphy_reg;
+ }
+ /* rfoe registers ioremap */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to get rfoe resource\n");
+ err = -ENXIO;
+ goto out_unmap_psm_reg;
+ }
+ rfoe_reg_base = ioremap(res->start, resource_size(res));
+ if (!rfoe_reg_base) {
+ dev_err(&pdev->dev, "failed to ioremap rfoe registers\n");
+ err = -ENOMEM;
+ goto out_unmap_psm_reg;
+ }
+ /* bcn register ioremap */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to get bcn resource\n");
+ err = -ENXIO;
+ goto out_unmap_rfoe_reg;
+ }
+ bcn_reg_base = ioremap(res->start, resource_size(res));
+ if (!bcn_reg_base) {
+ dev_err(&pdev->dev, "failed to ioremap bcn registers\n");
+ err = -ENOMEM;
+ goto out_unmap_rfoe_reg;
+ }
+ /* ptp register ioremap */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 4);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to get ptp resource\n");
+ err = -ENXIO;
+ goto out_unmap_bcn_reg;
+ }
+ ptp_reg_base = ioremap(res->start, resource_size(res));
+ if (!ptp_reg_base) {
+ dev_err(&pdev->dev, "failed to ioremap ptp registers\n");
+ err = -ENOMEM;
+ goto out_unmap_bcn_reg;
+ }
+ /* cpri registers ioremap */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 5);
+ if (!res) {
+ cpri_reg_base = NULL;
+ } else {
+ dev_info(&pdev->dev, "cpri mem resource found\n");
+ cpri_reg_base = ioremap(res->start, resource_size(res));
+ if (!cpri_reg_base) {
+ dev_err(&pdev->dev, "failed to ioremap cpri registers\n");
+ err = -ENOMEM;
+ goto out_unmap_ptp_reg;
+ }
+ }
+ /* get irq */
+ cdev_priv->irq = platform_get_irq(pdev, 0);
+ if (cdev_priv->irq <= 0) {
+ dev_err(&pdev->dev, "irq resource not found\n");
+ err = cdev_priv->irq ? cdev_priv->irq : -ENXIO;
+ goto out_unmap_cpri_reg;
+ }
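+ /* GPINT2 is optional; when present it is serviced by the CNF10K
+ * handler registered below
+ */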
+ cdev_priv->gpint2_irq = platform_get_irq(pdev, 1);
+ if (cdev_priv->gpint2_irq < 0)
+ cdev_priv->gpint2_irq = 0;
+ else
+ dev_info(&pdev->dev, "gpint2 irq resource found\n");
+
+ /* create a character device */
+ err = alloc_chrdev_region(&devt, 0, 1, DEVICE_NAME);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to alloc chrdev device region\n");
+ goto out_unmap_cpri_reg;
+ }
+
+ otx2rfoe_class = class_create(THIS_MODULE, DEVICE_NAME);
+ if (IS_ERR(otx2rfoe_class)) {
+ dev_err(&pdev->dev, "couldn't create class %s\n", DEVICE_NAME);
+ err = PTR_ERR(otx2rfoe_class);
+ goto out_unregister_chrdev_region;
+ }
+
+ cdev_priv->devt = devt;
+ cdev_priv->is_open = 0;
+ spin_lock_init(&cdev_priv->lock);
+ spin_lock_init(&cdev_priv->mbt_lock);
+ mutex_init(&cdev_priv->mutex_lock);
+
+ cdev_init(&cdev_priv->cdev, &otx2_bphy_cdev_fops);
+ cdev_priv->cdev.owner = THIS_MODULE;
+
+ err = cdev_add(&cdev_priv->cdev, devt, 1);
+ if (err < 0) {
+ dev_err(&pdev->dev, "cdev_add() failed\n");
+ goto out_class_destroy;
+ }
+
+ cdev_priv->dev = device_create(otx2rfoe_class, &pdev->dev,
+ cdev_priv->cdev.dev, cdev_priv,
+ DEVICE_NAME);
+ if (IS_ERR(cdev_priv->dev)) {
+ dev_err(&pdev->dev, "device_create failed\n");
+ err = PTR_ERR(cdev_priv->dev);
+ goto out_cdev_del;
+ }
+
+ dev_info(&pdev->dev, "successfully registered char device, major=%d\n",
+ MAJOR(cdev_priv->cdev.dev));
+
+ err = request_irq(cdev_priv->irq, otx2_bphy_intr_handler, 0,
+ "otx2_bphy_int", cdev_priv);
+ if (err) {
+ dev_err(&pdev->dev, "can't assign irq %d\n", cdev_priv->irq);
+ goto out_device_destroy;
+ }
+
+ if (cdev_priv->gpint2_irq) {
+ err = request_irq(cdev_priv->gpint2_irq,
+ cnf10k_gpint2_intr_handler, 0,
+ "cn10k_bphy_int", cdev_priv);
+ if (err) {
+ dev_err(&pdev->dev, "can't assign irq %d\n",
+ cdev_priv->gpint2_irq);
+ goto free_irq;
+ }
+ }
+
+ platform_set_drvdata(pdev, cdev_priv);
+
+ err = 0;
+ goto out;
+
+free_irq:
+ free_irq(cdev_priv->irq, cdev_priv);
+out_device_destroy:
+ device_destroy(otx2rfoe_class, cdev_priv->cdev.dev);
+out_cdev_del:
+ cdev_del(&cdev_priv->cdev);
+out_class_destroy:
+ class_destroy(otx2rfoe_class);
+out_unregister_chrdev_region:
+ unregister_chrdev_region(devt, 1);
+out_unmap_cpri_reg:
+ iounmap(cpri_reg_base);
+out_unmap_ptp_reg:
+ iounmap(ptp_reg_base);
+out_unmap_bcn_reg:
+ iounmap(bcn_reg_base);
+out_unmap_rfoe_reg:
+ iounmap(rfoe_reg_base);
+out_unmap_psm_reg:
+ iounmap(psm_reg_base);
+out_unmap_bphy_reg:
+ iounmap(bphy_reg_base);
+free_cdev_priv:
+ kfree(cdev_priv);
+out:
+ return err;
+}
+
+static int otx2_bphy_remove(struct platform_device *pdev)
+{
+ struct otx2_bphy_cdev_priv *cdev_priv = dev_get_drvdata(&pdev->dev);
+
+ /* free irqs before unmapping the registers their handlers touch */
+ if (cdev_priv->gpint2_irq)
+ free_irq(cdev_priv->gpint2_irq, cdev_priv);
+ free_irq(cdev_priv->irq, cdev_priv);
+
+ /* unmap register regions */
+ iounmap(cpri_reg_base);
+ iounmap(ptp_reg_base);
+ iounmap(bcn_reg_base);
+ iounmap(rfoe_reg_base);
+ iounmap(psm_reg_base);
+ iounmap(bphy_reg_base);
+
+ /* char device cleanup */
+ device_destroy(otx2rfoe_class, cdev_priv->cdev.dev);
+ cdev_del(&cdev_priv->cdev);
+ class_destroy(otx2rfoe_class);
+ unregister_chrdev_region(cdev_priv->cdev.dev, 1);
+ kfree(cdev_priv);
+
+ return 0;
+}
+
+static const struct of_device_id otx2_bphy_of_match[] = {
+ { .compatible = "marvell,bphy-netdev" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, otx2_bphy_of_match);
+
+static struct platform_driver otx2_bphy_driver = {
+ .probe = otx2_bphy_probe,
+ .remove = otx2_bphy_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = otx2_bphy_of_match,
+ },
+};
+
+static int __init otx2_bphy_init(void)
+{
+ int ret;
+
+ pr_info("%s: %s\n", DRV_NAME, DRV_STRING);
+
+ ret = platform_driver_register(&otx2_bphy_driver);
+ if (ret < 0)
+ return ret;
+
+ otx2_bphy_debugfs_init();
+
+ return 0;
+}
+
+static void __exit otx2_bphy_exit(void)
+{
+ otx2_bphy_debugfs_exit();
+
+ platform_driver_unregister(&otx2_bphy_driver);
+}
+
+module_init(otx2_bphy_init);
+module_exit(otx2_bphy_exit);
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_cpri.c b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_cpri.c
new file mode 100644
index 000000000000..2fda900e22c9
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_cpri.c
@@ -0,0 +1,755 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 BPHY RFOE/CPRI Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "otx2_cpri.h"
+#include "otx2_bphy_debugfs.h"
+
+/* Theory of Operation
+ *
+ * I. General
+ *
+ * The BPHY CPRI netdev processes Ethernet packets which are received
+ * and transmitted by the CPRI MHAB. The ODP BPHY application shares the
+ * CPRI ETH UL/DL configuration information using ioctl. The Rx
+ * notification is sent to the netdev using a PSM GPINT.
+ *
+ * II. Driver Operation
+ *
+ * This driver registers a character device and provides an ioctl
+ * interface for the ODP application to initialize the netdev(s) that
+ * process CPRI Ethernet packets. Each netdev instance created by the
+ * driver corresponds to a unique CPRI MHAB id and Lane id. The ODP
+ * application shares information such as the CPRI ETH UL/DL circular
+ * buffers and the Rx GPINT number per CPRI MHAB. The CPRI ETH UL/DL
+ * circular buffers are shared per CPRI MHAB id. The Rx/Tx packet
+ * memory (DDR) is also allocated by the ODP application. The GPINT is
+ * set up using the CPRI_ETH_UL_INT_PSM_MSG_W0 and
+ * CPRI_ETH_UL_INT_PSM_MSG_W1 registers.
+ *
+ * III. Transmit
+ *
+ * The driver xmit routine selects the DL circular buffer ring based on
+ * the MHAB id and, if a free entry is available, writes the WQE header
+ * and packet data into the DL entry and updates DL_WR_DOORBELL with the
+ * number of packets written for the hardware to process.
+ *
+ * IV. Receive
+ *
+ * The driver receives a GPINT interrupt notification per MHAB and
+ * invokes the NAPI handler. The NAPI handler reads the UL circular
+ * buffer ring parameters UL_SW_RD_PTR and UL_NXT_WR_PTR to get the
+ * count of packets to be processed. For each packet received, the
+ * driver allocates an skb and copies the packet data into it. The
+ * driver updates the UL_RD_DOORBELL register with the count of packets
+ * processed.
+ *
+ * V. Miscellaneous
+ *
+ * Ethtool:
+ * The ethtool stats shows packet stats for each netdev instance.
+ *
+ */
+
+/* global driver ctx */
+struct otx2_cpri_drv_ctx cpri_drv_ctx[OTX2_BPHY_CPRI_MAX_INTF];
+
+/* debugfs */
+static void otx2_cpri_debugfs_reader(char *buffer, size_t count, void *priv);
+static const char *otx2_cpri_debugfs_get_formatter(void);
+static size_t otx2_cpri_debugfs_get_buffer_size(void);
+static void otx2_cpri_debugfs_create(struct otx2_cpri_drv_ctx *ctx);
+static void otx2_cpri_debugfs_remove(struct otx2_cpri_drv_ctx *ctx);
+
+static struct net_device *otx2_cpri_get_netdev(int mhab_id, int lmac_id)
+{
+ struct net_device *netdev = NULL;
+ int idx;
+
+ for (idx = 0; idx < OTX2_BPHY_CPRI_MAX_INTF; idx++) {
+ if (cpri_drv_ctx[idx].cpri_num == mhab_id &&
+ cpri_drv_ctx[idx].lmac_id == lmac_id &&
+ cpri_drv_ctx[idx].valid) {
+ netdev = cpri_drv_ctx[idx].netdev;
+ break;
+ }
+ }
+
+ return netdev;
+}
+
+void otx2_cpri_enable_intf(int cpri_num)
+{
+ struct otx2_cpri_drv_ctx *drv_ctx;
+ struct otx2_cpri_ndev_priv *priv;
+ struct net_device *netdev;
+ int idx;
+
+ for (idx = 0; idx < OTX2_BPHY_CPRI_MAX_INTF; idx++) {
+ drv_ctx = &cpri_drv_ctx[idx];
+ if (drv_ctx->cpri_num == cpri_num && drv_ctx->valid) {
+ netdev = drv_ctx->netdev;
+ priv = netdev_priv(netdev);
+ priv->if_type = IF_TYPE_CPRI;
+ }
+ }
+}
+
+void otx2_bphy_cpri_cleanup(void)
+{
+ struct otx2_cpri_drv_ctx *drv_ctx = NULL;
+ struct otx2_cpri_ndev_priv *priv;
+ struct net_device *netdev;
+ int i;
+
+ for (i = 0; i < OTX2_BPHY_CPRI_MAX_INTF; i++) {
+ drv_ctx = &cpri_drv_ctx[i];
+ if (drv_ctx->valid) {
+ otx2_cpri_debugfs_remove(drv_ctx);
+ netdev = drv_ctx->netdev;
+ priv = netdev_priv(netdev);
+ unregister_netdev(netdev);
+ netif_napi_del(&priv->napi);
+ --(priv->cpri_common->refcnt);
+ if (priv->cpri_common->refcnt == 0)
+ kfree(priv->cpri_common);
+ free_netdev(netdev);
+ drv_ctx->valid = 0;
+ }
+ }
+
+ /* Disable CPRI ETH UL INT */
+ for (i = 0; i < OTX2_BPHY_CPRI_MAX_MHAB; i++)
+ writeq(0x1, cpri_reg_base +
+ CPRIX_ETH_UL_INT_ENA_W1C(i));
+}
+
+static int otx2_cpri_process_rx_pkts(struct otx2_cpri_ndev_priv *priv,
+ int budget)
+{
+ int count, head, processed_pkts = 0;
+ struct otx2_cpri_ndev_priv *priv2;
+ struct cpri_pkt_ul_wqe_hdr *wqe;
+ struct ul_cbuf_cfg *ul_cfg;
+ struct net_device *netdev;
+ u16 nxt_wr_ptr, len;
+ struct sk_buff *skb;
+ u8 *pkt_buf;
+
+ ul_cfg = &priv->cpri_common->ul_cfg;
+
+ nxt_wr_ptr = readq(priv->cpri_reg_base +
+ CPRIX_RXD_GMII_UL_NXT_WR_PTR(priv->cpri_num)) &
+ 0xFFFF;
+ /* get the HW head */
+ head = CIRC_BUF_ENTRY(nxt_wr_ptr);
+
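+ /* count of pending packets between the SW read pointer and the
+ * HW head, accounting for wrap-around; e.g. with 32 entries,
+ * sw_rd_ptr = 30 and head = 4 there are (32 - 30) + 4 = 6
+ * packets to process
+ */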
+ if (ul_cfg->sw_rd_ptr > head) {
+ count = ul_cfg->num_entries - ul_cfg->sw_rd_ptr;
+ count += head;
+ } else {
+ count = head - ul_cfg->sw_rd_ptr;
+ }
+
+ while (likely((processed_pkts < budget) && (processed_pkts < count))) {
+ pkt_buf = (u8 *)ul_cfg->cbuf_virt_addr +
+ (OTX2_BPHY_CPRI_PKT_BUF_SIZE * ul_cfg->sw_rd_ptr);
+ wqe = (struct cpri_pkt_ul_wqe_hdr *)pkt_buf;
+ netdev = otx2_cpri_get_netdev(wqe->mhab_id, wqe->lane_id);
+ if (unlikely(!netdev)) {
+ net_err_ratelimited("CPRI Rx netdev not found, cpri%d lmac%d\n",
+ wqe->mhab_id, wqe->lane_id);
+ priv->stats.rx_dropped++;
+ priv->last_rx_dropped_jiffies = jiffies;
+ goto update_processed_pkts;
+ }
+ priv2 = netdev_priv(netdev);
+ if (wqe->fcserr || wqe->rsp_ferr || wqe->rsp_nferr) {
+ net_err_ratelimited("%s: CPRI Rx err,cpri%d lmac%d sw_rd_ptr=%d\n",
+ netdev->name,
+ wqe->mhab_id, wqe->lane_id,
+ ul_cfg->sw_rd_ptr);
+ priv2->stats.rx_dropped++;
+ priv2->last_rx_dropped_jiffies = jiffies;
+ goto update_processed_pkts;
+ }
+ if (unlikely(!netif_carrier_ok(netdev))) {
+ net_err_ratelimited("%s {cpri%d lmac%d} link down, drop pkt\n",
+ netdev->name, priv2->cpri_num,
+ priv2->lmac_id);
+ priv2->stats.rx_dropped++;
+ priv2->last_rx_dropped_jiffies = jiffies;
+ goto update_processed_pkts;
+ }
+
+ len = wqe->pkt_length;
+
+ if (unlikely(netif_msg_pktdata(priv2))) {
+ netdev_printk(KERN_DEBUG, priv2->netdev, "RX DATA:");
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16,
+ 4, pkt_buf,
+ len + OTX2_BPHY_CPRI_WQE_SIZE, true);
+ }
+
+ pkt_buf += OTX2_BPHY_CPRI_WQE_SIZE;
+
+ skb = netdev_alloc_skb_ip_align(netdev, len);
+ if (!skb) {
+ net_err_ratelimited("%s: CPRI Rx: alloc skb failed\n",
+ netdev->name);
+ priv2->stats.rx_dropped++;
+ priv2->last_rx_dropped_jiffies = jiffies;
+ goto update_processed_pkts;
+ }
+
+ memcpy(skb->data, pkt_buf, len);
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ netif_receive_skb(skb);
+
+ priv2->last_rx_jiffies = jiffies;
+
+update_processed_pkts:
+ processed_pkts++;
+ ul_cfg->sw_rd_ptr++;
+ if (ul_cfg->sw_rd_ptr == ul_cfg->num_entries)
+ ul_cfg->sw_rd_ptr = 0;
+
+ }
+
+ if (processed_pkts)
+ writeq(processed_pkts, priv->cpri_reg_base +
+ CPRIX_RXD_GMII_UL_RD_DOORBELL(priv->cpri_num));
+
+ return processed_pkts;
+}
+
+/* napi poll routine */
+static int otx2_cpri_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct otx2_bphy_cdev_priv *cdev_priv;
+ struct otx2_cpri_ndev_priv *priv;
+ u64 intr_en, regval;
+ int workdone = 0;
+
+ priv = container_of(napi, struct otx2_cpri_ndev_priv, napi);
+ cdev_priv = priv->cdev_priv;
+
+ /* pkt processing loop */
+ workdone += otx2_cpri_process_rx_pkts(priv, budget);
+
+ if (workdone < budget) {
+ napi_complete_done(napi, workdone);
+
+ /* Re enable the Rx interrupts */
+ intr_en = 1 << CPRI_RX_INTR_SHIFT(priv->cpri_num);
+ spin_lock(&cdev_priv->lock);
+ regval = readq(priv->bphy_reg_base + PSM_INT_GP_ENA_W1S(1));
+ regval |= intr_en;
+ writeq(regval, priv->bphy_reg_base + PSM_INT_GP_ENA_W1S(1));
+ spin_unlock(&cdev_priv->lock);
+ }
+
+ return workdone;
+}
+
+void otx2_cpri_rx_napi_schedule(int cpri_num, u32 status)
+{
+ struct otx2_cpri_drv_ctx *drv_ctx;
+ struct otx2_cpri_ndev_priv *priv;
+ u64 regval;
+ int idx;
+
+ for (idx = 0; idx < OTX2_BPHY_CPRI_MAX_INTF; idx++) {
+ drv_ctx = &cpri_drv_ctx[idx];
+ /* ignore lmac, one UL interrupt/cpri */
+ if (!(drv_ctx->valid && drv_ctx->cpri_num == cpri_num))
+ continue;
+ /* check if i/f down, napi disabled */
+ priv = netdev_priv(drv_ctx->netdev);
+ if (test_bit(CPRI_INTF_DOWN, &priv->state))
+ continue;
+ /* clear intr enable bit, re-enable in napi handler */
+ regval = 1 << CPRI_RX_INTR_SHIFT(cpri_num);
+ writeq(regval, priv->bphy_reg_base + PSM_INT_GP_ENA_W1C(1));
+ /* schedule napi */
+ napi_schedule(&priv->napi);
+ /* napi scheduled per MHAB, return */
+ return;
+ }
+}
+
+void otx2_cpri_update_stats(struct otx2_cpri_ndev_priv *priv)
+{
+ struct otx2_cpri_stats *dev_stats = &priv->stats;
+
+ dev_stats->rx_frames += readq(priv->cpri_reg_base +
+ CPRIX_ETH_UL_GPKTS_CNT(priv->cpri_num,
+ priv->lmac_id));
+ dev_stats->rx_octets += readq(priv->cpri_reg_base +
+ CPRIX_ETH_UL_GOCT_CNT(priv->cpri_num,
+ priv->lmac_id));
+ dev_stats->rx_err += readq(priv->cpri_reg_base +
+ CPRIX_ETH_UL_ERR_CNT(priv->cpri_num,
+ priv->lmac_id));
+ dev_stats->bad_crc += readq(priv->cpri_reg_base +
+ CPRIX_ETH_BAD_CRC_CNT(priv->cpri_num,
+ priv->lmac_id));
+ dev_stats->oversize += readq(priv->cpri_reg_base +
+ CPRIX_ETH_UL_OSIZE_CNT(priv->cpri_num,
+ priv->lmac_id));
+ dev_stats->undersize += readq(priv->cpri_reg_base +
+ CPRIX_ETH_UL_USIZE_CNT(priv->cpri_num,
+ priv->lmac_id));
+ dev_stats->fifo_ovr += readq(priv->cpri_reg_base +
+ CPRIX_ETH_UL_FIFO_ORUN_CNT(priv->cpri_num,
+ priv->lmac_id));
+ dev_stats->tx_frames += readq(priv->cpri_reg_base +
+ CPRIX_ETH_DL_GPKTS_CNT(priv->cpri_num,
+ priv->lmac_id));
+ dev_stats->tx_octets += readq(priv->cpri_reg_base +
+ CPRIX_ETH_DL_GOCTETS_CNT(priv->cpri_num,
+ priv->lmac_id));
+}
+
+static void otx2_cpri_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct otx2_cpri_ndev_priv *priv = netdev_priv(netdev);
+ struct otx2_cpri_stats *dev_stats = &priv->stats;
+
+ otx2_cpri_update_stats(priv);
+
+ stats->rx_bytes = dev_stats->rx_octets;
+ stats->rx_packets = dev_stats->rx_frames;
+ stats->rx_dropped = dev_stats->rx_dropped;
+ stats->rx_errors = dev_stats->rx_err;
+ stats->rx_crc_errors = dev_stats->bad_crc;
+ stats->rx_fifo_errors = dev_stats->fifo_ovr;
+ stats->rx_length_errors = dev_stats->oversize + dev_stats->undersize;
+
+ stats->tx_bytes = dev_stats->tx_octets;
+ stats->tx_packets = dev_stats->tx_frames;
+}
+
+/* netdev ioctl */
+static int otx2_cpri_ioctl(struct net_device *netdev, struct ifreq *req,
+ int cmd)
+{
+ return -EOPNOTSUPP;
+}
+
+/* netdev xmit */
+static netdev_tx_t otx2_cpri_eth_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct otx2_cpri_ndev_priv *priv = netdev_priv(netdev);
+ struct cpri_pkt_dl_wqe_hdr *wqe;
+ struct dl_cbuf_cfg *dl_cfg;
+ unsigned long flags;
+ u8 *buf_ptr;
+ int tail, count;
+ u16 nxt_rd_ptr;
+
+ dl_cfg = &priv->cpri_common->dl_cfg;
+
+ spin_lock_irqsave(&dl_cfg->lock, flags);
+
+ if (unlikely(priv->if_type != IF_TYPE_CPRI)) {
+ netif_err(priv, tx_queued, netdev,
+ "%s {cpri%d lmac%d} invalid intf mode, drop pkt\n",
+ netdev->name, priv->cpri_num, priv->lmac_id);
+ /* update stats */
+ priv->stats.tx_dropped++;
+ priv->last_tx_dropped_jiffies = jiffies;
+ goto exit;
+ }
+
+ if (unlikely(!netif_carrier_ok(netdev))) {
+ /* update stats */
+ priv->stats.tx_dropped++;
+ priv->last_tx_dropped_jiffies = jiffies;
+ goto exit;
+ }
+
+ /* Poll CPRI(0..2)_TXD_GMII_DL_WR_DOORBELL until it becomes 0 */
+ while ((readq(priv->cpri_reg_base +
+ CPRIX_TXD_GMII_DL_WR_DOORBELL(priv->cpri_num)) & 0xFF))
+ cpu_relax();
+
+ nxt_rd_ptr = readq(priv->cpri_reg_base +
+ CPRIX_TXD_GMII_DL_NXT_RD_PTR(priv->cpri_num)) &
+ 0xFFFF;
+ /* get the HW tail */
+ tail = CIRC_BUF_ENTRY(nxt_rd_ptr);
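+ /* count of free DL entries between the SW write pointer and the
+ * HW tail, accounting for wrap-around; sw_wr_ptr == tail is
+ * treated as an empty ring
+ */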
+ if (dl_cfg->sw_wr_ptr >= tail)
+ count = dl_cfg->num_entries - dl_cfg->sw_wr_ptr + tail;
+ else
+ count = tail - dl_cfg->sw_wr_ptr;
+
+ if (count == 0) {
+ spin_unlock_irqrestore(&dl_cfg->lock, flags);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (unlikely(netif_msg_pktdata(priv))) {
+ netdev_printk(KERN_DEBUG, priv->netdev, "Tx: skb %pS len=%d\n",
+ skb, skb->len);
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 4,
+ skb->data, skb->len, true);
+ }
+
+ buf_ptr = (u8 *)dl_cfg->cbuf_virt_addr +
+ (OTX2_BPHY_CPRI_PKT_BUF_SIZE * dl_cfg->sw_wr_ptr);
+ wqe = (struct cpri_pkt_dl_wqe_hdr *)buf_ptr;
+ wqe->mhab_id = priv->cpri_num;
+ wqe->lane_id = priv->lmac_id;
+ buf_ptr += OTX2_BPHY_CPRI_WQE_SIZE;
+ /* zero pad for short pkts, since there is no HW support */
+ if (skb->len < 64)
+ memset(buf_ptr, 0, 64);
+ memcpy(buf_ptr, skb->data, skb->len);
+ wqe->pkt_length = skb->len > 64 ? skb->len : 64;
+
+ /* ensure the memory is updated before ringing doorbell */
+ dma_wmb();
+ writeq(1, priv->cpri_reg_base +
+ CPRIX_TXD_GMII_DL_WR_DOORBELL(priv->cpri_num));
+
+ /* increment queue index */
+ dl_cfg->sw_wr_ptr++;
+ if (dl_cfg->sw_wr_ptr == dl_cfg->num_entries)
+ dl_cfg->sw_wr_ptr = 0;
+
+ priv->last_tx_jiffies = jiffies;
+exit:
+ dev_kfree_skb_any(skb);
+ spin_unlock_irqrestore(&dl_cfg->lock, flags);
+
+ return NETDEV_TX_OK;
+}
+
+/* netdev open */
+static int otx2_cpri_eth_open(struct net_device *netdev)
+{
+ struct otx2_cpri_ndev_priv *priv = netdev_priv(netdev);
+
+ napi_enable(&priv->napi);
+
+ spin_lock(&priv->lock);
+ clear_bit(CPRI_INTF_DOWN, &priv->state);
+ if (priv->link_state == LINK_STATE_UP) {
+ netif_carrier_on(netdev);
+ netif_start_queue(netdev);
+ }
+ spin_unlock(&priv->lock);
+
+ return 0;
+}
+
+/* netdev close */
+static int otx2_cpri_eth_stop(struct net_device *netdev)
+{
+ struct otx2_cpri_ndev_priv *priv = netdev_priv(netdev);
+
+ spin_lock(&priv->lock);
+ set_bit(CPRI_INTF_DOWN, &priv->state);
+
+ netif_stop_queue(netdev);
+ netif_carrier_off(netdev);
+ spin_unlock(&priv->lock);
+
+ napi_disable(&priv->napi);
+
+ return 0;
+}
+
+static const struct net_device_ops otx2_cpri_netdev_ops = {
+ .ndo_open = otx2_cpri_eth_open,
+ .ndo_stop = otx2_cpri_eth_stop,
+ .ndo_start_xmit = otx2_cpri_eth_start_xmit,
+ .ndo_do_ioctl = otx2_cpri_ioctl,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_get_stats64 = otx2_cpri_get_stats64,
+};
+
+static void otx2_cpri_dump_ul_cbuf(struct otx2_cpri_ndev_priv *priv)
+{
+ struct ul_cbuf_cfg *ul_cfg = &priv->cpri_common->ul_cfg;
+
+ pr_debug("%s: num_entries=%d iova=0x%llx\n",
+ __func__, ul_cfg->num_entries, ul_cfg->cbuf_iova_addr);
+}
+
+static void otx2_cpri_dump_dl_cbuf(struct otx2_cpri_ndev_priv *priv)
+{
+ struct dl_cbuf_cfg *dl_cfg = &priv->cpri_common->dl_cfg;
+
+ pr_debug("%s: num_entries=%d iova=0x%llx\n",
+ __func__, dl_cfg->num_entries, dl_cfg->cbuf_iova_addr);
+}
+
+static void otx2_cpri_fill_dl_ul_cfg(struct otx2_cpri_ndev_priv *priv,
+ struct bphy_netdev_cpri_if *cpri_cfg)
+{
+ struct dl_cbuf_cfg *dl_cfg;
+ struct ul_cbuf_cfg *ul_cfg;
+ u64 iova;
+
+ dl_cfg = &priv->cpri_common->dl_cfg;
+ dl_cfg->num_entries = cpri_cfg->num_dl_buf;
+ iova = cpri_cfg->dl_buf_iova_addr;
+ dl_cfg->cbuf_iova_addr = iova;
+ dl_cfg->cbuf_virt_addr = otx2_iova_to_virt(priv->iommu_domain, iova);
+ dl_cfg->sw_wr_ptr = 0;
+ spin_lock_init(&dl_cfg->lock);
+ otx2_cpri_dump_dl_cbuf(priv);
+
+ ul_cfg = &priv->cpri_common->ul_cfg;
+ ul_cfg->num_entries = cpri_cfg->num_ul_buf;
+ iova = cpri_cfg->ul_buf_iova_addr;
+ ul_cfg->cbuf_iova_addr = iova;
+ ul_cfg->cbuf_virt_addr = otx2_iova_to_virt(priv->iommu_domain, iova);
+ ul_cfg->sw_rd_ptr = 0;
+ spin_lock_init(&ul_cfg->lock);
+ otx2_cpri_dump_ul_cbuf(priv);
+}
+
+int otx2_cpri_parse_and_init_intf(struct otx2_bphy_cdev_priv *cdev,
+ struct bphy_netdev_comm_intf_cfg *cfg)
+{
+ struct otx2_cpri_drv_ctx *drv_ctx = NULL;
+ struct otx2_cpri_ndev_priv *priv, *priv2;
+ struct bphy_netdev_cpri_if *cpri_cfg;
+ int i, intf_idx = 0, lmac, ret;
+ struct net_device *netdev;
+
+ for (i = 0; i < OTX2_BPHY_CPRI_MAX_MHAB; i++) {
+ priv2 = NULL;
+ cpri_cfg = &cfg[i].cpri_if_cfg;
+ for (lmac = 0; lmac < OTX2_BPHY_CPRI_MAX_LMAC; lmac++) {
+ if (!(cpri_cfg->active_lane_mask & (1 << lmac)))
+ continue;
+ netdev =
+ alloc_etherdev(sizeof(struct otx2_cpri_ndev_priv));
+ if (!netdev) {
+ dev_err(cdev->dev,
+ "error allocating net device\n");
+ ret = -ENOMEM;
+ goto err_exit;
+ }
+ priv = netdev_priv(netdev);
+ memset(priv, 0, sizeof(*priv));
+ if (!priv2) {
+ priv->cpri_common =
+ kzalloc(sizeof(struct cpri_common_cfg),
+ GFP_KERNEL);
+ if (!priv->cpri_common) {
+ dev_err(cdev->dev, "kzalloc failed\n");
+ free_netdev(netdev);
+ ret = -ENOMEM;
+ goto err_exit;
+ }
+ priv->cpri_common->refcnt = 1;
+ }
+ spin_lock_init(&priv->lock);
+ priv->netdev = netdev;
+ priv->cdev_priv = cdev;
+ priv->msg_enable = netif_msg_init(-1, 0);
+ spin_lock_init(&priv->stats.lock);
+ priv->cpri_num = cpri_cfg->id;
+ priv->lmac_id = lmac;
+ priv->if_type = cfg[i].if_type;
+ memcpy(priv->mac_addr, &cpri_cfg->eth_addr[lmac],
+ ETH_ALEN);
+ if (is_valid_ether_addr(priv->mac_addr))
+ ether_addr_copy(netdev->dev_addr,
+ priv->mac_addr);
+ else
+ random_ether_addr(netdev->dev_addr);
+ priv->pdev = pci_get_device(OTX2_BPHY_PCI_VENDOR_ID,
+ OTX2_BPHY_PCI_DEVICE_ID,
+ NULL);
+ priv->iommu_domain =
+ iommu_get_domain_for_dev(&priv->pdev->dev);
+ priv->bphy_reg_base = bphy_reg_base;
+ priv->cpri_reg_base = cpri_reg_base;
+
+ if (!priv2) {
+ otx2_cpri_fill_dl_ul_cfg(priv, cpri_cfg);
+ } else {
+ /* share cpri_common data */
+ priv->cpri_common = priv2->cpri_common;
+ ++(priv->cpri_common->refcnt);
+ }
+
+ netif_napi_add(priv->netdev, &priv->napi,
+ otx2_cpri_napi_poll, NAPI_POLL_WEIGHT);
+
+ /* keep last (cpri + lmac) priv structure */
+ if (!priv2)
+ priv2 = priv;
+
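+ /* flat interface index: OTX2_BPHY_CPRI_MAX_LMAC (4) lanes
+ * per CPRI MHAB
+ */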
+ intf_idx = (i * 4) + lmac;
+ snprintf(netdev->name, sizeof(netdev->name),
+ "cpri%d", intf_idx);
+ netdev->netdev_ops = &otx2_cpri_netdev_ops;
+ otx2_cpri_set_ethtool_ops(netdev);
+ netdev->mtu = 1500U;
+ netdev->min_mtu = ETH_MIN_MTU;
+ netdev->max_mtu = 1500U;
+ ret = register_netdev(netdev);
+ if (ret < 0) {
+ dev_err(cdev->dev,
+ "failed to register net device %s\n",
+ netdev->name);
+ free_netdev(netdev);
+ ret = -ENODEV;
+ goto err_exit;
+ }
+ dev_dbg(cdev->dev, "net device %s registered\n",
+ netdev->name);
+
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+ set_bit(CPRI_INTF_DOWN, &priv->state);
+ priv->link_state = LINK_STATE_UP;
+
+ /* initialize global ctx */
+ drv_ctx = &cpri_drv_ctx[intf_idx];
+ drv_ctx->cpri_num = priv->cpri_num;
+ drv_ctx->lmac_id = priv->lmac_id;
+ drv_ctx->valid = 1;
+ drv_ctx->netdev = netdev;
+
+ /* create debugfs entry */
+ otx2_cpri_debugfs_create(drv_ctx);
+ }
+ }
+
+ return 0;
+
+err_exit:
+ for (i = 0; i < OTX2_BPHY_CPRI_MAX_INTF; i++) {
+ drv_ctx = &cpri_drv_ctx[i];
+ if (drv_ctx->valid) {
+ otx2_cpri_debugfs_remove(drv_ctx);
+ netdev = drv_ctx->netdev;
+ priv = netdev_priv(netdev);
+ unregister_netdev(netdev);
+ netif_napi_del(&priv->napi);
+ --(priv->cpri_common->refcnt);
+ if (priv->cpri_common->refcnt == 0)
+ kfree(priv->cpri_common);
+ free_netdev(netdev);
+ drv_ctx->valid = 0;
+ }
+ }
+ return ret;
+}
+
+static void otx2_cpri_debugfs_reader(char *buffer, size_t count, void *priv)
+{
+ struct otx2_cpri_drv_ctx *ctx;
+ struct otx2_cpri_ndev_priv *netdev;
+ u8 queue_stopped, state_up;
+ const char *formatter;
+
+ ctx = priv;
+ netdev = netdev_priv(ctx->netdev);
+ queue_stopped = netif_queue_stopped(ctx->netdev);
+ state_up = netdev->link_state;
+ formatter = otx2_cpri_debugfs_get_formatter();
+
+ snprintf(buffer, count, formatter,
+ queue_stopped,
+ state_up,
+ netdev->last_tx_jiffies,
+ netdev->last_tx_dropped_jiffies,
+ netdev->last_rx_jiffies,
+ netdev->last_rx_dropped_jiffies,
+ jiffies);
+}
+
+static const char *otx2_cpri_debugfs_get_formatter(void)
+{
+ static const char *buffer_format = "queue-stopped: %u\n"
+ "state-up: %u\n"
+ "last-tx-jiffies: %lu\n"
+ "last-tx-dropped-jiffies: %lu\n"
+ "last-rx-jiffies: %lu\n"
+ "last-rx-dropped-jiffies: %lu\n"
+ "current-jiffies: %lu\n";
+
+ return buffer_format;
+}
+
+static size_t otx2_cpri_debugfs_get_buffer_size(void)
+{
+ static size_t buffer_size;
+
+ if (!buffer_size) {
+ const char *formatter = otx2_cpri_debugfs_get_formatter();
+ u8 max_boolean = 1;
+ unsigned long max_jiffies = (unsigned long)-1;
+
+ buffer_size = snprintf(NULL, 0, formatter,
+ max_boolean,
+ max_boolean,
+ max_jiffies,
+ max_jiffies,
+ max_jiffies,
+ max_jiffies,
+ max_jiffies);
+ ++buffer_size;
+ }
+
+ return buffer_size;
+}
+
+static void otx2_cpri_debugfs_create(struct otx2_cpri_drv_ctx *ctx)
+{
+ size_t buffer_size = otx2_cpri_debugfs_get_buffer_size();
+
+ ctx->debugfs = otx2_bphy_debugfs_add_file(ctx->netdev->name,
+ buffer_size, ctx,
+ otx2_cpri_debugfs_reader);
+}
+
+static void otx2_cpri_debugfs_remove(struct otx2_cpri_drv_ctx *ctx)
+{
+ if (ctx->debugfs)
+ otx2_bphy_debugfs_remove_file(ctx->debugfs);
+}
+
+void otx2_cpri_set_link_state(struct net_device *netdev, u8 state)
+{
+ struct otx2_cpri_ndev_priv *priv;
+
+ priv = netdev_priv(netdev);
+
+ spin_lock(&priv->lock);
+ if (priv->link_state != state) {
+ priv->link_state = state;
+ if (state == LINK_STATE_DOWN) {
+ netdev_info(netdev, "Link DOWN\n");
+ if (netif_running(netdev)) {
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+ }
+ } else {
+ netdev_info(netdev, "Link UP\n");
+ if (netif_running(netdev)) {
+ netif_carrier_on(netdev);
+ netif_start_queue(netdev);
+ }
+ }
+ }
+ spin_unlock(&priv->lock);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_cpri.h b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_cpri.h
new file mode 100644
index 000000000000..e8b88384cd3d
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_cpri.h
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTx2 BPHY RFOE/CPRI Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _OTX2_CPRI_H_
+#define _OTX2_CPRI_H_
+
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/iommu.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_ether.h>
+#include <linux/net_tstamp.h>
+
+#include "otx2_bphy.h"
+#include "otx2_bphy_hw.h"
+#include "rfoe_bphy_netdev_comm_if.h"
+
+#define OTX2_BPHY_CPRI_MAX_MHAB 3
+#define OTX2_BPHY_CPRI_MAX_LMAC 4
+#define OTX2_BPHY_CPRI_MAX_INTF 10
+
+#define OTX2_BPHY_CPRI_PKT_BUF_SIZE 1664 /* wqe 128 bytes + 1536 bytes */
+#define OTX2_BPHY_CPRI_WQE_SIZE 128
+
+#define CPRI_RX_INTR_MASK(a) ((1UL << (a)) << 13)
+#define CPRI_RX_INTR_SHIFT(a) (13 + (a))
+
+/* HW pointers advance by 0x68 units per entry, 1 unit = 16 bytes
+ * (0x68 * 16 = 1664 bytes = OTX2_BPHY_CPRI_PKT_BUF_SIZE)
+ */
+#define CIRC_BUF_ENTRY(a) ((a) / 0x68)
+
+enum cpri_state {
+ CPRI_INTF_DOWN = 1,
+};
+
+/* CPRI support */
+struct otx2_cpri_drv_ctx {
+ u8 cpri_num;
+ u8 lmac_id;
+ int valid;
+ void *debugfs;
+ struct net_device *netdev;
+};
+
+extern struct otx2_cpri_drv_ctx cpri_drv_ctx[OTX2_BPHY_CPRI_MAX_INTF];
+
+struct otx2_cpri_stats {
+ /* Rx */
+ u64 rx_frames;
+ u64 rx_octets;
+ u64 rx_err;
+ u64 bad_crc;
+ u64 oversize;
+ u64 undersize;
+ u64 fifo_ovr;
+ u64 rx_dropped;
+ /* Tx */
+ u64 tx_frames;
+ u64 tx_octets;
+ u64 tx_dropped;
+ /* stats lock */
+ spinlock_t lock;
+};
+
+/* cpri dl cbuf cfg */
+struct dl_cbuf_cfg {
+ int num_entries;
+ u64 cbuf_iova_addr;
+ void __iomem *cbuf_virt_addr;
+ /* sw */
+ u64 sw_wr_ptr;
+ /* dl lock */
+ spinlock_t lock;
+};
+
+/* cpri ul cbuf cfg */
+struct ul_cbuf_cfg {
+ int num_entries;
+ u64 cbuf_iova_addr;
+ void __iomem *cbuf_virt_addr;
+ /* sw */
+ int sw_rd_ptr;
+ /* ul lock */
+ spinlock_t lock;
+};
+
+struct cpri_common_cfg {
+ struct dl_cbuf_cfg dl_cfg;
+ struct ul_cbuf_cfg ul_cfg;
+ u8 refcnt;
+};
+
+struct otx2_cpri_link_event {
+ u8 cpri_num;
+ u8 lmac_id;
+ u8 link_state;
+};
+
+/* cpri netdev priv */
+struct otx2_cpri_ndev_priv {
+ u8 cpri_num;
+ u8 lmac_id;
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+ struct otx2_bphy_cdev_priv *cdev_priv;
+ u32 msg_enable;
+ void __iomem *bphy_reg_base;
+ void __iomem *cpri_reg_base;
+ struct iommu_domain *iommu_domain;
+ struct cpri_common_cfg *cpri_common;
+ struct napi_struct napi;
+ unsigned long state;
+ struct otx2_cpri_stats stats;
+ u8 mac_addr[ETH_ALEN];
+ /* priv lock */
+ spinlock_t lock;
+ int if_type;
+ u8 link_state;
+ unsigned long last_tx_jiffies;
+ unsigned long last_rx_jiffies;
+ unsigned long last_tx_dropped_jiffies;
+ unsigned long last_rx_dropped_jiffies;
+};
+
+int otx2_cpri_parse_and_init_intf(struct otx2_bphy_cdev_priv *cdev,
+ struct bphy_netdev_comm_intf_cfg *cfg);
+
+void otx2_cpri_rx_napi_schedule(int cpri_num, u32 status);
+
+void otx2_cpri_update_stats(struct otx2_cpri_ndev_priv *priv);
+
+void otx2_bphy_cpri_cleanup(void);
+
+void otx2_cpri_enable_intf(int cpri_num);
+
+/* ethtool */
+void otx2_cpri_set_ethtool_ops(struct net_device *netdev);
+
+/* update carrier state */
+void otx2_cpri_set_link_state(struct net_device *netdev, u8 state);
+
+#endif
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_cpri_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_cpri_ethtool.c
new file mode 100644
index 000000000000..ae70cfa36043
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_cpri_ethtool.c
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 BPHY RFOE/CPRI Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/net_tstamp.h>
+
+#include "otx2_cpri.h"
+
+static const char ethtool_stat_strings[][ETH_GSTRING_LEN] = {
+ "rx_frames",
+ "rx_octets",
+ "rx_err",
+ "bad_crc",
+ "oversize",
+ "undersize",
+ "rx_fifo_overrun",
+ "rx_dropped",
+ "tx_frames",
+ "tx_octets",
+ "tx_dropped",
+};
+
+static void otx2_cpri_get_strings(struct net_device *netdev, u32 sset, u8 *data)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ memcpy(data, *ethtool_stat_strings,
+ sizeof(ethtool_stat_strings));
+ break;
+ }
+}
+
+static int otx2_cpri_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(ethtool_stat_strings);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void otx2_cpri_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct otx2_cpri_ndev_priv *priv = netdev_priv(netdev);
+
+ otx2_cpri_update_stats(priv);
+
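+ /* the leading u64 counters in struct otx2_cpri_stats are laid
+ * out in the same order as ethtool_stat_strings
+ */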
+ spin_lock(&priv->stats.lock);
+ memcpy(data, &priv->stats,
+ ARRAY_SIZE(ethtool_stat_strings) * sizeof(u64));
+ spin_unlock(&priv->stats.lock);
+}
+
+static void otx2_cpri_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *p)
+{
+ struct otx2_cpri_ndev_priv *priv = netdev_priv(netdev);
+
+ snprintf(p->driver, sizeof(p->driver), "otx2_cpri {cpri%d lmac%d}",
+ priv->cpri_num, priv->lmac_id);
+ strlcpy(p->bus_info, "platform", sizeof(p->bus_info));
+}
+
+static u32 otx2_cpri_get_msglevel(struct net_device *netdev)
+{
+ struct otx2_cpri_ndev_priv *priv = netdev_priv(netdev);
+
+ return priv->msg_enable;
+}
+
+static void otx2_cpri_set_msglevel(struct net_device *netdev, u32 level)
+{
+ struct otx2_cpri_ndev_priv *priv = netdev_priv(netdev);
+
+ priv->msg_enable = level;
+}
+
+static const struct ethtool_ops otx2_cpri_ethtool_ops = {
+ .get_drvinfo = otx2_cpri_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_strings = otx2_cpri_get_strings,
+ .get_sset_count = otx2_cpri_get_sset_count,
+ .get_ethtool_stats = otx2_cpri_get_ethtool_stats,
+ .get_msglevel = otx2_cpri_get_msglevel,
+ .set_msglevel = otx2_cpri_set_msglevel,
+};
+
+void otx2_cpri_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &otx2_cpri_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_rfoe.c b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_rfoe.c
new file mode 100644
index 000000000000..0bf0d1a50024
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_rfoe.c
@@ -0,0 +1,1697 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 BPHY RFOE/CPRI Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "otx2_rfoe.h"
+#include "otx2_bphy_hw.h"
+#include "otx2_bphy_debugfs.h"
+
+/* Theory of Operation
+ *
+ * I. General
+ *
+ * The BPHY RFOE netdev driver handles packets such as eCPRI control,
+ * PTP and other Ethernet packets received from/sent to the BPHY RFOE
+ * MHAB in the Linux kernel. All other packets, such as ROE and
+ * non-control eCPRI, are handled by the ODP application in user space.
+ * The ODP application initializes the JDT/MBT/PSM queues to process the
+ * Rx/Tx packets in the netdev and shares the information through the
+ * driver ioctl. The Rx/Tx notification is sent to the netdev using one
+ * of the PSM GPINTs.
+ *
+ * II. Driver Operation
+ *
+ * This driver registers a character device and provides an ioctl
+ * interface for the ODP application to initialize the netdev(s) that
+ * process eCPRI and other Ethernet packets. Each netdev corresponds to
+ * a unique RFOE index and LMAC id. The ODP application initializes the
+ * flow tables,
+ * Rx JDT and RX MBT to process Rx packets. There will be a unique
+ * Flow Table, JDT, MBT for processing eCPRI, PTP and other Ethernet
+ * packets separately. The Rx packet memory (DDR) is also allocated
+ * by ODP and configured in MBT. All LMAC's in a single RFOE MHAB share
+ * the Rx configuration tuple {Flow Id, JDT and MBT}. The Rx event is
+ * notified to the netdev via PSM GPINT1. Each PSM GPINT provides 32 bits
+ * that can be used as interrupt status bits. For each Rx packet type
+ * per RFOE, one PSM GPINT bit is reserved to notify the Rx event for
+ * that packet type. The ODP application configures PSM_CMD_GPINT_S
+ * in the JCE section of the JD for each packet. There are a total of
+ * 32 JDT and MBT entries per packet type. These entries are reused when
+ * the JDT/MBT circular entries wrap around.
+ *
+ * On Tx side, the ODP application creates preconfigured job commands
+ * for the driver use. Each job command contains information such as
+ * PSM cmd (ADDJOB) info, JD iova address. The packet memory is also
+ * allocated by ODP app. The JD rd dma cfg section contains the memory
+ * addr for packet DMA. There are two PSM queues per RFOE reserved for
+ * Tx purposes: one queue handles PTP traffic and the other queue is
+ * used for eCPRI and regular Ethernet traffic. The PTP job descriptors
+ * (JDs) are configured to generate a Tx completion event through the
+ * GPINT mechanism.
+ * For each LMAC/RFOE there will be one GPINT bit reserved for this
+ * purpose. For eCPRI and other Ethernet traffic there is no GPINT event
+ * to signal Tx completion to the driver. The driver Tx interrupt handler
+ * reads RFOE(0..2)_TX_PTP_TSTMP_W0 and RFOE(0..2)_TX_PTP_TSTMP_W1
+ * registers for PTP timestamp and fills the time stamp in PTP skb. The
+ * number of preconfigured job commands is 64 for non-PTP, shared by all
+ * LMACs in an RFOE, and 4 for PTP per LMAC in an RFOE. The PTP job cmds
+ * are not shared because the timestamp registers are unique per LMAC.
+ *
+ * III. Transmit
+ *
+ * The driver xmit routine selects the PSM queue based on whether the
+ * packet needs to be timestamped in HW by checking SKBTX_HW_TSTAMP flag.
+ * For a PTP packet, if another PTP packet is already in progress, the
+ * driver adds this skb to a list and returns success. This list is
+ * processed after the previous PTP packet is sent and its timestamp is
+ * successfully copied to the skb in the Tx interrupt handler.
+ *
+ * Once the PSM queue is selected, the driver checks whether there is
+ * enough space in that PSM queue by reading PSM_QUEUE(0..127)_SPACE
+ * register. If the PSM queue is not full, then the driver gets the
+ * corresponding job entries associated with that queue, updates the
+ * length in JD DMA cfg word0 and copies the packet data to JD DMA
+ * cfg word1. For eCPRI/non-PTP packets, the driver also updates JD CFG
+ * RFOE_MODE.
+ *
+ * IV. Receive
+ *
+ * The driver receives an interrupt per pkt_type and invokes NAPI handler.
+ * The NAPI handler reads the corresponding MBT cfg (nxt_buf) to see the
+ * number of packets to be processed. For each successful mbt_entry, the
+ * packet handler gets the corresponding MBT entry buffer address and,
+ * based on the packet type, reads PSW0/ECPRI_PSW0 to get the JD iova addr
+ * corresponding to that MBT entry. The DMA block size is read from the
+ * JDT entry to know the number of bytes DMA'd including PSW bytes. The
+ * MBT entry buffer address is moved by pkt_offset bytes and length is
+ * decremented by pkt_offset to get actual pkt data and length. For each
+ * pkt, skb is allocated and packet data is copied to skb->data. In case
+ * of PTP packets, the PSW1 contains the PTP timestamp value and will be
+ * copied to the skb.
+ *
+ * V. Miscellaneous
+ *
+ * Ethtool:
+ * The ethtool stats shows packet stats for each packet type.
+ *
+ */
+
+/* global driver ctx */
+struct otx2_rfoe_drv_ctx rfoe_drv_ctx[RFOE_MAX_INTF];
+
+/* debugfs */
+static void otx2_rfoe_debugfs_reader(char *buffer, size_t count, void *priv);
+static const char *otx2_rfoe_debugfs_get_formatter(void);
+static size_t otx2_rfoe_debugfs_get_buffer_size(void);
+static void otx2_rfoe_debugfs_create(struct otx2_rfoe_drv_ctx *ctx);
+static void otx2_rfoe_debugfs_remove(struct otx2_rfoe_drv_ctx *ctx);
+
+void otx2_rfoe_disable_intf(int rfoe_num)
+{
+ struct otx2_rfoe_drv_ctx *drv_ctx;
+ struct otx2_rfoe_ndev_priv *priv;
+ struct net_device *netdev;
+ int idx;
+
+ for (idx = 0; idx < RFOE_MAX_INTF; idx++) {
+ drv_ctx = &rfoe_drv_ctx[idx];
+ if (drv_ctx->rfoe_num == rfoe_num && drv_ctx->valid) {
+ netdev = drv_ctx->netdev;
+ priv = netdev_priv(netdev);
+ priv->if_type = IF_TYPE_NONE;
+ }
+ }
+}
+
+void otx2_bphy_rfoe_cleanup(void)
+{
+ struct otx2_rfoe_drv_ctx *drv_ctx = NULL;
+ struct otx2_rfoe_ndev_priv *priv;
+ struct net_device *netdev;
+ struct rx_ft_cfg *ft_cfg;
+ int i, idx;
+
+ for (i = 0; i < RFOE_MAX_INTF; i++) {
+ drv_ctx = &rfoe_drv_ctx[i];
+ if (drv_ctx->valid) {
+ otx2_rfoe_debugfs_remove(drv_ctx);
+ netdev = drv_ctx->netdev;
+ priv = netdev_priv(netdev);
+ --(priv->ptp_cfg->refcnt);
+ if (!priv->ptp_cfg->refcnt) {
+ del_timer_sync(&priv->ptp_cfg->ptp_timer);
+ kfree(priv->ptp_cfg);
+ }
+ otx2_rfoe_ptp_destroy(priv);
+ unregister_netdev(netdev);
+ for (idx = 0; idx < PACKET_TYPE_MAX; idx++) {
+ if (!(priv->pkt_type_mask & (1U << idx)))
+ continue;
+ ft_cfg = &priv->rx_ft_cfg[idx];
+ netif_napi_del(&ft_cfg->napi);
+ }
+ --(priv->rfoe_common->refcnt);
+ if (priv->rfoe_common->refcnt == 0)
+ kfree(priv->rfoe_common);
+ free_netdev(netdev);
+ drv_ctx->valid = 0;
+ }
+ }
+}
+
+void otx2_rfoe_calc_ptp_ts(struct otx2_rfoe_ndev_priv *priv, u64 *ts)
+{
+ u64 ptp_diff_nsec, ptp_diff_psec;
+ struct ptp_bcn_off_cfg *ptp_cfg;
+ struct ptp_clk_cfg *clk_cfg;
+ struct ptp_bcn_ref *ref;
+ unsigned long flags;
+ u64 timestamp = *ts;
+
+ ptp_cfg = priv->ptp_cfg;
+ if (!ptp_cfg->use_ptp_alg)
+ return;
+ clk_cfg = &ptp_cfg->clk_cfg;
+
+ spin_lock_irqsave(&ptp_cfg->lock, flags);
+
+ if (likely(timestamp > ptp_cfg->new_ref.ptp0_ns))
+ ref = &ptp_cfg->new_ref;
+ else
+ ref = &ptp_cfg->old_ref;
+
+ /* calculate ptp timestamp diff in pico sec */
+ ptp_diff_psec = ((timestamp - ref->ptp0_ns) * PICO_SEC_PER_NSEC *
+ clk_cfg->clk_freq_div) / clk_cfg->clk_freq_ghz;
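+ /* add the sub-ns BCN residue and round to the nearest nanosecond */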
+ ptp_diff_nsec = (ptp_diff_psec + ref->bcn0_n2_ps + 500) /
+ PICO_SEC_PER_NSEC;
+ timestamp = ref->bcn0_n1_ns - priv->sec_bcn_offset + ptp_diff_nsec;
+
+ spin_unlock_irqrestore(&ptp_cfg->lock, flags);
+
+ *ts = timestamp;
+}
+
+static void otx2_rfoe_ptp_offset_timer(struct timer_list *t)
+{
+ struct ptp_bcn_off_cfg *ptp_cfg = from_timer(ptp_cfg, t, ptp_timer);
+ u64 mio_ptp_ts, ptp_ts_diff, ptp_diff_nsec, ptp_diff_psec;
+ struct ptp_clk_cfg *clk_cfg = &ptp_cfg->clk_cfg;
+ unsigned long expires, flags;
+
+ spin_lock_irqsave(&ptp_cfg->lock, flags);
+
+ memcpy(&ptp_cfg->old_ref, &ptp_cfg->new_ref,
+ sizeof(struct ptp_bcn_ref));
+
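+ /* extrapolate the BCN reference forward using the current MIO PTP
+ * counter and the configured clock ratio
+ */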
+ mio_ptp_ts = readq(ptp_reg_base + MIO_PTP_CLOCK_HI);
+ ptp_ts_diff = mio_ptp_ts - ptp_cfg->new_ref.ptp0_ns;
+ ptp_diff_psec = (ptp_ts_diff * PICO_SEC_PER_NSEC *
+ clk_cfg->clk_freq_div) / clk_cfg->clk_freq_ghz;
+ ptp_diff_nsec = ptp_diff_psec / PICO_SEC_PER_NSEC;
+ ptp_cfg->new_ref.ptp0_ns += ptp_ts_diff;
+ ptp_cfg->new_ref.bcn0_n1_ns += ptp_diff_nsec;
+ ptp_cfg->new_ref.bcn0_n2_ps += ptp_diff_psec -
+ (ptp_diff_nsec * PICO_SEC_PER_NSEC);
+
+ spin_unlock_irqrestore(&ptp_cfg->lock, flags);
+
+ expires = jiffies + PTP_OFF_RESAMPLE_THRESH * HZ;
+ mod_timer(&ptp_cfg->ptp_timer, expires);
+}
+
+/* submit pending ptp tx requests */
+static void otx2_rfoe_ptp_submit_work(struct work_struct *work)
+{
+ struct otx2_rfoe_ndev_priv *priv = container_of(work,
+ struct otx2_rfoe_ndev_priv,
+ ptp_queue_work);
+ struct mhbw_jd_dma_cfg_word_0_s *jd_dma_cfg_word_0;
+ struct mhbw_jd_dma_cfg_word_1_s *jd_dma_cfg_word_1;
+ struct mhab_job_desc_cfg *jd_cfg_ptr;
+ struct psm_cmd_addjob_s *psm_cmd_lo;
+ struct tx_job_queue_cfg *job_cfg;
+ struct tx_job_entry *job_entry;
+ struct ptp_tstamp_skb *ts_skb;
+ u16 psm_queue_id, queue_space;
+ struct sk_buff *skb = NULL;
+ struct list_head *head;
+ u64 jd_cfg_ptr_iova;
+ unsigned long flags;
+ u64 regval;
+
+ job_cfg = &priv->tx_ptp_job_cfg;
+
+ spin_lock_irqsave(&job_cfg->lock, flags);
+
+ /* check pending ptp requests */
+ if (list_empty(&priv->ptp_skb_list.list)) {
+ netif_dbg(priv, tx_queued, priv->netdev, "no pending ptp tx requests\n");
+ spin_unlock_irqrestore(&job_cfg->lock, flags);
+ return;
+ }
+
+ /* check psm queue space available */
+ psm_queue_id = job_cfg->psm_queue_id;
+ regval = readq(priv->psm_reg_base + PSM_QUEUE_SPACE(psm_queue_id));
+ queue_space = regval & 0x7FFF;
+ if (queue_space < 1) {
+ netif_dbg(priv, tx_queued, priv->netdev, "ptp tx psm queue %d full\n",
+ psm_queue_id);
+ /* reschedule to check later */
+ spin_unlock_irqrestore(&job_cfg->lock, flags);
+ schedule_work(&priv->ptp_queue_work);
+ return;
+ }
+
+ if (test_and_set_bit_lock(PTP_TX_IN_PROGRESS, &priv->state)) {
+ netif_dbg(priv, tx_queued, priv->netdev, "ptp tx ongoing\n");
+ spin_unlock_irqrestore(&job_cfg->lock, flags);
+ return;
+ }
+
+ head = &priv->ptp_skb_list.list;
+ ts_skb = list_entry(head->next, struct ptp_tstamp_skb, list);
+ skb = ts_skb->skb;
+ list_del(&ts_skb->list);
+ kfree(ts_skb);
+ priv->ptp_skb_list.count--;
+
+ netif_dbg(priv, tx_queued, priv->netdev,
+ "submitting ptp tx skb %pS\n", skb);
+
+ priv->last_tx_ptp_jiffies = jiffies;
+
+ /* get the tx job entry */
+ job_entry = (struct tx_job_entry *)
+ &job_cfg->job_entries[job_cfg->q_idx];
+
+ netif_dbg(priv, tx_queued, priv->netdev,
+ "rfoe=%d lmac=%d psm_queue=%d tx_job_entry %d job_cmd_lo=0x%llx job_cmd_high=0x%llx jd_iova_addr=0x%llx\n",
+ priv->rfoe_num, priv->lmac_id, psm_queue_id, job_cfg->q_idx,
+ job_entry->job_cmd_lo, job_entry->job_cmd_hi,
+ job_entry->jd_iova_addr);
+
+ priv->ptp_tx_skb = skb;
+ psm_cmd_lo = (struct psm_cmd_addjob_s *)&job_entry->job_cmd_lo;
+ priv->ptp_job_tag = psm_cmd_lo->jobtag;
+
+ /* update length and block size in jd dma cfg word */
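+ /* block_size is in 4-byte words, rounded up to whole 16-byte chunks;
+ * the Rx path converts it back to bytes with block_size << 2
+ */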
+ jd_cfg_ptr_iova = *(u64 *)((u8 *)job_entry->jd_ptr + 8);
+ jd_cfg_ptr = otx2_iova_to_virt(priv->iommu_domain, jd_cfg_ptr_iova);
+ jd_cfg_ptr->cfg1.pkt_len = skb->len;
+ jd_dma_cfg_word_0 = (struct mhbw_jd_dma_cfg_word_0_s *)
+ job_entry->rd_dma_ptr;
+ jd_dma_cfg_word_0->block_size = (((skb->len + 15) >> 4) * 4);
+
+ /* copy packet data to rd_dma_ptr start addr */
+ jd_dma_cfg_word_1 = (struct mhbw_jd_dma_cfg_word_1_s *)
+ ((u8 *)job_entry->rd_dma_ptr + 8);
+ memcpy(otx2_iova_to_virt(priv->iommu_domain,
+ jd_dma_cfg_word_1->start_addr),
+ skb->data, skb->len);
+
+ /* make sure that all memory writes are completed */
+ dma_wmb();
+
+ /* submit PSM job */
+ writeq(job_entry->job_cmd_lo,
+ priv->psm_reg_base + PSM_QUEUE_CMD_LO(psm_queue_id));
+ writeq(job_entry->job_cmd_hi,
+ priv->psm_reg_base + PSM_QUEUE_CMD_HI(psm_queue_id));
+
+ /* increment queue index */
+ job_cfg->q_idx++;
+ if (job_cfg->q_idx == job_cfg->num_entries)
+ job_cfg->q_idx = 0;
+
+ spin_unlock_irqrestore(&job_cfg->lock, flags);
+}
+
+#define OTX2_RFOE_PTP_TSTMP_POLL_CNT 100
+
+/* ptp interrupt processing bottom half */
+static void otx2_rfoe_ptp_tx_work(struct work_struct *work)
+{
+ struct otx2_rfoe_ndev_priv *priv = container_of(work,
+ struct otx2_rfoe_ndev_priv,
+ ptp_tx_work);
+ struct skb_shared_hwtstamps ts;
+ u64 timestamp, tstmp_w1;
+ u16 jobid;
+ int cnt;
+
+ if (!priv->ptp_tx_skb) {
+ netif_err(priv, tx_done, priv->netdev,
+ "ptp tx skb not found, something wrong!\n");
+ goto submit_next_req;
+ }
+
+ /* poll for timestamp valid bit to go high */
+ for (cnt = 0; cnt < OTX2_RFOE_PTP_TSTMP_POLL_CNT; cnt++) {
+ /* read RFOE(0..2)_TX_PTP_TSTMP_W1(0..3) */
+ tstmp_w1 = readq(priv->rfoe_reg_base +
+ RFOEX_TX_PTP_TSTMP_W1(priv->rfoe_num,
+ priv->lmac_id));
+ /* check valid bit */
+ if (tstmp_w1 & (1ULL << 63))
+ break;
+ usleep_range(5, 10);
+ }
+
+ if (cnt >= OTX2_RFOE_PTP_TSTMP_POLL_CNT) {
+ netif_err(priv, tx_err, priv->netdev,
+ "ptp tx timestamp polling timeout, skb=%pS\n",
+ priv->ptp_tx_skb);
+ priv->stats.tx_hwtstamp_failures++;
+ goto submit_next_req;
+ }
+
+ /* check err or drop condition */
+ if ((tstmp_w1 & (1ULL << 21)) || (tstmp_w1 & (1ULL << 20))) {
+ netif_err(priv, tx_done, priv->netdev,
+ "ptp timestamp error tstmp_w1=0x%llx\n",
+ tstmp_w1);
+ goto submit_next_req;
+ }
+ /* match job id */
+ jobid = (tstmp_w1 >> 4) & 0xffff;
+ if (jobid != priv->ptp_job_tag) {
+ netif_err(priv, tx_done, priv->netdev,
+ "ptp job id doesn't match, tstmp_w1->job_id=0x%x skb->job_tag=0x%x\n",
+ jobid, priv->ptp_job_tag);
+ goto submit_next_req;
+ }
+ /* update timestamp value in skb */
+ timestamp = readq(priv->rfoe_reg_base +
+ RFOEX_TX_PTP_TSTMP_W0(priv->rfoe_num,
+ priv->lmac_id));
+ if (priv->pdev->subsystem_device == PCI_SUBSYS_DEVID_OCTX2_95XXN)
+ otx2_rfoe_calc_ptp_ts(priv, &timestamp);
+ else
+ timestamp = timecounter_cyc2time(&priv->time_counter, timestamp);
+
+ memset(&ts, 0, sizeof(ts));
+ ts.hwtstamp = ns_to_ktime(timestamp);
+ skb_tstamp_tx(priv->ptp_tx_skb, &ts);
+
+submit_next_req:
+ if (priv->ptp_tx_skb)
+ dev_kfree_skb_any(priv->ptp_tx_skb);
+ priv->ptp_tx_skb = NULL;
+ clear_bit_unlock(PTP_TX_IN_PROGRESS, &priv->state);
+ schedule_work(&priv->ptp_queue_work);
+}
+
+/* psm queue timer callback to check queue space */
+static void otx2_rfoe_tx_timer_cb(struct timer_list *t)
+{
+ struct otx2_rfoe_ndev_priv *priv =
+ container_of(t, struct otx2_rfoe_ndev_priv, tx_timer);
+ u16 psm_queue_id, queue_space;
+ int reschedule = 0;
+ u64 regval;
+
+ /* check psm queue space for both ptp and oth packets */
+ if (netif_queue_stopped(priv->netdev)) {
+ psm_queue_id = priv->tx_ptp_job_cfg.psm_queue_id;
+ /* check queue space */
+ regval = readq(priv->psm_reg_base +
+ PSM_QUEUE_SPACE(psm_queue_id));
+ queue_space = regval & 0x7FFF;
+ if (queue_space > 1) {
+ netif_wake_queue(priv->netdev);
+ reschedule = 0;
+ } else {
+ reschedule = 1;
+ }
+
+ psm_queue_id = priv->rfoe_common->tx_oth_job_cfg.psm_queue_id;
+ /* check queue space */
+ regval = readq(priv->psm_reg_base +
+ PSM_QUEUE_SPACE(psm_queue_id));
+ queue_space = regval & 0x7FFF;
+ if (queue_space > 1) {
+ netif_wake_queue(priv->netdev);
+ reschedule = 0;
+ } else {
+ reschedule = 1;
+ }
+ }
+
+ if (reschedule)
+ mod_timer(&priv->tx_timer, jiffies + msecs_to_jiffies(100));
+}
+
+static void otx2_rfoe_process_rx_pkt(struct otx2_rfoe_ndev_priv *priv,
+ struct rx_ft_cfg *ft_cfg, int mbt_buf_idx)
+{
+ struct otx2_bphy_cdev_priv *cdev_priv = priv->cdev_priv;
+ struct mhbw_jd_dma_cfg_word_0_s *jd_dma_cfg_word_0;
+ struct rfoe_ecpri_psw0_s *ecpri_psw0 = NULL;
+ struct rfoe_ecpri_psw1_s *ecpri_psw1 = NULL;
+ u64 tstamp = 0, mbt_state, jdt_iova_addr;
+ int found = 0, idx, len, pkt_type;
+ struct otx2_rfoe_ndev_priv *priv2;
+ struct otx2_rfoe_drv_ctx *drv_ctx;
+ unsigned int ptp_message_len = 0;
+ struct rfoe_psw0_s *psw0 = NULL;
+ struct rfoe_psw1_s *psw1 = NULL;
+ struct net_device *netdev;
+ u8 *buf_ptr, *jdt_ptr;
+ struct sk_buff *skb;
+ u8 lmac_id;
+
+ /* read mbt state */
+ spin_lock(&cdev_priv->mbt_lock);
+ writeq(mbt_buf_idx, (priv->rfoe_reg_base +
+ RFOEX_RX_INDIRECT_INDEX_OFFSET(priv->rfoe_num)));
+ mbt_state = readq(priv->rfoe_reg_base +
+ RFOEX_RX_IND_MBT_SEG_STATE(priv->rfoe_num));
+ spin_unlock(&cdev_priv->mbt_lock);
+
+ if ((mbt_state >> 16 & 0xf) != 0) {
+ pr_err("rx pkt error: mbt_buf_idx=%d, err=%d\n",
+ mbt_buf_idx, (u8)(mbt_state >> 16 & 0xf));
+ return;
+ }
+ if (mbt_state >> 20 & 0x1) {
+ pr_err("rx dma error: mbt_buf_idx=%d\n", mbt_buf_idx);
+ return;
+ }
+
+ buf_ptr = (u8 *)ft_cfg->mbt_virt_addr +
+ (ft_cfg->buf_size * mbt_buf_idx);
+
+ pkt_type = ft_cfg->pkt_type;
+#ifdef ASIM
+ /* ASIM issue, all rx packets will hit eCPRI flow table */
+ pkt_type = PACKET_TYPE_ECPRI;
+#endif
+ if (pkt_type != PACKET_TYPE_ECPRI) {
+ psw0 = (struct rfoe_psw0_s *)buf_ptr;
+ if (psw0->pkt_err_sts || psw0->dma_error) {
+ net_warn_ratelimited("%s: psw0 pkt_err_sts = 0x%x, dma_err=0x%x\n",
+ priv->netdev->name,
+ psw0->pkt_err_sts,
+ psw0->dma_error);
+ return;
+ }
+ /* check that the psw type is correct: */
+ if (unlikely(psw0->pswt == ECPRI_TYPE)) {
+ net_warn_ratelimited("%s: pswt is eCPRI for pkt_type = %d\n",
+ priv->netdev->name, pkt_type);
+ return;
+ }
+ lmac_id = psw0->lmac_id;
+ jdt_iova_addr = (u64)psw0->jd_ptr;
+ psw1 = (struct rfoe_psw1_s *)(buf_ptr + 16);
+ tstamp = psw1->ptp_timestamp;
+ } else {
+ ecpri_psw0 = (struct rfoe_ecpri_psw0_s *)buf_ptr;
+ if (ecpri_psw0->err_sts & 0x1F) {
+ net_warn_ratelimited("%s: ecpri_psw0 err_sts = 0x%x\n",
+ priv->netdev->name,
+ ecpri_psw0->err_sts);
+ return;
+ }
+ /* check that the psw type is correct: */
+ if (unlikely(ecpri_psw0->pswt != ECPRI_TYPE)) {
+ net_warn_ratelimited("%s: pswt is not eCPRI for pkt_type = %d\n",
+ priv->netdev->name, pkt_type);
+ return;
+ }
+ lmac_id = ecpri_psw0->src_id & 0x3;
+ jdt_iova_addr = (u64)ecpri_psw0->jd_ptr;
+ ecpri_psw1 = (struct rfoe_ecpri_psw1_s *)(buf_ptr + 16);
+ tstamp = ecpri_psw1->ptp_timestamp;
+ }
+
+ netif_dbg(priv, rx_status, priv->netdev,
+ "Rx: rfoe=%d lmac=%d mbt_buf_idx=%d psw0(w0)=0x%llx psw0(w1)=0x%llx psw1(w0)=0x%llx psw1(w1)=0x%llx jd:iova=0x%llx\n",
+ priv->rfoe_num, lmac_id, mbt_buf_idx,
+ *(u64 *)buf_ptr, *((u64 *)buf_ptr + 1),
+ *((u64 *)buf_ptr + 2), *((u64 *)buf_ptr + 3),
+ jdt_iova_addr);
+
+ /* read jd ptr from psw */
+ jdt_ptr = otx2_iova_to_virt(priv->iommu_domain, jdt_iova_addr);
+ jd_dma_cfg_word_0 = (struct mhbw_jd_dma_cfg_word_0_s *)
+ ((u8 *)jdt_ptr + ft_cfg->jd_rd_offset);
+ len = (jd_dma_cfg_word_0->block_size) << 2;
+ netif_dbg(priv, rx_status, priv->netdev, "jd rd_dma len = %d\n", len);
+
+ if (unlikely(netif_msg_pktdata(priv))) {
+ netdev_printk(KERN_DEBUG, priv->netdev, "RX MBUF DATA:");
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 4,
+ buf_ptr, len, true);
+ }
+
+ buf_ptr += (ft_cfg->pkt_offset * 16);
+ len -= (ft_cfg->pkt_offset * 16);
+
+ for (idx = 0; idx < RFOE_MAX_INTF; idx++) {
+ drv_ctx = &rfoe_drv_ctx[idx];
+ if (drv_ctx->valid && drv_ctx->rfoe_num == priv->rfoe_num &&
+ drv_ctx->lmac_id == lmac_id) {
+ found = 1;
+ break;
+ }
+ }
+ if (found) {
+ netdev = rfoe_drv_ctx[idx].netdev;
+ priv2 = netdev_priv(netdev);
+ } else {
+ pr_err("netdev not found, something went wrong!\n");
+ return;
+ }
+
+ /* drop the packet if interface is down */
+ if (unlikely(!netif_carrier_ok(netdev))) {
+ netif_err(priv2, rx_err, netdev,
+ "%s {rfoe%d lmac%d} link down, drop pkt\n",
+ netdev->name, priv2->rfoe_num,
+ priv2->lmac_id);
+ /* update stats */
+ if (pkt_type == PACKET_TYPE_PTP) {
+ priv2->stats.ptp_rx_dropped++;
+ priv2->last_rx_ptp_dropped_jiffies = jiffies;
+ } else if (pkt_type == PACKET_TYPE_ECPRI) {
+ priv2->stats.ecpri_rx_dropped++;
+ priv2->last_rx_dropped_jiffies = jiffies;
+ } else {
+ priv2->stats.rx_dropped++;
+ priv2->last_rx_dropped_jiffies = jiffies;
+ }
+ return;
+ }
+
+ skb = netdev_alloc_skb_ip_align(netdev, len);
+ if (!skb) {
+ netif_err(priv2, rx_err, netdev, "Rx: alloc skb failed\n");
+ return;
+ }
+
+ memcpy(skb->data, buf_ptr, len);
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ /* remove trailing padding for ptp packets */
+ if (skb->protocol == htons(ETH_P_1588)) {
+ ptp_message_len = skb->data[2] << 8 | skb->data[3];
+ skb_trim(skb, ptp_message_len);
+ }
+
+ if (priv2->rx_hw_tstamp_en) {
+ if (priv->pdev->subsystem_device == PCI_SUBSYS_DEVID_OCTX2_95XXN)
+ otx2_rfoe_calc_ptp_ts(priv, &tstamp);
+ else
+ tstamp = timecounter_cyc2time(&priv->time_counter, tstamp);
+
+ skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(tstamp);
+ }
+
+ netif_receive_skb(skb);
+
+ /* update stats */
+ if (pkt_type == PACKET_TYPE_PTP) {
+ priv2->stats.ptp_rx_packets++;
+ priv2->last_rx_ptp_jiffies = jiffies;
+ } else if (pkt_type == PACKET_TYPE_ECPRI) {
+ priv2->stats.ecpri_rx_packets++;
+ priv2->last_rx_jiffies = jiffies;
+ } else {
+ priv2->stats.rx_packets++;
+ priv2->last_rx_jiffies = jiffies;
+ }
+ priv2->stats.rx_bytes += skb->len;
+}
+
+static int otx2_rfoe_process_rx_flow(struct otx2_rfoe_ndev_priv *priv,
+ int pkt_type, int budget)
+{
+ struct otx2_bphy_cdev_priv *cdev_priv = priv->cdev_priv;
+ int count = 0, processed_pkts = 0;
+ struct rx_ft_cfg *ft_cfg;
+ u64 mbt_cfg;
+ u16 nxt_buf;
+ int *mbt_last_idx = &priv->rfoe_common->rx_mbt_last_idx[pkt_type];
+ u16 *prv_nxt_buf = &priv->rfoe_common->nxt_buf[pkt_type];
+
+ ft_cfg = &priv->rx_ft_cfg[pkt_type];
+
+ spin_lock(&cdev_priv->mbt_lock);
+ /* read mbt nxt_buf */
+ writeq(ft_cfg->mbt_idx,
+ priv->rfoe_reg_base +
+ RFOEX_RX_INDIRECT_INDEX_OFFSET(priv->rfoe_num));
+ mbt_cfg = readq(priv->rfoe_reg_base +
+ RFOEX_RX_IND_MBT_CFG(priv->rfoe_num));
+ spin_unlock(&cdev_priv->mbt_lock);
+
+ nxt_buf = (mbt_cfg >> 32) & 0xffff;
+
+ /* no mbt entries to process */
+ if (nxt_buf == *prv_nxt_buf) {
+ netif_dbg(priv, rx_status, priv->netdev,
+ "no rx packets to process, rfoe=%d pkt_type=%d mbt_idx=%d nxt_buf=%d mbt_buf_sw_head=%d\n",
+ priv->rfoe_num, pkt_type, ft_cfg->mbt_idx, nxt_buf,
+ *mbt_last_idx);
+ return 0;
+ }
+
+ *prv_nxt_buf = nxt_buf;
+
+ /* get count of pkts to process, check ring wrap condition */
+ if (*mbt_last_idx > nxt_buf) {
+ count = ft_cfg->num_bufs - *mbt_last_idx;
+ count += nxt_buf;
+ } else {
+ count = nxt_buf - *mbt_last_idx;
+ }
+
+ netif_dbg(priv, rx_status, priv->netdev,
+ "rfoe=%d pkt_type=%d mbt_idx=%d nxt_buf=%d mbt_buf_sw_head=%d count=%d\n",
+ priv->rfoe_num, pkt_type, ft_cfg->mbt_idx, nxt_buf,
+ *mbt_last_idx, count);
+
+ while (likely((processed_pkts < budget) && (processed_pkts < count))) {
+ otx2_rfoe_process_rx_pkt(priv, ft_cfg, *mbt_last_idx);
+
+ (*mbt_last_idx)++;
+ if (*mbt_last_idx == ft_cfg->num_bufs)
+ *mbt_last_idx = 0;
+
+ processed_pkts++;
+ }
+
+ return processed_pkts;
+}
+
+/* napi poll routine */
+static int otx2_rfoe_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct otx2_bphy_cdev_priv *cdev_priv;
+ struct otx2_rfoe_ndev_priv *priv;
+ int workdone = 0, pkt_type;
+ struct rx_ft_cfg *ft_cfg;
+ u64 intr_en, regval;
+
+ ft_cfg = container_of(napi, struct rx_ft_cfg, napi);
+ priv = ft_cfg->priv;
+ cdev_priv = priv->cdev_priv;
+ pkt_type = ft_cfg->pkt_type;
+
+ /* pkt processing loop */
+ workdone += otx2_rfoe_process_rx_flow(priv, pkt_type, budget);
+
+ if (workdone < budget) {
+ napi_complete_done(napi, workdone);
+
+ /* Re enable the Rx interrupts */
+ intr_en = PKT_TYPE_TO_INTR(pkt_type) <<
+ RFOE_RX_INTR_SHIFT(priv->rfoe_num);
+ spin_lock(&cdev_priv->lock);
+ regval = readq(bphy_reg_base + PSM_INT_GP_ENA_W1S(1));
+ regval |= intr_en;
+ writeq(regval, bphy_reg_base + PSM_INT_GP_ENA_W1S(1));
+ spin_unlock(&cdev_priv->lock);
+ }
+
+ return workdone;
+}
+
+/* Rx GPINT napi schedule api */
+void otx2_rfoe_rx_napi_schedule(int rfoe_num, u32 status)
+{
+ enum bphy_netdev_packet_type pkt_type;
+ struct otx2_rfoe_drv_ctx *drv_ctx;
+ struct otx2_rfoe_ndev_priv *priv;
+ struct rx_ft_cfg *ft_cfg;
+ int intf, bit_idx;
+ u32 intr_sts;
+ u64 regval;
+
+ for (intf = 0; intf < RFOE_MAX_INTF; intf++) {
+ drv_ctx = &rfoe_drv_ctx[intf];
+ /* ignore lmac; there is one interrupt per pkt_type per rfoe */
+ if (!(drv_ctx->valid && drv_ctx->rfoe_num == rfoe_num))
+ continue;
+ /* check if i/f down, napi disabled */
+ priv = netdev_priv(drv_ctx->netdev);
+ if (test_bit(RFOE_INTF_DOWN, &priv->state))
+ continue;
+ /* check rx pkt type */
+ intr_sts = ((status >> RFOE_RX_INTR_SHIFT(rfoe_num)) &
+ RFOE_RX_INTR_EN);
+ for (bit_idx = 0; bit_idx < PACKET_TYPE_MAX; bit_idx++) {
+ if (!(intr_sts & BIT(bit_idx)))
+ continue;
+ pkt_type = INTR_TO_PKT_TYPE(bit_idx);
+ if (unlikely(!(priv->pkt_type_mask & (1U << pkt_type))))
+ continue;
+ /* clear intr enable bit, re-enable in napi handler */
+ regval = PKT_TYPE_TO_INTR(pkt_type) <<
+ RFOE_RX_INTR_SHIFT(rfoe_num);
+ writeq(regval, bphy_reg_base + PSM_INT_GP_ENA_W1C(1));
+ /* schedule napi */
+ ft_cfg = &drv_ctx->ft_cfg[pkt_type];
+ napi_schedule(&ft_cfg->napi);
+ }
+ /* napi scheduled per pkt_type, return */
+ return;
+ }
+}
+
+static void otx2_rfoe_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct otx2_rfoe_ndev_priv *priv = netdev_priv(netdev);
+ struct otx2_rfoe_stats *dev_stats = &priv->stats;
+
+ stats->rx_bytes = dev_stats->rx_bytes;
+ stats->rx_packets = dev_stats->rx_packets +
+ dev_stats->ptp_rx_packets +
+ dev_stats->ecpri_rx_packets;
+ stats->rx_dropped = dev_stats->rx_dropped +
+ dev_stats->ptp_rx_dropped +
+ dev_stats->ecpri_rx_dropped;
+
+ stats->tx_bytes = dev_stats->tx_bytes;
+ stats->tx_packets = dev_stats->tx_packets +
+ dev_stats->ptp_tx_packets +
+ dev_stats->ecpri_tx_packets;
+ stats->tx_dropped = dev_stats->tx_dropped +
+ dev_stats->ptp_tx_dropped +
+ dev_stats->ecpri_tx_dropped;
+}
+
+static int otx2_rfoe_config_hwtstamp(struct net_device *netdev,
+ struct ifreq *ifr)
+{
+ struct otx2_rfoe_ndev_priv *priv = netdev_priv(netdev);
+ struct hwtstamp_config config;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ /* reserved for future extensions */
+ if (config.flags)
+ return -EINVAL;
+
+ /* PTP HW timestamping is always enabled; mark the SW flags so
+ * that TX PTP requests are submitted to the PTP PSM queue and
+ * the RX timestamp is copied to the skb
+ */
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ priv->tx_hw_tstamp_en = 0;
+ break;
+ case HWTSTAMP_TX_ON:
+ priv->tx_hw_tstamp_en = 1;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ priv->rx_hw_tstamp_en = 0;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ priv->rx_hw_tstamp_en = 1;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
+ return -EFAULT;
+
+ return 0;
+}
+
+/* netdev ioctl */
+static int otx2_rfoe_ioctl(struct net_device *netdev, struct ifreq *req,
+ int cmd)
+{
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return otx2_rfoe_config_hwtstamp(netdev, req);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/* netdev xmit */
+static netdev_tx_t otx2_rfoe_eth_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct otx2_rfoe_ndev_priv *priv = netdev_priv(netdev);
+ struct mhbw_jd_dma_cfg_word_0_s *jd_dma_cfg_word_0;
+ struct mhbw_jd_dma_cfg_word_1_s *jd_dma_cfg_word_1;
+ struct mhab_job_desc_cfg *jd_cfg_ptr;
+ struct psm_cmd_addjob_s *psm_cmd_lo;
+ struct tx_job_queue_cfg *job_cfg;
+ u64 jd_cfg_ptr_iova, regval;
+ struct tx_job_entry *job_entry;
+ struct ptp_tstamp_skb *ts_skb;
+ int psm_queue_id, queue_space;
+ int pkt_type = 0;
+ unsigned long flags;
+ struct ethhdr *eth;
+
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+ if (!priv->tx_hw_tstamp_en) {
+ netif_dbg(priv, tx_queued, priv->netdev,
+ "skb HW timestamp requested but not enabled, this packet will not be timestamped\n");
+ job_cfg = &priv->rfoe_common->tx_oth_job_cfg;
+ pkt_type = PACKET_TYPE_OTHER;
+ } else {
+ job_cfg = &priv->tx_ptp_job_cfg;
+ pkt_type = PACKET_TYPE_PTP;
+ }
+ } else {
+ job_cfg = &priv->rfoe_common->tx_oth_job_cfg;
+ eth = (struct ethhdr *)skb->data;
+ if (ntohs(eth->h_proto) == ETH_P_ECPRI)
+ pkt_type = PACKET_TYPE_ECPRI;
+ else
+ pkt_type = PACKET_TYPE_OTHER;
+ }
+
+ spin_lock_irqsave(&job_cfg->lock, flags);
+
+ if (unlikely(priv->if_type != IF_TYPE_ETHERNET)) {
+ netif_err(priv, tx_queued, netdev,
+ "%s {rfoe%d lmac%d} invalid intf mode, drop pkt\n",
+ netdev->name, priv->rfoe_num, priv->lmac_id);
+ /* update stats */
+ priv->stats.tx_dropped++;
+ priv->last_tx_dropped_jiffies = jiffies;
+ goto exit;
+ }
+
+ if (unlikely(!netif_carrier_ok(netdev))) {
+ netif_err(priv, tx_err, netdev,
+ "%s {rfoe%d lmac%d} link down, drop pkt\n",
+ netdev->name, priv->rfoe_num,
+ priv->lmac_id);
+ /* update stats */
+ if (pkt_type == PACKET_TYPE_ECPRI) {
+ priv->stats.ecpri_tx_dropped++;
+ priv->last_tx_dropped_jiffies = jiffies;
+ } else if (pkt_type == PACKET_TYPE_PTP) {
+ priv->stats.ptp_tx_dropped++;
+ priv->last_tx_ptp_dropped_jiffies = jiffies;
+ } else {
+ priv->stats.tx_dropped++;
+ priv->last_tx_dropped_jiffies = jiffies;
+ }
+
+ goto exit;
+ }
+
+ if (unlikely(!(priv->pkt_type_mask & (1U << pkt_type)))) {
+ netif_err(priv, tx_queued, netdev,
+ "%s {rfoe%d lmac%d} pkt not supported, drop pkt\n",
+ netdev->name, priv->rfoe_num,
+ priv->lmac_id);
+ /* update stats */
+ if (pkt_type == PACKET_TYPE_ECPRI) {
+ priv->stats.ecpri_tx_dropped++;
+ priv->last_tx_dropped_jiffies = jiffies;
+ } else if (pkt_type == PACKET_TYPE_PTP) {
+ priv->stats.ptp_tx_dropped++;
+ priv->last_tx_ptp_dropped_jiffies = jiffies;
+ } else {
+ priv->stats.tx_dropped++;
+ priv->last_tx_dropped_jiffies = jiffies;
+ }
+
+ goto exit;
+ }
+
+ /* get psm queue number */
+ psm_queue_id = job_cfg->psm_queue_id;
+ netif_dbg(priv, tx_queued, priv->netdev,
+ "psm: queue(%d): cfg=0x%llx ptr=0x%llx space=0x%llx\n",
+ psm_queue_id,
+ readq(priv->psm_reg_base + PSM_QUEUE_CFG(psm_queue_id)),
+ readq(priv->psm_reg_base + PSM_QUEUE_PTR(psm_queue_id)),
+ readq(priv->psm_reg_base + PSM_QUEUE_SPACE(psm_queue_id)));
+
+ /* check psm queue space available */
+ regval = readq(priv->psm_reg_base + PSM_QUEUE_SPACE(psm_queue_id));
+ queue_space = regval & 0x7FFF;
+ if (queue_space < 1 && pkt_type != PACKET_TYPE_PTP) {
+ netif_err(priv, tx_err, netdev,
+ "no space in psm queue %d, dropping pkt\n",
+ psm_queue_id);
+ netif_stop_queue(netdev);
+ dev_kfree_skb_any(skb);
+ /* update stats */
+ if (pkt_type == PACKET_TYPE_ECPRI)
+ priv->stats.ecpri_tx_dropped++;
+ else
+ priv->stats.tx_dropped++;
+
+ priv->last_tx_dropped_jiffies = jiffies;
+
+ mod_timer(&priv->tx_timer, jiffies + msecs_to_jiffies(100));
+ spin_unlock_irqrestore(&job_cfg->lock, flags);
+ return NETDEV_TX_OK;
+ }
+
+ /* get the tx job entry */
+ job_entry = (struct tx_job_entry *)
+ &job_cfg->job_entries[job_cfg->q_idx];
+
+ netif_dbg(priv, tx_queued, priv->netdev,
+ "rfoe=%d lmac=%d psm_queue=%d tx_job_entry %d job_cmd_lo=0x%llx job_cmd_high=0x%llx jd_iova_addr=0x%llx\n",
+ priv->rfoe_num, priv->lmac_id, psm_queue_id, job_cfg->q_idx,
+ job_entry->job_cmd_lo, job_entry->job_cmd_hi,
+ job_entry->jd_iova_addr);
+
+ /* hw timestamp */
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ priv->tx_hw_tstamp_en) {
+ if (list_empty(&priv->ptp_skb_list.list) &&
+ !test_and_set_bit_lock(PTP_TX_IN_PROGRESS, &priv->state)) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ priv->ptp_tx_skb = skb;
+ psm_cmd_lo = (struct psm_cmd_addjob_s *)
+ &job_entry->job_cmd_lo;
+ priv->ptp_job_tag = psm_cmd_lo->jobtag;
+ } else {
+ /* check ptp queue count */
+ if (priv->ptp_skb_list.count >= max_ptp_req) {
+ netif_err(priv, tx_err, netdev,
+ "ptp list full, dropping pkt\n");
+ priv->stats.ptp_tx_dropped++;
+ priv->last_tx_ptp_dropped_jiffies = jiffies;
+ goto exit;
+ }
+ /* allocate and add ptp req to queue */
+ ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
+ if (!ts_skb) {
+ priv->stats.ptp_tx_dropped++;
+ priv->last_tx_ptp_dropped_jiffies = jiffies;
+ goto exit;
+ }
+ ts_skb->skb = skb;
+ list_add_tail(&ts_skb->list, &priv->ptp_skb_list.list);
+ priv->ptp_skb_list.count++;
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ priv->stats.ptp_tx_packets++;
+ priv->stats.tx_bytes += skb->len;
+ /* sw timestamp */
+ skb_tx_timestamp(skb);
+ goto exit; /* submit the packet later */
+ }
+ }
+
+ /* sw timestamp */
+ skb_tx_timestamp(skb);
+
+ if (unlikely(netif_msg_pktdata(priv))) {
+ netdev_printk(KERN_DEBUG, priv->netdev, "Tx: skb %pS len=%d\n",
+ skb, skb->len);
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 4,
+ skb->data, skb->len, true);
+ }
+
+ /* update length and block size in jd dma cfg word */
+ jd_cfg_ptr_iova = *(u64 *)((u8 *)job_entry->jd_ptr + 8);
+ jd_cfg_ptr = otx2_iova_to_virt(priv->iommu_domain, jd_cfg_ptr_iova);
+ jd_cfg_ptr->cfg1.pkt_len = skb->len;
+ jd_dma_cfg_word_0 = (struct mhbw_jd_dma_cfg_word_0_s *)
+ job_entry->rd_dma_ptr;
+ jd_dma_cfg_word_0->block_size = (((skb->len + 15) >> 4) * 4);
+
+ /* update rfoe_mode and lmac id for non-ptp (shared) psm job entry */
+ if (pkt_type != PACKET_TYPE_PTP) {
+ jd_cfg_ptr->cfg.lmacid = priv->lmac_id & 0x3;
+ if (pkt_type == PACKET_TYPE_ECPRI)
+ jd_cfg_ptr->cfg.rfoe_mode = 1;
+ else
+ jd_cfg_ptr->cfg.rfoe_mode = 0;
+ }
+
+ /* copy packet data to rd_dma_ptr start addr */
+ jd_dma_cfg_word_1 = (struct mhbw_jd_dma_cfg_word_1_s *)
+ ((u8 *)job_entry->rd_dma_ptr + 8);
+ memcpy(otx2_iova_to_virt(priv->iommu_domain,
+ jd_dma_cfg_word_1->start_addr),
+ skb->data, skb->len);
+
+ /* make sure that all memory writes are completed */
+ dma_wmb();
+
+ /* submit PSM job */
+ writeq(job_entry->job_cmd_lo,
+ priv->psm_reg_base + PSM_QUEUE_CMD_LO(psm_queue_id));
+ writeq(job_entry->job_cmd_hi,
+ priv->psm_reg_base + PSM_QUEUE_CMD_HI(psm_queue_id));
+
+ /* update stats */
+ if (pkt_type == PACKET_TYPE_ECPRI) {
+ priv->stats.ecpri_tx_packets++;
+ priv->last_tx_jiffies = jiffies;
+ } else if (pkt_type == PACKET_TYPE_PTP) {
+ priv->stats.ptp_tx_packets++;
+ priv->last_tx_ptp_jiffies = jiffies;
+ } else {
+ priv->stats.tx_packets++;
+ priv->last_tx_jiffies = jiffies;
+ }
+ priv->stats.tx_bytes += skb->len;
+
+ /* increment queue index */
+ job_cfg->q_idx++;
+ if (job_cfg->q_idx == job_cfg->num_entries)
+ job_cfg->q_idx = 0;
+exit:
+ if (!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
+ dev_kfree_skb_any(skb);
+
+ spin_unlock_irqrestore(&job_cfg->lock, flags);
+
+ return NETDEV_TX_OK;
+}
+
+/* netdev open */
+static int otx2_rfoe_eth_open(struct net_device *netdev)
+{
+ struct otx2_rfoe_ndev_priv *priv = netdev_priv(netdev);
+ int idx;
+
+ for (idx = 0; idx < PACKET_TYPE_MAX; idx++) {
+ if (!(priv->pkt_type_mask & (1U << idx)))
+ continue;
+ napi_enable(&priv->rx_ft_cfg[idx].napi);
+ }
+
+ priv->ptp_tx_skb = NULL;
+
+ spin_lock(&priv->lock);
+ clear_bit(RFOE_INTF_DOWN, &priv->state);
+
+ if (priv->link_state == LINK_STATE_UP) {
+ netif_carrier_on(netdev);
+ netif_start_queue(netdev);
+ }
+ spin_unlock(&priv->lock);
+
+ return 0;
+}
+
+/* netdev close */
+static int otx2_rfoe_eth_stop(struct net_device *netdev)
+{
+ struct otx2_rfoe_ndev_priv *priv = netdev_priv(netdev);
+ struct ptp_tstamp_skb *ts_skb, *ts_skb2;
+ int idx;
+
+ spin_lock(&priv->lock);
+ set_bit(RFOE_INTF_DOWN, &priv->state);
+
+ netif_stop_queue(netdev);
+ netif_carrier_off(netdev);
+
+ spin_unlock(&priv->lock);
+
+ for (idx = 0; idx < PACKET_TYPE_MAX; idx++) {
+ if (!(priv->pkt_type_mask & (1U << idx)))
+ continue;
+ napi_disable(&priv->rx_ft_cfg[idx].napi);
+ }
+
+ del_timer_sync(&priv->tx_timer);
+
+ /* cancel any pending ptp work item in progress */
+ cancel_work_sync(&priv->ptp_tx_work);
+ if (priv->ptp_tx_skb) {
+ dev_kfree_skb_any(priv->ptp_tx_skb);
+ priv->ptp_tx_skb = NULL;
+ clear_bit_unlock(PTP_TX_IN_PROGRESS, &priv->state);
+ }
+
+ /* clear ptp skb list */
+ cancel_work_sync(&priv->ptp_queue_work);
+ list_for_each_entry_safe(ts_skb, ts_skb2,
+ &priv->ptp_skb_list.list, list) {
+ list_del(&ts_skb->list);
+ kfree(ts_skb);
+ }
+ priv->ptp_skb_list.count = 0;
+
+ return 0;
+}
+
+static int otx2_rfoe_init(struct net_device *netdev)
+{
+ struct otx2_rfoe_ndev_priv *priv = netdev_priv(netdev);
+
+ /* Enable VLAN TPID match */
+ writeq(0x18100, (priv->rfoe_reg_base +
+ RFOEX_RX_VLANX_CFG(priv->rfoe_num, 0)));
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ return 0;
+}
+
+static int otx2_rfoe_vlan_rx_configure(struct net_device *netdev, u16 vid,
+ bool forward)
+{
+ struct rfoe_rx_ind_vlanx_fwd fwd;
+ struct otx2_rfoe_ndev_priv *priv = netdev_priv(netdev);
+ struct otx2_bphy_cdev_priv *cdev_priv = priv->cdev_priv;
+ u64 index = (vid >> 6) & 0x3F;       /* selects one of 64 indirect fwd entries */
+ u64 mask = (0x1ULL << (vid & 0x3F)); /* bit for this vid within that entry */
+ unsigned long flags;
+
+ if (vid >= VLAN_N_VID) {
+ netdev_err(netdev, "Invalid VLAN ID %d\n", vid);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&cdev_priv->mbt_lock, flags);
+
+ if (forward && priv->rfoe_common->rx_vlan_fwd_refcnt[vid]++)
+ goto out;
+
+ if (!forward && --priv->rfoe_common->rx_vlan_fwd_refcnt[vid])
+ goto out;
+
+ /* read current fwd mask */
+ writeq(index, (priv->rfoe_reg_base +
+ RFOEX_RX_INDIRECT_INDEX_OFFSET(priv->rfoe_num)));
+ fwd.fwd = readq(priv->rfoe_reg_base +
+ RFOEX_RX_IND_VLANX_FWD(priv->rfoe_num, 0));
+
+ if (forward)
+ fwd.fwd |= mask;
+ else
+ fwd.fwd &= ~mask;
+
+ /* write the new fwd mask */
+ writeq(index, (priv->rfoe_reg_base +
+ RFOEX_RX_INDIRECT_INDEX_OFFSET(priv->rfoe_num)));
+ writeq(fwd.fwd, (priv->rfoe_reg_base +
+ RFOEX_RX_IND_VLANX_FWD(priv->rfoe_num, 0)));
+
+out:
+ spin_unlock_irqrestore(&cdev_priv->mbt_lock, flags);
+
+ return 0;
+}
+
+static int otx2_rfoe_vlan_rx_add(struct net_device *netdev, __be16 proto,
+ u16 vid)
+{
+ return otx2_rfoe_vlan_rx_configure(netdev, vid, true);
+}
+
+static int otx2_rfoe_vlan_rx_kill(struct net_device *netdev, __be16 proto,
+ u16 vid)
+{
+ return otx2_rfoe_vlan_rx_configure(netdev, vid, false);
+}
+
+static const struct net_device_ops otx2_rfoe_netdev_ops = {
+ .ndo_init = otx2_rfoe_init,
+ .ndo_open = otx2_rfoe_eth_open,
+ .ndo_stop = otx2_rfoe_eth_stop,
+ .ndo_start_xmit = otx2_rfoe_eth_start_xmit,
+ .ndo_do_ioctl = otx2_rfoe_ioctl,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_get_stats64 = otx2_rfoe_get_stats64,
+ .ndo_vlan_rx_add_vid = otx2_rfoe_vlan_rx_add,
+ .ndo_vlan_rx_kill_vid = otx2_rfoe_vlan_rx_kill,
+};
+
+static void otx2_rfoe_dump_rx_ft_cfg(struct otx2_rfoe_ndev_priv *priv)
+{
+ struct rx_ft_cfg *ft_cfg;
+ int idx;
+
+ for (idx = 0; idx < PACKET_TYPE_MAX; idx++) {
+ if (!(priv->pkt_type_mask & (1U << idx)))
+ continue;
+ ft_cfg = &priv->rx_ft_cfg[idx];
+ pr_debug("rfoe=%d lmac=%d pkttype=%d flowid=%d mbt: idx=%d size=%d nbufs=%d iova=0x%llx jdt: idx=%d size=%d num_jd=%d iova=0x%llx\n",
+ priv->rfoe_num, priv->lmac_id, ft_cfg->pkt_type,
+ ft_cfg->flow_id, ft_cfg->mbt_idx, ft_cfg->buf_size,
+ ft_cfg->num_bufs, ft_cfg->mbt_iova_addr,
+ ft_cfg->jdt_idx, ft_cfg->jd_size, ft_cfg->num_jd,
+ ft_cfg->jdt_iova_addr);
+ }
+}
+
+static inline void otx2_rfoe_fill_rx_ft_cfg(struct otx2_rfoe_ndev_priv *priv,
+ struct bphy_netdev_comm_if *if_cfg)
+{
+ struct otx2_bphy_cdev_priv *cdev_priv = priv->cdev_priv;
+ struct bphy_netdev_rbuf_info *rbuf_info;
+ struct rx_ft_cfg *ft_cfg;
+ u64 jdt_cfg0, iova;
+ int idx;
+
+ /* RX flow table configuration */
+ for (idx = 0; idx < PACKET_TYPE_MAX; idx++) {
+ if (!(priv->pkt_type_mask & (1U << idx)))
+ continue;
+ ft_cfg = &priv->rx_ft_cfg[idx];
+ rbuf_info = &if_cfg->rbuf_info[idx];
+ ft_cfg->pkt_type = rbuf_info->pkt_type;
+ ft_cfg->gp_int_num = rbuf_info->gp_int_num;
+ ft_cfg->flow_id = rbuf_info->flow_id;
+ ft_cfg->mbt_idx = rbuf_info->mbt_index;
+ ft_cfg->buf_size = rbuf_info->buf_size * 16;
+ ft_cfg->num_bufs = rbuf_info->num_bufs;
+ ft_cfg->mbt_iova_addr = rbuf_info->mbt_iova_addr;
+ iova = ft_cfg->mbt_iova_addr;
+ ft_cfg->mbt_virt_addr = otx2_iova_to_virt(priv->iommu_domain,
+ iova);
+ ft_cfg->jdt_idx = rbuf_info->jdt_index;
+ ft_cfg->jd_size = rbuf_info->jd_size * 8;
+ ft_cfg->num_jd = rbuf_info->num_jd;
+ ft_cfg->jdt_iova_addr = rbuf_info->jdt_iova_addr;
+ iova = ft_cfg->jdt_iova_addr;
+ ft_cfg->jdt_virt_addr = otx2_iova_to_virt(priv->iommu_domain,
+ iova);
+ spin_lock(&cdev_priv->mbt_lock);
+ writeq(ft_cfg->jdt_idx,
+ (priv->rfoe_reg_base +
+ RFOEX_RX_INDIRECT_INDEX_OFFSET(priv->rfoe_num)));
+ jdt_cfg0 = readq(priv->rfoe_reg_base +
+ RFOEX_RX_IND_JDT_CFG0(priv->rfoe_num));
+ spin_unlock(&cdev_priv->mbt_lock);
+ ft_cfg->jd_rd_offset = ((jdt_cfg0 >> 28) & 0xf) * 8;
+ ft_cfg->pkt_offset = (u8)((jdt_cfg0 >> 52) & 0x7);
+ ft_cfg->priv = priv;
+ netif_napi_add(priv->netdev, &ft_cfg->napi,
+ otx2_rfoe_napi_poll,
+ NAPI_POLL_WEIGHT);
+ }
+}
+
+static void otx2_rfoe_fill_tx_job_entries(struct otx2_rfoe_ndev_priv *priv,
+ struct tx_job_queue_cfg *job_cfg,
+ struct bphy_netdev_tx_psm_cmd_info *tx_job,
+ int num_entries)
+{
+ struct tx_job_entry *job_entry;
+ u64 jd_cfg_iova, iova;
+ int i;
+
+ for (i = 0; i < num_entries; i++) {
+ job_entry = &job_cfg->job_entries[i];
+ job_entry->job_cmd_lo = tx_job->low_cmd;
+ job_entry->job_cmd_hi = tx_job->high_cmd;
+ job_entry->jd_iova_addr = tx_job->jd_iova_addr;
+ iova = job_entry->jd_iova_addr;
+ job_entry->jd_ptr = otx2_iova_to_virt(priv->iommu_domain, iova);
+ jd_cfg_iova = *(u64 *)((u8 *)job_entry->jd_ptr + 8);
+ job_entry->jd_cfg_ptr = otx2_iova_to_virt(priv->iommu_domain,
+ jd_cfg_iova);
+ job_entry->rd_dma_iova_addr = tx_job->rd_dma_iova_addr;
+ iova = job_entry->rd_dma_iova_addr;
+ job_entry->rd_dma_ptr = otx2_iova_to_virt(priv->iommu_domain,
+ iova);
+ pr_debug("job_cmd_lo=0x%llx job_cmd_hi=0x%llx jd_iova_addr=0x%llx rd_dma_iova_addr=%llx\n",
+ tx_job->low_cmd, tx_job->high_cmd,
+ tx_job->jd_iova_addr, tx_job->rd_dma_iova_addr);
+ tx_job++;
+ }
+ /* get psm queue id */
+ job_entry = &job_cfg->job_entries[0];
+ job_cfg->psm_queue_id = (job_entry->job_cmd_lo >> 8) & 0xff;
+ job_cfg->q_idx = 0;
+ job_cfg->num_entries = num_entries;
+ spin_lock_init(&job_cfg->lock);
+}
+
+int otx2_rfoe_parse_and_init_intf(struct otx2_bphy_cdev_priv *cdev,
+ struct bphy_netdev_comm_intf_cfg *cfg)
+{
+ int i, intf_idx = 0, num_entries, lmac, idx, ret;
+ struct bphy_netdev_tx_psm_cmd_info *tx_info;
+ struct otx2_rfoe_drv_ctx *drv_ctx = NULL;
+ struct otx2_rfoe_ndev_priv *priv, *priv2;
+ struct bphy_netdev_rfoe_if *rfoe_cfg;
+ struct bphy_netdev_comm_if *if_cfg;
+ struct tx_job_queue_cfg *tx_cfg;
+ struct ptp_bcn_off_cfg *ptp_cfg;
+ struct net_device *netdev;
+ struct rx_ft_cfg *ft_cfg;
+ u8 pkt_type_mask;
+
+ ptp_cfg = kzalloc(sizeof(*ptp_cfg), GFP_KERNEL);
+ if (!ptp_cfg)
+ return -ENOMEM;
+ timer_setup(&ptp_cfg->ptp_timer, otx2_rfoe_ptp_offset_timer, 0);
+ ptp_cfg->clk_cfg.clk_freq_ghz = PTP_CLK_FREQ_GHZ;
+ ptp_cfg->clk_cfg.clk_freq_div = PTP_CLK_FREQ_DIV;
+ spin_lock_init(&ptp_cfg->lock);
+
+ for (i = 0; i < MAX_RFOE_INTF; i++) {
+ priv2 = NULL;
+ rfoe_cfg = &cfg[i].rfoe_if_cfg;
+ pkt_type_mask = rfoe_cfg->pkt_type_mask;
+ for (lmac = 0; lmac < MAX_LMAC_PER_RFOE; lmac++) {
+ if_cfg = &rfoe_cfg->if_cfg[lmac];
+ /* check if lmac is valid */
+ if (!if_cfg->lmac_info.is_valid) {
+ dev_dbg(cdev->dev,
+ "rfoe%d lmac%d invalid\n", i, lmac);
+ continue;
+ }
+ netdev =
+ alloc_etherdev(sizeof(struct otx2_rfoe_ndev_priv));
+ if (!netdev) {
+ dev_err(cdev->dev,
+ "error allocating net device\n");
+ ret = -ENOMEM;
+ goto err_exit;
+ }
+ priv = netdev_priv(netdev);
+ memset(priv, 0, sizeof(*priv));
+ if (!priv2) {
+ priv->rfoe_common =
+ kzalloc(sizeof(struct rfoe_common_cfg),
+ GFP_KERNEL);
+ if (!priv->rfoe_common) {
+ dev_err(cdev->dev, "kzalloc failed\n");
+ free_netdev(netdev);
+ ret = -ENOMEM;
+ goto err_exit;
+ }
+ priv->rfoe_common->refcnt = 1;
+ }
+ spin_lock_init(&priv->lock);
+ priv->netdev = netdev;
+ priv->cdev_priv = cdev;
+ priv->msg_enable = netif_msg_init(-1, 0);
+ spin_lock_init(&priv->stats.lock);
+ priv->rfoe_num = if_cfg->lmac_info.rfoe_num;
+ priv->lmac_id = if_cfg->lmac_info.lane_num;
+ priv->if_type = cfg[i].if_type;
+ memcpy(priv->mac_addr, if_cfg->lmac_info.eth_addr,
+ ETH_ALEN);
+ if (is_valid_ether_addr(priv->mac_addr))
+ ether_addr_copy(netdev->dev_addr,
+ priv->mac_addr);
+ else
+ random_ether_addr(netdev->dev_addr);
+ priv->pdev = pci_get_device(OTX2_BPHY_PCI_VENDOR_ID,
+ OTX2_BPHY_PCI_DEVICE_ID,
+ NULL);
+ priv->iommu_domain =
+ iommu_get_domain_for_dev(&priv->pdev->dev);
+ priv->bphy_reg_base = bphy_reg_base;
+ priv->psm_reg_base = psm_reg_base;
+ priv->rfoe_reg_base = rfoe_reg_base;
+ priv->bcn_reg_base = bcn_reg_base;
+ priv->ptp_reg_base = ptp_reg_base;
+ priv->ptp_cfg = ptp_cfg;
+ ++(priv->ptp_cfg->refcnt);
+
+ /* Initialise PTP TX work queue */
+ INIT_WORK(&priv->ptp_tx_work, otx2_rfoe_ptp_tx_work);
+ INIT_WORK(&priv->ptp_queue_work,
+ otx2_rfoe_ptp_submit_work);
+
+ /* Initialise PTP skb list */
+ INIT_LIST_HEAD(&priv->ptp_skb_list.list);
+ priv->ptp_skb_list.count = 0;
+ timer_setup(&priv->tx_timer, otx2_rfoe_tx_timer_cb, 0);
+
+ priv->pkt_type_mask = pkt_type_mask;
+ otx2_rfoe_fill_rx_ft_cfg(priv, if_cfg);
+ otx2_rfoe_dump_rx_ft_cfg(priv);
+
+ /* TX PTP job configuration */
+ if (priv->pkt_type_mask & (1U << PACKET_TYPE_PTP)) {
+ tx_cfg = &priv->tx_ptp_job_cfg;
+ tx_info = &if_cfg->ptp_pkt_info[0];
+ num_entries = MAX_PTP_MSG_PER_LMAC;
+ otx2_rfoe_fill_tx_job_entries(priv, tx_cfg,
+ tx_info,
+ num_entries);
+ }
+
+ /* TX ECPRI/OTH(PTP) job configuration */
+ if (!priv2 &&
+ ((priv->pkt_type_mask &
+ (1U << PACKET_TYPE_OTHER)) ||
+ (priv->pkt_type_mask &
+ (1U << PACKET_TYPE_ECPRI)))) {
+ /* RFOE 2 has only 2 LMACs */
+ num_entries = (priv->rfoe_num < 2) ?
+ MAX_OTH_MSG_PER_RFOE : 32;
+ tx_cfg = &priv->rfoe_common->tx_oth_job_cfg;
+ tx_info = &rfoe_cfg->oth_pkt_info[0];
+ otx2_rfoe_fill_tx_job_entries(priv, tx_cfg,
+ tx_info,
+ num_entries);
+ } else {
+ /* share rfoe_common data */
+ priv->rfoe_common = priv2->rfoe_common;
+ ++(priv->rfoe_common->refcnt);
+ }
+
+ /* keep last (rfoe + lmac) priv structure */
+ if (!priv2)
+ priv2 = priv;
+
+ intf_idx = (i * 4) + lmac;
+ snprintf(netdev->name, sizeof(netdev->name),
+ "rfoe%d", intf_idx);
+ netdev->netdev_ops = &otx2_rfoe_netdev_ops;
+ otx2_rfoe_set_ethtool_ops(netdev);
+ otx2_rfoe_ptp_init(priv);
+ netdev->watchdog_timeo = (15 * HZ);
+ netdev->mtu = 1500U;
+ netdev->min_mtu = ETH_MIN_MTU;
+ netdev->max_mtu = 1500U;
+ ret = register_netdev(netdev);
+ if (ret < 0) {
+ dev_err(cdev->dev,
+ "failed to register net device %s\n",
+ netdev->name);
+ free_netdev(netdev);
+ ret = -ENODEV;
+ goto err_exit;
+ }
+ dev_dbg(cdev->dev, "net device %s registered\n",
+ netdev->name);
+
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+ set_bit(RFOE_INTF_DOWN, &priv->state);
+ priv->link_state = LINK_STATE_UP;
+
+ /* initialize global ctx */
+ drv_ctx = &rfoe_drv_ctx[intf_idx];
+ drv_ctx->rfoe_num = priv->rfoe_num;
+ drv_ctx->lmac_id = priv->lmac_id;
+ drv_ctx->valid = 1;
+ drv_ctx->netdev = netdev;
+ drv_ctx->ft_cfg = &priv->rx_ft_cfg[0];
+
+ /* create debugfs entry */
+ otx2_rfoe_debugfs_create(drv_ctx);
+ }
+ }
+
+ return 0;
+
+err_exit:
+ for (i = 0; i < RFOE_MAX_INTF; i++) {
+ drv_ctx = &rfoe_drv_ctx[i];
+ if (drv_ctx->valid) {
+ otx2_rfoe_debugfs_remove(drv_ctx);
+ netdev = drv_ctx->netdev;
+ priv = netdev_priv(netdev);
+ otx2_rfoe_ptp_destroy(priv);
+ unregister_netdev(netdev);
+ for (idx = 0; idx < PACKET_TYPE_MAX; idx++) {
+ if (!(priv->pkt_type_mask & (1U << idx)))
+ continue;
+ ft_cfg = &priv->rx_ft_cfg[idx];
+ netif_napi_del(&ft_cfg->napi);
+ }
+ --(priv->rfoe_common->refcnt);
+ if (priv->rfoe_common->refcnt == 0)
+ kfree(priv->rfoe_common);
+ free_netdev(netdev);
+ drv_ctx->valid = 0;
+ }
+ }
+ del_timer_sync(&ptp_cfg->ptp_timer);
+ kfree(ptp_cfg);
+
+ return ret;
+}
+
+static void otx2_rfoe_debugfs_reader(char *buffer, size_t count, void *priv)
+{
+ struct otx2_rfoe_drv_ctx *ctx;
+ struct otx2_rfoe_ndev_priv *netdev;
+ u8 ptp_tx_in_progress;
+ unsigned int queued_ptp_reqs;
+ u8 queue_stopped, state_up;
+ u16 other_tx_psm_space, ptp_tx_psm_space, queue_id;
+ u64 regval;
+ const char *formatter;
+
+ ctx = priv;
+ netdev = netdev_priv(ctx->netdev);
+ ptp_tx_in_progress = test_bit(PTP_TX_IN_PROGRESS, &netdev->state);
+ queued_ptp_reqs = netdev->ptp_skb_list.count;
+ queue_stopped = netif_queue_stopped(ctx->netdev);
+ state_up = netdev->link_state;
+ formatter = otx2_rfoe_debugfs_get_formatter();
+
+ /* other tx psm space */
+ queue_id = netdev->rfoe_common->tx_oth_job_cfg.psm_queue_id;
+ regval = readq(netdev->psm_reg_base + PSM_QUEUE_SPACE(queue_id));
+ other_tx_psm_space = regval & 0x7FFF;
+
+ /* ptp tx psm space */
+ queue_id = netdev->tx_ptp_job_cfg.psm_queue_id;
+ regval = readq(netdev->psm_reg_base + PSM_QUEUE_SPACE(queue_id));
+ ptp_tx_psm_space = regval & 0x7FFF;
+
+ snprintf(buffer, count, formatter,
+ ptp_tx_in_progress,
+ queued_ptp_reqs,
+ queue_stopped,
+ state_up,
+ netdev->last_tx_jiffies,
+ netdev->last_tx_dropped_jiffies,
+ netdev->last_tx_ptp_jiffies,
+ netdev->last_tx_ptp_dropped_jiffies,
+ netdev->last_rx_jiffies,
+ netdev->last_rx_dropped_jiffies,
+ netdev->last_rx_ptp_jiffies,
+ netdev->last_rx_ptp_dropped_jiffies,
+ jiffies,
+ other_tx_psm_space,
+ ptp_tx_psm_space);
+}
+
+static const char *otx2_rfoe_debugfs_get_formatter(void)
+{
+ static const char *buffer_format = "ptp-tx-in-progress: %u\n"
+ "queued-ptp-reqs: %u\n"
+ "queue-stopped: %u\n"
+ "state-up: %u\n"
+ "last-tx-jiffies: %lu\n"
+ "last-tx-dropped-jiffies: %lu\n"
+ "last-tx-ptp-jiffies: %lu\n"
+ "last-tx-ptp-dropped-jiffies: %lu\n"
+ "last-rx-jiffies: %lu\n"
+ "last-rx-dropped-jiffies: %lu\n"
+ "last-rx-ptp-jiffies: %lu\n"
+ "last-rx-ptp-dropped-jiffies: %lu\n"
+ "current-jiffies: %lu\n"
+ "other-tx-psm-space: %u\n"
+ "ptp-tx-psm-space: %u\n";
+
+ return buffer_format;
+}
+
+static size_t otx2_rfoe_debugfs_get_buffer_size(void)
+{
+ static size_t buffer_size;
+
+ if (!buffer_size) {
+ const char *formatter = otx2_rfoe_debugfs_get_formatter();
+ u8 max_boolean = 1;
+ int max_ptp_req_count = max_ptp_req;
+ unsigned long max_jiffies = (unsigned long)-1;
+ u16 max_psm_space = (u16)-1;
+
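+ /* snprintf(NULL, 0, ...) returns the length that formatting the
+ * worst-case values would need; cache it so this runs only once
+ */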
+ buffer_size = snprintf(NULL, 0, formatter,
+ max_boolean,
+ max_ptp_req_count,
+ max_boolean,
+ max_boolean,
+ max_jiffies,
+ max_jiffies,
+ max_jiffies,
+ max_jiffies,
+ max_jiffies,
+ max_jiffies,
+ max_jiffies,
+ max_jiffies,
+ max_jiffies,
+ max_psm_space,
+ max_psm_space);
+ ++buffer_size;
+ }
+
+ return buffer_size;
+}
+
+static void otx2_rfoe_debugfs_create(struct otx2_rfoe_drv_ctx *ctx)
+{
+ size_t buffer_size = otx2_rfoe_debugfs_get_buffer_size();
+
+ ctx->debugfs = otx2_bphy_debugfs_add_file(ctx->netdev->name,
+ buffer_size, ctx,
+ otx2_rfoe_debugfs_reader);
+}
+
+static void otx2_rfoe_debugfs_remove(struct otx2_rfoe_drv_ctx *ctx)
+{
+ if (ctx->debugfs)
+ otx2_bphy_debugfs_remove_file(ctx->debugfs);
+}
+
+void otx2_rfoe_set_link_state(struct net_device *netdev, u8 state)
+{
+ struct otx2_rfoe_ndev_priv *priv;
+
+ priv = netdev_priv(netdev);
+
+ spin_lock(&priv->lock);
+ if (priv->link_state != state) {
+ priv->link_state = state;
+ if (state == LINK_STATE_DOWN) {
+ netdev_info(netdev, "Link DOWN\n");
+ if (netif_running(netdev)) {
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+ }
+ } else {
+ netdev_info(netdev, "Link UP\n");
+ if (netif_running(netdev)) {
+ netif_carrier_on(netdev);
+ netif_start_queue(netdev);
+ }
+ }
+ }
+ spin_unlock(&priv->lock);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_rfoe.h b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_rfoe.h
new file mode 100644
index 000000000000..da26a77d3cc6
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_rfoe.h
@@ -0,0 +1,227 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTx2 BPHY RFOE/CPRI Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _OTX2_RFOE_H_
+#define _OTX2_RFOE_H_
+
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/iommu.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_ether.h>
+#include <linux/net_tstamp.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/if_vlan.h>
+
+#include "otx2_bphy.h"
+#include "rfoe_common.h"
+
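+/* Each RFOE instance owns a 3-bit Rx interrupt field (one bit per packet
+ * type), packed downward from bit 31 of the 32-bit GP interrupt status.
+ */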
+#define RFOE_RX_INTR_SHIFT(a) (32 - ((a) + 1) * 3)
+#define RFOE_RX_INTR_MASK(a) (RFOE_RX_INTR_EN << \
+ RFOE_RX_INTR_SHIFT(a))
+#define RFOE_TX_PTP_INTR_MASK(a, b) (1UL << ((a) * 4 + (b)))
+
+#define MAX_RFOE_INTF 3 /* Max RFOE instances */
+#define RFOE_MAX_INTF 10 /* 2 rfoe x 4 lmac + 1 rfoe x 2 lmac */
+#define PCI_SUBSYS_DEVID_OCTX2_95XXN 0xB400
+
+/* ethtool msg */
+#define OTX2_RFOE_MSG_DEFAULT (NETIF_MSG_DRV)
+
+/* PTP clock time advances by adding a constant increment on every clock
+ * cycle. That increment is expressed (MIO_PTP_CLOCK_COMP) as a Q32.32
+ * number of nanoseconds (32 integer bits and 32 fractional bits) and
+ * must equal 1/(PTP clock frequency in Hz). A 1 GHz PTP clock poses no
+ * problem, but for other input clock frequencies, for example 950 MHz
+ * (SLCK) or 153.6 MHz (bcn_clk/2), the MIO_PTP_CLOCK_COMP value cannot
+ * be expressed exactly, so an error accumulates over time whose sign
+ * depends on how the PTP_CLOCK_COMP value is rounded. For 950 MHz the
+ * accumulated error is around -70 ps or +150 ps per second.
+ *
+ * To solve this, the driver calculates PTP timestamps using the BCN
+ * clock as reference, per the algorithm below:
+ *
+ * Set the PTP tick (= MIO_PTP_CLOCK_COMP) to 1.0 ns.
+ * Sample BCN and PTP once, at exactly the same time, as (BCN0, PTP0).
+ * Calculate (applying the BCN-to-PTP epoch difference and an OAM
+ * parameter secondaryBcnOffset)
+ * PTPbase[ns] = NanoSec(BCN0) + NanoSec(315964819[s]) - secondaryBcnOffset[ns]
+ * When reading a packet timestamp (tick count) PTPn, convert it to ns:
+ * PTP pkt timestamp = PTPbase[ns] + (PTPn - PTP0) / (PTP Clock in GHz)
+ *
+ * The intermediate values must be kept with pico-second precision to
+ * achieve PTP accuracy better than 1 ns, and the calculations must not
+ * overflow a 64-bit value at any time. A timer periodically adjusts the
+ * PTP and BCN base values to avoid such overflow.
+ */
+#define PTP_CLK_FREQ_GHZ 95 /* Clock freq GHz dividend */
+#define PTP_CLK_FREQ_DIV 100 /* Clock freq GHz divisor */
+#define PTP_OFF_RESAMPLE_THRESH 1800 /* resample period in seconds */
+#define PICO_SEC_PER_NSEC 1000 /* pico seconds per nano sec */
+#define UTC_GPS_EPOCH_DIFF 315964819UL /* UTC - GPS epoch secs */
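+/* Illustrative example of the formula above (not extra driver logic),
+ * using the 950 MHz values (PTP_CLK_FREQ_GHZ = 95, PTP_CLK_FREQ_DIV = 100):
+ * a raw tick delta of (PTPn - PTP0) = 950000000 converts to
+ *   950000000 * PICO_SEC_PER_NSEC * 100 / 95 = 1000000000000 ps = 1 s,
+ * which otx2_rfoe_calc_ptp_ts() rounds to nanoseconds and adds to the
+ * BCN-derived base (bcn0_n1_ns - sec_bcn_offset).
+ */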
+
+/* global driver context */
+struct otx2_rfoe_drv_ctx {
+ u8 rfoe_num;
+ u8 lmac_id;
+ int valid;
+ struct net_device *netdev;
+ struct rx_ft_cfg *ft_cfg;
+ int tx_gpint_bit;
+ void *debugfs;
+};
+
+extern struct otx2_rfoe_drv_ctx rfoe_drv_ctx[RFOE_MAX_INTF];
+
+/* rx flow table configuration */
+struct rx_ft_cfg {
+ enum bphy_netdev_packet_type pkt_type; /* pkt_type for psw */
+ enum bphy_netdev_rx_gpint gp_int_num;
+ u16 flow_id; /* flow id */
+ u16 mbt_idx; /* mbt index */
+ u16 buf_size; /* mbt buf size */
+ u16 num_bufs; /* mbt num bufs */
+ u64 mbt_iova_addr;
+ void __iomem *mbt_virt_addr;
+ u16 jdt_idx; /* jdt index */
+ u8 jd_size; /* jd size */
+ u16 num_jd; /* num jd's */
+ u64 jdt_iova_addr;
+ void __iomem *jdt_virt_addr;
+ u8 jd_rd_offset; /* jd rd offset */
+ u8 pkt_offset;
+ struct napi_struct napi;
+ struct otx2_rfoe_ndev_priv *priv;
+};
+
+/* PTP clock frequency in GHz, represented as a ratio of two integers.
+ * This information is passed to the netdev by the ODP BPHY application
+ * via ioctl, and the values are used by the PTP timestamp calculation
+ * algorithm.
+ *
+ * For a 950 MHz (0.95 GHz) PTP clock:
+ * clk_freq_ghz = 95
+ * clk_freq_div = 100
+ *
+ * For a 153.6 MHz (0.1536 GHz) PTP clock:
+ * clk_freq_ghz = 1536
+ * clk_freq_div = 10000
+ */
+struct ptp_clk_cfg {
+ int clk_freq_ghz; /* ptp clk freq */
+ int clk_freq_div; /* ptp clk divisor */
+};
+
+struct bcn_sec_offset_cfg {
+ u8 rfoe_num;
+ u8 lmac_id;
+ s32 sec_bcn_offset;
+};
+
+struct ptp_bcn_ref {
+ u64 ptp0_ns; /* PTP nanosec */
+ u64 bcn0_n1_ns; /* BCN N1 nanosec */
+ u64 bcn0_n2_ps; /* BCN N2 picosec */
+};
+
+struct ptp_bcn_off_cfg {
+ struct ptp_bcn_ref old_ref;
+ struct ptp_bcn_ref new_ref;
+ struct ptp_clk_cfg clk_cfg;
+ struct timer_list ptp_timer;
+ int use_ptp_alg;
+ u8 refcnt;
+ /* protection lock for updating ref */
+ spinlock_t lock;
+};
+
+/* netdev priv */
+struct otx2_rfoe_ndev_priv {
+ u8 rfoe_num;
+ u8 lmac_id;
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+ struct otx2_bphy_cdev_priv *cdev_priv;
+ u32 msg_enable;
+ u32 ptp_ext_clk_rate;
+ void __iomem *bphy_reg_base;
+ void __iomem *psm_reg_base;
+ void __iomem *rfoe_reg_base;
+ void __iomem *bcn_reg_base;
+ void __iomem *ptp_reg_base;
+ struct iommu_domain *iommu_domain;
+ struct rx_ft_cfg rx_ft_cfg[PACKET_TYPE_MAX];
+ struct tx_job_queue_cfg tx_ptp_job_cfg;
+ struct rfoe_common_cfg *rfoe_common;
+ u8 pkt_type_mask;
+ /* priv lock */
+ spinlock_t lock;
+ int rx_hw_tstamp_en;
+ int tx_hw_tstamp_en;
+ struct sk_buff *ptp_tx_skb;
+ u16 ptp_job_tag;
+ struct timer_list tx_timer;
+ unsigned long state;
+ struct work_struct ptp_tx_work;
+ struct work_struct ptp_queue_work;
+ struct ptp_tx_skb_list ptp_skb_list;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_clock_info;
+ struct cyclecounter cycle_counter;
+ struct timecounter time_counter;
+
+ struct delayed_work extts_work;
+ u64 last_extts;
+ u64 thresh;
+
+ struct ptp_pin_desc extts_config;
+ /* ptp lock */
+ struct mutex ptp_lock;
+ struct otx2_rfoe_stats stats;
+ u8 mac_addr[ETH_ALEN];
+ struct ptp_bcn_off_cfg *ptp_cfg;
+ s32 sec_bcn_offset;
+ int if_type;
+ u8 link_state;
+ unsigned long last_tx_jiffies;
+ unsigned long last_tx_ptp_jiffies;
+ unsigned long last_rx_jiffies;
+ unsigned long last_rx_ptp_jiffies;
+ unsigned long last_tx_dropped_jiffies;
+ unsigned long last_tx_ptp_dropped_jiffies;
+ unsigned long last_rx_dropped_jiffies;
+ unsigned long last_rx_ptp_dropped_jiffies;
+};
+
+void otx2_rfoe_rx_napi_schedule(int rfoe_num, u32 status);
+
+int otx2_rfoe_parse_and_init_intf(struct otx2_bphy_cdev_priv *cdev,
+ struct bphy_netdev_comm_intf_cfg *cfg);
+
+void otx2_bphy_rfoe_cleanup(void);
+
+void otx2_rfoe_disable_intf(int rfoe_num);
+
+/* ethtool */
+void otx2_rfoe_set_ethtool_ops(struct net_device *netdev);
+
+/* ptp */
+void otx2_rfoe_calc_ptp_ts(struct otx2_rfoe_ndev_priv *priv, u64 *ts);
+int otx2_rfoe_ptp_init(struct otx2_rfoe_ndev_priv *priv);
+void otx2_rfoe_ptp_destroy(struct otx2_rfoe_ndev_priv *priv);
+
+/* update carrier state */
+void otx2_rfoe_set_link_state(struct net_device *netdev, u8 state);
+
+#endif
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_rfoe_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_rfoe_ethtool.c
new file mode 100644
index 000000000000..d697c2e27bec
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_rfoe_ethtool.c
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 BPHY RFOE/CPRI Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "otx2_rfoe.h"
+#include "otx2_bphy_hw.h"
+
+static const char ethtool_stat_strings[][ETH_GSTRING_LEN] = {
+ "oth_rx_packets",
+ "ptp_rx_packets",
+ "ecpri_rx_packets",
+ "rx_bytes",
+ "oth_rx_dropped",
+ "ptp_rx_dropped",
+ "ecpri_rx_dropped",
+ "oth_tx_packets",
+ "ptp_tx_packets",
+ "ecpri_tx_packets",
+ "tx_bytes",
+ "oth_tx_dropped",
+ "ptp_tx_dropped",
+ "ecpri_tx_dropped",
+ "ptp_tx_hwtstamp_failures",
+ "EthIfInFrames",
+ "EthIfInOctets",
+ "EthIfOutFrames",
+ "EthIfOutOctets",
+ "EthIfInUnknownVlan",
+};
+
+static void otx2_rfoe_get_strings(struct net_device *netdev, u32 sset, u8 *data)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ memcpy(data, *ethtool_stat_strings,
+ sizeof(ethtool_stat_strings));
+ break;
+ }
+}
+
+static int otx2_rfoe_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(ethtool_stat_strings);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void otx2_rfoe_update_lmac_stats(struct otx2_rfoe_ndev_priv *priv)
+{
+ struct otx2_rfoe_stats *stats = &priv->stats;
+
+ stats->EthIfInFrames = readq(priv->rfoe_reg_base +
+ RFOEX_RX_CGX_PKT_STAT(priv->rfoe_num,
+ priv->lmac_id));
+ stats->EthIfInOctets = readq(priv->rfoe_reg_base +
+ RFOEX_RX_CGX_OCTS_STAT(priv->rfoe_num,
+ priv->lmac_id));
+ stats->EthIfOutFrames = readq(priv->rfoe_reg_base +
+ RFOEX_TX_PKT_STAT(priv->rfoe_num,
+ priv->lmac_id));
+ stats->EthIfOutOctets = readq(priv->rfoe_reg_base +
+ RFOEX_TX_OCTS_STAT(priv->rfoe_num,
+ priv->lmac_id));
+ stats->EthIfInUnknownVlan =
+ readq(priv->rfoe_reg_base +
+ RFOEX_RX_VLAN_DROP_STAT(priv->rfoe_num,
+ priv->lmac_id));
+}
+
+static void otx2_rfoe_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct otx2_rfoe_ndev_priv *priv = netdev_priv(netdev);
+
+ otx2_rfoe_update_lmac_stats(priv);
+ spin_lock(&priv->stats.lock);
+ memcpy(data, &priv->stats,
+ ARRAY_SIZE(ethtool_stat_strings) * sizeof(u64));
+ spin_unlock(&priv->stats.lock);
+}
+
+static void otx2_rfoe_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *p)
+{
+ struct otx2_rfoe_ndev_priv *priv = netdev_priv(netdev);
+
+ snprintf(p->driver, sizeof(p->driver), "otx2_rfoe {rfoe%d lmac%d}",
+ priv->rfoe_num, priv->lmac_id);
+ strlcpy(p->bus_info, "platform", sizeof(p->bus_info));
+}
+
+static int otx2_rfoe_get_ts_info(struct net_device *netdev,
+ struct ethtool_ts_info *info)
+{
+ struct otx2_rfoe_ndev_priv *priv = netdev_priv(netdev);
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->phc_index = ptp_clock_index(priv->ptp_clock);
+
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
+static u32 otx2_rfoe_get_msglevel(struct net_device *netdev)
+{
+ struct otx2_rfoe_ndev_priv *priv = netdev_priv(netdev);
+
+ return priv->msg_enable;
+}
+
+static void otx2_rfoe_set_msglevel(struct net_device *netdev, u32 level)
+{
+ struct otx2_rfoe_ndev_priv *priv = netdev_priv(netdev);
+
+ priv->msg_enable = level;
+}
+
+static const struct ethtool_ops otx2_rfoe_ethtool_ops = {
+ .get_drvinfo = otx2_rfoe_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_ts_info = otx2_rfoe_get_ts_info,
+ .get_strings = otx2_rfoe_get_strings,
+ .get_sset_count = otx2_rfoe_get_sset_count,
+ .get_ethtool_stats = otx2_rfoe_get_ethtool_stats,
+ .get_msglevel = otx2_rfoe_get_msglevel,
+ .set_msglevel = otx2_rfoe_set_msglevel,
+};
+
+void otx2_rfoe_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &otx2_rfoe_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_rfoe_ptp.c b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_rfoe_ptp.c
new file mode 100644
index 000000000000..a9f58c3bd0ab
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_rfoe_ptp.c
@@ -0,0 +1,268 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell BPHY RFOE PTP PHC support.
+ *
+ * Copyright (C) 2020 Marvell.
+ */
+
+#include "otx2_rfoe.h"
+
+#define EXT_PTP_CLK_RATE (125 * 1000000) /* Ext PTP clk rate */
+
+static int otx2_rfoe_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
+{
+ struct otx2_rfoe_ndev_priv *priv = container_of(ptp_info,
+ struct
+ otx2_rfoe_ndev_priv,
+ ptp_clock_info);
+
+ if (priv->pdev->subsystem_device == PCI_SUBSYS_DEVID_OCTX2_95XXN)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&priv->ptp_lock);
+ timecounter_adjtime(&priv->time_counter, delta);
+ mutex_unlock(&priv->ptp_lock);
+
+ return 0;
+}
+
+static int otx2_rfoe_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct otx2_rfoe_ndev_priv *priv = container_of(ptp,
+ struct
+ otx2_rfoe_ndev_priv,
+ ptp_clock_info);
+ bool neg_adj = false;
+ u64 comp, adj;
+ s64 ppb;
+
+ if (priv->pdev->subsystem_device == PCI_SUBSYS_DEVID_OCTX2_95XXN)
+ return -EOPNOTSUPP;
+
+ if (scaled_ppm < 0) {
+ neg_adj = true;
+ scaled_ppm = -scaled_ppm;
+ }
+
+ /* The hardware adds the clock compensation value to the PTP clock
+ * on every coprocessor clock cycle. By convention, it represents the
+ * number of nanoseconds between cycles as a 64-bit fixed-point value
+ * whose upper 32 bits are whole nanoseconds and whose lower 32 bits
+ * are fractions of a nanosecond.
+ * scaled_ppm is the ratio, in "parts per million", by which the
+ * compensation value should be corrected.
+ * The new compensation value is computed with 64-bit fixed-point
+ * arithmetic using the formula
+ *	comp = tbase + tbase * scaled_ppm / (1M * 2^16)
+ * where tbase is the basic compensation value derived below from the
+ * external PTP clock rate.
+ */
+ /* convert scaled_ppm to ppb */
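+ /* scaled_ppm has a 16-bit fractional part, so
+ * ppb = scaled_ppm * 1000 / 2^16 = scaled_ppm * 125 / 2^13
+ */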
+ ppb = 1 + scaled_ppm;
+ ppb *= 125;
+ ppb >>= 13;
+
+ comp = ((u64)1000000000ull << 32) / priv->ptp_ext_clk_rate;
+ adj = comp * ppb;
+ adj = div_u64(adj, 1000000000ull);
+ comp = neg_adj ? comp - adj : comp + adj;
+
+ writeq(comp, priv->ptp_reg_base + MIO_PTP_CLOCK_COMP);
+
+ return 0;
+}
+
+static u64 otx2_rfoe_ptp_cc_read(const struct cyclecounter *cc)
+{
+ struct otx2_rfoe_ndev_priv *priv = container_of(cc, struct
+ otx2_rfoe_ndev_priv,
+ cycle_counter);
+
+ return readq(priv->ptp_reg_base + MIO_PTP_CLOCK_HI);
+}
+
+static int otx2_rfoe_ptp_gettime(struct ptp_clock_info *ptp_info,
+ struct timespec64 *ts)
+{
+ struct otx2_rfoe_ndev_priv *priv = container_of(ptp_info,
+ struct
+ otx2_rfoe_ndev_priv,
+ ptp_clock_info);
+ u64 nsec;
+
+ mutex_lock(&priv->ptp_lock);
+
+ if (priv->pdev->subsystem_device == PCI_SUBSYS_DEVID_OCTX2_95XXN) {
+ nsec = readq(priv->ptp_reg_base + MIO_PTP_CLOCK_HI);
+ otx2_rfoe_calc_ptp_ts(priv, &nsec);
+ } else {
+ nsec = timecounter_read(&priv->time_counter);
+ }
+ mutex_unlock(&priv->ptp_lock);
+
+ *ts = ns_to_timespec64(nsec);
+
+ return 0;
+}
+
+static int otx2_rfoe_ptp_settime(struct ptp_clock_info *ptp_info,
+ const struct timespec64 *ts)
+{
+ struct otx2_rfoe_ndev_priv *priv = container_of(ptp_info,
+ struct
+ otx2_rfoe_ndev_priv,
+ ptp_clock_info);
+ u64 nsec;
+
+ if (priv->pdev->subsystem_device == PCI_SUBSYS_DEVID_OCTX2_95XXN)
+ return -EOPNOTSUPP;
+
+ nsec = timespec64_to_ns(ts);
+
+ mutex_lock(&priv->ptp_lock);
+ timecounter_init(&priv->time_counter, &priv->cycle_counter, nsec);
+ mutex_unlock(&priv->ptp_lock);
+
+ return 0;
+}
+
+static int otx2_rfoe_ptp_verify_pin(struct ptp_clock_info *ptp,
+ unsigned int pin,
+ enum ptp_pin_function func,
+ unsigned int chan)
+{
+ struct otx2_rfoe_ndev_priv *priv = container_of(ptp,
+ struct
+ otx2_rfoe_ndev_priv,
+ ptp_clock_info);
+
+ if (priv->pdev->subsystem_device == PCI_SUBSYS_DEVID_OCTX2_95XXN)
+ return -EOPNOTSUPP;
+
+ switch (func) {
+ case PTP_PF_NONE:
+ case PTP_PF_EXTTS:
+ break;
+ case PTP_PF_PEROUT:
+ case PTP_PF_PHYSYNC:
+ return -1;
+ }
+ return 0;
+}
+
+static void otx2_rfoe_ptp_extts_check(struct work_struct *work)
+{
+ struct otx2_rfoe_ndev_priv *priv = container_of(work, struct
+ otx2_rfoe_ndev_priv,
+ extts_work.work);
+ struct ptp_clock_event event;
+ u64 tstmp, new_thresh;
+
+ mutex_lock(&priv->ptp_lock);
+ tstmp = readq(priv->ptp_reg_base + MIO_PTP_TIMESTAMP);
+ mutex_unlock(&priv->ptp_lock);
+
+ if (tstmp != priv->last_extts) {
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = 0;
+ event.timestamp = timecounter_cyc2time(&priv->time_counter, tstmp);
+ ptp_clock_event(priv->ptp_clock, &event);
+ priv->last_extts = tstmp;
+
+ new_thresh = tstmp % 500000000;
+ if (priv->thresh != new_thresh) {
+ mutex_lock(&priv->ptp_lock);
+ writeq(new_thresh,
+ priv->ptp_reg_base + MIO_PTP_PPS_THRESH_HI);
+ mutex_unlock(&priv->ptp_lock);
+ priv->thresh = new_thresh;
+ }
+ }
+ schedule_delayed_work(&priv->extts_work, msecs_to_jiffies(200));
+}
+
+static int otx2_rfoe_ptp_enable(struct ptp_clock_info *ptp_info,
+ struct ptp_clock_request *rq, int on)
+{
+ struct otx2_rfoe_ndev_priv *priv = container_of(ptp_info,
+ struct
+ otx2_rfoe_ndev_priv,
+ ptp_clock_info);
+ int pin = -1;
+
+ if (priv->pdev->subsystem_device == PCI_SUBSYS_DEVID_OCTX2_95XXN)
+ return -EOPNOTSUPP;
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_EXTTS:
+ pin = ptp_find_pin(priv->ptp_clock, PTP_PF_EXTTS,
+ rq->extts.index);
+ if (pin < 0)
+ return -EBUSY;
+ if (on)
+ schedule_delayed_work(&priv->extts_work,
+ msecs_to_jiffies(200));
+ else
+ cancel_delayed_work_sync(&priv->extts_work);
+ return 0;
+ default:
+ break;
+ }
+ return -EOPNOTSUPP;
+}
+
+static const struct ptp_clock_info otx2_rfoe_ptp_clock_info = {
+ .owner = THIS_MODULE,
+ .name = "RFOE PTP",
+ .max_adj = 1000000000ull,
+ .n_ext_ts = 1,
+ .n_pins = 1,
+ .pps = 0,
+ .adjfine = otx2_rfoe_ptp_adjfine,
+ .adjtime = otx2_rfoe_ptp_adjtime,
+ .gettime64 = otx2_rfoe_ptp_gettime,
+ .settime64 = otx2_rfoe_ptp_settime,
+ .enable = otx2_rfoe_ptp_enable,
+ .verify = otx2_rfoe_ptp_verify_pin,
+};
+
+int otx2_rfoe_ptp_init(struct otx2_rfoe_ndev_priv *priv)
+{
+ struct cyclecounter *cc;
+ int err;
+
+ cc = &priv->cycle_counter;
+ cc->read = otx2_rfoe_ptp_cc_read;
+ cc->mask = CYCLECOUNTER_MASK(64);
+ cc->mult = 1;
+ cc->shift = 0;
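+ /* mult = 1, shift = 0: the timecounter treats the raw
+ * MIO_PTP_CLOCK_HI value as nanoseconds (1 cycle == 1 ns).
+ */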
+
+ timecounter_init(&priv->time_counter, &priv->cycle_counter,
+ ktime_to_ns(ktime_get_real()));
+ snprintf(priv->extts_config.name, sizeof(priv->extts_config.name),
+ "RFOE TSTAMP");
+ priv->extts_config.index = 0;
+ priv->extts_config.func = PTP_PF_NONE;
+ priv->ptp_clock_info = otx2_rfoe_ptp_clock_info;
+ priv->ptp_ext_clk_rate = EXT_PTP_CLK_RATE;
+ snprintf(priv->ptp_clock_info.name, 16, "%s", priv->netdev->name);
+ priv->ptp_clock_info.pin_config = &priv->extts_config;
+ INIT_DELAYED_WORK(&priv->extts_work, otx2_rfoe_ptp_extts_check);
+ priv->ptp_clock = ptp_clock_register(&priv->ptp_clock_info,
+ &priv->pdev->dev);
+ if (IS_ERR_OR_NULL(priv->ptp_clock)) {
+ /* Capture the error code before clearing the pointer */
+ err = priv->ptp_clock ? PTR_ERR(priv->ptp_clock) : -ENODEV;
+ priv->ptp_clock = NULL;
+ return err;
+ }
+
+ mutex_init(&priv->ptp_lock);
+
+ return 0;
+}
+
+void otx2_rfoe_ptp_destroy(struct otx2_rfoe_ndev_priv *priv)
+{
+ ptp_clock_unregister(priv->ptp_clock);
+ priv->ptp_clock = NULL;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/rfoe_bphy_netdev_comm_if.h b/drivers/net/ethernet/marvell/octeontx2/bphy/rfoe_bphy_netdev_comm_if.h
new file mode 100644
index 000000000000..06ce9660988f
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/rfoe_bphy_netdev_comm_if.h
@@ -0,0 +1,190 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTx2 BPHY RFOE/CPRI Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _RFOE_BPHY_NETDEV_COMM_IF_H_
+#define _RFOE_BPHY_NETDEV_COMM_IF_H_
+
+#include <linux/etherdevice.h>
+#include "bphy_netdev_comm_if.h"
+
+/**
+ * @enum bphy_netdev_tx_gpint
+ * @brief GP_INT numbers for packet notification by netdev to BPHY.
+ *
+ */
+enum bphy_netdev_tx_gpint {
+ TX_GP_INT_RFOE0_LMAC0 = 32, //PSM_GPINT32,
+ TX_GP_INT_RFOE0_LMAC1 = 33, //PSM_GPINT33,
+ TX_GP_INT_RFOE0_LMAC2 = 34, //PSM_GPINT34,
+ TX_GP_INT_RFOE0_LMAC3 = 35, //PSM_GPINT35,
+
+ TX_GP_INT_RFOE1_LMAC0 = 36, //PSM_GPINT36,
+ TX_GP_INT_RFOE1_LMAC1 = 37, //PSM_GPINT37,
+ TX_GP_INT_RFOE1_LMAC2 = 38, //PSM_GPINT38,
+ TX_GP_INT_RFOE1_LMAC3 = 39, //PSM_GPINT39,
+
+ TX_GP_INT_RFOE2_LMAC0 = 40, //PSM_GPINT40,
+ TX_GP_INT_RFOE2_LMAC1 = 41, //PSM_GPINT41
+};
+
+/**
+ * @enum bphy_netdev_rx_gpint
+ * @brief GP_INT numbers for packet notification by BPHY to netdev.
+ *
+ */
+enum bphy_netdev_rx_gpint {
+ RX_GP_INT_RFOE0_PTP = 63, //PSM_GPINT63,
+ RX_GP_INT_RFOE0_ECPRI = 62, //PSM_GPINT62,
+ RX_GP_INT_RFOE0_GENERIC = 61, //PSM_GPINT61,
+
+ RX_GP_INT_RFOE1_PTP = 60, //PSM_GPINT60,
+ RX_GP_INT_RFOE1_ECPRI = 59, //PSM_GPINT59,
+ RX_GP_INT_RFOE1_GENERIC = 58, //PSM_GPINT58,
+
+ RX_GP_INT_RFOE2_PTP = 57, //PSM_GPINT57,
+ RX_GP_INT_RFOE2_ECPRI = 56, //PSM_GPINT56,
+ RX_GP_INT_RFOE2_GENERIC = 55, //PSM_GPINT55
+};
+
+/**
+ * @enum bphy_netdev_cpri_rx_gpint
+ * @brief GP_INT numbers for CPRI Ethernet packet Rx notification to netdev.
+ *
+ */
+enum bphy_netdev_cpri_rx_gpint {
+ RX_GP_INT_CPRI0_ETH = 45, //PSM_GPINT45,
+ RX_GP_INT_CPRI1_ETH = 46, //PSM_GPINT46,
+ RX_GP_INT_CPRI2_ETH = 47, //PSM_GPINT47
+};
+
+/**
+ * @struct bphy_netdev_intf_info
+ * @brief LMAC lane number, mac address and status information
+ *
+ */
+struct bphy_netdev_intf_info {
+ u8 rfoe_num;
+ u8 lane_num;
+ /* Source mac address */
+ u8 eth_addr[ETH_ALEN];
+ /* LMAC interface status */
+ u8 status; //0-DOWN, 1-UP
+ /* Configuration valid status; This interface shall be
+ * invalid if this field is set to 0
+ */
+ u8 is_valid;
+};
+
+/**
+ * @struct bphy_netdev_rbuf_info
+ * @brief Information about the packet ring buffer used to send
+ * packets from BPHY to the netdev.
+ *
+ */
+struct bphy_netdev_rbuf_info {
+ enum bphy_netdev_packet_type pkt_type;
+ /* gp_int = 0 can be treated as pkt type not enabled */
+ enum bphy_netdev_rx_gpint gp_int_num;
+ u16 flow_id;
+ u16 mbt_index;
+ /* Maximum number of buffers in the Ring/Pool */
+ u16 num_bufs;
+ /* MAX Buffer Size configured */
+ u16 buf_size; // TBC: 1536?
+ /* MBT buffer target memory */
+ u8 mbt_target_mem;
+ u8 reserved;
+ /* Buffers starting address */
+ u64 mbt_iova_addr;
+ u16 jdt_index;
+ /* Maximum number of JD buffers in the Ring/Pool */
+ u16 num_jd;
+ /* MAX JD size configured */
+ u8 jd_size;
+ /* JDT buffer target memory */
+ u8 jdt_target_mem;
+ /* JD buffers starting address */
+ u64 jdt_iova_addr;
+};
+
+/**
+ * @struct bphy_netdev_tx_psm_cmd_info
+ * @brief TX PSM command information used by the netdev to submit
+ * packets to BPHY.
+ *
+ */
+struct bphy_netdev_tx_psm_cmd_info {
+ enum bphy_netdev_tx_gpint gp_int_num; /* Valid only for PTP messages */
+ u64 jd_iova_addr;
+ u64 rd_dma_iova_addr;
+ u64 low_cmd;
+ u64 high_cmd;
+};
+
+/**
+ * @struct bphy_netdev_comm_if
+ * @brief The communication interface definitions used by
+ * the netdev and bphy application.
+ *
+ */
+struct bphy_netdev_comm_if {
+ struct bphy_netdev_intf_info lmac_info;
+ struct bphy_netdev_rbuf_info rbuf_info[PACKET_TYPE_MAX];
+ /* Defining single array to handle both PTP and OTHER cmds info */
+ struct bphy_netdev_tx_psm_cmd_info ptp_pkt_info[MAX_PTP_MSG_PER_LMAC];
+};
+
+/**
+ * @struct bphy_netdev_cpri_if
+ * @brief Communication interface structure definition used by
+ * BPHY and NETDEV applications for CPRI Interface.
+ *
+ */
+struct bphy_netdev_cpri_if {
+ u8 id; /* CPRI ID 0..2 */
+ u8 active_lane_mask; /* lane mask */
+ u8 ul_gp_int_num; /* UL GP INT NUM */
+ u8 ul_int_threshold; /* UL INT THRESHOLD */
+ u8 num_ul_buf; /* Num UL Buffers */
+ u8 num_dl_buf; /* Num DL Buffers */
+ u8 reserved[2];
+ u64 ul_buf_iova_addr;
+ u64 dl_buf_iova_addr;
+ u8 eth_addr[MAX_LANE_PER_CPRI][ETH_ALEN];
+};
+
+/**
+ * @struct bphy_netdev_rfoe_if
+ * @brief Communication interface structure definition used by
+ * BPHY and NETDEV applications for RFOE Interface.
+ *
+ */
+struct bphy_netdev_rfoe_if {
+ /* Interface configuration */
+ struct bphy_netdev_comm_if if_cfg[MAX_LMAC_PER_RFOE];
+ /* TX JD cmds to send packets other than PTP;
+ * These are defined per RFoE and all LMACs can share them.
+ */
+ struct bphy_netdev_tx_psm_cmd_info oth_pkt_info[MAX_OTH_MSG_PER_RFOE];
+ /* Packet types for which the RX flows are configured.*/
+ u8 pkt_type_mask;
+};
+
+/**
+ * @struct bphy_netdev_comm_intf_cfg
+ * @brief ODP-NETDEV communication interface definition structure to share
+ * the RX/TX interface information.
+ *
+ */
+struct bphy_netdev_comm_intf_cfg {
+ enum bphy_netdev_if_type if_type; /* 0 --> ETHERNET, 1 --> CPRI */
+ struct bphy_netdev_rfoe_if rfoe_if_cfg; /* RFOE INTF configuration */
+ struct bphy_netdev_cpri_if cpri_if_cfg; /* CPRI INTF configuration */
+};
+
+#endif //_RFOE_BPHY_NETDEV_COMM_IF_H_
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/rfoe_common.h b/drivers/net/ethernet/marvell/octeontx2/bphy/rfoe_common.h
new file mode 100644
index 000000000000..6fb7c315bd0f
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/rfoe_common.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell BPHY RFOE Netdev Driver
+ *
+ * Copyright (C) 2021 Marvell.
+ */
+
+#ifndef _OTX2_RFOE_COMMON_H_
+#define _OTX2_RFOE_COMMON_H_
+
+#include <linux/netdevice.h>
+#include <linux/net_tstamp.h>
+
+#include "bphy_netdev_comm_if.h"
+
+/* PTP register offsets */
+#define MIO_PTP_CLOCK_HI 0x10
+#define MIO_PTP_TIMESTAMP 0x20
+#define MIO_PTP_PPS_THRESH_HI 0x58ULL
+#define MIO_PTP_CLOCK_COMP 0x18ULL
+
+/* max tx job entries */
+#define MAX_TX_JOB_ENTRIES 64
+
+/* GPINT(1) RFOE definitions */
+#define RX_PTP_INTR BIT(2) /* PTP packet intr */
+#define RX_ECPRI_INTR BIT(1) /* ECPRI packet intr */
+#define RX_GEN_INTR BIT(0) /* GENERIC packet intr */
+#define RFOE_RX_INTR_EN (RX_PTP_INTR | \
+ RX_ECPRI_INTR | \
+ RX_GEN_INTR)
+/* Interrupt processing definitions */
+#define INTR_TO_PKT_TYPE(a) (PACKET_TYPE_OTHER - (a))
+#define PKT_TYPE_TO_INTR(a) (1UL << (PACKET_TYPE_OTHER - (a)))
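+/* Note: these macros assume the RX packet types (defined in
+ * bphy_netdev_comm_if.h) occupy consecutive enum values ending at
+ * PACKET_TYPE_OTHER, so PKT_TYPE_TO_INTR(PACKET_TYPE_OTHER) yields
+ * BIT(0), matching RX_GEN_INTR above; if PACKET_TYPE_PTP sits two
+ * entries earlier, interrupt bit 2 (RX_PTP_INTR) maps back to it.
+ */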
+
+enum state {
+ PTP_TX_IN_PROGRESS = 1,
+ RFOE_INTF_DOWN,
+};
+
+/* rfoe rx ind register configuration */
+struct otx2_rfoe_rx_ind_cfg {
+ u8 rfoe_num; /* rfoe idx */
+ u16 rx_ind_idx; /* RFOE(0..2)_RX_INDIRECT_INDEX */
+ u64 regoff; /* RFOE(0..2)_RX_IND_* reg offset */
+ u64 regval; /* input when write, output when read */
+#define OTX2_RFOE_RX_IND_READ 0
+#define OTX2_RFOE_RX_IND_WRITE 1
+ u8 dir; /* register access dir (read/write) */
+};
+
+/* tx job entry */
+struct tx_job_entry {
+ u64 job_cmd_lo;
+ u64 job_cmd_hi;
+ u64 jd_iova_addr;
+ u64 rd_dma_iova_addr;
+ void __iomem *jd_ptr;
+ void __iomem *rd_dma_ptr;
+ void __iomem *jd_cfg_ptr;
+};
+
+/* tx job queue */
+struct tx_job_queue_cfg {
+ u8 psm_queue_id;
+ struct tx_job_entry job_entries[MAX_TX_JOB_ENTRIES];
+ /* actual number of entries configured by ODP */
+ int num_entries;
+ /* queue index */
+ int q_idx;
+ /* lmac protection lock */
+ spinlock_t lock;
+};
+
+/* rfoe common (for all lmac's) */
+struct rfoe_common_cfg {
+ struct tx_job_queue_cfg tx_oth_job_cfg;
+ int rx_mbt_last_idx[PACKET_TYPE_MAX];
+ u16 nxt_buf[PACKET_TYPE_MAX];
+ u8 refcnt;
+ u8 rx_vlan_fwd_refcnt[VLAN_N_VID];
+};
+
+/* ptp pending skb list */
+struct ptp_tx_skb_list {
+ struct list_head list;
+ unsigned int count;
+};
+
+/* ptp skb list entry */
+struct ptp_tstamp_skb {
+ struct list_head list;
+ struct sk_buff *skb;
+};
+
+struct otx2_rfoe_stats {
+ /* rx */
+ u64 rx_packets; /* rx packets */
+ u64 ptp_rx_packets; /* ptp rx packets */
+ u64 ecpri_rx_packets; /* ecpri rx packets */
+ u64 rx_bytes; /* rx bytes count */
+ u64 rx_dropped; /* rx dropped */
+ u64 ptp_rx_dropped; /* ptp rx dropped */
+ u64 ecpri_rx_dropped; /* ecpri rx dropped */
+
+ /* tx */
+ u64 tx_packets; /* tx packets */
+ u64 ptp_tx_packets; /* ptp tx packets */
+ u64 ecpri_tx_packets; /* ecpri tx packets */
+ u64 tx_bytes; /* tx bytes count */
+ u64 tx_dropped; /* tx dropped */
+ u64 ptp_tx_dropped; /* ptp tx dropped */
+ u64 ecpri_tx_dropped; /* ecpri tx dropped */
+ u64 tx_hwtstamp_failures; /* ptp tx timestamp failures */
+
+ /* per LMAC stats */
+ u64 EthIfInFrames;
+ u64 EthIfInOctets;
+ u64 EthIfOutFrames;
+ u64 EthIfOutOctets;
+ u64 EthIfInUnknownVlan;
+
+ /* stats update lock */
+ spinlock_t lock;
+};
+
+struct otx2_rfoe_link_event {
+ u8 rfoe_num;
+ u8 lmac_id;
+ u8 link_state;
+};
+
+#endif
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
index b2c6385707c9..c42abc2593e0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -1,13 +1,17 @@
# SPDX-License-Identifier: GPL-2.0
#
-# Makefile for Marvell's OcteonTX2 ethernet device drivers
+# Makefile for Marvell's RVU Ethernet device drivers
#
-obj-$(CONFIG_OCTEONTX2_PF) += octeontx2_nicpf.o
-obj-$(CONFIG_OCTEONTX2_VF) += octeontx2_nicvf.o
+obj-$(CONFIG_OCTEONTX2_PF) += rvu_nicpf.o otx2_ptp.o
+obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o otx2_ptp.o
-octeontx2_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
- otx2_ptp.o
-octeontx2_nicvf-y := otx2_vf.o
+rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
+ otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o \
+ otx2_devlink.o
+rvu_nicvf-y := otx2_vf.o otx2_smqvf.o otx2_devlink.o
+
+rvu_nicpf-$(CONFIG_DCB) += otx2_dcbnl.o
+rvu_nicvf-$(CONFIG_DCB) += otx2_dcbnl.o
ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
new file mode 100644
index 000000000000..d844611cad83
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
@@ -0,0 +1,482 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2021 Marvell.
+ *
+ */
+
+#include "cn10k.h"
+#include "otx2_reg.h"
+#include "otx2_struct.h"
+
+static struct dev_hw_ops otx2_hw_ops = {
+ .sq_aq_init = otx2_sq_aq_init,
+ .sqe_flush = otx2_sqe_flush,
+ .aura_freeptr = otx2_aura_freeptr,
+ .refill_pool_ptrs = otx2_refill_pool_ptrs,
+};
+
+static struct dev_hw_ops cn10k_hw_ops = {
+ .sq_aq_init = cn10k_sq_aq_init,
+ .sqe_flush = cn10k_sqe_flush,
+ .aura_freeptr = cn10k_aura_freeptr,
+ .refill_pool_ptrs = cn10k_refill_pool_ptrs,
+};
+
+int cn10k_lmtst_init(struct otx2_nic *pfvf)
+{
+ struct lmtst_tbl_setup_req *req;
+ struct otx2_lmt_info *lmt_info;
+ int err, cpu;
+
+ if (!test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) {
+ pfvf->hw_ops = &otx2_hw_ops;
+ return 0;
+ }
+
+ pfvf->hw_ops = &cn10k_hw_ops;
+ /* Total LMTLINES = num_online_cpus() * 32 (For Burst flush).*/
+ pfvf->tot_lmt_lines = (num_online_cpus() * LMT_BURST_SIZE);
+ pfvf->hw.lmt_info = alloc_percpu(struct otx2_lmt_info);
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_lmtst_tbl_setup(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->use_local_lmt_region = true;
+
+ err = qmem_alloc(pfvf->dev, &pfvf->dync_lmt, pfvf->tot_lmt_lines,
+ LMT_LINE_SIZE);
+ if (err) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+ }
+ pfvf->hw.lmt_base = (u64 *)pfvf->dync_lmt->base;
+ req->lmt_iova = (u64)pfvf->dync_lmt->iova;
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+
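+ /* Illustrative layout, using the LMT_LINE_SIZE = 128 and
+ * LMT_BURST_SIZE = 32 definitions from otx2_common.h: CPU 2 gets
+ * lmt_id = 64 and lmt_addr = lmt_base + 2 * 32 * 128, i.e. each CPU
+ * owns a private, contiguous burst of 32 LMTST lines (4 KB).
+ */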
+ for_each_possible_cpu(cpu) {
+ lmt_info = per_cpu_ptr(pfvf->hw.lmt_info, cpu);
+ lmt_info->lmt_addr = ((u64)pfvf->hw.lmt_base +
+ (cpu * LMT_BURST_SIZE * LMT_LINE_SIZE));
+ lmt_info->lmt_id = cpu * LMT_BURST_SIZE;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(cn10k_lmtst_init);
+
+int cn10k_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura)
+{
+ struct nix_cn10k_aq_enq_req *aq;
+ struct otx2_nic *pfvf = dev;
+
+ /* Get memory to put this msg */
+ aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
+ if (!aq)
+ return -ENOMEM;
+
+ aq->sq.cq = pfvf->hw.rx_queues + qidx;
+ aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */
+ aq->sq.cq_ena = 1;
+ aq->sq.ena = 1;
+ /* Only one SMQ is allocated, map all SQ's to that SMQ */
+ aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
+ aq->sq.smq_rr_weight = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
+ aq->sq.default_chan = pfvf->hw.tx_chan_base + chan_offset;
+ aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
+ aq->sq.sqb_aura = sqb_aura;
+ aq->sq.sq_int_ena = NIX_SQINT_BITS;
+ aq->sq.qint_idx = 0;
+ /* Due to pipelining impact, a minimum of 2000 unused SQ CQEs
+ * needs to be maintained to avoid CQ overflow.
+ */
+ aq->sq.cq_limit = ((SEND_CQ_SKID * 256) / (pfvf->qset.sqe_cnt));
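+ /* Illustrative check, assuming a 4K-entry send CQ: cq_limit is in
+ * units of cqe_cnt/256, so 2000 * 256 / 4096 = 125 and
+ * 125 * (4096 / 256) = 2000 CQEs stay in reserve, i.e. SEND_CQ_SKID.
+ */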
+
+ /* Fill AQ info */
+ aq->qidx = qidx;
+ aq->ctype = NIX_AQ_CTYPE_SQ;
+ aq->op = NIX_AQ_INSTOP_INIT;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+
+#define NPA_MAX_BURST 16
+void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
+{
+ struct otx2_nic *pfvf = dev;
+ u64 ptrs[NPA_MAX_BURST];
+ int num_ptrs = 1;
+ dma_addr_t bufptr;
+
+ /* Refill pool with new buffers */
+ while (cq->pool_ptrs) {
+ if (otx2_alloc_buffer(pfvf, cq, &bufptr)) {
+ if (num_ptrs--)
+ __cn10k_aura_freeptr(pfvf, cq->cq_idx, ptrs,
+ num_ptrs);
+ break;
+ }
+ cq->pool_ptrs--;
+ ptrs[num_ptrs] = (u64)bufptr + OTX2_HEAD_ROOM;
+ num_ptrs++;
+ if (num_ptrs == NPA_MAX_BURST || cq->pool_ptrs == 0) {
+ __cn10k_aura_freeptr(pfvf, cq->cq_idx, ptrs,
+ num_ptrs);
+ num_ptrs = 1;
+ }
+ }
+}
+
+void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx)
+{
+ struct otx2_lmt_info *lmt_info;
+ struct otx2_nic *pfvf = dev;
+ u64 val = 0, tar_addr = 0;
+
+ lmt_info = per_cpu_ptr(pfvf->hw.lmt_info, smp_processor_id());
+ /* FIXME: val[0:10] LMT_ID.
+ * [12:15] no of LMTST - 1 in the burst.
+ * [19:63] data size of each LMTST in the burst except first.
+ */
+ val = (lmt_info->lmt_id & 0x7FF);
+ /* Target address for LMTST flush tells HW how many 128bit
+ * words are present.
+ * tar_addr[6:4] size of first LMTST - 1 in units of 128b.
+ */
+ tar_addr |= sq->io_addr | (((size / 16) - 1) & 0x7) << 4;
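+ /* Worked example (illustrative): flushing one full 128-byte SQE gives
+ * (128 / 16) - 1 = 7, so tar_addr[6:4] = 7 (eight 128-bit words),
+ * while val carries only this CPU's LMT_ID since a single LMTST is
+ * issued here (bits [12:15] stay zero).
+ */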
+ dma_wmb();
+ memcpy((u64 *)lmt_info->lmt_addr, sq->sqe_base, size);
+ cn10k_lmt_flush(val, tar_addr);
+
+ sq->head++;
+ sq->head &= (sq->sqe_cnt - 1);
+}
+
+int cn10k_free_all_ipolicers(struct otx2_nic *pfvf)
+{
+ struct nix_bandprof_free_req *req;
+ int rc;
+
+ if (is_dev_otx2(pfvf->pdev))
+ return 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_nix_bandprof_free(&pfvf->mbox);
+ if (!req) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Free all bandwidth profiles allocated */
+ req->free_all = true;
+
+ rc = otx2_sync_mbox_msg(&pfvf->mbox);
+out:
+ mutex_unlock(&pfvf->mbox.lock);
+ return rc;
+}
+
+int cn10k_alloc_leaf_profile(struct otx2_nic *pfvf, u16 *leaf)
+{
+ struct nix_bandprof_alloc_req *req;
+ struct nix_bandprof_alloc_rsp *rsp;
+ int rc;
+
+ req = otx2_mbox_alloc_msg_nix_bandprof_alloc(&pfvf->mbox);
+ if (!req)
+ return -ENOMEM;
+
+ req->prof_count[BAND_PROF_LEAF_LAYER] = 1;
+
+ rc = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (rc)
+ goto out;
+
+ rsp = (struct nix_bandprof_alloc_rsp *)
+ otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+ if (!rsp->prof_count[BAND_PROF_LEAF_LAYER]) {
+ rc = -EIO;
+ goto out;
+ }
+
+ *leaf = rsp->prof_idx[BAND_PROF_LEAF_LAYER][0];
+out:
+ if (rc) {
+ dev_warn(pfvf->dev,
+ "Failed to allocate ingress bandwidth policer\n");
+ }
+
+ return rc;
+}
+
+int cn10k_alloc_matchall_ipolicer(struct otx2_nic *pfvf)
+{
+ struct otx2_hw *hw = &pfvf->hw;
+ int ret;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ ret = cn10k_alloc_leaf_profile(pfvf, &hw->matchall_ipolicer);
+
+ mutex_unlock(&pfvf->mbox.lock);
+
+ return ret;
+}
+
+#define POLICER_TIMESTAMP 1 /* 1 second */
+#define MAX_RATE_EXP 22 /* Valid rate exponent range: 0 - 22 */
+
+static void cn10k_get_ingress_burst_cfg(u32 burst, u32 *burst_exp,
+ u32 *burst_mantissa)
+{
+ int tmp;
+
+ /* Burst is calculated as
+ * (1+[BURST_MANTISSA]/256)*2^[BURST_EXPONENT]
+ * This is the upper limit on the number of tokens (bytes) that
+ * can be accumulated in the bucket.
+ */
+ *burst_exp = ilog2(burst);
+ if (burst < 256) {
+ /* No float: can't express mantissa in this case */
+ *burst_mantissa = 0;
+ return;
+ }
+
+ if (*burst_exp > MAX_RATE_EXP)
+ *burst_exp = MAX_RATE_EXP;
+
+ /* Calculate mantissa
+ * Find remaining bytes 'burst - 2^burst_exp'
+ * mantissa = (remaining bytes) / 2^ (burst_exp - 8)
+ */
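+ /* Worked example (illustrative): burst = 320 bytes gives
+ * burst_exp = ilog2(320) = 8 and a remainder of 320 - 256 = 64,
+ * so burst_mantissa = 64 / 2^(8 - 8) = 64 and the programmed burst
+ * is (1 + 64/256) * 2^8 = 320 bytes, as requested.
+ */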
+ tmp = burst - rounddown_pow_of_two(burst);
+ *burst_mantissa = tmp / (1UL << (*burst_exp - 8));
+}
+
+static void cn10k_get_ingress_rate_cfg(u64 rate, u32 *rate_exp,
+ u32 *rate_mantissa, u32 *rdiv)
+{
+ u32 div = 0;
+ u32 exp = 0;
+ u64 tmp;
+
+ /* Figure out mantissa, exponent and divider from given max pkt rate
+ *
+ * To achieve the desired rate the HW adds
+ * (1+[RATE_MANTISSA]/256)*2^[RATE_EXPONENT] tokens (bytes) at every
+ * policer timeunit * 2^rdiv, i.e. 2 * 2^rdiv usecs, to the token bucket.
+ * Here the policer timeunit is 2 usecs and the rate is in bits per sec.
+ * Since floating point cannot be used, the algorithm below uses a
+ * scale factor of 1000000 to support rates up to 100Gbps.
+ */
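+ /* Worked example (illustrative): rate = 100 Mbps gives
+ * tmp = 10^8 * 64 = 6.4e9, which is halved four times to land in
+ * [256e6, 512e6), so rate_exp = 4, rdiv = 0 and
+ * rate_mantissa = (4e8 - 256e6) / 1e6 = 144. The HW then adds
+ * (1 + 144/256) * 2^4 = 25 bytes every 2 usecs, i.e. 100 Mbps.
+ */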
+ tmp = rate * 32 * 2;
+ if (tmp < 256000000) {
+ while (tmp < 256000000) {
+ tmp = tmp * 2;
+ div++;
+ }
+ } else {
+ for (exp = 0; tmp >= 512000000 && exp <= MAX_RATE_EXP; exp++)
+ tmp = tmp / 2;
+
+ if (exp > MAX_RATE_EXP)
+ exp = MAX_RATE_EXP;
+ }
+
+ *rate_mantissa = (tmp - 256000000) / 1000000;
+ *rate_exp = exp;
+ *rdiv = div;
+}
+
+int cn10k_map_unmap_rq_policer(struct otx2_nic *pfvf, int rq_idx,
+ u16 policer, bool map)
+{
+ struct nix_cn10k_aq_enq_req *aq;
+
+ aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
+ if (!aq)
+ return -ENOMEM;
+
+ /* Enable policing and set the bandwidth profile (policer) index */
+ if (map)
+ aq->rq.policer_ena = 1;
+ else
+ aq->rq.policer_ena = 0;
+ aq->rq_mask.policer_ena = 1;
+
+ aq->rq.band_prof_id = policer;
+ aq->rq_mask.band_prof_id = GENMASK(9, 0);
+
+ /* Fill AQ info */
+ aq->qidx = rq_idx;
+ aq->ctype = NIX_AQ_CTYPE_RQ;
+ aq->op = NIX_AQ_INSTOP_WRITE;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+
+int cn10k_free_leaf_profile(struct otx2_nic *pfvf, u16 leaf)
+{
+ struct nix_bandprof_free_req *req;
+
+ req = otx2_mbox_alloc_msg_nix_bandprof_free(&pfvf->mbox);
+ if (!req)
+ return -ENOMEM;
+
+ req->prof_count[BAND_PROF_LEAF_LAYER] = 1;
+ req->prof_idx[BAND_PROF_LEAF_LAYER][0] = leaf;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+
+int cn10k_free_matchall_ipolicer(struct otx2_nic *pfvf)
+{
+ struct otx2_hw *hw = &pfvf->hw;
+ int qidx, rc;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ /* Remove RQ's policer mapping */
+ for (qidx = 0; qidx < hw->rx_queues; qidx++)
+ cn10k_map_unmap_rq_policer(pfvf, qidx,
+ hw->matchall_ipolicer, false);
+
+ rc = cn10k_free_leaf_profile(pfvf, hw->matchall_ipolicer);
+
+ mutex_unlock(&pfvf->mbox.lock);
+ return rc;
+}
+
+int cn10k_set_ipolicer_rate(struct otx2_nic *pfvf, u16 profile,
+ u32 burst, u64 rate, bool pps)
+{
+ struct nix_cn10k_aq_enq_req *aq;
+ u32 burst_exp, burst_mantissa;
+ u32 rate_exp, rate_mantissa;
+ u32 rdiv;
+
+ /* Get exponent and mantissa values for the desired rate */
+ cn10k_get_ingress_burst_cfg(burst, &burst_exp, &burst_mantissa);
+ cn10k_get_ingress_rate_cfg(rate, &rate_exp, &rate_mantissa, &rdiv);
+
+ /* Init bandwidth profile */
+ aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
+ if (!aq)
+ return -ENOMEM;
+
+ /* Set initial color mode to blind */
+ aq->prof.icolor = 0x03;
+ aq->prof_mask.icolor = 0x03;
+
+ /* Set rate and burst values */
+ aq->prof.cir_exponent = rate_exp;
+ aq->prof_mask.cir_exponent = 0x1F;
+
+ aq->prof.cir_mantissa = rate_mantissa;
+ aq->prof_mask.cir_mantissa = 0xFF;
+
+ aq->prof.cbs_exponent = burst_exp;
+ aq->prof_mask.cbs_exponent = 0x1F;
+
+ aq->prof.cbs_mantissa = burst_mantissa;
+ aq->prof_mask.cbs_mantissa = 0xFF;
+
+ aq->prof.rdiv = rdiv;
+ aq->prof_mask.rdiv = 0xF;
+
+ if (pps) {
+ /* The amount of decremented tokens is calculated according to
+ * the following equation:
+ * max([ LMODE ? 0 : (packet_length - LXPTR)] +
+ * ([ADJUST_MANTISSA]/256 - 1) * 2^[ADJUST_EXPONENT],
+ * 1/256)
+ * if LMODE is 1 then rate limiting will be based on
+ * PPS otherwise bps.
+ * The aim of the ADJUST value is to specify a token cost per
+ * packet, in contrast to the packet length which specifies a
+ * cost per byte. To rate limit based on PPS, the adjust mantissa
+ * is set to 384 and the exponent to 1 so that the number of
+ * tokens decremented becomes 1, i.e. 1 token per packet.
+ */
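+ /* Quick arithmetic check of the values below (illustrative): with
+ * LMODE = 1 the packet-length term is dropped, leaving
+ * max((384/256 - 1) * 2^1, 1/256) = max(1, 1/256) = 1 token
+ * per packet.
+ */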
+ aq->prof.adjust_exponent = 1;
+ aq->prof_mask.adjust_exponent = 0x1F;
+
+ aq->prof.adjust_mantissa = 384;
+ aq->prof_mask.adjust_mantissa = 0x1FF;
+
+ aq->prof.lmode = 0x1;
+ aq->prof_mask.lmode = 0x1;
+ }
+
+ /* Two rate three color marker
+ * With PEIR/EIR set to zero, color will be either green or red
+ */
+ aq->prof.meter_algo = 2;
+ aq->prof_mask.meter_algo = 0x3;
+
+ aq->prof.rc_action = NIX_RX_BAND_PROF_ACTIONRESULT_DROP;
+ aq->prof_mask.rc_action = 0x3;
+
+ aq->prof.yc_action = NIX_RX_BAND_PROF_ACTIONRESULT_PASS;
+ aq->prof_mask.yc_action = 0x3;
+
+ aq->prof.gc_action = NIX_RX_BAND_PROF_ACTIONRESULT_PASS;
+ aq->prof_mask.gc_action = 0x3;
+
+ /* Setting the exponent to 24 and the mantissa to 0 configures
+ * the bucket with zero values, leaving it unused. The Peak
+ * Information Rate and Excess Information Rate buckets are
+ * not used here.
+ */
+ aq->prof.peir_exponent = 24;
+ aq->prof_mask.peir_exponent = 0x1F;
+
+ aq->prof.peir_mantissa = 0;
+ aq->prof_mask.peir_mantissa = 0xFF;
+
+ aq->prof.pebs_exponent = 24;
+ aq->prof_mask.pebs_exponent = 0x1F;
+
+ aq->prof.pebs_mantissa = 0;
+ aq->prof_mask.pebs_mantissa = 0xFF;
+
+ /* Fill AQ info */
+ aq->qidx = profile;
+ aq->ctype = NIX_AQ_CTYPE_BANDPROF;
+ aq->op = NIX_AQ_INSTOP_WRITE;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+
+int cn10k_set_matchall_ipolicer_rate(struct otx2_nic *pfvf,
+ u32 burst, u64 rate)
+{
+ struct otx2_hw *hw = &pfvf->hw;
+ int qidx, rc;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ rc = cn10k_set_ipolicer_rate(pfvf, hw->matchall_ipolicer, burst,
+ rate, false);
+ if (rc)
+ goto out;
+
+ for (qidx = 0; qidx < hw->rx_queues; qidx++) {
+ rc = cn10k_map_unmap_rq_policer(pfvf, qidx,
+ hw->matchall_ipolicer, true);
+ if (rc)
+ break;
+ }
+
+out:
+ mutex_unlock(&pfvf->mbox.lock);
+ return rc;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
new file mode 100644
index 000000000000..28b3b3275fe6
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2021 Marvell.
+ *
+ */
+
+#ifndef CN10K_H
+#define CN10K_H
+
+#include "otx2_common.h"
+
+static inline int mtu_to_dwrr_weight(struct otx2_nic *pfvf, int mtu)
+{
+ u32 weight;
+
+ /* On OTx2, since AF returns DWRR_MTU as '1', this logic
+ * will work on those silicons as well.
+ */
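+ /* Illustrative example: with dwrr_mtu = 1500 (a plausible CN10K
+ * value) and mtu = 1600, weight = 1600 / 1500 rounded up to 2;
+ * on OTx2 (dwrr_mtu = 1) the weight simply equals the MTU.
+ */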
+ weight = mtu / pfvf->hw.dwrr_mtu;
+ if (mtu % pfvf->hw.dwrr_mtu)
+ weight += 1;
+
+ return weight;
+}
+
+void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
+void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx);
+int cn10k_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura);
+int cn10k_lmtst_init(struct otx2_nic *pfvf);
+int cn10k_free_all_ipolicers(struct otx2_nic *pfvf);
+int cn10k_alloc_matchall_ipolicer(struct otx2_nic *pfvf);
+int cn10k_free_matchall_ipolicer(struct otx2_nic *pfvf);
+int cn10k_set_matchall_ipolicer_rate(struct otx2_nic *pfvf,
+ u32 burst, u64 rate);
+int cn10k_map_unmap_rq_policer(struct otx2_nic *pfvf, int rq_idx,
+ u16 policer, bool map);
+int cn10k_alloc_leaf_profile(struct otx2_nic *pfvf, u16 *leaf);
+int cn10k_set_ipolicer_rate(struct otx2_nic *pfvf, u16 profile,
+ u32 burst, u64 rate, bool pps);
+int cn10k_free_leaf_profile(struct otx2_nic *pfvf, u16 leaf);
+#endif /* CN10K_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index b062ed06235d..58ddd0c54a79 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -1,11 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
-/* Marvell OcteonTx2 RVU Ethernet driver
+/* Marvell RVU Ethernet driver
*
- * Copyright (C) 2020 Marvell International Ltd.
+ * Copyright (C) 2020 Marvell.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/interrupt.h>
@@ -15,6 +12,7 @@
#include "otx2_reg.h"
#include "otx2_common.h"
#include "otx2_struct.h"
+#include "cn10k.h"
static void otx2_nix_rq_op_stats(struct queue_stats *stats,
struct otx2_nic *pfvf, int qidx)
@@ -60,6 +58,19 @@ void otx2_update_lmac_stats(struct otx2_nic *pfvf)
mutex_unlock(&pfvf->mbox.lock);
}
+void otx2_update_lmac_fec_stats(struct otx2_nic *pfvf)
+{
+ struct msg_req *req;
+
+ if (!netif_running(pfvf->netdev))
+ return;
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_fec_stats(&pfvf->mbox);
+ if (req)
+ otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+}
+
int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx)
{
struct otx2_rcv_queue *rq = &pfvf->qset.rq[qidx];
@@ -191,10 +202,18 @@ int otx2_set_mac_address(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- if (!otx2_hw_set_mac_addr(pfvf, addr->sa_data))
+ if (!otx2_hw_set_mac_addr(pfvf, addr->sa_data)) {
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
- else
+ /* update dmac field in vlan offload rule */
+ if (netif_running(netdev) &&
+ pfvf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
+ otx2_install_rxvlan_offload_flow(pfvf);
+ /* update dmac address in ntuple and DMAC filter list */
+ if (pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT)
+ otx2_dmacflt_update_pfmac_flow(pfvf);
+ } else {
return -EPERM;
+ }
return 0;
}
@@ -203,8 +222,11 @@ EXPORT_SYMBOL(otx2_set_mac_address);
int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu)
{
struct nix_frs_cfg *req;
+ u16 maxlen;
int err;
+ maxlen = otx2_get_max_mtu(pfvf) + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
+
mutex_lock(&pfvf->mbox.lock);
req = otx2_mbox_alloc_msg_nix_set_hw_frs(&pfvf->mbox);
if (!req) {
@@ -212,10 +234,18 @@ int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu)
return -ENOMEM;
}
- pfvf->max_frs = mtu + OTX2_ETH_HLEN;
- req->maxlen = pfvf->max_frs;
+ /* Add EDSA/HIGIG2 header length and timestamp length to maxlen */
+ req->maxlen = pfvf->netdev->mtu + OTX2_ETH_HLEN + pfvf->addl_mtu +
+ OTX2_HW_TIMESTAMP_LEN + pfvf->xtra_hdr;
+
+ if (is_otx2_lbkvf(pfvf->pdev))
+ req->maxlen = maxlen;
+
+ if (is_otx2_sdpvf(pfvf->pdev))
+ req->sdp_link = true;
err = otx2_sync_mbox_msg(&pfvf->mbox);
+
mutex_unlock(&pfvf->mbox.lock);
return err;
}
@@ -225,7 +255,7 @@ int otx2_config_pause_frm(struct otx2_nic *pfvf)
struct cgx_pause_frm_cfg *req;
int err;
- if (is_otx2_lbkvf(pfvf->pdev))
+ if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdpvf(pfvf->pdev))
return 0;
mutex_lock(&pfvf->mbox.lock);
@@ -244,10 +274,32 @@ unlock:
mutex_unlock(&pfvf->mbox.lock);
return err;
}
+EXPORT_SYMBOL(otx2_config_pause_frm);
+
+int otx2_config_serdes_link_state(struct otx2_nic *pfvf, bool en)
+{
+ struct cgx_set_link_state_msg *req;
+ int err;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_set_link_state(&pfvf->mbox);
+ if (!req) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ req->enable = !!en;
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+unlock:
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+}
+EXPORT_SYMBOL(otx2_config_serdes_link_state);
int otx2_set_flowkey_cfg(struct otx2_nic *pfvf)
{
struct otx2_rss_info *rss = &pfvf->hw.rss_info;
+ struct nix_rss_flowkey_cfg_rsp *rsp;
struct nix_rss_flowkey_cfg *req;
int err;
@@ -262,18 +314,33 @@ int otx2_set_flowkey_cfg(struct otx2_nic *pfvf)
req->group = DEFAULT_RSS_CONTEXT_GROUP;
err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err)
+ goto fail;
+
+ rsp = (struct nix_rss_flowkey_cfg_rsp *)
+ otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+ if (IS_ERR(rsp)) {
+ err = PTR_ERR(rsp);
+ goto fail;
+ }
+
+ pfvf->hw.flowkey_alg_idx = rsp->alg_idx;
+fail:
mutex_unlock(&pfvf->mbox.lock);
return err;
}
-int otx2_set_rss_table(struct otx2_nic *pfvf)
+int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id)
{
struct otx2_rss_info *rss = &pfvf->hw.rss_info;
+ const int index = rss->rss_size * ctx_id;
struct mbox *mbox = &pfvf->mbox;
+ struct otx2_rss_ctx *rss_ctx;
struct nix_aq_enq_req *aq;
int idx, err;
mutex_lock(&mbox->lock);
+ rss_ctx = rss->rss_ctx[ctx_id];
/* Get memory to put this msg */
for (idx = 0; idx < rss->rss_size; idx++) {
aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
@@ -293,10 +360,10 @@ int otx2_set_rss_table(struct otx2_nic *pfvf)
}
}
- aq->rss.rq = rss->ind_tbl[idx];
+ aq->rss.rq = rss_ctx->ind_tbl[idx];
/* Fill AQ info */
- aq->qidx = idx;
+ aq->qidx = index + idx;
aq->ctype = NIX_AQ_CTYPE_RSS;
aq->op = NIX_AQ_INSTOP_INIT;
}
@@ -331,9 +398,10 @@ void otx2_set_rss_key(struct otx2_nic *pfvf)
int otx2_rss_init(struct otx2_nic *pfvf)
{
struct otx2_rss_info *rss = &pfvf->hw.rss_info;
+ struct otx2_rss_ctx *rss_ctx;
int idx, ret = 0;
- rss->rss_size = sizeof(rss->ind_tbl);
+ rss->rss_size = sizeof(*rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP]);
/* Init RSS key if it is not setup already */
if (!rss->enable)
@@ -341,13 +409,19 @@ int otx2_rss_init(struct otx2_nic *pfvf)
otx2_set_rss_key(pfvf);
if (!netif_is_rxfh_configured(pfvf->netdev)) {
- /* Default indirection table */
+ /* Set RSS group 0 as default indirection table */
+ rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP] = kzalloc(rss->rss_size,
+ GFP_KERNEL);
+ if (!rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP])
+ return -ENOMEM;
+
+ rss_ctx = rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP];
for (idx = 0; idx < rss->rss_size; idx++)
- rss->ind_tbl[idx] =
+ rss_ctx->ind_tbl[idx] =
ethtool_rxfh_indir_default(idx,
pfvf->hw.rx_queues);
}
- ret = otx2_set_rss_table(pfvf);
+ ret = otx2_set_rss_table(pfvf, DEFAULT_RSS_CONTEXT_GROUP);
if (ret)
return ret;
@@ -355,7 +429,8 @@ int otx2_rss_init(struct otx2_nic *pfvf)
rss->flowkey_cfg = rss->enable ? rss->flowkey_cfg :
NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6 |
NIX_FLOW_KEY_TYPE_TCP | NIX_FLOW_KEY_TYPE_UDP |
- NIX_FLOW_KEY_TYPE_SCTP | NIX_FLOW_KEY_TYPE_VLAN;
+ NIX_FLOW_KEY_TYPE_SCTP | NIX_FLOW_KEY_TYPE_VLAN |
+ NIX_FLOW_KEY_TYPE_IPV4_PROTO;
ret = otx2_set_flowkey_cfg(pfvf);
if (ret)
@@ -468,34 +543,53 @@ void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx)
(pfvf->hw.cq_ecount_wait - 1));
}
-dma_addr_t __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool)
+int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
+ dma_addr_t *dma)
{
- dma_addr_t iova;
u8 *buf;
- buf = napi_alloc_frag(pool->rbsize + OTX2_ALIGN);
+ buf = napi_alloc_frag_align(pool->rbsize, OTX2_ALIGN);
if (unlikely(!buf))
return -ENOMEM;
- buf = PTR_ALIGN(buf, OTX2_ALIGN);
- iova = dma_map_single_attrs(pfvf->dev, buf, pool->rbsize,
+ *dma = dma_map_single_attrs(pfvf->dev, buf, pool->rbsize,
DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
- if (unlikely(dma_mapping_error(pfvf->dev, iova))) {
+ if (unlikely(dma_mapping_error(pfvf->dev, *dma))) {
page_frag_free(buf);
return -ENOMEM;
}
- return iova;
+ return 0;
}
-static dma_addr_t otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool)
+static int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
+ dma_addr_t *dma)
{
- dma_addr_t addr;
-
+ int ret;
local_bh_disable();
- addr = __otx2_alloc_rbuf(pfvf, pool);
+ ret = __otx2_alloc_rbuf(pfvf, pool, dma);
local_bh_enable();
- return addr;
+ return ret;
+}
+
+int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq,
+ dma_addr_t *dma)
+{
+ if (unlikely(__otx2_alloc_rbuf(pfvf, cq->rbpool, dma))) {
+ struct refill_work *work;
+ struct delayed_work *dwork;
+
+ work = &pfvf->refill_wrk[cq->cq_idx];
+ dwork = &work->pool_refill_work;
+ /* Schedule a task if no other task is running */
+ if (!cq->refill_task_sched) {
+ cq->refill_task_sched = true;
+ schedule_delayed_work(dwork,
+ msecs_to_jiffies(100));
+ }
+ return -ENOMEM;
+ }
+ return 0;
}
void otx2_tx_timeout(struct net_device *netdev, unsigned int txq)
@@ -521,28 +615,14 @@ void otx2_get_mac_from_af(struct net_device *netdev)
}
EXPORT_SYMBOL(otx2_get_mac_from_af);
-static int otx2_get_link(struct otx2_nic *pfvf)
-{
- int link = 0;
- u16 map;
-
- /* cgx lmac link */
- if (pfvf->hw.tx_chan_base >= CGX_CHAN_BASE) {
- map = pfvf->hw.tx_chan_base & 0x7FF;
- link = 4 * ((map >> 8) & 0xF) + ((map >> 4) & 0xF);
- }
- /* LBK channel */
- if (pfvf->hw.tx_chan_base < SDP_CHAN_BASE)
- link = 12;
-
- return link;
-}
-
int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
{
struct otx2_hw *hw = &pfvf->hw;
struct nix_txschq_config *req;
u64 schq, parent;
+ u64 dwrr_val;
+
+ dwrr_val = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox);
if (!req)
@@ -555,9 +635,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
/* Set topology e.t.c configuration */
if (lvl == NIX_TXSCH_LVL_SMQ) {
req->reg[0] = NIX_AF_SMQX_CFG(schq);
- req->regval[0] = ((OTX2_MAX_MTU + OTX2_ETH_HLEN) << 8) |
- OTX2_MIN_MTU;
-
+ req->regval[0] = ((u64)pfvf->tx_max_pktlen << 8) | OTX2_MIN_MTU;
req->regval[0] |= (0x20ULL << 51) | (0x80ULL << 39) |
(0x2ULL << 36);
req->num_regs++;
@@ -568,21 +646,26 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
req->num_regs++;
/* Set DWRR quantum */
req->reg[2] = NIX_AF_MDQX_SCHEDULE(schq);
- req->regval[2] = DFLT_RR_QTM;
+ req->regval[2] = dwrr_val;
} else if (lvl == NIX_TXSCH_LVL_TL4) {
parent = hw->txschq_list[NIX_TXSCH_LVL_TL3][0];
req->reg[0] = NIX_AF_TL4X_PARENT(schq);
req->regval[0] = parent << 16;
req->num_regs++;
req->reg[1] = NIX_AF_TL4X_SCHEDULE(schq);
- req->regval[1] = DFLT_RR_QTM;
+ req->regval[1] = dwrr_val;
+ if (is_otx2_sdpvf(pfvf->pdev)) {
+ req->num_regs++;
+ req->reg[2] = NIX_AF_TL4X_SDP_LINK_CFG(schq);
+ req->regval[2] = BIT_ULL(12);
+ }
} else if (lvl == NIX_TXSCH_LVL_TL3) {
parent = hw->txschq_list[NIX_TXSCH_LVL_TL2][0];
req->reg[0] = NIX_AF_TL3X_PARENT(schq);
req->regval[0] = parent << 16;
req->num_regs++;
req->reg[1] = NIX_AF_TL3X_SCHEDULE(schq);
- req->regval[1] = DFLT_RR_QTM;
+ req->regval[1] = dwrr_val;
} else if (lvl == NIX_TXSCH_LVL_TL2) {
parent = hw->txschq_list[NIX_TXSCH_LVL_TL1][0];
req->reg[0] = NIX_AF_TL2X_PARENT(schq);
@@ -590,20 +673,24 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
req->num_regs++;
req->reg[1] = NIX_AF_TL2X_SCHEDULE(schq);
- req->regval[1] = TXSCH_TL1_DFLT_RR_PRIO << 24 | DFLT_RR_QTM;
+ req->regval[1] = TXSCH_TL1_DFLT_RR_PRIO << 24 | dwrr_val;
- req->num_regs++;
- req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq,
- otx2_get_link(pfvf));
- /* Enable this queue and backpressure */
- req->regval[2] = BIT_ULL(13) | BIT_ULL(12);
+ if (!is_otx2_sdpvf(pfvf->pdev)) {
+ req->num_regs++;
+ req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link);
+ /* Enable this queue and backpressure */
+ req->regval[2] = BIT_ULL(13) | BIT_ULL(12);
+ }
} else if (lvl == NIX_TXSCH_LVL_TL1) {
/* Default config for TL1.
* For VF this is always ignored.
*/
- /* Set DWRR quantum */
+ /* On CN10K, if RR_WEIGHT is greater than 16384, HW will
+ * clip it to 16384, so configuring a 24bit max value
+ * will work on both OTx2 and CN10K.
+ */
req->reg[0] = NIX_AF_TL1X_SCHEDULE(schq);
req->regval[0] = TXSCH_TL1_DFLT_RR_QTM;
@@ -668,7 +755,7 @@ void otx2_sqb_flush(struct otx2_nic *pfvf)
int timeout = 1000;
ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
- for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
+ for (qidx = 0; qidx < pfvf->hw.tot_tx_queues; qidx++) {
incr = (u64)qidx << 32;
while (timeout) {
val = otx2_atomic64_add(incr, ptr);
@@ -698,9 +785,6 @@ void otx2_sqb_flush(struct otx2_nic *pfvf)
#define RQ_PASS_LVL_AURA (255 - ((95 * 256) / 100)) /* RED when 95% is full */
#define RQ_DROP_LVL_AURA (255 - ((99 * 256) / 100)) /* Drop when 99% is full */
-/* Send skid of 2000 packets required for CQ size of 4K CQEs. */
-#define SEND_CQ_SKID 2000
-
static int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura)
{
struct otx2_qset *qset = &pfvf->qset;
@@ -734,12 +818,50 @@ static int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura)
return otx2_sync_mbox_msg(&pfvf->mbox);
}
+int otx2_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura)
+{
+ struct otx2_nic *pfvf = dev;
+ struct otx2_snd_queue *sq;
+ struct nix_aq_enq_req *aq;
+
+ sq = &pfvf->qset.sq[qidx];
+ sq->lmt_addr = (__force u64 *)(pfvf->reg_base + LMT_LF_LMTLINEX(qidx));
+ /* Get memory to put this msg */
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
+ if (!aq)
+ return -ENOMEM;
+
+ aq->sq.cq = pfvf->hw.rx_queues + qidx;
+ aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */
+ aq->sq.cq_ena = 1;
+ aq->sq.ena = 1;
+ /* Only one SMQ is allocated, map all SQ's to that SMQ */
+ aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
+ aq->sq.smq_rr_quantum = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
+ aq->sq.default_chan = pfvf->hw.tx_chan_base + chan_offset;
+ aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
+ aq->sq.sqb_aura = sqb_aura;
+ aq->sq.sq_int_ena = NIX_SQINT_BITS;
+ aq->sq.qint_idx = 0;
+ /* Due to pipelining impact, a minimum of 2000 unused SQ CQEs
+ * needs to be maintained to avoid CQ overflow.
+ */
+ aq->sq.cq_limit = ((SEND_CQ_SKID * 256) / (pfvf->qset.sqe_cnt));
+
+ /* Fill AQ info */
+ aq->qidx = qidx;
+ aq->ctype = NIX_AQ_CTYPE_SQ;
+ aq->op = NIX_AQ_INSTOP_INIT;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+
static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
{
struct otx2_qset *qset = &pfvf->qset;
struct otx2_snd_queue *sq;
- struct nix_aq_enq_req *aq;
struct otx2_pool *pool;
+ u8 chan_offset;
int err;
pool = &pfvf->qset.pool[sqb_aura];
@@ -751,17 +873,19 @@ static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
if (err)
return err;
- err = qmem_alloc(pfvf->dev, &sq->tso_hdrs, qset->sqe_cnt,
- TSO_HEADER_SIZE);
- if (err)
- return err;
+ if (qidx < pfvf->hw.tx_queues) {
+ err = qmem_alloc(pfvf->dev, &sq->tso_hdrs, qset->sqe_cnt,
+ TSO_HEADER_SIZE);
+ if (err)
+ return err;
+ }
sq->sqe_base = sq->sqe->base;
sq->sg = kcalloc(qset->sqe_cnt, sizeof(struct sg_list), GFP_KERNEL);
if (!sq->sg)
return -ENOMEM;
- if (pfvf->ptp) {
+ if (pfvf->ptp && qidx < pfvf->hw.tx_queues) {
err = qmem_alloc(pfvf->dev, &sq->timestamps, qset->sqe_cnt,
sizeof(*sq->timestamps));
if (err)
@@ -775,59 +899,37 @@ static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
sq->sqe_thresh = ((sq->num_sqbs * sq->sqe_per_sqb) * 10) / 100;
sq->aura_id = sqb_aura;
sq->aura_fc_addr = pool->fc_addr->base;
- sq->lmt_addr = (__force u64 *)(pfvf->reg_base + LMT_LF_LMTLINEX(qidx));
sq->io_addr = (__force u64)otx2_get_regaddr(pfvf, NIX_LF_OP_SENDX(0));
sq->stats.bytes = 0;
sq->stats.pkts = 0;
- /* Get memory to put this msg */
- aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
- if (!aq)
- return -ENOMEM;
-
- aq->sq.cq = pfvf->hw.rx_queues + qidx;
- aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */
- aq->sq.cq_ena = 1;
- aq->sq.ena = 1;
- /* Only one SMQ is allocated, map all SQ's to that SMQ */
- aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
- aq->sq.smq_rr_quantum = DFLT_RR_QTM;
- aq->sq.default_chan = pfvf->hw.tx_chan_base;
- aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
- aq->sq.sqb_aura = sqb_aura;
- aq->sq.sq_int_ena = NIX_SQINT_BITS;
- aq->sq.qint_idx = 0;
- /* Due pipelining impact minimum 2000 unused SQ CQE's
- * need to maintain to avoid CQ overflow.
- */
- aq->sq.cq_limit = ((SEND_CQ_SKID * 256) / (sq->sqe_cnt));
-
- /* Fill AQ info */
- aq->qidx = qidx;
- aq->ctype = NIX_AQ_CTYPE_SQ;
- aq->op = NIX_AQ_INSTOP_INIT;
-
- return otx2_sync_mbox_msg(&pfvf->mbox);
+ chan_offset = qidx % pfvf->hw.tx_chan_cnt;
+ return pfvf->hw_ops->sq_aq_init(pfvf, qidx, chan_offset, sqb_aura);
}
static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
{
struct otx2_qset *qset = &pfvf->qset;
+ int err, pool_id, non_xdp_queues;
struct nix_aq_enq_req *aq;
struct otx2_cq_queue *cq;
- int err, pool_id;
cq = &qset->cq[qidx];
cq->cq_idx = qidx;
+ non_xdp_queues = pfvf->hw.rx_queues + pfvf->hw.tx_queues;
if (qidx < pfvf->hw.rx_queues) {
cq->cq_type = CQ_RX;
cq->cint_idx = qidx;
cq->cqe_cnt = qset->rqe_cnt;
- } else {
+ } else if (qidx < non_xdp_queues) {
cq->cq_type = CQ_TX;
cq->cint_idx = qidx - pfvf->hw.rx_queues;
cq->cqe_cnt = qset->sqe_cnt;
+ } else {
+ cq->cq_type = CQ_XDP;
+ cq->cint_idx = qidx - non_xdp_queues;
+ cq->cqe_cnt = qset->sqe_cnt;
}
cq->cqe_size = pfvf->qset.xqe_size;
@@ -845,6 +947,7 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
(pfvf->hw.rqpool_cnt != pfvf->hw.rx_queues)) ? 0 : qidx;
cq->rbpool = &qset->pool[pool_id];
cq->refill_task_sched = false;
+ cq->pend_cqe = 0;
/* Get memory to put this msg */
aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
@@ -864,12 +967,19 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
aq->cq.drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, cq->cqe_cnt);
aq->cq.drop_ena = 1;
- /* Enable receive CQ backpressure */
- aq->cq.bp_ena = 1;
- aq->cq.bpid = pfvf->bpid[0];
-
- /* Set backpressure level is same as cq pass level */
- aq->cq.bp = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
+ if (!is_otx2_lbkvf(pfvf->pdev)) {
+ /* Enable receive CQ backpressure */
+ aq->cq.bp_ena = 1;
+#ifdef CONFIG_DCB
+ aq->cq.bpid = pfvf->bpid[pfvf->queue_to_pfc_map[qidx]];
+#else
+ aq->cq.bpid = pfvf->bpid[0];
+#endif
+
+ /* Set backpressure level is same as cq pass level */
+ aq->cq.bp = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid,
+ qset->rqe_cnt);
+ }
}
/* Fill AQ info */
@@ -887,7 +997,7 @@ static void otx2_pool_refill_task(struct work_struct *work)
struct refill_work *wrk;
int qidx, free_ptrs = 0;
struct otx2_nic *pfvf;
- s64 bufptr;
+ dma_addr_t bufptr;
wrk = container_of(work, struct refill_work, pool_refill_work.work);
pfvf = wrk->pf;
@@ -897,8 +1007,7 @@ static void otx2_pool_refill_task(struct work_struct *work)
free_ptrs = cq->pool_ptrs;
while (cq->pool_ptrs) {
- bufptr = otx2_alloc_rbuf(pfvf, rbpool);
- if (bufptr <= 0) {
+ if (otx2_alloc_rbuf(pfvf, rbpool, &bufptr)) {
/* Schedule a WQ if we fails to free atleast half of the
* pointers else enable napi for this RQ.
*/
@@ -913,7 +1022,7 @@ static void otx2_pool_refill_task(struct work_struct *work)
}
return;
}
- otx2_aura_freeptr(pfvf, qidx, bufptr + OTX2_HEAD_ROOM);
+ pfvf->hw_ops->aura_freeptr(pfvf, qidx, bufptr + OTX2_HEAD_ROOM);
cq->pool_ptrs--;
}
cq->refill_task_sched = false;
@@ -933,7 +1042,7 @@ int otx2_config_nix_queues(struct otx2_nic *pfvf)
}
/* Initialize TX queues */
- for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
+ for (qidx = 0; qidx < pfvf->hw.tot_tx_queues; qidx++) {
u16 sqb_aura = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
err = otx2_sq_init(pfvf, qidx, sqb_aura);
@@ -948,6 +1057,8 @@ int otx2_config_nix_queues(struct otx2_nic *pfvf)
return err;
}
+ pfvf->cq_op_addr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_CQ_OP_STATUS);
+
/* Initialize work queue for receive buffer refill */
pfvf->refill_wrk = devm_kcalloc(pfvf->dev, pfvf->qset.cq_cnt,
sizeof(struct refill_work), GFP_KERNEL);
@@ -968,7 +1079,7 @@ int otx2_config_nix(struct otx2_nic *pfvf)
struct nix_lf_alloc_rsp *rsp;
int err;
- pfvf->qset.xqe_size = NIX_XQESZ_W16 ? 128 : 512;
+ pfvf->qset.xqe_size = pfvf->hw.xqe_size;
/* Get memory to put this msg */
nixlf = otx2_mbox_alloc_msg_nix_lf_alloc(&pfvf->mbox);
@@ -977,11 +1088,11 @@ int otx2_config_nix(struct otx2_nic *pfvf)
/* Set RQ/SQ/CQ counts */
nixlf->rq_cnt = pfvf->hw.rx_queues;
- nixlf->sq_cnt = pfvf->hw.tx_queues;
+ nixlf->sq_cnt = pfvf->hw.tot_tx_queues;
nixlf->cq_cnt = pfvf->qset.cq_cnt;
nixlf->rss_sz = MAX_RSS_INDIR_TBL_SIZE;
- nixlf->rss_grps = 1; /* Single RSS indir table supported, for now */
- nixlf->xqe_sz = NIX_XQESZ_W16;
+ nixlf->rss_grps = MAX_RSS_GROUPS;
+ nixlf->xqe_sz = pfvf->hw.xqe_size == 128 ? NIX_XQESZ_W16 : NIX_XQESZ_W64;
/* We don't know absolute NPA LF idx attached.
* AF will replace 'RVU_DEFAULT_PF_FUNC' with
* NPA LF attached to this RVU PF/VF.
@@ -1015,7 +1126,7 @@ void otx2_sq_free_sqbs(struct otx2_nic *pfvf)
int sqb, qidx;
u64 iova, pa;
- for (qidx = 0; qidx < hw->tx_queues; qidx++) {
+ for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) {
sq = &qset->sq[qidx];
if (!sq->sqb_ptrs)
continue;
@@ -1127,7 +1238,7 @@ static int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
aq->aura.fc_hyst_bits = 0; /* Store count on all updates */
/* Enable backpressure for RQ aura */
- if (aura_id < pfvf->hw.rqpool_cnt) {
+ if (aura_id < pfvf->hw.rqpool_cnt && !is_otx2_lbkvf(pfvf->pdev)) {
aq->aura.bp_ena = 0;
/* If NIX1 LF is attached then specify NIX1_RX.
*
@@ -1140,10 +1251,16 @@ static int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
* "NPA_AURA_S[BP_ENA](w1[33:32]) - Enable aura backpressure to
* NIX-RX based on [BP] level. One bit per NIX-RX; index
* enumerated by NPA_BPINTF_E."
+ * In the above description, 'One bit per NIX-RX' appears to have
+ * been written by mistake in the HRM.
*/
if (pfvf->nix_blkaddr == BLKADDR_NIX1)
aq->aura.bp_ena = 1;
+#ifdef CONFIG_DCB
+ aq->aura.nix0_bpid = pfvf->bpid[pfvf->queue_to_pfc_map[aura_id]];
+#else
aq->aura.nix0_bpid = pfvf->bpid[0];
+#endif
/* Set backpressure level for RQ's Aura */
aq->aura.bp = RQ_BP_LVL_AURA;
@@ -1212,8 +1329,8 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
struct otx2_hw *hw = &pfvf->hw;
struct otx2_snd_queue *sq;
struct otx2_pool *pool;
+ dma_addr_t bufptr;
int err, ptr;
- s64 bufptr;
/* Calculate number of SQBs needed.
*
@@ -1227,7 +1344,7 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
stack_pages =
(num_sqbs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs;
- for (qidx = 0; qidx < hw->tx_queues; qidx++) {
+ for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) {
pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
/* Initialize aura context */
err = otx2_aura_init(pfvf, pool_id, pool_id, num_sqbs);
@@ -1247,26 +1364,30 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
goto fail;
/* Allocate pointers and free them to aura/pool */
- for (qidx = 0; qidx < hw->tx_queues; qidx++) {
+ for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) {
pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
pool = &pfvf->qset.pool[pool_id];
sq = &qset->sq[qidx];
sq->sqb_count = 0;
- sq->sqb_ptrs = kcalloc(num_sqbs, sizeof(u64 *), GFP_KERNEL);
- if (!sq->sqb_ptrs)
- return -ENOMEM;
+ sq->sqb_ptrs = kcalloc(num_sqbs, sizeof(*sq->sqb_ptrs), GFP_KERNEL);
+ if (!sq->sqb_ptrs) {
+ err = -ENOMEM;
+ goto err_mem;
+ }
for (ptr = 0; ptr < num_sqbs; ptr++) {
- bufptr = otx2_alloc_rbuf(pfvf, pool);
- if (bufptr <= 0)
- return bufptr;
- otx2_aura_freeptr(pfvf, pool_id, bufptr);
+ err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
+ if (err)
+ goto err_mem;
+ pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr);
sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr;
}
}
- return 0;
+err_mem:
+ return err ? -ENOMEM : 0;
+
fail:
otx2_mbox_reset(&pfvf->mbox.mbox, 0);
otx2_aura_pool_free(pfvf);
@@ -1279,7 +1400,7 @@ int otx2_rq_aura_pool_init(struct otx2_nic *pfvf)
int stack_pages, pool_id, rq;
struct otx2_pool *pool;
int err, ptr, num_ptrs;
- s64 bufptr;
+ dma_addr_t bufptr;
num_ptrs = pfvf->qset.rqe_cnt;
@@ -1309,15 +1430,15 @@ int otx2_rq_aura_pool_init(struct otx2_nic *pfvf)
for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) {
pool = &pfvf->qset.pool[pool_id];
for (ptr = 0; ptr < num_ptrs; ptr++) {
- bufptr = otx2_alloc_rbuf(pfvf, pool);
- if (bufptr <= 0)
- return bufptr;
- otx2_aura_freeptr(pfvf, pool_id,
- bufptr + OTX2_HEAD_ROOM);
+ err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
+ if (err)
+ goto err_mem;
+ pfvf->hw_ops->aura_freeptr(pfvf, pool_id,
+ bufptr + OTX2_HEAD_ROOM);
}
}
-
- return 0;
+err_mem:
+ return err ? -ENOMEM : 0;
fail:
otx2_mbox_reset(&pfvf->mbox.mbox, 0);
otx2_aura_pool_free(pfvf);
@@ -1472,11 +1593,18 @@ int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable)
return -ENOMEM;
req->chan_base = 0;
- req->chan_cnt = 1;
+#ifdef CONFIG_DCB
+ req->chan_cnt = pfvf->pfc_en ? IEEE_8021QAZ_MAX_TCS : 1;
+ req->bpid_per_chan = pfvf->pfc_en ? 1 : 0;
+#else
+ req->chan_cnt = 1;
req->bpid_per_chan = 0;
+#endif
+
return otx2_sync_mbox_msg(&pfvf->mbox);
}
+EXPORT_SYMBOL(otx2_nix_config_bp);
/* Mbox message handlers */
void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
@@ -1490,6 +1618,13 @@ void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
pfvf->hw.cgx_tx_stats[id] = rsp->tx_stats[id];
}
+void mbox_handler_cgx_fec_stats(struct otx2_nic *pfvf,
+ struct cgx_fec_stats_rsp *rsp)
+{
+ pfvf->hw.cgx_fec_corr_blks += rsp->fec_corr_blks;
+ pfvf->hw.cgx_fec_uncorr_blks += rsp->fec_uncorr_blks;
+}
+
void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
struct nix_txsch_alloc_rsp *rsp)
{
@@ -1517,8 +1652,13 @@ void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf,
pfvf->hw.sqb_size = rsp->sqb_size;
pfvf->hw.rx_chan_base = rsp->rx_chan_base;
pfvf->hw.tx_chan_base = rsp->tx_chan_base;
+ pfvf->hw.rx_chan_cnt = rsp->rx_chan_cnt;
+ pfvf->hw.tx_chan_cnt = rsp->tx_chan_cnt;
pfvf->hw.lso_tsov4_idx = rsp->lso_tsov4_idx;
pfvf->hw.lso_tsov6_idx = rsp->lso_tsov6_idx;
+ pfvf->hw.cgx_links = rsp->cgx_links;
+ pfvf->hw.lbk_links = rsp->lbk_links;
+ pfvf->hw.tx_link = rsp->tx_link;
}
EXPORT_SYMBOL(mbox_handler_nix_lf_alloc);
@@ -1583,6 +1723,101 @@ void otx2_set_cints_affinity(struct otx2_nic *pfvf)
}
}
+u16 otx2_get_max_mtu(struct otx2_nic *pfvf)
+{
+ struct nix_hw_info *rsp;
+ struct msg_req *req;
+ u16 max_mtu;
+ int rc;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_nix_get_hw_info(&pfvf->mbox);
+ if (!req) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ rc = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (!rc) {
+ rsp = (struct nix_hw_info *)
+ otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+
+ /* HW counts VLAN insertion bytes (8 for double tag)
+ * irrespective of whether SQE is requesting to insert VLAN
+ * in the packet or not. Hence these 8 bytes have to be
+ * discounted from max packet size otherwise HW will throw
+ * SMQ errors
+ */
+ max_mtu = rsp->max_mtu - 8 - OTX2_ETH_HLEN;
+
+ /* Also save DWRR MTU, needed for DWRR weight calculation */
+ pfvf->hw.dwrr_mtu = rsp->rpm_dwrr_mtu;
+ if (!pfvf->hw.dwrr_mtu)
+ pfvf->hw.dwrr_mtu = 1;
+ }
+
+out:
+ mutex_unlock(&pfvf->mbox.lock);
+ if (rc) {
+ dev_warn(pfvf->dev,
+ "Failed to get MTU from hardware setting default value(1500)\n");
+ max_mtu = 1500;
+ }
+ return max_mtu;
+}
+EXPORT_SYMBOL(otx2_get_max_mtu);
+
+int otx2_handle_ntuple_tc_features(struct net_device *netdev, netdev_features_t features)
+{
+ netdev_features_t changed = features ^ netdev->features;
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ bool ntuple = !!(features & NETIF_F_NTUPLE);
+ bool tc = !!(features & NETIF_F_HW_TC);
+
+ if ((changed & NETIF_F_NTUPLE) && !ntuple)
+ otx2_destroy_ntuple_flows(pfvf);
+
+ if ((changed & NETIF_F_NTUPLE) && ntuple) {
+ if (!pfvf->flow_cfg->max_flows) {
+ netdev_err(netdev,
+ "Can't enable NTUPLE, MCAM entries not allocated\n");
+ return -EINVAL;
+ }
+ }
+
+ if ((changed & NETIF_F_HW_TC) && tc) {
+ if (!pfvf->flow_cfg->max_flows) {
+ netdev_err(netdev,
+ "Can't enable TC, MCAM entries not allocated\n");
+ return -EINVAL;
+ }
+ }
+
+ if ((changed & NETIF_F_HW_TC) && !tc &&
+ pfvf->flow_cfg && pfvf->flow_cfg->nr_flows) {
+ netdev_err(netdev, "Can't disable TC hardware offload while flows are active\n");
+ return -EBUSY;
+ }
+
+ if ((changed & NETIF_F_NTUPLE) && ntuple &&
+ (netdev->features & NETIF_F_HW_TC) && !(changed & NETIF_F_HW_TC)) {
+ netdev_err(netdev,
+ "Can't enable NTUPLE when TC is active, disable TC and retry\n");
+ return -EINVAL;
+ }
+
+ if ((changed & NETIF_F_HW_TC) && tc &&
+ (netdev->features & NETIF_F_NTUPLE) && !(changed & NETIF_F_NTUPLE)) {
+ netdev_err(netdev,
+ "Can't enable TC when NTUPLE is active, disable NTUPLE and retry\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(otx2_handle_ntuple_tc_features);
+
#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
int __weak \
otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf, \
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index d6253f2a414d..1fc59d74318e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -1,11 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Marvell OcteonTx2 RVU Ethernet driver
+/* Marvell RVU Ethernet driver
*
- * Copyright (C) 2020 Marvell International Ltd.
+ * Copyright (C) 2020 Marvell.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef OTX2_COMMON_H
@@ -16,18 +13,26 @@
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
+#include <linux/soc/marvell/octeontx2/asm.h>
+#include <net/pkt_cls.h>
+#include <net/devlink.h>
+#include <linux/time64.h>
#include <mbox.h>
+#include <npc.h>
#include "otx2_reg.h"
#include "otx2_txrx.h"
+#include "otx2_devlink.h"
#include <rvu_trace.h>
/* PCI device IDs */
#define PCI_DEVID_OCTEONTX2_RVU_PF 0xA063
#define PCI_DEVID_OCTEONTX2_RVU_VF 0xA064
+#define PCI_DEVID_OCTEONTX2_SDP_VF 0xA0F7
#define PCI_DEVID_OCTEONTX2_RVU_AFVF 0xA0F8
#define PCI_SUBSYS_DEVID_96XX_RVU_PFVF 0xB200
+#define PCI_SUBSYS_DEVID_95XX_RVU_PFVF 0xB200
/* PCI BAR nos */
#define PCI_CFG_REG_BAR_NUM 2
@@ -47,14 +52,25 @@ enum arua_mapped_qtypes {
#define NIX_LF_ERR_VEC 0x81
#define NIX_LF_POISON_VEC 0x82
+/* Send skid of 2000 packets required for CQ size of 4K CQEs. */
+#define SEND_CQ_SKID 2000
+
+struct otx2_lmt_info {
+ u64 lmt_addr;
+ u16 lmt_id;
+};
/* RSS configuration */
+struct otx2_rss_ctx {
+ u8 ind_tbl[MAX_RSS_INDIR_TBL_SIZE];
+};
+
struct otx2_rss_info {
u8 enable;
u32 flowkey_cfg;
u16 rss_size;
- u8 ind_tbl[MAX_RSS_INDIR_TBL_SIZE];
#define RSS_HASH_KEY_SIZE 44 /* 352 bit key */
u8 key[RSS_HASH_KEY_SIZE];
+ struct otx2_rss_ctx *rss_ctx[MAX_RSS_GROUPS];
};
/* NIX (or NPC) RX errors */
@@ -157,10 +173,14 @@ struct otx2_hw {
struct otx2_rss_info rss_info;
u16 rx_queues;
u16 tx_queues;
+ u16 xdp_queues;
+ u16 tot_tx_queues;
u16 max_queues;
u16 pool_cnt;
u16 rqpool_cnt;
u16 sqpool_cnt;
+ u16 xqe_size;
+ u16 rbuf_fixed_size;
/* NPA */
u32 stack_pg_ptrs; /* No of ptrs per stack page */
@@ -169,10 +189,14 @@ struct otx2_hw {
/* NIX */
u16 txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
+ u16 matchall_ipolicer;
+ u32 dwrr_mtu;
/* HW settings, coalescing etc */
u16 rx_chan_base;
u16 tx_chan_base;
+ u8 rx_chan_cnt;
+ u8 tx_chan_cnt;
u16 cq_qcount_wait;
u16 cq_ecount_wait;
u16 rq_skid;
@@ -183,7 +207,9 @@ struct otx2_hw {
u8 lso_tsov6_idx;
u8 lso_udpv4_idx;
u8 lso_udpv6_idx;
- u8 hw_tso;
+
+ /* RSS */
+ u8 flowkey_alg_idx;
/* MSI-X */
u8 cint_cnt; /* CQ interrupt count */
@@ -197,12 +223,45 @@ struct otx2_hw {
struct otx2_drv_stats drv_stats;
u64 cgx_rx_stats[CGX_RX_STATS_COUNT];
u64 cgx_tx_stats[CGX_TX_STATS_COUNT];
+ u64 cgx_fec_corr_blks;
+ u64 cgx_fec_uncorr_blks;
+ u8 cgx_links; /* No. of CGX links present in HW */
+ u8 lbk_links; /* No. of LBK links present in HW */
+ u8 tx_link; /* Transmit channel link number */
+#define HW_TSO 0
+#define CN10K_MBOX 1
+#define CN10K_LMTST 2
+#define CN10K_RPM 3
+#define CN10K_PTP_ONESTEP 4
+ unsigned long cap_flag;
+
+#define LMT_LINE_SIZE 128
+#define LMT_BURST_SIZE 32 /* 32 LMTST lines for burst SQE flush */
+ u64 *lmt_base;
+ struct otx2_lmt_info __percpu *lmt_info;
+};
+
+struct vfvlan {
+ u16 vlan;
+ u16 proto;
+ u8 qos;
+};
+
+enum vfperm {
+ OTX2_RESET_VF_PERM,
+ OTX2_TRUSTED_VF,
};
struct otx2_vf_config {
struct otx2_nic *pf;
struct delayed_work link_event_work;
+ struct delayed_work ptp_info_work;
bool intf_down; /* interface was either configured or not */
+ u8 mac[ETH_ALEN];
+ u16 vlan;
+ int tx_vtag_idx;
+ struct vfvlan rule;
+ bool trusted;
};
struct flr_work {
@@ -220,26 +279,94 @@ struct otx2_ptp {
struct ptp_clock *ptp_clock;
struct otx2_nic *nic;
- struct cyclecounter cycle_counter;
- struct timecounter time_counter;
+ struct delayed_work extts_work;
+ u64 last_extts;
+ u64 thresh;
+
+ struct ptp_pin_desc extts_config;
+ bool ptp_en;
+ u64 (*convert_rx_ptp_tstmp)(u64 timestamp);
+ u64 (*convert_tx_ptp_tstmp)(u64 timestamp);
+ struct delayed_work synctstamp_work;
+ u64 tstamp;
+};
+
+struct otx2_mac_table {
+ u8 addr[ETH_ALEN];
+ u16 mcam_entry;
+ bool inuse;
+};
+
+struct otx2_flow_config {
+ u16 *flow_ent;
+ u16 *def_ent;
+ u16 nr_flows;
+#define OTX2_DEFAULT_FLOWCOUNT 16
+#define OTX2_MAX_UNICAST_FLOWS 8
+#define OTX2_MAX_VLAN_FLOWS 1
+#define OTX2_MAX_TC_FLOWS OTX2_DEFAULT_FLOWCOUNT
+#define OTX2_MCAM_COUNT (OTX2_DEFAULT_FLOWCOUNT + \
+ OTX2_MAX_UNICAST_FLOWS + \
+ OTX2_MAX_VLAN_FLOWS)
+ u16 unicast_offset;
+ u16 rx_vlan_offset;
+ u16 vf_vlan_offset;
+#define OTX2_PER_VF_VLAN_FLOWS 2 /* Rx + Tx per VF */
+#define OTX2_VF_VLAN_RX_INDEX 0
+#define OTX2_VF_VLAN_TX_INDEX 1
+ u16 max_flows;
+ u8 dmacflt_max_flows;
+ u8 *bmap_to_dmacindex;
+ unsigned long dmacflt_bmap;
+ struct list_head flow_list;
};
#define OTX2_HW_TIMESTAMP_LEN 8
+struct otx2_tc_info {
+ /* hash table to store TC offloaded flows */
+ struct rhashtable flow_table;
+ struct rhashtable_params flow_ht_params;
+ unsigned long *tc_entries_bitmap;
+};
+
+struct dev_hw_ops {
+ int (*sq_aq_init)(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura);
+ void (*sqe_flush)(void *dev, struct otx2_snd_queue *sq,
+ int size, int qidx);
+ void (*refill_pool_ptrs)(void *dev, struct otx2_cq_queue *cq);
+ void (*aura_freeptr)(void *dev, int aura, u64 buf);
+};
+
struct otx2_nic {
void __iomem *reg_base;
struct net_device *netdev;
+ struct dev_hw_ops *hw_ops;
void *iommu_domain;
- u16 max_frs;
+ u16 xtra_hdr;
+ u16 tx_max_pktlen;
u16 rbsize; /* Receive buffer size */
#define OTX2_FLAG_RX_TSTAMP_ENABLED BIT_ULL(0)
#define OTX2_FLAG_TX_TSTAMP_ENABLED BIT_ULL(1)
#define OTX2_FLAG_INTF_DOWN BIT_ULL(2)
+#define OTX2_FLAG_MCAM_ENTRIES_ALLOC BIT_ULL(3)
+#define OTX2_FLAG_NTUPLE_SUPPORT BIT_ULL(4)
+#define OTX2_FLAG_UCAST_FLTR_SUPPORT BIT_ULL(5)
+#define OTX2_FLAG_RX_VLAN_SUPPORT BIT_ULL(6)
+#define OTX2_FLAG_VF_VLAN_SUPPORT BIT_ULL(7)
+#define OTX2_FLAG_PF_SHUTDOWN BIT_ULL(8)
#define OTX2_FLAG_RX_PAUSE_ENABLED BIT_ULL(9)
#define OTX2_FLAG_TX_PAUSE_ENABLED BIT_ULL(10)
+#define OTX2_FLAG_TC_FLOWER_SUPPORT BIT_ULL(11)
+#define OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED BIT_ULL(12)
+#define OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED BIT_ULL(13)
+#define OTX2_FLAG_DMACFLTR_SUPPORT BIT_ULL(14)
+#define OTX2_FLAG_PTP_ONESTEP_SYNC BIT_ULL(15)
u64 flags;
+ u64 *cq_op_addr;
+ struct bpf_prog *xdp_prog;
struct otx2_qset qset;
struct otx2_hw hw;
struct pci_dev *pdev;
@@ -259,6 +386,10 @@ struct otx2_nic {
u64 reset_count;
struct work_struct reset_task;
+
+ /* NPC MCAM */
+ struct otx2_flow_config *flow_cfg;
+
struct workqueue_struct *flr_wq;
struct flr_work *flr_wrk;
struct refill_work *refill_wrk;
@@ -270,9 +401,50 @@ struct otx2_nic {
/* Block address of NIX either BLKADDR_NIX0 or BLKADDR_NIX1 */
int nix_blkaddr;
+ struct qmem *dync_lmt;
+ u16 tot_lmt_lines;
+ u16 npa_lmt_lines;
+ u32 nix_lmt_size;
+ unsigned long rq_bmap;
struct otx2_ptp *ptp;
struct hwtstamp_config tstamp;
+ struct otx2_mac_table *mac_table;
+ struct otx2_tc_info tc_info;
+ struct workqueue_struct *otx2_ndo_wq;
+ struct work_struct otx2_rx_mode_work;
+
+#define OTX2_PRIV_FLAG_PAM4 BIT(0)
+#define OTX2_PRIV_FLAG_EDSA_HDR BIT(1)
+#define OTX2_PRIV_FLAG_HIGIG2_HDR BIT(2)
+#define OTX2_PRIV_FLAG_FDSA_HDR BIT(3)
+#define OTX2_INTF_MOD_MASK GENMASK(3, 1)
+#define OTX2_PRIV_FLAG_DEF_MODE BIT(4)
+#define OTX2_IS_EDSA_ENABLED(flags) ((flags) & \
+ OTX2_PRIV_FLAG_EDSA_HDR)
+#define OTX2_IS_HIGIG2_ENABLED(flags) ((flags) & \
+ OTX2_PRIV_FLAG_HIGIG2_HDR)
+#define OTX2_IS_DEF_MODE_ENABLED(flags) ((flags) & \
+ OTX2_PRIV_FLAG_DEF_MODE)
+#define OTX2_IS_INTFMOD_SET(flags) hweight32((flags) & OTX2_INTF_MOD_MASK)
+
+ u32 ethtool_flags;
+
+ /* Extended DSA and EDSA headers are 8/16 bytes long,
+ * so take the maximum length of 16 bytes here.
+ */
+#define OTX2_EDSA_HDR_LEN 16
+#define OTX2_HIGIG2_HDR_LEN 16
+#define OTX2_FDSA_HDR_LEN 4
+ u32 addl_mtu;
+
+ /* Devlink */
+ struct otx2_devlink *dl;
+#ifdef CONFIG_DCB
+ /* PFC */
+ u8 pfc_en;
+ u8 *queue_to_pfc_map;
+#endif
};
static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
@@ -280,10 +452,19 @@ static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
return pdev->device == PCI_DEVID_OCTEONTX2_RVU_AFVF;
}
+static inline bool is_otx2_sdpvf(struct pci_dev *pdev)
+{
+ return pdev->device == PCI_DEVID_OCTEONTX2_SDP_VF;
+}
+
static inline bool is_96xx_A0(struct pci_dev *pdev)
{
- return (pdev->revision == 0x00) &&
- (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF);
+ return (pdev->revision == 0x00);
+}
+
+static inline bool is_95xx_A0(struct pci_dev *pdev)
+{
+ return (pdev->revision == 0x10) || (pdev->revision == 0x11);
}
static inline bool is_96xx_B0(struct pci_dev *pdev)
@@ -292,6 +473,26 @@ static inline bool is_96xx_B0(struct pci_dev *pdev)
(pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF);
}
+/* REVID for PCIe devices.
+ * Bits 0..1: minor pass, bits 3..2: major pass,
+ * bits 7..4: midr id.
+ */
+#define PCI_REVISION_ID_96XX 0x00
+#define PCI_REVISION_ID_95XX 0x10
+#define PCI_REVISION_ID_95XXN 0x20
+#define PCI_REVISION_ID_98XX 0x30
+#define PCI_REVISION_ID_95XXMM 0x40
+#define PCI_REVISION_ID_95XXO 0xE0
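+
+/* Example (illustrative): a revision of 0x41 decodes as midr 0x4
+ * (the PCI_REVISION_ID_95XXMM family), major pass 0, minor pass 1.
+ */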
+
+static inline bool is_dev_otx2(struct pci_dev *pdev)
+{
+ u8 midr = pdev->revision & 0xF0;
+
+ return (midr == PCI_REVISION_ID_96XX || midr == PCI_REVISION_ID_95XX ||
+ midr == PCI_REVISION_ID_95XXN || midr == PCI_REVISION_ID_98XX ||
+ midr == PCI_REVISION_ID_95XXMM || midr == PCI_REVISION_ID_95XXO);
+}
+
static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
{
struct otx2_hw *hw = &pfvf->hw;
@@ -300,10 +501,10 @@ static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
pfvf->hw.cq_ecount_wait = CQ_CQE_THRESH_DEFAULT;
pfvf->hw.cq_qcount_wait = CQ_QCOUNT_DEFAULT;
- hw->hw_tso = true;
+ __set_bit(HW_TSO, &hw->cap_flag);
if (is_96xx_A0(pfvf->pdev)) {
- hw->hw_tso = false;
+ __clear_bit(HW_TSO, &hw->cap_flag);
/* Time based irq coalescing is not supported */
pfvf->hw.cq_qcount_wait = 0x0;
@@ -314,6 +515,24 @@ static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
pfvf->hw.rq_skid = 600;
pfvf->qset.rqe_cnt = Q_COUNT(Q_SIZE_1K);
}
+ if (is_96xx_A0(pfvf->pdev)) {
+ pfvf->hw.cq_qcount_wait = 0x0;
+
+ /* Due to a HW erratum there will be frequent stalls on the
+ * transmit side; instead of disabling the watchdog, set its
+ * timeout to a very high value.
+ */
+ pfvf->netdev->watchdog_timeo = 10000 * HZ;
+ }
+ if (is_96xx_B0(pfvf->pdev))
+ __clear_bit(HW_TSO, &hw->cap_flag);
+
+ if (!is_dev_otx2(pfvf->pdev)) {
+ __set_bit(CN10K_MBOX, &hw->cap_flag);
+ __set_bit(CN10K_LMTST, &hw->cap_flag);
+ __set_bit(CN10K_RPM, &hw->cap_flag);
+ __set_bit(CN10K_PTP_ONESTEP, &hw->cap_flag);
+ }
}
/* Register read/write APIs */
@@ -421,23 +640,53 @@ static inline u64 otx2_atomic64_add(u64 incr, u64 *ptr)
return result;
}
-static inline u64 otx2_lmt_flush(uint64_t addr)
-{
- u64 result = 0;
-
- __asm__ volatile(".cpu generic+lse\n"
- "ldeor xzr,%x[rf],[%[rs]]"
- : [rf]"=r"(result)
- : [rs]"r"(addr));
- return result;
-}
-
#else
-#define otx2_write128(lo, hi, addr)
+#define otx2_write128(lo, hi, addr) writeq((hi) | (lo), addr)
#define otx2_atomic64_add(incr, ptr) ({ *ptr += incr; })
-#define otx2_lmt_flush(addr) ({ 0; })
#endif
+static inline void __cn10k_aura_freeptr(struct otx2_nic *pfvf, u64 aura,
+ u64 *ptrs, u64 num_ptrs)
+{
+ struct otx2_lmt_info *lmt_info;
+ u64 size = 0, count_eot = 0;
+ u64 tar_addr, val = 0;
+
+ lmt_info = get_cpu_ptr(pfvf->hw.lmt_info);
+ tar_addr = (__force u64)otx2_get_regaddr(pfvf, NPA_LF_AURA_BATCH_FREE0);
+ /* LMTID is same as AURA Id */
+ val = (lmt_info->lmt_id & 0x7FF) | BIT_ULL(63);
+ /* Set if [127:64] of last 128bit word has a valid pointer */
+ count_eot = (num_ptrs % 2) ? 0ULL : 1ULL;
+ /* Set AURA ID to free pointer */
+ ptrs[0] = (count_eot << 32) | (aura & 0xFFFFF);
+ /* Target address for LMTST flush tells HW how many 128bit
+ * words are valid from NPA_LF_AURA_BATCH_FREE0.
+ *
+ * tar_addr[6:4] is LMTST size-1 in units of 128b.
+ */
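+ /* Worked example (illustrative): with num_ptrs = 15,
+ * size = (sizeof(u64) * 15) / 16 = 7 and count_eot = 0 (odd count),
+ * so size becomes 8 and tar_addr[6:4] is set to 7, i.e. eight
+ * 128-bit words are flushed.
+ */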
+ if (num_ptrs > 2) {
+ size = (sizeof(u64) * num_ptrs) / 16;
+ if (!count_eot)
+ size++;
+ tar_addr |= ((size - 1) & 0x7) << 4;
+ }
+ dma_wmb();
+ memcpy((u64 *)lmt_info->lmt_addr, ptrs, sizeof(u64) * num_ptrs);
+ put_cpu_ptr(pfvf->hw.lmt_info);
+ /* Perform LMTST flush */
+ cn10k_lmt_flush(val, tar_addr);
+}
+
+static inline void cn10k_aura_freeptr(void *dev, int aura, u64 buf)
+{
+ struct otx2_nic *pfvf = dev;
+ u64 ptrs[2] = {0};
+
+ ptrs[1] = buf;
+ /* Free only one buffer at a time during init and teardown */
+ __cn10k_aura_freeptr(pfvf, aura, ptrs, 2);
+}
/* Alloc pointer from pool/aura */
static inline u64 otx2_aura_allocptr(struct otx2_nic *pfvf, int aura)
{
@@ -449,11 +698,12 @@ static inline u64 otx2_aura_allocptr(struct otx2_nic *pfvf, int aura)
}
/* Free pointer to a pool/aura */
-static inline void otx2_aura_freeptr(struct otx2_nic *pfvf,
- int aura, s64 buf)
+static inline void otx2_aura_freeptr(void *dev, int aura, u64 buf)
{
- otx2_write128((u64)buf, (u64)aura | BIT_ULL(63),
- otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_FREE0));
+ struct otx2_nic *pfvf = dev;
+ void __iomem *addr = otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_FREE0);
+
+ otx2_write128(buf, (u64)aura | BIT_ULL(63), addr);
}
static inline int otx2_get_pool_idx(struct otx2_nic *pfvf, int type, int idx)
@@ -548,6 +798,11 @@ MBOX_UP_CGX_MESSAGES
#define RVU_PFVF_FUNC_SHIFT 0
#define RVU_PFVF_FUNC_MASK 0x3FF
+static inline bool is_otx2_vf(u16 pcifunc)
+{
+ return !!(pcifunc & RVU_PFVF_FUNC_MASK);
+}
+
static inline int rvu_get_pf(u16 pcifunc)
{
return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
@@ -585,6 +840,7 @@ void otx2_get_mac_from_af(struct net_device *netdev);
void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx);
int otx2_config_pause_frm(struct otx2_nic *pfvf);
void otx2_setup_segmentation(struct otx2_nic *pfvf);
+int otx2_config_serdes_link_state(struct otx2_nic *pfvf, bool en);
/* RVU block related APIs */
int otx2_attach_npa_nix(struct otx2_nic *pfvf);
@@ -601,18 +857,23 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl);
int otx2_txsch_alloc(struct otx2_nic *pfvf);
int otx2_txschq_stop(struct otx2_nic *pfvf);
void otx2_sqb_flush(struct otx2_nic *pfvf);
-dma_addr_t __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool);
+int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
+ dma_addr_t *dma);
int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable);
void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
+int otx2_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura);
+int cn10k_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura);
+int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq,
+ dma_addr_t *dma);
/* RSS configuration APIs*/
int otx2_rss_init(struct otx2_nic *pfvf);
int otx2_set_flowkey_cfg(struct otx2_nic *pfvf);
void otx2_set_rss_key(struct otx2_nic *pfvf);
-int otx2_set_rss_table(struct otx2_nic *pfvf);
+int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id);
/* Mbox handlers */
void mbox_handler_msix_offset(struct otx2_nic *pfvf,
@@ -625,6 +886,9 @@ void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
struct nix_txsch_alloc_rsp *rsp);
void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
struct cgx_stats_rsp *rsp);
+void mbox_handler_cgx_fec_stats(struct otx2_nic *pfvf,
+ struct cgx_fec_stats_rsp *rsp);
+void otx2_set_fec_stats_count(struct otx2_nic *pfvf);
void mbox_handler_nix_bp_enable(struct otx2_nic *pfvf,
struct nix_bp_cfg_rsp *rsp);
@@ -633,6 +897,7 @@ void otx2_get_dev_stats(struct otx2_nic *pfvf);
void otx2_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats);
void otx2_update_lmac_stats(struct otx2_nic *pfvf);
+void otx2_update_lmac_fec_stats(struct otx2_nic *pfvf);
int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx);
int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx);
void otx2_set_ethtool_ops(struct net_device *netdev);
@@ -642,4 +907,58 @@ int otx2_open(struct net_device *netdev);
int otx2_stop(struct net_device *netdev);
int otx2_set_real_num_queues(struct net_device *netdev,
int tx_queues, int rx_queues);
+int otx2_set_npc_parse_mode(struct otx2_nic *pfvf, bool unbind);
+int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd);
+int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr);
+
+/* MCAM filter related APIs */
+int otx2_mcam_flow_init(struct otx2_nic *pf);
+int otx2vf_mcam_flow_init(struct otx2_nic *pfvf);
+int otx2_alloc_mcam_entries(struct otx2_nic *pfvf, u16 count);
+void otx2_mcam_flow_del(struct otx2_nic *pf);
+int otx2_destroy_ntuple_flows(struct otx2_nic *pf);
+int otx2_destroy_mcam_flows(struct otx2_nic *pfvf);
+int otx2_get_flow(struct otx2_nic *pfvf,
+ struct ethtool_rxnfc *nfc, u32 location);
+int otx2_get_all_flows(struct otx2_nic *pfvf,
+ struct ethtool_rxnfc *nfc, u32 *rule_locs);
+int otx2_add_flow(struct otx2_nic *pfvf,
+ struct ethtool_rxnfc *nfc);
+int otx2_remove_flow(struct otx2_nic *pfvf, u32 location);
+int otx2_get_maxflows(struct otx2_flow_config *flow_cfg);
+void otx2_rss_ctx_flow_del(struct otx2_nic *pfvf, int ctx_id);
+int otx2_del_macfilter(struct net_device *netdev, const u8 *mac);
+int otx2_add_macfilter(struct net_device *netdev, const u8 *mac);
+int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable);
+int otx2_enable_vf_vlan(struct otx2_nic *pf);
+int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf);
+int otx2_do_set_vf_vlan(struct otx2_nic *pf, int vf, u16 vlan, u8 qos,
+ u16 proto);
+int otx2smqvf_probe(struct otx2_nic *vf);
+int otx2smqvf_remove(struct otx2_nic *vf);
+
+bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx);
+u16 otx2_get_max_mtu(struct otx2_nic *pfvf);
+int otx2_handle_ntuple_tc_features(struct net_device *netdev,
+ netdev_features_t features);
+/* tc support */
+int otx2_init_tc(struct otx2_nic *nic);
+void otx2_shutdown_tc(struct otx2_nic *nic);
+int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
+ void *type_data);
+int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic);
+/* CGX/RPM DMAC filters support */
+int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf);
+int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u8 bit_pos);
+int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac, u8 bit_pos);
+int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u8 bit_pos);
+void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf);
+void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf);
+
+#ifdef CONFIG_DCB
+/* DCB support*/
+void otx2_update_bpid_in_rqctx(struct otx2_nic *pfvf, int vlan_prio, int qidx, bool pfc_enable);
+int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf);
+int otx2_dcbnl_set_ops(struct net_device *dev);
+#endif
#endif /* OTX2_COMMON_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
new file mode 100644
index 000000000000..290f04436050
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2021 Marvell.
+ *
+ */
+
+#include "otx2_common.h"
+
+int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf)
+{
+ struct cgx_pfc_cfg *req;
+ struct cgx_pfc_rsp *rsp;
+ int err = 0;
+
+ if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdpvf(pfvf->pdev))
+ return 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_prio_flow_ctrl_cfg(&pfvf->mbox);
+ if (!req) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ if (pfvf->pfc_en) {
+ req->rx_pause = true;
+ req->tx_pause = true;
+ } else {
+ req->rx_pause = false;
+ req->tx_pause = false;
+ }
+ req->pfc_en = pfvf->pfc_en;
+
+ if (!otx2_sync_mbox_msg(&pfvf->mbox)) {
+ rsp = (struct cgx_pfc_rsp *)
+ otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+ if (req->rx_pause != rsp->rx_pause || req->tx_pause != rsp->tx_pause) {
+ dev_warn(pfvf->dev,
+ "Failed to config PFC\n");
+ err = -EPERM;
+ }
+ }
+unlock:
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+}
+
+void otx2_update_bpid_in_rqctx(struct otx2_nic *pfvf, int vlan_prio, int qidx,
+ bool pfc_enable)
+{
+ bool if_up = netif_running(pfvf->netdev);
+ struct npa_aq_enq_req *npa_aq;
+ struct nix_aq_enq_req *aq;
+ int err = 0;
+
+ if (pfvf->queue_to_pfc_map[qidx] && pfc_enable) {
+ dev_warn(pfvf->dev,
+ "PFC enable not permitted as Priority %d already mapped to Queue %d\n",
+ pfvf->queue_to_pfc_map[qidx], qidx);
+ return;
+ }
+
+ if (if_up) {
+ netif_tx_stop_all_queues(pfvf->netdev);
+ netif_carrier_off(pfvf->netdev);
+ }
+
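+ /* Record the priority for this queue, then rewrite the backpressure
+ * ID (BPID) in both the NIX CQ context and the NPA aura context so
+ * that backpressure is asserted per PFC priority.
+ */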
+ pfvf->queue_to_pfc_map[qidx] = vlan_prio;
+
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
+ if (!aq) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ aq->cq.bpid = pfvf->bpid[vlan_prio];
+ aq->cq_mask.bpid = GENMASK(8, 0);
+
+ /* Fill AQ info */
+ aq->qidx = qidx;
+ aq->ctype = NIX_AQ_CTYPE_CQ;
+ aq->op = NIX_AQ_INSTOP_WRITE;
+
+ otx2_sync_mbox_msg(&pfvf->mbox);
+
+ npa_aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
+ if (!npa_aq) {
+ err = -ENOMEM;
+ goto out;
+ }
+ npa_aq->aura.nix0_bpid = pfvf->bpid[vlan_prio];
+ npa_aq->aura_mask.nix0_bpid = GENMASK(8, 0);
+
+ /* Fill NPA AQ info */
+ npa_aq->aura_id = qidx;
+ npa_aq->ctype = NPA_AQ_CTYPE_AURA;
+ npa_aq->op = NPA_AQ_INSTOP_WRITE;
+ otx2_sync_mbox_msg(&pfvf->mbox);
+
+out:
+ if (if_up) {
+ netif_carrier_on(pfvf->netdev);
+ netif_tx_start_all_queues(pfvf->netdev);
+ }
+
+ if (err)
+ dev_warn(pfvf->dev,
+ "Updating BPIDs in CQ and Aura contexts of RQ%d failed with err %d\n",
+ qidx, err);
+}
+
+static int otx2_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+
+ pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS;
+ pfc->pfc_en = pfvf->pfc_en;
+
+ return 0;
+}
+
+static int otx2_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+ int err;
+
+ /* Save PFC configuration to interface */
+ pfvf->pfc_en = pfc->pfc_en;
+
+ err = otx2_config_priority_flow_ctrl(pfvf);
+ if (err)
+ return err;
+
+ /* Request per-channel BPIDs */
+ if (pfc->pfc_en)
+ otx2_nix_config_bp(pfvf, true);
+
+ return 0;
+}
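+
+/* Userspace example (illustrative, using the iproute2 'dcb' tool):
+ *   dcb pfc set dev eth0 prio-pfc 3:on
+ */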
+
+static u8 otx2_dcbnl_getdcbx(struct net_device __always_unused *dev)
+{
+ return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
+}
+
+static u8 otx2_dcbnl_setdcbx(struct net_device __always_unused *dev, u8 mode)
+{
+ return (mode != (DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE)) ? 1 : 0;
+}
+
+static const struct dcbnl_rtnl_ops otx2_dcbnl_ops = {
+ .ieee_getpfc = otx2_dcbnl_ieee_getpfc,
+ .ieee_setpfc = otx2_dcbnl_ieee_setpfc,
+ .getdcbx = otx2_dcbnl_getdcbx,
+ .setdcbx = otx2_dcbnl_setdcbx,
+};
+
+int otx2_dcbnl_set_ops(struct net_device *dev)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+
+ pfvf->queue_to_pfc_map = devm_kzalloc(pfvf->dev, pfvf->hw.rx_queues,
+ GFP_KERNEL);
+ if (!pfvf->queue_to_pfc_map)
+ return -ENOMEM;
+ dev->dcbnl_ops = &otx2_dcbnl_ops;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
new file mode 100644
index 000000000000..3284a2b353f0
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
@@ -0,0 +1,300 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU PF/VF Netdev Devlink
+ *
+ * Copyright (C) 2021 Marvell.
+ */
+
+#include "otx2_common.h"
+
+/* Devlink Params APIs */
+static int otx2_dl_mcam_count_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct otx2_devlink *otx2_dl = devlink_priv(devlink);
+ struct otx2_nic *pfvf = otx2_dl->pfvf;
+ struct otx2_flow_config *flow_cfg;
+
+ if (!pfvf->flow_cfg) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "pfvf->flow_cfg not initialized");
+ return -EINVAL;
+ }
+
+ flow_cfg = pfvf->flow_cfg;
+ if (flow_cfg && flow_cfg->nr_flows) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot modify count when there are active rules");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int otx2_dl_mcam_count_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct otx2_devlink *otx2_dl = devlink_priv(devlink);
+ struct otx2_nic *pfvf = otx2_dl->pfvf;
+
+ if (!pfvf->flow_cfg)
+ return 0;
+
+ otx2_alloc_mcam_entries(pfvf, ctx->val.vu16);
+ otx2_tc_alloc_ent_bitmap(pfvf);
+
+ return 0;
+}
+
+static int otx2_dl_mcam_count_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct otx2_devlink *otx2_dl = devlink_priv(devlink);
+ struct otx2_nic *pfvf = otx2_dl->pfvf;
+ struct otx2_flow_config *flow_cfg;
+
+ if (!pfvf->flow_cfg) {
+ ctx->val.vu16 = 0;
+ return 0;
+ }
+
+ flow_cfg = pfvf->flow_cfg;
+ ctx->val.vu16 = flow_cfg->max_flows;
+
+ return 0;
+}
+
+static int otx2_dl_rbuf_size_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ /* Hardware supports a maximum receive buffer size of 32K,
+ * and 1536 is a typical Ethernet frame size.
+ */
+ if (val.vu16 < 1536 || val.vu16 > 32768) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Receive buffer range is 1536 - 32768");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int otx2_dl_rbuf_size_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct otx2_devlink *otx2_dl = devlink_priv(devlink);
+ struct otx2_nic *pfvf = otx2_dl->pfvf;
+ struct net_device *netdev;
+ int err = 0;
+ bool if_up;
+
+ rtnl_lock();
+
+ netdev = pfvf->netdev;
+ if_up = netif_running(netdev);
+ if (if_up)
+ netdev->netdev_ops->ndo_stop(netdev);
+
+ pfvf->hw.rbuf_fixed_size = ALIGN(ctx->val.vu16, OTX2_ALIGN) +
+ OTX2_HEAD_ROOM;
+
+ if (if_up)
+ err = netdev->netdev_ops->ndo_open(netdev);
+
+ rtnl_unlock();
+
+ return err;
+}
+
+static int otx2_dl_rbuf_size_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct otx2_devlink *otx2_dl = devlink_priv(devlink);
+ struct otx2_nic *pfvf = otx2_dl->pfvf;
+
+ ctx->val.vu16 = pfvf->hw.rbuf_fixed_size;
+
+ return 0;
+}
+
+static int otx2_dl_cqe_size_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ if (val.vu16 != 128 && val.vu16 != 512) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only 128 or 512 byte descriptor allowed");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int otx2_dl_cqe_size_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct otx2_devlink *otx2_dl = devlink_priv(devlink);
+ struct otx2_nic *pfvf = otx2_dl->pfvf;
+ struct net_device *netdev;
+ int err = 0;
+ bool if_up;
+
+ rtnl_lock();
+
+ netdev = pfvf->netdev;
+ if_up = netif_running(netdev);
+ if (if_up)
+ netdev->netdev_ops->ndo_stop(netdev);
+
+ pfvf->hw.xqe_size = ctx->val.vu16;
+
+ if (if_up)
+ err = netdev->netdev_ops->ndo_open(netdev);
+
+ rtnl_unlock();
+
+ return err;
+}
+
+static int otx2_dl_cqe_size_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct otx2_devlink *otx2_dl = devlink_priv(devlink);
+ struct otx2_nic *pfvf = otx2_dl->pfvf;
+
+ ctx->val.vu16 = pfvf->hw.xqe_size;
+
+ return 0;
+}
+
+static int otx2_dl_serdes_link_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct otx2_devlink *otx2_dl = devlink_priv(devlink);
+ struct otx2_nic *pfvf = otx2_dl->pfvf;
+
+ if (!is_otx2_vf(pfvf->pcifunc))
+ return otx2_config_serdes_link_state(pfvf, ctx->val.vbool);
+
+ return -EOPNOTSUPP;
+}
+
+static int otx2_dl_serdes_link_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct otx2_devlink *otx2_dl = devlink_priv(devlink);
+ struct otx2_nic *pfvf = otx2_dl->pfvf;
+
+ ctx->val.vbool = (pfvf->linfo.link_up) ? true : false;
+
+ return 0;
+}
+
+enum otx2_dl_param_id {
+ OTX2_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ OTX2_DEVLINK_PARAM_ID_MCAM_COUNT,
+ OTX2_DEVLINK_PARAM_ID_CQE_SIZE,
+ OTX2_DEVLINK_PARAM_ID_RBUF_SIZE,
+ OTX2_DEVLINK_PARAM_ID_SERDES_LINK,
+};
+
+static const struct devlink_param otx2_dl_params[] = {
+ DEVLINK_PARAM_DRIVER(OTX2_DEVLINK_PARAM_ID_MCAM_COUNT,
+ "mcam_count", DEVLINK_PARAM_TYPE_U16,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ otx2_dl_mcam_count_get, otx2_dl_mcam_count_set,
+ otx2_dl_mcam_count_validate),
+ DEVLINK_PARAM_DRIVER(OTX2_DEVLINK_PARAM_ID_CQE_SIZE,
+ "completion_descriptor_size", DEVLINK_PARAM_TYPE_U16,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ otx2_dl_cqe_size_get, otx2_dl_cqe_size_set,
+ otx2_dl_cqe_size_validate),
+ DEVLINK_PARAM_DRIVER(OTX2_DEVLINK_PARAM_ID_RBUF_SIZE,
+ "receive_buffer_size", DEVLINK_PARAM_TYPE_U16,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ otx2_dl_rbuf_size_get, otx2_dl_rbuf_size_set,
+ otx2_dl_rbuf_size_validate),
+ DEVLINK_PARAM_DRIVER(OTX2_DEVLINK_PARAM_ID_SERDES_LINK,
+ "serdes_link", DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ otx2_dl_serdes_link_get, otx2_dl_serdes_link_set,
+ NULL),
+};
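+
+/* Userspace examples (illustrative device address):
+ *   devlink dev param set pci/0002:02:00.0 name mcam_count value 64 cmode runtime
+ *   devlink dev param show pci/0002:02:00.0 name receive_buffer_size
+ */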
+
+/* Devlink OPs */
+static int otx2_devlink_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct otx2_devlink *otx2_dl = devlink_priv(devlink);
+ struct otx2_nic *pfvf = otx2_dl->pfvf;
+
+ if (is_otx2_vf(pfvf->pcifunc))
+ return devlink_info_driver_name_put(req, "rvu_nicvf");
+
+ return devlink_info_driver_name_put(req, "rvu_nicpf");
+}
+
+static const struct devlink_ops otx2_devlink_ops = {
+ .info_get = otx2_devlink_info_get,
+};
+
+int otx2_register_dl(struct otx2_nic *pfvf)
+{
+ struct otx2_devlink *otx2_dl;
+ struct devlink *dl;
+ int err;
+
+ dl = devlink_alloc(&otx2_devlink_ops, sizeof(struct otx2_devlink));
+ if (!dl) {
+ dev_warn(pfvf->dev, "devlink_alloc failed\n");
+ return -ENOMEM;
+ }
+
+ err = devlink_register(dl, pfvf->dev);
+ if (err) {
+ dev_err(pfvf->dev, "devlink register failed with error %d\n", err);
+ devlink_free(dl);
+ return err;
+ }
+
+ otx2_dl = devlink_priv(dl);
+ otx2_dl->dl = dl;
+ otx2_dl->pfvf = pfvf;
+ pfvf->dl = otx2_dl;
+
+ err = devlink_params_register(dl, otx2_dl_params,
+ ARRAY_SIZE(otx2_dl_params));
+ if (err) {
+ dev_err(pfvf->dev,
+ "devlink params register failed with error %d", err);
+ goto err_dl;
+ }
+
+ devlink_params_publish(dl);
+
+ return 0;
+
+err_dl:
+ devlink_unregister(dl);
+ devlink_free(dl);
+ return err;
+}
+
+void otx2_unregister_dl(struct otx2_nic *pfvf)
+{
+ struct otx2_devlink *otx2_dl = pfvf->dl;
+ struct devlink *dl;
+
+ if (!otx2_dl || !otx2_dl->dl)
+ return;
+
+ dl = otx2_dl->dl;
+
+ devlink_params_unregister(dl, otx2_dl_params,
+ ARRAY_SIZE(otx2_dl_params));
+
+ devlink_unregister(dl);
+ devlink_free(dl);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.h
new file mode 100644
index 000000000000..c7bd4f3c6c6b
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU PF/VF Netdev Devlink
+ *
+ * Copyright (C) 2021 Marvell.
+ *
+ */
+
+#ifndef OTX2_DEVLINK_H
+#define OTX2_DEVLINK_H
+
+struct otx2_devlink {
+ struct devlink *dl;
+ struct otx2_nic *pfvf;
+};
+
+/* Devlink APIs */
+int otx2_register_dl(struct otx2_nic *pfvf);
+void otx2_unregister_dl(struct otx2_nic *pfvf);
+
+#endif /* OTX2_DEVLINK_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c
new file mode 100644
index 000000000000..2ec800f741d8
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2021 Marvell.
+ *
+ */
+
+#include "otx2_common.h"
+
+static int otx2_dmacflt_do_add(struct otx2_nic *pf, const u8 *mac,
+ u8 *dmac_index)
+{
+ struct cgx_mac_addr_add_req *req;
+ struct cgx_mac_addr_add_rsp *rsp;
+ int err;
+
+ mutex_lock(&pf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_cgx_mac_addr_add(&pf->mbox);
+ if (!req) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ ether_addr_copy(req->mac_addr, mac);
+ err = otx2_sync_mbox_msg(&pf->mbox);
+
+ if (!err) {
+ rsp = (struct cgx_mac_addr_add_rsp *)
+ otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
+ *dmac_index = rsp->index;
+ }
+
+ mutex_unlock(&pf->mbox.lock);
+ return err;
+}
+
+static int otx2_dmacflt_add_pfmac(struct otx2_nic *pf)
+{
+ struct cgx_mac_addr_set_or_get *req;
+ int err;
+
+ mutex_lock(&pf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_cgx_mac_addr_set(&pf->mbox);
+ if (!req) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ ether_addr_copy(req->mac_addr, pf->netdev->dev_addr);
+ err = otx2_sync_mbox_msg(&pf->mbox);
+
+ mutex_unlock(&pf->mbox.lock);
+ return err;
+}
+
+int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u8 bit_pos)
+{
+ u8 *dmacindex;
+
+ /* Store the DMAC index returned by the CGX/RPM driver; it will
+ * be used for MAC address update/remove.
+ */
+ dmacindex = &pf->flow_cfg->bmap_to_dmacindex[bit_pos];
+
+ if (ether_addr_equal(mac, pf->netdev->dev_addr))
+ return otx2_dmacflt_add_pfmac(pf);
+ else
+ return otx2_dmacflt_do_add(pf, mac, dmacindex);
+}
+
+static int otx2_dmacflt_do_remove(struct otx2_nic *pfvf, const u8 *mac,
+ u8 dmac_index)
+{
+ struct cgx_mac_addr_del_req *req;
+ int err;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_mac_addr_del(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->index = dmac_index;
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+
+ return err;
+}
+
+static int otx2_dmacflt_remove_pfmac(struct otx2_nic *pf)
+{
+ struct msg_req *req;
+ int err;
+
+ mutex_lock(&pf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_mac_addr_reset(&pf->mbox);
+ if (!req) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+
+ mutex_unlock(&pf->mbox.lock);
+ return err;
+}
+
+int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac,
+ u8 bit_pos)
+{
+ u8 dmacindex = pf->flow_cfg->bmap_to_dmacindex[bit_pos];
+
+ if (ether_addr_equal(mac, pf->netdev->dev_addr))
+ return otx2_dmacflt_remove_pfmac(pf);
+ else
+ return otx2_dmacflt_do_remove(pf, mac, dmacindex);
+}
+
+/* CGX/RPM blocks support a maximum of 32 unicast entries.
+ * In a typical configuration a MAC block is associated
+ * with 4 LMACs, so each LMAC gets 8 DMAC entries.
+ */
+int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf)
+{
+ struct cgx_max_dmac_entries_get_rsp *rsp;
+ struct msg_req *msg;
+ int err;
+
+ mutex_lock(&pf->mbox.lock);
+ msg = otx2_mbox_alloc_msg_cgx_mac_max_entries_get(&pf->mbox);
+
+ if (!msg) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ if (err)
+ goto out;
+
+ rsp = (struct cgx_max_dmac_entries_get_rsp *)
+ otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &msg->hdr);
+ pf->flow_cfg->dmacflt_max_flows = rsp->max_dmac_filters;
+
+out:
+ mutex_unlock(&pf->mbox.lock);
+ return err;
+}
+
+int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u8 bit_pos)
+{
+ struct cgx_mac_addr_update_req *req;
+ int rc;
+
+ mutex_lock(&pf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_cgx_mac_addr_update(&pf->mbox);
+
+ if (!req) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ ether_addr_copy(req->mac_addr, mac);
+ req->index = pf->flow_cfg->bmap_to_dmacindex[bit_pos];
+ rc = otx2_sync_mbox_msg(&pf->mbox);
+
+ mutex_unlock(&pf->mbox.lock);
+ return rc;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index c6d408de0605..94a4043d47a3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -1,11 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
-/* Marvell OcteonTx2 RVU Ethernet driver
+/* Marvell RVU Ethernet driver
*
- * Copyright (C) 2020 Marvell International Ltd.
+ * Copyright (C) 2020 Marvell.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/pci.h>
@@ -14,12 +11,20 @@
#include <linux/etherdevice.h>
#include <linux/log2.h>
#include <linux/net_tstamp.h>
+#include <linux/linkmode.h>
#include "otx2_common.h"
#include "otx2_ptp.h"
-#define DRV_NAME "octeontx2-nicpf"
-#define DRV_VF_NAME "octeontx2-nicvf"
+#define DRV_NAME "rvu-nicpf"
+#define DRV_VF_NAME "rvu-nicvf"
+
+static const char otx2_priv_flags_strings[][ETH_GSTRING_LEN] = {
+ "pam4",
+ "edsa",
+ "higig2",
+ "fdsa",
+};
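+
+/* Exposed as ethtool private flags, e.g. (illustrative):
+ *   ethtool --show-priv-flags eth0
+ *   ethtool --set-priv-flags eth0 higig2 on
+ */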
struct otx2_stat {
char name[ETH_GSTRING_LEN];
@@ -32,6 +37,11 @@ struct otx2_stat {
.index = offsetof(struct otx2_dev_stats, stat) / sizeof(u64), \
}
+enum link_mode {
+ OTX2_MODE_SUPPORTED,
+ OTX2_MODE_ADVERTISED
+};
+
static const struct otx2_stat otx2_dev_stats[] = {
OTX2_DEV_STAT(rx_ucast_frames),
OTX2_DEV_STAT(rx_bcast_frames),
@@ -66,6 +76,8 @@ static const unsigned int otx2_n_dev_stats = ARRAY_SIZE(otx2_dev_stats);
static const unsigned int otx2_n_drv_stats = ARRAY_SIZE(otx2_drv_stats);
static const unsigned int otx2_n_queue_stats = ARRAY_SIZE(otx2_queue_stats);
+static struct cgx_fw_data *otx2_get_fwdata(struct otx2_nic *pfvf);
+
static void otx2_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
@@ -87,7 +99,7 @@ static void otx2_get_qset_strings(struct otx2_nic *pfvf, u8 **data, int qset)
*data += ETH_GSTRING_LEN;
}
}
- for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
+ for (qidx = 0; qidx < pfvf->hw.tot_tx_queues; qidx++) {
for (stats = 0; stats < otx2_n_queue_stats; stats++) {
sprintf(*data, "txq%d: %s", qidx + start_qidx,
otx2_queue_stats[stats].name);
@@ -101,6 +113,12 @@ static void otx2_get_strings(struct net_device *netdev, u32 sset, u8 *data)
struct otx2_nic *pfvf = netdev_priv(netdev);
int stats;
+ if (sset == ETH_SS_PRIV_FLAGS) {
+ memcpy(data, otx2_priv_flags_strings,
+ ARRAY_SIZE(otx2_priv_flags_strings) * ETH_GSTRING_LEN);
+ return;
+ }
+
if (sset != ETH_SS_STATS)
return;
@@ -116,18 +134,24 @@ static void otx2_get_strings(struct net_device *netdev, u32 sset, u8 *data)
otx2_get_qset_strings(pfvf, &data, 0);
- for (stats = 0; stats < CGX_RX_STATS_COUNT; stats++) {
- sprintf(data, "cgx_rxstat%d: ", stats);
- data += ETH_GSTRING_LEN;
- }
+ if (!test_bit(CN10K_RPM, &pfvf->hw.cap_flag)) {
+ for (stats = 0; stats < CGX_RX_STATS_COUNT; stats++) {
+ sprintf(data, "cgx_rxstat%d: ", stats);
+ data += ETH_GSTRING_LEN;
+ }
- for (stats = 0; stats < CGX_TX_STATS_COUNT; stats++) {
- sprintf(data, "cgx_txstat%d: ", stats);
- data += ETH_GSTRING_LEN;
+ for (stats = 0; stats < CGX_TX_STATS_COUNT; stats++) {
+ sprintf(data, "cgx_txstat%d: ", stats);
+ data += ETH_GSTRING_LEN;
+ }
}
strcpy(data, "reset_count");
data += ETH_GSTRING_LEN;
+ sprintf(data, "Fec Corrected Errors: ");
+ data += ETH_GSTRING_LEN;
+ sprintf(data, "Fec Uncorrected Errors: ");
+ data += ETH_GSTRING_LEN;
}
static void otx2_get_qset_stats(struct otx2_nic *pfvf,
@@ -148,7 +172,7 @@ static void otx2_get_qset_stats(struct otx2_nic *pfvf,
[otx2_queue_stats[stat].index];
}
- for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
+ for (qidx = 0; qidx < pfvf->hw.tot_tx_queues; qidx++) {
if (!otx2_update_sq_stats(pfvf, qidx)) {
for (stat = 0; stat < otx2_n_queue_stats; stat++)
*((*data)++) = 0;
@@ -160,11 +184,30 @@ static void otx2_get_qset_stats(struct otx2_nic *pfvf,
}
}
+static int otx2_get_phy_fec_stats(struct otx2_nic *pfvf)
+{
+ struct msg_req *req;
+ int rc = -ENOMEM;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_get_phy_fec_stats(&pfvf->mbox);
+ if (!req)
+ goto end;
+
+ if (!otx2_sync_mbox_msg(&pfvf->mbox))
+ rc = 0;
+end:
+ mutex_unlock(&pfvf->mbox.lock);
+ return rc;
+}
+
/* Get device and per queue statistics */
static void otx2_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
+ u64 fec_corr_blks, fec_uncorr_blks;
+ struct cgx_fw_data *rsp;
int stat;
otx2_get_dev_stats(pfvf);
@@ -177,27 +220,63 @@ static void otx2_get_ethtool_stats(struct net_device *netdev,
[otx2_drv_stats[stat].index]);
otx2_get_qset_stats(pfvf, stats, &data);
- otx2_update_lmac_stats(pfvf);
- for (stat = 0; stat < CGX_RX_STATS_COUNT; stat++)
- *(data++) = pfvf->hw.cgx_rx_stats[stat];
- for (stat = 0; stat < CGX_TX_STATS_COUNT; stat++)
- *(data++) = pfvf->hw.cgx_tx_stats[stat];
+
+ if (!test_bit(CN10K_RPM, &pfvf->hw.cap_flag)) {
+ otx2_update_lmac_stats(pfvf);
+ for (stat = 0; stat < CGX_RX_STATS_COUNT; stat++)
+ *(data++) = pfvf->hw.cgx_rx_stats[stat];
+ for (stat = 0; stat < CGX_TX_STATS_COUNT; stat++)
+ *(data++) = pfvf->hw.cgx_tx_stats[stat];
+ }
+
*(data++) = pfvf->reset_count;
+
+ fec_corr_blks = pfvf->hw.cgx_fec_corr_blks;
+ fec_uncorr_blks = pfvf->hw.cgx_fec_uncorr_blks;
+
+ rsp = otx2_get_fwdata(pfvf);
+ if (!IS_ERR(rsp) && rsp->fwdata.phy.misc.has_fec_stats &&
+ !otx2_get_phy_fec_stats(pfvf)) {
+ /* Fetch fwdata again because it's been recently populated with
+ * latest PHY FEC stats.
+ */
+ rsp = otx2_get_fwdata(pfvf);
+ if (!IS_ERR(rsp)) {
+ struct fec_stats_s *p = &rsp->fwdata.phy.fec_stats;
+
+ if (pfvf->linfo.fec == OTX2_FEC_BASER) {
+ fec_corr_blks = p->brfec_corr_blks;
+ fec_uncorr_blks = p->brfec_uncorr_blks;
+ } else {
+ fec_corr_blks = p->rsfec_corr_cws;
+ fec_uncorr_blks = p->rsfec_uncorr_cws;
+ }
+ }
+ }
+
+ *(data++) = fec_corr_blks;
+ *(data++) = fec_uncorr_blks;
}
static int otx2_get_sset_count(struct net_device *netdev, int sset)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
- int qstats_count;
+ int qstats_count, mac_stats = 0;
+
+ if (sset == ETH_SS_PRIV_FLAGS)
+ return ARRAY_SIZE(otx2_priv_flags_strings);
if (sset != ETH_SS_STATS)
return -EINVAL;
qstats_count = otx2_n_queue_stats *
- (pfvf->hw.rx_queues + pfvf->hw.tx_queues);
+ (pfvf->hw.rx_queues + pfvf->hw.tot_tx_queues);
+ if (!test_bit(CN10K_RPM, &pfvf->hw.cap_flag))
+ mac_stats = CGX_RX_STATS_COUNT + CGX_TX_STATS_COUNT;
+ otx2_update_lmac_fec_stats(pfvf);
return otx2_n_dev_stats + otx2_n_drv_stats + qstats_count +
- CGX_RX_STATS_COUNT + CGX_TX_STATS_COUNT + 1;
+ mac_stats + OTX2_FEC_STATS_CNT + 1;
}
/* Get no of queues device supports and current queue count */
@@ -224,6 +303,12 @@ static int otx2_set_channels(struct net_device *dev,
if (!channel->rx_count || !channel->tx_count)
return -EINVAL;
+ if (bitmap_weight(&pfvf->rq_bmap, pfvf->hw.rx_queues) > 1) {
+ netdev_err(dev,
+ "Receive queues are in use by TC police action\n");
+ return -EINVAL;
+ }
+
if (if_up)
dev->netdev_ops->ndo_stop(dev);
@@ -234,6 +319,9 @@ static int otx2_set_channels(struct net_device *dev,
pfvf->hw.rx_queues = channel->rx_count;
pfvf->hw.tx_queues = channel->tx_count;
+ if (pfvf->xdp_prog)
+ pfvf->hw.xdp_queues = channel->rx_count;
+ pfvf->hw.tot_tx_queues = pfvf->hw.tx_queues + pfvf->hw.xdp_queues;
pfvf->qset.cq_cnt = pfvf->hw.tx_queues + pfvf->hw.rx_queues;
if (if_up)
@@ -254,9 +342,12 @@ static void otx2_get_pauseparam(struct net_device *netdev,
if (is_otx2_lbkvf(pfvf->pdev))
return;
+ mutex_lock(&pfvf->mbox.lock);
req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(&pfvf->mbox);
- if (!req)
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
return;
+ }
if (!otx2_sync_mbox_msg(&pfvf->mbox)) {
rsp = (struct cgx_pause_frm_cfg *)
@@ -264,6 +355,7 @@ static void otx2_get_pauseparam(struct net_device *netdev,
pause->rx_pause = rsp->rx_pause;
pause->tx_pause = rsp->tx_pause;
}
+ mutex_unlock(&pfvf->mbox.lock);
}
static int otx2_set_pauseparam(struct net_device *netdev,
@@ -447,10 +539,14 @@ static int otx2_get_rss_hash_opts(struct otx2_nic *pfvf,
nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
break;
case AH_ESP_V4_FLOW:
+ case AH_ESP_V6_FLOW:
+ if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_ESP)
+ nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
case AH_V4_FLOW:
case ESP_V4_FLOW:
case IPV4_FLOW:
- case AH_ESP_V6_FLOW:
+ break;
case AH_V6_FLOW:
case ESP_V6_FLOW:
case IPV6_FLOW:
@@ -458,6 +554,7 @@ static int otx2_get_rss_hash_opts(struct otx2_nic *pfvf,
default:
return -EINVAL;
}
+
return 0;
}
@@ -526,6 +623,36 @@ static int otx2_set_rss_hash_opts(struct otx2_nic *pfvf,
return -EINVAL;
}
break;
+ case AH_ESP_V4_FLOW:
+ case AH_ESP_V6_FLOW:
+ switch (nfc->data & rxh_l4) {
+ case 0:
+ rss_cfg &= ~(NIX_FLOW_KEY_TYPE_ESP |
+ NIX_FLOW_KEY_TYPE_AH);
+ rss_cfg |= NIX_FLOW_KEY_TYPE_VLAN |
+ NIX_FLOW_KEY_TYPE_IPV4_PROTO;
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ /* If VLAN hashing is also requested for ESP then do not
+ * allow it, because of the hardware's 40-byte flow key limit.
+ */
+ if (rss_cfg & NIX_FLOW_KEY_TYPE_VLAN) {
+ netdev_err(pfvf->netdev,
+ "RSS hash of ESP or AH with VLAN is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ rss_cfg |= NIX_FLOW_KEY_TYPE_ESP | NIX_FLOW_KEY_TYPE_AH;
+ /* Disable IPv4 proto hashing since IPv6 SA+DA (32 bytes)
+ * and ESP SPI+sequence (8 bytes) already use the hardware
+ * maximum of 40 flow key bytes.
+ */
+ rss_cfg &= ~NIX_FLOW_KEY_TYPE_IPV4_PROTO;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
case IPV4_FLOW:
case IPV6_FLOW:
rss_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
@@ -542,6 +669,7 @@ static int otx2_set_rss_hash_opts(struct otx2_nic *pfvf,
static int otx2_get_rxnfc(struct net_device *dev,
struct ethtool_rxnfc *nfc, u32 *rules)
{
+ bool ntuple = !!(dev->features & NETIF_F_NTUPLE);
struct otx2_nic *pfvf = netdev_priv(dev);
int ret = -EOPNOTSUPP;
@@ -550,6 +678,20 @@ static int otx2_get_rxnfc(struct net_device *dev,
nfc->data = pfvf->hw.rx_queues;
ret = 0;
break;
+ case ETHTOOL_GRXCLSRLCNT:
+ if (netif_running(dev) && ntuple) {
+ nfc->rule_cnt = pfvf->flow_cfg->nr_flows;
+ ret = 0;
+ }
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ if (netif_running(dev) && ntuple)
+ ret = otx2_get_flow(pfvf, nfc, nfc->fs.location);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ if (netif_running(dev) && ntuple)
+ ret = otx2_get_all_flows(pfvf, nfc, rules);
+ break;
case ETHTOOL_GRXFH:
return otx2_get_rss_hash_opts(pfvf, nfc);
default:
@@ -560,6 +702,7 @@ static int otx2_get_rxnfc(struct net_device *dev,
static int otx2_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc)
{
+ bool ntuple = !!(dev->features & NETIF_F_NTUPLE);
struct otx2_nic *pfvf = netdev_priv(dev);
int ret = -EOPNOTSUPP;
@@ -567,6 +710,14 @@ static int otx2_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc)
case ETHTOOL_SRXFH:
ret = otx2_set_rss_hash_opts(pfvf, nfc);
break;
+ case ETHTOOL_SRXCLSRLINS:
+ if (netif_running(dev) && ntuple)
+ ret = otx2_add_flow(pfvf, nfc);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ if (netif_running(dev) && ntuple)
+ ret = otx2_remove_flow(pfvf, nfc->fs.location);
+ break;
default:
break;
}
@@ -586,46 +737,59 @@ static u32 otx2_get_rxfh_key_size(struct net_device *netdev)
static u32 otx2_get_rxfh_indir_size(struct net_device *dev)
{
- struct otx2_nic *pfvf = netdev_priv(dev);
-
- return pfvf->hw.rss_info.rss_size;
+ return MAX_RSS_INDIR_TBL_SIZE;
}
-/* Get RSS configuration */
-static int otx2_get_rxfh(struct net_device *dev, u32 *indir,
- u8 *hkey, u8 *hfunc)
+static int otx2_rss_ctx_delete(struct otx2_nic *pfvf, int ctx_id)
{
- struct otx2_nic *pfvf = netdev_priv(dev);
- struct otx2_rss_info *rss;
- int idx;
+ struct otx2_rss_info *rss = &pfvf->hw.rss_info;
- rss = &pfvf->hw.rss_info;
+ otx2_rss_ctx_flow_del(pfvf, ctx_id);
+ kfree(rss->rss_ctx[ctx_id]);
+ rss->rss_ctx[ctx_id] = NULL;
- if (indir) {
- for (idx = 0; idx < rss->rss_size; idx++)
- indir[idx] = rss->ind_tbl[idx];
- }
+ return 0;
+}
- if (hkey)
- memcpy(hkey, rss->key, sizeof(rss->key));
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+static int otx2_rss_ctx_create(struct otx2_nic *pfvf,
+ u32 *rss_context)
+{
+ struct otx2_rss_info *rss = &pfvf->hw.rss_info;
+ u8 ctx;
+
+ for (ctx = 0; ctx < MAX_RSS_GROUPS; ctx++) {
+ if (!rss->rss_ctx[ctx])
+ break;
+ }
+ if (ctx == MAX_RSS_GROUPS)
+ return -EINVAL;
+
+ rss->rss_ctx[ctx] = kzalloc(sizeof(*rss->rss_ctx[ctx]), GFP_KERNEL);
+ if (!rss->rss_ctx[ctx])
+ return -ENOMEM;
+ *rss_context = ctx;
return 0;
}
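+
+/* Userspace example (illustrative; needs an ethtool build with RSS
+ * context support):
+ *   ethtool -X eth0 context new equal 2
+ *   ethtool -N eth0 flow-type tcp4 dst-port 80 context 1
+ */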
-/* Configure RSS table and hash key */
-static int otx2_set_rxfh(struct net_device *dev, const u32 *indir,
- const u8 *hkey, const u8 hfunc)
+/* RSS context configuration */
+static int otx2_set_rxfh_context(struct net_device *dev, const u32 *indir,
+ const u8 *hkey, const u8 hfunc,
+ u32 *rss_context, bool delete)
{
struct otx2_nic *pfvf = netdev_priv(dev);
+ struct otx2_rss_ctx *rss_ctx;
struct otx2_rss_info *rss;
- int idx;
+ int ret, idx;
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
+ if (*rss_context != ETH_RXFH_CONTEXT_ALLOC &&
+ *rss_context >= MAX_RSS_GROUPS)
+ return -EINVAL;
+
rss = &pfvf->hw.rss_info;
if (!rss->enable) {
@@ -633,20 +797,83 @@ static int otx2_set_rxfh(struct net_device *dev, const u32 *indir,
return -EIO;
}
+ if (hkey) {
+ memcpy(rss->key, hkey, sizeof(rss->key));
+ otx2_set_rss_key(pfvf);
+ }
+ if (delete)
+ return otx2_rss_ctx_delete(pfvf, *rss_context);
+
+ if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
+ ret = otx2_rss_ctx_create(pfvf, rss_context);
+ if (ret)
+ return ret;
+ }
if (indir) {
+ rss_ctx = rss->rss_ctx[*rss_context];
for (idx = 0; idx < rss->rss_size; idx++)
- rss->ind_tbl[idx] = indir[idx];
+ rss_ctx->ind_tbl[idx] = indir[idx];
}
- if (hkey) {
- memcpy(rss->key, hkey, sizeof(rss->key));
- otx2_set_rss_key(pfvf);
+ return 0;
+}
+
+static int otx2_get_rxfh_context(struct net_device *dev, u32 *indir,
+ u8 *hkey, u8 *hfunc, u32 rss_context)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+ struct otx2_rss_ctx *rss_ctx;
+ struct otx2_rss_info *rss;
+ int idx, rx_queues;
+
+ rss = &pfvf->hw.rss_info;
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
+ if (!indir)
+ return 0;
+
+ if (!rss->enable && rss_context == DEFAULT_RSS_CONTEXT_GROUP) {
+ rx_queues = pfvf->hw.rx_queues;
+ for (idx = 0; idx < MAX_RSS_INDIR_TBL_SIZE; idx++)
+ indir[idx] = ethtool_rxfh_indir_default(idx, rx_queues);
+ return 0;
+ }
+ if (rss_context >= MAX_RSS_GROUPS)
+ return -ENOENT;
+
+ rss_ctx = rss->rss_ctx[rss_context];
+ if (!rss_ctx)
+ return -ENOENT;
+
+ if (indir) {
+ for (idx = 0; idx < rss->rss_size; idx++)
+ indir[idx] = rss_ctx->ind_tbl[idx];
}
+ if (hkey)
+ memcpy(hkey, rss->key, sizeof(rss->key));
- otx2_set_rss_table(pfvf);
return 0;
}
+/* Get RSS configuration */
+static int otx2_get_rxfh(struct net_device *dev, u32 *indir,
+ u8 *hkey, u8 *hfunc)
+{
+ return otx2_get_rxfh_context(dev, indir, hkey, hfunc,
+ DEFAULT_RSS_CONTEXT_GROUP);
+}
+
+/* Configure RSS table and hash key */
+static int otx2_set_rxfh(struct net_device *dev, const u32 *indir,
+ const u8 *hkey, const u8 hfunc)
+{
+ u32 rss_context = DEFAULT_RSS_CONTEXT_GROUP;
+
+ return otx2_set_rxfh_context(dev, indir, hkey, hfunc, &rss_context, 0);
+}
+
static u32 otx2_get_msglevel(struct net_device *netdev)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
@@ -688,15 +915,602 @@ static int otx2_get_ts_info(struct net_device *netdev,
info->phc_index = otx2_ptp_clock_index(pfvf);
- info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
+ if (test_bit(CN10K_PTP_ONESTEP, &pfvf->hw.cap_flag))
+ info->tx_types |= BIT(HWTSTAMP_TX_ONESTEP_SYNC);
+
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
+static struct cgx_fw_data *otx2_get_fwdata(struct otx2_nic *pfvf)
+{
+ struct cgx_fw_data *rsp = NULL;
+ struct msg_req *req;
+ int err = 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_get_aux_link_info(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (!err) {
+ rsp = (struct cgx_fw_data *)
+ otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+ } else {
+ rsp = ERR_PTR(err);
+ }
- info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
- (1 << HWTSTAMP_FILTER_ALL);
+ mutex_unlock(&pfvf->mbox.lock);
+ return rsp;
+}
+static int otx2_get_fecparam(struct net_device *netdev,
+ struct ethtool_fecparam *fecparam)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct cgx_fw_data *rsp;
+ const int fec[] = {
+ ETHTOOL_FEC_OFF,
+ ETHTOOL_FEC_BASER,
+ ETHTOOL_FEC_RS,
+ ETHTOOL_FEC_BASER | ETHTOOL_FEC_RS
+ };
+
+ if (pfvf->linfo.fec < ARRAY_SIZE(fec))
+ fecparam->active_fec = fec[pfvf->linfo.fec];
+
+ rsp = otx2_get_fwdata(pfvf);
+ if (IS_ERR(rsp))
+ return PTR_ERR(rsp);
+
+ if (rsp->fwdata.supported_fec < ARRAY_SIZE(fec)) {
+ if (!rsp->fwdata.supported_fec)
+ fecparam->fec = ETHTOOL_FEC_NONE;
+ else
+ fecparam->fec = fec[rsp->fwdata.supported_fec];
+ }
return 0;
}
-static const struct ethtool_ops otx2_ethtool_ops = {
+static int otx2_set_fecparam(struct net_device *netdev,
+ struct ethtool_fecparam *fecparam)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct mbox *mbox = &pfvf->mbox;
+ struct fec_mode *req, *rsp;
+ int err = 0, fec = 0;
+
+ switch (fecparam->fec) {
+ /* Firmware does not support AUTO mode; treat it as FEC_OFF */
+ case ETHTOOL_FEC_OFF:
+ case ETHTOOL_FEC_AUTO:
+ fec = OTX2_FEC_OFF;
+ break;
+ case ETHTOOL_FEC_RS:
+ fec = OTX2_FEC_RS;
+ break;
+ case ETHTOOL_FEC_BASER:
+ fec = OTX2_FEC_BASER;
+ break;
+ default:
+ netdev_warn(pfvf->netdev, "Unsupported FEC mode: %d",
+ fecparam->fec);
+ return -EINVAL;
+ }
+
+ if (fec == pfvf->linfo.fec)
+ return 0;
+
+ mutex_lock(&mbox->lock);
+ req = otx2_mbox_alloc_msg_cgx_set_fec_param(&pfvf->mbox);
+ if (!req) {
+ err = -ENOMEM;
+ goto end;
+ }
+ req->fec = fec;
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err)
+ goto end;
+
+ rsp = (struct fec_mode *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
+ 0, &req->hdr);
+ if (rsp->fec >= 0)
+ pfvf->linfo.fec = rsp->fec;
+ else
+ err = rsp->fec;
+end:
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+}
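+
+/* Userspace example (illustrative):
+ *   ethtool --set-fec eth0 encoding rs
+ *   ethtool --show-fec eth0
+ */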
+
+static void otx2_get_fec_info(u64 index, int req_mode,
+ struct ethtool_link_ksettings *link_ksettings)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(otx2_fec_modes) = { 0, };
+
+ switch (index) {
+ case OTX2_FEC_NONE:
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
+ otx2_fec_modes);
+ break;
+ case OTX2_FEC_BASER:
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
+ otx2_fec_modes);
+ break;
+ case OTX2_FEC_RS:
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
+ otx2_fec_modes);
+ break;
+ case OTX2_FEC_BASER | OTX2_FEC_RS:
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
+ otx2_fec_modes);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
+ otx2_fec_modes);
+ break;
+ }
+
+ /* Add fec modes to existing modes */
+ if (req_mode == OTX2_MODE_ADVERTISED)
+ linkmode_or(link_ksettings->link_modes.advertising,
+ link_ksettings->link_modes.advertising,
+ otx2_fec_modes);
+ else
+ linkmode_or(link_ksettings->link_modes.supported,
+ link_ksettings->link_modes.supported,
+ otx2_fec_modes);
+}
+
+static void otx2_get_link_mode_info(u64 link_mode_bmap,
+ bool req_mode,
+ struct ethtool_link_ksettings
+ *link_ksettings)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(otx2_link_modes) = { 0, };
+ const int otx2_sgmii_features[6] = {
+ ETHTOOL_LINK_MODE_10baseT_Half_BIT,
+ ETHTOOL_LINK_MODE_10baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+ ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ };
+ /* CGX link modes to Ethtool link mode mapping */
+ const int cgx_link_mode[38] = {
+ 0, /*SGMII Mode */
+ ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT,
+ ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
+ ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
+ ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseDR_Full_BIT,
+ ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
+ ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT,
+ ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT
+ };
+ u8 bit;
+
+ for_each_set_bit(bit, (unsigned long *)&link_mode_bmap,
+ ARRAY_SIZE(cgx_link_mode)) {
+ /* SGMII mode is set */
+ if (bit == 0)
+ linkmode_set_bit_array(otx2_sgmii_features,
+ ARRAY_SIZE(otx2_sgmii_features),
+ otx2_link_modes);
+ else
+ linkmode_set_bit(cgx_link_mode[bit], otx2_link_modes);
+ }
+
+ if (req_mode == OTX2_MODE_ADVERTISED)
+ linkmode_or(link_ksettings->link_modes.advertising,
+ link_ksettings->link_modes.advertising,
+ otx2_link_modes);
+ else
+ linkmode_or(link_ksettings->link_modes.supported,
+ link_ksettings->link_modes.supported,
+ otx2_link_modes);
+}
+
+static int otx2_get_module_info(struct net_device *netdev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct cgx_fw_data *rsp;
+
+ rsp = otx2_get_fwdata(pfvf);
+ if (IS_ERR(rsp))
+ return PTR_ERR(rsp);
+
+ modinfo->type = rsp->fwdata.sfp_eeprom.sff_id;
+ modinfo->eeprom_len = SFP_EEPROM_SIZE;
+ return 0;
+}
+
+static int otx2_get_module_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *ee,
+ u8 *data)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct cgx_fw_data *rsp;
+
+ rsp = otx2_get_fwdata(pfvf);
+ if (IS_ERR(rsp))
+ return PTR_ERR(rsp);
+
+ memcpy(data, &rsp->fwdata.sfp_eeprom.buf, ee->len);
+
+ return 0;
+}
+
+static int otx2_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct cgx_fw_data *rsp = NULL;
+
+ cmd->base.duplex = pfvf->linfo.full_duplex;
+ cmd->base.speed = pfvf->linfo.speed;
+ cmd->base.autoneg = pfvf->linfo.an;
+
+ rsp = otx2_get_fwdata(pfvf);
+ if (IS_ERR(rsp))
+ return PTR_ERR(rsp);
+
+ if (rsp->fwdata.supported_an)
+ ethtool_link_ksettings_add_link_mode(cmd,
+ supported,
+ Autoneg);
+
+ otx2_get_link_mode_info(rsp->fwdata.advertised_link_modes,
+ OTX2_MODE_ADVERTISED, cmd);
+ otx2_get_fec_info(rsp->fwdata.advertised_fec,
+ OTX2_MODE_ADVERTISED, cmd);
+ otx2_get_link_mode_info(rsp->fwdata.supported_link_modes,
+ OTX2_MODE_SUPPORTED, cmd);
+ otx2_get_fec_info(rsp->fwdata.supported_fec,
+ OTX2_MODE_SUPPORTED, cmd);
+ return 0;
+}
+
+static void otx2_get_advertised_mode(const struct ethtool_link_ksettings *cmd,
+ u64 *mode)
+{
+ u32 bit_pos;
+
+ /* Firmware does not support requesting multiple advertised modes;
+ * return the first set bit.
+ */
+ bit_pos = find_first_bit(cmd->link_modes.advertising,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+ if (bit_pos != __ETHTOOL_LINK_MODE_MASK_NBITS)
+ *mode = bit_pos;
+}
+
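+/* ethtool's phy_address field is overloaded here: a value of
+ * OTX2_OVERWRITE_DEF (1) tells the driver to push the user supplied
+ * speed/duplex/autoneg to firmware; otherwise those fields are sent as
+ * UNKNOWN and only the advertised link mode is changed.
+ */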
+#define OTX2_OVERWRITE_DEF 1
+static int otx2_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ struct ethtool_link_ksettings cur_ks;
+ struct cgx_set_link_mode_req *req;
+ struct mbox *mbox = &pf->mbox;
+ int err = 0;
+
+ memset(&cur_ks, 0, sizeof(struct ethtool_link_ksettings));
+
+ if (!ethtool_validate_speed(cmd->base.speed) ||
+ !ethtool_validate_duplex(cmd->base.duplex))
+ return -EINVAL;
+
+ if (cmd->base.autoneg != AUTONEG_ENABLE &&
+ cmd->base.autoneg != AUTONEG_DISABLE)
+ return -EINVAL;
+
+ otx2_get_link_ksettings(netdev, &cur_ks);
+
+ /* Check requested modes against supported modes by hardware */
+ if (!bitmap_subset(cmd->link_modes.advertising,
+ cur_ks.link_modes.supported,
+ __ETHTOOL_LINK_MODE_MASK_NBITS))
+ return -EINVAL;
+
+ mutex_lock(&mbox->lock);
+ req = otx2_mbox_alloc_msg_cgx_set_link_mode(&pf->mbox);
+ if (!req) {
+ err = -ENOMEM;
+ goto end;
+ }
+
+ if (cmd->base.phy_address == OTX2_OVERWRITE_DEF) {
+ req->args.speed = cmd->base.speed;
+ /* Firmware expects 1 for half duplex and 0 for full duplex,
+ * hence the inversion.
+ */
+ req->args.duplex = cmd->base.duplex ^ 0x1;
+ req->args.an = cmd->base.autoneg;
+ } else {
+ req->args.speed = SPEED_UNKNOWN;
+ req->args.duplex = DUPLEX_UNKNOWN;
+ req->args.an = AUTONEG_UNKNOWN;
+ }
+
+ otx2_get_advertised_mode(cmd, &req->args.mode);
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+end:
+ mutex_unlock(&mbox->lock);
+ return err;
+}
+
+static u32 otx2_get_priv_flags(struct net_device *netdev)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct cgx_fw_data *rsp;
+
+ rsp = otx2_get_fwdata(pfvf);
+
+ if (IS_ERR(rsp)) {
+ pfvf->ethtool_flags &= ~OTX2_PRIV_FLAG_PAM4;
+ } else {
+ if (rsp->fwdata.phy.misc.mod_type)
+ pfvf->ethtool_flags |= OTX2_PRIV_FLAG_PAM4;
+ else
+ pfvf->ethtool_flags &= ~OTX2_PRIV_FLAG_PAM4;
+ }
+
+ return pfvf->ethtool_flags;
+}
+
+static int otx2_set_phy_mod_type(struct net_device *netdev, bool enable)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct cgx_phy_mod_type *req;
+ struct cgx_fw_data *fwd;
+ int rc = -EAGAIN;
+
+ fwd = otx2_get_fwdata(pfvf);
+ if (IS_ERR(fwd))
+ return -EAGAIN;
+
+ /* Return here if the PHY does not support this feature */
+ if (!fwd->fwdata.phy.misc.can_change_mod_type)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_set_phy_mod_type(&pfvf->mbox);
+ if (!req)
+ goto end;
+
+ req->mod = enable;
+
+ if (!otx2_sync_mbox_msg(&pfvf->mbox))
+ rc = 0;
+end:
+ mutex_unlock(&pfvf->mbox.lock);
+ return rc;
+}
+
+int otx2_set_npc_parse_mode(struct otx2_nic *pfvf, bool unbind)
+{
+ struct npc_set_pkind *req;
+ u32 interface_mode = 0;
+ int rc = -EAGAIN;
+
+ if (OTX2_IS_DEF_MODE_ENABLED(pfvf->ethtool_flags))
+ return 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_set_pkind(&pfvf->mbox);
+ if (!req)
+ goto end;
+
+ if (unbind) {
+ req->mode = OTX2_PRIV_FLAGS_DEFAULT;
+ interface_mode = OTX2_PRIV_FLAG_DEF_MODE;
+ } else if (OTX2_IS_HIGIG2_ENABLED(pfvf->ethtool_flags)) {
+ req->mode = OTX2_PRIV_FLAGS_HIGIG;
+ interface_mode = OTX2_PRIV_FLAG_HIGIG2_HDR;
+ } else if (OTX2_IS_EDSA_ENABLED(pfvf->ethtool_flags)) {
+ req->mode = OTX2_PRIV_FLAGS_EDSA;
+ interface_mode = OTX2_PRIV_FLAG_EDSA_HDR;
+ } else if (pfvf->ethtool_flags & OTX2_PRIV_FLAG_FDSA_HDR) {
+ req->mode = OTX2_PRIV_FLAGS_FDSA;
+ interface_mode = OTX2_PRIV_FLAG_FDSA_HDR;
+ } else {
+ req->mode = OTX2_PRIV_FLAGS_DEFAULT;
+ interface_mode = OTX2_PRIV_FLAG_DEF_MODE;
+ }
+
+ req->dir = PKIND_RX;
+
+ /* Request AF to change pkind in both directions */
+ if (req->mode == OTX2_PRIV_FLAGS_HIGIG ||
+ req->mode == OTX2_PRIV_FLAGS_DEFAULT)
+ req->dir |= PKIND_TX;
+
+ if (!otx2_sync_mbox_msg(&pfvf->mbox))
+ rc = 0;
+ else
+ pfvf->ethtool_flags &= ~interface_mode;
+end:
+ mutex_unlock(&pfvf->mbox.lock);
+ return rc;
+}
+
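+/* Enable/disable an additional L2 header mode (EDSA/HIGIG2/FDSA).
+ * The interface is brought down, addl_mtu/xtra_hdr are updated so that
+ * receive buffers account for the extra header bytes, and otx2_open()
+ * reprograms the NPC parse mode on the way back up.
+ */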
+static int otx2_enable_addl_header(struct net_device *netdev, int bitpos,
+ u32 len, bool enable)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ bool if_up = netif_running(netdev);
+
+ if (enable) {
+ pfvf->ethtool_flags |= BIT(bitpos);
+ pfvf->ethtool_flags &= ~OTX2_PRIV_FLAG_DEF_MODE;
+ } else {
+ pfvf->ethtool_flags &= ~BIT(bitpos);
+ len = 0;
+ }
+
+ if (if_up)
+ otx2_stop(netdev);
+
+ /* Update max FRS so that additional hdrs are considered */
+ pfvf->addl_mtu = len;
+
+ /* In case HIGIG2 mode is set, the packet will have 16 bytes of
+ * extra header at the start which the stack does not need.
+ */
+ if (OTX2_IS_HIGIG2_ENABLED(pfvf->ethtool_flags))
+ pfvf->xtra_hdr = 16;
+ else
+ pfvf->xtra_hdr = 0;
+
+ /* NPC parse mode will be updated here */
+ if (if_up) {
+ otx2_open(netdev);
+
+ if (!enable)
+ pfvf->ethtool_flags |= OTX2_PRIV_FLAG_DEF_MODE;
+ }
+
+ return 0;
+}
+
+/* This function disables VF VLAN rules when FDSA is enabled,
+ * and vice versa.
+ */
+static void otx2_endis_vfvlan_rules(struct otx2_nic *pfvf, bool enable)
+{
+ struct vfvlan *rule;
+ int vf;
+
+ for (vf = 0; vf < pci_num_vf(pfvf->pdev); vf++) {
+ /* pass vlan as 0 to disable rule */
+ if (enable) {
+ otx2_do_set_vf_vlan(pfvf, vf, 0, 0, 0);
+ } else {
+ rule = &pfvf->vf_configs[vf].rule;
+ otx2_do_set_vf_vlan(pfvf, vf, rule->vlan, rule->qos,
+ rule->proto);
+ }
+ }
+}
+
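+/* ethtool --set-priv-flags handler. Since several of the interface
+ * modes are mutually exclusive, only one flag may change per call,
+ * e.g. (the exact flag names depend on the driver's private flag
+ * strings defined elsewhere):
+ *   ethtool --set-priv-flags <iface> <mode-flag> on
+ */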
+static int otx2_set_priv_flags(struct net_device *netdev, u32 new_flags)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ bool enable = false;
+ int bitnr, rc = 0;
+ u32 chg_flags;
+
+ /* Get latest PAM4 settings */
+ otx2_get_priv_flags(netdev);
+
+ chg_flags = new_flags ^ pfvf->ethtool_flags;
+ if (!chg_flags)
+ return 0;
+
+ /* Some flags are mutually exclusive, so allow only one change at a time */
+ if (hweight32(chg_flags) != 1)
+ return -EINVAL;
+
+ bitnr = ffs(chg_flags) - 1;
+ if (new_flags & BIT(bitnr))
+ enable = true;
+
+ if ((BIT(bitnr) != OTX2_PRIV_FLAG_PAM4) && (pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED)) {
+ netdev_info(netdev, "Can't enable requested mode when PTP HW timestamping is ON\n");
+ return -EINVAL;
+ }
+
+ switch (BIT(bitnr)) {
+ case OTX2_PRIV_FLAG_PAM4:
+ rc = otx2_set_phy_mod_type(netdev, enable);
+ break;
+ case OTX2_PRIV_FLAG_EDSA_HDR:
+ /* HIGIG & EDSA are mutually exclusive */
+ if (enable && OTX2_IS_INTFMOD_SET(pfvf->ethtool_flags)) {
+ netdev_info(netdev,
+ "Disable mutually exclusive modes higig2/fdsa\n");
+ return -EINVAL;
+ }
+ return otx2_enable_addl_header(netdev, bitnr,
+ OTX2_EDSA_HDR_LEN, enable);
+ case OTX2_PRIV_FLAG_HIGIG2_HDR:
+ if (test_bit(CN10K_RPM, &pfvf->hw.cap_flag))
+ return -EOPNOTSUPP;
+
+ if (enable && OTX2_IS_INTFMOD_SET(pfvf->ethtool_flags)) {
+ netdev_info(netdev,
+ "Disable mutually exclusive modes edsa/fdsa\n");
+ return -EINVAL;
+ }
+ return otx2_enable_addl_header(netdev, bitnr,
+ OTX2_HIGIG2_HDR_LEN, enable);
+ case OTX2_PRIV_FLAG_FDSA_HDR:
+ if (enable && OTX2_IS_INTFMOD_SET(pfvf->ethtool_flags)) {
+ netdev_info(netdev,
+ "Disable mutually exclusive modes edsa/higig2\n");
+ return -EINVAL;
+ }
+ otx2_enable_addl_header(netdev, bitnr,
+ OTX2_FDSA_HDR_LEN, enable);
+ if (enable)
+ netdev_warn(netdev,
+ "Disabling VF VLAN rules as FDSA & VFVLAN are mutual exclusive\n");
+ otx2_endis_vfvlan_rules(pfvf, enable);
+ break;
+ default:
+ break;
+ }
+
+ /* save the change */
+ if (!rc) {
+ if (enable)
+ pfvf->ethtool_flags |= BIT(bitnr);
+ else
+ pfvf->ethtool_flags &= ~BIT(bitnr);
+ }
+
+ return rc;
+}
+
+static struct ethtool_ops otx2_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES,
.get_link = otx2_get_link,
@@ -716,11 +1530,21 @@ static const struct ethtool_ops otx2_ethtool_ops = {
.get_rxfh_indir_size = otx2_get_rxfh_indir_size,
.get_rxfh = otx2_get_rxfh,
.set_rxfh = otx2_set_rxfh,
+ .get_rxfh_context = otx2_get_rxfh_context,
+ .set_rxfh_context = otx2_set_rxfh_context,
+ .get_ts_info = otx2_get_ts_info,
+ .get_link_ksettings = otx2_get_link_ksettings,
+ .set_link_ksettings = otx2_set_link_ksettings,
.get_msglevel = otx2_get_msglevel,
.set_msglevel = otx2_set_msglevel,
.get_pauseparam = otx2_get_pauseparam,
.set_pauseparam = otx2_set_pauseparam,
- .get_ts_info = otx2_get_ts_info,
+ .get_fecparam = otx2_get_fecparam,
+ .set_fecparam = otx2_set_fecparam,
+ .get_module_info = otx2_get_module_info,
+ .get_module_eeprom = otx2_get_module_eeprom,
+ .get_priv_flags = otx2_get_priv_flags,
+ .set_priv_flags = otx2_set_priv_flags,
};
void otx2_set_ethtool_ops(struct net_device *netdev)
@@ -795,6 +1619,20 @@ static int otx2vf_get_sset_count(struct net_device *netdev, int sset)
return otx2_n_dev_stats + otx2_n_drv_stats + qstats_count + 1;
}
+static int otx2vf_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+
+ if (pfvf->pdev->device == PCI_DEVID_OCTEONTX2_RVU_AFVF) {
+ cmd->base.duplex = DUPLEX_FULL;
+ cmd->base.speed = SPEED_100000;
+ } else {
+ return otx2_get_link_ksettings(netdev, cmd);
+ }
+ return 0;
+}
+
static const struct ethtool_ops otx2vf_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES,
@@ -811,6 +1649,8 @@ static const struct ethtool_ops otx2vf_ethtool_ops = {
.get_rxfh_indir_size = otx2_get_rxfh_indir_size,
.get_rxfh = otx2_get_rxfh,
.set_rxfh = otx2_set_rxfh,
+ .get_rxfh_context = otx2_get_rxfh_context,
+ .set_rxfh_context = otx2_set_rxfh_context,
.get_ringparam = otx2_get_ringparam,
.set_ringparam = otx2_set_ringparam,
.get_coalesce = otx2_get_coalesce,
@@ -819,6 +1659,8 @@ static const struct ethtool_ops otx2vf_ethtool_ops = {
.set_msglevel = otx2_set_msglevel,
.get_pauseparam = otx2_get_pauseparam,
.set_pauseparam = otx2_set_pauseparam,
+ .get_link_ksettings = otx2vf_get_link_ksettings,
+ .get_ts_info = otx2_get_ts_info,
};
void otx2vf_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
new file mode 100644
index 000000000000..2187ea798d05
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
@@ -0,0 +1,1478 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <net/ipv6.h>
+#include <linux/sort.h>
+
+#include "otx2_common.h"
+
+#define OTX2_DEFAULT_ACTION 0x1
+#define FDSA_MAX_SPORT 32
+#define FDSA_SPORT_MASK 0xf8
+
+static int otx2_mcam_entry_init(struct otx2_nic *pfvf);
+
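+/* Software state for one installed flow: 'location' is the user-visible
+ * ethtool location, 'entry' is the hardware MCAM entry (or DMAC filter
+ * index for DMAC_FILTER_RULE flows) and 'rule_type' marks DMAC filter
+ * and PFC flow-control rules.
+ */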
+struct otx2_flow {
+ struct ethtool_rx_flow_spec flow_spec;
+ struct list_head list;
+ u32 location;
+ u16 entry;
+ bool is_vf;
+ u8 rss_ctx_id;
+#define DMAC_FILTER_RULE BIT(0)
+#define PFC_FLOWCTRL_RULE BIT(1)
+ u16 rule_type;
+ int vf;
+};
+
+enum dmac_req {
+ DMAC_ADDR_UPDATE,
+ DMAC_ADDR_DEL
+};
+
+static void otx2_clear_ntuple_flow_info(struct otx2_nic *pfvf, struct otx2_flow_config *flow_cfg)
+{
+ devm_kfree(pfvf->dev, flow_cfg->flow_ent);
+ flow_cfg->flow_ent = NULL;
+ flow_cfg->max_flows = 0;
+}
+
+static int otx2_free_ntuple_mcam_entries(struct otx2_nic *pfvf)
+{
+ struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
+ struct npc_mcam_free_entry_req *req;
+ int ent, err;
+
+ if (!flow_cfg->max_flows)
+ return 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+ for (ent = 0; ent < flow_cfg->max_flows; ent++) {
+ req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
+ if (!req)
+ break;
+
+ req->entry = flow_cfg->flow_ent[ent];
+
+ /* Send message to AF to free MCAM entries */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err)
+ break;
+ }
+ mutex_unlock(&pfvf->mbox.lock);
+ otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
+ return 0;
+}
+
+static int mcam_entry_cmp(const void *a, const void *b)
+{
+ return *(u16 *)a - *(u16 *)b;
+}
+
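+/* Allocate 'count' MCAM entries for ntuple filters. Allocation is done
+ * in chunks of at most NPC_MAX_NONCONTIG_ENTRIES per mailbox request
+ * and may yield fewer entries than requested; the number actually
+ * allocated is returned and stored as flow_cfg->max_flows.
+ */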
+int otx2_alloc_mcam_entries(struct otx2_nic *pfvf, u16 count)
+{
+ struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
+ struct npc_mcam_alloc_entry_req *req;
+ struct npc_mcam_alloc_entry_rsp *rsp;
+ int ent, allocated = 0;
+
+ /* Free current ones and allocate new ones with requested count */
+ otx2_free_ntuple_mcam_entries(pfvf);
+
+ if (!count)
+ return 0;
+
+ flow_cfg->flow_ent = devm_kmalloc_array(pfvf->dev, count,
+ sizeof(u16), GFP_KERNEL);
+ if (!flow_cfg->flow_ent) {
+ netdev_err(pfvf->netdev,
+ "%s: Unable to allocate memory for flow entries\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ /* A single request can allocate at most NPC_MAX_NONCONTIG_ENTRIES
+ * MCAM entries.
+ */
+ while (allocated < count) {
+ req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox);
+ if (!req)
+ goto exit;
+
+ req->contig = false;
+ req->count = (count - allocated) > NPC_MAX_NONCONTIG_ENTRIES ?
+ NPC_MAX_NONCONTIG_ENTRIES : count - allocated;
+
+ /* Allocate higher priority entries for the PF, so that VFs' entries
+ * are placed above the PF's.
+ */
+ if (!is_otx2_vf(pfvf->pcifunc)) {
+ req->priority = NPC_MCAM_HIGHER_PRIO;
+ req->ref_entry = flow_cfg->def_ent[0];
+ }
+
+ /* Send message to AF */
+ if (otx2_sync_mbox_msg(&pfvf->mbox))
+ goto exit;
+
+ rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
+ (&pfvf->mbox.mbox, 0, &req->hdr);
+
+ for (ent = 0; ent < rsp->count; ent++)
+ flow_cfg->flow_ent[ent + allocated] = rsp->entry_list[ent];
+
+ allocated += rsp->count;
+
+ /* If this request is not fulfilled, no need to send
+ * further requests.
+ */
+ if (rsp->count != req->count)
+ break;
+ }
+
+ /* Multiple MCAM entry alloc requests could result in non-sequential
+ * MCAM entries in the flow_ent[] array. Sort them in ascending order;
+ * otherwise the user-installed ntuple filter index and the MCAM entry
+ * index will not stay in sync.
+ */
+ if (allocated)
+ sort(&flow_cfg->flow_ent[0], allocated,
+ sizeof(flow_cfg->flow_ent[0]), mcam_entry_cmp, NULL);
+
+exit:
+ mutex_unlock(&pfvf->mbox.lock);
+
+ flow_cfg->max_flows = allocated;
+
+ if (allocated) {
+ pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
+ pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
+ }
+
+ if (allocated != count)
+ netdev_info(pfvf->netdev,
+ "Unable to allocate %d MCAM entries, got only %d\n",
+ count, allocated);
+ return allocated;
+}
+EXPORT_SYMBOL(otx2_alloc_mcam_entries);
+
+static int otx2_mcam_entry_init(struct otx2_nic *pfvf)
+{
+ struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
+ struct npc_mcam_alloc_entry_req *req;
+ struct npc_mcam_alloc_entry_rsp *rsp;
+ int vf_vlan_max_flows;
+ int ent, count;
+
+ vf_vlan_max_flows = pfvf->total_vfs * OTX2_PER_VF_VLAN_FLOWS;
+ count = OTX2_MAX_UNICAST_FLOWS +
+ OTX2_MAX_VLAN_FLOWS + vf_vlan_max_flows;
+
+ flow_cfg->def_ent = devm_kmalloc_array(pfvf->dev, count,
+ sizeof(u16), GFP_KERNEL);
+ if (!flow_cfg->def_ent)
+ return -ENOMEM;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->contig = false;
+ req->count = count;
+
+ /* Send message to AF */
+ if (otx2_sync_mbox_msg(&pfvf->mbox)) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -EINVAL;
+ }
+
+ rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
+ (&pfvf->mbox.mbox, 0, &req->hdr);
+
+ if (rsp->count != req->count) {
+ netdev_info(pfvf->netdev,
+ "Unable to allocate MCAM entries for ucast, vlan and vf_vlan\n");
+ mutex_unlock(&pfvf->mbox.lock);
+ devm_kfree(pfvf->dev, flow_cfg->def_ent);
+ return 0;
+ }
+
+ for (ent = 0; ent < rsp->count; ent++)
+ flow_cfg->def_ent[ent] = rsp->entry_list[ent];
+
+ flow_cfg->vf_vlan_offset = 0;
+ flow_cfg->unicast_offset = vf_vlan_max_flows;
+ flow_cfg->rx_vlan_offset = flow_cfg->unicast_offset +
+ OTX2_MAX_UNICAST_FLOWS;
+ pfvf->flags |= OTX2_FLAG_UCAST_FLTR_SUPPORT;
+ pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT;
+ pfvf->flags |= OTX2_FLAG_VF_VLAN_SUPPORT;
+
+ pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
+ mutex_unlock(&pfvf->mbox.lock);
+
+ /* Allocate entries for Ntuple filters */
+ count = otx2_alloc_mcam_entries(pfvf, OTX2_DEFAULT_FLOWCOUNT);
+ if (count <= 0) {
+ otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
+ return 0;
+ }
+
+ pfvf->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT;
+
+ return 0;
+}
+
+int otx2vf_mcam_flow_init(struct otx2_nic *pfvf)
+{
+ struct otx2_flow_config *flow_cfg;
+
+ pfvf->flow_cfg = devm_kzalloc(pfvf->dev,
+ sizeof(struct otx2_flow_config),
+ GFP_KERNEL);
+ if (!pfvf->flow_cfg)
+ return -ENOMEM;
+
+ flow_cfg = pfvf->flow_cfg;
+ INIT_LIST_HEAD(&flow_cfg->flow_list);
+ flow_cfg->max_flows = 0;
+
+ return 0;
+}
+EXPORT_SYMBOL(otx2vf_mcam_flow_init);
+
+int otx2_mcam_flow_init(struct otx2_nic *pf)
+{
+ int err;
+
+ pf->flow_cfg = devm_kzalloc(pf->dev, sizeof(struct otx2_flow_config),
+ GFP_KERNEL);
+ if (!pf->flow_cfg)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&pf->flow_cfg->flow_list);
+
+ /* Allocate bare minimum number of MCAM entries needed for
+ * unicast and ntuple filters.
+ */
+ err = otx2_mcam_entry_init(pf);
+ if (err)
+ return err;
+
+ /* Check whether MCAM entries were allocated */
+ if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
+ return 0;
+
+ pf->mac_table = devm_kzalloc(pf->dev, sizeof(struct otx2_mac_table)
+ * OTX2_MAX_UNICAST_FLOWS, GFP_KERNEL);
+
+ if (!pf->mac_table)
+ return -ENOMEM;
+
+ otx2_dmacflt_get_max_cnt(pf);
+
+ /* DMAC filters are not allocated */
+ if (!pf->flow_cfg->dmacflt_max_flows)
+ return 0;
+
+ pf->flow_cfg->bmap_to_dmacindex =
+ devm_kzalloc(pf->dev, sizeof(u8) *
+ pf->flow_cfg->dmacflt_max_flows,
+ GFP_KERNEL);
+
+ if (!pf->flow_cfg->bmap_to_dmacindex)
+ return -ENOMEM;
+
+ pf->flags |= OTX2_FLAG_DMACFLTR_SUPPORT;
+
+ return 0;
+}
+
+void otx2_mcam_flow_del(struct otx2_nic *pf)
+{
+ otx2_destroy_mcam_flows(pf);
+}
+EXPORT_SYMBOL(otx2_mcam_flow_del);
+
+/* On success adds an MCAM entry.
+ * On failure enable promiscuous mode.
+ */
+static int otx2_do_add_macfilter(struct otx2_nic *pf, const u8 *mac)
+{
+ struct otx2_flow_config *flow_cfg = pf->flow_cfg;
+ struct npc_install_flow_req *req;
+ int err, i;
+
+ if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
+ return -ENOMEM;
+
+ /* Don't have free MCAM entries, or the UC list exceeds the allotted count */
+ if (netdev_uc_count(pf->netdev) > OTX2_MAX_UNICAST_FLOWS)
+ return -ENOMEM;
+
+ mutex_lock(&pf->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
+ if (!req) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ /* Unicast offset starts at 32; entries 0..31 are for ntuple */
+ for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
+ if (pf->mac_table[i].inuse)
+ continue;
+ ether_addr_copy(pf->mac_table[i].addr, mac);
+ pf->mac_table[i].inuse = true;
+ pf->mac_table[i].mcam_entry =
+ flow_cfg->def_ent[i + flow_cfg->unicast_offset];
+ req->entry = pf->mac_table[i].mcam_entry;
+ break;
+ }
+
+ ether_addr_copy(req->packet.dmac, mac);
+ eth_broadcast_addr((u8 *)&req->mask.dmac);
+ req->features = BIT_ULL(NPC_DMAC);
+ req->channel = pf->hw.rx_chan_base;
+ req->intf = NIX_INTF_RX;
+ req->op = NIX_RX_ACTION_DEFAULT;
+ req->set_cntr = 1;
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ mutex_unlock(&pf->mbox.lock);
+
+ return err;
+}
+
+int otx2_add_macfilter(struct net_device *netdev, const u8 *mac)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+
+ if (bitmap_weight(&pf->flow_cfg->dmacflt_bmap,
+ pf->flow_cfg->dmacflt_max_flows))
+ netdev_warn(netdev,
+ "Add %pM to CGX/RPM DMAC filters list as well\n",
+ mac);
+
+ return otx2_do_add_macfilter(pf, mac);
+}
+
+static bool otx2_get_mcamentry_for_mac(struct otx2_nic *pf, const u8 *mac,
+ int *mcam_entry)
+{
+ int i;
+
+ for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
+ if (!pf->mac_table[i].inuse)
+ continue;
+
+ if (ether_addr_equal(pf->mac_table[i].addr, mac)) {
+ *mcam_entry = pf->mac_table[i].mcam_entry;
+ pf->mac_table[i].inuse = false;
+ return true;
+ }
+ }
+ return false;
+}
+
+int otx2_del_macfilter(struct net_device *netdev, const u8 *mac)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ struct npc_delete_flow_req *req;
+ int err, mcam_entry;
+
+ /* Check whether an MCAM entry exists for the given MAC */
+ if (!otx2_get_mcamentry_for_mac(pf, mac, &mcam_entry))
+ return 0;
+
+ mutex_lock(&pf->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
+ if (!req) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+ req->entry = mcam_entry;
+ /* Send message to AF */
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ mutex_unlock(&pf->mbox.lock);
+
+ return err;
+}
+
+static struct otx2_flow *otx2_find_flow(struct otx2_nic *pfvf, u32 location)
+{
+ struct otx2_flow *iter;
+
+ list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
+ if (iter->location == location)
+ return iter;
+ }
+
+ return NULL;
+}
+
+static void otx2_add_flow_to_list(struct otx2_nic *pfvf, struct otx2_flow *flow)
+{
+ struct list_head *head = &pfvf->flow_cfg->flow_list;
+ struct otx2_flow *iter;
+
+ list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
+ if (iter->location > flow->location)
+ break;
+ head = &iter->list;
+ }
+
+ list_add(&flow->list, head);
+}
+
+int otx2_get_maxflows(struct otx2_flow_config *flow_cfg)
+{
+ if (!flow_cfg)
+ return 0;
+
+ if (flow_cfg->nr_flows == flow_cfg->max_flows ||
+ bitmap_weight(&flow_cfg->dmacflt_bmap,
+ flow_cfg->dmacflt_max_flows))
+ return flow_cfg->max_flows + flow_cfg->dmacflt_max_flows;
+ else
+ return flow_cfg->max_flows;
+}
+EXPORT_SYMBOL(otx2_get_maxflows);
+
+int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
+ u32 location)
+{
+ struct otx2_flow *iter;
+
+ if (location >= otx2_get_maxflows(pfvf->flow_cfg))
+ return -EINVAL;
+
+ list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
+ if (iter->location == location) {
+ nfc->fs = iter->flow_spec;
+ nfc->rss_context = iter->rss_ctx_id;
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+
+int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
+ u32 *rule_locs)
+{
+ u32 rule_cnt = nfc->rule_cnt;
+ u32 location = 0;
+ int idx = 0;
+ int err = 0;
+
+ nfc->data = otx2_get_maxflows(pfvf->flow_cfg);
+ while ((!err || err == -ENOENT) && idx < rule_cnt) {
+ err = otx2_get_flow(pfvf, nfc, location);
+ if (!err)
+ rule_locs[idx++] = location;
+ location++;
+ }
+ nfc->rule_cnt = rule_cnt;
+
+ return err;
+}
+
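+/* Fill the FDSA-specific parts of a flow request. When the source port
+ * comes from the user-defined data field (is_vlan == false) it is
+ * shifted into bits 3..7 of the tag, e.g. port 5 becomes 0x28, and is
+ * matched against FDSA_SPORT_MASK (0xf8). The matched tag is stripped
+ * via vtag type 6.
+ */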
+static void otx2_prepare_fdsa_flow_request(struct npc_install_flow_req *req,
+ bool is_vlan)
+{
+ struct flow_msg *pmask = &req->mask;
+ struct flow_msg *pkt = &req->packet;
+
+ /* In the FDSA tag, srcport occupies bits 3..7 */
+ if (!is_vlan) {
+ pkt->vlan_tci <<= 3;
+ pmask->vlan_tci = cpu_to_be16(FDSA_SPORT_MASK);
+ }
+ /* Strip FDSA tag */
+ req->features |= BIT_ULL(NPC_FDSA_VAL);
+ req->vtag0_valid = true;
+ req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE6;
+ req->op = NIX_RX_ACTION_DEFAULT;
+}
+
+static int otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp,
+ struct npc_install_flow_req *req,
+ u32 flow_type)
+{
+ struct ethtool_usrip4_spec *ipv4_usr_mask = &fsp->m_u.usr_ip4_spec;
+ struct ethtool_usrip4_spec *ipv4_usr_hdr = &fsp->h_u.usr_ip4_spec;
+ struct ethtool_tcpip4_spec *ipv4_l4_mask = &fsp->m_u.tcp_ip4_spec;
+ struct ethtool_tcpip4_spec *ipv4_l4_hdr = &fsp->h_u.tcp_ip4_spec;
+ struct ethtool_ah_espip4_spec *ah_esp_hdr = &fsp->h_u.ah_ip4_spec;
+ struct ethtool_ah_espip4_spec *ah_esp_mask = &fsp->m_u.ah_ip4_spec;
+ struct flow_msg *pmask = &req->mask;
+ struct flow_msg *pkt = &req->packet;
+
+ switch (flow_type) {
+ case IP_USER_FLOW:
+ if (ipv4_usr_mask->ip4src) {
+ memcpy(&pkt->ip4src, &ipv4_usr_hdr->ip4src,
+ sizeof(pkt->ip4src));
+ memcpy(&pmask->ip4src, &ipv4_usr_mask->ip4src,
+ sizeof(pmask->ip4src));
+ req->features |= BIT_ULL(NPC_SIP_IPV4);
+ }
+ if (ipv4_usr_mask->ip4dst) {
+ memcpy(&pkt->ip4dst, &ipv4_usr_hdr->ip4dst,
+ sizeof(pkt->ip4dst));
+ memcpy(&pmask->ip4dst, &ipv4_usr_mask->ip4dst,
+ sizeof(pmask->ip4dst));
+ req->features |= BIT_ULL(NPC_DIP_IPV4);
+ }
+ if (ipv4_usr_mask->tos) {
+ pkt->tos = ipv4_usr_hdr->tos;
+ pmask->tos = ipv4_usr_mask->tos;
+ req->features |= BIT_ULL(NPC_TOS);
+ }
+ if (ipv4_usr_mask->proto) {
+ switch (ipv4_usr_hdr->proto) {
+ case IPPROTO_ICMP:
+ req->features |= BIT_ULL(NPC_IPPROTO_ICMP);
+ break;
+ case IPPROTO_TCP:
+ req->features |= BIT_ULL(NPC_IPPROTO_TCP);
+ break;
+ case IPPROTO_UDP:
+ req->features |= BIT_ULL(NPC_IPPROTO_UDP);
+ break;
+ case IPPROTO_SCTP:
+ req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
+ break;
+ case IPPROTO_AH:
+ req->features |= BIT_ULL(NPC_IPPROTO_AH);
+ break;
+ case IPPROTO_ESP:
+ req->features |= BIT_ULL(NPC_IPPROTO_ESP);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+ pkt->etype = cpu_to_be16(ETH_P_IP);
+ pmask->etype = cpu_to_be16(0xFFFF);
+ req->features |= BIT_ULL(NPC_ETYPE);
+ break;
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ pkt->etype = cpu_to_be16(ETH_P_IP);
+ pmask->etype = cpu_to_be16(0xFFFF);
+ req->features |= BIT_ULL(NPC_ETYPE);
+ if (ipv4_l4_mask->ip4src) {
+ memcpy(&pkt->ip4src, &ipv4_l4_hdr->ip4src,
+ sizeof(pkt->ip4src));
+ memcpy(&pmask->ip4src, &ipv4_l4_mask->ip4src,
+ sizeof(pmask->ip4src));
+ req->features |= BIT_ULL(NPC_SIP_IPV4);
+ }
+ if (ipv4_l4_mask->ip4dst) {
+ memcpy(&pkt->ip4dst, &ipv4_l4_hdr->ip4dst,
+ sizeof(pkt->ip4dst));
+ memcpy(&pmask->ip4dst, &ipv4_l4_mask->ip4dst,
+ sizeof(pmask->ip4dst));
+ req->features |= BIT_ULL(NPC_DIP_IPV4);
+ }
+ if (ipv4_l4_mask->tos) {
+ pkt->tos = ipv4_l4_hdr->tos;
+ pmask->tos = ipv4_l4_mask->tos;
+ req->features |= BIT_ULL(NPC_TOS);
+ }
+ if (ipv4_l4_mask->psrc) {
+ memcpy(&pkt->sport, &ipv4_l4_hdr->psrc,
+ sizeof(pkt->sport));
+ memcpy(&pmask->sport, &ipv4_l4_mask->psrc,
+ sizeof(pmask->sport));
+ if (flow_type == UDP_V4_FLOW)
+ req->features |= BIT_ULL(NPC_SPORT_UDP);
+ else if (flow_type == TCP_V4_FLOW)
+ req->features |= BIT_ULL(NPC_SPORT_TCP);
+ else
+ req->features |= BIT_ULL(NPC_SPORT_SCTP);
+ }
+ if (ipv4_l4_mask->pdst) {
+ memcpy(&pkt->dport, &ipv4_l4_hdr->pdst,
+ sizeof(pkt->dport));
+ memcpy(&pmask->dport, &ipv4_l4_mask->pdst,
+ sizeof(pmask->dport));
+ if (flow_type == UDP_V4_FLOW)
+ req->features |= BIT_ULL(NPC_DPORT_UDP);
+ else if (flow_type == TCP_V4_FLOW)
+ req->features |= BIT_ULL(NPC_DPORT_TCP);
+ else
+ req->features |= BIT_ULL(NPC_DPORT_SCTP);
+ }
+ if (flow_type == UDP_V4_FLOW)
+ req->features |= BIT_ULL(NPC_IPPROTO_UDP);
+ else if (flow_type == TCP_V4_FLOW)
+ req->features |= BIT_ULL(NPC_IPPROTO_TCP);
+ else
+ req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
+ break;
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ pkt->etype = cpu_to_be16(ETH_P_IP);
+ pmask->etype = cpu_to_be16(0xFFFF);
+ req->features |= BIT_ULL(NPC_ETYPE);
+ if (ah_esp_mask->ip4src) {
+ memcpy(&pkt->ip4src, &ah_esp_hdr->ip4src,
+ sizeof(pkt->ip4src));
+ memcpy(&pmask->ip4src, &ah_esp_mask->ip4src,
+ sizeof(pmask->ip4src));
+ req->features |= BIT_ULL(NPC_SIP_IPV4);
+ }
+ if (ah_esp_mask->ip4dst) {
+ memcpy(&pkt->ip4dst, &ah_esp_hdr->ip4dst,
+ sizeof(pkt->ip4dst));
+ memcpy(&pmask->ip4dst, &ah_esp_mask->ip4dst,
+ sizeof(pmask->ip4dst));
+ req->features |= BIT_ULL(NPC_DIP_IPV4);
+ }
+ if (ah_esp_mask->tos) {
+ pkt->tos = ah_esp_hdr->tos;
+ pmask->tos = ah_esp_mask->tos;
+ req->features |= BIT_ULL(NPC_TOS);
+ }
+
+ /* NPC profile doesn't extract AH/ESP header fields */
+ if (ah_esp_mask->spi & ah_esp_hdr->spi)
+ return -EOPNOTSUPP;
+
+ if (flow_type == AH_V4_FLOW)
+ req->features |= BIT_ULL(NPC_IPPROTO_AH);
+ else
+ req->features |= BIT_ULL(NPC_IPPROTO_ESP);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp,
+ struct npc_install_flow_req *req,
+ u32 flow_type)
+{
+ struct ethtool_usrip6_spec *ipv6_usr_mask = &fsp->m_u.usr_ip6_spec;
+ struct ethtool_usrip6_spec *ipv6_usr_hdr = &fsp->h_u.usr_ip6_spec;
+ struct ethtool_tcpip6_spec *ipv6_l4_mask = &fsp->m_u.tcp_ip6_spec;
+ struct ethtool_tcpip6_spec *ipv6_l4_hdr = &fsp->h_u.tcp_ip6_spec;
+ struct ethtool_ah_espip6_spec *ah_esp_hdr = &fsp->h_u.ah_ip6_spec;
+ struct ethtool_ah_espip6_spec *ah_esp_mask = &fsp->m_u.ah_ip6_spec;
+ struct flow_msg *pmask = &req->mask;
+ struct flow_msg *pkt = &req->packet;
+
+ switch (flow_type) {
+ case IPV6_USER_FLOW:
+ if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6src)) {
+ memcpy(&pkt->ip6src, &ipv6_usr_hdr->ip6src,
+ sizeof(pkt->ip6src));
+ memcpy(&pmask->ip6src, &ipv6_usr_mask->ip6src,
+ sizeof(pmask->ip6src));
+ req->features |= BIT_ULL(NPC_SIP_IPV6);
+ }
+ if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6dst)) {
+ memcpy(&pkt->ip6dst, &ipv6_usr_hdr->ip6dst,
+ sizeof(pkt->ip6dst));
+ memcpy(&pmask->ip6dst, &ipv6_usr_mask->ip6dst,
+ sizeof(pmask->ip6dst));
+ req->features |= BIT_ULL(NPC_DIP_IPV6);
+ }
+ pkt->etype = cpu_to_be16(ETH_P_IPV6);
+ pmask->etype = cpu_to_be16(0xFFFF);
+ req->features |= BIT_ULL(NPC_ETYPE);
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ pkt->etype = cpu_to_be16(ETH_P_IPV6);
+ pmask->etype = cpu_to_be16(0xFFFF);
+ req->features |= BIT_ULL(NPC_ETYPE);
+ if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6src)) {
+ memcpy(&pkt->ip6src, &ipv6_l4_hdr->ip6src,
+ sizeof(pkt->ip6src));
+ memcpy(&pmask->ip6src, &ipv6_l4_mask->ip6src,
+ sizeof(pmask->ip6src));
+ req->features |= BIT_ULL(NPC_SIP_IPV6);
+ }
+ if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6dst)) {
+ memcpy(&pkt->ip6dst, &ipv6_l4_hdr->ip6dst,
+ sizeof(pkt->ip6dst));
+ memcpy(&pmask->ip6dst, &ipv6_l4_mask->ip6dst,
+ sizeof(pmask->ip6dst));
+ req->features |= BIT_ULL(NPC_DIP_IPV6);
+ }
+ if (ipv6_l4_mask->psrc) {
+ memcpy(&pkt->sport, &ipv6_l4_hdr->psrc,
+ sizeof(pkt->sport));
+ memcpy(&pmask->sport, &ipv6_l4_mask->psrc,
+ sizeof(pmask->sport));
+ if (flow_type == UDP_V6_FLOW)
+ req->features |= BIT_ULL(NPC_SPORT_UDP);
+ else if (flow_type == TCP_V6_FLOW)
+ req->features |= BIT_ULL(NPC_SPORT_TCP);
+ else
+ req->features |= BIT_ULL(NPC_SPORT_SCTP);
+ }
+ if (ipv6_l4_mask->pdst) {
+ memcpy(&pkt->dport, &ipv6_l4_hdr->pdst,
+ sizeof(pkt->dport));
+ memcpy(&pmask->dport, &ipv6_l4_mask->pdst,
+ sizeof(pmask->dport));
+ if (flow_type == UDP_V6_FLOW)
+ req->features |= BIT_ULL(NPC_DPORT_UDP);
+ else if (flow_type == TCP_V6_FLOW)
+ req->features |= BIT_ULL(NPC_DPORT_TCP);
+ else
+ req->features |= BIT_ULL(NPC_DPORT_SCTP);
+ }
+ if (flow_type == UDP_V6_FLOW)
+ req->features |= BIT_ULL(NPC_IPPROTO_UDP);
+ else if (flow_type == TCP_V6_FLOW)
+ req->features |= BIT_ULL(NPC_IPPROTO_TCP);
+ else
+ req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
+ break;
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ pkt->etype = cpu_to_be16(ETH_P_IPV6);
+ pmask->etype = cpu_to_be16(0xFFFF);
+ req->features |= BIT_ULL(NPC_ETYPE);
+ if (!ipv6_addr_any((struct in6_addr *)ah_esp_hdr->ip6src)) {
+ memcpy(&pkt->ip6src, &ah_esp_hdr->ip6src,
+ sizeof(pkt->ip6src));
+ memcpy(&pmask->ip6src, &ah_esp_mask->ip6src,
+ sizeof(pmask->ip6src));
+ req->features |= BIT_ULL(NPC_SIP_IPV6);
+ }
+ if (!ipv6_addr_any((struct in6_addr *)ah_esp_hdr->ip6dst)) {
+ memcpy(&pkt->ip6dst, &ah_esp_hdr->ip6dst,
+ sizeof(pkt->ip6dst));
+ memcpy(&pmask->ip6dst, &ah_esp_mask->ip6dst,
+ sizeof(pmask->ip6dst));
+ req->features |= BIT_ULL(NPC_DIP_IPV6);
+ }
+
+ /* NPC profile doesn't extract AH/ESP header fields */
+ if ((ah_esp_mask->spi & ah_esp_hdr->spi) ||
+ (ah_esp_mask->tclass & ah_esp_hdr->tclass))
+ return -EOPNOTSUPP;
+
+ if (flow_type == AH_V6_FLOW)
+ req->features |= BIT_ULL(NPC_IPPROTO_AH);
+ else
+ req->features |= BIT_ULL(NPC_IPPROTO_ESP);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
+ struct npc_install_flow_req *req,
+ struct otx2_nic *pfvf)
+{
+ struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
+ struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
+ struct flow_msg *pmask = &req->mask;
+ struct flow_msg *pkt = &req->packet;
+ u32 flow_type;
+ int ret;
+
+ flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
+ switch (flow_type) {
+ /* bits not set in mask are don't care */
+ case ETHER_FLOW:
+ if (!is_zero_ether_addr(eth_mask->h_source)) {
+ ether_addr_copy(pkt->smac, eth_hdr->h_source);
+ ether_addr_copy(pmask->smac, eth_mask->h_source);
+ req->features |= BIT_ULL(NPC_SMAC);
+ }
+ if (!is_zero_ether_addr(eth_mask->h_dest)) {
+ ether_addr_copy(pkt->dmac, eth_hdr->h_dest);
+ ether_addr_copy(pmask->dmac, eth_mask->h_dest);
+ req->features |= BIT_ULL(NPC_DMAC);
+ }
+ if (eth_hdr->h_proto) {
+ memcpy(&pkt->etype, &eth_hdr->h_proto,
+ sizeof(pkt->etype));
+ memcpy(&pmask->etype, &eth_mask->h_proto,
+ sizeof(pmask->etype));
+ req->features |= BIT_ULL(NPC_ETYPE);
+ }
+ break;
+ case IP_USER_FLOW:
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ ret = otx2_prepare_ipv4_flow(fsp, req, flow_type);
+ if (ret)
+ return ret;
+ break;
+ case IPV6_USER_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ ret = otx2_prepare_ipv6_flow(fsp, req, flow_type);
+ if (ret)
+ return ret;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ if (fsp->flow_type & FLOW_EXT) {
+ int skip_user_def = false;
+ u16 vlan_etype;
+
+ if (fsp->m_ext.vlan_etype) {
+ /* Partial masks not supported */
+ if (fsp->m_ext.vlan_etype != 0xFFFF)
+ return -EINVAL;
+
+ vlan_etype = be16_to_cpu(fsp->h_ext.vlan_etype);
+ /* Only ETH_P_8021Q and ETH_P_8021AD types are supported */
+ if (vlan_etype != ETH_P_8021Q &&
+ vlan_etype != ETH_P_8021AD)
+ return -EINVAL;
+
+ memcpy(&pkt->vlan_etype, &fsp->h_ext.vlan_etype,
+ sizeof(pkt->vlan_etype));
+ memcpy(&pmask->vlan_etype, &fsp->m_ext.vlan_etype,
+ sizeof(pmask->vlan_etype));
+
+ if (vlan_etype == ETH_P_8021Q)
+ req->features |= BIT_ULL(NPC_VLAN_ETYPE_CTAG);
+ else
+ req->features |= BIT_ULL(NPC_VLAN_ETYPE_STAG);
+ }
+ if (fsp->m_ext.vlan_tci) {
+ memcpy(&pkt->vlan_tci, &fsp->h_ext.vlan_tci,
+ sizeof(pkt->vlan_tci));
+ memcpy(&pmask->vlan_tci, &fsp->m_ext.vlan_tci,
+ sizeof(pmask->vlan_tci));
+
+ if (pfvf->ethtool_flags & OTX2_PRIV_FLAG_FDSA_HDR) {
+ otx2_prepare_fdsa_flow_request(req, true);
+ skip_user_def = true;
+ } else {
+ req->features |= BIT_ULL(NPC_OUTER_VID);
+ }
+ }
+
+ if (fsp->m_ext.data[1] && !skip_user_def) {
+ if (pfvf->ethtool_flags & OTX2_PRIV_FLAG_FDSA_HDR) {
+ if (be32_to_cpu(fsp->h_ext.data[1]) >=
+ FDSA_MAX_SPORT)
+ return -EINVAL;
+
+ memcpy(&pkt->vlan_tci,
+ (u8 *)&fsp->h_ext.data[1] + 2,
+ sizeof(pkt->vlan_tci));
+ otx2_prepare_fdsa_flow_request(req, false);
+ } else if (fsp->h_ext.data[1] ==
+ cpu_to_be32(OTX2_DEFAULT_ACTION)) {
+ /* Not Drop/Direct to queue but use action
+ * in default entry
+ */
+ req->op = NIX_RX_ACTION_DEFAULT;
+ }
+ }
+ }
+
+ if (fsp->flow_type & FLOW_MAC_EXT &&
+ !is_zero_ether_addr(fsp->m_ext.h_dest)) {
+ ether_addr_copy(pkt->dmac, fsp->h_ext.h_dest);
+ ether_addr_copy(pmask->dmac, fsp->m_ext.h_dest);
+ req->features |= BIT_ULL(NPC_DMAC);
+ }
+
+ if (!req->features)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int otx2_is_flow_rule_dmacfilter(struct otx2_nic *pfvf,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
+ struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
+ u64 ring_cookie = fsp->ring_cookie;
+ u32 flow_type;
+
+ if (!(pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT))
+ return false;
+
+ flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
+
+ /* CGX/RPM block DMAC filtering is configured for white listing;
+ * check for an action other than DROP.
+ */
+ if (flow_type == ETHER_FLOW && ring_cookie != RX_CLS_FLOW_DISC &&
+ !ethtool_get_flow_spec_ring_vf(ring_cookie)) {
+ if (is_zero_ether_addr(eth_mask->h_dest) &&
+ is_valid_ether_addr(eth_hdr->h_dest))
+ return true;
+ }
+
+ return false;
+}
+
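+/* Translate an ethtool flow spec into an NPC install request and send
+ * it to AF. ring_cookie selects the action: RX_CLS_FLOW_DISC drops the
+ * packet, FLOW_RSS steers to an RSS context, otherwise the packet is
+ * unicast to the given queue; a VF destination is encoded in the
+ * cookie as (VF index + 1).
+ */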
+static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
+{
+ u64 ring_cookie = flow->flow_spec.ring_cookie;
+#ifdef CONFIG_DCB
+ int vlan_prio, qidx, pfc_rule = 0;
+#endif
+ struct npc_install_flow_req *req;
+ int err, vf = 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ err = otx2_prepare_flow_request(&flow->flow_spec, req, pfvf);
+ if (err) {
+ /* free the allocated msg above */
+ otx2_mbox_reset(&pfvf->mbox.mbox, 0);
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+ }
+
+ req->entry = flow->entry;
+ req->intf = NIX_INTF_RX;
+ req->set_cntr = 1;
+ req->channel = pfvf->hw.rx_chan_base;
+ if (ring_cookie == RX_CLS_FLOW_DISC) {
+ req->op = NIX_RX_ACTIONOP_DROP;
+ } else {
+ /* Change to unicast only if the action of the default entry is
+ * not requested by the user.
+ */
+ if (flow->flow_spec.flow_type & FLOW_RSS) {
+ req->op = NIX_RX_ACTIONOP_RSS;
+ req->index = flow->rss_ctx_id;
+ req->flow_key_alg = pfvf->hw.flowkey_alg_idx;
+ } else {
+ req->op = NIX_RX_ACTIONOP_UCAST;
+ req->index = ethtool_get_flow_spec_ring(ring_cookie);
+ }
+ vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
+ if (vf > pci_num_vf(pfvf->pdev)) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -EINVAL;
+ }
+
+#ifdef CONFIG_DCB
+ /* Identify PFC rule if PFC enabled and ntuple rule is vlan */
+ if (!vf && (req->features & BIT_ULL(NPC_OUTER_VID)) &&
+ pfvf->pfc_en && req->op != NIX_RX_ACTIONOP_RSS) {
+ vlan_prio = ntohs(req->packet.vlan_tci) &
+ ntohs(req->mask.vlan_tci);
+
+ /* Get the priority */
+ vlan_prio >>= 13;
+ flow->rule_type |= PFC_FLOWCTRL_RULE;
+ /* Check if PFC enabled for this priority */
+ if (pfvf->pfc_en & BIT(vlan_prio)) {
+ pfc_rule = true;
+ qidx = req->index;
+ }
+ }
+#endif
+ }
+
+ /* ethtool ring_cookie encodes the VF as (VF index + 1) */
+ if (vf) {
+ req->vf = vf;
+ flow->is_vf = true;
+ flow->vf = vf;
+ }
+
+ /* Send message to AF */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+
+#ifdef CONFIG_DCB
+ if (!err && pfc_rule)
+ otx2_update_bpid_in_rqctx(pfvf, vlan_prio, qidx, true);
+#endif
+
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+}
+
+static int otx2_add_flow_with_pfmac(struct otx2_nic *pfvf,
+ struct otx2_flow *flow)
+{
+ struct ethhdr *eth_hdr;
+ struct otx2_flow *pf_mac;
+
+ pf_mac = kzalloc(sizeof(*pf_mac), GFP_KERNEL);
+ if (!pf_mac)
+ return -ENOMEM;
+
+ pf_mac->entry = 0;
+ pf_mac->rule_type |= DMAC_FILTER_RULE;
+ pf_mac->location = pfvf->flow_cfg->max_flows;
+ memcpy(&pf_mac->flow_spec, &flow->flow_spec,
+ sizeof(struct ethtool_rx_flow_spec));
+ pf_mac->flow_spec.location = pf_mac->location;
+
+ /* Copy PF mac address */
+ eth_hdr = &pf_mac->flow_spec.h_u.ether_spec;
+ ether_addr_copy(eth_hdr->h_dest, pfvf->netdev->dev_addr);
+
+ /* Install DMAC filter with PF mac address */
+ otx2_dmacflt_add(pfvf, eth_hdr->h_dest, 0);
+
+ otx2_add_flow_to_list(pfvf, pf_mac);
+ pfvf->flow_cfg->nr_flows++;
+ set_bit(0, &pfvf->flow_cfg->dmacflt_bmap);
+
+ return 0;
+}
+
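+/* Entry point for ethtool -U/-N rule insertion, e.g. (interface name
+ * assumed):
+ *   ethtool -U eth0 flow-type tcp4 dst-port 80 action 2 loc 0
+ * arrives here as a TCP_V4_FLOW spec with location 0 and ring 2.
+ * ETHER_FLOW rules matching only on a destination MAC are diverted to
+ * the CGX/RPM DMAC filter path instead of consuming an MCAM entry from
+ * the ntuple range.
+ */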
+int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
+{
+ struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
+ struct ethtool_rx_flow_spec *fsp = &nfc->fs;
+ struct otx2_flow *flow;
+ struct ethhdr *eth_hdr;
+ bool new = false;
+ int err = 0;
+ u32 ring;
+
+ if (!flow_cfg->max_flows) {
+ netdev_err(pfvf->netdev,
+ "Ntuple rule count is 0, allocate and retry\n");
+ return -EINVAL;
+ }
+
+ ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
+ if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
+ return -ENOMEM;
+
+ if (ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
+ return -EINVAL;
+
+ if (fsp->location >= otx2_get_maxflows(flow_cfg))
+ return -EINVAL;
+
+ flow = otx2_find_flow(pfvf, fsp->location);
+ if (!flow) {
+ flow = kzalloc(sizeof(*flow), GFP_KERNEL);
+ if (!flow)
+ return -ENOMEM;
+ flow->location = fsp->location;
+ flow->entry = flow_cfg->flow_ent[flow->location];
+ new = true;
+ }
+ /* struct copy */
+ flow->flow_spec = *fsp;
+
+ if (fsp->flow_type & FLOW_RSS)
+ flow->rss_ctx_id = nfc->rss_context;
+
+ if (otx2_is_flow_rule_dmacfilter(pfvf, &flow->flow_spec)) {
+ eth_hdr = &flow->flow_spec.h_u.ether_spec;
+
+ /* Sync dmac filter table with updated fields */
+ if (flow->rule_type & DMAC_FILTER_RULE)
+ return otx2_dmacflt_update(pfvf, eth_hdr->h_dest,
+ flow->entry);
+
+ if (bitmap_full(&flow_cfg->dmacflt_bmap,
+ flow_cfg->dmacflt_max_flows)) {
+ netdev_warn(pfvf->netdev,
+ "Can't insert the rule %d as max allowed dmac filters are %d\n",
+ flow->location +
+ flow_cfg->dmacflt_max_flows,
+ flow_cfg->dmacflt_max_flows);
+ err = -EINVAL;
+ if (new)
+ kfree(flow);
+ return err;
+ }
+
+ /* Install PF mac address to DMAC filter list */
+ if (!test_bit(0, &flow_cfg->dmacflt_bmap))
+ otx2_add_flow_with_pfmac(pfvf, flow);
+
+ flow->rule_type |= DMAC_FILTER_RULE;
+ flow->entry = find_first_zero_bit(&flow_cfg->dmacflt_bmap,
+ flow_cfg->dmacflt_max_flows);
+ fsp->location = flow_cfg->max_flows + flow->entry;
+ flow->flow_spec.location = fsp->location;
+ flow->location = fsp->location;
+
+ set_bit(flow->entry, &flow_cfg->dmacflt_bmap);
+ otx2_dmacflt_add(pfvf, eth_hdr->h_dest, flow->entry);
+
+ } else {
+ if (flow->location >= pfvf->flow_cfg->max_flows) {
+ netdev_warn(pfvf->netdev,
+ "Can't insert non dmac ntuple rule at %d, allowed range %d-0\n",
+ flow->location,
+ flow_cfg->max_flows - 1);
+ err = -EINVAL;
+ } else {
+ err = otx2_add_flow_msg(pfvf, flow);
+ }
+ }
+
+ if (err) {
+ if (err == MBOX_MSG_INVALID)
+ err = -EINVAL;
+ if (new)
+ kfree(flow);
+ return err;
+ }
+
+ /* add the new flow installed to list */
+ if (new) {
+ otx2_add_flow_to_list(pfvf, flow);
+ flow_cfg->nr_flows++;
+ }
+
+ return 0;
+}
+
+static int otx2_remove_flow_msg(struct otx2_nic *pfvf, u16 entry, bool all)
+{
+ struct npc_delete_flow_req *req;
+ int err;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->entry = entry;
+ if (all)
+ req->all = 1;
+
+ /* Send message to AF */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+}
+
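+/* Handle the PF MAC entry (bit 0) of the DMAC filter list: on
+ * DMAC_ADDR_DEL the entry is removed (done when the last user DMAC
+ * filter goes away), on DMAC_ADDR_UPDATE it is rewritten with the
+ * current netdev MAC address.
+ */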
+static void otx2_update_rem_pfmac(struct otx2_nic *pfvf, int req)
+{
+ struct otx2_flow *iter;
+ struct ethhdr *eth_hdr;
+ bool found = false;
+
+ list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
+ if ((iter->rule_type & DMAC_FILTER_RULE) && iter->entry == 0) {
+ eth_hdr = &iter->flow_spec.h_u.ether_spec;
+ if (req == DMAC_ADDR_DEL) {
+ otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
+ 0);
+ clear_bit(0, &pfvf->flow_cfg->dmacflt_bmap);
+ found = true;
+ } else {
+ ether_addr_copy(eth_hdr->h_dest,
+ pfvf->netdev->dev_addr);
+ otx2_dmacflt_update(pfvf, eth_hdr->h_dest, 0);
+ }
+ break;
+ }
+ }
+
+ if (found) {
+ list_del(&iter->list);
+ kfree(iter);
+ pfvf->flow_cfg->nr_flows--;
+ }
+}
+
+int otx2_remove_flow(struct otx2_nic *pfvf, u32 location)
+{
+ struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
+ struct otx2_flow *flow;
+ int err;
+
+ if (location >= otx2_get_maxflows(flow_cfg))
+ return -EINVAL;
+
+ flow = otx2_find_flow(pfvf, location);
+ if (!flow)
+ return -ENOENT;
+
+ if (flow->rule_type & DMAC_FILTER_RULE) {
+ struct ethhdr *eth_hdr = &flow->flow_spec.h_u.ether_spec;
+
+ /* The user is not allowed to remove the DMAC filter matching the interface MAC */
+ if (ether_addr_equal(pfvf->netdev->dev_addr, eth_hdr->h_dest))
+ return -EPERM;
+
+ err = otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
+ flow->entry);
+ clear_bit(flow->entry, &flow_cfg->dmacflt_bmap);
+ /* If all DMAC filters are removed, delete the MAC filter with
+ * the interface MAC address and configure the CGX/RPM block in
+ * promiscuous mode.
+ */
+ if (bitmap_weight(&flow_cfg->dmacflt_bmap,
+ flow_cfg->dmacflt_max_flows) == 1)
+ otx2_update_rem_pfmac(pfvf, DMAC_ADDR_DEL);
+ } else {
+#ifdef CONFIG_DCB
+ if (flow->rule_type & PFC_FLOWCTRL_RULE)
+ otx2_update_bpid_in_rqctx(pfvf, 0,
+ flow->flow_spec.ring_cookie,
+ false);
+#endif
+
+ err = otx2_remove_flow_msg(pfvf, flow->entry, false);
+ }
+
+ if (err)
+ return err;
+
+ list_del(&flow->list);
+ kfree(flow);
+ flow_cfg->nr_flows--;
+
+ return 0;
+}
+
+void otx2_rss_ctx_flow_del(struct otx2_nic *pfvf, int ctx_id)
+{
+ struct otx2_flow *flow, *tmp;
+ int err;
+
+ list_for_each_entry_safe(flow, tmp, &pfvf->flow_cfg->flow_list, list) {
+ if (flow->rss_ctx_id != ctx_id)
+ continue;
+ err = otx2_remove_flow(pfvf, flow->location);
+ if (err)
+ netdev_warn(pfvf->netdev,
+ "Can't delete the rule %d associated with this rss group err:%d",
+ flow->location, err);
+ }
+}
+
+int otx2_destroy_ntuple_flows(struct otx2_nic *pfvf)
+{
+ struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
+ struct npc_delete_flow_req *req;
+ struct otx2_flow *iter, *tmp;
+ int err;
+
+ if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
+ return 0;
+
+ if (!flow_cfg->max_flows)
+ return 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->start = flow_cfg->flow_ent[0];
+ req->end = flow_cfg->flow_ent[flow_cfg->max_flows - 1];
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+
+ list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
+ list_del(&iter->list);
+ kfree(iter);
+ flow_cfg->nr_flows--;
+ }
+ return err;
+}
+
+int otx2_destroy_mcam_flows(struct otx2_nic *pfvf)
+{
+ struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
+ struct npc_mcam_free_entry_req *req;
+ struct otx2_flow *iter, *tmp;
+ int err;
+
+ if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC))
+ return 0;
+
+ /* remove all flows */
+ err = otx2_remove_flow_msg(pfvf, 0, true);
+ if (err)
+ return err;
+
+ list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
+ list_del(&iter->list);
+ kfree(iter);
+ flow_cfg->nr_flows--;
+ }
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->all = 1;
+ /* Send message to AF to free MCAM entries */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+ }
+
+ pfvf->flags &= ~OTX2_FLAG_MCAM_ENTRIES_ALLOC;
+ mutex_unlock(&pfvf->mbox.lock);
+
+ return 0;
+}
+
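+/* Install the RX VLAN offload rule: an MCAM entry matching the PF DMAC
+ * plus an outer VLAN, using vtag type 0; the actual strip/capture is
+ * enabled separately via NIX_VTAG_CFG in otx2_enable_rxvlan().
+ */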
+int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf)
+{
+ struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
+ struct npc_install_flow_req *req;
+ int err;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->entry = flow_cfg->def_ent[flow_cfg->rx_vlan_offset];
+ req->intf = NIX_INTF_RX;
+ ether_addr_copy(req->packet.dmac, pfvf->netdev->dev_addr);
+ eth_broadcast_addr((u8 *)&req->mask.dmac);
+ req->channel = pfvf->hw.rx_chan_base;
+ req->op = NIX_RX_ACTION_DEFAULT;
+ req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC);
+ req->vtag0_valid = true;
+ req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE0;
+
+ /* Send message to AF */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+}
+
+static int otx2_delete_rxvlan_offload_flow(struct otx2_nic *pfvf)
+{
+ struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
+ struct npc_delete_flow_req *req;
+ int err;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->entry = flow_cfg->def_ent[flow_cfg->rx_vlan_offset];
+ /* Send message to AF */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+}
+
+int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable)
+{
+ struct nix_vtag_config *req;
+ struct mbox_msghdr *rsp_hdr;
+ int err;
+
+ /* Don't have enough MCAM entries */
+ if (!(pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT))
+ return -ENOMEM;
+
+ /* FDSA & RXVLAN are mutually exclusive */
+ if (pf->ethtool_flags & OTX2_PRIV_FLAG_FDSA_HDR)
+ enable = false;
+
+ if (enable) {
+ err = otx2_install_rxvlan_offload_flow(pf);
+ if (err)
+ return err;
+ } else {
+ err = otx2_delete_rxvlan_offload_flow(pf);
+ if (err)
+ return err;
+ }
+
+ mutex_lock(&pf->mbox.lock);
+ req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
+ if (!req) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ /* config strip, capture and size */
+ req->vtag_size = VTAGSIZE_T4;
+ req->cfg_type = 1; /* rx vlan cfg */
+ req->rx.vtag_type = NIX_AF_LFX_RX_VTAG_TYPE0;
+ req->rx.strip_vtag = enable;
+ req->rx.capture_vtag = enable;
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ if (err) {
+ mutex_unlock(&pf->mbox.lock);
+ return err;
+ }
+
+ rsp_hdr = otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
+ if (IS_ERR(rsp_hdr)) {
+ mutex_unlock(&pf->mbox.lock);
+ return PTR_ERR(rsp_hdr);
+ }
+
+ mutex_unlock(&pf->mbox.lock);
+ return rsp_hdr->rc;
+}
+
+void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf)
+{
+ struct otx2_flow *iter;
+ struct ethhdr *eth_hdr;
+
+ list_for_each_entry(iter, &pf->flow_cfg->flow_list, list) {
+ if (iter->rule_type & DMAC_FILTER_RULE) {
+ eth_hdr = &iter->flow_spec.h_u.ether_spec;
+ otx2_dmacflt_add(pf, eth_hdr->h_dest,
+ iter->entry);
+ }
+ }
+}
+
+void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf)
+{
+ otx2_update_rem_pfmac(pfvf, DMAC_ADDR_UPDATE);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 161174be51c3..7fac19c72c61 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -1,11 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
-/* Marvell OcteonTx2 RVU Physcial Function ethernet driver
+/* Marvell RVU Physical Function ethernet driver
*
- * Copyright (C) 2020 Marvell International Ltd.
+ * Copyright (C) 2020 Marvell.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/module.h>
@@ -16,16 +13,19 @@
#include <linux/if_vlan.h>
#include <linux/iommu.h>
#include <net/ip.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
#include "otx2_reg.h"
#include "otx2_common.h"
#include "otx2_txrx.h"
#include "otx2_struct.h"
#include "otx2_ptp.h"
+#include "cn10k.h"
#include <rvu_trace.h>
-#define DRV_NAME "octeontx2-nicpf"
-#define DRV_STRING "Marvell OcteonTX2 NIC Physical Function Driver"
+#define DRV_NAME "rvu_nicpf"
+#define DRV_STRING "Marvell RVU NIC Physical Function Driver"
/* Supported devices */
static const struct pci_device_id otx2_pf_id_table[] = {
@@ -38,6 +38,10 @@ MODULE_DESCRIPTION(DRV_STRING);
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, otx2_pf_id_table);
+static void otx2_vf_link_event_task(struct work_struct *work);
+static void otx2_vf_ptp_info_task(struct work_struct *work);
+static void otx2_do_set_rx_mode(struct otx2_nic *pf);
+
enum {
TYPE_PFAF,
TYPE_PFVF,
@@ -48,6 +52,7 @@ static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable);
static int otx2_change_mtu(struct net_device *netdev, int new_mtu)
{
+ struct otx2_nic *pf = netdev_priv(netdev);
bool if_up = netif_running(netdev);
int err = 0;
@@ -57,6 +62,10 @@ static int otx2_change_mtu(struct net_device *netdev, int new_mtu)
netdev_info(netdev, "Changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
+ /* Modify the receive buffer size based on the MTU and do not
+ * use the fixed size that was set.
+ */
+ pf->hw.rbuf_fixed_size = 0;
if (if_up)
err = otx2_open(netdev);
@@ -590,9 +599,17 @@ static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs)
if (!pf->mbox_pfvf_wq)
return -ENOMEM;
- base = readq((void __iomem *)((u64)pf->reg_base + RVU_PF_VF_BAR4_ADDR));
- hwbase = ioremap_wc(base, MBOX_SIZE * pf->total_vfs);
+ /* On the CN10K platform, the PF <-> VF mailbox region follows
+ * the PF <-> AF mailbox region.
+ */
+ if (test_bit(CN10K_MBOX, &pf->hw.cap_flag))
+ base = pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM) +
+ MBOX_SIZE;
+ else
+ base = readq((void __iomem *)((u64)pf->reg_base +
+ RVU_PF_VF_BAR4_ADDR));
+ hwbase = ioremap_wc(base, MBOX_SIZE * pf->total_vfs);
if (!hwbase) {
err = -ENOMEM;
goto free_wq;
@@ -784,6 +801,9 @@ static void otx2_process_pfaf_mbox_msg(struct otx2_nic *pf,
case MBOX_MSG_CGX_STATS:
mbox_handler_cgx_stats(pf, (struct cgx_stats_rsp *)msg);
break;
+ case MBOX_MSG_CGX_FEC_STATS:
+ mbox_handler_cgx_fec_stats(pf, (struct cgx_fec_stats_rsp *)msg);
+ break;
default:
if (msg->rc)
dev_err(pf->dev,
@@ -867,6 +887,30 @@ int otx2_mbox_up_handler_cgx_link_event(struct otx2_nic *pf,
return 0;
}
+int otx2_mbox_up_handler_cgx_ptp_rx_info(struct otx2_nic *pf,
+ struct cgx_ptp_rx_info_msg *msg,
+ struct msg_rsp *rsp)
+{
+ int i;
+
+ if (!pf->ptp)
+ return 0;
+
+ pf->ptp->ptp_en = msg->ptp_en;
+
+ /* notify VFs about ptp event */
+ for (i = 0; i < pci_num_vf(pf->pdev); i++) {
+ struct otx2_vf_config *config = &pf->vf_configs[i];
+ struct delayed_work *dwork = &config->ptp_info_work;
+
+ if (config->intf_down)
+ continue;
+
+ schedule_delayed_work(dwork, msecs_to_jiffies(100));
+ }
+ return 0;
+}
+
static int otx2_process_mbox_msg_up(struct otx2_nic *pf,
struct mbox_msghdr *req)
{
@@ -1044,7 +1088,7 @@ static int otx2_pfaf_mbox_init(struct otx2_nic *pf)
* device memory to allow unaligned accesses.
*/
hwbase = ioremap_wc(pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM),
- pci_resource_len(pf->pdev, PCI_MBOX_BAR_NUM));
+ MBOX_SIZE);
if (!hwbase) {
dev_err(pf->dev, "Unable to map PFAF mailbox region\n");
err = -ENOMEM;
@@ -1101,6 +1145,11 @@ static int otx2_cgx_config_loopback(struct otx2_nic *pf, bool enable)
struct msg_req *msg;
int err;
+ if (enable && bitmap_weight(&pf->flow_cfg->dmacflt_bmap,
+ pf->flow_cfg->dmacflt_max_flows))
+ netdev_warn(pf->netdev,
+ "CGX/RPM internal loopback might not work as DMAC filters are active\n");
+
mutex_lock(&pf->mbox.lock);
if (enable)
msg = otx2_mbox_alloc_msg_cgx_intlbk_enable(&pf->mbox);
@@ -1169,7 +1218,7 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data)
}
/* SQ */
- for (qidx = 0; qidx < pf->hw.tx_queues; qidx++) {
+ for (qidx = 0; qidx < pf->hw.tot_tx_queues; qidx++) {
ptr = otx2_get_regaddr(pf, NIX_LF_SQ_OP_INT);
val = otx2_atomic64_add((qidx << 44), ptr);
otx2_write64(pf, NIX_LF_SQ_OP_INT, (qidx << 44) |
@@ -1272,17 +1321,48 @@ static void otx2_free_sq_res(struct otx2_nic *pf)
otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_SQ, false);
/* Free SQB pointers */
otx2_sq_free_sqbs(pf);
- for (qidx = 0; qidx < pf->hw.tx_queues; qidx++) {
+ for (qidx = 0; qidx < pf->hw.tot_tx_queues; qidx++) {
sq = &qset->sq[qidx];
qmem_free(pf->dev, sq->sqe);
qmem_free(pf->dev, sq->tso_hdrs);
kfree(sq->sg);
kfree(sq->sqb_ptrs);
+ qmem_free(pf->dev, sq->timestamps);
}
}
+static int otx2_get_rbuf_size(struct otx2_nic *pf, int mtu)
+{
+ int frame_size;
+ int total_size;
+ int rbuf_size;
+
+ if (pf->hw.rbuf_fixed_size)
+ return pf->hw.rbuf_fixed_size;
+
+ /* The data transferred by NIX to memory consists of the actual packet
+ * plus additional data such as a timestamp and/or EDSA/HIGIG2 headers
+ * if the interface is configured in the corresponding modes.
+ * NIX transfers the entire data using 6 segments/buffers and writes
+ * a CQE_RX descriptor with those segment addresses. The first segment
+ * has the additional data prepended to the packet. Software also
+ * reserves a headroom of 128 bytes in each segment. Hence the total
+ * size of memory needed to receive a packet with 'mtu' is:
+ * frame size = mtu + additional data;
+ * memory = frame_size + headroom * 6;
+ * each receive buffer size = memory / 6;
+ */
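+ /* For example, with the default 1500 byte MTU and no EDSA/HIGIG2
+ * headers the per-buffer result is only a few hundred bytes, so
+ * the ALIGN() below rounds every receive buffer up to 2048 bytes.
+ */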
+ frame_size = mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN +
+ pf->addl_mtu + pf->xtra_hdr;
+ total_size = frame_size + OTX2_HEAD_ROOM * 6;
+ rbuf_size = total_size / 6;
+
+ return ALIGN(rbuf_size, 2048);
+}
+
static int otx2_init_hw_resources(struct otx2_nic *pf)
{
+ struct nix_lf_free_req *free_req;
struct mbox *mbox = &pf->mbox;
struct otx2_hw *hw = &pf->hw;
struct msg_req *req;
@@ -1293,12 +1373,13 @@ static int otx2_init_hw_resources(struct otx2_nic *pf)
* so, aura count = pool count.
*/
hw->rqpool_cnt = hw->rx_queues;
- hw->sqpool_cnt = hw->tx_queues;
+ hw->sqpool_cnt = hw->tot_tx_queues;
hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt;
- /* Get the size of receive buffers to allocate */
- pf->rbsize = RCV_FRAG_LEN(OTX2_HW_TIMESTAMP_LEN + pf->netdev->mtu +
- OTX2_ETH_HLEN);
+ /* Maximum hardware supported transmit length */
+ pf->tx_max_pktlen = pf->netdev->max_mtu + OTX2_ETH_HLEN;
+
+ pf->rbsize = otx2_get_rbuf_size(pf, pf->netdev->mtu);
mutex_lock(&mbox->lock);
/* NPA init */
@@ -1364,8 +1445,8 @@ err_free_rq_ptrs:
otx2_aura_pool_free(pf);
err_free_nix_lf:
mutex_lock(&mbox->lock);
- req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
- if (req) {
+ free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
+ if (free_req) {
if (otx2_sync_mbox_msg(mbox))
dev_err(pf->dev, "%s failed to free nixlf\n", __func__);
}
@@ -1384,6 +1465,7 @@ exit:
static void otx2_free_hw_resources(struct otx2_nic *pf)
{
struct otx2_qset *qset = &pf->qset;
+ struct nix_lf_free_req *free_req;
struct mbox *mbox = &pf->mbox;
struct otx2_cq_queue *cq;
struct msg_req *req;
@@ -1422,10 +1504,16 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
otx2_free_cq_res(pf);
+ /* Free all ingress bandwidth profiles allocated */
+ cn10k_free_all_ipolicers(pf);
+
mutex_lock(&mbox->lock);
/* Reset NIX LF */
- req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
- if (req) {
+ free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
+ if (free_req) {
+ free_req->flags = NIX_LF_DISABLE_FLOWS;
+ if (!(pf->flags & OTX2_FLAG_PF_SHUTDOWN))
+ free_req->flags |= NIX_LF_DONT_FREE_TX_VTAG;
if (otx2_sync_mbox_msg(mbox))
dev_err(pf->dev, "%s failed to free nixlf\n", __func__);
}
@@ -1456,7 +1544,7 @@ int otx2_open(struct net_device *netdev)
netif_carrier_off(netdev);
- pf->qset.cq_cnt = pf->hw.rx_queues + pf->hw.tx_queues;
+ pf->qset.cq_cnt = pf->hw.rx_queues + pf->hw.tot_tx_queues;
/* RQ and SQs are mapped to different CQs,
* so find out max CQ IRQs (i.e CINTs) needed.
*/
@@ -1476,7 +1564,7 @@ int otx2_open(struct net_device *netdev)
if (!qset->cq)
goto err_free_mem;
- qset->sq = kcalloc(pf->hw.tx_queues,
+ qset->sq = kcalloc(pf->hw.tot_tx_queues,
sizeof(struct otx2_snd_queue), GFP_KERNEL);
if (!qset->sq)
goto err_free_mem;
@@ -1497,11 +1585,20 @@ int otx2_open(struct net_device *netdev)
/* RQ0 & SQ0 are mapped to CINT0 and so on..
* 'cq_ids[0]' points to RQ's CQ and
* 'cq_ids[1]' points to SQ's CQ and
+ * 'cq_ids[2]' points to XDP's CQ.
*/
cq_poll->cq_ids[CQ_RX] =
(qidx < pf->hw.rx_queues) ? qidx : CINT_INVALID_CQ;
cq_poll->cq_ids[CQ_TX] = (qidx < pf->hw.tx_queues) ?
qidx + pf->hw.rx_queues : CINT_INVALID_CQ;
+ if (pf->xdp_prog)
+ cq_poll->cq_ids[CQ_XDP] = (qidx < pf->hw.xdp_queues) ?
+ (qidx + pf->hw.rx_queues +
+ pf->hw.tx_queues) :
+ CINT_INVALID_CQ;
+ else
+ cq_poll->cq_ids[CQ_XDP] = CINT_INVALID_CQ;
+
cq_poll->dev = (void *)pf;
netif_napi_add(netdev, &cq_poll->napi,
otx2_napi_handler, NAPI_POLL_WEIGHT);
@@ -1585,13 +1682,22 @@ int otx2_open(struct net_device *netdev)
if (pf->linfo.link_up && !(pf->pcifunc & RVU_PFVF_FUNC_MASK))
otx2_handle_link_event(pf);
- /* Restore pause frame settings */
- otx2_config_pause_frm(pf);
+ if (pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
+ otx2_enable_rxvlan(pf, true);
+
+ /* Set NPC parsing mode */
+ otx2_set_npc_parse_mode(pf, false);
+
+ /* Install DMAC Filters */
+ if (pf->flags & OTX2_FLAG_DMACFLTR_SUPPORT)
+ otx2_dmacflt_reinstall_flows(pf);
err = otx2_rxtx_enable(pf, true);
if (err)
goto err_tx_stop_queues;
+ otx2_do_set_rx_mode(pf);
+
return 0;
err_tx_stop_queues:
@@ -1696,7 +1802,7 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
/* Check for minimum and maximum packet length */
if (skb->len <= ETH_HLEN ||
- (!skb_shinfo(skb)->gso_size && skb->len > pf->max_frs)) {
+ (!skb_shinfo(skb)->gso_size && skb->len > pf->tx_max_pktlen)) {
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -1719,6 +1825,17 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
}
+static netdev_features_t otx2_fix_features(struct net_device *dev,
+ netdev_features_t features)
+{
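+ /* CTAG and STAG rx stripping are toggled together by this driver,
+ * so keep the two feature flags in sync.
+ */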
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ features |= NETIF_F_HW_VLAN_STAG_RX;
+ else
+ features &= ~NETIF_F_HW_VLAN_STAG_RX;
+
+ return features;
+}
+
static void otx2_set_rx_mode(struct net_device *netdev)
{
struct otx2_nic *pf = netdev_priv(netdev);
@@ -1726,15 +1843,24 @@ static void otx2_set_rx_mode(struct net_device *netdev)
queue_work(pf->otx2_wq, &pf->rx_mode_work);
}
-static void otx2_do_set_rx_mode(struct work_struct *work)
+static void otx2_do_set_rx_mode(struct otx2_nic *pf)
{
- struct otx2_nic *pf = container_of(work, struct otx2_nic, rx_mode_work);
struct net_device *netdev = pf->netdev;
struct nix_rx_mode *req;
+ bool promisc = false;
if (!(netdev->flags & IFF_UP))
return;
+ if ((netdev->flags & IFF_PROMISC) ||
+ (netdev_uc_count(netdev) > OTX2_MAX_UNICAST_FLOWS)) {
+ promisc = true;
+ }
+
+ /* Write unicast address to mcam entries or del from mcam */
+ if (!promisc && netdev->priv_flags & IFF_UNICAST_FLT)
+ __dev_uc_sync(netdev, otx2_add_macfilter, otx2_del_macfilter);
+
mutex_lock(&pf->mbox.lock);
req = otx2_mbox_alloc_msg_nix_set_rx_mode(&pf->mbox);
if (!req) {
@@ -1744,16 +1870,24 @@ static void otx2_do_set_rx_mode(struct work_struct *work)
req->mode = NIX_RX_MODE_UCAST;
- /* We don't support MAC address filtering yet */
- if (netdev->flags & IFF_PROMISC)
+ if (promisc)
req->mode |= NIX_RX_MODE_PROMISC;
- else if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST))
+ if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST))
req->mode |= NIX_RX_MODE_ALLMULTI;
+ req->mode |= NIX_RX_MODE_USE_MCE;
+
otx2_sync_mbox_msg(&pf->mbox);
mutex_unlock(&pf->mbox.lock);
}
+static void otx2_rx_mode_wrk_handler(struct work_struct *work)
+{
+ struct otx2_nic *pf = container_of(work, struct otx2_nic, rx_mode_work);
+
+ otx2_do_set_rx_mode(pf);
+}
+
static int otx2_set_features(struct net_device *netdev,
netdev_features_t features)
{
@@ -1763,7 +1897,12 @@ static int otx2_set_features(struct net_device *netdev,
if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev))
return otx2_cgx_config_loopback(pf,
features & NETIF_F_LOOPBACK);
- return 0;
+
+ if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(netdev))
+ return otx2_enable_rxvlan(pf,
+ features & NETIF_F_HW_VLAN_CTAG_RX);
+
+ return otx2_handle_ntuple_tc_features(netdev, features);
}
static void otx2_reset_task(struct work_struct *work)
@@ -1845,7 +1984,7 @@ static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable)
return 0;
}
-static int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
+int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
struct hwtstamp_config config;
@@ -1860,10 +1999,24 @@ static int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
if (config.flags)
return -EINVAL;
+ if (OTX2_IS_INTFMOD_SET(pfvf->ethtool_flags)) {
+ netdev_info(netdev, "Can't support PTP HW timestamping when switch features are enabled\n");
+ return -EOPNOTSUPP;
+ }
+
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
+ if (pfvf->flags & OTX2_FLAG_PTP_ONESTEP_SYNC)
+ pfvf->flags &= ~OTX2_FLAG_PTP_ONESTEP_SYNC;
+
+ cancel_delayed_work(&pfvf->ptp->synctstamp_work);
otx2_config_hw_tx_tstamp(pfvf, false);
break;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ pfvf->flags |= OTX2_FLAG_PTP_ONESTEP_SYNC;
+ schedule_delayed_work(&pfvf->ptp->synctstamp_work,
+ msecs_to_jiffies(500));
+ /* fall through */
case HWTSTAMP_TX_ON:
otx2_config_hw_tx_tstamp(pfvf, true);
break;
@@ -1901,8 +2054,9 @@ static int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
return copy_to_user(ifr->ifr_data, &config,
sizeof(config)) ? -EFAULT : 0;
}
+EXPORT_SYMBOL(otx2_config_hwtstamp);
-static int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
+int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
struct hwtstamp_config *cfg = &pfvf->tstamp;
@@ -1917,11 +2071,417 @@ static int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
return -EOPNOTSUPP;
}
}
+EXPORT_SYMBOL(otx2_ioctl);
+
+static int otx2_do_set_vf_mac(struct otx2_nic *pf, int vf, const u8 *mac)
+{
+ struct npc_install_flow_req *req;
+ int err;
+
+ mutex_lock(&pf->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
+ if (!req) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ ether_addr_copy(req->packet.dmac, mac);
+ eth_broadcast_addr((u8 *)&req->mask.dmac);
+ req->features = BIT_ULL(NPC_DMAC);
+ req->channel = pf->hw.rx_chan_base;
+ req->intf = NIX_INTF_RX;
+ req->default_rule = 1;
+ req->append = 1;
+ req->vf = vf + 1;
+ req->op = NIX_RX_ACTION_DEFAULT;
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+out:
+ mutex_unlock(&pf->mbox.lock);
+ return err;
+}
+
+static int otx2_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ struct pci_dev *pdev = pf->pdev;
+ struct otx2_vf_config *config;
+ int ret;
+
+ if (!netif_running(netdev))
+ return -EAGAIN;
+
+ if (vf >= pf->total_vfs)
+ return -EINVAL;
+
+ if (!is_valid_ether_addr(mac))
+ return -EINVAL;
+
+ config = &pf->vf_configs[vf];
+ ether_addr_copy(config->mac, mac);
+
+ ret = otx2_do_set_vf_mac(pf, vf, mac);
+ if (ret == 0)
+ dev_info(&pdev->dev,
+ "Load/Reload VF driver\n");
+
+ return ret;
+}
+
+int otx2_do_set_vf_vlan(struct otx2_nic *pf, int vf, u16 vlan, u8 qos,
+ __be16 proto)
+{
+ struct otx2_flow_config *flow_cfg = pf->flow_cfg;
+ struct nix_vtag_config_rsp *vtag_rsp;
+ struct npc_delete_flow_req *del_req;
+ struct nix_vtag_config *vtag_req;
+ struct npc_install_flow_req *req;
+ struct otx2_vf_config *config;
+ int err = 0;
+ u32 idx;
+
+ config = &pf->vf_configs[vf];
+
+ if (!vlan && !config->vlan)
+ goto out;
+
+ mutex_lock(&pf->mbox.lock);
+
+ /* free old tx vtag entry */
+ if (config->vlan) {
+ vtag_req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
+ if (!vtag_req) {
+ err = -ENOMEM;
+ goto out;
+ }
+ vtag_req->cfg_type = 0;
+ vtag_req->tx.free_vtag0 = 1;
+ vtag_req->tx.vtag0_idx = config->tx_vtag_idx;
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ if (err)
+ goto out;
+ }
+
+ if (!vlan && config->vlan) {
+ /* rx */
+ del_req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
+ if (!del_req) {
+ err = -ENOMEM;
+ goto out;
+ }
+ idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_RX_INDEX);
+ del_req->entry =
+ flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ if (err)
+ goto out;
+
+ /* tx */
+ del_req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
+ if (!del_req) {
+ err = -ENOMEM;
+ goto out;
+ }
+ idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_TX_INDEX);
+ del_req->entry =
+ flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
+ err = otx2_sync_mbox_msg(&pf->mbox);
+
+ if (!(pf->ethtool_flags & OTX2_PRIV_FLAG_FDSA_HDR))
+ memset(&config->rule, 0, sizeof(config->rule));
+ goto out;
+ }
+
+ /* rx */
+ req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
+ if (!req) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_RX_INDEX);
+ req->entry = flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
+ req->packet.vlan_tci = htons(vlan);
+ req->mask.vlan_tci = htons(VLAN_VID_MASK);
+ /* af fills the destination mac addr */
+ eth_broadcast_addr((u8 *)&req->mask.dmac);
+ req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC);
+ req->channel = pf->hw.rx_chan_base;
+ req->intf = NIX_INTF_RX;
+ req->vf = vf + 1;
+ req->op = NIX_RX_ACTION_DEFAULT;
+ req->vtag0_valid = true;
+ req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7;
+ req->set_cntr = 1;
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ if (err)
+ goto out;
+
+ /* tx */
+ vtag_req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
+ if (!vtag_req) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /* configure tx vtag params */
+ vtag_req->vtag_size = VTAGSIZE_T4;
+ vtag_req->cfg_type = 0; /* tx vlan cfg */
+ vtag_req->tx.cfg_vtag0 = 1;
+ vtag_req->tx.vtag0 = (((u64)ntohs(proto)) << 16) | vlan;
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ if (err)
+ goto out;
+
+ vtag_rsp = (struct nix_vtag_config_rsp *)otx2_mbox_get_rsp
+ (&pf->mbox.mbox, 0, &vtag_req->hdr);
+ if (IS_ERR(vtag_rsp)) {
+ err = PTR_ERR(vtag_rsp);
+ goto out;
+ }
+ config->tx_vtag_idx = vtag_rsp->vtag0_idx;
+
+ req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
+ if (!req) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ eth_zero_addr((u8 *)&req->mask.dmac);
+ idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_TX_INDEX);
+ req->entry = flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
+ req->features = BIT_ULL(NPC_DMAC);
+ req->channel = pf->hw.tx_chan_base;
+ req->intf = NIX_INTF_TX;
+ req->vf = vf + 1;
+ req->op = NIX_TX_ACTIONOP_UCAST_DEFAULT;
+ req->vtag0_def = vtag_rsp->vtag0_idx;
+ req->vtag0_op = VTAG_INSERT;
+ req->set_cntr = 1;
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ /* Update these values to reinstall the vfvlan rule */
+ config->rule.vlan = vlan;
+ config->rule.proto = proto;
+ config->rule.qos = qos;
+out:
+ config->vlan = vlan;
+ mutex_unlock(&pf->mbox.lock);
+ return err;
+}
+
+static int otx2_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
+ __be16 proto)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ struct pci_dev *pdev = pf->pdev;
+
+ if (!netif_running(netdev))
+ return -EAGAIN;
+
+ if (vf >= pci_num_vf(pdev))
+ return -EINVAL;
+
+ /* qos is currently unsupported */
+ if (vlan >= VLAN_N_VID || qos)
+ return -EINVAL;
+
+ if (proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
+ if (!(pf->flags & OTX2_FLAG_VF_VLAN_SUPPORT))
+ return -EOPNOTSUPP;
+
+ if (pf->ethtool_flags & OTX2_PRIV_FLAG_FDSA_HDR)
+ return -EOPNOTSUPP;
+
+ return otx2_do_set_vf_vlan(pf, vf, vlan, qos, proto);
+}
+
+static int otx2_get_vf_config(struct net_device *netdev, int vf,
+ struct ifla_vf_info *ivi)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ struct pci_dev *pdev = pf->pdev;
+ struct otx2_vf_config *config;
+
+ if (!netif_running(netdev))
+ return -EAGAIN;
+
+ if (vf >= pci_num_vf(pdev))
+ return -EINVAL;
+
+ config = &pf->vf_configs[vf];
+ ivi->vf = vf;
+ ether_addr_copy(ivi->mac, config->mac);
+ ivi->vlan = config->vlan;
+ ivi->trusted = config->trusted;
+
+ return 0;
+}
+
+static int otx2_xdp_xmit_tx(struct otx2_nic *pf, struct xdp_frame *xdpf,
+ int qidx)
+{
+ struct page *page;
+ u64 dma_addr;
+ int err = 0;
+
+ dma_addr = otx2_dma_map_page(pf, virt_to_page(xdpf->data),
+ offset_in_page(xdpf->data), xdpf->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(pf->dev, dma_addr))
+ return -ENOMEM;
+
+ err = otx2_xdp_sq_append_pkt(pf, dma_addr, xdpf->len, qidx);
+ if (!err) {
+ otx2_dma_unmap_page(pf, dma_addr, xdpf->len, DMA_TO_DEVICE);
+ page = virt_to_page(xdpf->data);
+ put_page(page);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int otx2_xdp_xmit(struct net_device *netdev, int n,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ int qidx = smp_processor_id();
+ struct otx2_snd_queue *sq;
+ int drops = 0, i;
+
+ if (!netif_running(netdev))
+ return -ENETDOWN;
+
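+ /* XDP SQs are placed right after the regular TX queues,
+ * so pick this CPU's XDP send queue.
+ */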
+ qidx += pf->hw.tx_queues;
+ sq = pf->xdp_prog ? &pf->qset.sq[qidx] : NULL;
+
+ /* Abort xmit if the XDP queue is not set up */
+ if (unlikely(!sq))
+ return -ENXIO;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ for (i = 0; i < n; i++) {
+ struct xdp_frame *xdpf = frames[i];
+ int err;
+
+ err = otx2_xdp_xmit_tx(pf, xdpf, qidx);
+ if (err)
+ drops++;
+ }
+ return n - drops;
+}
+
+static int otx2_xdp_setup(struct otx2_nic *pf, struct bpf_prog *prog)
+{
+ bool if_up = netif_running(pf->netdev);
+ struct bpf_prog *old_prog;
+ int err = 0;
+
+ if (if_up)
+ otx2_stop(pf->netdev);
+
+ old_prog = xchg(&pf->xdp_prog, prog);
+
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ if (pf->xdp_prog) {
+ bpf_prog_add(pf->xdp_prog, pf->hw.rx_queues - 1);
+ if (IS_ERR(pf->xdp_prog))
+ err = PTR_ERR(pf->xdp_prog);
+ }
+ /* The network stack and XDP share the same rx queues.
+ * Use separate tx queues for XDP and the network stack.
+ */
+ if (pf->xdp_prog)
+ pf->hw.xdp_queues = pf->hw.rx_queues;
+ else
+ pf->hw.xdp_queues = 0;
+
+ pf->hw.tot_tx_queues += pf->hw.xdp_queues;
+
+ if (if_up)
+ otx2_open(pf->netdev);
+
+ return err;
+}
+
+static int otx2_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return otx2_xdp_setup(pf, xdp->prog);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int otx2_set_vf_permissions(struct otx2_nic *pf, int vf,
+ int req_perm)
+{
+ struct set_vf_perm *req;
+ int rc;
+
+ mutex_lock(&pf->mbox.lock);
+ req = otx2_mbox_alloc_msg_set_vf_perm(&pf->mbox);
+ if (!req) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Let AF reset VF permissions as sriov is disabled */
+ if (req_perm == OTX2_RESET_VF_PERM) {
+ req->flags |= RESET_VF_PERM;
+ } else if (req_perm == OTX2_TRUSTED_VF) {
+ if (pf->vf_configs[vf].trusted)
+ req->flags |= VF_TRUSTED;
+ }
+
+ req->vf = vf;
+ rc = otx2_sync_mbox_msg(&pf->mbox);
+out:
+ mutex_unlock(&pf->mbox.lock);
+ return rc;
+}
+
+static int otx2_ndo_set_vf_trust(struct net_device *netdev, int vf,
+ bool enable)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ struct pci_dev *pdev = pf->pdev;
+ int rc;
+
+ if (vf >= pci_num_vf(pdev))
+ return -EINVAL;
+
+ if (pf->vf_configs[vf].trusted == enable)
+ return 0;
+
+ pf->vf_configs[vf].trusted = enable;
+ rc = otx2_set_vf_permissions(pf, vf, OTX2_TRUSTED_VF);
+
+ if (rc)
+ pf->vf_configs[vf].trusted = !enable;
+ else
+ netdev_info(pf->netdev, "VF %d is %strusted\n",
+ vf, enable ? "" : "not ");
+ return rc;
+}
static const struct net_device_ops otx2_netdev_ops = {
.ndo_open = otx2_open,
.ndo_stop = otx2_stop,
.ndo_start_xmit = otx2_xmit,
+ .ndo_fix_features = otx2_fix_features,
.ndo_set_mac_address = otx2_set_mac_address,
.ndo_change_mtu = otx2_change_mtu,
.ndo_set_rx_mode = otx2_set_rx_mode,
@@ -1929,6 +2489,13 @@ static const struct net_device_ops otx2_netdev_ops = {
.ndo_tx_timeout = otx2_tx_timeout,
.ndo_get_stats64 = otx2_get_stats64,
.ndo_do_ioctl = otx2_ioctl,
+ .ndo_set_vf_mac = otx2_set_vf_mac,
+ .ndo_set_vf_vlan = otx2_set_vf_vlan,
+ .ndo_get_vf_config = otx2_get_vf_config,
+ .ndo_bpf = otx2_xdp,
+ .ndo_xdp_xmit = otx2_xdp_xmit,
+ .ndo_setup_tc = otx2_setup_tc,
+ .ndo_set_vf_trust = otx2_ndo_set_vf_trust,
};
static int otx2_wq_init(struct otx2_nic *pf)
@@ -1937,7 +2504,7 @@ static int otx2_wq_init(struct otx2_nic *pf)
if (!pf->otx2_wq)
return -ENOMEM;
- INIT_WORK(&pf->rx_mode_work, otx2_do_set_rx_mode);
+ INIT_WORK(&pf->rx_mode_work, otx2_rx_mode_wrk_handler);
INIT_WORK(&pf->reset_task, otx2_reset_task);
return 0;
}
@@ -1983,6 +2550,43 @@ static int otx2_realloc_msix_vectors(struct otx2_nic *pf)
return otx2_register_mbox_intr(pf, false);
}
+static int otx2_sriov_vfcfg_init(struct otx2_nic *pf)
+{
+ int i;
+
+ pf->vf_configs = devm_kcalloc(pf->dev, pf->total_vfs,
+ sizeof(struct otx2_vf_config),
+ GFP_KERNEL);
+ if (!pf->vf_configs)
+ return -ENOMEM;
+
+ for (i = 0; i < pf->total_vfs; i++) {
+ pf->vf_configs[i].pf = pf;
+ pf->vf_configs[i].intf_down = true;
+ pf->vf_configs[i].trusted = false;
+ INIT_DELAYED_WORK(&pf->vf_configs[i].link_event_work,
+ otx2_vf_link_event_task);
+ INIT_DELAYED_WORK(&pf->vf_configs[i].ptp_info_work,
+ otx2_vf_ptp_info_task);
+ }
+
+ return 0;
+}
+
+static void otx2_sriov_vfcfg_cleanup(struct otx2_nic *pf)
+{
+ int i;
+
+ if (!pf->vf_configs)
+ return;
+
+ for (i = 0; i < pf->total_vfs; i++) {
+ cancel_delayed_work_sync(&pf->vf_configs[i].link_event_work);
+ cancel_delayed_work_sync(&pf->vf_configs[i].ptp_info_work);
+ otx2_set_vf_permissions(pf, i, OTX2_RESET_VF_PERM);
+ }
+}
+
static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct device *dev = &pdev->dev;
@@ -2034,7 +2638,10 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
hw->pdev = pdev;
hw->rx_queues = qcount;
hw->tx_queues = qcount;
+ hw->tot_tx_queues = qcount;
hw->max_queues = qcount;
+ /* Use CQE of 128 byte descriptor size by default */
+ hw->xqe_size = 128;
num_vec = pci_msix_vec_count(pdev);
hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
@@ -2071,6 +2678,8 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_free_netdev;
}
+ otx2_setup_dev_hw_settings(pf);
+
/* Init PF <=> AF mailbox stuff */
err = otx2_pfaf_mbox_init(pf);
if (err)
@@ -2096,7 +2705,9 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_detach_rsrc;
- otx2_setup_dev_hw_settings(pf);
+ err = cn10k_lmtst_init(pf);
+ if (err)
+ goto err_detach_rsrc;
/* Assign default mac address */
otx2_get_mac_from_af(netdev);
@@ -2123,21 +2734,44 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
NETIF_F_GSO_UDP_L4);
netdev->features |= netdev->hw_features;
+ err = otx2_mcam_flow_init(pf);
+ if (err)
+ goto err_ptp_destroy;
+
+ if (pf->flags & OTX2_FLAG_NTUPLE_SUPPORT)
+ netdev->hw_features |= NETIF_F_NTUPLE;
+
+ if (pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT)
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
+ /* Support TSO on tag interface */
+ netdev->vlan_features |= netdev->features;
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX;
+ if (pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_STAG_RX;
+ netdev->features |= netdev->hw_features;
+
+ /* HW supports tc offload but mutually exclusive with n-tuple filters */
+ if (pf->flags & OTX2_FLAG_TC_FLOWER_SUPPORT)
+ netdev->hw_features |= NETIF_F_HW_TC;
+
netdev->hw_features |= NETIF_F_LOOPBACK | NETIF_F_RXALL;
netdev->gso_max_segs = OTX2_MAX_GSO_SEGS;
- netdev->watchdog_timeo = OTX2_TX_TIMEOUT;
+ netdev->watchdog_timeo = netdev->watchdog_timeo ?
+ netdev->watchdog_timeo : OTX2_TX_TIMEOUT;
netdev->netdev_ops = &otx2_netdev_ops;
- /* MTU range: 64 - 9190 */
netdev->min_mtu = OTX2_MIN_MTU;
- netdev->max_mtu = OTX2_MAX_MTU;
+ netdev->max_mtu = otx2_get_max_mtu(pf);
err = register_netdev(netdev);
if (err) {
dev_err(dev, "Failed to register netdevice\n");
- goto err_ptp_destroy;
+ goto err_del_mcam_entries;
}
err = otx2_wq_init(pf);
@@ -2146,25 +2780,54 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
otx2_set_ethtool_ops(netdev);
+ err = otx2_init_tc(pf);
+ if (err)
+ goto err_mcam_flow_del;
+
+ err = otx2_register_dl(pf);
+ if (err)
+ goto err_mcam_flow_del;
+
+ /* Initialize SR-IOV resources */
+ err = otx2_sriov_vfcfg_init(pf);
+ if (err)
+ goto err_pf_sriov_init;
+
/* Enable link notifications */
otx2_cgx_config_linkevents(pf, true);
- /* Enable pause frames by default */
- pf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED;
- pf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED;
+ /* Set interface mode as Default */
+ pf->ethtool_flags |= OTX2_PRIV_FLAG_DEF_MODE;
+
+#ifdef CONFIG_DCB
+ err = otx2_dcbnl_set_ops(netdev);
+ if (err)
+ goto err_pf_sriov_init;
+#endif
return 0;
+err_pf_sriov_init:
+ otx2_shutdown_tc(pf);
+err_mcam_flow_del:
+ otx2_mcam_flow_del(pf);
err_unreg_netdev:
unregister_netdev(netdev);
+err_del_mcam_entries:
+ otx2_mcam_flow_del(pf);
err_ptp_destroy:
otx2_ptp_destroy(pf);
err_detach_rsrc:
+ if (pf->hw.lmt_info)
+ free_percpu(pf->hw.lmt_info);
+ if (test_bit(CN10K_LMTST, &pf->hw.cap_flag))
+ qmem_free(pf->dev, pf->dync_lmt);
otx2_detach_resources(&pf->mbox);
err_disable_mbox_intr:
otx2_disable_mbox_intr(pf);
err_mbox_destroy:
otx2_pfaf_mbox_destroy(pf);
+ otx2_pfvf_mbox_destroy(pf);
err_free_irq_vectors:
pci_free_irq_vectors(hw->pdev);
err_free_netdev:
@@ -2203,11 +2866,39 @@ static void otx2_vf_link_event_task(struct work_struct *work)
otx2_sync_mbox_up_msg(&pf->mbox_pfvf[0], vf_idx);
}
+static void otx2_vf_ptp_info_task(struct work_struct *work)
+{
+ struct cgx_ptp_rx_info_msg *req;
+ struct otx2_vf_config *config;
+ struct mbox_msghdr *msghdr;
+ struct otx2_nic *pf;
+ int vf_idx;
+
+ config = container_of(work, struct otx2_vf_config,
+ ptp_info_work.work);
+ vf_idx = config - config->pf->vf_configs;
+ pf = config->pf;
+
+ msghdr = otx2_mbox_alloc_msg_rsp(&pf->mbox_pfvf[0].mbox_up, vf_idx,
+ sizeof(*req), sizeof(struct msg_rsp));
+ if (!msghdr) {
+ dev_err(pf->dev, "Failed to create VF%d PTP info message\n", vf_idx);
+ return;
+ }
+
+ req = (struct cgx_ptp_rx_info_msg *)msghdr;
+ req->hdr.id = MBOX_MSG_CGX_PTP_RX_INFO;
+ req->hdr.sig = OTX2_MBOX_REQ_SIG;
+ req->ptp_en = pf->ptp->ptp_en;
+
+ otx2_sync_mbox_up_msg(&pf->mbox_pfvf[0], vf_idx);
+}
+
static int otx2_sriov_enable(struct pci_dev *pdev, int numvfs)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct otx2_nic *pf = netdev_priv(netdev);
- int ret, i;
+ int ret;
/* Init PF <=> VF mailbox stuff */
ret = otx2_pfvf_mbox_init(pf, numvfs);
@@ -2218,23 +2909,9 @@ static int otx2_sriov_enable(struct pci_dev *pdev, int numvfs)
if (ret)
goto free_mbox;
- pf->vf_configs = kcalloc(numvfs, sizeof(struct otx2_vf_config),
- GFP_KERNEL);
- if (!pf->vf_configs) {
- ret = -ENOMEM;
- goto free_intr;
- }
-
- for (i = 0; i < numvfs; i++) {
- pf->vf_configs[i].pf = pf;
- pf->vf_configs[i].intf_down = true;
- INIT_DELAYED_WORK(&pf->vf_configs[i].link_event_work,
- otx2_vf_link_event_task);
- }
-
ret = otx2_pf_flr_init(pf, numvfs);
if (ret)
- goto free_configs;
+ goto free_intr;
ret = otx2_register_flr_me_intr(pf, numvfs);
if (ret)
@@ -2249,8 +2926,6 @@ free_flr_intr:
otx2_disable_flr_me_intr(pf);
free_flr:
otx2_flr_wq_destroy(pf);
-free_configs:
- kfree(pf->vf_configs);
free_intr:
otx2_disable_pfvf_mbox_intr(pf, numvfs);
free_mbox:
@@ -2263,17 +2938,12 @@ static int otx2_sriov_disable(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct otx2_nic *pf = netdev_priv(netdev);
int numvfs = pci_num_vf(pdev);
- int i;
if (!numvfs)
return 0;
pci_disable_sriov(pdev);
- for (i = 0; i < pci_num_vf(pdev); i++)
- cancel_delayed_work_sync(&pf->vf_configs[i].link_event_work);
- kfree(pf->vf_configs);
-
otx2_disable_flr_me_intr(pf);
otx2_flr_wq_destroy(pf);
otx2_disable_pfvf_mbox_intr(pf, numvfs);
@@ -2299,23 +2969,49 @@ static void otx2_remove(struct pci_dev *pdev)
return;
pf = netdev_priv(netdev);
+ pf->flags |= OTX2_FLAG_PF_SHUTDOWN;
if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED)
otx2_config_hw_tx_tstamp(pf, false);
if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED)
otx2_config_hw_rx_tstamp(pf, false);
+ /* Disable 802.3x pause frames */
+ if (pf->flags & OTX2_FLAG_RX_PAUSE_ENABLED ||
+ (pf->flags & OTX2_FLAG_TX_PAUSE_ENABLED)) {
+ pf->flags &= ~OTX2_FLAG_RX_PAUSE_ENABLED;
+ pf->flags &= ~OTX2_FLAG_TX_PAUSE_ENABLED;
+ otx2_config_pause_frm(pf);
+ }
+
+#ifdef CONFIG_DCB
+ /* Disable PFC config */
+ if (pf->pfc_en) {
+ pf->pfc_en = 0;
+ otx2_config_priority_flow_ctrl(pf);
+ }
+#endif
+ otx2_set_npc_parse_mode(pf, true);
+
cancel_work_sync(&pf->reset_task);
/* Disable link notifications */
otx2_cgx_config_linkevents(pf, false);
+ otx2_unregister_dl(pf);
unregister_netdev(netdev);
otx2_sriov_disable(pf->pdev);
+ otx2_sriov_vfcfg_cleanup(pf);
if (pf->otx2_wq)
destroy_workqueue(pf->otx2_wq);
otx2_ptp_destroy(pf);
+ otx2_mcam_flow_del(pf);
+ otx2_shutdown_tc(pf);
otx2_detach_resources(&pf->mbox);
+ if (pf->hw.lmt_info)
+ free_percpu(pf->hw.lmt_info);
+ if (test_bit(CN10K_LMTST, &pf->hw.cap_flag))
+ qmem_free(pf->dev, pf->dync_lmt);
otx2_disable_mbox_intr(pf);
otx2_pfaf_mbox_destroy(pf);
pci_free_irq_vectors(pf->pdev);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
index 7bcf5246350f..d1c6fe4559cb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
@@ -1,9 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
-/* Marvell OcteonTx2 PTP support for ethernet driver
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2020 Marvell.
*
- * Copyright (C) 2020 Marvell International Ltd.
*/
+#include <linux/module.h>
#include "otx2_common.h"
#include "otx2_ptp.h"
@@ -12,7 +14,6 @@ static int otx2_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm)
struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
ptp_info);
struct ptp_req *req;
- int err;
if (!ptp->nic)
return -ENODEV;
@@ -24,16 +25,28 @@ static int otx2_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm)
req->op = PTP_OP_ADJFINE;
req->scaled_ppm = scaled_ppm;
- err = otx2_sync_mbox_msg(&ptp->nic->mbox);
- if (err)
- return err;
+ return otx2_sync_mbox_msg(&ptp->nic->mbox);
+}
- return 0;
+static int ptp_set_thresh(struct otx2_ptp *ptp, u64 thresh)
+{
+ struct ptp_req *req;
+
+ if (!ptp->nic)
+ return -ENODEV;
+
+ req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox);
+ if (!req)
+ return -ENOMEM;
+
+ req->op = PTP_OP_SET_THRESH;
+ req->thresh = thresh;
+
+ return otx2_sync_mbox_msg(&ptp->nic->mbox);
}
-static u64 ptp_cc_read(const struct cyclecounter *cc)
+static u64 ptp_tstmp_read(struct otx2_ptp *ptp)
{
- struct otx2_ptp *ptp = container_of(cc, struct otx2_ptp, cycle_counter);
struct ptp_req *req;
struct ptp_rsp *rsp;
int err;
@@ -45,7 +58,7 @@ static u64 ptp_cc_read(const struct cyclecounter *cc)
if (!req)
return 0;
- req->op = PTP_OP_GET_CLOCK;
+ req->op = PTP_OP_GET_TSTMP;
err = otx2_sync_mbox_msg(&ptp->nic->mbox);
if (err)
@@ -59,17 +72,50 @@ static u64 ptp_cc_read(const struct cyclecounter *cc)
return rsp->clk;
}
+static void otx2_get_ptpclock(struct otx2_nic *pfvf, u64 *tstamp)
+{
+ struct ptp_req *req;
+ struct ptp_rsp *rsp;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_ptp_op(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return;
+ }
+
+ req->op = PTP_OP_GET_CLOCK;
+
+ if (!otx2_sync_mbox_msg(&pfvf->mbox)) {
+ rsp = (struct ptp_rsp *)otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0,
+ &req->hdr);
+ *tstamp = rsp->clk;
+ }
+
+ mutex_unlock(&pfvf->mbox.lock);
+}
+
static int otx2_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
{
struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
ptp_info);
struct otx2_nic *pfvf = ptp->nic;
+ struct ptp_req *req;
+ int err;
mutex_lock(&pfvf->mbox.lock);
- timecounter_adjtime(&ptp->time_counter, delta);
+ req = otx2_mbox_alloc_msg_ptp_op(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+ req->op = PTP_OP_ADJ_CLOCK;
+ req->delta = delta;
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
mutex_unlock(&pfvf->mbox.lock);
+ return err;
- return 0;
}
static int otx2_ptp_gettime(struct ptp_clock_info *ptp_info,
@@ -78,13 +124,10 @@ static int otx2_ptp_gettime(struct ptp_clock_info *ptp_info,
struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
ptp_info);
struct otx2_nic *pfvf = ptp->nic;
- u64 nsec;
+ u64 tstamp;
- mutex_lock(&pfvf->mbox.lock);
- nsec = timecounter_read(&ptp->time_counter);
- mutex_unlock(&pfvf->mbox.lock);
-
- *ts = ns_to_timespec64(nsec);
+ otx2_get_ptpclock(pfvf, &tstamp);
+ *ts = ns_to_timespec64(tstamp);
return 0;
}
@@ -95,30 +138,113 @@ static int otx2_ptp_settime(struct ptp_clock_info *ptp_info,
struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
ptp_info);
struct otx2_nic *pfvf = ptp->nic;
- u64 nsec;
-
- nsec = timespec64_to_ns(ts);
+ struct ptp_req *req;
+ int err;
mutex_lock(&pfvf->mbox.lock);
- timecounter_init(&ptp->time_counter, &ptp->cycle_counter, nsec);
+ req = otx2_mbox_alloc_msg_ptp_op(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->op = PTP_OP_SET_CLOCK;
+ req->nsec = timespec64_to_ns(ts);
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
mutex_unlock(&pfvf->mbox.lock);
+ return err;
+}
+static int otx2_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ switch (func) {
+ case PTP_PF_NONE:
+ case PTP_PF_EXTTS:
+ break;
+ case PTP_PF_PEROUT:
+ case PTP_PF_PHYSYNC:
+ return -1;
+ }
return 0;
}
+static void otx2_ptp_extts_check(struct work_struct *work)
+{
+ struct otx2_ptp *ptp = container_of(work, struct otx2_ptp,
+ extts_work.work);
+ struct ptp_clock_event event;
+ u64 tstmp, new_thresh;
+
+ mutex_lock(&ptp->nic->mbox.lock);
+ tstmp = ptp_tstmp_read(ptp);
+ mutex_unlock(&ptp->nic->mbox.lock);
+
+ if (tstmp != ptp->last_extts) {
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = 0;
+ event.timestamp = ptp->convert_tx_ptp_tstmp(tstmp);
+ ptp_clock_event(ptp->ptp_clock, &event);
+ ptp->last_extts = tstmp;
+
+ new_thresh = tstmp % 500000000;
+ if (ptp->thresh != new_thresh) {
+ mutex_lock(&ptp->nic->mbox.lock);
+ ptp_set_thresh(ptp, new_thresh);
+ mutex_unlock(&ptp->nic->mbox.lock);
+ ptp->thresh = new_thresh;
+ }
+ }
+ schedule_delayed_work(&ptp->extts_work, msecs_to_jiffies(200));
+}
+
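+/* Periodically refresh a cached copy of the PTP clock. This work is
+ * scheduled only while HWTSTAMP_TX_ONESTEP_SYNC is enabled (see
+ * otx2_config_hwtstamp()).
+ */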
+static void otx2_sync_tstamp(struct work_struct *work)
+{
+ struct otx2_ptp *ptp = container_of(work, struct otx2_ptp,
+ synctstamp_work.work);
+
+ otx2_get_ptpclock(ptp->nic, &ptp->tstamp);
+ schedule_delayed_work(&ptp->synctstamp_work, msecs_to_jiffies(500));
+}
+
static int otx2_ptp_enable(struct ptp_clock_info *ptp_info,
struct ptp_clock_request *rq, int on)
{
+ struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
+ ptp_info);
+ int pin = -1;
+
+ if (!ptp->nic)
+ return -ENODEV;
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_EXTTS:
+ pin = ptp_find_pin(ptp->ptp_clock, PTP_PF_EXTTS,
+ rq->extts.index);
+ if (pin < 0)
+ return -EBUSY;
+ if (on)
+ schedule_delayed_work(&ptp->extts_work, msecs_to_jiffies(200));
+ else
+ cancel_delayed_work_sync(&ptp->extts_work);
+ return 0;
+ default:
+ break;
+ }
return -EOPNOTSUPP;
}
int otx2_ptp_init(struct otx2_nic *pfvf)
{
struct otx2_ptp *ptp_ptr;
- struct cyclecounter *cc;
struct ptp_req *req;
int err;
+ if (is_otx2_lbkvf(pfvf->pdev)) {
+ pfvf->ptp = NULL;
+ return 0;
+ }
+
mutex_lock(&pfvf->mbox.lock);
/* check if PTP block is available */
req = otx2_mbox_alloc_msg_ptp_op(&pfvf->mbox);
@@ -144,29 +270,28 @@ int otx2_ptp_init(struct otx2_nic *pfvf)
ptp_ptr->nic = pfvf;
- cc = &ptp_ptr->cycle_counter;
- cc->read = ptp_cc_read;
- cc->mask = CYCLECOUNTER_MASK(64);
- cc->mult = 1;
- cc->shift = 0;
-
- timecounter_init(&ptp_ptr->time_counter, &ptp_ptr->cycle_counter,
- ktime_to_ns(ktime_get_real()));
+ snprintf(ptp_ptr->extts_config.name, sizeof(ptp_ptr->extts_config.name), "TSTAMP");
+ ptp_ptr->extts_config.index = 0;
+ ptp_ptr->extts_config.func = PTP_PF_NONE;
ptp_ptr->ptp_info = (struct ptp_clock_info) {
.owner = THIS_MODULE,
.name = "OcteonTX2 PTP",
.max_adj = 1000000000ull,
- .n_ext_ts = 0,
- .n_pins = 0,
+ .n_ext_ts = 1,
+ .n_pins = 1,
.pps = 0,
+ .pin_config = &ptp_ptr->extts_config,
.adjfine = otx2_ptp_adjfine,
.adjtime = otx2_ptp_adjtime,
.gettime64 = otx2_ptp_gettime,
.settime64 = otx2_ptp_settime,
.enable = otx2_ptp_enable,
+ .verify = otx2_ptp_verify_pin,
};
+ INIT_DELAYED_WORK(&ptp_ptr->extts_work, otx2_ptp_extts_check);
+
ptp_ptr->ptp_clock = ptp_clock_register(&ptp_ptr->ptp_info, pfvf->dev);
if (IS_ERR_OR_NULL(ptp_ptr->ptp_clock)) {
err = ptp_ptr->ptp_clock ?
@@ -175,11 +300,22 @@ int otx2_ptp_init(struct otx2_nic *pfvf)
goto error;
}
+ if (is_dev_otx2(pfvf->pdev)) {
+ ptp_ptr->convert_rx_ptp_tstmp = &otx2_ptp_convert_rx_timestamp;
+ ptp_ptr->convert_tx_ptp_tstmp = &otx2_ptp_convert_tx_timestamp;
+ } else {
+ ptp_ptr->convert_rx_ptp_tstmp = &cn10k_ptp_convert_timestamp;
+ ptp_ptr->convert_tx_ptp_tstmp = &cn10k_ptp_convert_timestamp;
+ }
+
+ INIT_DELAYED_WORK(&ptp_ptr->synctstamp_work, otx2_sync_tstamp);
+
pfvf->ptp = ptp_ptr;
error:
return err;
}
+EXPORT_SYMBOL_GPL(otx2_ptp_init);
void otx2_ptp_destroy(struct otx2_nic *pfvf)
{
@@ -188,10 +324,13 @@ void otx2_ptp_destroy(struct otx2_nic *pfvf)
if (!ptp)
return;
+ cancel_delayed_work(&pfvf->ptp->synctstamp_work);
+
ptp_clock_unregister(ptp->ptp_clock);
kfree(ptp);
pfvf->ptp = NULL;
}
+EXPORT_SYMBOL_GPL(otx2_ptp_destroy);
int otx2_ptp_clock_index(struct otx2_nic *pfvf)
{
@@ -200,13 +339,8 @@ int otx2_ptp_clock_index(struct otx2_nic *pfvf)
return ptp_clock_index(pfvf->ptp->ptp_clock);
}
+EXPORT_SYMBOL_GPL(otx2_ptp_clock_index);
-int otx2_ptp_tstamp2time(struct otx2_nic *pfvf, u64 tstamp, u64 *tsns)
-{
- if (!pfvf->ptp)
- return -ENODEV;
-
- *tsns = timecounter_cyc2time(&pfvf->ptp->time_counter, tstamp);
-
- return 0;
-}
+MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
+MODULE_DESCRIPTION("Marvell RVU NIC PTP Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h
index 706d63a43ae1..7ff41927ceaf 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h
@@ -1,9 +1,28 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Marvell OcteonTx2 PTP support for ethernet driver */
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
#ifndef OTX2_PTP_H
#define OTX2_PTP_H
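+/* On OcteonTx2 the hardware delivers the RX timestamp big-endian, so it is
+ * byte swapped here; the TX timestamp is already in host byte order.
+ */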
+static inline u64 otx2_ptp_convert_rx_timestamp(u64 timestamp)
+{
+ return be64_to_cpu(*(__be64 *)&timestamp);
+}
+
+static inline u64 otx2_ptp_convert_tx_timestamp(u64 timestamp)
+{
+ return timestamp;
+}
+
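+/* CN10K packs the timestamp as seconds in the upper 32 bits and nanoseconds
+ * in the lower 32 bits; fold it into a single nanosecond count.
+ */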
+static inline u64 cn10k_ptp_convert_timestamp(u64 timestamp)
+{
+ return ((timestamp >> 32) * NSEC_PER_SEC) + (timestamp & 0xFFFFFFFFUL);
+}
+
int otx2_ptp_init(struct otx2_nic *pfvf);
void otx2_ptp_destroy(struct otx2_nic *pfvf);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
index 867f646e0802..6ef52051ab09 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
@@ -1,11 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Marvell OcteonTx2 RVU Ethernet driver
+/* Marvell RVU Ethernet driver
*
- * Copyright (C) 2020 Marvell International Ltd.
+ * Copyright (C) 2020 Marvell.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef OTX2_REG_H
@@ -44,6 +41,8 @@
#define RVU_PF_MSIX_VECX_ADDR(a) (0x000 | (a) << 4)
#define RVU_PF_MSIX_VECX_CTL(a) (0x008 | (a) << 4)
#define RVU_PF_MSIX_PBAX(a) (0xF0000 | (a) << 3)
+#define RVU_PF_VF_MBOX_ADDR (0xC40)
+#define RVU_PF_LMTLINE_ADDR (0xC48)
/* RVU VF registers */
#define RVU_VF_VFPF_MBOX0 (0x00000)
@@ -57,6 +56,7 @@
#define RVU_VF_MSIX_VECX_ADDR(a) (0x000 | (a) << 4)
#define RVU_VF_MSIX_VECX_CTL(a) (0x008 | (a) << 4)
#define RVU_VF_MSIX_PBAX(a) (0xF0000 | (a) << 3)
+#define RVU_VF_MBOX_REGION (0xC0000)
#define RVU_FUNC_BLKADDR_SHIFT 20
#define RVU_FUNC_BLKADDR_MASK 0x1FULL
@@ -91,6 +91,7 @@
#define NPA_LF_QINTX_INT_W1S(a) (NPA_LFBASE | 0x318 | (a) << 12)
#define NPA_LF_QINTX_ENA_W1S(a) (NPA_LFBASE | 0x320 | (a) << 12)
#define NPA_LF_QINTX_ENA_W1C(a) (NPA_LFBASE | 0x330 | (a) << 12)
+#define NPA_LF_AURA_BATCH_FREE0 (NPA_LFBASE | 0x400)
/* NIX LF registers */
#define NIX_LFBASE (BLKTYPE_NIX << RVU_FUNC_BLKADDR_SHIFT)
@@ -139,6 +140,7 @@
/* NIX AF transmit scheduler registers */
#define NIX_AF_SMQX_CFG(a) (0x700 | (a) << 16)
+#define NIX_AF_TL4X_SDP_LINK_CFG(a) (0xB10 | (a) << 16)
#define NIX_AF_TL1X_SCHEDULE(a) (0xC00 | (a) << 16)
#define NIX_AF_TL1X_CIR(a) (0xC20 | (a) << 16)
#define NIX_AF_TL1X_TOPOLOGY(a) (0xC80 | (a) << 16)
@@ -148,6 +150,7 @@
#define NIX_AF_TL3X_SCHEDULE(a) (0x1000 | (a) << 16)
#define NIX_AF_TL4X_PARENT(a) (0x1288 | (a) << 16)
#define NIX_AF_TL4X_SCHEDULE(a) (0x1200 | (a) << 16)
+#define NIX_AF_TL4X_PIR(a) (0x1230 | (a) << 16)
#define NIX_AF_MDQX_SCHEDULE(a) (0x1400 | (a) << 16)
#define NIX_AF_MDQX_PARENT(a) (0x1480 | (a) << 16)
#define NIX_AF_TL3_TL2X_LINKX_CFG(a, b) (0x1700 | (a) << 16 | (b) << 3)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_smqvf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_smqvf.c
new file mode 100644
index 000000000000..20f2ebb78945
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_smqvf.c
@@ -0,0 +1,282 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Virtual Function ethernet driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/etherdevice.h>
+#include <linux/pci.h>
+
+#include "otx2_common.h"
+#include "otx2_reg.h"
+#include "otx2_struct.h"
+#include "rvu_fixes.h"
+
+/* serialize device removal and xmit */
+DEFINE_MUTEX(remove_lock);
+
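+/* Prebuilt 64-byte Ethernet/IPv4/UDP frame used as the dummy packet that
+ * otx2smqvf_xmit() pushes through the SMQ; the installed NPC rule drops it.
+ */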
+static char pkt_data[64] = { 0x00, 0x0f, 0xb7, 0x11, 0xa6, 0x87, 0x02, 0xe0,
+ 0x28, 0xa5, 0xf6, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x32, 0x00, 0x00, 0x00, 0x00, 0x04, 0x11,
+ 0xee, 0x53, 0x50, 0x50, 0x50, 0x02, 0x14, 0x14,
+ 0x14, 0x02, 0x10, 0x00, 0x10, 0x01, 0x00, 0x1e,
+ 0x00, 0x00, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66,
+ 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e,
+ 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76 };
+
+static struct sk_buff *the_skb;
+static struct otx2_nic *the_smqvf;
+static u16 drop_entry = 0xFFFF;
+
+static bool is_otx2_smqvf(struct otx2_nic *vf)
+{
+ if (vf->pcifunc == RVU_SMQVF_PCIFUNC &&
+ (is_96xx_A0(vf->pdev) || is_95xx_A0(vf->pdev)))
+ return true;
+
+ return false;
+}
+
+static void __otx2_sqe_flush(struct otx2_snd_queue *sq, int size)
+{
+ u64 status;
+
+ /* Packet data stores should finish before SQE is flushed to HW */
+ dma_wmb();
+
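+ /* A zero status from otx2_lmt_flush() means the LMTST store did not
+ * complete, so retry until it does.
+ */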
+ do {
+ memcpy(sq->lmt_addr, sq->sqe_base, size);
+ status = otx2_lmt_flush(sq->io_addr);
+ } while (status == 0);
+
+ sq->head++;
+ sq->head &= (sq->sqe_cnt - 1);
+}
+
+static int otx2_ctx_update(struct otx2_nic *vf, u16 qidx)
+{
+ struct nix_aq_enq_req *sq_aq, *rq_aq, *cq_aq;
+
+ /* Do not link CQ for SQ and disable RQ, CQ */
+ sq_aq = otx2_mbox_alloc_msg_nix_aq_enq(&vf->mbox);
+ if (!sq_aq)
+ return -ENOMEM;
+
+ sq_aq->sq.cq_ena = 0;
+ sq_aq->sq_mask.cq_ena = 1;
+ sq_aq->qidx = qidx;
+ sq_aq->ctype = NIX_AQ_CTYPE_SQ;
+ sq_aq->op = NIX_AQ_INSTOP_WRITE;
+
+ rq_aq = otx2_mbox_alloc_msg_nix_aq_enq(&vf->mbox);
+ if (!rq_aq)
+ return -ENOMEM;
+
+ rq_aq->rq.ena = 0;
+ rq_aq->rq_mask.ena = 1;
+ rq_aq->qidx = qidx;
+ rq_aq->ctype = NIX_AQ_CTYPE_RQ;
+ rq_aq->op = NIX_AQ_INSTOP_WRITE;
+
+ cq_aq = otx2_mbox_alloc_msg_nix_aq_enq(&vf->mbox);
+ if (!cq_aq)
+ return -ENOMEM;
+
+ cq_aq->cq.ena = 0;
+ cq_aq->cq_mask.ena = 1;
+ cq_aq->qidx = qidx;
+ cq_aq->ctype = NIX_AQ_CTYPE_CQ;
+ cq_aq->op = NIX_AQ_INSTOP_WRITE;
+
+ return otx2_sync_mbox_msg(&vf->mbox);
+}
+
+void otx2smqvf_xmit(void)
+{
+ struct otx2_snd_queue *sq;
+ int i, size;
+
+ mutex_lock(&remove_lock);
+
+ if (!the_smqvf) {
+ mutex_unlock(&remove_lock);
+ return;
+ }
+
+ sq = &the_smqvf->qset.sq[0];
+ /* Min. set of send descriptors required to send packets */
+ size = sizeof(struct nix_sqe_hdr_s) + sizeof(struct nix_sqe_sg_s) +
+ sizeof(struct nix_sqe_ext_s) + sizeof(u64);
+
+ for (i = 0; i < 256; i++)
+ __otx2_sqe_flush(sq, size);
+
+ mutex_unlock(&remove_lock);
+}
+EXPORT_SYMBOL(otx2smqvf_xmit);
+
+static int otx2smqvf_install_flow(struct otx2_nic *vf)
+{
+ struct npc_mcam_alloc_entry_req *alloc_req;
+ struct npc_mcam_free_entry_req *free_req;
+ struct npc_install_flow_req *install_req;
+ struct npc_mcam_alloc_entry_rsp *rsp;
+ struct msg_req *msg;
+ int err, qid;
+ size_t size;
+ void *data;
+
+ size = SKB_DATA_ALIGN(64 + OTX2_ALIGN) + NET_SKB_PAD +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+ err = -ENOMEM;
+
+ data = kzalloc(size, GFP_KERNEL);
+ if (!data)
+ return err;
+
+ memcpy(data, &pkt_data, 64);
+
+ the_skb = build_skb(data, 0);
+ the_skb->len = 64;
+
+ for (qid = 0; qid < vf->hw.tx_queues; qid++) {
+ err = otx2_ctx_update(vf, qid);
+ /* If something is wrong with Q0, treat it as an error */
+ if (err && !qid)
+ goto err_free_mem;
+ }
+
+ mutex_lock(&vf->mbox.lock);
+
+ alloc_req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&vf->mbox);
+ if (!alloc_req) {
+ mutex_unlock(&vf->mbox.lock);
+ goto err_free_mem;
+ }
+ alloc_req->count = 1;
+ alloc_req->contig = true;
+
+ /* Send message to AF */
+ if (otx2_sync_mbox_msg(&vf->mbox)) {
+ err = -EINVAL;
+ mutex_unlock(&vf->mbox.lock);
+ goto err_free_mem;
+ }
+ mutex_unlock(&vf->mbox.lock);
+
+ rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
+ (&vf->mbox.mbox, 0, &alloc_req->hdr);
+ drop_entry = rsp->entry;
+
+ mutex_lock(&vf->mbox.lock);
+
+ /* Send messages to drop Tx packets at NPC and stop Rx traffic */
+ install_req = otx2_mbox_alloc_msg_npc_install_flow(&vf->mbox);
+ if (!install_req) {
+ err = -ENOMEM;
+ mutex_unlock(&vf->mbox.lock);
+ goto err_free_entry;
+ }
+
+ u64_to_ether_addr(0x0ull, install_req->mask.dmac);
+ install_req->entry = drop_entry;
+ install_req->features = BIT_ULL(NPC_DMAC);
+ install_req->intf = NIX_INTF_TX;
+ install_req->op = NIX_TX_ACTIONOP_DROP;
+ install_req->set_cntr = 1;
+
+ msg = otx2_mbox_alloc_msg_nix_lf_stop_rx(&vf->mbox);
+ if (!msg) {
+ mutex_unlock(&vf->mbox.lock);
+ goto err_free_entry;
+ }
+
+ /* Send message to AF */
+ if (otx2_sync_mbox_msg(&vf->mbox)) {
+ err = -EINVAL;
+ mutex_unlock(&vf->mbox.lock);
+ goto err_free_entry;
+ }
+ mutex_unlock(&vf->mbox.lock);
+
+ otx2_sq_append_skb(vf->netdev, &vf->qset.sq[0], the_skb, 0);
+
+ return 0;
+
+err_free_entry:
+ mutex_lock(&vf->mbox.lock);
+ free_req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&vf->mbox);
+ if (!free_req) {
+ dev_err(vf->dev, "Could not allocate msg for freeing entry\n");
+ } else {
+ free_req->entry = drop_entry;
+ WARN_ON(otx2_sync_mbox_msg(&vf->mbox));
+ }
+ mutex_unlock(&vf->mbox.lock);
+err_free_mem:
+ kfree_skb(the_skb);
+ drop_entry = 0xFFFF;
+ return err;
+}
+
+int otx2smqvf_probe(struct otx2_nic *vf)
+{
+ int err;
+
+ if (!is_otx2_smqvf(vf))
+ return -EPERM;
+
+ err = otx2_open(vf->netdev);
+ if (err)
+ return -EINVAL;
+
+ /* Disable QINT interrupts because we do not use a CQ for SQ and
+ * drop TX packets intentionally
+ */
+ otx2_write64(vf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0));
+
+ err = otx2smqvf_install_flow(vf);
+ if (err) {
+ otx2_stop(vf->netdev);
+ return -EINVAL;
+ }
+
+ the_smqvf = vf;
+
+ return 0;
+}
+
+int otx2smqvf_remove(struct otx2_nic *vf)
+{
+ struct npc_mcam_free_entry_req *free_req;
+ struct npc_delete_flow_req *del_req;
+
+ if (!is_otx2_smqvf(vf))
+ return -EPERM;
+
+ mutex_lock(&remove_lock);
+ kfree_skb(the_skb);
+ the_smqvf = NULL;
+ the_skb = NULL;
+ mutex_unlock(&remove_lock);
+
+ mutex_lock(&vf->mbox.lock);
+ del_req = otx2_mbox_alloc_msg_npc_delete_flow(&vf->mbox);
+ free_req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&vf->mbox);
+ if (!del_req || !free_req) {
+ dev_err(vf->dev, "Could not allocate msg for freeing entry\n");
+ } else {
+ del_req->entry = drop_entry;
+ free_req->entry = drop_entry;
+ WARN_ON(otx2_sync_mbox_msg(&vf->mbox));
+ }
+ mutex_unlock(&vf->mbox.lock);
+
+ otx2_stop(vf->netdev);
+ drop_entry = 0xFFFF;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
index cba59ddf71bb..aa205a0d158f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
@@ -1,11 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Marvell OcteonTx2 RVU Ethernet driver
+/* Marvell RVU Ethernet driver
*
- * Copyright (C) 2020 Marvell International Ltd.
+ * Copyright (C) 2020 Marvell.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef OTX2_STRUCT_H
@@ -142,7 +139,9 @@ struct nix_rx_parse_s {
u64 vtag0_ptr : 8; /* W5 */
u64 vtag1_ptr : 8;
u64 flow_key_alg : 5;
- u64 rsvd_383_341 : 43;
+ u64 rsvd_359_341 : 19;
+ u64 color : 2;
+ u64 rsvd_383_362 : 22;
u64 rsvd_447_384; /* W6 */
};
@@ -218,7 +217,8 @@ struct nix_sqe_ext_s {
u64 vlan1_ins_tci : 16;
u64 vlan0_ins_ena : 1;
u64 vlan1_ins_ena : 1;
- u64 rsvd_127_114 : 14;
+ u64 init_color : 2;
+ u64 rsvd_127_116 : 12;
};
struct nix_sqe_sg_s {
@@ -236,8 +236,16 @@ struct nix_sqe_sg_s {
/* NIX send memory subdescriptor structure */
struct nix_sqe_mem_s {
- u64 offset : 16; /* W0 */
- u64 rsvd_52_16 : 37;
+ u64 start_offset : 8;
+ u64 rsvd_11_8 : 4;
+ u64 rsvd_12 : 1;
+ u64 udp_csum_crt : 1;
+ u64 update64 : 1;
+ u64 rsvd_15_16 : 1;
+ u64 base_ns : 32;
+ u64 step_type : 1;
+ u64 rsvd_51_49 : 3;
+ u64 per_lso_seg : 1;
u64 wmem : 1;
u64 dsz : 2;
u64 alg : 4;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
new file mode 100644
index 000000000000..c6229b3593d7
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
@@ -0,0 +1,1065 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2021 Marvell.
+ *
+ */
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/rhashtable.h>
+#include <linux/bitfield.h>
+#include <net/flow_dissector.h>
+#include <net/pkt_cls.h>
+#include <net/tc_act/tc_gact.h>
+#include <net/tc_act/tc_mirred.h>
+#include <net/tc_act/tc_vlan.h>
+#include <net/ipv6.h>
+
+#include "cn10k.h"
+#include "otx2_common.h"
+
+/* Egress rate limiting definitions */
+#define MAX_BURST_EXPONENT 0x0FULL
+#define MAX_BURST_MANTISSA 0xFFULL
+#define MAX_BURST_SIZE 130816ULL
+#define MAX_RATE_DIVIDER_EXPONENT 12ULL
+#define MAX_RATE_EXPONENT 0x0FULL
+#define MAX_RATE_MANTISSA 0xFFULL
+
+/* Bitfields in NIX_TLX_PIR register */
+#define TLX_RATE_MANTISSA GENMASK_ULL(8, 1)
+#define TLX_RATE_EXPONENT GENMASK_ULL(12, 9)
+#define TLX_RATE_DIVIDER_EXPONENT GENMASK_ULL(16, 13)
+#define TLX_BURST_MANTISSA GENMASK_ULL(36, 29)
+#define TLX_BURST_EXPONENT GENMASK_ULL(40, 37)
+
+struct otx2_tc_flow_stats {
+ u64 bytes;
+ u64 pkts;
+ u64 used;
+};
+
+struct otx2_tc_flow {
+ struct rhash_head node;
+ unsigned long cookie;
+ unsigned int bitpos;
+ struct rcu_head rcu;
+ struct otx2_tc_flow_stats stats;
+ spinlock_t lock; /* lock for stats */
+ u16 rq;
+ u16 entry;
+ u16 leaf_profile;
+ bool is_act_police;
+};
+
+int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic)
+{
+ struct otx2_tc_info *tc = &nic->tc_info;
+
+ if (!nic->flow_cfg->max_flows)
+ return 0;
+
+ /* Max flows changed, free the existing bitmap */
+ kfree(tc->tc_entries_bitmap);
+
+ tc->tc_entries_bitmap =
+ kcalloc(BITS_TO_LONGS(nic->flow_cfg->max_flows),
+ sizeof(long), GFP_KERNEL);
+ if (!tc->tc_entries_bitmap) {
+ netdev_err(nic->netdev,
+ "Unable to alloc TC flow entries bitmap\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(otx2_tc_alloc_ent_bitmap);
+
+static void otx2_get_egress_burst_cfg(u32 burst, u32 *burst_exp,
+ u32 *burst_mantissa)
+{
+ unsigned int tmp;
+
+ /* Burst is calculated as
+ * ((256 + BURST_MANTISSA) << (1 + BURST_EXPONENT)) / 256
+ * Max supported burst size is 130,816 bytes.
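+ * For example, the maximum encoding burst_exp = 0xF and
+ * burst_mantissa = 0xFF gives ((256 + 255) << 16) / 256 = 130,816 bytes.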
+ */
+ burst = min_t(u32, burst, MAX_BURST_SIZE);
+ if (burst) {
+ *burst_exp = ilog2(burst) ? ilog2(burst) - 1 : 0;
+ tmp = burst - rounddown_pow_of_two(burst);
+ if (burst < MAX_BURST_MANTISSA)
+ *burst_mantissa = tmp * 2;
+ else
+ *burst_mantissa = tmp / (1ULL << (*burst_exp - 7));
+ } else {
+ *burst_exp = MAX_BURST_EXPONENT;
+ *burst_mantissa = MAX_BURST_MANTISSA;
+ }
+}
+
+static void otx2_get_egress_rate_cfg(u32 maxrate, u32 *exp,
+ u32 *mantissa, u32 *div_exp)
+{
+ unsigned int tmp;
+
+ /* Rate calculation by hardware
+ *
+ * PIR_ADD = ((256 + mantissa) << exp) / 256
+ * rate = (2 * PIR_ADD) / ( 1 << div_exp)
+ * The resultant rate is in Mbps.
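+ * For example, a 1000 Mbps target maps to exp = 8, mantissa = 244 and
+ * div_exp = 0: PIR_ADD = ((256 + 244) << 8) / 256 = 500, so
+ * rate = 2 * 500 = 1000 Mbps.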
+ */
+
+ /* 2Mbps to 100Gbps can be expressed with div_exp = 0.
+ * Setting this to '0' will ease the calculation of
+ * exponent and mantissa.
+ */
+ *div_exp = 0;
+
+ if (maxrate) {
+ *exp = ilog2(maxrate) ? ilog2(maxrate) - 1 : 0;
+ tmp = maxrate - rounddown_pow_of_two(maxrate);
+ if (maxrate < MAX_RATE_MANTISSA)
+ *mantissa = tmp * 2;
+ else
+ *mantissa = tmp / (1ULL << (*exp - 7));
+ } else {
+ /* Instead of disabling rate limiting, set all values to max */
+ *exp = MAX_RATE_EXPONENT;
+ *mantissa = MAX_RATE_MANTISSA;
+ }
+}
+
+static int otx2_set_matchall_egress_rate(struct otx2_nic *nic, u32 burst, u32 maxrate)
+{
+ struct otx2_hw *hw = &nic->hw;
+ struct nix_txschq_config *req;
+ u32 burst_exp, burst_mantissa;
+ u32 exp, mantissa, div_exp;
+ int txschq, err;
+
+ /* All SQs share the same TL4, so pick the first scheduler */
+ txschq = hw->txschq_list[NIX_TXSCH_LVL_TL4][0];
+
+ /* Get exponent and mantissa values from the desired rate */
+ otx2_get_egress_burst_cfg(burst, &burst_exp, &burst_mantissa);
+ otx2_get_egress_rate_cfg(maxrate, &exp, &mantissa, &div_exp);
+
+ mutex_lock(&nic->mbox.lock);
+ req = otx2_mbox_alloc_msg_nix_txschq_cfg(&nic->mbox);
+ if (!req) {
+ mutex_unlock(&nic->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->lvl = NIX_TXSCH_LVL_TL4;
+ req->num_regs = 1;
+ req->reg[0] = NIX_AF_TL4X_PIR(txschq);
+ req->regval[0] = FIELD_PREP(TLX_BURST_EXPONENT, burst_exp) |
+ FIELD_PREP(TLX_BURST_MANTISSA, burst_mantissa) |
+ FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
+ FIELD_PREP(TLX_RATE_EXPONENT, exp) |
+ FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
+
+ err = otx2_sync_mbox_msg(&nic->mbox);
+ mutex_unlock(&nic->mbox.lock);
+ return err;
+}
+
+static int otx2_tc_validate_flow(struct otx2_nic *nic,
+ struct flow_action *actions,
+ struct netlink_ext_ack *extack)
+{
+ if (nic->flags & OTX2_FLAG_INTF_DOWN) {
+ NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
+ return -EINVAL;
+ }
+
+ if (!flow_action_has_entries(actions)) {
+ NL_SET_ERR_MSG_MOD(extack, "MATCHALL offload called with no action");
+ return -EINVAL;
+ }
+
+ if (!flow_offload_has_one_action(actions)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Egress MATCHALL offload supports only 1 policing action");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int otx2_tc_egress_matchall_install(struct otx2_nic *nic,
+ struct tc_cls_matchall_offload *cls)
+{
+ struct netlink_ext_ack *extack = cls->common.extack;
+ struct flow_action *actions = &cls->rule->action;
+ struct flow_action_entry *entry;
+ u32 rate;
+ int err;
+
+ err = otx2_tc_validate_flow(nic, actions, extack);
+ if (err)
+ return err;
+
+ if (nic->flags & OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only one Egress MATCHALL ratelimiter can be offloaded");
+ return -ENOMEM;
+ }
+
+ entry = &cls->rule->action.entries[0];
+ switch (entry->id) {
+ case FLOW_ACTION_POLICE:
+ if (entry->police.rate_pkt_ps) {
+ NL_SET_ERR_MSG_MOD(extack, "QoS offload not support packets per second");
+ return -EOPNOTSUPP;
+ }
+ /* Convert bytes per second to Mbps */
+ rate = entry->police.rate_bytes_ps * 8;
+ rate = max_t(u32, rate / 1000000, 1);
+ err = otx2_set_matchall_egress_rate(nic, entry->police.burst, rate);
+ if (err)
+ return err;
+ nic->flags |= OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only police action is supported with Egress MATCHALL offload");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int otx2_tc_egress_matchall_delete(struct otx2_nic *nic,
+ struct tc_cls_matchall_offload *cls)
+{
+ struct netlink_ext_ack *extack = cls->common.extack;
+ int err;
+
+ if (nic->flags & OTX2_FLAG_INTF_DOWN) {
+ NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
+ return -EINVAL;
+ }
+
+ err = otx2_set_matchall_egress_rate(nic, 0, 0);
+ nic->flags &= ~OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED;
+ return err;
+}
+
+static int otx2_tc_act_set_police(struct otx2_nic *nic,
+ struct otx2_tc_flow *node,
+ struct flow_cls_offload *f,
+ u64 rate, u32 burst, u32 mark,
+ struct npc_install_flow_req *req, bool pps)
+{
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct otx2_hw *hw = &nic->hw;
+ int rq_idx, rc;
+
+ rq_idx = find_first_zero_bit(&nic->rq_bmap, hw->rx_queues);
+ if (rq_idx >= hw->rx_queues) {
+ NL_SET_ERR_MSG_MOD(extack, "Police action rules exceeded");
+ return -EINVAL;
+ }
+
+ mutex_lock(&nic->mbox.lock);
+
+ rc = cn10k_alloc_leaf_profile(nic, &node->leaf_profile);
+ if (rc) {
+ mutex_unlock(&nic->mbox.lock);
+ return rc;
+ }
+
+ rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile, burst, rate, pps);
+ if (rc)
+ goto free_leaf;
+
+ rc = cn10k_map_unmap_rq_policer(nic, rq_idx, node->leaf_profile, true);
+ if (rc)
+ goto free_leaf;
+
+ mutex_unlock(&nic->mbox.lock);
+
+ req->match_id = mark & 0xFFFFULL;
+ req->index = rq_idx;
+ req->op = NIX_RX_ACTIONOP_UCAST;
+ set_bit(rq_idx, &nic->rq_bmap);
+ node->is_act_police = true;
+ node->rq = rq_idx;
+
+ return 0;
+
+free_leaf:
+ if (cn10k_free_leaf_profile(nic, node->leaf_profile))
+ netdev_err(nic->netdev,
+ "Unable to free leaf bandwidth profile(%d)\n",
+ node->leaf_profile);
+ mutex_unlock(&nic->mbox.lock);
+ return rc;
+}
+
+static int otx2_tc_parse_actions(struct otx2_nic *nic,
+ struct flow_action *flow_action,
+ struct npc_install_flow_req *req,
+ struct flow_cls_offload *f,
+ struct otx2_tc_flow *node)
+{
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct flow_action_entry *act;
+ struct net_device *target;
+ struct otx2_nic *priv;
+ u32 burst, mark = 0;
+ u8 nr_police = 0;
+ bool pps = false;
+ u64 rate;
+ int i;
+
+ if (!flow_action_has_entries(flow_action)) {
+ NL_SET_ERR_MSG_MOD(extack, "no tc actions specified");
+ return -EINVAL;
+ }
+
+ flow_action_for_each(i, act, flow_action) {
+ switch (act->id) {
+ case FLOW_ACTION_DROP:
+ req->op = NIX_RX_ACTIONOP_DROP;
+ return 0;
+ case FLOW_ACTION_ACCEPT:
+ req->op = NIX_RX_ACTION_DEFAULT;
+ return 0;
+ case FLOW_ACTION_REDIRECT_INGRESS:
+ target = act->dev;
+ priv = netdev_priv(target);
+ /* npc_install_flow_req doesn't support passing a target pcifunc */
+ if (rvu_get_pf(nic->pcifunc) != rvu_get_pf(priv->pcifunc)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "can't redirect to other pf/vf");
+ return -EOPNOTSUPP;
+ }
+ req->vf = priv->pcifunc & RVU_PFVF_FUNC_MASK;
+ req->op = NIX_RX_ACTION_DEFAULT;
+ return 0;
+ case FLOW_ACTION_VLAN_POP:
+ req->vtag0_valid = true;
+ /* use RX_VTAG_TYPE7 which is initialized to strip vlan tag */
+ req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7;
+ break;
+ case FLOW_ACTION_POLICE:
+ /* Ingress ratelimiting is not supported on OcteonTx2 */
+ if (is_dev_otx2(nic->pdev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Ingress policing not supported on this platform");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.rate_bytes_ps > 0) {
+ rate = act->police.rate_bytes_ps * 8;
+ burst = act->police.burst;
+ } else if (act->police.rate_pkt_ps > 0) {
+ /* The algorithm used to calculate the rate
+ * mantissa and exponent values for a given token
+ * rate (a token can be a byte or a packet)
+ * requires the token rate to be multiplied by 8.
+ */
+ rate = act->police.rate_pkt_ps * 8;
+ burst = act->police.burst_pkt;
+ pps = true;
+ }
+ nr_police++;
+ break;
+ case FLOW_ACTION_MARK:
+ mark = act->mark;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
+ if (nr_police > 1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "rate limit police offload requires a single action");
+ return -EOPNOTSUPP;
+ }
+
+ if (nr_police)
+ return otx2_tc_act_set_police(nic, node, f, rate, burst,
+ mark, req, pps);
+
+ return 0;
+}
+
+static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
+ struct flow_cls_offload *f,
+ struct npc_install_flow_req *req)
+{
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct flow_msg *flow_spec = &req->packet;
+ struct flow_msg *flow_mask = &req->mask;
+ struct flow_dissector *dissector;
+ struct flow_rule *rule;
+ u8 ip_proto = 0;
+
+ rule = flow_cls_offload_flow_rule(f);
+ dissector = rule->match.dissector;
+
+ if ((dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_VLAN) |
+ BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT(FLOW_DISSECTOR_KEY_IP)))) {
+ netdev_info(nic->netdev, "unsupported flow used key 0x%x",
+ dissector->used_keys);
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+
+ flow_rule_match_basic(rule, &match);
+
+ /* All EtherTypes can be matched, no hw limitation */
+ flow_spec->etype = match.key->n_proto;
+ flow_mask->etype = match.mask->n_proto;
+ req->features |= BIT_ULL(NPC_ETYPE);
+
+ if (match.mask->ip_proto &&
+ (match.key->ip_proto != IPPROTO_TCP &&
+ match.key->ip_proto != IPPROTO_UDP &&
+ match.key->ip_proto != IPPROTO_SCTP &&
+ match.key->ip_proto != IPPROTO_ICMP &&
+ match.key->ip_proto != IPPROTO_ICMPV6)) {
+ netdev_info(nic->netdev,
+ "ip_proto=0x%x not supported\n",
+ match.key->ip_proto);
+ return -EOPNOTSUPP;
+ }
+ if (match.mask->ip_proto)
+ ip_proto = match.key->ip_proto;
+
+ if (ip_proto == IPPROTO_UDP)
+ req->features |= BIT_ULL(NPC_IPPROTO_UDP);
+ else if (ip_proto == IPPROTO_TCP)
+ req->features |= BIT_ULL(NPC_IPPROTO_TCP);
+ else if (ip_proto == IPPROTO_SCTP)
+ req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
+ else if (ip_proto == IPPROTO_ICMP)
+ req->features |= BIT_ULL(NPC_IPPROTO_ICMP);
+ else if (ip_proto == IPPROTO_ICMPV6)
+ req->features |= BIT_ULL(NPC_IPPROTO_ICMP6);
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+
+ flow_rule_match_eth_addrs(rule, &match);
+ if (!is_zero_ether_addr(match.mask->src)) {
+ NL_SET_ERR_MSG_MOD(extack, "src mac match not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (!is_zero_ether_addr(match.mask->dst)) {
+ ether_addr_copy(flow_spec->dmac, (u8 *)&match.key->dst);
+ ether_addr_copy(flow_mask->dmac,
+ (u8 *)&match.mask->dst);
+ req->features |= BIT_ULL(NPC_DMAC);
+ }
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
+ struct flow_match_ip match;
+
+ flow_rule_match_ip(rule, &match);
+ if ((ntohs(flow_spec->etype) != ETH_P_IP) &&
+ match.mask->tos) {
+ NL_SET_ERR_MSG_MOD(extack, "tos not supported");
+ return -EOPNOTSUPP;
+ }
+ if (match.mask->ttl) {
+ NL_SET_ERR_MSG_MOD(extack, "ttl not supported");
+ return -EOPNOTSUPP;
+ }
+ flow_spec->tos = match.key->tos;
+ flow_mask->tos = match.mask->tos;
+ req->features |= BIT_ULL(NPC_TOS);
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+ u16 vlan_tci, vlan_tci_mask;
+
+ flow_rule_match_vlan(rule, &match);
+
+ if (ntohs(match.key->vlan_tpid) != ETH_P_8021Q) {
+ netdev_err(nic->netdev, "vlan tpid 0x%x not supported\n",
+ ntohs(match.key->vlan_tpid));
+ return -EOPNOTSUPP;
+ }
+
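+ /* Build the 802.1Q TCI: VLAN ID in bits 0-11, DEI in bit 12
+ * and priority (PCP) in bits 13-15.
+ */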
+ if (match.mask->vlan_id ||
+ match.mask->vlan_dei ||
+ match.mask->vlan_priority) {
+ vlan_tci = match.key->vlan_id |
+ match.key->vlan_dei << 12 |
+ match.key->vlan_priority << 13;
+
+ vlan_tci_mask = match.mask->vlan_id |
+ match.mask->vlan_dei << 12 |
+ match.mask->vlan_priority << 13;
+
+ flow_spec->vlan_tci = htons(vlan_tci);
+ flow_mask->vlan_tci = htons(vlan_tci_mask);
+ req->features |= BIT_ULL(NPC_OUTER_VID);
+ }
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+ struct flow_match_ipv4_addrs match;
+
+ flow_rule_match_ipv4_addrs(rule, &match);
+
+ flow_spec->ip4dst = match.key->dst;
+ flow_mask->ip4dst = match.mask->dst;
+ req->features |= BIT_ULL(NPC_DIP_IPV4);
+
+ flow_spec->ip4src = match.key->src;
+ flow_mask->ip4src = match.mask->src;
+ req->features |= BIT_ULL(NPC_SIP_IPV4);
+ } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
+ struct flow_match_ipv6_addrs match;
+
+ flow_rule_match_ipv6_addrs(rule, &match);
+
+ if (ipv6_addr_loopback(&match.key->dst) ||
+ ipv6_addr_loopback(&match.key->src)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Flow matching IPv6 loopback addr not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (!ipv6_addr_any(&match.mask->dst)) {
+ memcpy(&flow_spec->ip6dst,
+ (struct in6_addr *)&match.key->dst,
+ sizeof(flow_spec->ip6dst));
+ memcpy(&flow_mask->ip6dst,
+ (struct in6_addr *)&match.mask->dst,
+ sizeof(flow_spec->ip6dst));
+ req->features |= BIT_ULL(NPC_DIP_IPV6);
+ }
+
+ if (!ipv6_addr_any(&match.mask->src)) {
+ memcpy(&flow_spec->ip6src,
+ (struct in6_addr *)&match.key->src,
+ sizeof(flow_spec->ip6src));
+ memcpy(&flow_mask->ip6src,
+ (struct in6_addr *)&match.mask->src,
+ sizeof(flow_spec->ip6src));
+ req->features |= BIT_ULL(NPC_SIP_IPV6);
+ }
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports match;
+
+ flow_rule_match_ports(rule, &match);
+
+ flow_spec->dport = match.key->dst;
+ flow_mask->dport = match.mask->dst;
+ if (ip_proto == IPPROTO_UDP)
+ req->features |= BIT_ULL(NPC_DPORT_UDP);
+ else if (ip_proto == IPPROTO_TCP)
+ req->features |= BIT_ULL(NPC_DPORT_TCP);
+ else if (ip_proto == IPPROTO_SCTP)
+ req->features |= BIT_ULL(NPC_DPORT_SCTP);
+
+ flow_spec->sport = match.key->src;
+ flow_mask->sport = match.mask->src;
+ if (ip_proto == IPPROTO_UDP)
+ req->features |= BIT_ULL(NPC_SPORT_UDP);
+ else if (ip_proto == IPPROTO_TCP)
+ req->features |= BIT_ULL(NPC_SPORT_TCP);
+ else if (ip_proto == IPPROTO_SCTP)
+ req->features |= BIT_ULL(NPC_SPORT_SCTP);
+ }
+
+ return otx2_tc_parse_actions(nic, &rule->action, req, f, node);
+}
+
+static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry)
+{
+ struct npc_delete_flow_req *req;
+ int err;
+
+ mutex_lock(&nic->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_delete_flow(&nic->mbox);
+ if (!req) {
+ mutex_unlock(&nic->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->entry = entry;
+
+ /* Send message to AF */
+ err = otx2_sync_mbox_msg(&nic->mbox);
+ if (err) {
+ netdev_err(nic->netdev, "Failed to delete MCAM flow entry %d\n",
+ entry);
+ mutex_unlock(&nic->mbox.lock);
+ return -EFAULT;
+ }
+ mutex_unlock(&nic->mbox.lock);
+
+ return 0;
+}
+
+static int otx2_tc_del_flow(struct otx2_nic *nic,
+ struct flow_cls_offload *tc_flow_cmd)
+{
+ struct otx2_flow_config *flow_cfg = nic->flow_cfg;
+ struct otx2_tc_info *tc_info = &nic->tc_info;
+ struct otx2_tc_flow *flow_node;
+ int err;
+
+ flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
+ &tc_flow_cmd->cookie,
+ tc_info->flow_ht_params);
+ if (!flow_node) {
+ netdev_err(nic->netdev, "tc flow not found for cookie 0x%lx\n",
+ tc_flow_cmd->cookie);
+ return -EINVAL;
+ }
+
+ if (flow_node->is_act_police) {
+ mutex_lock(&nic->mbox.lock);
+
+ err = cn10k_map_unmap_rq_policer(nic, flow_node->rq,
+ flow_node->leaf_profile, false);
+ if (err)
+ netdev_err(nic->netdev,
+ "Unmapping RQ %d & profile %d failed\n",
+ flow_node->rq, flow_node->leaf_profile);
+
+ err = cn10k_free_leaf_profile(nic, flow_node->leaf_profile);
+ if (err)
+ netdev_err(nic->netdev,
+ "Unable to free leaf bandwidth profile(%d)\n",
+ flow_node->leaf_profile);
+
+ __clear_bit(flow_node->rq, &nic->rq_bmap);
+
+ mutex_unlock(&nic->mbox.lock);
+ }
+
+ otx2_del_mcam_flow_entry(nic, flow_node->entry);
+
+ clear_bit(flow_node->bitpos, tc_info->tc_entries_bitmap);
+ flow_cfg->nr_flows--;
+
+ WARN_ON(rhashtable_remove_fast(&nic->tc_info.flow_table,
+ &flow_node->node,
+ nic->tc_info.flow_ht_params));
+ kfree_rcu(flow_node, rcu);
+
+ return 0;
+}
+
+static int otx2_tc_add_flow(struct otx2_nic *nic,
+ struct flow_cls_offload *tc_flow_cmd)
+{
+ struct netlink_ext_ack *extack = tc_flow_cmd->common.extack;
+ struct otx2_flow_config *flow_cfg = nic->flow_cfg;
+ struct otx2_tc_info *tc_info = &nic->tc_info;
+ struct otx2_tc_flow *new_node, *old_node;
+ struct npc_install_flow_req *req, dummy;
+ int rc, err;
+
+ if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT))
+ return -ENOMEM;
+
+ if (bitmap_full(tc_info->tc_entries_bitmap, flow_cfg->max_flows)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Free MCAM entry not available to add the flow");
+ return -ENOMEM;
+ }
+
+ /* allocate memory for the new flow and its node */
+ new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
+ if (!new_node)
+ return -ENOMEM;
+ spin_lock_init(&new_node->lock);
+ new_node->cookie = tc_flow_cmd->cookie;
+
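+ /* Parse the rule into a local request first so that a parse failure
+ * does not consume a mailbox message.
+ */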
+ memset(&dummy, 0, sizeof(struct npc_install_flow_req));
+
+ rc = otx2_tc_prepare_flow(nic, new_node, tc_flow_cmd, &dummy);
+ if (rc) {
+ kfree_rcu(new_node, rcu);
+ return rc;
+ }
+
+ /* If a flow exists with the same cookie, delete it */
+ old_node = rhashtable_lookup_fast(&tc_info->flow_table,
+ &tc_flow_cmd->cookie,
+ tc_info->flow_ht_params);
+ if (old_node)
+ otx2_tc_del_flow(nic, tc_flow_cmd);
+
+ mutex_lock(&nic->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
+ if (!req) {
+ mutex_unlock(&nic->mbox.lock);
+ rc = -ENOMEM;
+ goto free_leaf;
+ }
+
+ memcpy(&dummy.hdr, &req->hdr, sizeof(struct mbox_msghdr));
+ memcpy(req, &dummy, sizeof(struct npc_install_flow_req));
+
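+ /* TC flows use MCAM entries from the end of the allocated list,
+ * so bit position N maps to flow_ent[max_flows - N - 1].
+ */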
+ new_node->bitpos = find_first_zero_bit(tc_info->tc_entries_bitmap,
+ flow_cfg->max_flows);
+ req->channel = nic->hw.rx_chan_base;
+ req->entry = flow_cfg->flow_ent[flow_cfg->max_flows - new_node->bitpos - 1];
+ req->intf = NIX_INTF_RX;
+ req->set_cntr = 1;
+ new_node->entry = req->entry;
+
+ /* Send message to AF */
+ rc = otx2_sync_mbox_msg(&nic->mbox);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to install MCAM flow entry");
+ mutex_unlock(&nic->mbox.lock);
+ goto free_leaf;
+ }
+ mutex_unlock(&nic->mbox.lock);
+
+ /* add new flow to flow-table */
+ rc = rhashtable_insert_fast(&nic->tc_info.flow_table, &new_node->node,
+ nic->tc_info.flow_ht_params);
+ if (rc) {
+ otx2_del_mcam_flow_entry(nic, req->entry);
+ goto free_leaf;
+ }
+
+ set_bit(new_node->bitpos, tc_info->tc_entries_bitmap);
+ flow_cfg->nr_flows++;
+
+ return 0;
+
+free_leaf:
+ if (new_node->is_act_police) {
+ mutex_lock(&nic->mbox.lock);
+
+ err = cn10k_map_unmap_rq_policer(nic, new_node->rq,
+ new_node->leaf_profile, false);
+ if (err)
+ netdev_err(nic->netdev,
+ "Unmapping RQ %d & profile %d failed\n",
+ new_node->rq, new_node->leaf_profile);
+ err = cn10k_free_leaf_profile(nic, new_node->leaf_profile);
+ if (err)
+ netdev_err(nic->netdev,
+ "Unable to free leaf bandwidth profile(%d)\n",
+ new_node->leaf_profile);
+
+ __clear_bit(new_node->rq, &nic->rq_bmap);
+
+ mutex_unlock(&nic->mbox.lock);
+ }
+ kfree_rcu(new_node, rcu);
+
+ return rc;
+}
+
+static int otx2_tc_get_flow_stats(struct otx2_nic *nic,
+ struct flow_cls_offload *tc_flow_cmd)
+{
+ struct otx2_tc_info *tc_info = &nic->tc_info;
+ struct npc_mcam_get_stats_req *req;
+ struct npc_mcam_get_stats_rsp *rsp;
+ struct otx2_tc_flow_stats *stats;
+ struct otx2_tc_flow *flow_node;
+ int err;
+
+ flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
+ &tc_flow_cmd->cookie,
+ tc_info->flow_ht_params);
+ if (!flow_node) {
+ netdev_info(nic->netdev, "tc flow not found for cookie %lx",
+ tc_flow_cmd->cookie);
+ return -EINVAL;
+ }
+
+ mutex_lock(&nic->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_npc_mcam_entry_stats(&nic->mbox);
+ if (!req) {
+ mutex_unlock(&nic->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->entry = flow_node->entry;
+
+ err = otx2_sync_mbox_msg(&nic->mbox);
+ if (err) {
+ netdev_err(nic->netdev, "Failed to get stats for MCAM flow entry %d\n",
+ req->entry);
+ mutex_unlock(&nic->mbox.lock);
+ return -EFAULT;
+ }
+
+ rsp = (struct npc_mcam_get_stats_rsp *)otx2_mbox_get_rsp
+ (&nic->mbox.mbox, 0, &req->hdr);
+ if (IS_ERR(rsp)) {
+ mutex_unlock(&nic->mbox.lock);
+ return PTR_ERR(rsp);
+ }
+
+ mutex_unlock(&nic->mbox.lock);
+
+ if (!rsp->stat_ena)
+ return -EINVAL;
+
+ stats = &flow_node->stats;
+
+ spin_lock(&flow_node->lock);
+ flow_stats_update(&tc_flow_cmd->stats, 0x0, rsp->stat - stats->pkts,
+ 0x0, 0x0, FLOW_ACTION_HW_STATS_IMMEDIATE);
+ stats->pkts = rsp->stat;
+ spin_unlock(&flow_node->lock);
+
+ return 0;
+}
+
+static int otx2_setup_tc_cls_flower(struct otx2_nic *nic,
+ struct flow_cls_offload *cls_flower)
+{
+ switch (cls_flower->command) {
+ case FLOW_CLS_REPLACE:
+ return otx2_tc_add_flow(nic, cls_flower);
+ case FLOW_CLS_DESTROY:
+ return otx2_tc_del_flow(nic, cls_flower);
+ case FLOW_CLS_STATS:
+ return otx2_tc_get_flow_stats(nic, cls_flower);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int otx2_tc_ingress_matchall_install(struct otx2_nic *nic,
+ struct tc_cls_matchall_offload *cls)
+{
+ struct netlink_ext_ack *extack = cls->common.extack;
+ struct flow_action *actions = &cls->rule->action;
+ struct flow_action_entry *entry;
+ u64 rate;
+ int err;
+
+ err = otx2_tc_validate_flow(nic, actions, extack);
+ if (err)
+ return err;
+
+ if (nic->flags & OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only one ingress MATCHALL ratelimitter can be offloaded");
+ return -ENOMEM;
+ }
+
+ entry = &cls->rule->action.entries[0];
+ switch (entry->id) {
+ case FLOW_ACTION_POLICE:
+ /* Ingress ratelimiting is not supported on OcteonTx2 */
+ if (is_dev_otx2(nic->pdev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Ingress policing not supported on this platform");
+ return -EOPNOTSUPP;
+ }
+
+ err = cn10k_alloc_matchall_ipolicer(nic);
+ if (err)
+ return err;
+
+ /* Convert to bits per second */
+ rate = entry->police.rate_bytes_ps * 8;
+ err = cn10k_set_matchall_ipolicer_rate(nic, entry->police.burst, rate);
+ if (err)
+ return err;
+ nic->flags |= OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only police action supported with Ingress MATCHALL offload");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int otx2_tc_ingress_matchall_delete(struct otx2_nic *nic,
+ struct tc_cls_matchall_offload *cls)
+{
+ struct netlink_ext_ack *extack = cls->common.extack;
+ int err;
+
+ if (nic->flags & OTX2_FLAG_INTF_DOWN) {
+ NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
+ return -EINVAL;
+ }
+
+ err = cn10k_free_matchall_ipolicer(nic);
+ nic->flags &= ~OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED;
+ return err;
+}
+
+static int otx2_setup_tc_ingress_matchall(struct otx2_nic *nic,
+ struct tc_cls_matchall_offload *cls_matchall)
+{
+ switch (cls_matchall->command) {
+ case TC_CLSMATCHALL_REPLACE:
+ return otx2_tc_ingress_matchall_install(nic, cls_matchall);
+ case TC_CLSMATCHALL_DESTROY:
+ return otx2_tc_ingress_matchall_delete(nic, cls_matchall);
+ case TC_CLSMATCHALL_STATS:
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int otx2_setup_tc_block_ingress_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ struct otx2_nic *nic = cb_priv;
+
+ if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return otx2_setup_tc_cls_flower(nic, type_data);
+ case TC_SETUP_CLSMATCHALL:
+ return otx2_setup_tc_ingress_matchall(nic, type_data);
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int otx2_setup_tc_egress_matchall(struct otx2_nic *nic,
+ struct tc_cls_matchall_offload *cls_matchall)
+{
+ switch (cls_matchall->command) {
+ case TC_CLSMATCHALL_REPLACE:
+ return otx2_tc_egress_matchall_install(nic, cls_matchall);
+ case TC_CLSMATCHALL_DESTROY:
+ return otx2_tc_egress_matchall_delete(nic, cls_matchall);
+ case TC_CLSMATCHALL_STATS:
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int otx2_setup_tc_block_egress_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ struct otx2_nic *nic = cb_priv;
+
+ if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSMATCHALL:
+ return otx2_setup_tc_egress_matchall(nic, type_data);
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static LIST_HEAD(otx2_block_cb_list);
+
+static int otx2_setup_tc_block(struct net_device *netdev,
+ struct flow_block_offload *f)
+{
+ struct otx2_nic *nic = netdev_priv(netdev);
+ flow_setup_cb_t *cb;
+ bool ingress;
+
+ if (f->block_shared)
+ return -EOPNOTSUPP;
+
+ if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
+ cb = otx2_setup_tc_block_ingress_cb;
+ ingress = true;
+ } else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
+ cb = otx2_setup_tc_block_egress_cb;
+ ingress = false;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ return flow_block_cb_setup_simple(f, &otx2_block_cb_list, cb,
+ nic, nic, ingress);
+}
+
+int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return otx2_setup_tc_block(netdev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+EXPORT_SYMBOL(otx2_setup_tc);
+
+static const struct rhashtable_params tc_flow_ht_params = {
+ .head_offset = offsetof(struct otx2_tc_flow, node),
+ .key_offset = offsetof(struct otx2_tc_flow, cookie),
+ .key_len = sizeof(((struct otx2_tc_flow *)0)->cookie),
+ .automatic_shrinking = true,
+};
+
+int otx2_init_tc(struct otx2_nic *nic)
+{
+ struct otx2_tc_info *tc = &nic->tc_info;
+ int err;
+
+ /* Exclude receive queue 0 from being used for police action */
+ set_bit(0, &nic->rq_bmap);
+
+ if (!nic->flow_cfg) {
+ netdev_err(nic->netdev,
+ "Can't init TC, nic->flow_cfg is not setup\n");
+ return -EINVAL;
+ }
+
+ err = otx2_tc_alloc_ent_bitmap(nic);
+ if (err)
+ return err;
+
+ tc->flow_ht_params = tc_flow_ht_params;
+ return rhashtable_init(&tc->flow_table, &tc->flow_ht_params);
+}
+EXPORT_SYMBOL(otx2_init_tc);
+
+void otx2_shutdown_tc(struct otx2_nic *nic)
+{
+ struct otx2_tc_info *tc = &nic->tc_info;
+
+ kfree(tc->tc_entries_bitmap);
+ rhashtable_destroy(&tc->flow_table);
+}
+EXPORT_SYMBOL(otx2_shutdown_tc);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index d5d7a2f37493..04e6cce8709a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -1,24 +1,61 @@
// SPDX-License-Identifier: GPL-2.0
-/* Marvell OcteonTx2 RVU Ethernet driver
+/* Marvell RVU Ethernet driver
*
- * Copyright (C) 2020 Marvell International Ltd.
+ * Copyright (C) 2020 Marvell.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/etherdevice.h>
#include <net/ip.h>
#include <net/tso.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
#include "otx2_reg.h"
#include "otx2_common.h"
#include "otx2_struct.h"
#include "otx2_txrx.h"
#include "otx2_ptp.h"
+#include "cn10k.h"
#define CQE_ADDR(CQ, idx) ((CQ)->cqe_base + ((CQ)->cqe_size * (idx)))
+#define PTP_PORT 0x13F
+/* The originTimestamp field starts at byte offset 34 in a PTP Sync packet and
+ * is divided into a 6 byte seconds field and a 4 byte nanoseconds field.
+ * The silicon supports only a 4 byte seconds field, so adjust the seconds
+ * field offset by 2.
+ */
+#define PTP_SYNC_SEC_OFFSET 36
+#define PTP_SYNC_NSEC_OFFSET 40
+
+static inline int otx2_nix_cq_op_status(struct otx2_nic *pfvf,
+ struct otx2_cq_queue *cq)
+{
+ u64 incr = (u64)(cq->cq_idx) << 32;
+ u64 status;
+
+ status = otx2_atomic64_fetch_add(incr, pfvf->cq_op_addr);
+
+ if (unlikely(status & BIT_ULL(CQ_OP_STAT_OP_ERR) ||
+ status & BIT_ULL(CQ_OP_STAT_CQ_ERR))) {
+ dev_err(pfvf->dev, "CQ stopped due to error");
+ return -EINVAL;
+ }
+
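+ /* The CQ status op returns the tail pointer in bits [19:0] and the
+ * head pointer in bits [39:20]; derive the number of pending CQEs,
+ * accounting for wrap-around.
+ */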
+ cq->cq_tail = status & 0xFFFFF;
+ cq->cq_head = (status >> 20) & 0xFFFFF;
+ if (cq->cq_tail < cq->cq_head)
+ cq->pend_cqe = (cq->cqe_cnt - cq->cq_head) +
+ cq->cq_tail;
+ else
+ cq->pend_cqe = cq->cq_tail - cq->cq_head;
+
+ return 0;
+}
+
+static inline bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
+ struct nix_cqe_rx_s *cqe,
+ struct otx2_cq_queue *cq);
static struct nix_cqe_hdr_s *otx2_get_next_cqe(struct otx2_cq_queue *cq)
{
@@ -75,6 +112,24 @@ static void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg)
sg->num_segs = 0;
}
+static void otx2_xdp_snd_pkt_handler(struct otx2_nic *pfvf,
+ struct otx2_snd_queue *sq,
+ struct nix_cqe_tx_s *cqe)
+{
+ struct nix_send_comp_s *snd_comp = &cqe->comp;
+ struct sg_list *sg;
+ struct page *page;
+ u64 pa;
+
+ sg = &sq->sg[snd_comp->sqe_id];
+
+ pa = otx2_iova_to_phys(pfvf->iommu_domain, sg->dma_addr[0]);
+ otx2_dma_unmap_page(pfvf, sg->dma_addr[0],
+ sg->size[0], DMA_TO_DEVICE);
+ page = virt_to_page(phys_to_virt(pa));
+ put_page(page);
+}
+
static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
struct otx2_cq_queue *cq,
struct otx2_snd_queue *sq,
@@ -84,9 +139,8 @@ static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
struct nix_send_comp_s *snd_comp = &cqe->comp;
struct skb_shared_hwtstamps ts;
struct sk_buff *skb = NULL;
- u64 timestamp, tsns;
struct sg_list *sg;
- int err;
+ u64 timestamp;
if (unlikely(snd_comp->status) && netif_msg_tx_err(pfvf))
net_err_ratelimited("%s: TX%d: Error in send CQ status:%x\n",
@@ -101,12 +155,10 @@ static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
timestamp = ((u64 *)sq->timestamps->base)[snd_comp->sqe_id];
if (timestamp != 1) {
- err = otx2_ptp_tstamp2time(pfvf, timestamp, &tsns);
- if (!err) {
- memset(&ts, 0, sizeof(ts));
- ts.hwtstamp = ns_to_ktime(tsns);
- skb_tstamp_tx(skb, &ts);
- }
+ timestamp = pfvf->ptp->convert_tx_ptp_tstmp(timestamp);
+ memset(&ts, 0, sizeof(ts));
+ ts.hwtstamp = ns_to_ktime(timestamp);
+ skb_tstamp_tx(skb, &ts);
}
}
@@ -120,22 +172,19 @@ static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
static void otx2_set_rxtstamp(struct otx2_nic *pfvf,
struct sk_buff *skb, void *data)
{
- u64 tsns;
- int err;
+ u64 timestamp;
if (!(pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED))
return;
/* The first 8 bytes is the timestamp */
- err = otx2_ptp_tstamp2time(pfvf, be64_to_cpu(*(__be64 *)data), &tsns);
- if (err)
- return;
-
- skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(tsns);
+ timestamp = pfvf->ptp->convert_rx_ptp_tstmp(*(u64 *)data);
+ skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(timestamp);
}
-static void otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb,
- u64 iova, int len, struct nix_rx_parse_s *parse)
+static bool otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb,
+ u64 iova, int len, struct nix_rx_parse_s *parse,
+ int qidx)
{
struct page *page;
int off = 0;
@@ -156,11 +205,22 @@ static void otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb,
}
page = virt_to_page(va);
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
- va - page_address(page) + off, len - off, pfvf->rbsize);
+ if (likely(skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)) {
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ va - page_address(page) + off,
+ len - off, pfvf->rbsize);
+
+ otx2_dma_unmap_page(pfvf, iova - OTX2_HEAD_ROOM,
+ pfvf->rbsize, DMA_FROM_DEVICE);
+ return true;
+ }
- otx2_dma_unmap_page(pfvf, iova - OTX2_HEAD_ROOM,
- pfvf->rbsize, DMA_FROM_DEVICE);
+ /* If more than MAX_SKB_FRAGS fragments are received then
+ * give the remaining buffer pointers back to hardware for reuse.
+ */
+ pfvf->hw_ops->aura_freeptr(pfvf, qidx, iova & ~0x07ULL);
+
+ return false;
}
static void otx2_set_rxhash(struct otx2_nic *pfvf,
@@ -199,7 +259,8 @@ static void otx2_free_rcv_seg(struct otx2_nic *pfvf, struct nix_cqe_rx_s *cqe,
sg = (struct nix_rx_sg_s *)start;
seg_addr = &sg->seg_addr;
for (seg = 0; seg < sg->segs; seg++, seg_addr++)
- otx2_aura_freeptr(pfvf, qidx, *seg_addr & ~0x07ULL);
+ pfvf->hw_ops->aura_freeptr(pfvf, qidx,
+ *seg_addr & ~0x07ULL);
start += sizeof(*sg);
}
}
@@ -255,12 +316,11 @@ static bool otx2_check_rcv_errors(struct otx2_nic *pfvf,
/* For now ignore all the NPC parser errors and
* pass the packets to stack.
*/
- if (cqe->sg.segs == 1)
- return false;
+ return false;
}
/* If RXALL is enabled pass on packets to stack. */
- if (cqe->sg.segs == 1 && (pfvf->netdev->features & NETIF_F_RXALL))
+ if (pfvf->netdev->features & NETIF_F_RXALL)
return false;
/* Free buffer back to pool */
@@ -275,19 +335,39 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
struct nix_cqe_rx_s *cqe)
{
struct nix_rx_parse_s *parse = &cqe->parse;
+ struct nix_rx_sg_s *sg = &cqe->sg;
struct sk_buff *skb = NULL;
+ void *end, *start;
+ u64 *seg_addr;
+ u16 *seg_size;
+ int seg;
- if (unlikely(parse->errlev || parse->errcode || cqe->sg.segs > 1)) {
+ if (unlikely(parse->errlev || parse->errcode)) {
if (otx2_check_rcv_errors(pfvf, cqe, cq->cq_idx))
return;
}
+ if (pfvf->xdp_prog)
+ if (otx2_xdp_rcv_pkt_handler(pfvf, cqe, cq))
+ return;
+
skb = napi_get_frags(napi);
if (unlikely(!skb))
return;
- otx2_skb_add_frag(pfvf, skb, cqe->sg.seg_addr, cqe->sg.seg_size, parse);
- cq->pool_ptrs++;
+ start = (void *)sg;
+ end = start + ((cqe->parse.desc_sizem1 + 1) * 16);
+ while (start < end) {
+ sg = (struct nix_rx_sg_s *)start;
+ seg_addr = &sg->seg_addr;
+ seg_size = (void *)sg;
+ for (seg = 0; seg < sg->segs; seg++, seg_addr++) {
+ if (otx2_skb_add_frag(pfvf, skb, *seg_addr,
+ seg_size[seg], parse, cq->cq_idx))
+ cq->pool_ptrs++;
+ }
+ start += sizeof(*sg);
+ }
otx2_set_rxhash(pfvf, cqe, skb);
@@ -295,6 +375,18 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
if (pfvf->netdev->features & NETIF_F_RXCSUM)
skb->ip_summed = CHECKSUM_UNNECESSARY;
+ /* This holds true when RX VLAN offload is enabled and an
+ * 802.1AD or 802.1Q VLAN tag was found in the frame.
+ */
+ if (parse->vtag0_gone) {
+ if (skb->protocol == htons(ETH_P_8021Q))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD),
+ parse->vtag0_tci);
+ else
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ parse->vtag0_tci);
+ }
+
napi_gro_frags(napi);
}
@@ -304,9 +396,15 @@ static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
{
struct nix_cqe_rx_s *cqe;
int processed_cqe = 0;
- s64 bufptr;
- while (likely(processed_cqe < budget)) {
+ if (cq->pend_cqe >= budget)
+ goto process_cqe;
+
+ if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
+ return 0;
+
+process_cqe:
+ while (likely(processed_cqe < budget) && cq->pend_cqe) {
cqe = (struct nix_cqe_rx_s *)CQE_ADDR(cq, cq->cq_head);
if (cqe->hdr.cqe_type == NIX_XQE_TYPE_INVALID ||
!cqe->sg.seg_addr) {
@@ -322,58 +420,63 @@ static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
cqe->sg.seg_addr = 0x00;
processed_cqe++;
+ cq->pend_cqe--;
}
/* Free CQEs to HW */
otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
((u64)cq->cq_idx << 32) | processed_cqe);
- if (unlikely(!cq->pool_ptrs))
- return 0;
+ return processed_cqe;
+}
+
+void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
+{
+ struct otx2_nic *pfvf = dev;
+ dma_addr_t bufptr;
/* Refill pool with new buffers */
while (cq->pool_ptrs) {
- bufptr = __otx2_alloc_rbuf(pfvf, cq->rbpool);
- if (unlikely(bufptr <= 0)) {
- struct refill_work *work;
- struct delayed_work *dwork;
-
- work = &pfvf->refill_wrk[cq->cq_idx];
- dwork = &work->pool_refill_work;
- /* Schedule a task if no other task is running */
- if (!cq->refill_task_sched) {
- cq->refill_task_sched = true;
- schedule_delayed_work(dwork,
- msecs_to_jiffies(100));
- }
+ if (otx2_alloc_buffer(pfvf, cq, &bufptr))
break;
- }
otx2_aura_freeptr(pfvf, cq->cq_idx, bufptr + OTX2_HEAD_ROOM);
cq->pool_ptrs--;
}
-
- return processed_cqe;
}
static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
struct otx2_cq_queue *cq, int budget)
{
- int tx_pkts = 0, tx_bytes = 0;
+ int tx_pkts = 0, tx_bytes = 0, qidx;
struct nix_cqe_tx_s *cqe;
int processed_cqe = 0;
- while (likely(processed_cqe < budget)) {
+ if (cq->pend_cqe >= budget)
+ goto process_cqe;
+
+ if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
+ return 0;
+
+process_cqe:
+ while (likely(processed_cqe < budget) && cq->pend_cqe) {
cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq);
if (unlikely(!cqe)) {
if (!processed_cqe)
return 0;
break;
}
- otx2_snd_pkt_handler(pfvf, cq, &pfvf->qset.sq[cq->cint_idx],
- cqe, budget, &tx_pkts, &tx_bytes);
-
+ if (cq->cq_type == CQ_XDP) {
+ qidx = cq->cq_idx - pfvf->hw.rx_queues;
+ otx2_xdp_snd_pkt_handler(pfvf, &pfvf->qset.sq[qidx],
+ cqe);
+ } else {
+ otx2_snd_pkt_handler(pfvf, cq,
+ &pfvf->qset.sq[cq->cint_idx],
+ cqe, budget, &tx_pkts, &tx_bytes);
+ }
cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
processed_cqe++;
+ cq->pend_cqe--;
}
/* Free CQEs to HW */
@@ -396,6 +499,7 @@ static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
int otx2_napi_handler(struct napi_struct *napi, int budget)
{
+ struct otx2_cq_queue *rx_cq = NULL;
struct otx2_cq_poll *cq_poll;
int workdone = 0, cq_idx, i;
struct otx2_cq_queue *cq;
@@ -406,17 +510,13 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
pfvf = (struct otx2_nic *)cq_poll->dev;
qset = &pfvf->qset;
- for (i = CQS_PER_CINT - 1; i >= 0; i--) {
+ for (i = 0; i < CQS_PER_CINT; i++) {
cq_idx = cq_poll->cq_ids[i];
if (unlikely(cq_idx == CINT_INVALID_CQ))
continue;
cq = &qset->cq[cq_idx];
if (cq->cq_type == CQ_RX) {
- /* If the RQ refill WQ task is running, skip napi
- * scheduler for this queue.
- */
- if (cq->refill_task_sched)
- continue;
+ rx_cq = cq;
workdone += otx2_rx_napi_handler(pfvf, napi,
cq, budget);
} else {
@@ -424,6 +524,8 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
}
}
+ if (rx_cq && rx_cq->pool_ptrs)
+ pfvf->hw_ops->refill_pool_ptrs(pfvf, rx_cq);
/* Clear the IRQ */
otx2_write64(pfvf, NIX_LF_CINTX_INT(cq_poll->cint_idx), BIT_ULL(0));
@@ -439,7 +541,8 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
return workdone;
}
-static void otx2_sqe_flush(struct otx2_snd_queue *sq, int size)
+void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq,
+ int size, int qidx)
{
u64 status;
@@ -556,11 +659,25 @@ static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
ext->tstmp = 1;
}
+#define OTX2_VLAN_PTR_OFFSET (ETH_HLEN - ETH_TLEN)
+ if (skb_vlan_tag_present(skb)) {
+ if (skb->vlan_proto == htons(ETH_P_8021Q)) {
+ ext->vlan1_ins_ena = 1;
+ ext->vlan1_ins_ptr = OTX2_VLAN_PTR_OFFSET;
+ ext->vlan1_ins_tci = skb_vlan_tag_get(skb);
+ } else if (skb->vlan_proto == htons(ETH_P_8021AD)) {
+ ext->vlan0_ins_ena = 1;
+ ext->vlan0_ins_ptr = OTX2_VLAN_PTR_OFFSET;
+ ext->vlan0_ins_tci = skb_vlan_tag_get(skb);
+ }
+ }
+
*offset += sizeof(*ext);
}
static void otx2_sqe_add_mem(struct otx2_snd_queue *sq, int *offset,
- int alg, u64 iova)
+ int alg, u64 iova, int ptp_offset,
+ u64 base_ns, int udp_csum)
{
struct nix_sqe_mem_s *mem;
@@ -570,6 +687,13 @@ static void otx2_sqe_add_mem(struct otx2_snd_queue *sq, int *offset,
mem->wmem = 1; /* wait for the memory operation */
mem->addr = iova;
+ if (ptp_offset) {
+ mem->start_offset = ptp_offset;
+ mem->udp_csum_crt = udp_csum;
+ mem->base_ns = base_ns;
+ mem->step_type = 1;
+ }
+
*offset += sizeof(*mem);
}
@@ -784,7 +908,7 @@ static void otx2_sq_append_tso(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
sqe_hdr->sizem1 = (offset / 16) - 1;
/* Flush SQE to HW */
- otx2_sqe_flush(sq, offset);
+ pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
}
}
@@ -793,16 +917,17 @@ static bool is_hw_tso_supported(struct otx2_nic *pfvf,
{
int payload_len, last_seg_size;
- if (!pfvf->hw.hw_tso)
+ if (test_bit(HW_TSO, &pfvf->hw.cap_flag))
+ return true;
+
+ /* On 96xx A0, HW TSO not supported */
+ if (!is_96xx_B0(pfvf->pdev))
return false;
/* HW has an issue due to which when the payload of the last LSO
* segment is shorter than 16 bytes, some header fields may not
* be correctly modified, hence don't offload such TSO segments.
*/
- if (!is_96xx_B0(pfvf->pdev))
- return true;
-
payload_len = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
last_seg_size = payload_len % skb_shinfo(skb)->gso_size;
if (last_seg_size && last_seg_size < 16)
@@ -824,16 +949,105 @@ static int otx2_get_sqe_count(struct otx2_nic *pfvf, struct sk_buff *skb)
return skb_shinfo(skb)->gso_segs;
}
+static bool otx2_validate_network_transport(struct sk_buff *skb)
+{
+ if ((ip_hdr(skb)->protocol == IPPROTO_UDP) ||
+ (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)) {
+ struct udphdr *udph = udp_hdr(skb);
+
+ if (udph->source == htons(PTP_PORT) &&
+ udph->dest == htons(PTP_PORT))
+ return true;
+ }
+
+ return false;
+}
+
+static bool otx2_ptp_is_sync(struct sk_buff *skb, int *offset, int *udp_csum)
+{
+ struct ethhdr *eth = (struct ethhdr *)(skb->data);
+ u16 nix_offload_hlen = 0, inner_vhlen = 0;
+ u8 *data = skb->data, *msgtype;
+ u16 proto = eth->h_proto;
+ int network_depth = 0;
+
+ /* NIX is programmed to offload the outer VLAN header.
+ * For a single VLAN the protocol field holds the network header type
+ * (ETH_P_IP/IPV6); for stacked VLANs it holds the inner VLAN type (8100).
+ */
+ if (skb->dev->features & NETIF_F_HW_VLAN_CTAG_TX &&
+ skb->dev->features & NETIF_F_HW_VLAN_STAG_TX) {
+ if (skb->vlan_proto == htons(ETH_P_8021AD)) {
+ /* Get vlan protocol */
+ proto = __vlan_get_protocol(skb, eth->h_proto, NULL);
+ /* SKB APIs like skb_transport_offset() do not include
+ * the offloaded VLAN header length, so it needs to be
+ * added explicitly.
+ */
+ nix_offload_hlen = VLAN_HLEN;
+ inner_vhlen = VLAN_HLEN;
+ } else if (skb->vlan_proto == htons(ETH_P_8021Q)) {
+ nix_offload_hlen = VLAN_HLEN;
+ }
+ } else if (eth_type_vlan(eth->h_proto)) {
+ proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);
+ }
+
+ switch (ntohs(proto)) {
+ case ETH_P_1588:
+ if (network_depth)
+ *offset = network_depth;
+ else
+ *offset = ETH_HLEN + nix_offload_hlen +
+ inner_vhlen;
+ break;
+ case ETH_P_IP:
+ case ETH_P_IPV6:
+ if (!otx2_validate_network_transport(skb))
+ return false;
+
+ *udp_csum = 1;
+ *offset = nix_offload_hlen + skb_transport_offset(skb) +
+ sizeof(struct udphdr);
+ }
+
+ msgtype = data + *offset;
+
+ /* Check whether the PTP messageId is SYNC */
+ return (*msgtype & 0xf) == 0;
+}
+
static void otx2_set_txtstamp(struct otx2_nic *pfvf, struct sk_buff *skb,
struct otx2_snd_queue *sq, int *offset)
{
- u64 iova;
+ int ptp_offset = 0, udp_csum = 0;
+ struct timespec64 ts;
+ u64 iova, sec, nsec;
if (!skb_shinfo(skb)->gso_size &&
skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
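+ /* For one-step PTP sync, stamp the origin timestamp into the packet
+ * here and point ptp_offset at the correction field so that the
+ * SEND MEM descriptor added below lets hardware update it.
+ */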
+ if (pfvf->flags & OTX2_FLAG_PTP_ONESTEP_SYNC &&
+ otx2_ptp_is_sync(skb, &ptp_offset, &udp_csum)) {
+ ts = ns_to_timespec64(pfvf->ptp->tstamp);
+ sec = ntohl(ts.tv_sec);
+ nsec = ntohl(ts.tv_nsec);
+
+ memcpy((u8 *)skb->data + ptp_offset + PTP_SYNC_SEC_OFFSET,
+ &sec, 4);
+ memcpy((u8 *)skb->data + ptp_offset + PTP_SYNC_NSEC_OFFSET,
+ &nsec, 4);
+ /* Point to correction field in PTP packet */
+ ptp_offset += 8;
+ } else {
+ ptp_offset = 0;
+ }
+
+ if (!(pfvf->flags & OTX2_FLAG_PTP_ONESTEP_SYNC))
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
iova = sq->timestamps->iova + (sq->head * sizeof(u64));
- otx2_sqe_add_mem(sq, offset, NIX_SENDMEMALG_E_SETTSTMP, iova);
+ otx2_sqe_add_mem(sq, offset, NIX_SENDMEMALG_E_SETTSTMP, iova,
+ ptp_offset, ts.tv_nsec, udp_csum);
} else {
skb_tx_timestamp(skb);
}
@@ -871,6 +1085,9 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
}
if (skb_shinfo(skb)->gso_size && !is_hw_tso_supported(pfvf, skb)) {
+ /* Insert vlan tag before giving pkt to tso */
+ if (skb_vlan_tag_present(skb))
+ skb = __vlan_hwaccel_push_inside(skb);
otx2_sq_append_tso(pfvf, sq, skb, qidx);
return true;
}
@@ -899,7 +1116,7 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
netdev_tx_sent_queue(txq, skb->len);
/* Flush SQE to HW */
- otx2_sqe_flush(sq, offset);
+ pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
return true;
}
@@ -911,10 +1128,16 @@ void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
int processed_cqe = 0;
u64 iova, pa;
- while ((cqe = (struct nix_cqe_rx_s *)otx2_get_next_cqe(cq))) {
- if (!cqe->sg.subdc)
- continue;
+ if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
+ return;
+
+ while (cq->pend_cqe) {
+ cqe = (struct nix_cqe_rx_s *)otx2_get_next_cqe(cq);
processed_cqe++;
+ cq->pend_cqe--;
+
+ if (!cqe)
+ continue;
if (cqe->sg.segs > 1) {
otx2_free_rcv_seg(pfvf, cqe, cq->cq_idx);
continue;
@@ -940,7 +1163,16 @@ void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
sq = &pfvf->qset.sq[cq->cint_idx];
- while ((cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq))) {
+ if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
+ return;
+
+ while (cq->pend_cqe) {
+ cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq);
+ processed_cqe++;
+ cq->pend_cqe--;
+
+ if (!cqe)
+ continue;
sg = &sq->sg[cqe->comp.sqe_id];
skb = (struct sk_buff *)sg->skb;
if (skb) {
@@ -948,7 +1180,6 @@ void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
dev_kfree_skb_any(skb);
sg->skb = (u64)NULL;
}
- processed_cqe++;
}
/* Free CQEs to HW */
@@ -976,3 +1207,115 @@ int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable)
mutex_unlock(&pfvf->mbox.lock);
return err;
}
+
+static inline void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, u64 dma_addr,
+ int len, int *offset)
+{
+ struct nix_sqe_sg_s *sg = NULL;
+ u64 *iova = NULL;
+
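+ /* Append a single-segment SG subdescriptor; the 64-bit buffer IOVA
+ * immediately follows the SG header in the SQE.
+ */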
+ sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset);
+ sg->ld_type = NIX_SEND_LDTYPE_LDD;
+ sg->subdc = NIX_SUBDC_SG;
+ sg->segs = 1;
+ sg->seg1_size = len;
+ iova = (void *)sg + sizeof(*sg);
+ *iova = dma_addr;
+ *offset += sizeof(*sg) + sizeof(u64);
+
+ sq->sg[sq->head].dma_addr[0] = dma_addr;
+ sq->sg[sq->head].size[0] = len;
+ sq->sg[sq->head].num_segs = 1;
+}
+
+bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx)
+{
+ struct nix_sqe_hdr_s *sqe_hdr;
+ struct otx2_snd_queue *sq;
+ int offset, free_sqe;
+
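+ /* Make sure the SQ has enough free SQEs before queueing the frame */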
+ sq = &pfvf->qset.sq[qidx];
+ free_sqe = (sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb;
+ if (free_sqe < sq->sqe_thresh)
+ return false;
+
+ memset(sq->sqe_base + 8, 0, sq->sqe_size - 8);
+
+ sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base);
+
+ if (!sqe_hdr->total) {
+ sqe_hdr->aura = sq->aura_id;
+ sqe_hdr->df = 1;
+ sqe_hdr->sq = qidx;
+ sqe_hdr->pnc = 1;
+ }
+ sqe_hdr->total = len;
+ sqe_hdr->sqe_id = sq->head;
+
+ offset = sizeof(*sqe_hdr);
+
+ otx2_xdp_sqe_add_sg(sq, iova, len, &offset);
+ sqe_hdr->sizem1 = (offset / 16) - 1;
+ pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
+
+ return true;
+}
+
+static inline bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
+ struct nix_cqe_rx_s *cqe,
+ struct otx2_cq_queue *cq)
+{
+ struct bpf_prog *xdp_prog;
+ int qidx = cq->cq_idx;
+ struct xdp_buff xdp;
+ struct page *page;
+ u64 iova, pa;
+ u32 act;
+ int err;
+
+ iova = cqe->sg.seg_addr;
+ pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
+ page = virt_to_page(phys_to_virt(pa));
+
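+ /* Construct an xdp_buff around the received buffer and run the
+ * attached XDP program on it.
+ */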
+ xdp.data = phys_to_virt(pa);
+ xdp.data_hard_start = page_address(page) + OTX2_HEAD_ROOM;
+ xdp.data_end = xdp.data + cqe->sg.seg_size;
+
+ rcu_read_lock();
+ xdp_prog = READ_ONCE(pfvf->xdp_prog);
+ act = bpf_prog_run_xdp(xdp_prog, &xdp);
+ rcu_read_unlock();
+
+ switch (act) {
+ case XDP_PASS:
+ break;
+ case XDP_TX:
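+ /* XDP transmit queues are placed after the regular TX queues in
+ * the send queue array.
+ */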
+ qidx += pfvf->hw.tx_queues;
+ cq->pool_ptrs++;
+ return otx2_xdp_sq_append_pkt(pfvf, iova,
+ cqe->sg.seg_size, qidx);
+ case XDP_REDIRECT:
+ cq->pool_ptrs++;
+ err = xdp_do_redirect(pfvf->netdev, &xdp, xdp_prog);
+
+ otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
+ DMA_FROM_DEVICE);
+ if (!err)
+ return true;
+ put_page(page);
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ break;
+ case XDP_ABORTED:
+ trace_xdp_exception(pfvf->netdev, xdp_prog, act);
+ break;
+ case XDP_DROP:
+ otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
+ DMA_FROM_DEVICE);
+ put_page(page);
+ cq->pool_ptrs++;
+ return true;
+ }
+ return false;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
index 73af15685657..96a7b7f3ccde 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -1,11 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Marvell OcteonTx2 RVU Ethernet driver
+/* Marvell RVU Ethernet driver
*
- * Copyright (C) 2020 Marvell International Ltd.
+ * Copyright (C) 2020 Marvell.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef OTX2_TXRX_H
@@ -24,7 +21,6 @@
#define OTX2_ETH_HLEN (VLAN_ETH_HLEN + VLAN_HLEN)
#define OTX2_MIN_MTU 64
-#define OTX2_MAX_MTU (9212 - OTX2_ETH_HLEN)
#define OTX2_MAX_GSO_SEGS 255
#define OTX2_MAX_FRAGS_IN_SQE 9
@@ -40,9 +36,7 @@
#define RCV_FRAG_LEN(x) \
((RCV_FRAG_LEN1(x) < 2048) ? 2048 : RCV_FRAG_LEN1(x))
-#define DMA_BUFFER_LEN(x) \
- ((x) - OTX2_HEAD_ROOM - \
- OTX2_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define DMA_BUFFER_LEN(x) ((x) - OTX2_HEAD_ROOM)
/* IRQ triggered when NIX_LF_CINTX_CNT[ECOUNT]
* is equal to this value.
@@ -60,6 +54,9 @@
*/
#define CQ_QCOUNT_DEFAULT 1
+#define CQ_OP_STAT_OP_ERR 63
+#define CQ_OP_STAT_CQ_ERR 46
+
struct queue_stats {
u64 bytes;
u64 pkts;
@@ -100,7 +97,8 @@ struct otx2_snd_queue {
enum cq_type {
CQ_RX,
CQ_TX,
- CQS_PER_CINT = 2, /* RQ + SQ */
+ CQ_XDP,
+ CQS_PER_CINT = 3, /* RQ + SQ + XDP */
};
struct otx2_cq_poll {
@@ -126,6 +124,8 @@ struct otx2_cq_queue {
u16 pool_ptrs;
u32 cqe_cnt;
u32 cq_head;
+ u32 cq_tail;
+ u32 pend_cqe;
void *cqe_base;
struct qmem *cqe;
struct otx2_pool *rbpool;
@@ -156,4 +156,10 @@ static inline u64 otx2_iova_to_phys(void *iommu_domain, dma_addr_t dma_addr)
int otx2_napi_handler(struct napi_struct *napi, int budget);
bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
struct sk_buff *skb, u16 qidx);
+void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq,
+ int size, int qidx);
+void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq,
+ int size, int qidx);
+void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
+void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
#endif /* OTX2_TXRX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 67fabf265fe6..7aaec37f52a7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -1,19 +1,27 @@
// SPDX-License-Identifier: GPL-2.0
-/* Marvell OcteonTx2 RVU Virtual Function ethernet driver */
+/* Marvell RVU Virtual Function ethernet driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/net_tstamp.h>
#include "otx2_common.h"
#include "otx2_reg.h"
+#include "otx2_ptp.h"
+#include "cn10k.h"
-#define DRV_NAME "octeontx2-nicvf"
-#define DRV_STRING "Marvell OcteonTX2 NIC Virtual Function Driver"
+#define DRV_NAME "rvu_nicvf"
+#define DRV_STRING "Marvell RVU NIC Virtual Function Driver"
static const struct pci_device_id otx2_vf_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AFVF) },
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_VF) },
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_SDP_VF) },
{ }
};
@@ -108,9 +116,6 @@ static void otx2vf_vfaf_mbox_handler(struct work_struct *work)
static int otx2vf_process_mbox_msg_up(struct otx2_nic *vf,
struct mbox_msghdr *req)
{
- struct msg_rsp *rsp;
- int err;
-
/* Check if valid, if not reply with a invalid msg */
if (req->sig != OTX2_MBOX_REQ_SIG) {
otx2_reply_invalid_msg(&vf->mbox.mbox_up, 0, 0, req->id);
@@ -118,20 +123,29 @@ static int otx2vf_process_mbox_msg_up(struct otx2_nic *vf,
}
switch (req->id) {
- case MBOX_MSG_CGX_LINK_EVENT:
- rsp = (struct msg_rsp *)otx2_mbox_alloc_msg(
- &vf->mbox.mbox_up, 0,
- sizeof(struct msg_rsp));
- if (!rsp)
- return -ENOMEM;
-
- rsp->hdr.id = MBOX_MSG_CGX_LINK_EVENT;
- rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
- rsp->hdr.pcifunc = 0;
- rsp->hdr.rc = 0;
- err = otx2_mbox_up_handler_cgx_link_event(
- vf, (struct cgx_link_info_msg *)req, rsp);
- return err;
+#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
+ case _id: { \
+ struct _rsp_type *rsp; \
+ int err; \
+ \
+ rsp = (struct _rsp_type *)otx2_mbox_alloc_msg( \
+ &vf->mbox.mbox_up, 0, \
+ sizeof(struct _rsp_type)); \
+ if (!rsp) \
+ return -ENOMEM; \
+ \
+ rsp->hdr.id = _id; \
+ rsp->hdr.sig = OTX2_MBOX_RSP_SIG; \
+ rsp->hdr.pcifunc = 0; \
+ rsp->hdr.rc = 0; \
+ \
+ err = otx2_mbox_up_handler_ ## _fn_name( \
+ vf, (struct _req_type *)req, rsp); \
+ return err; \
+ }
+MBOX_UP_CGX_MESSAGES
+#undef M
+ break;
default:
otx2_reply_invalid_msg(&vf->mbox.mbox_up, 0, 0, req->id);
return -ENODEV;
@@ -277,7 +291,7 @@ static void otx2vf_vfaf_mbox_destroy(struct otx2_nic *vf)
vf->mbox_wq = NULL;
}
- if (mbox->mbox.hwbase)
+ if (mbox->mbox.hwbase && !test_bit(CN10K_MBOX, &vf->hw.cap_flag))
iounmap((void __iomem *)mbox->mbox.hwbase);
otx2_mbox_destroy(&mbox->mbox);
@@ -297,16 +311,25 @@ static int otx2vf_vfaf_mbox_init(struct otx2_nic *vf)
if (!vf->mbox_wq)
return -ENOMEM;
- /* Mailbox is a reserved memory (in RAM) region shared between
- * admin function (i.e PF0) and this VF, shouldn't be mapped as
- * device memory to allow unaligned accesses.
- */
- hwbase = ioremap_wc(pci_resource_start(vf->pdev, PCI_MBOX_BAR_NUM),
- pci_resource_len(vf->pdev, PCI_MBOX_BAR_NUM));
- if (!hwbase) {
- dev_err(vf->dev, "Unable to map VFAF mailbox region\n");
- err = -ENOMEM;
- goto exit;
+ if (test_bit(CN10K_MBOX, &vf->hw.cap_flag)) {
+ /* On the cn10k platform the VF mailbox region is in its BAR2
+ * register space
+ */
+ hwbase = vf->reg_base + RVU_VF_MBOX_REGION;
+ } else {
+ /* Mailbox is a reserved memory (in RAM) region shared between
+ * admin function (i.e. PF0) and this VF, shouldn't be mapped as
+ * device memory to allow unaligned accesses.
+ */
+ hwbase = ioremap_wc(pci_resource_start(vf->pdev,
+ PCI_MBOX_BAR_NUM),
+ pci_resource_len(vf->pdev,
+ PCI_MBOX_BAR_NUM));
+ if (!hwbase) {
+ dev_err(vf->dev, "Unable to map VFAF mailbox region\n");
+ err = -ENOMEM;
+ goto exit;
+ }
}
err = otx2_mbox_init(&mbox->mbox, hwbase, vf->pdev, vf->reg_base,
@@ -329,6 +352,8 @@ static int otx2vf_vfaf_mbox_init(struct otx2_nic *vf)
return 0;
exit:
+ if (hwbase && !test_bit(CN10K_MBOX, &vf->hw.cap_flag))
+ iounmap(hwbase);
destroy_workqueue(vf->mbox_wq);
return err;
}
@@ -344,7 +369,7 @@ static int otx2vf_open(struct net_device *netdev)
/* LBKs do not receive link events so tell everyone we are up here */
vf = netdev_priv(netdev);
- if (is_otx2_lbkvf(vf->pdev)) {
+ if (is_otx2_lbkvf(vf->pdev) || is_otx2_sdpvf(vf->pdev)) {
pr_info("%s NIC Link is UP\n", netdev->name);
netif_carrier_on(netdev);
netif_tx_start_all_queues(netdev);
@@ -383,8 +408,45 @@ static netdev_tx_t otx2vf_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
}
+static void otx2vf_set_rx_mode(struct net_device *netdev)
+{
+ struct otx2_nic *vf = netdev_priv(netdev);
+
+ queue_work(vf->otx2_wq, &vf->rx_mode_work);
+}
+
+static void otx2vf_do_set_rx_mode(struct work_struct *work)
+{
+ struct otx2_nic *vf = container_of(work, struct otx2_nic, rx_mode_work);
+ struct net_device *netdev = vf->netdev;
+ unsigned int flags = netdev->flags;
+ struct nix_rx_mode *req;
+
+ mutex_lock(&vf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_nix_set_rx_mode(&vf->mbox);
+ if (!req) {
+ mutex_unlock(&vf->mbox.lock);
+ return;
+ }
+
+ req->mode = NIX_RX_MODE_UCAST;
+
+ if (flags & IFF_PROMISC)
+ req->mode |= NIX_RX_MODE_PROMISC;
+ if (flags & (IFF_ALLMULTI | IFF_MULTICAST))
+ req->mode |= NIX_RX_MODE_ALLMULTI;
+
+ req->mode |= NIX_RX_MODE_USE_MCE;
+
+ otx2_sync_mbox_msg(&vf->mbox);
+
+ mutex_unlock(&vf->mbox.lock);
+}
+
static int otx2vf_change_mtu(struct net_device *netdev, int new_mtu)
{
+ struct otx2_nic *vf = netdev_priv(netdev);
bool if_up = netif_running(netdev);
int err = 0;
@@ -394,6 +456,10 @@ static int otx2vf_change_mtu(struct net_device *netdev, int new_mtu)
netdev_info(netdev, "Changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
+ /* Modify the receive buffer size based on the MTU and do not
+ * use the previously configured fixed size.
+ */
+ vf->hw.rbuf_fixed_size = 0;
if (if_up)
err = otx2vf_open(netdev);
@@ -416,16 +482,37 @@ static void otx2vf_reset_task(struct work_struct *work)
rtnl_unlock();
}
+static int otx2vf_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ return otx2_handle_ntuple_tc_features(netdev, features);
+}
+
static const struct net_device_ops otx2vf_netdev_ops = {
.ndo_open = otx2vf_open,
.ndo_stop = otx2vf_stop,
.ndo_start_xmit = otx2vf_xmit,
+ .ndo_set_rx_mode = otx2vf_set_rx_mode,
.ndo_set_mac_address = otx2_set_mac_address,
.ndo_change_mtu = otx2vf_change_mtu,
+ .ndo_set_features = otx2vf_set_features,
.ndo_get_stats64 = otx2_get_stats64,
.ndo_tx_timeout = otx2_tx_timeout,
+ .ndo_do_ioctl = otx2_ioctl,
+ .ndo_setup_tc = otx2_setup_tc,
};
+static int otx2_wq_init(struct otx2_nic *vf)
+{
+ vf->otx2_wq = create_singlethread_workqueue("otx2vf_wq");
+ if (!vf->otx2_wq)
+ return -ENOMEM;
+
+ INIT_WORK(&vf->rx_mode_work, otx2vf_do_set_rx_mode);
+ INIT_WORK(&vf->reset_task, otx2vf_reset_task);
+ return 0;
+}
+
static int otx2vf_realloc_msix_vectors(struct otx2_nic *vf)
{
struct otx2_hw *hw = &vf->hw;
@@ -496,6 +583,9 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
hw->rx_queues = qcount;
hw->tx_queues = qcount;
hw->max_queues = qcount;
+ hw->tot_tx_queues = qcount;
+ /* Use a 128 byte CQE descriptor size by default */
+ hw->xqe_size = 128;
hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
GFP_KERNEL);
@@ -525,6 +615,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_free_irq_vectors;
}
+ otx2_setup_dev_hw_settings(vf);
/* Init VF <=> PF mailbox stuff */
err = otx2vf_vfaf_mbox_init(vf);
if (err)
@@ -548,7 +639,18 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_detach_rsrc;
- otx2_setup_dev_hw_settings(vf);
+ err = cn10k_lmtst_init(vf);
+ if (err)
+ goto err_detach_rsrc;
+
+ err = otx2smqvf_probe(vf);
+ if (!err)
+ return 0;
+ else if (err == -EINVAL)
+ goto err_detach_rsrc;
+
+ /* Don't check for errors; proceed without PTP if init fails */
+ otx2_ptp_init(vf);
/* Assign default mac address */
otx2_get_mac_from_af(netdev);
@@ -558,17 +660,23 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_GSO_UDP_L4;
netdev->features = netdev->hw_features;
+ /* Support TSO on tag interface */
+ netdev->vlan_features |= netdev->features;
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX;
+ netdev->features |= netdev->hw_features;
+
+ netdev->hw_features |= NETIF_F_NTUPLE;
+ netdev->hw_features |= NETIF_F_RXALL;
+ netdev->hw_features |= NETIF_F_HW_TC;
netdev->gso_max_segs = OTX2_MAX_GSO_SEGS;
netdev->watchdog_timeo = OTX2_TX_TIMEOUT;
netdev->netdev_ops = &otx2vf_netdev_ops;
- /* MTU range: 68 - 9190 */
netdev->min_mtu = OTX2_MIN_MTU;
- netdev->max_mtu = OTX2_MAX_MTU;
-
- INIT_WORK(&vf->reset_task, otx2vf_reset_task);
+ netdev->max_mtu = otx2_get_max_mtu(vf);
/* To distinguish, for LBK VFs set netdev name explicitly */
if (is_otx2_lbkvf(vf->pdev)) {
@@ -580,21 +688,63 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
snprintf(netdev->name, sizeof(netdev->name), "lbk%d", n);
}
+ /* To distinguish, for SDP VFs set netdev name explicitly */
+ if (is_otx2_sdpvf(vf->pdev)) {
+ int n;
+
+ n = (vf->pcifunc >> RVU_PFVF_FUNC_SHIFT) & RVU_PFVF_FUNC_MASK;
+ /* Need to subtract 1 to get proper VF number */
+ n -= 1;
+ snprintf(netdev->name, sizeof(netdev->name), "sdp%d-%d", pdev->bus->number, n);
+ }
+
err = register_netdev(netdev);
if (err) {
dev_err(dev, "Failed to register netdevice\n");
- goto err_detach_rsrc;
+ goto err_ptp_destroy;
}
- otx2vf_set_ethtool_ops(netdev);
+ err = otx2_wq_init(vf);
+ if (err)
+ goto err_unreg_netdev;
- /* Enable pause frames by default */
- vf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED;
- vf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED;
+ if (!is_otx2_sdpvf(vf->pdev))
+ otx2vf_set_ethtool_ops(netdev);
+
+ err = otx2vf_mcam_flow_init(vf);
+ if (err)
+ goto err_unreg_netdev;
+
+ err = otx2_init_tc(vf);
+ if (err)
+ goto err_unreg_netdev;
+
+ err = otx2_register_dl(vf);
+ if (err)
+ goto err_shutdown_tc;
+
+ /* Set interface mode as Default */
+ vf->ethtool_flags |= OTX2_PRIV_FLAG_DEF_MODE;
+
+#ifdef CONFIG_DCB
+ err = otx2_dcbnl_set_ops(netdev);
+ if (err)
+ goto err_shutdown_tc;
+#endif
return 0;
+err_shutdown_tc:
+ otx2_shutdown_tc(vf);
+err_unreg_netdev:
+ unregister_netdev(netdev);
+err_ptp_destroy:
+ otx2_ptp_destroy(vf);
err_detach_rsrc:
+ if (vf->hw.lmt_info)
+ free_percpu(vf->hw.lmt_info);
+ if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
+ qmem_free(vf->dev, vf->dync_lmt);
otx2_detach_resources(&vf->mbox);
err_disable_mbox_intr:
otx2vf_disable_mbox_intr(vf);
@@ -620,11 +770,38 @@ static void otx2vf_remove(struct pci_dev *pdev)
vf = netdev_priv(netdev);
- cancel_work_sync(&vf->reset_task);
- unregister_netdev(netdev);
- otx2vf_disable_mbox_intr(vf);
+ /* Disable 802.3x pause frames */
+ if (vf->flags & OTX2_FLAG_RX_PAUSE_ENABLED ||
+ (vf->flags & OTX2_FLAG_TX_PAUSE_ENABLED)) {
+ vf->flags &= ~OTX2_FLAG_RX_PAUSE_ENABLED;
+ vf->flags &= ~OTX2_FLAG_TX_PAUSE_ENABLED;
+ otx2_config_pause_frm(vf);
+ }
+
+#ifdef CONFIG_DCB
+ /* Disable PFC config */
+ if (vf->pfc_en) {
+ vf->pfc_en = 0;
+ otx2_config_priority_flow_ctrl(vf);
+ }
+#endif
+ if (otx2smqvf_remove(vf)) {
+ otx2_unregister_dl(vf);
+ cancel_work_sync(&vf->reset_task);
+ unregister_netdev(netdev);
+ }
+
+ if (vf->otx2_wq)
+ destroy_workqueue(vf->otx2_wq);
+
+ otx2_ptp_destroy(vf);
+ otx2vf_disable_mbox_intr(vf);
otx2_detach_resources(&vf->mbox);
+ if (vf->hw.lmt_info)
+ free_percpu(vf->hw.lmt_info);
+ if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
+ qmem_free(vf->dev, vf->dync_lmt);
otx2vf_vfaf_mbox_destroy(vf);
pci_free_irq_vectors(vf->pdev);
pci_set_drvdata(pdev, NULL);