Diffstat (limited to 'drivers/net')
 -rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_main.c            |   9
 -rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/Makefile          |   2
 -rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/common.h          |   3
 -rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mbox.c            |  18
 -rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mbox.h            |  19
 -rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/npc.h             |   9
 -rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h     | 630
 -rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.c             |  23
 -rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.h             |  15
 -rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c         |   2
 -rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c         |  29
 -rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c         |  91
 -rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c      |  36
 -rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c         |  51
 -rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_validation.c  |  22
 -rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c    |  62
 -rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h    | 179
 -rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c   | 314
 -rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c     |  39
 -rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c        | 244
 -rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c       |  11
 -rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h       |   9
 -rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c      | 467
 -rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h      |  26
 -rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c        |  51
 25 files changed, 1764 insertions(+), 597 deletions(-)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 40a44dcb3d9b..48757afeb34e 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -126,8 +126,7 @@ static void nicvf_write_to_mbx(struct nicvf *nic, union nic_mbx *mbx)
int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
{
- int timeout = NIC_MBOX_MSG_TIMEOUT;
- int sleep = 10;
+ unsigned long timeout;
int ret = 0;
mutex_lock(&nic->rx_mode_mtx);
@@ -137,6 +136,7 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
nicvf_write_to_mbx(nic, mbx);
+ timeout = jiffies + msecs_to_jiffies(NIC_MBOX_MSG_TIMEOUT);
/* Wait for previous message to be acked, timeout 2sec */
while (!nic->pf_acked) {
if (nic->pf_nacked) {
@@ -146,11 +146,10 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
ret = -EINVAL;
break;
}
- msleep(sleep);
+ usleep_range(8000, 10000);
if (nic->pf_acked)
break;
- timeout -= sleep;
- if (!timeout) {
+ if (time_after(jiffies, timeout)) {
netdev_err(nic->netdev,
"PF didn't ACK to mbox msg 0x%02x from VF%d\n",
(mbx->msg.msg & 0xFF), nic->vf_id);
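[Reviewer note] The hunk above replaces an open-coded millisecond countdown with the kernel's jiffies deadline pattern, which stays correct even when an iteration oversleeps, and swaps msleep() for usleep_range(), which avoids timer-rounding overshoot on short waits. A minimal sketch of the same pattern — my_wait_for_ack(), my_hw_done() and MY_TIMEOUT_MS are hypothetical names, not part of this patch:

	#include <linux/delay.h>
	#include <linux/errno.h>
	#include <linux/jiffies.h>

	#define MY_TIMEOUT_MS	2000	/* total budget, cf. NIC_MBOX_MSG_TIMEOUT */

	static bool my_hw_done(void);	/* hypothetical completion check */

	static int my_wait_for_ack(void)
	{
		/* Compute the deadline once; time_after() copes with jiffies wrap */
		unsigned long timeout = jiffies + msecs_to_jiffies(MY_TIMEOUT_MS);

		while (!time_after(jiffies, timeout)) {
			if (my_hw_done())
				return 0;
			/* Sleep in a range so the scheduler can batch wakeups */
			usleep_range(8000, 10000);
		}
		return -ETIMEDOUT;
	}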
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
index c39385c4855b..a64046727ede 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
@@ -10,4 +10,4 @@ octeontx2_mbox-y := mbox.o
octeontx2_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
rvu_reg.o rvu_npc.o rvu_validation.o rvu_sso.o \
rvu_tim.o rvu_cpt.o rvu_debugfs.o rvu_npc_fs.o \
- ptp.o rvu_ptp.o rvu_fixes.o
+ ptp.o rvu_ptp.o rvu_fixes.o rvu_sdp.o
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h
index d537c1126834..4f8893ade710 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h
@@ -176,12 +176,15 @@ enum nix_scheduler {
#define NIX_INTF_TYPE_CGX 0
#define NIX_INTF_TYPE_LBK 1
+#define NIX_INTF_TYPE_SDP 2
#define MAX_LMAC_PKIND 12
#define NIX_LINK_CGX_LMAC(a, b) (0 + 4 * (a) + (b))
#define NIX_LINK_LBK(a) (12 + (a))
#define NIX_CHAN_CGX_LMAC_CHX(a, b, c) (0x800 + 0x100 * (a) + 0x10 * (b) + (c))
#define NIX_CHAN_LBK_CHX(a, b) (0 + 0x100 * (a) + (b))
+#define NIX_CHAN_SDP_CH_START (0x700ull)
+#define NIX_CHAN_SDP_CHX(a) (NIX_CHAN_SDP_CH_START + (a))
/* NIX LSO format indices.
* As of now TSO is the only one using, so statically assigning indices.
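[Reviewer note] The channel macros above pin down the NIX channel map: CGX/LMAC channels start at 0x800 (0x100 per CGX, 0x10 per LMAC), LBK channels start at 0, and this patch slots SDP channels in at a 0x700 base. A few worked values written as compile-time checks — a sketch assuming common.h and static_assert() from linux/build_bug.h are in scope:

	#include <linux/build_bug.h>

	/* NIX_CHAN_CGX_LMAC_CHX(a, b, c) = 0x800 + 0x100*a + 0x10*b + c */
	static_assert(NIX_CHAN_CGX_LMAC_CHX(1, 2, 3) == 0x923);
	/* SDP channels begin at the new 0x700 base */
	static_assert(NIX_CHAN_SDP_CHX(0) == 0x700);
	/* LBK 0, channel 1 */
	static_assert(NIX_CHAN_LBK_CHX(0, 1) == 0x001);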
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
index 1a245e15e751..387e33fa417a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
@@ -136,19 +136,17 @@ EXPORT_SYMBOL(otx2_mbox_init);
int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid)
{
+ unsigned long timeout = jiffies + msecs_to_jiffies(MBOX_RSP_TIMEOUT);
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
struct device *sender = &mbox->pdev->dev;
- int timeout = 0, sleep = 1;
-
- while (mdev->num_msgs != mdev->msgs_acked) {
- msleep(sleep);
- timeout += sleep;
- if (timeout >= MBOX_RSP_TIMEOUT) {
- dev_dbg(sender, "timed out while waiting for rsp\n");
- return -EIO;
- }
+
+ while (!time_after(jiffies, timeout)) {
+ if (mdev->num_msgs == mdev->msgs_acked)
+ return 0;
+ usleep_range(800, 1000);
}
- return 0;
+ dev_dbg(sender, "timed out while waiting for rsp\n");
+ return -EIO;
}
EXPORT_SYMBOL(otx2_mbox_wait_for_rsp);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index efe8b2afb7a8..950ba1774ba7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -36,7 +36,7 @@
#define INTR_MASK(pfvfs) ((pfvfs < 64) ? (BIT_ULL(pfvfs) - 1) : (~0ull))
-#define MBOX_RSP_TIMEOUT 1000 /* in ms, Time to wait for mbox response */
+#define MBOX_RSP_TIMEOUT 2000 /* Time(ms) to wait for mbox response */
#define MBOX_MSG_ALIGN 16 /* Align mbox msg start to 16bytes */
@@ -86,7 +86,7 @@ struct mbox_msghdr {
#define OTX2_MBOX_REQ_SIG (0xdead)
#define OTX2_MBOX_RSP_SIG (0xbeef)
u16 sig; /* Signature, for validating corrupted msgs */
-#define OTX2_MBOX_VERSION (0x0002)
+#define OTX2_MBOX_VERSION (0x0003)
u16 ver; /* Version of msg's structure for this ID */
u16 next_msgoff; /* Offset of next msg within mailbox region */
int rc; /* Msg process'ed response code */
@@ -247,6 +247,8 @@ M(NPC_DELETE_FLOW, 0x600e, npc_delete_flow, \
M(NPC_MCAM_READ_ENTRY, 0x600f, npc_mcam_read_entry, \
npc_mcam_read_entry_req, \
npc_mcam_read_entry_rsp) \
+M(NPC_SET_PKIND, 0x6010, npc_set_pkind, \
+ npc_set_pkind, msg_rsp) \
/* NIX mbox IDs (range 0x8000 - 0xFFFF) */ \
M(NIX_LF_ALLOC, 0x8000, nix_lf_alloc, \
nix_lf_alloc_req, nix_lf_alloc_rsp) \
@@ -535,6 +537,18 @@ struct cgx_phy_mod_type {
int mod;
};
+struct npc_set_pkind {
+ struct mbox_msghdr hdr;
+#define OTX2_PRIV_FLAGS_DEFAULT BIT_ULL(0)
+#define OTX2_PRIV_FLAGS_EDSA BIT_ULL(1)
+#define OTX2_PRIV_FLAGS_HIGIG BIT_ULL(2)
+#define OTX2_PRIV_FLAGS_CUSTOM BIT_ULL(63)
+ u64 mode;
+#define PKIND_TX BIT_ULL(0)
+#define PKIND_RX BIT_ULL(1)
+ u8 dir;
+ u8 pkind; /* valid only in case of custom flag */
+};
struct cgx_set_link_mode_args {
u32 speed;
u8 duplex;
@@ -693,6 +707,7 @@ struct nix_lf_alloc_rsp {
u8 lf_tx_stats; /* NIX_AF_CONST1::LF_TX_STATS */
u16 cints; /* NIX_AF_CONST2::CINTS */
u16 qints; /* NIX_AF_CONST2::QINTS */
+ u8 hw_rx_tstamp_en;
};
struct nix_lf_free_req {
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
index a54539fdfb7b..6c1c50247adb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -31,6 +31,8 @@ enum npc_kpu_la_ltype {
NPC_LT_LA_IH_8_ETHER,
NPC_LT_LA_IH_4_ETHER,
NPC_LT_LA_IH_2_ETHER,
+ NPC_LT_LA_HIGIG2_ETHER,
+ NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
NPC_LT_LA_CUSTOM0 = 0xE,
NPC_LT_LA_CUSTOM1 = 0xF,
};
@@ -137,6 +139,13 @@ enum npc_kpu_lh_ltype {
NPC_LT_LH_CUSTOM1 = 0xF,
};
+enum npc_pkind_type {
+ NPC_TX_HIGIG_PKIND = 60ULL,
+ NPC_RX_HIGIG_PKIND,
+ NPC_RX_EDSA_PKIND,
+ NPC_TX_DEF_PKIND,
+};
+
struct npc_kpu_profile_cam {
u8 state;
u8 state_mask;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
index 0f52e2df4698..832810ad6b02 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
@@ -146,6 +146,8 @@ enum npc_kpu_parser_state {
NPC_S_KPU1_IH_NIX,
NPC_S_KPU1_IH,
NPC_S_KPU1_EXDSA,
+ NPC_S_KPU1_HIGIG2,
+ NPC_S_KPU1_IH_NIX_HIGIG2,
NPC_S_KPU2_CTAG,
NPC_S_KPU2_CTAG2,
NPC_S_KPU2_SBTAG,
@@ -232,11 +234,13 @@ enum npc_kpu_parser_state {
};
enum npc_kpu_la_uflag {
- NPC_F_LA_U_UNK_ETYPE = 0x10,
- NPC_F_LA_U_HAS_TAG = 0x20,
+ NPC_F_LA_U_HAS_TAG = 0x10,
+ NPC_F_LA_U_HAS_IH_NIX = 0x20,
+ NPC_F_LA_U_HAS_HIGIG2 = 0x40,
};
enum npc_kpu_la_lflag {
- NPC_F_LA_L_WITH_VLAN = 1,
+ NPC_F_LA_L_UNK_ETYPE = 1,
+ NPC_F_LA_L_WITH_VLAN,
NPC_F_LA_L_WITH_ETAG,
NPC_F_LA_L_WITH_ITAG,
NPC_F_LA_L_WITH_MPLS,
@@ -957,8 +961,8 @@ static struct npc_kpu_profile_action ikpu_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 12, 16, 20, 0, 0,
- NPC_S_KPU1_ETHER, 0, 0,
+ 36, 40, 44, 0, 0,
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0, 0,
NPC_LID_LA, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -966,8 +970,8 @@ static struct npc_kpu_profile_action ikpu_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 12, 16, 20, 0, 0,
- NPC_S_KPU1_ETHER, 0, 0,
+ 28, 32, 36, 0, 0,
+ NPC_S_KPU1_HIGIG2, 0, 0,
NPC_LID_LA, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -1364,6 +1368,294 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_ETAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_HIGIG2, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_ETAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_ITAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -7115,7 +7407,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
4, 8, 0, 0, 0,
NPC_S_KPU2_CTAG, 12, 1,
NPC_LID_LA, NPC_LT_LA_ETHER,
- NPC_F_LA_U_HAS_TAG|NPC_F_LA_L_WITH_VLAN,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
0, 0, 0, 0,
},
{
@@ -7123,7 +7415,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
4, 8, 22, 0, 0,
NPC_S_KPU2_SBTAG, 12, 1,
NPC_LID_LA, NPC_LT_LA_ETHER,
- NPC_F_LA_U_HAS_TAG|NPC_F_LA_L_WITH_VLAN,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
0, 0, 0, 0,
},
{
@@ -7131,7 +7423,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
4, 8, 0, 0, 0,
NPC_S_KPU2_QINQ, 12, 1,
NPC_LID_LA, NPC_LT_LA_ETHER,
- NPC_F_LA_U_HAS_TAG|NPC_F_LA_L_WITH_VLAN,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
0, 0, 0, 0,
},
{
@@ -7139,7 +7431,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
8, 12, 26, 0, 0,
NPC_S_KPU2_ETAG, 12, 1,
NPC_LID_LA, NPC_LT_LA_ETHER,
- NPC_F_LA_U_HAS_TAG|NPC_F_LA_L_WITH_ETAG,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ETAG,
0, 0, 0, 0,
},
{
@@ -7147,7 +7439,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
18, 22, 26, 0, 0,
NPC_S_KPU2_ITAG, 12, 1,
NPC_LID_LA, NPC_LT_LA_ETHER,
- NPC_F_LA_U_HAS_TAG|NPC_F_LA_L_WITH_ITAG,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ITAG,
0, 0, 0, 0,
},
{
@@ -7203,7 +7495,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
0, 0, 0, 0, 1,
NPC_S_NA, 0, 1,
NPC_LID_LA, NPC_LT_LA_ETHER,
- NPC_F_LA_U_UNK_ETYPE,
+ NPC_F_LA_L_UNK_ETYPE,
0, 0, 0, 0,
},
{
@@ -7211,7 +7503,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
8, 0, 6, 3, 0,
NPC_S_KPU5_IP, 22, 1,
NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
- 0,
+ NPC_F_LA_U_HAS_IH_NIX,
0, 0, 0, 0,
},
{
@@ -7219,7 +7511,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
6, 0, 0, 3, 0,
NPC_S_KPU5_IP6, 22, 1,
NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
- 0,
+ NPC_F_LA_U_HAS_IH_NIX,
0, 0, 0, 0,
},
{
@@ -7227,7 +7519,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
0, 0, 0, 3, 0,
NPC_S_KPU5_ARP, 22, 1,
NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
- 0,
+ NPC_F_LA_U_HAS_IH_NIX,
0, 0, 0, 0,
},
{
@@ -7235,7 +7527,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
0, 0, 0, 3, 0,
NPC_S_KPU5_RARP, 22, 1,
NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
- 0,
+ NPC_F_LA_U_HAS_IH_NIX,
0, 0, 0, 0,
},
{
@@ -7243,7 +7535,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
0, 0, 0, 3, 0,
NPC_S_KPU5_PTP, 22, 1,
NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
- 0,
+ NPC_F_LA_U_HAS_IH_NIX,
0, 0, 0, 0,
},
{
@@ -7251,7 +7543,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
0, 0, 0, 3, 0,
NPC_S_KPU5_FCOE, 22, 1,
NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
- 0,
+ NPC_F_LA_U_HAS_IH_NIX,
0, 0, 0, 0,
},
{
@@ -7259,7 +7551,8 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
8, 12, 0, 0, 0,
NPC_S_KPU2_CTAG2, 20, 1,
NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
- NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_VLAN,
0, 0, 0, 0,
},
{
@@ -7267,7 +7560,8 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
4, 8, 0, 0, 0,
NPC_S_KPU2_CTAG, 20, 1,
NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
- NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_VLAN,
0, 0, 0, 0,
},
{
@@ -7275,7 +7569,8 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
4, 8, 22, 0, 0,
NPC_S_KPU2_SBTAG, 20, 1,
NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
- NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_VLAN,
0, 0, 0, 0,
},
{
@@ -7283,7 +7578,8 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
4, 8, 0, 0, 0,
NPC_S_KPU2_QINQ, 20, 1,
NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
- NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_VLAN,
0, 0, 0, 0,
},
{
@@ -7291,7 +7587,8 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
8, 12, 26, 0, 0,
NPC_S_KPU2_ETAG, 20, 1,
NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
- NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ETAG,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_ETAG,
0, 0, 0, 0,
},
{
@@ -7299,7 +7596,8 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
18, 22, 26, 0, 0,
NPC_S_KPU2_ITAG, 20, 1,
NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
- NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ITAG,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_ITAG,
0, 0, 0, 0,
},
{
@@ -7307,7 +7605,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
2, 6, 10, 2, 0,
NPC_S_KPU4_MPLS, 22, 1,
NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
- NPC_F_LA_L_WITH_MPLS,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_L_WITH_MPLS,
0, 0, 0, 0,
},
{
@@ -7315,7 +7613,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
2, 6, 10, 2, 0,
NPC_S_KPU4_MPLS, 22, 1,
NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
- NPC_F_LA_L_WITH_MPLS,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_L_WITH_MPLS,
0, 0, 0, 0,
},
{
@@ -7323,7 +7621,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
2, 0, 0, 2, 0,
NPC_S_KPU4_NSH, 22, 1,
NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
- NPC_F_LA_L_WITH_NSH,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_L_WITH_NSH,
0, 0, 0, 0,
},
{
@@ -7331,7 +7629,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
0, 0, 0, 0, 1,
NPC_S_NA, 0, 1,
NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
- NPC_F_LA_U_UNK_ETYPE,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_L_UNK_ETYPE,
0, 0, 0, 0,
},
{
@@ -7363,7 +7661,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
0, 0, 0, 0, 1,
NPC_S_NA, 0, 1,
NPC_LID_LA, NPC_LT_LA_ETHER,
- NPC_F_LA_U_UNK_ETYPE,
+ NPC_F_LA_L_UNK_ETYPE,
0, 0, 0, 0,
},
{
@@ -7383,6 +7681,278 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU5_IP, 30, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU5_IP6, 30, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_ARP, 30, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_RARP, 30, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_PTP, 30, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_FCOE, 30, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 0, 0, 0,
+ NPC_S_KPU2_CTAG2, 28, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_CTAG, 28, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 22, 0, 0,
+ NPC_S_KPU2_SBTAG, 28, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_QINQ, 28, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 26, 0, 0,
+ NPC_S_KPU2_ETAG, 28, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_ETAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 18, 22, 26, 0, 0,
+ NPC_S_KPU2_ITAG, 28, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_U_HAS_TAG
+ | NPC_F_LA_L_WITH_ITAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 30, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_L_WITH_MPLS,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 30, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_L_WITH_MPLS,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU4_NSH, 30, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_L_WITH_NSH,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_L_UNK_ETYPE,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU5_IP, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU5_IP6, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_ARP, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_RARP, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_PTP, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_FCOE, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 0, 0, 0,
+ NPC_S_KPU2_CTAG2, 36, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
+ | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_CTAG, 36, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
+ | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 22, 0, 0,
+ NPC_S_KPU2_SBTAG, 36, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
+ | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_QINQ, 36, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
+ | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 26, 0, 0,
+ NPC_S_KPU2_ETAG, 36, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
+ | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ETAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 18, 22, 26, 0, 0,
+ NPC_S_KPU2_ITAG, 36, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
+ | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ITAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
+ | NPC_F_LA_L_WITH_MPLS,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
+ | NPC_F_LA_L_WITH_MPLS,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU4_NSH, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
+ | NPC_F_LA_L_WITH_NSH,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
+ | NPC_F_LA_L_UNK_ETYPE,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LA, NPC_EC_L2_K1,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index c20faedabaa9..3868898757f6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -617,7 +617,10 @@ setup_vfmsix:
*/
cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
max_msix = cfg & 0xFFFFF;
- phy_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE);
+ if (rvu->fwdata && rvu->fwdata->msixtr_base)
+ phy_addr = rvu->fwdata->msixtr_base;
+ else
+ phy_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE);
/* Register save */
rvu->msixtr_base_phy = phy_addr;
iova = dma_map_resource(rvu->dev, phy_addr,
@@ -699,6 +702,8 @@ static void rvu_setup_pfvf_macaddress(struct rvu *rvu)
mac = &rvu->fwdata->pf_macs[pf];
if (*mac)
u64_to_ether_addr(*mac, pfvf->mac_addr);
+ else
+ eth_random_addr(pfvf->mac_addr);
} else {
eth_random_addr(pfvf->mac_addr);
}
@@ -711,6 +716,8 @@ static void rvu_setup_pfvf_macaddress(struct rvu *rvu)
mac = &rvu->fwdata->vf_macs[hwvf];
if (*mac)
u64_to_ether_addr(*mac, pfvf->mac_addr);
+ else
+ eth_random_addr(pfvf->mac_addr);
} else {
eth_random_addr(pfvf->mac_addr);
}
@@ -904,6 +911,8 @@ init:
mutex_init(&rvu->rsrc_lock);
+ rvu_fwdata_init(rvu);
+
err = rvu_setup_msix_resources(rvu);
if (err)
return err;
@@ -927,8 +936,6 @@ init:
rvu_scan_block(rvu, block);
}
- rvu_fwdata_init(rvu);
-
err = rvu_npc_init(rvu);
if (err)
goto fwdata_err;
@@ -960,6 +967,10 @@ init:
if (err)
goto cgx_err;
+ err = rvu_sdp_init(rvu);
+ if (err)
+ goto cgx_err;
+
return 0;
cgx_err:
@@ -1337,12 +1348,12 @@ int rvu_mbox_handler_attach_resources(struct rvu *rvu,
goto exit;
/* Now attach the requested resources */
- if (attach->npalf)
- rvu_attach_block(rvu, pcifunc, BLKTYPE_NPA, 1);
-
if (attach->nixlf)
rvu_attach_block(rvu, pcifunc, BLKTYPE_NIX, 1);
+ if (attach->npalf)
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_NPA, 1);
+
if (attach->sso) {
/* RVU func doesn't know which exact LF or slot is attached
* to it, it always sees as slot 0,1,2. So for a 'modify'
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 6cb284b8027c..5e90047b9a62 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -392,7 +392,8 @@ struct rvu_fwdata {
u64 rclk;
u64 mcam_addr;
u64 mcam_sz;
-#define FWDATA_RESERVED_MEM 1024
+ u64 msixtr_base;
+#define FWDATA_RESERVED_MEM 1023
u64 reserved[FWDATA_RESERVED_MEM];
/* Do not add new fields below this line */
#define CGX_MAX 4
@@ -583,6 +584,7 @@ void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc);
int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start);
int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id, int index,
int rxtxflag, u64 *stat);
+bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc);
/* SSO APIs */
int rvu_sso_init(struct rvu *rvu);
@@ -643,11 +645,14 @@ void rvu_npc_get_mcam_counter_alloc_info(struct rvu *rvu, u16 pcifunc,
int *enable_cnt);
int npc_flow_steering_init(struct rvu *rvu, int blkaddr);
const char *npc_get_field_name(u8 hdr);
-int rvu_npc_write_default_rule(struct rvu *rvu, int blkaddr, int nixlf,
- u16 pcifunc, u8 intf, struct mcam_entry *entry);
+bool rvu_npc_write_default_rule(struct rvu *rvu, int blkaddr, int nixlf,
+ u16 pcifunc, u8 intf, struct mcam_entry *entry,
+ int *entry_index);
int npc_mcam_verify_channel(struct rvu *rvu, u16 pcifunc, u8 intf, u16 channel);
int npc_get_bank(struct npc_mcam *mcam, int index);
void npc_mcam_enable_flows(struct rvu *rvu, u16 target);
+void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index, bool enable);
/* CPT APIs */
int rvu_cpt_init(struct rvu *rvu);
@@ -658,6 +663,10 @@ void rvu_cpt_unregister_interrupts(struct rvu *rvu);
int rvu_tim_init(struct rvu *rvu);
int rvu_tim_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot);
+/* SDP APIs */
+int rvu_sdp_init(struct rvu *rvu);
+bool is_sdp_pf(u16 pcifunc);
+
/* CONFIG_DEBUG_FS*/
#ifdef CONFIG_DEBUG_FS
void rvu_dbg_init(struct rvu *rvu);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index a4d9848bb4db..e7f2a5678d57 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -350,7 +350,7 @@ int rvu_cgx_exit(struct rvu *rvu)
* VF's of mapped PF and other PFs are not allowed. This fn() checks
* whether a PFFUNC is permitted to do the config or not.
*/
-static inline bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc)
+inline bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc)
{
if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index ed5d42491d4b..52a590982ffd 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -60,8 +60,6 @@ enum nix_makr_fmt_indexes {
NIX_MARK_CFG_MAX,
};
-#define NIX_TX_PKIND 63ULL
-
/* For now considering MC resources needed for broadcast
* pkt replication only. i.e 256 HWVFs + 12 PFs.
*/
@@ -183,7 +181,8 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
int err;
pf = rvu_get_pf(pcifunc);
- if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
+ if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
+ type != NIX_INTF_TYPE_SDP)
return 0;
switch (type) {
@@ -224,6 +223,15 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
pfvf->rx_chan_base, false);
break;
+ case NIX_INTF_TYPE_SDP:
+ /* Only a single interface and channel are supported for now */
+ pfvf->rx_chan_base = NIX_CHAN_SDP_CHX(0);
+ pfvf->tx_chan_base = pfvf->rx_chan_base;
+ pfvf->rx_chan_cnt = 1;
+ pfvf->tx_chan_cnt = 1;
+ rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
+ pfvf->rx_chan_base, false);
+ break;
}
/* Add a UCAST forwarding rule in MCAM with this NIXLF attached
@@ -955,10 +963,10 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
struct nix_lf_alloc_rsp *rsp)
{
int nixlf, qints, hwctx_size, intf, err, rc = 0;
+ struct rvu_pfvf *pfvf, *parent_pf;
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
struct rvu_block *block;
- struct rvu_pfvf *pfvf;
u64 cfg, ctx_cfg;
int blkaddr;
@@ -968,6 +976,7 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
if (req->way_mask)
req->way_mask &= 0xFFFF;
+ parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
pfvf = rvu_get_pfvf(rvu, pcifunc);
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (!pfvf->nixlf || blkaddr < 0)
@@ -1125,10 +1134,13 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);
/* Configure pkind for TX parse config, 63 from npc_profile */
- cfg = NIX_TX_PKIND;
+ cfg = NPC_TX_DEF_PKIND;
rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);
intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
+ if (is_sdp_pf(pcifunc))
+ intf = NIX_INTF_TYPE_SDP;
+
err = nix_interface_init(rvu, pcifunc, intf, nixlf);
if (err)
goto free_mem;
@@ -1163,6 +1175,7 @@ exit:
cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
rsp->qints = ((cfg >> 12) & 0xFFF);
rsp->cints = ((cfg >> 24) & 0xFFF);
+ rsp->hw_rx_tstamp_en = parent_pf->hw_rx_tstamp_en;
return rc;
}
@@ -2882,6 +2895,7 @@ int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
struct nix_set_mac_addr *req,
struct msg_rsp *rsp)
{
+ bool from_vf = !!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK);
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
struct rvu_pfvf *pfvf;
@@ -2896,7 +2910,10 @@ int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
if (nixlf < 0)
return NIX_AF_ERR_AF_LF_INVALID;
- ether_addr_copy(pfvf->mac_addr, req->mac_addr);
+ /* Skip updating mac addr if request is from vf */
+ if (!from_vf)
+ ether_addr_copy(pfvf->mac_addr, req->mac_addr);
+
rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
pfvf->rx_chan_base, req->mac_addr);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index ba0a24798ed9..391f378c473f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -183,8 +183,8 @@ static bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam,
return (cfg & 1);
}
-static void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
- int blkaddr, int index, bool enable)
+void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index, bool enable)
{
int bank = npc_get_bank(mcam, index);
int actbank = bank;
@@ -898,6 +898,11 @@ static void npc_config_tx_ldata_extract(struct rvu *rvu, int blkaddr)
cfg = KEX_LD_CFG(0x01, 0x0, 0x1, 0x0, 0x4);
SET_KEX_LD(NIX_INTF_TX, NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, 0, cfg);
+ /* PF_FUNC in case of higig2 */
+ cfg = KEX_LD_CFG(0x01, 0x0, 0x1, 0x0, 0x4);
+ SET_KEX_LD(NIX_INTF_TX, NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER, 0,
+ cfg);
+
/* Layer B: Single VLAN (CTAG) */
/* CTAG VLAN[2..3] KW0[63:48] */
cfg = KEX_LD_CFG(0x01, 0x2, 0x1, 0x0, 0x6);
@@ -920,6 +925,11 @@ static void npc_config_tx_ldata_extract(struct rvu *rvu, int blkaddr)
cfg = KEX_LD_CFG(0x05, 0x8, 0x1, 0x0, 0xa);
SET_KEX_LD(NIX_INTF_TX, NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, 1, cfg);
+ /* Classification in higig2 header */
+ cfg = KEX_LD_CFG(0x01, 0x10, 0x1, 0x0, 0xa);
+ SET_KEX_LD(NIX_INTF_TX, NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER, 1,
+ cfg);
+
/* Layer C: IPv4 */
/* SIP+DIP: 8 bytes, KW2[63:0] */
cfg = KEX_LD_CFG(0x07, 0xc, 0x1, 0x0, 0x10);
@@ -959,6 +969,14 @@ static void npc_config_rx_ldata_extract(struct rvu *rvu, int blkaddr)
cfg = KEX_LD_CFG(0x01, 0xc, 0x1, 0x0, 0x4);
SET_KEX_LD(NIX_INTF_RX, NPC_LID_LA, NPC_LT_LA_ETHER, 1, cfg);
+ /* Classification in higig2 header */
+ cfg = KEX_LD_CFG(0x01, 0x8, 0x1, 0x0, NPC_PARSE_RESULT_DMAC_OFFSET);
+ SET_KEX_LD(NIX_INTF_RX, NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER, 0, cfg);
+
+ /* Vid in higig2 header */
+ cfg = KEX_LD_CFG(0x01, 0xc, 0x1, 0x0, NPC_PARSE_RESULT_DMAC_OFFSET + 2);
+ SET_KEX_LD(NIX_INTF_RX, NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER, 1, cfg);
+
/* Layer B: Single VLAN (CTAG) */
/* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */
cfg = KEX_LD_CFG(0x03, 0x2, 0x1, 0x0, 0x4);
@@ -2600,15 +2618,70 @@ int rvu_mbox_handler_npc_get_kex_cfg(struct rvu *rvu, struct msg_req *req,
return 0;
}
-int rvu_npc_write_default_rule(struct rvu *rvu, int blkaddr, int nixlf,
- u16 pcifunc, u8 intf, struct mcam_entry *entry)
+bool rvu_npc_write_default_rule(struct rvu *rvu, int blkaddr, int nixlf,
+ u16 pcifunc, u8 intf, struct mcam_entry *entry,
+ int *index)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
- int index;
+ bool enable;
- index = npc_get_nixlf_mcam_index(mcam, pcifunc,
- nixlf, NIXLF_UCAST_ENTRY);
- npc_config_mcam_entry(rvu, mcam, blkaddr, index, intf, entry, true);
+ *index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_UCAST_ENTRY);
+ /* don't force-enable the unicast entry */
+ enable = is_mcam_entry_enabled(rvu, mcam, blkaddr, *index);
+ npc_config_mcam_entry(rvu, mcam, blkaddr, *index, intf, entry, enable);
+
+ return enable;
+}
+
+int rvu_mbox_handler_npc_set_pkind(struct rvu *rvu,
+ struct npc_set_pkind *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ int blkaddr, nixlf, rc;
+ u64 rxpkind, txpkind;
+ u8 cgx_id, lmac_id;
+
+ /* use default pkind to disable edsa/higig */
+ rxpkind = rvu_npc_get_pkind(rvu, pf);
+ txpkind = NPC_TX_DEF_PKIND;
+
+ if (req->mode & OTX2_PRIV_FLAGS_EDSA) {
+ rxpkind = NPC_RX_EDSA_PKIND;
+ } else if (req->mode & OTX2_PRIV_FLAGS_HIGIG) {
+ rxpkind = NPC_RX_HIGIG_PKIND;
+ txpkind = NPC_TX_HIGIG_PKIND;
+ } else if (req->mode & OTX2_PRIV_FLAGS_CUSTOM) {
+ rxpkind = req->pkind;
+ txpkind = req->pkind;
+ }
+
+ if (req->dir & PKIND_RX) {
+ /* rx pkind set req valid only for cgx mapped PFs */
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -EPERM;
+ rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
+
+ rc = cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu),
+ lmac_id, rxpkind);
+ if (rc)
+ return rc;
+ }
+
+ if (req->dir & PKIND_TX) {
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
+
+ /* tx pkind set req valid if NIXLF attached */
+ if (!pfvf->nixlf || blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
- return index;
+ nixlf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr],
+ req->hdr.pcifunc, 0);
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf),
+ txpkind);
+ }
+ return 0;
}
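[Reviewer note] The handler above translates the mode bits from struct npc_set_pkind into RX/TX pkinds and programs CGX and/or NIX_AF_LFX_TX_PARSE_CFG accordingly. A hedged sketch of the PF-side caller — otx2_mbox_alloc_msg_npc_set_pkind() is the name the M() table in mbox.h would generate, and the lock/sync calls follow the otx2 driver's usual mailbox pattern; treat the exact helper names as assumptions:

	static int otx2_enable_higig2(struct otx2_nic *pfvf)
	{
		struct npc_set_pkind *req;
		int err;

		otx2_mbox_lock(&pfvf->mbox);
		req = otx2_mbox_alloc_msg_npc_set_pkind(&pfvf->mbox);
		if (!req) {
			otx2_mbox_unlock(&pfvf->mbox);
			return -ENOMEM;
		}

		req->mode = OTX2_PRIV_FLAGS_HIGIG;	/* AF picks the HIGIG pkinds */
		req->dir = PKIND_RX | PKIND_TX;		/* program both directions */
		/* req->pkind is honoured only with OTX2_PRIV_FLAGS_CUSTOM */

		err = otx2_sync_mbox_msg(&pfvf->mbox);
		otx2_mbox_unlock(&pfvf->mbox);
		return err;
	}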
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
index ce0eab703e54..c587b8dab336 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -856,7 +856,8 @@ static void npc_update_tx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target,
int nixlf, struct rvu_pfvf *pfvf,
struct npc_install_flow_req *req,
- struct npc_install_flow_rsp *rsp, bool enable)
+ struct npc_install_flow_rsp *rsp, bool enable,
+ bool pf_set_vfs_mac)
{
u64 features, installed_features, missing_features = 0;
struct rvu_npc_mcam_rule *def_rule = pfvf->def_rule;
@@ -899,15 +900,17 @@ static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target,
npc_update_flow(rvu, entry, missing_features,
&def_rule->packet, &def_rule->mask,
&dummy, req->intf);
- entry_index = rvu_npc_write_default_rule(rvu, blkaddr,
- nixlf, target,
- NIX_INTF_RX, entry);
+ enable = rvu_npc_write_default_rule(rvu, blkaddr,
+ nixlf, target,
+ NIX_INTF_RX, entry,
+ &entry_index);
installed_features = req->features | missing_features;
} else if (req->default_rule && !req->append) {
/* overwrite default rule */
- entry_index = rvu_npc_write_default_rule(rvu, blkaddr,
- nixlf, target,
- NIX_INTF_RX, entry);
+ enable = rvu_npc_write_default_rule(rvu, blkaddr,
+ nixlf, target,
+ NIX_INTF_RX, entry,
+ &entry_index);
} else if (msg_from_vf) {
/* normal rule - include default rule also to it for VF */
npc_update_flow(rvu, entry, missing_features, &def_rule->packet,
@@ -975,6 +978,10 @@ update_rule:
if (req->default_rule)
pfvf->def_rule = rule;
+ /* VF's MAC address is being changed via PF */
+ if (pf_set_vfs_mac)
+ ether_addr_copy(pfvf->mac_addr, req->packet.dmac);
+
return 0;
}
@@ -985,6 +992,7 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu,
bool from_vf = !!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK);
int blkaddr, nixlf, err;
struct rvu_pfvf *pfvf;
+ bool pf_set_vfs_mac = false;
bool enable = true;
u16 target;
@@ -1006,8 +1014,11 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu,
if (!req->hdr.pcifunc)
target = req->vf;
/* PF installing for its VF */
- else if (!from_vf && req->vf)
+ else if (!from_vf && req->vf) {
target = (req->hdr.pcifunc & ~RVU_PFVF_FUNC_MASK) | req->vf;
+ pf_set_vfs_mac = req->default_rule &&
+ (req->features & BIT_ULL(NPC_DMAC));
+ }
/* msg received from PF/VF */
else
target = req->hdr.pcifunc;
@@ -1045,7 +1056,7 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu,
return -EINVAL;
return npc_install_flow(rvu, blkaddr, target, nixlf, pfvf,
- req, rsp, enable);
+ req, rsp, enable, pf_set_vfs_mac);
}
static int npc_delete_flow(struct rvu *rvu, u16 entry, u16 pcifunc)
@@ -1134,6 +1145,13 @@ void npc_mcam_enable_flows(struct rvu *rvu, u16 target)
list_for_each_entry(rule, &mcam->mcam_rules, list) {
if (rule->intf == NIX_INTF_RX &&
rule->rx_action.pf_func == target && !rule->enable) {
+ if (rule->default_rule) {
+ npc_enable_mcam_entry(rvu, mcam, blkaddr,
+ rule->entry, true);
+ rule->enable = true;
+ continue;
+ }
+
if (rule->rx_action.op == NIX_RX_ACTION_DEFAULT) {
if (!pfvf->def_rule)
continue;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c
new file mode 100644
index 000000000000..071e69d7da15
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2019 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/pci.h>
+#include "rvu.h"
+
+/* SDP PF device id */
+#define PCI_DEVID_OTX2_SDP_PF 0xA0F6
+
+/* SDP PF number */
+static int sdp_pf_num = -1;
+
+bool is_sdp_pf(u16 pcifunc)
+{
+ if (rvu_get_pf(pcifunc) != sdp_pf_num)
+ return false;
+ if (pcifunc & RVU_PFVF_FUNC_MASK)
+ return false;
+
+ return true;
+}
+
+int rvu_sdp_init(struct rvu *rvu)
+{
+ struct pci_dev *pdev;
+ int i;
+
+ for (i = 0; i < rvu->hw->total_pfs; i++) {
+ pdev = pci_get_domain_bus_and_slot(
+ pci_domain_nr(rvu->pdev->bus), i + 1, 0);
+ if (!pdev)
+ continue;
+
+ if (pdev->device == PCI_DEVID_OTX2_SDP_PF) {
+ sdp_pf_num = i;
+ put_device(&pdev->dev);
+ break;
+ }
+
+ put_device(&pdev->dev);
+ }
+
+ return 0;
+}
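[Reviewer note] rvu_sdp_init() walks the PF bus numbers in the RVU's PCI domain to find which slot carries the 0xA0F6 SDP function. Note that the lookup returns a referenced device; the patch balances it with put_device(&pdev->dev), which is equivalent to the conventional pci_dev_put(). A sketch of that reference rule in isolation (sdp_pf_on_bus() is a hypothetical helper):

	static bool sdp_pf_on_bus(int domain, unsigned int bus)
	{
		struct pci_dev *pdev;
		bool found = false;

		/* The lookup takes a device reference on success... */
		pdev = pci_get_domain_bus_and_slot(domain, bus, PCI_DEVFN(0, 0));
		if (pdev) {
			found = (pdev->device == PCI_DEVID_OTX2_SDP_PF);
			/* ...which the caller must drop when done */
			pci_dev_put(pdev);
		}
		return found;
	}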
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_validation.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_validation.c
index ed66a93e33bc..33a7821456d1 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_validation.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_validation.c
@@ -16,6 +16,7 @@
#define PCI_DEVID_OCTEONTX2_SSO_RVU_PF 0xA0F9
#define PCI_DEVID_OCTEONTX2_NPA_RVU_PF 0xA0FB
#define PCI_DEVID_OCTEONTX2_CPT_RVU_PF 0xA0FD
+#define PCI_DEVID_OCTEONTX2_SDP_RVU_PF 0xA0F6
static u64 quotas_get_sum(struct rvu_quotas *quotas)
{
@@ -463,7 +464,8 @@ static void rvu_set_default_limits(struct rvu *rvu)
if (rvu->pf[i].pdev->device == PCI_DEVID_OCTEONTX2_SSO_RVU_PF)
sso_rvus++;
if (rvu->pf[i].pdev->device == PCI_DEVID_OCTEONTX2_RVU_PF ||
- rvu->pf[i].pdev->device == PCI_DEVID_OCTEONTX2_RVU_AF)
+ rvu->pf[i].pdev->device == PCI_DEVID_OCTEONTX2_RVU_AF ||
+ rvu->pf[i].pdev->device == PCI_DEVID_OCTEONTX2_SDP_RVU_PF)
nix_rvus++;
}
@@ -530,6 +532,24 @@ static void rvu_set_default_limits(struct rvu *rvu)
rvu->pf_limits.cpt->a[i].val = num_online_cpus();
rvu->pf_limits.npa->a[i].val = 1;
break;
+ case PCI_DEVID_OCTEONTX2_SDP_RVU_PF:
+ rvu->pf_limits.nix->a[i].val = 1 + totalvfs;
+ rvu->pf_limits.npa->a[i].val = 1 + totalvfs;
+ if (rvu->hw->cap.nix_fixed_txschq_mapping)
+ break;
+ rvu->pf_limits.smq->a[i].val =
+ nix_hw->txsch[NIX_TXSCH_LVL_SMQ].schq.max /
+ nix_rvus;
+ rvu->pf_limits.tl4->a[i].val =
+ nix_hw->txsch[NIX_TXSCH_LVL_TL4].schq.max /
+ nix_rvus;
+ rvu->pf_limits.tl3->a[i].val =
+ nix_hw->txsch[NIX_TXSCH_LVL_TL3].schq.max /
+ nix_rvus;
+ rvu->pf_limits.tl2->a[i].val =
+ nix_hw->txsch[NIX_TXSCH_LVL_TL2].schq.max /
+ nix_rvus;
+ break;
}
}
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 2d5ef65d40c2..0145dcf068e7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -207,7 +207,10 @@ int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu)
}
req->update_smq = true;
- req->maxlen = mtu + OTX2_ETH_HLEN;
+ /* Add EDSA/HIGIG2 header len to maxlen */
+ pfvf->max_frs = mtu + OTX2_ETH_HLEN + pfvf->addl_mtu;
+ req->maxlen = pfvf->max_frs;
+
err = otx2_sync_mbox_msg(&pfvf->mbox);
otx2_mbox_unlock(&pfvf->mbox);
return err;
@@ -335,9 +338,9 @@ void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx)
* usecs, convert that to 100ns count.
*/
otx2_write64(pfvf, NIX_LF_CINTX_WAIT(qidx),
- ((u64)(pfvf->cq_time_wait * 10) << 48) |
- ((u64)pfvf->cq_qcount_wait << 32) |
- (pfvf->cq_ecount_wait - 1));
+ ((u64)(pfvf->hw.cq_time_wait * 10) << 48) |
+ ((u64)pfvf->hw.cq_qcount_wait << 32) |
+ (pfvf->hw.cq_ecount_wait - 1));
}
dma_addr_t otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
@@ -406,12 +409,12 @@ static int otx2_get_link(struct otx2_nic *pfvf)
u16 map;
/* cgx lmac link */
- if (pfvf->tx_chan_base >= CGX_CHAN_BASE) {
- map = pfvf->tx_chan_base & 0x7FF;
+ if (pfvf->hw.tx_chan_base >= CGX_CHAN_BASE) {
+ map = pfvf->hw.tx_chan_base & 0x7FF;
link = 4 * ((map >> 8) & 0xF) + ((map >> 4) & 0xF);
}
/* LBK channel */
- if (pfvf->tx_chan_base < SDP_CHAN_BASE)
+ if (pfvf->hw.tx_chan_base < SDP_CHAN_BASE)
link = 12;
return link;
@@ -575,8 +578,8 @@ static int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura)
aq->rq.qint_idx = 0;
aq->rq.lpb_drop_ena = 1; /* Enable RED dropping for AURA */
aq->rq.xqe_drop_ena = 1; /* Enable RED dropping for CQ/SSO */
- aq->rq.xqe_pass = RQ_PASS_LVL_CQ(pfvf->rq_skid, qset->rqe_cnt);
- aq->rq.xqe_drop = RQ_DROP_LVL_CQ(pfvf->rq_skid, qset->rqe_cnt);
+ aq->rq.xqe_pass = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
+ aq->rq.xqe_drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
aq->rq.lpb_aura_pass = RQ_PASS_LVL_AURA;
aq->rq.lpb_aura_drop = RQ_DROP_LVL_AURA;
@@ -630,7 +633,7 @@ static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
sq->aura_id = sqb_aura;
sq->aura_fc_addr = pool->fc_addr->base;
sq->lmt_addr = (__force u64 *)(pfvf->reg_base + LMT_LF_LMTLINEX(qidx));
- sq->io_addr = (__force u64)(pfvf->reg_base + NIX_LF_OP_SENDX(0));
+ sq->io_addr = (__force u64)otx2_get_regaddr(pfvf, NIX_LF_OP_SENDX(0));
sq->stats.bytes = 0;
sq->stats.pkts = 0;
@@ -647,7 +650,7 @@ static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
/* Only one SMQ is allocated, map all SQ's to that SMQ */
aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
aq->sq.smq_rr_quantum = OTX2_MAX_MTU;
- aq->sq.default_chan = pfvf->tx_chan_base;
+ aq->sq.default_chan = pfvf->hw.tx_chan_base;
aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
aq->sq.sqb_aura = sqb_aura;
aq->sq.sq_int_ena = NIX_SQINT_BITS;
@@ -673,8 +676,16 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
int err, pool_id;
cq = &qset->cq[qidx];
- cq->cqe_cnt = (qidx < pfvf->hw.rx_queues) ? qset->rqe_cnt
- : qset->sqe_cnt;
+ cq->cq_idx = qidx;
+ if (qidx < pfvf->hw.rx_queues) {
+ cq->cq_type = CQ_RX;
+ cq->cint_idx = qidx;
+ cq->cqe_cnt = qset->rqe_cnt;
+ } else {
+ cq->cq_type = CQ_TX;
+ cq->cint_idx = qidx - pfvf->hw.rx_queues;
+ cq->cqe_cnt = qset->sqe_cnt;
+ }
cq->cqe_size = pfvf->qset.xqe_size;
/* Allocate memory for CQEs */
@@ -687,11 +698,9 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
/* In case where all RQs auras point to single pool,
* all CQs receive buffer pool also point to same pool.
*/
- pool_id = ((qidx < pfvf->hw.rx_queues) &&
+ pool_id = ((cq->cq_type == CQ_RX) &&
(pfvf->hw.rqpool_cnt != pfvf->hw.rx_queues)) ? 0 : qidx;
cq->rbpool = &qset->pool[pool_id];
-
- cq->cq_idx = qidx;
cq->refill_task_sched = false;
/* Get memory to put this msg */
@@ -703,15 +712,13 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
aq->cq.qsize = Q_SIZE(cq->cqe_cnt, 4);
aq->cq.caching = 1;
aq->cq.base = cq->cqe->iova;
- aq->cq.cint_idx = (qidx < pfvf->hw.rx_queues) ? qidx
- : (qidx - pfvf->hw.rx_queues);
- cq->cint_idx = aq->cq.cint_idx;
+ aq->cq.cint_idx = cq->cint_idx;
aq->cq.cq_err_int_ena = NIX_CQERRINT_BITS;
aq->cq.qint_idx = 0;
aq->cq.avg_level = 255;
if (qidx < pfvf->hw.rx_queues) {
- aq->cq.drop = RQ_DROP_LVL_CQ(pfvf->rq_skid, cq->cqe_cnt);
+ aq->cq.drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, cq->cqe_cnt);
aq->cq.drop_ena = 1;
/* Enable receive CQ backpressure */
@@ -719,7 +726,7 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
aq->cq.bpid = pfvf->bpid[0];
/* Set backpressure level is same as cq pass level */
- aq->cq.bp = RQ_PASS_LVL_CQ(pfvf->rq_skid, qset->rqe_cnt);
+ aq->cq.bp = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
}
/* Fill AQ info */
@@ -1261,6 +1268,7 @@ int otx2_attach_npa_nix(struct otx2_nic *pfvf)
"RVUPF: Invalid MSIX vector offset for NPA/NIX\n");
return -EINVAL;
}
+
return 0;
}
EXPORT_SYMBOL(otx2_attach_npa_nix);
@@ -1312,10 +1320,10 @@ static inline void otx2_nix_rq_op_stats(struct queue_stats *stats,
u64 incr = (u64)qidx << 32;
atomic64_t *ptr;
- ptr = (__force atomic64_t *)(pfvf->reg_base + NIX_LF_RQ_OP_OCTS);
+ ptr = (__force atomic64_t *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_OCTS);
stats->bytes = atomic64_fetch_add_relaxed(incr, ptr);
- ptr = (__force atomic64_t *)(pfvf->reg_base + NIX_LF_RQ_OP_PKTS);
+ ptr = (__force atomic64_t *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_PKTS);
stats->pkts = atomic64_fetch_add_relaxed(incr, ptr);
}
@@ -1325,10 +1333,10 @@ static inline void otx2_nix_sq_op_stats(struct queue_stats *stats,
u64 incr = (u64)qidx << 32;
atomic64_t *ptr;
- ptr = (__force atomic64_t *)(pfvf->reg_base + NIX_LF_SQ_OP_OCTS);
+ ptr = (__force atomic64_t *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_OCTS);
stats->bytes = atomic64_fetch_add_relaxed(incr, ptr);
- ptr = (__force atomic64_t *)(pfvf->reg_base + NIX_LF_SQ_OP_PKTS);
+ ptr = (__force atomic64_t *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_PKTS);
stats->pkts = atomic64_fetch_add_relaxed(incr, ptr);
}
@@ -1376,8 +1384,8 @@ void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf,
struct nix_lf_alloc_rsp *rsp)
{
pfvf->hw.sqb_size = rsp->sqb_size;
- pfvf->rx_chan_base = rsp->rx_chan_base;
- pfvf->tx_chan_base = rsp->tx_chan_base;
+ pfvf->hw.rx_chan_base = rsp->rx_chan_base;
+ pfvf->hw.tx_chan_base = rsp->tx_chan_base;
pfvf->hw.lso_tsov4_idx = rsp->lso_tsov4_idx;
pfvf->hw.lso_tsov6_idx = rsp->lso_tsov6_idx;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index c6607327b3ea..6cb8ee69aa1b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -12,6 +12,7 @@
#define OTX2_COMMON_H
#include <linux/pci.h>
+#include <linux/ptp_clock_kernel.h>
#include <mbox.h>
#include "otx2_reg.h"
@@ -151,8 +152,6 @@ struct mbox {
struct otx2_hw {
struct pci_dev *pdev;
struct otx2_rss_info rss_info;
- struct otx2_dev_stats dev_stats;
- struct otx2_drv_stats drv_stats;
u16 rx_queues;
u16 tx_queues;
u16 max_queues;
@@ -165,32 +164,52 @@ struct otx2_hw {
u32 stack_pg_bytes; /* Size of stack page */
u16 sqb_size;
- /* MSI-X*/
- u16 npa_msixoff; /* Offset of NPA vectors */
- u16 nix_msixoff; /* Offset of NIX vectors */
- char *irq_name;
- cpumask_var_t *affinity_mask;
-
- u8 cint_cnt; /* CQ interrupt count */
+ /* NIX */
u16 txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
+ /* HW settings, coalescing etc */
+ u16 rx_chan_base;
+ u16 tx_chan_base;
+ u16 cq_qcount_wait;
+ u16 cq_ecount_wait;
+ u16 rq_skid;
+ u8 cq_time_wait;
+
/* For TSO segmentation */
u8 lso_tsov4_idx;
u8 lso_tsov6_idx;
u8 hw_tso;
+ /* MSI-X*/
+ u8 cint_cnt; /* CQ interrupt count */
+ u16 npa_msixoff; /* Offset of NPA vectors */
+ u16 nix_msixoff; /* Offset of NIX vectors */
+ char *irq_name;
+ cpumask_var_t *affinity_mask;
+
+ /* Stats */
+ struct otx2_dev_stats dev_stats;
+ struct otx2_drv_stats drv_stats;
u64 cgx_rx_stats[CGX_RX_STATS_COUNT];
u64 cgx_tx_stats[CGX_TX_STATS_COUNT];
u64 cgx_fec_corr_blks;
u64 cgx_fec_uncorr_blks;
};
-struct otx2_ptp;
+struct otx2_ptp {
+ struct ptp_clock_info ptp_info;
+ struct ptp_clock *ptp_clock;
+ struct otx2_nic *nic;
+
+ struct cyclecounter cycle_counter;
+ struct timecounter time_counter;
+ bool ptp_en;
+};
struct otx2_vf_config {
struct otx2_nic *pf;
struct delayed_work link_event_work;
- struct delayed_work mac_vlan_work;
+ struct delayed_work ptp_info_work;
bool intf_down; /* interface was either configured or not */
u8 mac[ETH_ALEN];
u16 vlan;
@@ -214,53 +233,71 @@ struct otx2_mac_table {
struct otx2_nic {
void __iomem *reg_base;
- struct pci_dev *pdev;
- struct device *dev;
struct net_device *netdev;
void *iommu_domain;
+ u16 xtra_hdr;
+ u16 max_frs;
+
+#define OTX2_FLAG_RX_TSTAMP_ENABLED BIT_ULL(0)
+#define OTX2_FLAG_TX_TSTAMP_ENABLED BIT_ULL(1)
+#define OTX2_FLAG_INTF_DOWN BIT_ULL(2)
+#define OTX2_FLAG_MCAM_ENTRIES_ALLOC BIT_ULL(3)
+#define OTX2_FLAG_NTUPLE_SUPPORT BIT_ULL(4)
+#define OTX2_FLAG_UCAST_FLTR_SUPPORT BIT_ULL(5)
+#define OTX2_FLAG_RX_VLAN_SUPPORT BIT_ULL(6)
+ u64 flags;
struct otx2_qset qset;
struct otx2_hw hw;
+ struct pci_dev *pdev;
+ struct device *dev;
+
+ /* Mbox */
struct mbox mbox;
struct mbox *mbox_pfvf;
struct workqueue_struct *mbox_wq;
struct workqueue_struct *mbox_pfvf_wq;
- u8 intf_down;
- u16 bpid[NIX_MAX_BPID_CHAN];
- u16 pcifunc;
- u16 rx_chan_base;
- u16 tx_chan_base;
- u8 cq_time_wait;
- u16 cq_qcount_wait;
- u16 cq_ecount_wait;
- u16 rq_skid;
- u32 msg_enable;
- struct work_struct reset_task;
- u64 reset_count;
u8 total_vfs;
+ u16 pcifunc; /* RVU PF_FUNC */
+ u16 bpid[NIX_MAX_BPID_CHAN];
+ struct otx2_ptp *ptp;
struct otx2_vf_config *vf_configs;
struct cgx_link_user_info linfo;
- bool entries_alloc;
+ /* NPC MCAM */
u32 nr_flows;
u32 ntuple_max_flows;
-#define OTX2_NTUPLE_FILTER_CAPABLE 0
-#define OTX2_UNICAST_FILTER_CAPABLE 1
-#define OTX2_RX_VLAN_OFFLOAD_CAPABLE 2
- unsigned long priv_flags;
u16 entry_list[NPC_MAX_NONCONTIG_ENTRIES];
struct list_head flows;
+ struct otx2_mac_table *mac_table;
+
+ u64 reset_count;
+ struct work_struct reset_task;
struct workqueue_struct *flr_wq;
struct flr_work *flr_wrk;
struct refill_work *refill_wrk;
-
- u8 hw_rx_tstamp;
- u8 hw_tx_tstamp;
- struct otx2_ptp *ptp;
- struct otx2_mac_table *mac_table;
- struct workqueue_struct *otx2_ndo_wq;
struct work_struct otx2_rx_mode_work;
+ struct workqueue_struct *otx2_ndo_wq;
+
+ /* Ethtool stuff */
+ u32 msg_enable;
+
+#define OTX2_PRIV_FLAG_PAM4 BIT(0)
+#define OTX2_PRIV_FLAG_EDSA_HDR BIT(1)
+#define OTX2_PRIV_FLAG_HIGIG2_HDR BIT(2)
+#define OTX2_IS_EDSA_ENABLED(flags) ((flags) & \
+ OTX2_PRIV_FLAG_EDSA_HDR)
+#define OTX2_IS_HIGIG2_ENABLED(flags) ((flags) & \
+ OTX2_PRIV_FLAG_HIGIG2_HDR)
+ u32 ethtool_flags;
+
+ /* Extended DSA and EDSA headers are 8/16 bytes long,
+ * so take the max length of 16 bytes here.
+ */
+#define OTX2_EDSA_HDR_LEN 16
+#define OTX2_HIGIG2_HDR_LEN 16
+ u32 addl_mtu;
};
static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
@@ -284,9 +321,9 @@ static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
{
struct otx2_hw *hw = &pfvf->hw;
- pfvf->cq_time_wait = CQ_TIMER_THRESH_DEFAULT;
- pfvf->cq_ecount_wait = CQ_CQE_THRESH_DEFAULT;
- pfvf->cq_qcount_wait = CQ_QCOUNT_DEFAULT;
+ pfvf->hw.cq_time_wait = CQ_TIMER_THRESH_DEFAULT;
+ pfvf->hw.cq_ecount_wait = CQ_CQE_THRESH_DEFAULT;
+ pfvf->hw.cq_qcount_wait = CQ_QCOUNT_DEFAULT;
hw->hw_tso = true;
@@ -295,22 +332,48 @@ static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
/* Due to HW issue previous silicons required minimum 600
* unused CQE to avoid CQ overflow.
*/
- pfvf->rq_skid = 600;
+ pfvf->hw.rq_skid = 600;
pfvf->qset.rqe_cnt = Q_COUNT(Q_SIZE_1K);
}
if (is_96xx_A0(pfvf->pdev))
- pfvf->cq_qcount_wait = 0x0;
+ pfvf->hw.cq_qcount_wait = 0x0;
+}
+
+static inline void __iomem *otx2_get_regaddr(struct otx2_nic *nic, u64 offset)
+{
+ u64 blkaddr;
+
+ switch ((offset >> RVU_FUNC_BLKADDR_SHIFT) & RVU_FUNC_BLKADDR_MASK) {
+ case BLKTYPE_NIX:
+ blkaddr = BLKADDR_NIX0;
+ break;
+ case BLKTYPE_NPA:
+ blkaddr = BLKADDR_NPA;
+ break;
+ default:
+ blkaddr = BLKADDR_RVUM;
+ break;
+ }
+
+ offset &= ~(RVU_FUNC_BLKADDR_MASK << RVU_FUNC_BLKADDR_SHIFT);
+ offset |= (blkaddr << RVU_FUNC_BLKADDR_SHIFT);
+
+ return nic->reg_base + offset;
}
/* Register read/write APIs */
static inline void otx2_write64(struct otx2_nic *nic, u64 offset, u64 val)
{
- writeq(val, nic->reg_base + offset);
+ void __iomem *addr = otx2_get_regaddr(nic, offset);
+
+ writeq(val, addr);
}
static inline u64 otx2_read64(struct otx2_nic *nic, u64 offset)
{
- return readq(nic->reg_base + offset);
+ void __iomem *addr = otx2_get_regaddr(nic, offset);
+
+ return readq(addr);
}
/* Mbox bounce buffer APIs */
@@ -408,8 +471,8 @@ static inline __uint128_t otx2_read128(const void __iomem *addr)
/* Alloc pointer from pool/aura */
static inline u64 otx2_aura_allocptr(struct otx2_nic *pfvf, int aura)
{
- atomic64_t *ptr = (__force atomic64_t *)(pfvf->reg_base
- + NPA_LF_AURA_OP_ALLOCX(0));
+ atomic64_t *ptr = (__force atomic64_t *)otx2_get_regaddr(pfvf,
+ NPA_LF_AURA_OP_ALLOCX(0));
u64 incr = (u64)aura | BIT_ULL(63);
return atomic64_fetch_add_relaxed(incr, ptr);
@@ -420,7 +483,7 @@ static inline void otx2_aura_freeptr(struct otx2_nic *pfvf,
int aura, s64 buf)
{
otx2_write128((u64)buf, (u64)aura | BIT_ULL(63),
- pfvf->reg_base + NPA_LF_AURA_OP_FREE0);
+ otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_FREE0));
}
/* Update page ref count */
@@ -565,9 +628,8 @@ dma_addr_t otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable);
-
-int otx2_napi_handler(struct otx2_cq_queue *cq,
- struct otx2_nic *pfvf, int budget);
+void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
+void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
/* RSS configuration APIs*/
int otx2_rss_init(struct otx2_nic *pfvf);
@@ -605,8 +667,12 @@ void otx2vf_set_ethtool_ops(struct net_device *netdev);
int otx2_open(struct net_device *netdev);
int otx2_stop(struct net_device *netdev);
+int otx2vf_open(struct net_device *netdev);
+int otx2vf_stop(struct net_device *netdev);
int otx2_set_real_num_queues(struct net_device *netdev,
int tx_queues, int rx_queues);
+int otx2_set_npc_parse_mode(struct otx2_nic *pfvf);
+
/* MCAM filter related APIs */
void otx2_do_set_rx_mode(struct work_struct *work);
int otx2_add_macfilter(struct net_device *netdev, const u8 *mac);
@@ -628,19 +694,4 @@ int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable);
int otx2smqvf_probe(struct otx2_nic *vf);
int otx2smqvf_remove(struct otx2_nic *vf);
-/* OTX2_NIC access priv_flags */
-static inline void otx2_nic_enable_feature(struct otx2_nic *pf,
- unsigned long nr) {
- set_bit(nr, &pf->priv_flags);
-}
-
-static inline void otx2_nic_disable_feature(struct otx2_nic *pf,
- unsigned long nr) {
- clear_bit(nr, &pf->priv_flags);
-}
-
-static inline int otx2_nic_is_feature_enabled(struct otx2_nic *pf,
- unsigned long nr) {
- return test_bit(nr, &pf->priv_flags);
-}
#endif /* OTX2_COMMON_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index 1a4cd814d238..8f804effb352 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -26,8 +26,9 @@
#define OTX2_DEFAULT_ACTION 0x1
static const char otx2_priv_flags_strings[][ETH_GSTRING_LEN] = {
-#define OTX2_PRIV_FLAGS_PAM4 BIT(0)
"pam4",
+ "edsa",
+ "higig2",
};
struct otx2_stat {
@@ -41,7 +42,7 @@ struct otx2_stat {
.index = offsetof(struct otx2_dev_stats, stat) / sizeof(u64), \
}
-#define OTX2_ETHTOOL_SUPPORTED_MODES 0x630CC23 //110001100001100110000100011
+#define OTX2_ETHTOOL_SUPPORTED_MODES 0x638CE23 //110001110001100111000100011
#define OTX2_ETHTOOL_ALL_MODES (ULLONG_MAX)
static const struct otx2_stat otx2_dev_stats[] = {
@@ -84,6 +85,36 @@ static const unsigned int otx2_n_dev_stats = ARRAY_SIZE(otx2_dev_stats);
static const unsigned int otx2_n_drv_stats = ARRAY_SIZE(otx2_drv_stats);
static const unsigned int otx2_n_queue_stats = ARRAY_SIZE(otx2_queue_stats);
+int __weak otx2vf_open(struct net_device *netdev)
+{
+ return 0;
+}
+
+int __weak otx2vf_stop(struct net_device *netdev)
+{
+ return 0;
+}
+
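+/* The __weak stubs above are overridden when the VF driver is linked
+ * in, letting this shared ethtool code dispatch to the PF or VF
+ * open/stop handler based on RVU_PFVF_FUNC_MASK.
+ */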
+static void otx2_dev_open(struct net_device *netdev)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+
+ if (pfvf->pcifunc & RVU_PFVF_FUNC_MASK)
+ otx2vf_open(netdev);
+ else
+ otx2_open(netdev);
+}
+
+static void otx2_dev_stop(struct net_device *netdev)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+
+ if (pfvf->pcifunc & RVU_PFVF_FUNC_MASK)
+ otx2vf_stop(netdev);
+ else
+ otx2_stop(netdev);
+}
+
static void otx2_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
@@ -276,7 +307,7 @@ static int otx2_set_channels(struct net_device *dev,
return -EINVAL;
if (if_up)
- otx2_stop(dev);
+ otx2_dev_stop(dev);
pfvf->hw.rx_queues = channel->rx_count;
pfvf->hw.tx_queues = channel->tx_count;
@@ -287,7 +318,7 @@ static int otx2_set_channels(struct net_device *dev,
return err;
if (if_up)
- otx2_open(dev);
+ otx2_dev_open(dev);
netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n",
pfvf->hw.tx_queues, pfvf->hw.rx_queues);
@@ -362,8 +393,8 @@ static int otx2_set_ringparam(struct net_device *netdev,
 /* On some silicon variants a skid of reserved CQEs is
 * needed to avoid CQ overflow.
*/
- if (rx_count < pfvf->rq_skid)
- rx_count = pfvf->rq_skid;
+ if (rx_count < pfvf->hw.rq_skid)
+ rx_count = pfvf->hw.rq_skid;
rx_count = Q_COUNT(Q_SIZE(rx_count, 3));
 /* Due to pipelining impact a minimum of 2000 unused SQ CQEs
@@ -378,14 +409,14 @@ static int otx2_set_ringparam(struct net_device *netdev,
return 0;
if (if_up)
- otx2_stop(netdev);
+ otx2_dev_stop(netdev);
/* Assigned to the nearest possible exponent. */
qs->sqe_cnt = tx_count;
qs->rqe_cnt = rx_count;
if (if_up)
- otx2_open(netdev);
+ otx2_dev_open(netdev);
return 0;
}
@@ -393,11 +424,12 @@ static int otx2_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *cmd)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct otx2_hw *hw = &pfvf->hw;
- cmd->rx_coalesce_usecs = pfvf->cq_time_wait;
- cmd->rx_max_coalesced_frames = pfvf->cq_ecount_wait;
- cmd->tx_coalesce_usecs = pfvf->cq_time_wait;
- cmd->tx_max_coalesced_frames = pfvf->cq_ecount_wait;
+ cmd->rx_coalesce_usecs = hw->cq_time_wait;
+ cmd->rx_max_coalesced_frames = hw->cq_ecount_wait;
+ cmd->tx_coalesce_usecs = hw->cq_time_wait;
+ cmd->tx_max_coalesced_frames = hw->cq_ecount_wait;
return 0;
}
@@ -406,6 +438,7 @@ static int otx2_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct otx2_hw *hw = &pfvf->hw;
int qidx;
if (ec->use_adaptive_rx_coalesce || ec->use_adaptive_tx_coalesce ||
@@ -433,13 +466,13 @@ static int otx2_set_coalesce(struct net_device *netdev,
/* Rx and Tx are mapped to same CQ, check which one
* is changed, if both then choose the min.
*/
- if (pfvf->cq_time_wait == ec->rx_coalesce_usecs)
- pfvf->cq_time_wait = ec->tx_coalesce_usecs;
- else if (pfvf->cq_time_wait == ec->tx_coalesce_usecs)
- pfvf->cq_time_wait = ec->rx_coalesce_usecs;
+ if (hw->cq_time_wait == ec->rx_coalesce_usecs)
+ hw->cq_time_wait = ec->tx_coalesce_usecs;
+ else if (hw->cq_time_wait == ec->tx_coalesce_usecs)
+ hw->cq_time_wait = ec->rx_coalesce_usecs;
else
- pfvf->cq_time_wait = min_t(u8, ec->rx_coalesce_usecs,
- ec->tx_coalesce_usecs);
+ hw->cq_time_wait = min_t(u8, ec->rx_coalesce_usecs,
+ ec->tx_coalesce_usecs);
/* Max ecount_wait supported is 16bit,
* so clamp the user given value to the range of 1 to 64k.
@@ -452,13 +485,13 @@ static int otx2_set_coalesce(struct net_device *netdev,
/* Rx and Tx are mapped to same CQ, check which one
* is changed, if both then choose the min.
*/
- if (pfvf->cq_ecount_wait == ec->rx_max_coalesced_frames)
- pfvf->cq_ecount_wait = ec->tx_max_coalesced_frames;
- else if (pfvf->cq_ecount_wait == ec->tx_max_coalesced_frames)
- pfvf->cq_ecount_wait = ec->rx_max_coalesced_frames;
+ if (hw->cq_ecount_wait == ec->rx_max_coalesced_frames)
+ hw->cq_ecount_wait = ec->tx_max_coalesced_frames;
+ else if (hw->cq_ecount_wait == ec->tx_max_coalesced_frames)
+ hw->cq_ecount_wait = ec->rx_max_coalesced_frames;
else
- pfvf->cq_ecount_wait = min_t(u16, ec->rx_max_coalesced_frames,
- ec->tx_max_coalesced_frames);
+ hw->cq_ecount_wait = min_t(u16, ec->rx_max_coalesced_frames,
+ ec->tx_max_coalesced_frames);
if (netif_running(netdev)) {
for (qidx = 0; qidx < pfvf->hw.cint_cnt; qidx++)
@@ -939,7 +972,7 @@ static void otx2_get_link_mode_info(u64 index, int mode,
ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT,
ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
- ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
+ ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
@@ -949,7 +982,7 @@ static void otx2_get_link_mode_info(u64 index, int mode,
ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
- ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
@@ -967,7 +1000,7 @@ static void otx2_get_link_mode_info(u64 index, int mode,
if (bit_position == 0)
ethtool_link_mode = 0x3F;
- ethtool_link_mode |= 1 << cgx_link_mode[bit_position];
+ ethtool_link_mode |= 1ULL << cgx_link_mode[bit_position];
if (mode)
*link_ksettings->link_modes.advertising |=
ethtool_link_mode;
@@ -1184,6 +1217,172 @@ end: otx2_mbox_unlock(&pfvf->mbox);
return err;
}
+static u32 otx2_get_priv_flags(struct net_device *netdev)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct cgx_fw_data *rsp;
+
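+ /* The PAM4 state lives in the PHY; refresh it from firmware data
+ * rather than trusting the cached flag.
+ */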
+ rsp = otx2_get_fwdata(pfvf);
+
+ if (IS_ERR(rsp)) {
+ pfvf->ethtool_flags &= ~OTX2_PRIV_FLAG_PAM4;
+ } else {
+ if (rsp->fwdata.phy.mod_type)
+ pfvf->ethtool_flags |= OTX2_PRIV_FLAG_PAM4;
+ else
+ pfvf->ethtool_flags &= ~OTX2_PRIV_FLAG_PAM4;
+ }
+
+ return pfvf->ethtool_flags;
+}
+
+static int otx2_set_phy_mod_type(struct net_device *netdev, bool enable)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct cgx_phy_mod_type *req;
+ struct cgx_fw_data *fwd;
+ int rc = -EAGAIN;
+
+ fwd = otx2_get_fwdata(pfvf);
+ if (IS_ERR(fwd))
+ return -EAGAIN;
+
+ /* return early if the PHY does not support this feature */
+ if (!fwd->fwdata.phy.can_change_mod_type)
+ return -EOPNOTSUPP;
+
+ otx2_mbox_lock(&pfvf->mbox);
+ req = otx2_mbox_alloc_msg_cgx_set_phy_mod_type(&pfvf->mbox);
+ if (!req)
+ goto end;
+
+ req->mod = enable;
+
+ if (!otx2_sync_mbox_msg(&pfvf->mbox))
+ rc = 0;
+end:
+ otx2_mbox_unlock(&pfvf->mbox);
+ return rc;
+}
+
+int otx2_set_npc_parse_mode(struct otx2_nic *pfvf)
+{
+ struct npc_set_pkind *req;
+ int rc = -EAGAIN;
+
+ otx2_mbox_lock(&pfvf->mbox);
+ req = otx2_mbox_alloc_msg_npc_set_pkind(&pfvf->mbox);
+ if (!req)
+ goto end;
+
+ if (OTX2_IS_HIGIG2_ENABLED(pfvf->ethtool_flags))
+ req->mode = OTX2_PRIV_FLAGS_HIGIG;
+ else if (OTX2_IS_EDSA_ENABLED(pfvf->ethtool_flags))
+ req->mode = OTX2_PRIV_FLAGS_EDSA;
+ else
+ req->mode = OTX2_PRIV_FLAGS_DEFAULT;
+
+ req->dir = PKIND_RX;
+
+ /* request AF to change the pkind in both directions */
+ if (req->mode == OTX2_PRIV_FLAGS_HIGIG)
+ req->dir |= PKIND_TX;
+
+ if (!otx2_sync_mbox_msg(&pfvf->mbox))
+ rc = 0;
+end:
+ otx2_mbox_unlock(&pfvf->mbox);
+ return rc;
+}
+
+static int otx2_enable_addl_header(struct net_device *netdev, int bitpos,
+ u32 len, bool enable)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ bool if_up = netif_running(netdev);
+
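+ /* Toggling an extra parse header changes the NPC parse mode,
+ * which only takes effect across an interface restart; hence the
+ * stop/open sequence below.
+ */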
+ if (enable) {
+ pfvf->ethtool_flags |= BIT(bitpos);
+ } else {
+ pfvf->ethtool_flags &= ~BIT(bitpos);
+ len = 0;
+ }
+
+ if (if_up)
+ otx2_stop(netdev);
+
+ /* Update max FRS so that additional hdrs are considered */
+ pfvf->addl_mtu = len;
+
+ /* In case HIGIG2 mode is set, the packet will have 16 bytes of
+ * extra header at its start which the stack does not need.
+ */
+ if (OTX2_IS_HIGIG2_ENABLED(pfvf->ethtool_flags))
+ pfvf->xtra_hdr = 16;
+ else
+ pfvf->xtra_hdr = 0;
+
+ /* NPC parse mode is updated as part of reopening the interface */
+ if (if_up)
+ otx2_open(netdev);
+
+ return 0;
+}
+
+static int otx2_set_priv_flags(struct net_device *netdev, u32 new_flags)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ bool enable = false;
+ int bitnr, rc = 0;
+ u32 chg_flags;
+
+ /* Get latest PAM4 settings */
+ otx2_get_priv_flags(netdev);
+
+ chg_flags = new_flags ^ pfvf->ethtool_flags;
+ if (!chg_flags)
+ return 0;
+
+ /* Some flags are mutually exclusive, so allow only one change at a time */
+ if (hweight32(chg_flags) != 1)
+ return -EINVAL;
+
+ bitnr = ffs(chg_flags) - 1;
+ if (new_flags & BIT(bitnr))
+ enable = true;
+
+ switch (BIT(bitnr)) {
+ case OTX2_PRIV_FLAG_PAM4:
+ rc = otx2_set_phy_mod_type(netdev, enable);
+ break;
+ case OTX2_PRIV_FLAG_EDSA_HDR:
+ /* HIGIG2 & EDSA are mutually exclusive */
+ if (enable && OTX2_IS_HIGIG2_ENABLED(pfvf->ethtool_flags))
+ return -EINVAL;
+ return otx2_enable_addl_header(netdev, bitnr,
+ OTX2_EDSA_HDR_LEN, enable);
+ case OTX2_PRIV_FLAG_HIGIG2_HDR:
+ if (enable && OTX2_IS_EDSA_ENABLED(pfvf->ethtool_flags))
+ return -EINVAL;
+ return otx2_enable_addl_header(netdev, bitnr,
+ OTX2_HIGIG2_HDR_LEN, enable);
+ default:
+ break;
+ }
+
+ /* save the change */
+ if (!rc) {
+ if (enable)
+ pfvf->ethtool_flags |= BIT(bitnr);
+ else
+ pfvf->ethtool_flags &= ~BIT(bitnr);
+ }
+
+ return rc;
+}
+
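+/* Example usage from userspace (hypothetical interface name):
+ *   ethtool --show-priv-flags eth0
+ *   ethtool --set-priv-flags eth0 higig2 on
+ * "edsa" and "higig2" are mutually exclusive; setting one while the
+ * other is enabled fails with -EINVAL.
+ */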
static struct ethtool_ops otx2_ethtool_ops = {
.get_link = otx2_get_link,
.get_drvinfo = otx2_get_drvinfo,
@@ -1213,69 +1412,12 @@ static struct ethtool_ops otx2_ethtool_ops = {
.set_fecparam = otx2_set_fecparam,
.get_module_info = otx2_get_module_info,
.get_module_eeprom = otx2_get_module_eeprom,
+ .get_priv_flags = otx2_get_priv_flags,
+ .set_priv_flags = otx2_set_priv_flags,
};
-static int otx2_set_priv_flags(struct net_device *netdev, u32 priv_flags)
-{
- struct otx2_nic *pfvf = netdev_priv(netdev);
- struct cgx_phy_mod_type *req, *rsp;
- int rc = 0;
-
- otx2_mbox_lock(&pfvf->mbox);
- req = otx2_mbox_alloc_msg_cgx_set_phy_mod_type(&pfvf->mbox);
- if (!req) {
- rc = -EAGAIN;
- goto end;
- }
- req->mod = priv_flags & OTX2_PRIV_FLAGS_PAM4;
- rc = otx2_sync_mbox_msg(&pfvf->mbox);
- if (rc)
- goto end;
-
- rsp = (struct cgx_phy_mod_type *)otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0,
- &req->hdr);
- if (IS_ERR(rsp)) {
- rc = PTR_ERR(rsp);
- goto end;
- }
- if (rsp->hdr.rc) {
- rc = rsp->hdr.rc;
- goto end;
- }
-
-end: otx2_mbox_unlock(&pfvf->mbox);
- return rc;
-}
-
-static u32 otx2_get_priv_flags(struct net_device *netdev)
-{
- struct otx2_nic *pfvf = netdev_priv(netdev);
- struct cgx_fw_data *rsp;
- u32 priv_flags = 0;
-
- rsp = otx2_get_fwdata(pfvf);
-
- if (IS_ERR(rsp))
- return 0;
-
- if (rsp->fwdata.phy.mod_type)
- priv_flags |= OTX2_PRIV_FLAGS_PAM4;
-
- return priv_flags;
-}
-
void otx2_set_ethtool_ops(struct net_device *netdev)
{
- struct otx2_nic *pfvf = netdev_priv(netdev);
- struct cgx_fw_data *rsp;
-
- rsp = otx2_get_fwdata(pfvf);
-
- if (!IS_ERR(rsp) && rsp->fwdata.phy.can_change_mod_type) {
- otx2_ethtool_ops.set_priv_flags = otx2_set_priv_flags;
- otx2_ethtool_ops.get_priv_flags = otx2_get_priv_flags;
- }
-
netdev->ethtool_ops = &otx2_ethtool_ops;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
index 78e9a851d594..96a4dbba25a3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
@@ -37,10 +37,8 @@ int otx2_mcam_flow_init(struct otx2_nic *pf)
pf->ntuple_max_flows = OTX2_MAX_NTUPLE_FLOWS;
- /* support ntuple,mac filters */
- otx2_nic_enable_feature(pf, OTX2_NTUPLE_FILTER_CAPABLE);
- otx2_nic_enable_feature(pf, OTX2_UNICAST_FILTER_CAPABLE);
- otx2_nic_enable_feature(pf, OTX2_RX_VLAN_OFFLOAD_CAPABLE);
+ pf->flags |= (OTX2_FLAG_NTUPLE_SUPPORT |
+ OTX2_FLAG_UCAST_FLTR_SUPPORT | OTX2_FLAG_RX_VLAN_SUPPORT);
pf->mac_table = devm_kzalloc(pf->dev, sizeof(struct otx2_mac_table)
* OTX2_MAX_UNICAST_FLOWS, GFP_KERNEL);
@@ -74,8 +72,7 @@ static int otx2_alloc_mcam_entries(struct otx2_nic *pfvf)
int i;
otx2_mbox_lock(&pfvf->mbox);
-
- if (pfvf->entries_alloc) {
+ if (pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC) {
otx2_mbox_unlock(&pfvf->mbox);
return 0;
}
@@ -106,8 +103,8 @@ static int otx2_alloc_mcam_entries(struct otx2_nic *pfvf)
/* support only ntuples here */
pfvf->ntuple_max_flows = rsp->count;
pfvf->netdev->priv_flags &= ~IFF_UNICAST_FLT;
- pfvf->priv_flags &= ~(BIT(OTX2_UNICAST_FILTER_CAPABLE));
- pfvf->priv_flags &= ~(BIT(OTX2_RX_VLAN_OFFLOAD_CAPABLE));
+ pfvf->flags &= ~OTX2_FLAG_UCAST_FLTR_SUPPORT;
+ pfvf->flags &= ~OTX2_FLAG_RX_VLAN_SUPPORT;
pfvf->netdev->features &= ~wanted;
pfvf->netdev->hw_features &= ~wanted;
}
@@ -115,7 +112,7 @@ static int otx2_alloc_mcam_entries(struct otx2_nic *pfvf)
for (i = 0; i < rsp->count; i++)
pfvf->entry_list[i] = rsp->entry_list[i];
- pfvf->entries_alloc = true;
+ pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
otx2_mbox_unlock(&pfvf->mbox);
return 0;
@@ -129,13 +126,15 @@ static int otx2_do_add_macfilter(struct otx2_nic *pf, const u8 *mac)
struct npc_install_flow_req *req;
int err, i;
- if (!pf->entries_alloc) {
+ if (!(pf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC)) {
err = otx2_alloc_mcam_entries(pf);
if (err)
return err;
}
- if (!otx2_nic_is_feature_enabled(pf, OTX2_UNICAST_FILTER_CAPABLE))
+
+ if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
return -ENOMEM;
+
 /* don't have free MCAM entries or UC list is greater than allotted */
if (netdev_uc_count(pf->netdev) > OTX2_MAX_UNICAST_FLOWS)
return -ENOMEM;
@@ -162,7 +161,7 @@ static int otx2_do_add_macfilter(struct otx2_nic *pf, const u8 *mac)
ether_addr_copy(req->packet.dmac, mac);
u64_to_ether_addr(0xffffffffffffull, req->mask.dmac);
req->features = BIT_ULL(NPC_DMAC);
- req->channel = pf->rx_chan_base;
+ req->channel = pf->hw.rx_chan_base;
req->intf = NIX_INTF_RX;
req->op = NIX_RX_ACTION_DEFAULT;
req->set_cntr = 1;
@@ -314,7 +313,7 @@ static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
req->entry = flow->entry;
req->intf = NIX_INTF_RX;
req->set_cntr = 1;
- req->channel = pfvf->rx_chan_base;
+ req->channel = pfvf->hw.rx_chan_base;
if (ring_cookie == RX_CLS_FLOW_DISC) {
req->op = NIX_RX_ACTIONOP_DROP;
} else {
@@ -354,7 +353,7 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rx_flow_spec *fsp)
if (ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
return -EINVAL;
- if (!pfvf->entries_alloc) {
+ if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC)) {
err = otx2_alloc_mcam_entries(pfvf);
if (err)
return err;
@@ -442,7 +441,7 @@ int otx2_destroy_ntuple_flows(struct otx2_nic *pfvf)
struct otx2_flow *iter, *tmp;
int err;
- if (!pfvf->entries_alloc)
+ if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC))
return 0;
otx2_mbox_lock(&pfvf->mbox);
@@ -472,7 +471,7 @@ int otx2_destroy_mcam_flows(struct otx2_nic *pfvf)
struct otx2_flow *iter, *tmp;
int err;
- if (!pfvf->entries_alloc)
+ if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC))
return 0;
/* remove all flows */
@@ -501,7 +500,7 @@ int otx2_destroy_mcam_flows(struct otx2_nic *pfvf)
return err;
}
- pfvf->entries_alloc = false;
+ pfvf->flags &= ~OTX2_FLAG_MCAM_ENTRIES_ALLOC;
otx2_mbox_unlock(&pfvf->mbox);
return 0;
@@ -523,7 +522,7 @@ static int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf)
req->intf = NIX_INTF_RX;
ether_addr_copy(req->packet.dmac, pfvf->netdev->dev_addr);
u64_to_ether_addr(0xffffffffffffull, req->mask.dmac);
- req->channel = pfvf->rx_chan_base;
+ req->channel = pfvf->hw.rx_chan_base;
req->op = NIX_RX_ACTION_DEFAULT;
req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC);
req->vtag0_valid = true;
@@ -560,14 +559,14 @@ int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable)
struct mbox_msghdr *rsp_hdr;
int err;
- if (!pf->entries_alloc) {
+ if (!(pf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC)) {
err = otx2_alloc_mcam_entries(pf);
if (err)
return err;
}
 /* Don't have enough MCAM entries */
- if (!otx2_nic_is_feature_enabled(pf, OTX2_RX_VLAN_OFFLOAD_CAPABLE))
+ if (!(pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT))
return -ENOMEM;
if (enable) {
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index b13807f0b445..d40ca122a4a9 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -759,8 +759,6 @@ static void otx2_process_pfaf_mbox_msg(struct otx2_nic *pf,
config->intf_down = false;
dwork = &config->link_event_work;
schedule_delayed_work(dwork, msecs_to_jiffies(100));
- dwork = &config->mac_vlan_work;
- schedule_delayed_work(dwork, msecs_to_jiffies(100));
break;
case MBOX_MSG_NIX_LF_STOP_RX:
config->intf_down = true;
@@ -874,13 +872,34 @@ int otx2_mbox_up_handler_cgx_link_event(struct otx2_nic *pf,
}
/* interface has not been fully configured yet */
- if (pf->intf_down)
+ if (pf->flags & OTX2_FLAG_INTF_DOWN)
return 0;
otx2_handle_link_event(pf);
return 0;
}
+int otx2_mbox_up_handler_cgx_ptp_rx_info(struct otx2_nic *pf,
+ struct cgx_ptp_rx_info_msg *msg,
+ struct msg_rsp *rsp)
+{
+ int i;
+
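+ /* AF notifies the PF when PTP Rx timestamping is toggled on the
+ * CGX link; cache the new state and fan it out to active VFs.
+ */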
+ pf->ptp->ptp_en = msg->ptp_en;
+
+ /* notify VFs about ptp event */
+ for (i = 0; i < pci_num_vf(pf->pdev); i++) {
+ struct otx2_vf_config *config = &pf->vf_configs[i];
+ struct delayed_work *dwork = &config->ptp_info_work;
+
+ if (config->intf_down)
+ continue;
+
+ schedule_delayed_work(dwork, msecs_to_jiffies(100));
+ }
+ return 0;
+}
+
static int otx2_process_mbox_msg_up(struct otx2_nic *pf,
struct mbox_msghdr *req)
{
@@ -977,7 +996,7 @@ static void otx2_disable_mbox_intr(struct otx2_nic *pf)
free_irq(vector, pf);
}
-static int otx2_register_mbox_intr(struct otx2_nic *pf)
+static int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af)
{
struct otx2_hw *hw = &pf->hw;
struct msg_req *req;
@@ -1001,6 +1020,9 @@ static int otx2_register_mbox_intr(struct otx2_nic *pf)
otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));
otx2_write64(pf, RVU_PF_INT_ENA_W1S, BIT_ULL(0));
+ if (!probe_af)
+ return 0;
+
/* Check mailbox communication with AF */
req = otx2_mbox_alloc_msg_ready(&pf->mbox);
if (!req) {
@@ -1155,23 +1177,24 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data)
/* CQ */
for (qidx = 0; qidx < pf->qset.cq_cnt; qidx++) {
- ptr = pf->reg_base + NIX_LF_CQ_OP_INT;
- val = atomic64_fetch_add_relaxed((qidx << 44) |
- NIX_CQERRINT_BITS, ptr);
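+		/* Read the per-CQ interrupt bits, then write the set
+		 * bits back to clear them (write-1-to-clear).
+		 */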
+ ptr = otx2_get_regaddr(pf, NIX_LF_CQ_OP_INT);
+ val = atomic64_fetch_add_relaxed((qidx << 44), ptr);
+ otx2_write64(pf, NIX_LF_CQ_OP_INT, (qidx << 44) |
+ (val & NIX_CQERRINT_BITS));
if (!(val & (NIX_CQERRINT_BITS | BIT_ULL(42))))
continue;
if (val & BIT_ULL(42)) {
- dev_err(pf->dev, "CQ%lld: error reading NIX_LF_CQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
- qidx, otx2_read64(pf, NIX_LF_ERR_INT));
+ netdev_err(pf->netdev, "CQ%lld: error reading NIX_LF_CQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
+ qidx, otx2_read64(pf, NIX_LF_ERR_INT));
} else {
if (val & BIT_ULL(NIX_CQERRINT_DOOR_ERR))
- dev_err(pf->dev, "CQ%lld: Doorbell error",
- qidx);
+ netdev_err(pf->netdev, "CQ%lld: Doorbell error",
+ qidx);
if (val & BIT_ULL(NIX_CQERRINT_CQE_FAULT))
- dev_err(pf->dev, "CQ%lld: Memory fault on CQE write to LLC/DRAM",
- qidx);
+ netdev_err(pf->netdev, "CQ%lld: Memory fault on CQE write to LLC/DRAM",
+ qidx);
}
schedule_work(&pf->reset_task);
@@ -1179,29 +1202,44 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data)
/* SQ */
for (qidx = 0; qidx < pf->hw.tx_queues; qidx++) {
- ptr = pf->reg_base + NIX_LF_SQ_OP_INT;
- val = atomic64_fetch_add_relaxed((qidx << 44) | NIX_SQINT_BITS,
- ptr);
+ ptr = otx2_get_regaddr(pf, NIX_LF_SQ_OP_INT);
+ val = atomic64_fetch_add_relaxed((qidx << 44), ptr);
+ otx2_write64(pf, NIX_LF_SQ_OP_INT, (qidx << 44) |
+ (val & NIX_SQINT_BITS));
+
if (!(val & (NIX_SQINT_BITS | BIT_ULL(42))))
continue;
if (val & BIT_ULL(42)) {
- dev_err(pf->dev, "SQ%lld: error reading NIX_LF_SQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
- qidx, otx2_read64(pf, NIX_LF_ERR_INT));
+ netdev_err(pf->netdev, "SQ%lld: error reading NIX_LF_SQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
+ qidx, otx2_read64(pf, NIX_LF_ERR_INT));
} else {
- if (val & BIT_ULL(NIX_SQINT_LMT_ERR))
- dev_err(pf->dev, "SQ%lld: LMT store error",
- qidx);
- if (val & BIT_ULL(NIX_SQINT_MNQ_ERR))
- dev_err(pf->dev, "SQ%lld: Meta-descriptor enqueue error",
- qidx);
- if (val & BIT_ULL(NIX_SQINT_SEND_ERR))
- dev_err(pf->dev, "SQ%lld: Send error, NIX_LF_SEND_ERR_DBG 0x%llx",
- qidx,
- otx2_read64(pf, NIX_LF_SEND_ERR_DBG));
+ if (val & BIT_ULL(NIX_SQINT_LMT_ERR)) {
+ netdev_err(pf->netdev, "SQ%lld: LMT store error NIX_LF_SQ_OP_ERR_DBG:0x%llx",
+ qidx,
+ otx2_read64(pf,
+ NIX_LF_SQ_OP_ERR_DBG));
+ otx2_write64(pf, NIX_LF_SQ_OP_ERR_DBG,
+ BIT_ULL(44));
+ }
+ if (val & BIT_ULL(NIX_SQINT_MNQ_ERR)) {
+ netdev_err(pf->netdev, "SQ%lld: Meta-descriptor enqueue error NIX_LF_MNQ_ERR_DGB:0x%llx\n",
+ qidx,
+ otx2_read64(pf, NIX_LF_MNQ_ERR_DBG));
+ otx2_write64(pf, NIX_LF_MNQ_ERR_DBG,
+ BIT_ULL(44));
+ }
+ if (val & BIT_ULL(NIX_SQINT_SEND_ERR)) {
+ netdev_err(pf->netdev, "SQ%lld: Send error, NIX_LF_SEND_ERR_DBG 0x%llx",
+ qidx,
+ otx2_read64(pf,
+ NIX_LF_SEND_ERR_DBG));
+ otx2_write64(pf, NIX_LF_SEND_ERR_DBG,
+ BIT_ULL(44));
+ }
if (val & BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL))
- dev_err(pf->dev, "SQ%lld: SQB allocation failed",
- qidx);
+ netdev_err(pf->netdev, "SQ%lld: SQB allocation failed",
+ qidx);
}
schedule_work(&pf->reset_task);
@@ -1378,8 +1416,8 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
struct nix_lf_free_req *free_req;
struct mbox *mbox = &pf->mbox;
struct otx2_cq_queue *cq;
- int err, qidx, cqe_count;
struct msg_req *req;
+ int qidx, err;
/* Stop transmission */
err = otx2_txschq_stop(pf);
@@ -1400,10 +1438,10 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
 /* Dequeue all CQEs */
for (qidx = 0; qidx < qset->cq_cnt; qidx++) {
cq = &qset->cq[qidx];
- cqe_count = otx2_read64(pf, NIX_LF_CINTX_CNT(cq->cint_idx));
- cqe_count &= 0xFFFFFFFF;
- if (cqe_count)
- otx2_napi_handler(cq, pf, cqe_count);
+ if (cq->cq_type == CQ_RX)
+ otx2_cleanup_rx_cqes(pf, cq);
+ else
+ otx2_cleanup_tx_cqes(pf, cq);
}
 /* Free RQ buffer pointers */
@@ -1441,8 +1479,9 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
int qidx = skb_get_queue_mapping(skb);
struct netdev_queue *txq = netdev_get_tx_queue(netdev, qidx);
- /* Check for minimum packet length */
- if (skb->len <= ETH_HLEN) {
+ /* Check for minimum and maximum packet length */
+ if (skb->len <= ETH_HLEN ||
+ (!skb_shinfo(skb)->gso_size && skb->len > pf->max_frs)) {
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -1522,13 +1561,13 @@ int otx2_open(struct net_device *netdev)
* 'cq_ids[0]' points to RQ's CQ and
* 'cq_ids[1]' points to SQ's CQ and
*/
- cq_poll->cq_ids[0] =
+ cq_poll->cq_ids[CQ_RX] =
(qidx < pf->hw.rx_queues) ? qidx : CINT_INVALID_CQ;
- cq_poll->cq_ids[1] = (qidx < pf->hw.tx_queues) ?
+ cq_poll->cq_ids[CQ_TX] = (qidx < pf->hw.tx_queues) ?
qidx + pf->hw.rx_queues : CINT_INVALID_CQ;
cq_poll->dev = (void *)pf;
netif_napi_add(netdev, &cq_poll->napi,
- otx2_poll, NAPI_POLL_WEIGHT);
+ otx2_napi_handler, NAPI_POLL_WEIGHT);
napi_enable(&cq_poll->napi);
}
@@ -1588,7 +1627,7 @@ int otx2_open(struct net_device *netdev)
otx2_set_cints_affinity(pf);
- pf->intf_down = false;
+ pf->flags &= ~OTX2_FLAG_INTF_DOWN;
/* 'intf_down' may be checked on any cpu */
smp_wmb();
@@ -1596,19 +1635,26 @@ int otx2_open(struct net_device *netdev)
if (pf->linfo.link_up && !(pf->pcifunc & RVU_PFVF_FUNC_MASK))
otx2_handle_link_event(pf);
- if (otx2_nic_is_feature_enabled(pf, OTX2_RX_VLAN_OFFLOAD_CAPABLE))
+ if (pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
otx2_enable_rxvlan(pf, true);
- /* When reinitializing enable time stamping if it is enabled before */
- if (pf->hw_tx_tstamp) {
- pf->hw_tx_tstamp = 0;
+ /* When reinitializing enable time stamping if it was enabled before */
+ if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED) {
+ pf->flags &= ~OTX2_FLAG_TX_TSTAMP_ENABLED;
otx2_config_hw_tx_tstamp(pf, true);
}
- if (pf->hw_rx_tstamp) {
- pf->hw_rx_tstamp = 0;
+ if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED) {
+ pf->flags &= ~OTX2_FLAG_RX_TSTAMP_ENABLED;
otx2_config_hw_rx_tstamp(pf, true);
}
+ /* Set NPC parsing mode, skip LBKs */
+ if (!is_otx2_lbkvf(pf->pdev)) {
+ err = otx2_set_npc_parse_mode(pf);
+ if (err)
+ goto err_free_cints;
+ }
+
err = otx2_rxtx_enable(pf, true);
if (err)
goto err_free_cints;
@@ -1643,7 +1689,7 @@ int otx2_stop(struct net_device *netdev)
netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
- pf->intf_down = true;
+ pf->flags |= OTX2_FLAG_INTF_DOWN;
/* 'intf_down' may be checked on any cpu */
smp_wmb();
@@ -1783,7 +1829,7 @@ static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable)
struct msg_req *req;
int err;
- if (!!pfvf->hw_rx_tstamp == enable)
+ if (pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED && enable)
return 0;
otx2_mbox_lock(&pfvf->mbox);
@@ -1803,7 +1849,10 @@ static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable)
}
otx2_mbox_unlock(&pfvf->mbox);
- pfvf->hw_rx_tstamp = enable;
+ if (enable)
+ pfvf->flags |= OTX2_FLAG_RX_TSTAMP_ENABLED;
+ else
+ pfvf->flags &= ~OTX2_FLAG_RX_TSTAMP_ENABLED;
return 0;
}
@@ -1812,7 +1861,7 @@ static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable)
struct msg_req *req;
int err;
- if (!!pfvf->hw_tx_tstamp == enable)
+ if (pfvf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED && enable)
return 0;
otx2_mbox_lock(&pfvf->mbox);
@@ -1832,7 +1881,10 @@ static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable)
}
otx2_mbox_unlock(&pfvf->mbox);
- pfvf->hw_tx_tstamp = enable;
+ if (enable)
+ pfvf->flags |= OTX2_FLAG_TX_TSTAMP_ENABLED;
+ else
+ pfvf->flags &= ~OTX2_FLAG_TX_TSTAMP_ENABLED;
return 0;
}
@@ -1918,7 +1970,7 @@ static int otx2_do_set_vf_mac(struct otx2_nic *pf, int vf, const u8 *mac)
ether_addr_copy(req->packet.dmac, mac);
u64_to_ether_addr(0xffffffffffffull, req->mask.dmac);
req->features = BIT_ULL(NPC_DMAC);
- req->channel = pf->rx_chan_base;
+ req->channel = pf->hw.rx_chan_base;
req->intf = NIX_INTF_RX;
req->default_rule = 1;
req->append = 1;
@@ -1937,6 +1989,9 @@ static int otx2_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
struct pci_dev *pdev = pf->pdev;
struct otx2_vf_config *config;
+ if (!netif_running(netdev))
+ return -EAGAIN;
+
if (vf >= pci_num_vf(pdev))
return -EINVAL;
@@ -1945,8 +2000,6 @@ static int otx2_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
config = &pf->vf_configs[vf];
ether_addr_copy(config->mac, mac);
- if (config->intf_down)
- return 0;
return otx2_do_set_vf_mac(pf, vf, mac);
}
@@ -1966,7 +2019,7 @@ static int otx2_do_set_vf_vlan(struct otx2_nic *pf, int vf, u16 vlan, u8 qos)
req->packet.vlan_tci = htons(vlan);
req->mask.vlan_tci = htons(VLAN_VID_MASK);
req->features = BIT_ULL(NPC_OUTER_VID);
- req->channel = pf->rx_chan_base;
+ req->channel = pf->hw.rx_chan_base;
req->intf = NIX_INTF_RX;
req->default_rule = 1;
req->append = 1;
@@ -1986,6 +2039,9 @@ static int otx2_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
struct pci_dev *pdev = pf->pdev;
struct otx2_vf_config *config;
+ if (!netif_running(netdev))
+ return -EAGAIN;
+
if (vf >= pci_num_vf(pdev))
return -EINVAL;
@@ -1998,8 +2054,6 @@ static int otx2_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
config = &pf->vf_configs[vf];
config->vlan = vlan;
- if (config->intf_down)
- return 0;
return otx2_do_set_vf_vlan(pf, vf, vlan, qos);
}
@@ -2065,6 +2119,30 @@ static int otx2_check_pf_usable(struct otx2_nic *nic)
return 0;
}
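+/* The final MSI-X vector count is known only after NIX/NPA LFs are
+ * attached by the AF, so the minimal set allocated at probe time is
+ * released and the full set is reallocated here.
+ */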
+static int otx2_realloc_msix_vectors(struct otx2_nic *pf)
+{
+ struct otx2_hw *hw = &pf->hw;
+ int num_vec, err;
+
+ num_vec = hw->nix_msixoff;
+ num_vec += NIX_LF_CINT_VEC_START + hw->max_queues;
+
+ otx2_disable_mbox_intr(pf);
+ pci_free_irq_vectors(hw->pdev);
+ err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
+ if (err < 0) {
+ dev_err(pf->dev, "%s: Failed to realloc %d IRQ vectors\n",
+ __func__, num_vec);
+ return err;
+ }
+
+ err = otx2_register_mbox_intr(pf, false);
+ if (err)
+ return err;
+ return 0;
+}
+
static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct device *dev = &pdev->dev;
@@ -2116,7 +2194,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pf->pdev = pdev;
pf->dev = dev;
pf->total_vfs = pci_sriov_get_totalvfs(pdev);
- pf->intf_down = true;
+ pf->flags |= OTX2_FLAG_INTF_DOWN;
hw = &pf->hw;
hw->pdev = pdev;
@@ -2147,7 +2225,8 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_free_netdev;
- err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
+ err = pci_alloc_irq_vectors(hw->pdev, RVU_PF_INT_VEC_CNT,
+ RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX);
if (err < 0) {
dev_err(dev, "%s: Failed to alloc %d IRQ vectors\n",
__func__, num_vec);
@@ -2160,7 +2239,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_free_irq_vectors;
/* Register mailbox interrupt */
- err = otx2_register_mbox_intr(pf);
+ err = otx2_register_mbox_intr(pf, true);
if (err)
goto err_mbox_destroy;
@@ -2171,6 +2250,10 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_disable_mbox_intr;
+ err = otx2_realloc_msix_vectors(pf);
+ if (err)
+ goto err_detach_rsrc;
+
err = otx2_set_real_num_queues(netdev, hw->tx_queues, hw->rx_queues);
if (err)
goto err_detach_rsrc;
@@ -2211,7 +2294,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_STAG_RX;
- netdev->features |= netdev->hw_features;
+ netdev->features |= netdev->hw_features | NETIF_F_LLTX;
netdev->hw_features |= NETIF_F_LOOPBACK | NETIF_F_NTUPLE |
NETIF_F_RXALL;
@@ -2223,7 +2306,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
netdev->netdev_ops = &otx2_netdev_ops;
- /* MTU range: 68 - 9190 */
+ /* MTU range: 64 - 9190 */
netdev->min_mtu = OTX2_MIN_MTU;
netdev->max_mtu = OTX2_MAX_MTU;
@@ -2295,21 +2378,32 @@ static void otx2_vf_link_event_task(struct work_struct *work)
otx2_sync_mbox_up_msg(&pf->mbox_pfvf[0], vf_idx);
}
-static void otx2_vf_mac_vlan_task(struct work_struct *work)
+static void otx2_vf_ptp_info_task(struct work_struct *work)
{
+ struct cgx_ptp_rx_info_msg *req;
struct otx2_vf_config *config;
+ struct mbox_msghdr *msghdr;
struct otx2_nic *pf;
int vf_idx;
- config = container_of(work, struct otx2_vf_config, mac_vlan_work.work);
+ config = container_of(work, struct otx2_vf_config,
+ ptp_info_work.work);
vf_idx = config - config->pf->vf_configs;
pf = config->pf;
- if (!is_zero_ether_addr(config->mac))
- otx2_do_set_vf_mac(pf, vf_idx, config->mac);
+ msghdr = otx2_mbox_alloc_msg_rsp(&pf->mbox_pfvf[0].mbox_up, vf_idx,
+ sizeof(*req), sizeof(struct msg_rsp));
+ if (!msghdr) {
+ dev_err(pf->dev, "Failed to create VF%d PTP info event\n", vf_idx);
+ return;
+ }
+
+ req = (struct cgx_ptp_rx_info_msg *)msghdr;
+ req->hdr.id = MBOX_MSG_CGX_PTP_RX_INFO;
+ req->hdr.sig = OTX2_MBOX_REQ_SIG;
+ req->ptp_en = pf->ptp->ptp_en;
- if (config->vlan)
- otx2_do_set_vf_vlan(pf, vf_idx, config->vlan, 0);
+ otx2_sync_mbox_up_msg(&pf->mbox_pfvf[0], vf_idx);
}
static int otx2_sriov_enable(struct pci_dev *pdev, int numvfs)
@@ -2342,8 +2436,8 @@ static int otx2_sriov_enable(struct pci_dev *pdev, int numvfs)
pf->vf_configs[i].intf_down = true;
INIT_DELAYED_WORK(&pf->vf_configs[i].link_event_work,
otx2_vf_link_event_task);
- INIT_DELAYED_WORK(&pf->vf_configs[i].mac_vlan_work,
- otx2_vf_mac_vlan_task);
+ INIT_DELAYED_WORK(&pf->vf_configs[i].ptp_info_work,
+ otx2_vf_ptp_info_task);
}
ret = otx2_pf_flr_init(pf, numvfs);
@@ -2383,8 +2477,10 @@ static int otx2_sriov_disable(struct pci_dev *pdev)
pci_disable_sriov(pdev);
- for (i = 0; i < pci_num_vf(pdev); i++)
+ for (i = 0; i < pci_num_vf(pdev); i++) {
cancel_delayed_work_sync(&pf->vf_configs[i].link_event_work);
+ cancel_delayed_work_sync(&pf->vf_configs[i].ptp_info_work);
+ }
kfree(pf->vf_configs);
otx2_disable_flr_me_intr(pf);
@@ -2413,9 +2509,9 @@ static void otx2_remove(struct pci_dev *pdev)
pf = netdev_priv(netdev);
- if (pf->hw_tx_tstamp)
+ if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED)
otx2_config_hw_tx_tstamp(pf, false);
- if (pf->hw_rx_tstamp)
+ if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED)
otx2_config_hw_rx_tstamp(pf, false);
/* Disable link notifications */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
index 4211930746fd..01a6961afc93 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
@@ -8,20 +8,9 @@
* published by the Free Software Foundation.
*/
-#include <linux/ptp_clock_kernel.h>
-
#include "otx2_common.h"
#include "otx2_ptp.h"
-struct otx2_ptp {
- struct ptp_clock_info ptp_info;
- struct ptp_clock *ptp_clock;
- struct otx2_nic *nic;
-
- struct cyclecounter cycle_counter;
- struct timecounter time_counter;
-};
-
static int otx2_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm)
{
struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
index cfdd85cf2b4d..e84cf3cc67fd 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
@@ -58,8 +58,11 @@
#define RVU_VF_MSIX_VECX_CTL(a) (0x008 | (a) << 4)
#define RVU_VF_MSIX_PBAX(a) (0xF0000 | (a) << 3)
+#define RVU_FUNC_BLKADDR_SHIFT 20
+#define RVU_FUNC_BLKADDR_MASK 0x1FULL
+
/* NPA LF registers */
-#define NPA_LFBASE (BLKADDR_NPA << 20)
+#define NPA_LFBASE (BLKTYPE_NPA << RVU_FUNC_BLKADDR_SHIFT)
#define NPA_LF_AURA_OP_ALLOCX(a) (NPA_LFBASE | 0x10 | (a) << 3)
#define NPA_LF_AURA_OP_FREE0 (NPA_LFBASE | 0x20)
#define NPA_LF_AURA_OP_FREE1 (NPA_LFBASE | 0x28)
@@ -90,7 +93,7 @@
#define NPA_LF_QINTX_ENA_W1C(a) (NPA_LFBASE | 0x330 | (a) << 12)
/* NIX LF registers */
-#define NIX_LFBASE (BLKADDR_NIX0 << 20)
+#define NIX_LFBASE (BLKTYPE_NIX << RVU_FUNC_BLKADDR_SHIFT)
#define NIX_LF_RX_SECRETX(a) (NIX_LFBASE | 0x0 | (a) << 3)
#define NIX_LF_CFG (NIX_LFBASE | 0x100)
#define NIX_LF_GINT (NIX_LFBASE | 0x200)
@@ -148,7 +151,7 @@
#define NIX_AF_TL3_TL2X_LINKX_CFG(a, b) (0x1700 | (a) << 16 | (b) << 3)
/* LMT LF registers */
-#define LMT_LFBASE BIT_ULL(20)
+#define LMT_LFBASE BIT_ULL(RVU_FUNC_BLKADDR_SHIFT)
#define LMT_LF_LMTLINEX(a) (LMT_LFBASE | 0x000 | (a) << 12)
#define LMT_LF_LMTCANCEL (LMT_LFBASE | 0x400)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index 209fe2212239..9513381d7e5f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -18,28 +18,26 @@
#include "otx2_txrx.h"
#include "otx2_ptp.h"
-/* Flush SQE written to LMT to SQB */
-static inline u64 otx2_lmt_flush(uint64_t addr)
-{
- return atomic64_fetch_xor_relaxed(0, (atomic64_t *)addr);
-}
+#define CQE_ADDR(CQ, idx) ((CQ)->cqe_base + ((CQ)->cqe_size * (idx)))
-static inline u64 otx2_nix_cq_op_status(struct otx2_nic *pfvf, int cq_idx)
+static inline struct nix_cqe_hdr_s *otx2_get_next_cqe(struct otx2_cq_queue *cq)
{
- u64 incr = (u64)cq_idx << 32;
- atomic64_t *ptr;
- u64 status;
+ struct nix_cqe_hdr_s *cqe_hdr;
- ptr = (__force atomic64_t *)(pfvf->reg_base + NIX_LF_CQ_OP_STATUS);
+ cqe_hdr = (struct nix_cqe_hdr_s *)CQE_ADDR(cq, cq->cq_head);
+ if (cqe_hdr->cqe_type == NIX_XQE_TYPE_INVALID)
+ return NULL;
- status = atomic64_fetch_add_relaxed(incr, ptr);
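+ /* The CQE ring size is a power of two, so the head index wraps
+ * with a mask instead of a modulo.
+ */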
+ cq->cq_head++;
+ cq->cq_head &= (cq->cqe_cnt - 1);
- /* Barrier to prevent speculative reads of CQEs and their
- * processing before above load of CQ_STATUS returns.
- */
- dma_rmb();
+ return cqe_hdr;
+}
- return status;
+/* Flush SQE written to LMT to SQB */
+static inline u64 otx2_lmt_flush(uint64_t addr)
+{
+ return atomic64_fetch_xor_relaxed(0, (atomic64_t *)addr);
}
static inline unsigned int frag_num(unsigned int i)
@@ -86,17 +84,17 @@ static void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg)
}
static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
- struct otx2_cq_queue *cq, void *cqe,
+ struct otx2_cq_queue *cq,
+ struct otx2_snd_queue *sq,
+ struct nix_cqe_hdr_s *cqe_hdr,
int budget, int *tx_pkts, int *tx_bytes)
{
- struct nix_cqe_hdr_s *cqe_hdr = (struct nix_cqe_hdr_s *)cqe;
struct nix_send_comp_s *snd_comp;
struct sk_buff *skb = NULL;
- struct otx2_snd_queue *sq;
struct sg_list *sg;
- int sqe_id;
- snd_comp = (struct nix_send_comp_s *)(cqe + sizeof(*cqe_hdr));
+ snd_comp = (struct nix_send_comp_s *)
+ ((void *)cqe_hdr + sizeof(*cqe_hdr));
if (snd_comp->status) {
/* tx packet error handling*/
if (netif_msg_tx_err(pfvf)) {
@@ -108,16 +106,14 @@ static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
/* Barrier, so that update to sq by other cpus is visible */
smp_mb();
- sq = &pfvf->qset.sq[cq->cint_idx];
- sqe_id = snd_comp->sqe_id;
- sg = &sq->sg[sqe_id];
+ sg = &sq->sg[snd_comp->sqe_id];
skb = (struct sk_buff *)sg->skb;
if (!skb)
return;
if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
- u64 timestamp = ((u64 *)sq->timestamps->base)[sqe_id];
+ u64 timestamp = ((u64 *)sq->timestamps->base)[snd_comp->sqe_id];
if (timestamp != 1) {
u64 tsns;
@@ -141,6 +137,24 @@ static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
sg->skb = (u64)NULL;
}
+static inline void otx2_set_taginfo(struct nix_rx_parse_s *parse,
+ struct sk_buff *skb)
+{
+ /* Check if VLAN is present, captured and stripped from packet */
+ if (parse->vtag0_valid && parse->vtag0_gone) {
+ skb_frag_t *frag0 = &skb_shinfo(skb)->frags[0];
+
+ /* Is the tag captured STAG or CTAG ? */
+ if (((struct ethhdr *)skb_frag_address(frag0))->h_proto ==
+ htons(ETH_P_8021Q))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD),
+ parse->vtag0_tci);
+ else
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ parse->vtag0_tci);
+ }
+}
+
static inline void otx2_set_rxhash(struct otx2_nic *pfvf,
struct nix_cqe_hdr_s *cqe_hdr,
struct sk_buff *skb)
@@ -164,78 +178,66 @@ static inline void otx2_set_rxhash(struct otx2_nic *pfvf,
skb_set_hash(skb, hash, hash_type);
}
-static void otx2_skb_add_frag(struct otx2_nic *pfvf,
- struct sk_buff *skb, u64 iova, int len)
+static inline void otx2_set_rxtstamp(struct otx2_nic *pfvf,
+ struct sk_buff *skb, void *data)
{
- struct page *page;
- void *va;
+ u64 tsns;
+ int err;
- va = phys_to_virt(otx2_iova_to_phys(pfvf->iommu_domain, iova));
- page = virt_to_page(va);
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
- va - page_address(page), len, RCV_FRAG_LEN);
+ if (!(pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED))
+ return;
- dma_unmap_page_attrs(pfvf->dev, iova - OTX2_HEAD_ROOM, RCV_FRAG_LEN,
- DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ /* The first 8 bytes is the timestamp */
+ err = otx2_ptp_tstamp2time(pfvf, be64_to_cpu(*(u64 *)data), &tsns);
+ if (err)
+ return;
+
+ skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(tsns);
}
-static inline struct sk_buff *
-otx2_get_rcv_skb(struct otx2_nic *pfvf, u64 iova, int len)
+static void otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb,
+ u64 iova, int len, struct nix_rx_parse_s *parse)
{
- struct sk_buff *skb;
+ struct page *page;
+ int off = 0;
void *va;
- iova -= OTX2_HEAD_ROOM;
va = phys_to_virt(otx2_iova_to_phys(pfvf->iommu_domain, iova));
- skb = build_skb(va, RCV_FRAG_LEN);
- if (!skb) {
- put_page(virt_to_page(va));
- return NULL;
+
+ if (!skb_shinfo(skb)->nr_frags) {
+ /* Check if data starts at some nonzero offset
+ * from the start of the buffer. For now the
+ * only possible offset is 8 bytes in the case
+ * where the packet is prepended by a timestamp.
+ */
+ if (parse->laptr) {
+ otx2_set_rxtstamp(pfvf, skb, va);
+ off = 8;
+ }
+ off += pfvf->xtra_hdr;
}
- skb_reserve(skb, OTX2_HEAD_ROOM);
- skb_put(skb, len);
+ page = virt_to_page(va);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ va - page_address(page) + off, len - off, RCV_FRAG_LEN);
- dma_unmap_page_attrs(pfvf->dev, iova, RCV_FRAG_LEN,
+ dma_unmap_page_attrs(pfvf->dev, iova - OTX2_HEAD_ROOM, RCV_FRAG_LEN,
DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
- prefetch(skb->data);
- return skb;
-}
-
-static inline void otx2_set_rxtstamp(struct otx2_nic *pfvf, struct sk_buff *skb)
-{
- u64 tsns;
- int err;
-
- if (!pfvf->hw_rx_tstamp)
- return;
-
- /* The first 8 bytes is the timestamp */
- err = otx2_ptp_tstamp2time(pfvf, be64_to_cpu(*(u64 *)skb->data), &tsns);
- if (err)
- goto done;
-
- skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(tsns);
-
-done:
- __skb_pull(skb, 8);
}
static inline bool otx2_check_rcv_errors(struct otx2_nic *pfvf,
- struct otx2_cq_queue *cq, void *cqe)
+ struct nix_rx_parse_s *parse, int qidx)
{
struct otx2_drv_stats *stats = &pfvf->hw.drv_stats;
- struct nix_rx_parse_s *parse;
struct nix_rx_sg_s *sg;
void *start, *end;
u64 *iova;
int seg;
- parse = (struct nix_rx_parse_s *)(cqe + sizeof(struct nix_cqe_hdr_s));
if (netif_msg_rx_err(pfvf))
netdev_err(pfvf->netdev,
"RQ%d: Error pkt with errlev:0x%x errcode:0x%x\n",
- cq->cq_idx, parse->errlev, parse->errcode);
+ qidx, parse->errlev, parse->errcode);
if (parse->errlev == NPC_ERRLVL_RE) {
switch (parse->errcode) {
@@ -280,7 +282,7 @@ static inline bool otx2_check_rcv_errors(struct otx2_nic *pfvf,
return false;
}
- start = cqe + sizeof(struct nix_cqe_hdr_s) + sizeof(*parse);
+ start = (void *)parse + sizeof(*parse);
end = start + ((parse->desc_sizem1 + 1) * 16);
while ((start + sizeof(*sg)) < end) {
sg = (struct nix_rx_sg_s *)start;
@@ -291,7 +293,7 @@ static inline bool otx2_check_rcv_errors(struct otx2_nic *pfvf,
return false;
for (seg = 0; seg < sg->segs; seg++) {
- otx2_aura_freeptr(pfvf, cq->cq_idx, *iova & ~0x07ULL);
+ otx2_aura_freeptr(pfvf, qidx, *iova & ~0x07ULL);
iova++;
}
if (sg->segs == 1)
@@ -303,10 +305,10 @@ static inline bool otx2_check_rcv_errors(struct otx2_nic *pfvf,
}
static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
- struct otx2_cq_queue *cq, void *cqe)
+ struct napi_struct *napi,
+ struct otx2_cq_queue *cq,
+ struct nix_cqe_hdr_s *cqe_hdr)
{
- struct nix_cqe_hdr_s *cqe_hdr = (struct nix_cqe_hdr_s *)cqe;
- struct otx2_qset *qset = &pfvf->qset;
struct nix_rx_parse_s *parse;
struct sk_buff *skb = NULL;
struct nix_rx_sg_s *sg;
@@ -316,15 +318,19 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
u64 *iova;
/* CQE_HDR_S for a Rx pkt is always followed by RX_PARSE_S */
- parse = (struct nix_rx_parse_s *)(cqe + sizeof(*cqe_hdr));
+ parse = (struct nix_rx_parse_s *)((void *)cqe_hdr + sizeof(*cqe_hdr));
if (parse->errlev || parse->errcode) {
- if (otx2_check_rcv_errors(pfvf, cq, cqe))
+ if (otx2_check_rcv_errors(pfvf, parse, cq->cq_idx))
return;
}
- start = cqe + sizeof(*cqe_hdr) + sizeof(*parse);
+ start = (void *)parse + sizeof(*parse);
end = start + ((parse->desc_sizem1 + 1) * 16);
+ skb = napi_get_frags(napi);
+ if (!skb)
+ return;
+
 /* Run through each NIX_RX_SG_S subdc and frame the skb */
while ((start + sizeof(*sg)) < end) {
sg = (struct nix_rx_sg_s *)start;
@@ -333,24 +339,10 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
for (seg = 0; seg < sg->segs; seg++) {
len = sg_lens[frag_num(seg)];
- /* Starting IOVA's 2:0 bits give alignment
- * bytes after which packet data starts.
- */
- if (!skb) {
- skb = otx2_get_rcv_skb(pfvf, *iova, len);
- /* check if data starts at some nonzero offset
- * from the start of the buffer. For now the
- * only possible offset is 8 bytes in the case
- * the packet data are prepended by a timestamp.
- */
- if (parse->laptr)
- otx2_set_rxtstamp(pfvf, skb);
- } else {
- otx2_skb_add_frag(pfvf, skb, *iova, len);
- }
+ otx2_skb_add_frag(pfvf, skb, *iova, len, parse);
iova++;
- cq->pool_ptrs++;
}
+ cq->pool_ptrs += sg->segs;
/* When SEGS = 1, only one IOVA is followed by NIX_RX_SG_S.
* When SEGS >= 2, three IOVAs will follow NIX_RX_SG_S,
@@ -362,119 +354,45 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
start += sizeof(*sg) + (3 * sizeof(u64));
}
- if (!skb)
- return;
-
- if (netif_msg_pktdata(pfvf) && !skb_is_nonlinear(skb)) {
- netdev_info(pfvf->netdev, "skb 0x%p, len=%d\n", skb, skb->len);
- print_hex_dump(KERN_DEBUG, "RX:", DUMP_PREFIX_OFFSET, 16, 1,
- skb->data, skb->len, true);
- }
-
otx2_set_rxhash(pfvf, cqe_hdr, skb);
skb_record_rx_queue(skb, cq->cq_idx);
- skb->protocol = eth_type_trans(skb, pfvf->netdev);
if (pfvf->netdev->features & NETIF_F_RXCSUM)
skb->ip_summed = CHECKSUM_UNNECESSARY;
- /* This holds true on condition RX VLAN offloads are enabled and
- * 802.1AD or 802.1Q VLANs were found in frame.
- */
- if (parse->vtag0_gone) {
- if (skb->protocol == htons(ETH_P_8021Q))
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD),
- parse->vtag0_tci);
- else
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
- parse->vtag0_tci);
- }
+ otx2_set_taginfo(parse, skb);
- napi_gro_receive(&qset->napi[cq->cint_idx].napi, skb);
+ napi_gro_frags(napi);
}
-#define CQE_ADDR(CQ, idx) ((CQ)->cqe_base + ((CQ)->cqe_size * (idx)))
-
-int otx2_napi_handler(struct otx2_cq_queue *cq,
- struct otx2_nic *pfvf, int budget)
+static inline int otx2_rx_napi_handler(struct otx2_nic *pfvf,
+ struct napi_struct *napi,
+ struct otx2_cq_queue *cq, int budget)
{
struct otx2_pool *rbpool = cq->rbpool;
- int processed_cqe = 0, workdone = 0;
- int tx_pkts = 0, tx_bytes = 0;
struct nix_cqe_hdr_s *cqe_hdr;
- struct netdev_queue *txq;
- u64 cq_status;
+ int processed_cqe = 0;
s64 bufptr;
- /* If the pending CQE > 64 skip CQ status read */
- if (cq->pend_cqe >= budget)
- goto process_cqe;
-
- cq_status = otx2_nix_cq_op_status(pfvf, cq->cq_idx);
- if (cq_status & BIT_ULL(63)) {
- dev_err(pfvf->dev, "CQ operation error");
- pfvf->intf_down = true;
- schedule_work(&pfvf->reset_task);
- return 0;
- }
- if (cq_status & BIT_ULL(46)) {
- dev_err(pfvf->dev, "CQ stopped due to error");
- pfvf->intf_down = true;
- schedule_work(&pfvf->reset_task);
- return 0;
- }
-
- cq->cq_head = (cq_status >> 20) & 0xFFFFF;
- cq->cq_tail = cq_status & 0xFFFFF;
-
- /* Since multiple CQs may be mapped to same CINT,
- * check if there are valid CQEs in this CQ.
- */
- if (cq->cq_head == cq->cq_tail)
- return 0;
-process_cqe:
- cq->pend_cqe = 0;
- while (cq->cq_head != cq->cq_tail) {
- if (workdone >= budget) {
- /* Calculate number of pending CQEs */
- if (cq->cq_tail < cq->cq_head)
- cq->pend_cqe = (cq->cqe_cnt - cq->cq_head)
- + cq->cq_tail;
- else
- cq->pend_cqe = cq->cq_tail - cq->cq_head;
+ /* Make sure HW writes to CQ are done */
+ dma_rmb();
+ while (processed_cqe < budget) {
+ cqe_hdr = otx2_get_next_cqe(cq);
+ if (!cqe_hdr) {
+ if (!processed_cqe)
+ return 0;
break;
}
+ otx2_rcv_pkt_handler(pfvf, napi, cq, cqe_hdr);
- cqe_hdr = (struct nix_cqe_hdr_s *)CQE_ADDR(cq, cq->cq_head);
- cq->cq_head++;
- cq->cq_head &= (cq->cqe_cnt - 1);
-
- switch (cqe_hdr->cqe_type) {
- case NIX_XQE_TYPE_RX:
- /* Receive packet handler*/
- otx2_rcv_pkt_handler(pfvf, cq, cqe_hdr);
- workdone++;
- break;
- case NIX_XQE_TYPE_SEND:
- otx2_snd_pkt_handler(pfvf, cq, cqe_hdr, budget,
- &tx_pkts, &tx_bytes);
- }
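+ /* Mark the CQE consumed; otx2_get_next_cqe() relies on the
+ * INVALID type to detect the end of valid entries.
+ */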
+ cqe_hdr->cqe_type = NIX_XQE_TYPE_INVALID;
processed_cqe++;
}
+ /* Free CQEs to HW */
otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
((u64)cq->cq_idx << 32) | processed_cqe);
- if (tx_pkts) {
- txq = netdev_get_tx_queue(pfvf->netdev, cq->cint_idx);
- netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
- /* Check if queue was stopped earlier due to ring full */
- smp_mb();
- if (netif_carrier_ok(pfvf->netdev) &&
- netif_tx_queue_stopped(txq))
- netif_tx_wake_queue(txq);
- }
-
if (!cq->pool_ptrs)
return 0;
@@ -500,48 +418,88 @@ process_cqe:
}
otx2_get_page(rbpool);
- return workdone;
+ return processed_cqe;
+}
+
+static inline int otx2_tx_napi_handler(struct otx2_nic *pfvf,
+ struct otx2_cq_queue *cq, int budget)
+{
+ struct nix_cqe_hdr_s *cqe_hdr;
+ int tx_pkts = 0, tx_bytes = 0;
+ struct otx2_snd_queue *sq;
+ struct netdev_queue *txq;
+ int processed_cqe = 0;
+
+ sq = &pfvf->qset.sq[cq->cint_idx];
+
+ /* Make sure HW writes to CQ are done */
+ dma_rmb();
+ while (processed_cqe < budget) {
+ cqe_hdr = otx2_get_next_cqe(cq);
+ if (!cqe_hdr) {
+ if (!processed_cqe)
+ return 0;
+ break;
+ }
+ otx2_snd_pkt_handler(pfvf, cq, sq, cqe_hdr, budget,
+ &tx_pkts, &tx_bytes);
+
+ cqe_hdr->cqe_type = NIX_XQE_TYPE_INVALID;
+ processed_cqe++;
+ }
+
+ /* Free CQEs to HW */
+ otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
+ ((u64)cq->cq_idx << 32) | processed_cqe);
+
+ if (tx_pkts) {
+ txq = netdev_get_tx_queue(pfvf->netdev, cq->cint_idx);
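+ /* Report completions to BQL for dynamic queue limit tuning */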
+ netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
+ /* Check if queue was stopped earlier due to ring full */
+ smp_mb();
+ if (netif_tx_queue_stopped(txq) &&
+ netif_carrier_ok(pfvf->netdev))
+ netif_tx_wake_queue(txq);
+ }
+ return 0;
}
-int otx2_poll(struct napi_struct *napi, int budget)
+int otx2_napi_handler(struct napi_struct *napi, int budget)
{
struct otx2_cq_poll *cq_poll;
int workdone = 0, cq_idx, i;
struct otx2_cq_queue *cq;
struct otx2_qset *qset;
struct otx2_nic *pfvf;
- u64 qcount;
cq_poll = container_of(napi, struct otx2_cq_poll, napi);
pfvf = (struct otx2_nic *)cq_poll->dev;
qset = &pfvf->qset;
- for (i = 0; i < MAX_CQS_PER_CNT; i++) {
+ for (i = 0; i < CQS_PER_CINT; i++) {
cq_idx = cq_poll->cq_ids[i];
if (cq_idx == CINT_INVALID_CQ)
continue;
cq = &qset->cq[cq_idx];
- qcount = otx2_read64(pfvf, NIX_LF_CINTX_CNT(cq_poll->cint_idx));
- qcount = (qcount >> 32) & 0xFFFF;
- /* If the RQ refill WQ task is running, skip napi
- * scheduler for this queue.
- */
- if (cq->refill_task_sched)
- continue;
- workdone += otx2_napi_handler(cq, pfvf, budget);
- if (workdone && qcount == 1)
- break;
+ if (cq->cq_type == CQ_RX) {
+ /* If the RQ refill WQ task is running, skip napi
+ * scheduler for this queue.
+ */
+ if (cq->refill_task_sched)
+ continue;
+ workdone += otx2_rx_napi_handler(pfvf, napi,
+ cq, budget);
+ } else {
+ workdone += otx2_tx_napi_handler(pfvf, cq, budget);
+ }
}
/* Clear the IRQ */
otx2_write64(pfvf, NIX_LF_CINTX_INT(cq_poll->cint_idx), BIT_ULL(0));
- if (workdone < budget) {
- /* Exit polling */
- napi_complete(napi);
-
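+ /* napi_complete_done() may decline completion (e.g. while busy
+ * polling); re-enable the interrupt only when it succeeds.
+ */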
+ if (workdone < budget && napi_complete_done(napi, workdone)) {
/* If interface is going down, don't re-enable IRQ */
- if (pfvf->intf_down)
+ if (pfvf->flags & OTX2_FLAG_INTF_DOWN)
return workdone;
/* Re-enable interrupts */
@@ -682,13 +640,18 @@ static void otx2_sqe_add_hdr(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
{
int proto = 0;
+ /* Check if the SQE was framed before; if so, there is no need to
+ * set these constants again and again.
+ */
+ if (!sqe_hdr->total) {
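+ /* A zero total length marks an SQE slot that was never framed */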
+ /* Don't free Tx buffers to Aura */
+ sqe_hdr->df = 1;
+ sqe_hdr->aura = sq->aura_id;
+ /* Post a CQE Tx after pkt transmission */
+ sqe_hdr->pnc = 1;
+ sqe_hdr->sq = qidx;
+ }
sqe_hdr->total = skb->len;
- /* Don't free Tx buffers to Aura */
- sqe_hdr->df = 1;
- sqe_hdr->aura = sq->aura_id;
- /* Post a CQE Tx after pkt transmission */
- sqe_hdr->pnc = 1;
- sqe_hdr->sq = qidx;
/* Set SQE identifier which will be used later for freeing SKB */
sqe_hdr->sqe_id = sq->head;
@@ -971,8 +934,10 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
return true;
}
- /* Set SQE's SEND_HDR */
- memset(sq->sqe_base, 0, sq->sqe_size);
+ /* Set SQE's SEND_HDR.
+ * Do not clear the first 64 bits as they contain constant info.
+ */
+ memset(sq->sqe_base + 8, 0, sq->sqe_size - 8);
sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base);
otx2_sqe_add_hdr(pfvf, sq, sqe_hdr, skb, qidx);
offset = sizeof(*sqe_hdr);
@@ -1003,6 +968,84 @@ fail:
}
EXPORT_SYMBOL(otx2_sq_append_skb);
+void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
+{
+ struct nix_cqe_hdr_s *cqe_hdr;
+ struct nix_rx_parse_s *parse;
+ struct nix_rx_sg_s *sg;
+ int processed_cqe = 0;
+ void *start, *end;
+ u64 *iova, pa;
+ int seg;
+
+ /* Make sure HW writes to CQ are done */
+ dma_rmb();
+ while ((cqe_hdr = otx2_get_next_cqe(cq))) {
+ parse = (struct nix_rx_parse_s *)
+ ((void *)cqe_hdr + sizeof(*cqe_hdr));
+ start = (void *)parse + sizeof(*parse);
+ end = start + ((parse->desc_sizem1 + 1) * 16);
+ while ((start + sizeof(*sg)) < end) {
+ sg = (struct nix_rx_sg_s *)start;
+ iova = (void *)sg + sizeof(*sg);
+ for (seg = 0; seg < sg->segs; seg++) {
+ /* Free IOVA */
+ *iova -= OTX2_HEAD_ROOM;
+ pa = otx2_iova_to_phys(pfvf->iommu_domain,
+ *iova);
+ dma_unmap_page_attrs(pfvf->dev, *iova,
+ RCV_FRAG_LEN,
+ DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ put_page(virt_to_page(phys_to_virt(pa)));
+ iova++;
+ }
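+ /* One IOVA follows NIX_RX_SG_S when SEGS == 1, otherwise
+ * three do.
+ */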
+ start += sizeof(*sg);
+ start += (sg->segs == 1) ?
+ sizeof(u64) : 3 * sizeof(u64);
+ }
+ cqe_hdr->cqe_type = NIX_XQE_TYPE_INVALID;
+ processed_cqe++;
+ }
+
+ /* Free CQEs to HW */
+ otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
+ ((u64)cq->cq_idx << 32) | processed_cqe);
+}
+
+void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
+{
+ struct nix_send_comp_s *snd_comp;
+ struct nix_cqe_hdr_s *cqe_hdr;
+ struct sk_buff *skb = NULL;
+ struct otx2_snd_queue *sq;
+ int processed_cqe = 0;
+ struct sg_list *sg;
+
+ sq = &pfvf->qset.sq[cq->cint_idx];
+
+ /* Make sure HW writes to CQ are done */
+ dma_rmb();
+ while ((cqe_hdr = otx2_get_next_cqe(cq))) {
+ snd_comp = (struct nix_send_comp_s *)
+ ((void *)cqe_hdr + sizeof(*cqe_hdr));
+ sg = &sq->sg[snd_comp->sqe_id];
+ skb = (struct sk_buff *)sg->skb;
+ if (skb) {
+ otx2_dma_unmap_skb_frags(pfvf, sg);
+ dev_kfree_skb_any(skb);
+ sg->skb = (u64)NULL;
+ }
+
+ cqe_hdr->cqe_type = NIX_XQE_TYPE_INVALID;
+ processed_cqe++;
+ }
+
+ /* Free CQEs to HW */
+ otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
+ ((u64)cq->cq_idx << 32) | processed_cqe);
+}
+
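A hypothetical caller for the two cleanup helpers above, draining every
CQ when the interface goes down. The function itself and the
qset->cq_cnt bound are assumptions; CQ_RX and cq_type come from the
otx2_txrx.h hunk below:

	static void example_drain_cqs(struct otx2_nic *pfvf)
	{
		struct otx2_qset *qset = &pfvf->qset;
		int qidx;

		for (qidx = 0; qidx < qset->cq_cnt; qidx++) {
			struct otx2_cq_queue *cq = &qset->cq[qidx];

			if (cq->cq_type == CQ_RX)
				otx2_cleanup_rx_cqes(pfvf, cq);
			else
				otx2_cleanup_tx_cqes(pfvf, cq);
		}
	}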
int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable)
{
struct msg_req *msg;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
index 7120ecf436a8..b898693ac08d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -21,10 +21,15 @@
#define DMA_BUFFER_LEN 1536 /* In multiples of 128bytes */
#define OTX2_DATA_ALIGN(X) ALIGN(X, OTX2_ALIGN)
-#define RCV_FRAG_LEN \
+#define RCV_FRAG_LEN1 \
((OTX2_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD)) + \
(OTX2_DATA_ALIGN(sizeof(struct skb_shared_info))))
+/* Prefer at least 2048-byte buffers for better last-level cache
+ * utilization and data distribution across regions.
+ */
+#define RCV_FRAG_LEN ((RCV_FRAG_LEN1 < 2048) ? 2048 : RCV_FRAG_LEN1)
+
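For a sense of the clamp: on a typical 64-bit build with 128-byte
OTX2_ALIGN, NET_SKB_PAD of 64 and an skb_shared_info of roughly 320
bytes (all assumptions), RCV_FRAG_LEN1 = ALIGN(1536 + 64, 128) +
ALIGN(320, 128) = 1664 + 384 = 2048, so the 2048 floor only takes
effect on configurations where the padding or skb_shared_info are
smaller.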
#define OTX2_HEAD_ROOM OTX2_ALIGN
#define OTX2_ETH_HLEN (VLAN_ETH_HLEN + VLAN_HLEN)
@@ -83,12 +88,17 @@ struct otx2_snd_queue {
u64 *sqb_ptrs;
} ____cacheline_aligned_in_smp;
+enum cq_type {
+ CQ_RX,
+ CQ_TX,
+ CQS_PER_CINT = 2, /* RQ + SQ */
+};
+
struct otx2_cq_poll {
void *dev;
#define CINT_INVALID_CQ 255
-#define MAX_CQS_PER_CNT 2 /* RQ + SQ */
u8 cint_idx;
- u8 cq_ids[MAX_CQS_PER_CNT];
+ u8 cq_ids[CQS_PER_CINT];
struct napi_struct napi;
};
@@ -101,16 +111,18 @@ struct otx2_pool {
struct page *page;
};
+#define CQ_OP_ERROR BIT_ULL(63)
+#define CQ_CQ_ERROR BIT_ULL(46)
+
struct otx2_cq_queue {
u8 cq_idx;
+ u8 cq_type;
u8 cint_idx; /* CQ interrupt id */
u8 refill_task_sched;
u16 cqe_size;
u16 pool_ptrs;
u32 cqe_cnt;
u32 cq_head;
- u32 cq_tail;
- u32 pend_cqe;
void *cqe_base;
struct qmem *cqe;
struct otx2_pool *rbpool;
@@ -133,12 +145,12 @@ struct otx2_qset {
static inline u64 otx2_iova_to_phys(void *iommu_domain, dma_addr_t dma_addr)
{
/* Translation is installed only when IOMMU is present */
- if (iommu_domain)
+ if (likely(iommu_domain))
return iommu_iova_to_phys(iommu_domain, dma_addr);
return dma_addr;
}
-int otx2_poll(struct napi_struct *napi, int budget);
+int otx2_napi_handler(struct napi_struct *napi, int budget);
bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
struct sk_buff *skb, u16 qidx);
#endif /* OTX2_TXRX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 322d047d306d..129c693e62aa 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -38,9 +38,6 @@ enum {
RVU_VF_INT_VEC_MBOX = 0x0,
};
-static int otx2vf_open(struct net_device *netdev);
-static int otx2vf_stop(struct net_device *netdev);
-
static int otx2vf_change_mtu(struct net_device *netdev, int new_mtu)
{
bool if_up = netif_running(netdev);
@@ -262,7 +259,7 @@ static void otx2vf_disable_mbox_intr(struct otx2_nic *vf)
free_irq(vector, vf);
}
-static int otx2vf_register_mbox_intr(struct otx2_nic *vf)
+static int otx2vf_register_mbox_intr(struct otx2_nic *vf, bool probe_pf)
{
struct otx2_hw *hw = &vf->hw;
struct msg_req *req;
@@ -286,6 +283,9 @@ static int otx2vf_register_mbox_intr(struct otx2_nic *vf)
otx2_write64(vf, RVU_VF_INT, BIT_ULL(0));
otx2_write64(vf, RVU_VF_INT_ENA_W1S, BIT_ULL(0));
+ if (!probe_pf)
+ return 0;
+
/* Check mailbox communication with PF */
req = otx2_mbox_alloc_msg_ready(&vf->mbox);
if (!req) {
@@ -369,7 +369,7 @@ exit:
return err;
}
-static int otx2vf_open(struct net_device *netdev)
+int otx2vf_open(struct net_device *netdev)
{
struct otx2_nic *vf;
int err;
@@ -388,11 +388,13 @@ static int otx2vf_open(struct net_device *netdev)
return 0;
}
+EXPORT_SYMBOL(otx2vf_open);
-static int otx2vf_stop(struct net_device *netdev)
+int otx2vf_stop(struct net_device *netdev)
{
return otx2_stop(netdev);
}
+EXPORT_SYMBOL(otx2vf_stop);
static netdev_tx_t otx2vf_xmit(struct sk_buff *skb, struct net_device *netdev)
{
@@ -401,8 +403,9 @@ static netdev_tx_t otx2vf_xmit(struct sk_buff *skb, struct net_device *netdev)
struct otx2_snd_queue *sq;
struct netdev_queue *txq;
- /* Check for minimum packet length */
- if (skb->len <= ETH_HLEN) {
+ /* Check for minimum and maximum packet length */
+ if (skb->len <= ETH_HLEN ||
+ (!skb_shinfo(skb)->gso_size && skb->len > vf->max_frs)) {
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -459,6 +462,30 @@ static const struct net_device_ops otx2vf_netdev_ops = {
.ndo_features_check = otx2_features_check,
};
+static int otx2vf_realloc_msix_vectors(struct otx2_nic *vf)
+{
+ struct otx2_hw *hw = &vf->hw;
+ int num_vec, err;
+
+ num_vec = hw->nix_msixoff;
+ num_vec += NIX_LF_CINT_VEC_START + hw->max_queues;
+
+ otx2vf_disable_mbox_intr(vf);
+	pci_free_irq_vectors(hw->pdev);
+ err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
+ if (err < 0) {
+ dev_err(vf->dev, "%s: Failed to realloc %d IRQ vectors\n",
+ __func__, num_vec);
+ return err;
+ }
+
+ err = otx2vf_register_mbox_intr(vf, false);
+ if (err)
+ return err;
+ return 0;
+}
+
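The helper above exists because the initial allocation in probe uses
pci_msix_vec_count(pdev), taken before any NIX LF is attached. Only
after resource attach are hw->nix_msixoff and hw->max_queues known, so
the mailbox interrupt is torn down, the vectors are reallocated to
nix_msixoff + NIX_LF_CINT_VEC_START + one completion vector per queue,
and the mailbox handler is re-registered with probe_pf = false to skip
the PF ready handshake that already succeeded.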
static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
int num_vec = pci_msix_vec_count(pdev);
@@ -508,7 +535,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
vf->pdev = pdev;
vf->dev = dev;
vf->iommu_domain = iommu_get_domain_for_dev(dev);
- vf->intf_down = true;
+ vf->flags |= OTX2_FLAG_INTF_DOWN;
hw = &vf->hw;
hw->pdev = vf->pdev;
hw->rx_queues = qcount;
@@ -545,7 +572,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_free_irq_vectors;
/* Register mailbox interrupt */
- err = otx2vf_register_mbox_intr(vf);
+ err = otx2vf_register_mbox_intr(vf, true);
if (err)
goto err_mbox_destroy;
@@ -554,6 +581,10 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_disable_mbox_intr;
+ err = otx2vf_realloc_msix_vectors(vf);
+ if (err)
+		goto err_detach_rsrc;
+
err = otx2_set_real_num_queues(netdev, qcount, qcount);
if (err)
goto err_detach_rsrc;