aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorBruce Ashfield <bruce.ashfield@gmail.com>2020-10-27 23:16:25 -0400
committerBruce Ashfield <bruce.ashfield@gmail.com>2020-10-27 23:16:25 -0400
commite37198a99ae1e55c5958648495a5d9b96d199f44 (patch)
tree27a0bb887ce2d29ced9433605a28e6229971b7a6
parentbe9874cb74a4afee2e95166d24362d2a2f4f8b40 (diff)
parent6e05fd5bacbaf8833b4a619f5041432e4737e93c (diff)
downloadlinux-yocto-v5.4/standard/preempt-rt/cn96xx.tar.gz
linux-yocto-v5.4/standard/preempt-rt/cn96xx.tar.bz2
linux-yocto-v5.4/standard/preempt-rt/cn96xx.zip
Merge branch 'v5.4/standard/cn96xx-merge' into v5.4/standard/preempt-rt/cn96xxv5.4/standard/preempt-rt/cn96xx
-rw-r--r--drivers/misc/mrvl-loki.c51
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h16
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c46
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_sso.c87
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c87
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h41
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c77
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c12
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c64
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c44
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c82
13 files changed, 346 insertions, 267 deletions
diff --git a/drivers/misc/mrvl-loki.c b/drivers/misc/mrvl-loki.c
index 51609284c493..c2a444ad0bcf 100644
--- a/drivers/misc/mrvl-loki.c
+++ b/drivers/misc/mrvl-loki.c
@@ -25,24 +25,27 @@
#define PSM_GPINT0_ENA_W1C 0x80ULL
#define PSM_GPINT0_ENA_W1S 0xC0ULL
-#define CPRI_INT_MASK 0x1F
-
#define CPRI_IP_AXI_INT_STATUS(a) (0x100ULL | a << 10)
#define CPRI_IP_AXI_INT(a) (0x108ULL | a << 10)
+#define CPRI_MAX_MHAB 3
+#define CONNIP_MAX_INST 5
+#define CPRI_INT_MASK 0x1F
+
+typedef int (*connip_irq_cb_t)(uint32_t instance, uint32_t pss_int);
+
struct mrvl_loki {
struct pci_dev *pdev;
struct msix_entry msix_ent;
void __iomem *psm_gpint;
- void __iomem *cpri_axi[3];
+ void __iomem *cpri_axi[CPRI_MAX_MHAB];
int intr_num;
-
- int (*irq_cb)(uint32_t instance, uint32_t pss_int);
+ connip_irq_cb_t irq_cb;
};
struct mrvl_loki *g_ml;
-int mrvl_loki_register_irq_cb(int (*func)(uint32_t instance, uint32_t pss_int))
+int mrvl_loki_register_irq_cb(connip_irq_cb_t func)
{
if (!g_ml) {
pr_err("Error: mrvl_loki is NULL\n");
@@ -72,24 +75,32 @@ static irqreturn_t mrvl_loki_handler(int irq, void *dev)
uint8_t cpri, mac;
int ret;
+ /* clear GPINT */
val = readq_relaxed(ml->psm_gpint + PSM_GPINT0_SUM_W1C) & CPRI_INT_MASK;
-
- instance = ffs(val) - 1;
- cpri = instance / 2;
- mac = instance % 2;
- pss_int = (u32)readq_relaxed(ml->cpri_axi[cpri] +
- CPRI_IP_AXI_INT_STATUS(mac));
- if (ml->irq_cb) {
- ret = ml->irq_cb(instance, pss_int);
- if (ret < 0)
- dev_err(dev, "Error %d from loki CPRI callback\n", ret);
+ writeq_relaxed((u64)val, ml->psm_gpint + PSM_GPINT0_SUM_W1C);
+
+ for (instance = 0; instance < CONNIP_MAX_INST; instance++) {
+ if (!(val & (1 << instance)))
+ continue;
+ cpri = instance / 2;
+ mac = instance % 2;
+ pss_int = (u32)readq_relaxed(ml->cpri_axi[cpri] +
+ CPRI_IP_AXI_INT_STATUS(mac));
+ if (ml->irq_cb) {
+ ret = ml->irq_cb(instance, pss_int);
+ if (ret < 0)
+ dev_err(dev,
+ "Error %d from loki CPRI callback\n",
+ ret);
+ }
+
+ /* clear AXI_INT */
+ writeq_relaxed((u64)pss_int,
+ ml->cpri_axi[cpri] + CPRI_IP_AXI_INT(mac));
}
- writeq_relaxed(val, ml->psm_gpint + PSM_GPINT0_SUM_W1C);
- writeq_relaxed((u64)pss_int, ml->cpri_axi[cpri] + CPRI_IP_AXI_INT(mac));
-
return IRQ_HANDLED;
-};
+}
static inline void msix_enable_ctrl(struct pci_dev *dev)
{
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index e5dbc92aebe7..e17ebfbaa91d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -86,7 +86,7 @@ struct mbox_msghdr {
#define OTX2_MBOX_REQ_SIG (0xdead)
#define OTX2_MBOX_RSP_SIG (0xbeef)
u16 sig; /* Signature, for validating corrupted msgs */
-#define OTX2_MBOX_VERSION (0x0008)
+#define OTX2_MBOX_VERSION (0x0009)
u16 ver; /* Version of msg's structure for this ID */
u16 next_msgoff; /* Offset of next msg within mailbox region */
int rc; /* Msg process'ed response code */
@@ -195,6 +195,8 @@ M(SSO_WS_CACHE_INV, 0x607, sso_ws_cache_inv, msg_req, msg_rsp) \
M(SSO_GRP_QOS_CONFIG, 0x608, sso_grp_qos_config, sso_grp_qos_cfg, msg_rsp)\
M(SSO_GRP_GET_STATS, 0x609, sso_grp_get_stats, sso_info_req, sso_grp_stats)\
M(SSO_HWS_GET_STATS, 0x610, sso_hws_get_stats, sso_info_req, sso_hws_stats)\
+M(SSO_HW_RELEASE_XAQ, 0x611, sso_hw_release_xaq_aura, \
+ sso_release_xaq, msg_rsp) \
/* TIM mbox IDs (range 0x800 - 0x9FF) */ \
M(TIM_LF_ALLOC, 0x800, tim_lf_alloc, \
tim_lf_alloc_req, tim_lf_alloc_rsp) \
@@ -265,6 +267,8 @@ M(NPC_MCAM_READ_ENTRY, 0x600f, npc_mcam_read_entry, \
npc_mcam_read_entry_rsp) \
M(NPC_SET_PKIND, 0x6010, npc_set_pkind, \
npc_set_pkind, msg_rsp) \
+M(NPC_MCAM_READ_BASE_RULE, 0x6011, npc_read_base_steer_rule, \
+ msg_req, npc_mcam_read_base_rule_rsp) \
/* NIX mbox IDs (range 0x8000 - 0xFFFF) */ \
M(NIX_LF_ALLOC, 0x8000, nix_lf_alloc, \
nix_lf_alloc_req, nix_lf_alloc_rsp) \
@@ -1128,6 +1132,11 @@ struct sso_hw_setconfig {
u16 hwgrps;
};
+struct sso_release_xaq {
+ struct mbox_msghdr hdr;
+ u16 hwgrps;
+};
+
struct sso_info_req {
struct mbox_msghdr hdr;
union {
@@ -1453,6 +1462,11 @@ struct npc_mcam_read_entry_rsp {
u8 enable;
};
+struct npc_mcam_read_base_rule_rsp {
+ struct mbox_msghdr hdr;
+ struct mcam_entry entry;
+};
+
/* TIM mailbox error codes
* Range 801 - 900.
*/
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index af778c461e15..fa288c544a33 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -2950,3 +2950,49 @@ int rvu_mbox_handler_npc_set_pkind(struct rvu *rvu,
return rvu_npc_set_parse_mode(rvu, req->hdr.pcifunc, req->mode,
req->dir, req->pkind);
}
+
+int rvu_mbox_handler_npc_read_base_steer_rule(struct rvu *rvu,
+ struct msg_req *req,
+ struct npc_mcam_read_base_rule_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int index, blkaddr, nixlf, rc = 0;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
+ u8 intf, enable;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ /* Return the channel number in case of PF */
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK)) {
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ rsp->entry.kw[0] = pfvf->rx_chan_base;
+ rsp->entry.kw_mask[0] = 0xFFFULL;
+ goto out;
+ }
+
+ /* Find the pkt steering rule installed by PF to this VF */
+ mutex_lock(&mcam->lock);
+ for (index = 0; index < mcam->bmap_entries; index++) {
+ if (mcam->entry2target_pffunc[index] == pcifunc)
+ goto read_entry;
+ }
+
+ rc = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
+ if (rc < 0) {
+ mutex_unlock(&mcam->lock);
+ goto out;
+ }
+ /* Read the default ucast entry if there is no pkt steering rule */
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf,
+ NIXLF_UCAST_ENTRY);
+read_entry:
+ /* Read the mcam entry */
+ npc_read_mcam_entry(rvu, mcam, blkaddr, index, &rsp->entry, &intf,
+ &enable);
+ mutex_unlock(&mcam->lock);
+out:
+ return rc;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
index 274ace6fb271..8956d98a69a5 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -1157,6 +1157,8 @@ int rvu_mbox_handler_npc_delete_flow(struct rvu *rvu,
if (npc_delete_flow(rvu, iter, pcifunc))
dev_err(rvu->dev, "rule deletion failed for entry:%d",
iter->entry);
+ /* clear the mcam entry target pcifunc */
+ mcam->entry2target_pffunc[iter->entry] = 0x0;
}
return 0;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_sso.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sso.c
index 29da67daf276..6380c4f0e381 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_sso.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sso.c
@@ -585,6 +585,64 @@ int rvu_ssow_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot)
return 0;
}
+static void rvu_sso_deinit_xaq_aura(struct rvu *rvu, int blkaddr, int lf,
+ int hwgrp)
+{
+ u64 reg;
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_AW_STATUS(lf));
+ if (reg & SSO_HWGRP_AW_STS_XAQ_BUFSC_MASK || reg & BIT_ULL(3)) {
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_AW_CFG(lf));
+ reg = (reg & ~SSO_HWGRP_AW_CFG_RWEN) |
+ SSO_HWGRP_AW_CFG_XAQ_BYP_DIS;
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_AW_CFG(lf), reg);
+
+ reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_AW_STATUS(lf));
+ if (reg & SSO_HWGRP_AW_STS_TPTR_VLD) {
+ rvu_poll_reg(rvu, blkaddr, SSO_AF_HWGRPX_AW_STATUS(lf),
+ SSO_HWGRP_AW_STS_NPA_FETCH, true);
+
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_AW_STATUS(lf),
+ SSO_HWGRP_AW_STS_TPTR_VLD);
+ }
+
+ if (rvu_poll_reg(rvu, blkaddr, SSO_AF_HWGRPX_AW_STATUS(lf),
+ SSO_HWGRP_AW_STS_XAQ_BUFSC_MASK, true))
+ dev_warn(rvu->dev,
+ "SSO_HWGRP(%d)_AW_STATUS[XAQ_BUF_CACHED] not cleared",
+ lf);
+ }
+}
+
+int rvu_mbox_handler_sso_hw_release_xaq_aura(struct rvu *rvu,
+ struct sso_release_xaq *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int hwgrp, lf, blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_SSO, pcifunc);
+ if (blkaddr < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ for (hwgrp = 0; hwgrp < req->hwgrps; hwgrp++) {
+ lf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, hwgrp);
+ if (lf < 0)
+ return SSO_AF_ERR_LF_INVALID;
+
+ rvu_sso_deinit_xaq_aura(rvu, blkaddr, lf, hwgrp);
+ /* disable XAQ */
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_AW_CFG(lf),
+ SSO_HWGRP_AW_CFG_LDWB | SSO_HWGRP_AW_CFG_LDT |
+ SSO_HWGRP_AW_CFG_STT);
+ rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_XAQ_AURA(lf), 0);
+ rvu_write64(rvu, blkaddr, SSO_AF_XAQX_GMCTL(lf), 0);
+ }
+
+ return 0;
+}
+
int rvu_mbox_handler_sso_hw_setconfig(struct rvu *rvu,
struct sso_hw_setconfig *req,
struct msg_rsp *rsp)
@@ -616,34 +674,7 @@ int rvu_mbox_handler_sso_hw_setconfig(struct rvu *rvu,
if (lf < 0)
return SSO_AF_ERR_LF_INVALID;
- reg = rvu_read64(rvu, blkaddr, SSO_AF_HWGRPX_AW_STATUS(lf));
- if (reg & SSO_HWGRP_AW_STS_XAQ_BUFSC_MASK || reg & BIT_ULL(3)) {
- reg = rvu_read64(rvu, blkaddr,
- SSO_AF_HWGRPX_AW_CFG(lf));
- reg = (reg & ~SSO_HWGRP_AW_CFG_RWEN) |
- SSO_HWGRP_AW_CFG_XAQ_BYP_DIS;
- rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_AW_CFG(lf),
- reg);
-
- reg = rvu_read64(rvu, blkaddr,
- SSO_AF_HWGRPX_AW_STATUS(lf));
- if (reg & SSO_HWGRP_AW_STS_TPTR_VLD) {
- rvu_poll_reg(rvu, blkaddr,
- SSO_AF_HWGRPX_AW_STATUS(lf),
- SSO_HWGRP_AW_STS_NPA_FETCH, true);
-
- rvu_write64(rvu, blkaddr,
- SSO_AF_HWGRPX_AW_STATUS(lf),
- SSO_HWGRP_AW_STS_TPTR_VLD);
- }
-
- if (rvu_poll_reg(rvu, blkaddr,
- SSO_AF_HWGRPX_AW_STATUS(lf),
- SSO_HWGRP_AW_STS_XAQ_BUFSC_MASK, true))
- dev_warn(rvu->dev,
- "SSO_HWGRP(%d)_AW_STATUS[XAQ_BUF_CACHED] not cleared",
- lf);
- }
+ rvu_sso_deinit_xaq_aura(rvu, blkaddr, lf, hwgrp);
rvu_write64(rvu, blkaddr, SSO_AF_HWGRPX_XAQ_AURA(lf),
npa_aura_id);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 090b67fdb0c6..eda743f6aa17 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -17,10 +17,31 @@
#include "otx2_common.h"
#include "otx2_struct.h"
-static inline void otx2_nix_rq_op_stats(struct queue_stats *stats,
- struct otx2_nic *pfvf, int qidx);
-static inline void otx2_nix_sq_op_stats(struct queue_stats *stats,
- struct otx2_nic *pfvf, int qidx);
+static void otx2_nix_rq_op_stats(struct queue_stats *stats,
+ struct otx2_nic *pfvf, int qidx)
+{
+ u64 incr = (u64)qidx << 32;
+ u64 *ptr;
+
+ ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_OCTS);
+ stats->bytes = otx2_atomic64_add(incr, ptr);
+
+ ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_PKTS);
+ stats->pkts = otx2_atomic64_add(incr, ptr);
+}
+
+static void otx2_nix_sq_op_stats(struct queue_stats *stats,
+ struct otx2_nic *pfvf, int qidx)
+{
+ u64 incr = (u64)qidx << 32;
+ u64 *ptr;
+
+ ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_OCTS);
+ stats->bytes = otx2_atomic64_add(incr, ptr);
+
+ ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_PKTS);
+ stats->pkts = otx2_atomic64_add(incr, ptr);
+}
void otx2_update_lmac_stats(struct otx2_nic *pfvf)
{
@@ -110,10 +131,11 @@ void otx2_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
- struct otx2_dev_stats *dev_stats = &pfvf->hw.dev_stats;
+ struct otx2_dev_stats *dev_stats;
otx2_get_dev_stats(pfvf);
+ dev_stats = &pfvf->hw.dev_stats;
stats->rx_bytes = dev_stats->rx_bytes;
stats->rx_packets = dev_stats->rx_frames;
stats->rx_dropped = dev_stats->rx_drops;
@@ -125,8 +147,8 @@ void otx2_get_stats64(struct net_device *netdev,
}
EXPORT_SYMBOL(otx2_get_stats64);
-/* Sync MAC address with RVU */
-int otx2_hw_set_mac_addr(struct otx2_nic *pfvf, u8 *mac)
+/* Sync MAC address with RVU AF */
+static int otx2_hw_set_mac_addr(struct otx2_nic *pfvf, u8 *mac)
{
struct nix_set_mac_addr *req;
int err;
@@ -431,10 +453,7 @@ void otx2_get_mac_from_af(struct net_device *netdev)
if (err)
dev_warn(pfvf->dev, "Failed to read mac from hardware\n");
- /* Normally AF should provide mac addresses for both PFs and CGX mapped
- * VFs which means random mac gets generated either in case of error
- * or LBK netdev.
- */
+ /* If AF doesn't provide a valid MAC, generate a random one */
if (!is_valid_ether_addr(netdev->dev_addr))
eth_hw_addr_random(netdev);
}
@@ -442,8 +461,8 @@ EXPORT_SYMBOL(otx2_get_mac_from_af);
int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
{
- struct nix_txschq_config *req;
struct otx2_hw *hw = &pfvf->hw;
+ struct nix_txschq_config *req;
u64 schq, parent;
req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox);
@@ -456,13 +475,12 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
schq = hw->txschq_list[lvl][0];
/* Set topology e.t.c configuration */
if (lvl == NIX_TXSCH_LVL_SMQ) {
- /* Set min and max Tx packet lengths */
req->reg[0] = NIX_AF_SMQX_CFG(schq);
req->regval[0] = ((OTX2_MAX_MTU + OTX2_ETH_HLEN) << 8) |
OTX2_MIN_MTU;
req->regval[0] |= (0x20ULL << 51) | (0x80ULL << 39) |
- (0x2ULL << 36);
+ (0x2ULL << 36);
req->num_regs++;
/* MDQ config */
parent = hw->txschq_list[NIX_TXSCH_LVL_TL4][0];
@@ -524,7 +542,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
int otx2_txsch_alloc(struct otx2_nic *pfvf)
{
struct nix_txsch_alloc_req *req;
- int lvl, err;
+ int lvl;
/* Get memory to put this msg */
req = otx2_mbox_alloc_msg_nix_txsch_alloc(&pfvf->mbox);
@@ -535,10 +553,7 @@ int otx2_txsch_alloc(struct otx2_nic *pfvf)
for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++)
req->schq[lvl] = 1;
- err = otx2_sync_mbox_msg(&pfvf->mbox);
- if (err)
- return err;
- return 0;
+ return otx2_sync_mbox_msg(&pfvf->mbox);
}
int otx2_txschq_stop(struct otx2_nic *pfvf)
@@ -854,7 +869,6 @@ int otx2_config_nix_queues(struct otx2_nic *pfvf)
}
/* Initialize work queue for receive buffer refill */
-
pfvf->refill_wrk = devm_kcalloc(pfvf->dev, pfvf->qset.cq_cnt,
sizeof(struct refill_work), GFP_KERNEL);
if (!pfvf->refill_wrk)
@@ -1223,7 +1237,7 @@ int otx2_config_npa(struct otx2_nic *pfvf)
struct otx2_qset *qset = &pfvf->qset;
struct npa_lf_alloc_req *npalf;
struct otx2_hw *hw = &pfvf->hw;
- int aura_cnt, err;
+ int aura_cnt;
/* Pool - Stack of free buffer pointers
* Aura - Alloc/frees pointers from/to pool for NIX DMA.
@@ -1247,10 +1261,7 @@ int otx2_config_npa(struct otx2_nic *pfvf)
aura_cnt = ilog2(roundup_pow_of_two(hw->pool_cnt));
npalf->aura_sz = (aura_cnt >= ilog2(128)) ? (aura_cnt - 6) : 1;
- err = otx2_sync_mbox_msg(&pfvf->mbox);
- if (err)
- return err;
- return 0;
+ return otx2_sync_mbox_msg(&pfvf->mbox);
}
int otx2_detach_resources(struct mbox *mbox)
@@ -1375,32 +1386,6 @@ int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable)
return otx2_sync_mbox_msg(&pfvf->mbox);
}
-static inline void otx2_nix_rq_op_stats(struct queue_stats *stats,
- struct otx2_nic *pfvf, int qidx)
-{
- u64 incr = (u64)qidx << 32;
- u64 *ptr;
-
- ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_OCTS);
- stats->bytes = otx2_atomic64_add(incr, ptr);
-
- ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_PKTS);
- stats->pkts = otx2_atomic64_add(incr, ptr);
-}
-
-static inline void otx2_nix_sq_op_stats(struct queue_stats *stats,
- struct otx2_nic *pfvf, int qidx)
-{
- u64 incr = (u64)qidx << 32;
- u64 *ptr;
-
- ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_OCTS);
- stats->bytes = otx2_atomic64_add(incr, ptr);
-
- ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_PKTS);
- stats->pkts = otx2_atomic64_add(incr, ptr);
-}
-
/* Mbox message handlers */
void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
struct cgx_stats_rsp *rsp)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index e23839c90349..3f10f7e41942 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Marvell OcteonTx2 RVU Ethernet driver
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 RVU Ethernet driver
*
* Copyright (C) 2018 Marvell International Ltd.
*
@@ -25,6 +25,8 @@
#define PCI_DEVID_OCTEONTX2_RVU_VF 0xA064
#define PCI_DEVID_OCTEONTX2_RVU_AFVF 0xA0F8
+#define PCI_SUBSYS_DEVID_96XX_RVU_PFVF 0xB200
+
/* PCI BAR nos */
#define PCI_CFG_REG_BAR_NUM 2
#define PCI_MBOX_BAR_NUM 4
@@ -110,7 +112,7 @@ enum nix_stat_lf_rx {
RX_STATS_ENUM_LAST,
};
-struct otx2_dev_stats {
+struct otx2_dev_stats {
u64 rx_bytes;
u64 rx_frames;
u64 rx_ucast_frames;
@@ -136,7 +138,7 @@ struct otx2_drv_stats {
atomic_t rx_other_errs;
};
-struct mbox {
+struct mbox {
struct otx2_mbox mbox;
struct work_struct mbox_wrk;
struct otx2_mbox mbox_up;
@@ -144,8 +146,8 @@ struct mbox {
struct otx2_nic *pfvf;
void *bbuf_base; /* Bounce buffer for mbox memory */
struct mutex lock; /* serialize mailbox access */
- int num_msgs; /*mbox number of messages*/
- int up_num_msgs;/* mbox_up number of messages*/
+ int num_msgs; /* mbox number of messages */
+ int up_num_msgs; /* mbox_up number of messages */
};
struct otx2_hw {
@@ -179,7 +181,7 @@ struct otx2_hw {
u8 lso_tsov6_idx;
u8 hw_tso;
- /* MSI-X*/
+ /* MSI-X */
u8 cint_cnt; /* CQ interrupt count */
u16 npa_msixoff; /* Offset of NPA vectors */
u16 nix_msixoff; /* Offset of NIX vectors */
@@ -304,8 +306,8 @@ struct otx2_nic {
struct workqueue_struct *flr_wq;
struct flr_work *flr_wrk;
struct refill_work *refill_wrk;
- struct work_struct otx2_rx_mode_work;
- struct workqueue_struct *otx2_ndo_wq;
+ struct workqueue_struct *otx2_wq;
+ struct work_struct rx_mode_work;
/* Ethtool stuff */
u32 msg_enable;
@@ -351,6 +353,12 @@ static inline bool is_96xx_A0(struct pci_dev *pdev)
return (pdev->revision == 0x00);
}
+static inline bool is_96xx_B0(struct pci_dev *pdev)
+{
+ return (pdev->revision == 0x01) &&
+ (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF);
+}
+
static inline bool is_95xx_A0(struct pci_dev *pdev)
{
return (pdev->revision == 0x10) || (pdev->revision == 0x11);
@@ -385,6 +393,7 @@ static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
}
}
+/* Register read/write APIs */
static inline void __iomem *otx2_get_regaddr(struct otx2_nic *nic, u64 offset)
{
u64 blkaddr;
@@ -399,7 +408,7 @@ static inline void __iomem *otx2_get_regaddr(struct otx2_nic *nic, u64 offset)
default:
blkaddr = BLKADDR_RVUM;
break;
- };
+ }
offset &= ~(RVU_FUNC_BLKADDR_MASK << RVU_FUNC_BLKADDR_SHIFT);
offset |= (blkaddr << RVU_FUNC_BLKADDR_SHIFT);
@@ -407,7 +416,6 @@ static inline void __iomem *otx2_get_regaddr(struct otx2_nic *nic, u64 offset)
return nic->reg_base + offset;
}
-/* Register read/write APIs */
static inline void otx2_write64(struct otx2_nic *nic, u64 offset, u64 val)
{
void __iomem *addr = otx2_get_regaddr(nic, offset);
@@ -425,8 +433,8 @@ static inline u64 otx2_read64(struct otx2_nic *nic, u64 offset)
/* Mbox bounce buffer APIs */
static inline int otx2_mbox_bbuf_init(struct mbox *mbox, struct pci_dev *pdev)
{
- struct otx2_mbox_dev *mdev;
struct otx2_mbox *otx2_mbox;
+ struct otx2_mbox_dev *mdev;
mbox->bbuf_base = devm_kmalloc(&pdev->dev, MBOX_SIZE, GFP_KERNEL);
if (!mbox->bbuf_base)
@@ -521,10 +529,11 @@ static inline u64 otx2_lmt_flush(uint64_t addr)
: [rs]"r"(addr));
return result;
}
+
#else
#define otx2_write128(lo, hi, addr)
+#define otx2_atomic64_add(incr, ptr) ({ *(ptr) += incr; })
#define otx2_read128(addr) ({ 0; })
-#define otx2_atomic64_add(incr, ptr) ({ 0; })
#define otx2_lmt_flush(addr) ({ 0; })
#endif
@@ -642,7 +651,7 @@ MBOX_UP_CGX_MESSAGES
#undef M
/* Time to wait before watchdog kicks off */
-#define OTX2_TX_TIMEOUT (60 * HZ)
+#define OTX2_TX_TIMEOUT (100 * HZ)
#define RVU_PFVF_PF_SHIFT 10
#define RVU_PFVF_PF_MASK 0x3F
@@ -686,8 +695,6 @@ static inline void otx2_dma_unmap_page(struct otx2_nic *pfvf,
/* MSI-X APIs */
void otx2_free_cints(struct otx2_nic *pfvf, int n);
void otx2_set_cints_affinity(struct otx2_nic *pfvf);
-
-int otx2_hw_set_mac_addr(struct otx2_nic *pfvf, u8 *mac);
int otx2_set_mac_address(struct net_device *netdev, void *p);
int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu);
void otx2_tx_timeout(struct net_device *netdev);
@@ -754,8 +761,6 @@ void otx2vf_set_ethtool_ops(struct net_device *netdev);
int otx2_open(struct net_device *netdev);
int otx2_stop(struct net_device *netdev);
-int otx2vf_open(struct net_device *netdev);
-int otx2vf_stop(struct net_device *netdev);
int otx2_set_real_num_queues(struct net_device *netdev,
int tx_queues, int rx_queues);
int otx2_set_npc_parse_mode(struct otx2_nic *pfvf, bool unbind);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index 17b02bf62cff..c9ae7d83b9ac 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -90,36 +90,6 @@ static const unsigned int otx2_n_dev_stats = ARRAY_SIZE(otx2_dev_stats);
static const unsigned int otx2_n_drv_stats = ARRAY_SIZE(otx2_drv_stats);
static const unsigned int otx2_n_queue_stats = ARRAY_SIZE(otx2_queue_stats);
-int __weak otx2vf_open(struct net_device *netdev)
-{
- return 0;
-}
-
-int __weak otx2vf_stop(struct net_device *netdev)
-{
- return 0;
-}
-
-static void otx2_dev_open(struct net_device *netdev)
-{
- struct otx2_nic *pfvf = netdev_priv(netdev);
-
- if (pfvf->pcifunc & RVU_PFVF_FUNC_MASK)
- otx2vf_open(netdev);
- else
- otx2_open(netdev);
-}
-
-static void otx2_dev_stop(struct net_device *netdev)
-{
- struct otx2_nic *pfvf = netdev_priv(netdev);
-
- if (pfvf->pcifunc & RVU_PFVF_FUNC_MASK)
- otx2vf_stop(netdev);
- else
- otx2_stop(netdev);
-}
-
static void otx2_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
@@ -356,7 +326,7 @@ static int otx2_set_channels(struct net_device *dev,
return -EINVAL;
if (if_up)
- otx2_dev_stop(dev);
+ dev->netdev_ops->ndo_stop(dev);
err = otx2_set_real_num_queues(dev, channel->tx_count,
channel->rx_count);
@@ -369,7 +339,7 @@ static int otx2_set_channels(struct net_device *dev,
fail:
if (if_up)
- otx2_dev_open(dev);
+ dev->netdev_ops->ndo_open(dev);
netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n",
pfvf->hw.tx_queues, pfvf->hw.rx_queues);
@@ -446,8 +416,7 @@ static int otx2_set_ringparam(struct net_device *netdev,
return -EINVAL;
/* Permitted lengths are 16 64 256 1K 4K 16K 64K 256K 1M */
- rx_count = clamp_t(u32, ring->rx_pending,
- Q_COUNT(Q_SIZE_MIN), Q_COUNT(Q_SIZE_MAX));
+ rx_count = ring->rx_pending;
/* On some silicon variants a skid or reserved CQEs are
* needed to avoid CQ overflow.
*/
@@ -456,7 +425,7 @@ static int otx2_set_ringparam(struct net_device *netdev,
rx_count = Q_COUNT(Q_SIZE(rx_count, 3));
/* Due pipelining impact minimum 2000 unused SQ CQE's
- * need to maintain to avoid CQ overflow, hence the
+ * need to be maintained to avoid CQ overflow, hence the
* minimum 4K size.
*/
tx_count = clamp_t(u32, ring->tx_pending,
@@ -467,14 +436,15 @@ static int otx2_set_ringparam(struct net_device *netdev,
return 0;
if (if_up)
- otx2_dev_stop(netdev);
+ netdev->netdev_ops->ndo_stop(netdev);
/* Assigned to the nearest possible exponent. */
qs->sqe_cnt = tx_count;
qs->rqe_cnt = rx_count;
if (if_up)
- otx2_dev_open(netdev);
+ netdev->netdev_ops->ndo_open(netdev);
+
return 0;
}
@@ -921,7 +891,9 @@ static int otx2vf_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc)
static u32 otx2_get_rxfh_key_size(struct net_device *netdev)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
- struct otx2_rss_info *rss = &pfvf->hw.rss_info;
+ struct otx2_rss_info *rss;
+
+ rss = &pfvf->hw.rss_info;
return sizeof(rss->key);
}
@@ -930,17 +902,19 @@ static u32 otx2_get_rxfh_indir_size(struct net_device *dev)
{
struct otx2_nic *pfvf = netdev_priv(dev);
- return pfvf->hw.rss_info.rss_size;
+ return sizeof(pfvf->hw.rss_info.ind_tbl);
}
-/* Get RSS configuration*/
+/* Get RSS configuration */
static int otx2_get_rxfh(struct net_device *dev, u32 *indir,
u8 *hkey, u8 *hfunc)
{
struct otx2_nic *pfvf = netdev_priv(dev);
- struct otx2_rss_info *rss = &pfvf->hw.rss_info;
+ struct otx2_rss_info *rss;
int idx;
+ rss = &pfvf->hw.rss_info;
+
if (indir) {
for (idx = 0; idx < rss->rss_size; idx++)
indir[idx] = rss->ind_tbl[idx];
@@ -1636,7 +1610,15 @@ static void otx2vf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
data += ETH_GSTRING_LEN;
}
+ for (stats = 0; stats < otx2_n_drv_stats; stats++) {
+ memcpy(data, otx2_drv_stats[stats].name, ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+
otx2_get_qset_strings(vf, &data, 0);
+
+ strcpy(data, "reset_count");
+ data += ETH_GSTRING_LEN;
}
static void otx2vf_get_ethtool_stats(struct net_device *netdev,
@@ -1646,13 +1628,16 @@ static void otx2vf_get_ethtool_stats(struct net_device *netdev,
int stat;
otx2_get_dev_stats(vf);
+ for (stat = 0; stat < otx2_n_dev_stats; stat++)
+ *(data++) = ((u64 *)&vf->hw.dev_stats)
+ [otx2_dev_stats[stat].index];
- for (stat = 0; stat < otx2_n_dev_stats; stat++) {
- *data = ((u64 *)&vf->hw.dev_stats)[otx2_dev_stats[stat].index];
- data++;
- }
+ for (stat = 0; stat < otx2_n_drv_stats; stat++)
+ *(data++) = atomic_read(&((atomic_t *)&vf->hw.drv_stats)
+ [otx2_drv_stats[stat].index]);
otx2_get_qset_stats(vf, stats, &data);
+ *(data++) = vf->reset_count;
}
static int otx2vf_get_sset_count(struct net_device *netdev, int sset)
@@ -1698,6 +1683,8 @@ static const struct ethtool_ops otx2vf_ethtool_ops = {
.set_ringparam = otx2_set_ringparam,
.get_coalesce = otx2_get_coalesce,
.set_coalesce = otx2_set_coalesce,
+ .get_msglevel = otx2_get_msglevel,
+ .set_msglevel = otx2_set_msglevel,
.get_pauseparam = otx2_get_pauseparam,
.set_pauseparam = otx2_set_pauseparam,
.get_link_ksettings = otx2vf_get_link_ksettings,
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
index 0fdc19a87a10..74897c2c97fc 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
@@ -50,20 +50,18 @@ int otx2_mcam_flow_init(struct otx2_nic *pf)
return -ENOMEM;
/* register work queue for ndo callbacks */
- pf->otx2_ndo_wq = create_singlethread_workqueue("otx2_ndo_work_queue");
- if (!pf->otx2_ndo_wq)
+ pf->otx2_wq = create_singlethread_workqueue("otx2_ndo_work_queue");
+ if (!pf->otx2_wq)
return -ENOMEM;
- INIT_WORK(&pf->otx2_rx_mode_work, otx2_do_set_rx_mode);
+ INIT_WORK(&pf->rx_mode_work, otx2_do_set_rx_mode);
return 0;
}
void otx2_mcam_flow_del(struct otx2_nic *pf)
{
otx2_destroy_mcam_flows(pf);
- if (pf->otx2_ndo_wq) {
- flush_workqueue(pf->otx2_ndo_wq);
- destroy_workqueue(pf->otx2_ndo_wq);
- }
+ if (pf->otx2_wq)
+ destroy_workqueue(pf->otx2_wq);
}
int otx2_alloc_mcam_entries(struct otx2_nic *pfvf)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 13d3e802778b..bc54f9d14fd6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -92,9 +92,9 @@ static void otx2_flr_wq_destroy(struct otx2_nic *pf)
{
if (!pf->flr_wq)
return;
- flush_workqueue(pf->flr_wq);
destroy_workqueue(pf->flr_wq);
pf->flr_wq = NULL;
+ devm_kfree(pf->dev, pf->flr_wrk);
}
static void otx2_flr_handler(struct work_struct *work)
@@ -142,7 +142,7 @@ static irqreturn_t otx2_pf_flr_intr_handler(int irq, void *pf_irq)
intr = otx2_read64(pf, RVU_PF_VFFLR_INTX(reg));
if (!intr)
continue;
- start_vf = 64 * reg;
+ start_vf = 64 * reg;
for (vf = 0; vf < 64; vf++) {
if (!(intr & BIT_ULL(vf)))
continue;
@@ -197,7 +197,7 @@ static int otx2_register_flr_me_intr(struct otx2_nic *pf)
otx2_pf_me_intr_handler, 0, irq_name, pf);
if (ret) {
dev_err(pf->dev,
- "RVUPF: IRQ registration failed for ME\n");
+ "RVUPF: IRQ registration failed for ME0\n");
}
/* Register FLR interrupt handler */
@@ -207,7 +207,7 @@ static int otx2_register_flr_me_intr(struct otx2_nic *pf)
otx2_pf_flr_intr_handler, 0, irq_name, pf);
if (ret) {
dev_err(pf->dev,
- "RVUPF: IRQ registration failed for FLR\n");
+ "RVUPF: IRQ registration failed for FLR0\n");
return ret;
}
@@ -627,7 +627,6 @@ static void otx2_pfvf_mbox_destroy(struct otx2_nic *pf)
return;
if (pf->mbox_pfvf_wq) {
- flush_workqueue(pf->mbox_pfvf_wq);
destroy_workqueue(pf->mbox_pfvf_wq);
pf->mbox_pfvf_wq = NULL;
}
@@ -712,7 +711,7 @@ static int otx2_register_pfvf_mbox_intr(struct otx2_nic *pf)
0, irq_name, pf);
if (err) {
dev_err(pf->dev,
- "RVUPF: IRQ registration failed for PFAF mbox1 irq\n");
+ "RVUPF: IRQ registration failed for PFVF mbox1 irq\n");
return err;
}
}
@@ -1036,7 +1035,6 @@ static void otx2_pfaf_mbox_destroy(struct otx2_nic *pf)
struct mbox *mbox = &pf->mbox;
if (pf->mbox_wq) {
- flush_workqueue(pf->mbox_wq);
destroy_workqueue(pf->mbox_wq);
pf->mbox_wq = NULL;
}
@@ -1093,7 +1091,7 @@ static int otx2_pfaf_mbox_init(struct otx2_nic *pf)
return 0;
exit:
- destroy_workqueue(pf->mbox_wq);
+ otx2_pfaf_mbox_destroy(pf);
return err;
}
@@ -1273,12 +1271,11 @@ static void otx2_disable_napi(struct otx2_nic *pf)
static void otx2_free_cq_res(struct otx2_nic *pf)
{
struct otx2_qset *qset = &pf->qset;
- struct mbox *mbox = &pf->mbox;
struct otx2_cq_queue *cq;
int qidx;
- /* Disable CQs*/
- otx2_ctx_disable(mbox, NIX_AQ_CTYPE_CQ, false);
+ /* Disable CQs */
+ otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_CQ, false);
for (qidx = 0; qidx < qset->cq_cnt; qidx++) {
cq = &qset->cq[qidx];
qmem_free(pf->dev, cq->cqe);
@@ -1288,12 +1285,11 @@ static void otx2_free_cq_res(struct otx2_nic *pf)
static void otx2_free_sq_res(struct otx2_nic *pf)
{
struct otx2_qset *qset = &pf->qset;
- struct mbox *mbox = &pf->mbox;
struct otx2_snd_queue *sq;
int qidx;
/* Disable SQs */
- otx2_ctx_disable(mbox, NIX_AQ_CTYPE_SQ, false);
+ otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_SQ, false);
/* Free SQB pointers */
otx2_sq_free_sqbs(pf);
for (qidx = 0; qidx < pf->hw.tx_queues; qidx++) {
@@ -1488,12 +1484,11 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
}
static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
-
{
struct otx2_nic *pf = netdev_priv(netdev);
- struct otx2_snd_queue *sq;
int qidx = skb_get_queue_mapping(skb);
- struct netdev_queue *txq = netdev_get_tx_queue(netdev, qidx);
+ struct otx2_snd_queue *sq;
+ struct netdev_queue *txq;
/* Check for minimum and maximum packet length */
if (skb->len <= ETH_HLEN ||
@@ -1503,10 +1498,9 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
}
sq = &pf->qset.sq[qidx];
+ txq = netdev_get_tx_queue(netdev, qidx);
- if (netif_tx_queue_stopped(txq)) {
- dev_kfree_skb(skb);
- } else if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) {
+ if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) {
netif_tx_stop_queue(txq);
/* Check again, incase SQBs got freed up */
@@ -1583,7 +1577,7 @@ int otx2_open(struct net_device *netdev)
napi_enable(&cq_poll->napi);
}
- /* Set default MTU in HW */
+ /* Set maximum frame size allowed in HW */
err = otx2_hw_set_mtu(pf, netdev->mtu);
if (err)
goto err_disable_napi;
@@ -1699,6 +1693,7 @@ err_disable_napi:
err_free_mem:
kfree(qset->sq);
kfree(qset->cq);
+ kfree(qset->rq);
kfree(qset->napi);
return err;
}
@@ -1780,13 +1775,12 @@ static void otx2_set_rx_mode(struct net_device *netdev)
{
struct otx2_nic *pf = netdev_priv(netdev);
- queue_work(pf->otx2_ndo_wq, &pf->otx2_rx_mode_work);
+ queue_work(pf->otx2_wq, &pf->rx_mode_work);
}
void otx2_do_set_rx_mode(struct work_struct *work)
{
- struct otx2_nic *pf = container_of(work, struct otx2_nic,
- otx2_rx_mode_work);
+ struct otx2_nic *pf = container_of(work, struct otx2_nic, rx_mode_work);
struct net_device *netdev = pf->netdev;
struct nix_rx_mode *req;
@@ -1831,9 +1825,9 @@ static void otx2_reset_task(struct work_struct *work)
static int otx2_set_features(struct net_device *netdev,
netdev_features_t features)
{
- struct otx2_nic *pf = netdev_priv(netdev);
netdev_features_t changed = features ^ netdev->features;
bool ntuple = !!(features & NETIF_F_NTUPLE);
+ struct otx2_nic *pf = netdev_priv(netdev);
if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev))
return otx2_cgx_config_loopback(pf,
@@ -2273,6 +2267,9 @@ static int otx2_realloc_msix_vectors(struct otx2_nic *pf)
struct otx2_hw *hw = &pf->hw;
int num_vec, err;
+ /* NPA interrupts are not registered, so alloc only
+ * up to NIX vector offset.
+ */
num_vec = hw->nix_msixoff;
num_vec += NIX_LF_CINT_VEC_START + hw->max_queues;
@@ -2286,10 +2283,7 @@ static int otx2_realloc_msix_vectors(struct otx2_nic *pf)
return err;
}
- err = otx2_register_mbox_intr(pf, false);
- if (err)
- return err;
- return 0;
+ return otx2_register_mbox_intr(pf, false);
}
static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -2321,7 +2315,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
if (err) {
- dev_err(dev, "Unable to set consistent DMA mask\n");
+ dev_err(dev, "DMA mask config failed, abort\n");
goto err_release_regions;
}
@@ -2354,13 +2348,17 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
num_vec = pci_msix_vec_count(pdev);
hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
GFP_KERNEL);
- if (!hw->irq_name)
+ if (!hw->irq_name) {
+ err = -ENOMEM;
goto err_free_netdev;
+ }
hw->affinity_mask = devm_kcalloc(&hw->pdev->dev, num_vec,
sizeof(cpumask_var_t), GFP_KERNEL);
- if (!hw->affinity_mask)
+ if (!hw->affinity_mask) {
+ err = -ENOMEM;
goto err_free_netdev;
+ }
/* Map CSRs */
pf->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
@@ -2627,9 +2625,10 @@ static int otx2_sriov_disable(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct otx2_nic *pf = netdev_priv(netdev);
+ int numvfs = pci_num_vf(pdev);
int i;
- if (!pci_num_vf(pdev))
+ if (!numvfs)
return 0;
pci_disable_sriov(pdev);
@@ -2679,7 +2678,6 @@ static void otx2_remove(struct pci_dev *pdev)
otx2_cgx_config_linkevents(pf, false);
unregister_netdev(netdev);
-
otx2_sriov_disable(pf->pdev);
otx2_ptp_destroy(pf);
otx2_mcam_flow_del(pf);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index 2e43f41651f0..29db3686a928 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -20,7 +20,7 @@
#define CQE_ADDR(CQ, idx) ((CQ)->cqe_base + ((CQ)->cqe_size * (idx)))
-static inline struct nix_cqe_hdr_s *otx2_get_next_cqe(struct otx2_cq_queue *cq)
+static struct nix_cqe_hdr_s *otx2_get_next_cqe(struct otx2_cq_queue *cq)
{
struct nix_cqe_hdr_s *cqe_hdr;
@@ -34,7 +34,7 @@ static inline struct nix_cqe_hdr_s *otx2_get_next_cqe(struct otx2_cq_queue *cq)
return cqe_hdr;
}
-static inline unsigned int frag_num(unsigned int i)
+static unsigned int frag_num(unsigned int i)
{
#ifdef __BIG_ENDIAN
return (i & ~3) + 3 - (i & 3);
@@ -87,19 +87,14 @@ static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
struct sk_buff *skb = NULL;
struct sg_list *sg;
- if (unlikely(snd_comp->status)) {
- /* tx packet error handling*/
- if (netif_msg_tx_err(pfvf)) {
- netdev_info(pfvf->netdev,
- "TX%d: Error in send CQ status:%x\n",
- cq->cint_idx, snd_comp->status);
- }
- }
+ if (unlikely(snd_comp->status) && netif_msg_tx_err(pfvf))
+ net_err_ratelimited("%s: TX%d: Error in send CQ status:%x\n",
+ pfvf->netdev->name, cq->cint_idx,
+ snd_comp->status);
/* Barrier, so that update to sq by other cpus is visible */
smp_mb();
sg = &sq->sg[snd_comp->sqe_id];
-
skb = (struct sk_buff *)sg->skb;
if (unlikely(!skb))
return;
@@ -236,8 +231,8 @@ static void otx2_free_rcv_seg(struct otx2_nic *pfvf, struct nix_cqe_rx_s *cqe,
}
}
-static inline bool otx2_check_rcv_errors(struct otx2_nic *pfvf,
- struct nix_cqe_rx_s *cqe, int qidx)
+static bool otx2_check_rcv_errors(struct otx2_nic *pfvf,
+ struct nix_cqe_rx_s *cqe, int qidx)
{
struct otx2_drv_stats *stats = &pfvf->hw.drv_stats;
struct nix_rx_parse_s *parse = &cqe->parse;
@@ -333,9 +328,9 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
napi_gro_frags(napi);
}
-static inline int otx2_rx_napi_handler(struct otx2_nic *pfvf,
- struct napi_struct *napi,
- struct otx2_cq_queue *cq, int budget)
+static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
+ struct napi_struct *napi,
+ struct otx2_cq_queue *cq, int budget)
{
struct nix_cqe_rx_s *cqe;
int processed_cqe = 0;
@@ -351,7 +346,6 @@ static inline int otx2_rx_napi_handler(struct otx2_nic *pfvf,
return 0;
break;
}
-
cq->cq_head++;
cq->cq_head &= (cq->cqe_cnt - 1);
@@ -394,8 +388,8 @@ static inline int otx2_rx_napi_handler(struct otx2_nic *pfvf,
return processed_cqe;
}
-static inline int otx2_tx_napi_handler(struct otx2_nic *pfvf,
- struct otx2_cq_queue *cq, int budget)
+static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
+ struct otx2_cq_queue *cq, int budget)
{
int tx_pkts = 0, tx_bytes = 0;
struct nix_cqe_tx_s *cqe;
@@ -480,7 +474,7 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
return workdone;
}
-static inline void otx2_sqe_flush(struct otx2_snd_queue *sq, int size)
+static void otx2_sqe_flush(struct otx2_snd_queue *sq, int size)
{
u64 status;
@@ -612,7 +606,7 @@ static void otx2_sqe_add_hdr(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
int proto = 0;
/* Check if SQE was framed before, if yes then no need to
- * set these constants again anf again.
+ * set these constants again and again.
*/
if (!sqe_hdr->total) {
/* Don't free Tx buffers to Aura */
@@ -820,8 +814,8 @@ static void otx2_sq_append_tso(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
}
}
-static inline bool is_hw_tso_supported(struct otx2_nic *pfvf,
- struct sk_buff *skb)
+static bool is_hw_tso_supported(struct otx2_nic *pfvf,
+ struct sk_buff *skb)
{
int payload_len, last_seg_size;
@@ -834,10 +828,14 @@ static inline bool is_hw_tso_supported(struct otx2_nic *pfvf,
* segment is shorter than 16 bytes, some header fields may not
* be correctly modified, hence don't offload such TSO segments.
*/
+ if (!is_96xx_B0(pfvf->pdev))
+ return true;
+
payload_len = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
last_seg_size = payload_len % skb_shinfo(skb)->gso_size;
if (last_seg_size && last_seg_size < 16)
return false;
+
return true;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
index 6cd42d9e63a9..ef4c47697b49 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -54,6 +54,10 @@
*/
#define CQ_TIMER_THRESH_DEFAULT 1 /* 1 usec */
#define CQ_TIMER_THRESH_MAX 25 /* 25 usec */
+
+/* Min number of CQs (of the ones mapped to this CINT)
+ * with valid CQEs.
+ */
#define CQ_QCOUNT_DEFAULT 1
struct queue_stats {
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index e41fb0078071..b4e0d82fc221 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -31,31 +31,11 @@ MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, otx2_vf_id_table);
-/**
- * RVU VF Interrupt Vector Enumeration
- */
+/* RVU VF Interrupt Vector Enumeration */
enum {
RVU_VF_INT_VEC_MBOX = 0x0,
};
-static int otx2vf_change_mtu(struct net_device *netdev, int new_mtu)
-{
- bool if_up = netif_running(netdev);
- int err = 0;
-
- if (if_up)
- otx2vf_stop(netdev);
-
- netdev_info(netdev, "Changing MTU from %d to %d\n",
- netdev->mtu, new_mtu);
- netdev->mtu = new_mtu;
-
- if (if_up)
- err = otx2vf_open(netdev);
-
- return err;
-}
-
static void otx2vf_process_vfaf_mbox_msg(struct otx2_nic *vf,
struct mbox_msghdr *msg)
{
@@ -185,7 +165,7 @@ static void otx2vf_vfaf_mbox_up_handler(struct work_struct *work)
int offset, id;
vf_mbox = container_of(work, struct mbox, mbox_up_wrk);
- vf = vf_mbox->pfvf;
+ vf = vf_mbox->pfvf;
mbox = &vf_mbox->mbox_up;
mdev = &mbox->dev[0];
@@ -366,7 +346,7 @@ exit:
return err;
}
-int otx2vf_open(struct net_device *netdev)
+static int otx2vf_open(struct net_device *netdev)
{
struct otx2_nic *vf;
int err;
@@ -385,13 +365,11 @@ int otx2vf_open(struct net_device *netdev)
return 0;
}
-EXPORT_SYMBOL(otx2vf_open);
-int otx2vf_stop(struct net_device *netdev)
+static int otx2vf_stop(struct net_device *netdev)
{
return otx2_stop(netdev);
}
-EXPORT_SYMBOL(otx2vf_stop);
static netdev_tx_t otx2vf_xmit(struct sk_buff *skb, struct net_device *netdev)
{
@@ -408,13 +386,12 @@ static netdev_tx_t otx2vf_xmit(struct sk_buff *skb, struct net_device *netdev)
}
sq = &vf->qset.sq[qidx];
-
txq = netdev_get_tx_queue(netdev, qidx);
- if (!netif_tx_queue_stopped(txq) &&
- !otx2_sq_append_skb(netdev, sq, skb, qidx)) {
+
+ if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) {
netif_tx_stop_queue(txq);
- /* Barrier, for stop_queue visible to be on other cpus */
+ /* Check again, in case SQBs got freed up */
smp_mb();
if ((sq->num_sqbs - *sq->aura_fc_addr) > 1)
netif_tx_start_queue(txq);
@@ -429,16 +406,37 @@ static netdev_tx_t otx2vf_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
}
+static int otx2vf_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ bool if_up = netif_running(netdev);
+ int err = 0;
+
+ if (if_up)
+ otx2vf_stop(netdev);
+
+ netdev_info(netdev, "Changing MTU from %d to %d\n",
+ netdev->mtu, new_mtu);
+ netdev->mtu = new_mtu;
+
+ if (if_up)
+ err = otx2vf_open(netdev);
+
+ return err;
+}
+
static void otx2vf_reset_task(struct work_struct *work)
{
struct otx2_nic *vf = container_of(work, struct otx2_nic, reset_task);
- if (!netif_running(vf->netdev))
- return;
+ rtnl_lock();
- otx2vf_stop(vf->netdev);
- otx2vf_open(vf->netdev);
- netif_trans_update(vf->netdev);
+ if (netif_running(vf->netdev)) {
+ otx2vf_stop(vf->netdev);
+ vf->reset_count++;
+ otx2vf_open(vf->netdev);
+ }
+
+ rtnl_unlock();
}
static netdev_features_t
@@ -477,10 +475,7 @@ static int otx2vf_realloc_msix_vectors(struct otx2_nic *vf)
return err;
}
- err = otx2vf_register_mbox_intr(vf, false);
- if (err)
- return err;
- return 0;
+ return otx2vf_register_mbox_intr(vf, false);
}
static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -545,13 +540,17 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
GFP_KERNEL);
- if (!hw->irq_name)
+ if (!hw->irq_name) {
+ err = -ENOMEM;
goto err_free_netdev;
+ }
hw->affinity_mask = devm_kcalloc(&hw->pdev->dev, num_vec,
sizeof(cpumask_var_t), GFP_KERNEL);
- if (!hw->affinity_mask)
+ if (!hw->affinity_mask) {
+ err = -ENOMEM;
goto err_free_netdev;
+ }
err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
if (err < 0) {
@@ -620,6 +619,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
INIT_WORK(&vf->reset_task, otx2vf_reset_task);
+ /* To distinguish LBK VFs, set their netdev name explicitly */
if (is_otx2_lbkvf(vf->pdev)) {
int n;