Diffstat (limited to 'drivers/net/ethernet/marvell')
 drivers/net/ethernet/marvell/mvneta.c                    | 108
 drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c           |   3
 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c          |   2
 drivers/net/ethernet/marvell/octeontx2/Kconfig           |   3
 drivers/net/ethernet/marvell/octeontx2/af/cgx.c          |  16
 drivers/net/ethernet/marvell/octeontx2/af/cgx.h          |   3
 drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h    |   3
 drivers/net/ethernet/marvell/octeontx2/af/common.h       |  11
 drivers/net/ethernet/marvell/octeontx2/af/mbox.h         |  16
 drivers/net/ethernet/marvell/octeontx2/af/ptp.c          |   4
 drivers/net/ethernet/marvell/octeontx2/af/rvu.c          |   5
 drivers/net/ethernet/marvell/octeontx2/af/rvu.h          |   3
 drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c      |  10
 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c      |  21
 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c      |   2
 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c   |   6
 drivers/net/ethernet/marvell/octeontx2/af/rvu_sso.c      |   5
 drivers/net/ethernet/marvell/octeontx2/af/rvu_tim.c      |   4
 drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c |  76
 drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h |  92
 drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c|  60
 drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c  |   5
 drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c     |  54
 drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h    |   2
 drivers/net/ethernet/marvell/octeontx2/nic/otx2_smqvf.c  |   6
 drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c   |  64
 drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c     |   8
 drivers/net/ethernet/marvell/sky2.c                      |   2
 28 files changed, 412 insertions(+), 182 deletions(-)
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 269bd73be1a0..0f4ae3a6e043 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -104,9 +104,11 @@
#define MVNETA_TX_IN_PRGRS BIT(1)
#define MVNETA_TX_FIFO_EMPTY BIT(8)
#define MVNETA_RX_MIN_FRAME_SIZE 0x247c
+/* Only exists on Armada XP and Armada 370 */
#define MVNETA_SERDES_CFG 0x24A0
#define MVNETA_SGMII_SERDES_PROTO 0x0cc7
#define MVNETA_QSGMII_SERDES_PROTO 0x0667
+#define MVNETA_HSGMII_SERDES_PROTO 0x1107
#define MVNETA_TYPE_PRIO 0x24bc
#define MVNETA_FORCE_UNI BIT(21)
#define MVNETA_TXQ_CMD_1 0x24e4
@@ -388,6 +390,8 @@ struct mvneta_pcpu_stats {
struct u64_stats_sync syncp;
u64 rx_packets;
u64 rx_bytes;
+ u64 rx_dropped;
+ u64 rx_errors;
u64 tx_packets;
u64 tx_bytes;
};
@@ -705,6 +709,8 @@ mvneta_get_stats64(struct net_device *dev,
struct mvneta_pcpu_stats *cpu_stats;
u64 rx_packets;
u64 rx_bytes;
+ u64 rx_dropped;
+ u64 rx_errors;
u64 tx_packets;
u64 tx_bytes;
@@ -713,19 +719,20 @@ mvneta_get_stats64(struct net_device *dev,
start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
rx_packets = cpu_stats->rx_packets;
rx_bytes = cpu_stats->rx_bytes;
+ rx_dropped = cpu_stats->rx_dropped;
+ rx_errors = cpu_stats->rx_errors;
tx_packets = cpu_stats->tx_packets;
tx_bytes = cpu_stats->tx_bytes;
} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
stats->rx_packets += rx_packets;
stats->rx_bytes += rx_bytes;
+ stats->rx_dropped += rx_dropped;
+ stats->rx_errors += rx_errors;
stats->tx_packets += tx_packets;
stats->tx_bytes += tx_bytes;
}
- stats->rx_errors = dev->stats.rx_errors;
- stats->rx_dropped = dev->stats.rx_dropped;
-
stats->tx_dropped = dev->stats.tx_dropped;
}
@@ -1702,8 +1709,14 @@ static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
static void mvneta_rx_error(struct mvneta_port *pp,
struct mvneta_rx_desc *rx_desc)
{
+ struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
u32 status = rx_desc->status;
+ /* update per-cpu counter */
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_errors++;
+ u64_stats_update_end(&stats->syncp);
+
switch (status & MVNETA_RXD_ERR_CODE_MASK) {
case MVNETA_RXD_ERR_CRC:
netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
@@ -1964,7 +1977,6 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
/* Check errors only for FIRST descriptor */
if (rx_status & MVNETA_RXD_ERR_SUMMARY) {
mvneta_rx_error(pp, rx_desc);
- dev->stats.rx_errors++;
/* leave the descriptor untouched */
continue;
}
@@ -1975,11 +1987,17 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
skb_size = max(rx_copybreak, rx_header_size);
rxq->skb = netdev_alloc_skb_ip_align(dev, skb_size);
if (unlikely(!rxq->skb)) {
+ struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+
netdev_err(dev,
"Can't allocate skb on queue %d\n",
rxq->id);
- dev->stats.rx_dropped++;
rxq->skb_alloc_err++;
+
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_dropped++;
+ u64_stats_update_end(&stats->syncp);
+
continue;
}
copy_size = min(skb_size, rx_bytes);
@@ -2136,7 +2154,6 @@ err_drop_frame_ret_pool:
mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
rx_desc->buf_phys_addr);
err_drop_frame:
- dev->stats.rx_errors++;
mvneta_rx_error(pp, rx_desc);
/* leave the descriptor untouched */
continue;
@@ -2788,11 +2805,10 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
/* For the case where the last mvneta_poll did not process all
* RX packets
*/
- rx_queue = fls(((cause_rx_tx >> 8) & 0xff));
-
cause_rx_tx |= pp->neta_armada3700 ? pp->cause_rx_tx :
port->cause_rx_tx;
+ rx_queue = fls(((cause_rx_tx >> 8) & 0xff));
if (rx_queue) {
rx_queue = rx_queue - 1;
if (pp->bm_priv)
@@ -3149,26 +3165,60 @@ static int mvneta_setup_txqs(struct mvneta_port *pp)
return 0;
}
-static int mvneta_comphy_init(struct mvneta_port *pp)
+static int mvneta_comphy_init(struct mvneta_port *pp, phy_interface_t interface)
{
int ret;
- if (!pp->comphy)
- return 0;
-
- ret = phy_set_mode_ext(pp->comphy, PHY_MODE_ETHERNET,
- pp->phy_interface);
+ ret = phy_set_mode_ext(pp->comphy, PHY_MODE_ETHERNET, interface);
if (ret)
return ret;
return phy_power_on(pp->comphy);
}
+static int mvneta_config_interface(struct mvneta_port *pp,
+ phy_interface_t interface)
+{
+ int ret = 0;
+
+ if (pp->comphy) {
+ if (interface == PHY_INTERFACE_MODE_SGMII ||
+ interface == PHY_INTERFACE_MODE_1000BASEX ||
+ interface == PHY_INTERFACE_MODE_2500BASEX) {
+ ret = mvneta_comphy_init(pp, interface);
+ }
+ } else {
+ switch (interface) {
+ case PHY_INTERFACE_MODE_QSGMII:
+ mvreg_write(pp, MVNETA_SERDES_CFG,
+ MVNETA_QSGMII_SERDES_PROTO);
+ break;
+
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ mvreg_write(pp, MVNETA_SERDES_CFG,
+ MVNETA_SGMII_SERDES_PROTO);
+ break;
+
+ case PHY_INTERFACE_MODE_2500BASEX:
+ mvreg_write(pp, MVNETA_SERDES_CFG,
+ MVNETA_HSGMII_SERDES_PROTO);
+ break;
+ default:
+ break;
+ }
+ }
+
+ pp->phy_interface = interface;
+
+ return ret;
+}
+
static void mvneta_start_dev(struct mvneta_port *pp)
{
int cpu;
- WARN_ON(mvneta_comphy_init(pp));
+ WARN_ON(mvneta_config_interface(pp, pp->phy_interface));
mvneta_max_rx_size_set(pp, pp->pkt_size);
mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
@@ -3538,17 +3588,13 @@ static void mvneta_mac_config(struct net_device *ndev, unsigned int mode,
/* When at 2.5G, the link partner can send frames with shortened
* preambles.
*/
- if (state->speed == SPEED_2500)
+ if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
new_ctrl4 |= MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE;
- if (pp->comphy && pp->phy_interface != state->interface &&
- (state->interface == PHY_INTERFACE_MODE_SGMII ||
- state->interface == PHY_INTERFACE_MODE_1000BASEX ||
- state->interface == PHY_INTERFACE_MODE_2500BASEX)) {
- pp->phy_interface = state->interface;
-
- WARN_ON(phy_power_off(pp->comphy));
- WARN_ON(mvneta_comphy_init(pp));
+ if (pp->phy_interface != state->interface) {
+ if (pp->comphy)
+ WARN_ON(phy_power_off(pp->comphy));
+ WARN_ON(mvneta_config_interface(pp, state->interface));
}
if (new_ctrl0 != gmac_ctrl0)
@@ -4447,12 +4493,10 @@ static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
/* MAC Cause register should be cleared */
mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
- if (phy_mode == PHY_INTERFACE_MODE_QSGMII)
- mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
- else if (phy_mode == PHY_INTERFACE_MODE_SGMII ||
- phy_interface_mode_is_8023z(phy_mode))
- mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
- else if (!phy_interface_mode_is_rgmii(phy_mode))
+ if (phy_mode != PHY_INTERFACE_MODE_QSGMII &&
+ phy_mode != PHY_INTERFACE_MODE_SGMII &&
+ !phy_interface_mode_is_8023z(phy_mode) &&
+ !phy_interface_mode_is_rgmii(phy_mode))
return -EINVAL;
return 0;
@@ -4637,10 +4681,10 @@ static int mvneta_probe(struct platform_device *pdev)
if (err < 0)
goto err_netdev;
- err = mvneta_port_power_up(pp, phy_mode);
+ err = mvneta_port_power_up(pp, pp->phy_interface);
if (err < 0) {
dev_err(&pdev->dev, "can't power up port\n");
- goto err_netdev;
+ return err;
}
/* Armada3700 network controller does not support per-cpu
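[Note on the mvneta changes: moving rx_dropped/rx_errors from dev->stats into
mvneta_pcpu_stats follows the usual u64_stats_sync pattern, where each CPU
bumps its own counters inside an update_begin/update_end pair and the reader
retries until it sees a snapshot no writer tore. A minimal sketch of that
pattern, with illustrative names rather than mvneta's own:

#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct demo_pcpu_stats {
	struct u64_stats_sync syncp;
	u64 rx_errors;
};

/* Writer: runs on the local CPU (e.g. softirq), no lock needed */
static void demo_count_rx_error(struct demo_pcpu_stats __percpu *stats)
{
	struct demo_pcpu_stats *s = this_cpu_ptr(stats);

	u64_stats_update_begin(&s->syncp);
	s->rx_errors++;
	u64_stats_update_end(&s->syncp);
}

/* Reader: sums all CPUs, retrying any snapshot torn by a writer */
static u64 demo_sum_rx_errors(struct demo_pcpu_stats __percpu *stats)
{
	u64 total = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct demo_pcpu_stats *s = per_cpu_ptr(stats, cpu);
		unsigned int start;
		u64 errs;

		do {
			start = u64_stats_fetch_begin_irq(&s->syncp);
			errs = s->rx_errors;
		} while (u64_stats_fetch_retry_irq(&s->syncp, start));
		total += errs;
	}
	return total;
}
]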
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
index fb06c0aa620a..e4d26092745a 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
@@ -1299,6 +1299,9 @@ int mvpp2_ethtool_cls_rule_del(struct mvpp2_port *port,
struct mvpp2_ethtool_fs *efs;
int ret;
+ if (info->fs.location >= MVPP2_N_RFS_ENTRIES_PER_FLOW)
+ return -EINVAL;
+
efs = port->rfs_rules[info->fs.location];
if (!efs)
return -EINVAL;
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 885529701de9..b8c576d9656d 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -3445,7 +3445,7 @@ static int mvpp2_open(struct net_device *dev)
valid = true;
}
- if (priv->hw_version == MVPP22 && port->link_irq && !port->phylink) {
+ if (priv->hw_version == MVPP22 && port->link_irq) {
err = request_irq(port->link_irq, mvpp2_link_status_isr, 0,
dev->name, port);
if (err) {
diff --git a/drivers/net/ethernet/marvell/octeontx2/Kconfig b/drivers/net/ethernet/marvell/octeontx2/Kconfig
index 95b2d2541e95..e808fe9e1043 100644
--- a/drivers/net/ethernet/marvell/octeontx2/Kconfig
+++ b/drivers/net/ethernet/marvell/octeontx2/Kconfig
@@ -9,7 +9,6 @@ config OCTEONTX2_MBOX
config OCTEONTX2_AF
tristate "Marvell OcteonTX2 RVU Admin Function driver"
select OCTEONTX2_MBOX
- depends on (64BIT && COMPILE_TEST) || ARM64
depends on PCI
help
This driver supports Marvell's OcteonTX2 Resource Virtualization
@@ -29,7 +28,7 @@ config NDC_DIS_DYNAMIC_CACHING
config OCTEONTX2_PF
tristate "Marvell OcteonTX2 NIC Physical Function driver"
select OCTEONTX2_MBOX
- depends on PCI && ARM64 && ARM64_LSE_ATOMICS
+ depends on PCI
help
This driver supports Marvell's OcteonTX2 Resource Virtualization
Unit's physical function NIC driver.
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 5518e81a91e8..7366d25f86d0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -648,7 +648,7 @@ static void cgx_lmac_pause_frm_config(struct cgx *cgx, int lmac_id, bool enable)
cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL);
cfg &= ~0xFFFFULL;
cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL,
- cfg | (DEFAULT_PAUSE_TIME - 0x1000));
+ cfg | (DEFAULT_PAUSE_TIME / 2));
cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_TIME,
DEFAULT_PAUSE_TIME);
@@ -657,7 +657,7 @@ static void cgx_lmac_pause_frm_config(struct cgx *cgx, int lmac_id, bool enable)
CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL);
cfg &= ~0xFFFFULL;
cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL,
- cfg | (DEFAULT_PAUSE_TIME - 0x1000));
+ cfg | (DEFAULT_PAUSE_TIME / 2));
} else {
/* ALL pause frames received are completely ignored */
cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
@@ -1161,6 +1161,18 @@ int cgx_get_phy_mod_type(void *cgxd, int lmac_id)
return err;
}
+int cgx_get_phy_fec_stats(void *cgxd, int lmac_id)
+{
+ struct cgx *cgx = cgxd;
+ u64 req = 0, resp;
+
+ if (!cgx)
+ return -ENODEV;
+
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_PHY_FEC_STATS, req);
+ return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
+}
+
int cgx_set_link_mode(void *cgxd, struct cgx_set_link_mode_args args,
int cgx_id, int lmac_id)
{
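[Note on the cgx.c pause timing: DEFAULT_PAUSE_TIME drops to 0x7FF quanta and
the refresh interval is now programmed to half of it, so refresh pause frames
go out well before the peer's pause timer expires. A back-of-the-envelope
check, assuming one pause quantum is 512 bit times (per IEEE 802.3) and an
illustrative 10G link:

#include <stdio.h>

int main(void)
{
	unsigned int pause_time = 0x7FF;	 /* DEFAULT_PAUSE_TIME, quanta */
	unsigned int interval = pause_time / 2;	 /* refresh interval, quanta */
	double gbps = 10.0;			 /* assumed link speed */

	/* One quantum = 512 bit times, i.e. 51.2 ns at 10 Gbps */
	double pause_us = pause_time * 512.0 / (gbps * 1000.0);

	printf("peer pauses ~%.1f us, refresh every ~%.1f us\n",
	       pause_us, interval * 512.0 / (gbps * 1000.0));
	return 0;
}

This prints roughly 104.8 us of pause with a refresh every ~52.4 us.]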
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
index e9b6102bfef7..4445e23b688b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -83,7 +83,7 @@
#define CGX_COMMAND_REG CGXX_SCRATCH1_REG
#define CGX_EVENT_REG CGXX_SCRATCH0_REG
#define CGX_CMD_TIMEOUT 2200 /* msecs */
-#define DEFAULT_PAUSE_TIME 0xFFFF
+#define DEFAULT_PAUSE_TIME 0x7FF
#define CGX_NVEC 37
#define CGX_LMAC_FWI 0
@@ -162,4 +162,5 @@ void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable);
int cgx_set_link_state(void *cgxd, int lmac_id, bool enable);
int cgx_set_phy_mod_type(int mod, void *cgxd, int lmac_id);
int cgx_get_phy_mod_type(void *cgxd, int lmac_id);
+int cgx_get_phy_fec_stats(void *cgxd, int lmac_id);
#endif /* CGX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
index 1dc3059c9e0d..37a20d66246b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
@@ -129,6 +129,9 @@ enum cgx_cmd_id {
CGX_CMD_GET_ADV_FEC,
CGX_CMD_GET_PHY_MOD_TYPE, /* line-side modulation type: NRZ or PAM4 */
CGX_CMD_SET_PHY_MOD_TYPE,
+ CGX_CMD_PRBS,
+ CGX_CMD_DISPLAY_EYE,
+ CGX_CMD_GET_PHY_FEC_STATS,
};
/* async event ids */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h
index 4f8893ade710..9980598fe79e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h
@@ -43,7 +43,7 @@ struct qmem {
void *base;
dma_addr_t iova;
int alloc_sz;
- u8 entry_sz;
+ u16 entry_sz;
u8 align;
u32 qsize;
};
@@ -143,8 +143,13 @@ enum nix_scheduler {
NIX_TXSCH_LVL_CNT = 0x5,
};
-#define TXSCH_TL1_DFLT_RR_QTM ((1 << 24) - 1)
-#define TXSCH_TL1_DFLT_RR_PRIO (0x1ull)
+#define TXSCH_RR_QTM_MAX ((1 << 24) - 1)
+#define TXSCH_TL1_DFLT_RR_QTM TXSCH_RR_QTM_MAX
+#define TXSCH_TL1_DFLT_RR_PRIO (0x1ull)
+#define MAX_SCHED_WEIGHT 0xFF
+#define DFLT_RR_WEIGHT 71
+#define DFLT_RR_QTM ((DFLT_RR_WEIGHT * TXSCH_RR_QTM_MAX) \
+ / MAX_SCHED_WEIGHT)
/* Min/Max packet sizes, excluding FCS */
#define NIC_HW_MIN_FRS 40
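[Note on the common.h change: DFLT_RR_QTM encodes a default DWRR weight of 71
out of MAX_SCHED_WEIGHT (255), scaled onto the hardware's 24-bit round-robin
quantum field. A worked check of the arithmetic, using the macro values from
the hunk above:

#define TXSCH_RR_QTM_MAX ((1 << 24) - 1)	/* 16777215 == 255 * 65793 */
#define MAX_SCHED_WEIGHT 0xFF			/* 255 */
#define DFLT_RR_WEIGHT 71

/* 71 * 16777215 == 1191182265, which fits in a 32-bit int, and
 * 71 * 16777215 / 255 == 71 * 65793 == 4671303, well within 24 bits.
 */
_Static_assert((DFLT_RR_WEIGHT * (unsigned long long)TXSCH_RR_QTM_MAX) /
	       MAX_SCHED_WEIGHT == 4671303, "DFLT_RR_QTM arithmetic");
]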
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 950ba1774ba7..8354572fa132 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -167,6 +167,7 @@ M(CGX_SET_PHY_MOD_TYPE, 0x216, cgx_set_phy_mod_type, cgx_phy_mod_type, \
M(CGX_FEC_STATS, 0x217, cgx_fec_stats, msg_req, cgx_fec_stats_rsp) \
M(CGX_SET_LINK_MODE, 0x218, cgx_set_link_mode, cgx_set_link_mode_req,\
cgx_set_link_mode_rsp) \
+M(CGX_GET_PHY_FEC_STATS, 0x219, cgx_get_phy_fec_stats, msg_req, msg_rsp) \
/* NPA mbox IDs (range 0x400 - 0x5FF) */ \
M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, \
npa_lf_alloc_req, npa_lf_alloc_rsp) \
@@ -498,8 +499,17 @@ enum fec_type {
};
struct phy_s {
- u64 can_change_mod_type : 1;
- u64 mod_type : 1;
+ struct {
+ u64 can_change_mod_type : 1;
+ u64 mod_type : 1;
+ u64 has_fec_stats : 1;
+ } misc;
+ struct fec_stats_s {
+ u32 rsfec_corr_cws;
+ u32 rsfec_uncorr_cws;
+ u32 brfec_corr_blks;
+ u32 brfec_uncorr_blks;
+ } fec_stats;
};
struct cgx_lmac_fwdata_s {
@@ -513,7 +523,7 @@ struct cgx_lmac_fwdata_s {
/* Only applicable if SFP/QSFP slot is present */
struct sfp_eeprom_s sfp_eeprom;
struct phy_s phy;
-#define LMAC_FWDATA_RESERVED_MEM 1023
+#define LMAC_FWDATA_RESERVED_MEM 1021
u64 reserved[LMAC_FWDATA_RESERVED_MEM];
};
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
index c85ee22b3d2b..8d1a0ec7ad9f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
@@ -131,7 +131,11 @@ int ptp_adjfine(struct ptp *ptp, long scaled_ppm)
static inline u64 get_tsc(bool is_pmu)
{
+#if defined(CONFIG_ARM64)
return is_pmu ? read_sysreg(pmccntr_el0) : read_sysreg(cntvct_el0);
+#else
+ return 0;
+#endif
}
int ptp_get_clock(struct ptp *ptp, bool is_pmu, u64 *clk, u64 *tsc)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 3868898757f6..1f877d2258d6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -707,6 +707,7 @@ static void rvu_setup_pfvf_macaddress(struct rvu *rvu)
} else {
eth_random_addr(pfvf->mac_addr);
}
+ ether_addr_copy(pfvf->default_mac, pfvf->mac_addr);
/* Assign MAC address to VFs*/
rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
@@ -721,6 +722,7 @@ static void rvu_setup_pfvf_macaddress(struct rvu *rvu)
} else {
eth_random_addr(pfvf->mac_addr);
}
+ ether_addr_copy(pfvf->default_mac, pfvf->mac_addr);
}
}
}
@@ -1106,6 +1108,9 @@ static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype)
if (blkaddr < 0)
return;
+ if (blkaddr == BLKADDR_NIX0)
+ rvu_nix_reset_mac(pfvf, pcifunc);
+
block = &hw->block[blkaddr];
num_lfs = rvu_get_rsrc_mapcount(pfvf, block->type);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 5e90047b9a62..62e881254b8b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -266,7 +266,9 @@ struct rvu_pfvf {
u16 minlen;
bool hw_rx_tstamp_en; /* Is rx_tstamp enabled */
+ bool pf_set_vfs_mac;
u8 mac_addr[ETH_ALEN]; /* MAC address of this PF/VF */
+ u8 default_mac[ETH_ALEN]; /* MAC address from FWdata */
/* Broadcast pkt replication info */
u16 bcast_mce_idx;
@@ -615,6 +617,7 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int npalf);
int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf);
int rvu_nix_register_interrupts(struct rvu *rvu);
void rvu_nix_unregister_interrupts(struct rvu *rvu);
+void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc);
/* NPC APIs */
int rvu_npc_init(struct rvu *rvu);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index e7f2a5678d57..145329cf5eaf 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -906,6 +906,16 @@ int rvu_mbox_handler_cgx_get_phy_mod_type(struct rvu *rvu, struct msg_req *req,
return 0;
}
+int rvu_mbox_handler_cgx_get_phy_fec_stats(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ return cgx_get_phy_fec_stats(rvu_cgx_pdata(cgx_id, rvu), lmac_id);
+}
+
/* Finds cumulative status of NIX rx/tx counters from LF of a PF and those
* from its VFs as well. ie. NIX rx/tx counters at the CGX port level
*/
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 52a590982ffd..55304fa55643 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -2910,10 +2910,11 @@ int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
if (nixlf < 0)
return NIX_AF_ERR_AF_LF_INVALID;
- /* Skip updating mac addr if request is from vf */
- if (!from_vf)
- ether_addr_copy(pfvf->mac_addr, req->mac_addr);
+ /* VF can't overwrite admin(PF) changes */
+ if (from_vf && pfvf->pf_set_vfs_mac)
+ return -EPERM;
+ ether_addr_copy(pfvf->mac_addr, req->mac_addr);
rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
pfvf->rx_chan_base, req->mac_addr);
@@ -3531,11 +3532,14 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
dev_err(rvu->dev, "CQ ctx disable failed\n");
}
- /* Disabling CGX config done for PTP */
+ /* Disabling CGX and NPC config done for PTP */
if (pfvf->hw_rx_tstamp_en) {
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
cgxd = rvu_cgx_pdata(cgx_id, rvu);
cgx_lmac_ptp_config(cgxd, lmac_id, false);
+ /* Undo NPC config done for PTP */
+ if (npc_config_ts_kpuaction(rvu, pf, pcifunc, false))
+ dev_err(rvu->dev, "NPC config for PTP failed\n");
pfvf->hw_rx_tstamp_en = false;
}
@@ -3968,3 +3972,12 @@ struct rvu *rvu, struct nix_inline_ipsec_lf_cfg *req, struct msg_rsp *rsp)
return 0;
}
+
+void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc)
+{
+ bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
+
+ /* overwrite vf mac address with default_mac */
+ if (from_vf)
+ ether_addr_copy(pfvf->mac_addr, pfvf->default_mac);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index 391f378c473f..b193d4bd3f81 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -2661,7 +2661,7 @@ int rvu_mbox_handler_npc_set_pkind(struct rvu *rvu,
if (req->dir & PKIND_RX) {
/* rx pkind set req valid only for cgx mapped PFs */
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
- return -EPERM;
+ return 0;
rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
rc = cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu),
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
index c587b8dab336..212bfca5cf33 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -979,8 +979,10 @@ update_rule:
pfvf->def_rule = rule;
/* VF's MAC address is being changed via PF */
- if (pf_set_vfs_mac)
- ether_addr_copy(pfvf->mac_addr, req->packet.dmac);
+ if (pf_set_vfs_mac) {
+ ether_addr_copy(pfvf->default_mac, req->packet.dmac);
+ pfvf->pf_set_vfs_mac = true;
+ }
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_sso.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sso.c
index 663fb2ce2865..dead90cb2f28 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_sso.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sso.c
@@ -27,8 +27,9 @@
#else
#define rvu_sso_store_pair(val0, val1, addr) \
do { \
- *(uint64_t *)addr = val0; \
- *(uint64_t *)(((uint8_t *)addr) + 8) = val1; \
+ u64 *addr1 = (void *)addr; \
+ *addr1 = val0; \
+ *(u64 *)(((u8 *)addr1) + 8) = val1; \
} while (0)
#endif
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_tim.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_tim.c
index e79a6ddd33dd..e23c036eb023 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_tim.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_tim.c
@@ -24,7 +24,9 @@ static inline u64 get_tenns_tsc(void)
{
u64 tsc;
+#if defined(CONFIG_ARM64)
asm volatile("mrs %0, cntvct_el0" : "=r" (tsc));
+#endif
return tsc;
}
@@ -32,7 +34,9 @@ static inline u64 get_tenns_clk(void)
{
u64 tsc;
+#if defined(CONFIG_ARM64)
asm volatile("mrs %0, cntfrq_el0" : "=r" (tsc));
+#endif
return tsc;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 0145dcf068e7..ab6bd745b881 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -126,7 +126,7 @@ void otx2_get_stats64(struct net_device *netdev,
EXPORT_SYMBOL(otx2_get_stats64);
/* Sync MAC address with RVU */
-int otx2_hw_set_mac_addr(struct otx2_nic *pfvf, struct net_device *netdev)
+int otx2_hw_set_mac_addr(struct otx2_nic *pfvf, u8 *mac)
{
struct nix_set_mac_addr *req;
int err;
@@ -138,7 +138,7 @@ int otx2_hw_set_mac_addr(struct otx2_nic *pfvf, struct net_device *netdev)
return -ENOMEM;
}
- ether_addr_copy(req->mac_addr, netdev->dev_addr);
+ ether_addr_copy(req->mac_addr, mac);
err = otx2_sync_mbox_msg(&pfvf->mbox);
otx2_mbox_unlock(&pfvf->mbox);
@@ -186,9 +186,14 @@ int otx2_set_mac_address(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
-
- otx2_hw_set_mac_addr(pfvf, netdev);
+ if (!otx2_hw_set_mac_addr(pfvf, addr->sa_data)) {
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ /* update dmac field in vlan offload rule */
+ if (pfvf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
+ otx2_install_rxvlan_offload_flow(pfvf);
+ } else {
+ return -EPERM;
+ }
return 0;
}
@@ -359,15 +364,15 @@ dma_addr_t otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
/* Allocate a new page */
pool->page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN, 0);
- if (!pool->page)
+ if (unlikely(!pool->page))
return -ENOMEM;
pool->page_offset = 0;
ret:
- iova = (u64)dma_map_page_attrs(pfvf->dev, pool->page,
- pool->page_offset, pool->rbsize,
- DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
- if (dma_mapping_error(pfvf->dev, iova)) {
+ iova = (u64)otx2_dma_map_page(pfvf, pool->page, pool->page_offset,
+ pool->rbsize, DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ if (!iova) {
if (!pool->page_offset)
__free_pages(pool->page, 0);
pool->page = NULL;
@@ -450,15 +455,21 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
req->num_regs++;
/* Set DWRR quantum */
req->reg[2] = NIX_AF_MDQX_SCHEDULE(schq);
- req->regval[2] = pfvf->netdev->mtu;
+ req->regval[2] = DFLT_RR_QTM;
} else if (lvl == NIX_TXSCH_LVL_TL4) {
parent = hw->txschq_list[NIX_TXSCH_LVL_TL3][0];
req->reg[0] = NIX_AF_TL4X_PARENT(schq);
req->regval[0] = parent << 16;
+ req->num_regs++;
+ req->reg[1] = NIX_AF_TL4X_SCHEDULE(schq);
+ req->regval[1] = DFLT_RR_QTM;
} else if (lvl == NIX_TXSCH_LVL_TL3) {
parent = hw->txschq_list[NIX_TXSCH_LVL_TL2][0];
req->reg[0] = NIX_AF_TL3X_PARENT(schq);
req->regval[0] = parent << 16;
+ req->num_regs++;
+ req->reg[1] = NIX_AF_TL3X_SCHEDULE(schq);
+ req->regval[1] = DFLT_RR_QTM;
} else if (lvl == NIX_TXSCH_LVL_TL2) {
parent = hw->txschq_list[NIX_TXSCH_LVL_TL1][0];
req->reg[0] = NIX_AF_TL2X_PARENT(schq);
@@ -466,7 +477,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
req->num_regs++;
req->reg[1] = NIX_AF_TL2X_SCHEDULE(schq);
- req->regval[1] = TXSCH_TL1_DFLT_RR_PRIO << 24;
+ req->regval[1] = TXSCH_TL1_DFLT_RR_PRIO << 24 | DFLT_RR_QTM;
req->num_regs++;
req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq,
@@ -518,7 +529,7 @@ int otx2_txsch_alloc(struct otx2_nic *pfvf)
int otx2_txschq_stop(struct otx2_nic *pfvf)
{
struct nix_txsch_free_req *free_req;
- int lvl, schq;
+ int lvl, schq, err;
otx2_mbox_lock(&pfvf->mbox);
/* Free the transmit schedulers */
@@ -529,7 +540,7 @@ int otx2_txschq_stop(struct otx2_nic *pfvf)
}
free_req->flags = TXSCHQ_FREE_ALL;
- WARN_ON(otx2_sync_mbox_msg(&pfvf->mbox));
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
otx2_mbox_unlock(&pfvf->mbox);
/* Clear the txschq list */
@@ -537,7 +548,7 @@ int otx2_txschq_stop(struct otx2_nic *pfvf)
for (schq = 0; schq < MAX_TXSCHQ_PER_FUNC; schq++)
pfvf->hw.txschq_list[lvl][schq] = 0;
}
- return 0;
+ return err;
}
/* RED and drop levels of CQ on packet reception.
@@ -649,7 +660,7 @@ static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
aq->sq.ena = 1;
/* Only one SMQ is allocated, map all SQ's to that SMQ */
aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
- aq->sq.smq_rr_quantum = OTX2_MAX_MTU;
+ aq->sq.smq_rr_quantum = DFLT_RR_QTM;
aq->sq.default_chan = pfvf->hw.tx_chan_base;
aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
aq->sq.sqb_aura = sqb_aura;
@@ -1248,6 +1259,14 @@ int otx2_attach_npa_nix(struct otx2_nic *pfvf)
return err;
}
+ pfvf->nix_blkaddr = BLKADDR_NIX0;
+
+ /* If the platform has two NIX blocks then LF may be
+ * allocated from NIX1.
+ */
+ if (otx2_read64(pfvf, RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_NIX1)) & 0x1FFULL)
+ pfvf->nix_blkaddr = BLKADDR_NIX1;
+
/* Get NPA and NIX MSIX vector offsets */
msix = otx2_mbox_alloc_msg_msix_offset(&pfvf->mbox);
if (!msix) {
@@ -1291,7 +1310,10 @@ void otx2_ctx_disable(struct mbox *mbox, int type, bool npa)
req->ctype = type;
- WARN_ON(otx2_sync_mbox_msg(mbox));
+ if (otx2_sync_mbox_msg(mbox))
+ dev_err(mbox->pfvf->dev, "%s failed to disable context\n",
+ __func__);
+
otx2_mbox_unlock(mbox);
}
@@ -1318,26 +1340,26 @@ static inline void otx2_nix_rq_op_stats(struct queue_stats *stats,
struct otx2_nic *pfvf, int qidx)
{
u64 incr = (u64)qidx << 32;
- atomic64_t *ptr;
+ u64 *ptr;
- ptr = (__force atomic64_t *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_OCTS);
- stats->bytes = atomic64_fetch_add_relaxed(incr, ptr);
+ ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_OCTS);
+ stats->bytes = otx2_atomic64_add(incr, ptr);
- ptr = (__force atomic64_t *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_PKTS);
- stats->pkts = atomic64_fetch_add_relaxed(incr, ptr);
+ ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_PKTS);
+ stats->pkts = otx2_atomic64_add(incr, ptr);
}
static inline void otx2_nix_sq_op_stats(struct queue_stats *stats,
struct otx2_nic *pfvf, int qidx)
{
u64 incr = (u64)qidx << 32;
- atomic64_t *ptr;
+ u64 *ptr;
- ptr = (__force atomic64_t *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_OCTS);
- stats->bytes = atomic64_fetch_add_relaxed(incr, ptr);
+ ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_OCTS);
+ stats->bytes = otx2_atomic64_add(incr, ptr);
- ptr = (__force atomic64_t *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_PKTS);
- stats->pkts = atomic64_fetch_add_relaxed(incr, ptr);
+ ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_PKTS);
+ stats->pkts = otx2_atomic64_add(incr, ptr);
}
/* Mbox message handlers */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 6cb8ee69aa1b..b88763010b85 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -13,6 +13,8 @@
#include <linux/pci.h>
#include <linux/ptp_clock_kernel.h>
+#include <linux/timecounter.h>
+#include <linux/iommu.h>
#include <mbox.h>
#include "otx2_reg.h"
@@ -144,7 +146,7 @@ struct mbox {
struct work_struct mbox_up_wrk;
struct otx2_nic *pfvf;
void *bbuf_base; /* Bounce buffer for mbox memory */
- atomic_t lock; /* serialize mailbox access */
+ struct mutex lock; /* serialize mailbox access */
int num_msgs; /*mbox number of messages*/
int up_num_msgs;/* mbox_up number of messages*/
};
@@ -235,6 +237,7 @@ struct otx2_nic {
void __iomem *reg_base;
struct net_device *netdev;
void *iommu_domain;
+ u16 iommu_domain_type;
u16 xtra_hdr;
u16 max_frs;
@@ -298,6 +301,8 @@ struct otx2_nic {
#define OTX2_EDSA_HDR_LEN 16
#define OTX2_HIGIG2_HDR_LEN 16
u32 addl_mtu;
+ /* Block address of NIX either BLKADDR_NIX0 or BLKADDR_NIX1 */
+ int nix_blkaddr;
};
static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
@@ -345,7 +350,7 @@ static inline void __iomem *otx2_get_regaddr(struct otx2_nic *nic, u64 offset)
switch ((offset >> RVU_FUNC_BLKADDR_SHIFT) & RVU_FUNC_BLKADDR_MASK) {
case BLKTYPE_NIX:
- blkaddr = BLKADDR_NIX0;
+ blkaddr = nic->nix_blkaddr;
break;
case BLKTYPE_NPA:
blkaddr = BLKADDR_NPA;
@@ -424,18 +429,17 @@ static inline void otx2_sync_mbox_bbuf(struct otx2_mbox *mbox, int devid)
static inline void otx2_mbox_lock_init(struct mbox *mbox)
{
- atomic_set(&mbox->lock, 0);
+ mutex_init(&mbox->lock);
}
static inline void otx2_mbox_lock(struct mbox *mbox)
{
- while (!(atomic_add_return(1, &mbox->lock) == 1))
- cpu_relax();
+ mutex_lock(&mbox->lock);
}
static inline void otx2_mbox_unlock(struct mbox *mbox)
{
- atomic_set(&mbox->lock, 0);
+ mutex_unlock(&mbox->lock);
}
/* With the absence of API for 128-bit IO memory access for arm64,
@@ -449,10 +453,11 @@ static inline void otx2_mbox_unlock(struct mbox *mbox)
#define otx2_low(high, low) (low)
#endif
+#if defined(CONFIG_ARM64)
static inline void otx2_write128(u64 lo, u64 hi, void __iomem *addr)
{
- asm volatile("stp %x[x0], %x[x1], [%x[p1],#0]!"
- ::[x0]"r"(lo), [x1]"r"(hi), [p1]"r"(addr));
+ __asm__ volatile("stp %x[x0], %x[x1], [%x[p1],#0]!"
+ ::[x0]"r"(lo), [x1]"r"(hi), [p1]"r"(addr));
}
static inline __uint128_t otx2_read128(const void __iomem *addr)
@@ -460,22 +465,51 @@ static inline __uint128_t otx2_read128(const void __iomem *addr)
__uint128_t *__addr = (__force __uint128_t *)addr;
u64 h, l;
- asm volatile("ldp %x[x0], %x[x1], %x[p1]"
- : [x0]"=r"(l), [x1]"=r"(h)
- : [p1]"Ump"(*__addr));
+ __asm__ volatile("ldp %x[x0], %x[x1], %x[p1]"
+ : [x0]"=r"(l), [x1]"=r"(h)
+ : [p1]"Ump"(*__addr));
return (__uint128_t)le64_to_cpu(otx2_low(h, l)) |
(((__uint128_t)le64_to_cpu(otx2_high(h, l))) << 64);
}
+static inline u64 otx2_atomic64_add(u64 incr, u64 *ptr)
+{
+ u64 result;
+
+ __asm__ volatile(".cpu generic+lse\n"
+ "ldadd %x[i], %x[r], [%[b]]"
+ : [r]"=r"(result), "+m"(*ptr)
+ : [i]"r"(incr), [b]"r"(ptr)
+ : "memory");
+ return result;
+}
+
+static inline u64 otx2_lmt_flush(uint64_t addr)
+{
+ u64 result = 0;
+
+ __asm__ volatile(".cpu generic+lse\n"
+ "ldeor xzr,%x[rf],[%[rs]]"
+ : [rf]"=r"(result)
+ : [rs]"r"(addr));
+ return result;
+}
+#else
+#define otx2_write128(lo, hi, addr)
+#define otx2_read128(addr) ({ 0; })
+#define otx2_atomic64_add(incr, ptr) ({ 0; })
+#define otx2_lmt_flush(addr) ({ 0; })
+#endif
+
/* Alloc pointer from pool/aura */
static inline u64 otx2_aura_allocptr(struct otx2_nic *pfvf, int aura)
{
- atomic64_t *ptr = (__force atomic64_t *)otx2_get_regaddr(pfvf,
+ u64 *ptr = (u64 *)otx2_get_regaddr(pfvf,
NPA_LF_AURA_OP_ALLOCX(0));
u64 incr = (u64)aura | BIT_ULL(63);
- return atomic64_fetch_add_relaxed(incr, ptr);
+ return otx2_atomic64_add(incr, ptr);
}
/* Free pointer to a pool/aura */
@@ -598,11 +632,40 @@ static inline int rvu_get_pf(u16 pcifunc)
return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
}
+static inline dma_addr_t otx2_dma_map_page(struct otx2_nic *pfvf,
+ struct page *page,
+ size_t offset, size_t size,
+ enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ dma_addr_t iova;
+
+ if (pfvf->iommu_domain_type == IOMMU_DOMAIN_IDENTITY)
+ return page_to_phys(page) + offset;
+
+ iova = dma_map_page_attrs(pfvf->dev, page,
+ offset, size, dir, attrs);
+ if (unlikely(dma_mapping_error(pfvf->dev, iova)))
+ return (dma_addr_t)NULL;
+ return iova;
+}
+
+static inline void otx2_dma_unmap_page(struct otx2_nic *pfvf,
+ dma_addr_t addr, size_t size,
+ enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ if (pfvf->iommu_domain_type == IOMMU_DOMAIN_IDENTITY)
+ return;
+
+ dma_unmap_page_attrs(pfvf->dev, addr, size, dir, attrs);
+}
+
/* MSI-X APIs */
void otx2_free_cints(struct otx2_nic *pfvf, int n);
void otx2_set_cints_affinity(struct otx2_nic *pfvf);
-int otx2_hw_set_mac_addr(struct otx2_nic *pfvf, struct net_device *netdev);
+int otx2_hw_set_mac_addr(struct otx2_nic *pfvf, u8 *mac);
int otx2_set_mac_address(struct net_device *netdev, void *p);
int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu);
void otx2_tx_timeout(struct net_device *netdev);
@@ -691,6 +754,7 @@ int otx2_remove_flow(struct otx2_nic *pfvf, u32 location);
int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
struct npc_install_flow_req *req);
int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable);
+int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf);
int otx2smqvf_probe(struct otx2_nic *vf);
int otx2smqvf_remove(struct otx2_nic *vf);
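[Note on the otx2_common.h changes: otx2_atomic64_add() and otx2_lmt_flush()
hand-roll the arm64 LSE instructions (LDADD, LDEOR) and gain non-arm64 stubs,
which is what lets the Kconfig hunk drop the ARM64/ARM64_LSE_ATOMICS
dependency. These atomics target hardware "OP" CSRs that treat an atomic
64-bit access as a command: the stored operand selects what is requested and
the load half returns the result. A usage sketch following the stats readers
in otx2_common.c; demo_read_rq_octs is an illustrative name, the rest are
from the diff:

/* Read a per-RQ octet counter: the atomic add's operand carries the
 * queue index in bits 63:32 and the returned load is the counter.
 */
static inline u64 demo_read_rq_octs(struct otx2_nic *pfvf, int qidx)
{
	u64 *ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_OCTS);

	return otx2_atomic64_add((u64)qidx << 32, ptr);
}
]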
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index 8f804effb352..e2cb70a79dbe 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -25,6 +25,8 @@
#define OTX2_DEFAULT_ACTION 0x1
+static struct cgx_fw_data *otx2_get_fwdata(struct otx2_nic *pfvf);
+
static const char otx2_priv_flags_strings[][ETH_GSTRING_LEN] = {
"pam4",
"edsa",
@@ -222,11 +224,30 @@ static void otx2_get_qset_stats(struct otx2_nic *pfvf,
}
}
+static int otx2_get_phy_fec_stats(struct otx2_nic *pfvf)
+{
+ struct msg_req *req;
+ int rc = -EAGAIN;
+
+ otx2_mbox_lock(&pfvf->mbox);
+ req = otx2_mbox_alloc_msg_cgx_get_phy_fec_stats(&pfvf->mbox);
+ if (!req)
+ goto end;
+
+ if (!otx2_sync_mbox_msg(&pfvf->mbox))
+ rc = 0;
+end:
+ otx2_mbox_unlock(&pfvf->mbox);
+ return rc;
+}
+
/* Get device and per queue statistics */
static void otx2_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
+ u64 fec_corr_blks, fec_uncorr_blks;
+ struct cgx_fw_data *rsp;
int stat;
otx2_get_dev_stats(pfvf);
@@ -245,10 +266,35 @@ static void otx2_get_ethtool_stats(struct net_device *netdev,
for (stat = 0; stat < CGX_TX_STATS_COUNT; stat++)
*(data++) = pfvf->hw.cgx_tx_stats[stat];
*(data++) = pfvf->reset_count;
- if (pfvf->linfo.fec) {
- *(data++) = pfvf->hw.cgx_fec_corr_blks;
- *(data++) = pfvf->hw.cgx_fec_uncorr_blks;
+
+ if (pfvf->linfo.fec == OTX2_FEC_NONE)
+ return;
+
+ fec_corr_blks = pfvf->hw.cgx_fec_corr_blks;
+ fec_uncorr_blks = pfvf->hw.cgx_fec_uncorr_blks;
+
+ rsp = otx2_get_fwdata(pfvf);
+ if (!IS_ERR(rsp) && rsp->fwdata.phy.misc.has_fec_stats &&
+ !otx2_get_phy_fec_stats(pfvf)) {
+ /* Fetch fwdata again because it's been recently populated with
+ * latest PHY FEC stats.
+ */
+ rsp = otx2_get_fwdata(pfvf);
+ if (!IS_ERR(rsp)) {
+ struct fec_stats_s *p = &rsp->fwdata.phy.fec_stats;
+
+ if (pfvf->linfo.fec == OTX2_FEC_BASER) {
+ fec_corr_blks = p->brfec_corr_blks;
+ fec_uncorr_blks = p->brfec_uncorr_blks;
+ } else {
+ fec_corr_blks = p->rsfec_corr_cws;
+ fec_uncorr_blks = p->rsfec_uncorr_cws;
+ }
+ }
}
+
+ *(data++) = fec_corr_blks;
+ *(data++) = fec_uncorr_blks;
}
static int otx2_get_sset_count(struct net_device *netdev, int sset)
@@ -267,12 +313,12 @@ static int otx2_get_sset_count(struct net_device *netdev, int sset)
(pfvf->hw.rx_queues + pfvf->hw.tx_queues);
if (!if_up || !pfvf->linfo.fec) {
- return otx2_n_dev_stats + qstats_count +
+ return otx2_n_dev_stats + otx2_n_drv_stats + qstats_count +
CGX_RX_STATS_COUNT + CGX_TX_STATS_COUNT + 1;
}
fec_stats_count = 2;
otx2_update_lmac_fec_stats(pfvf);
- return otx2_n_dev_stats + qstats_count +
+ return otx2_n_dev_stats + otx2_n_drv_stats + qstats_count +
CGX_RX_STATS_COUNT + CGX_TX_STATS_COUNT + 1 +
fec_stats_count;
}
@@ -1227,7 +1273,7 @@ static u32 otx2_get_priv_flags(struct net_device *netdev)
if (IS_ERR(rsp)) {
pfvf->ethtool_flags &= ~OTX2_PRIV_FLAG_PAM4;
} else {
- if (rsp->fwdata.phy.mod_type)
+ if (rsp->fwdata.phy.misc.mod_type)
pfvf->ethtool_flags |= OTX2_PRIV_FLAG_PAM4;
else
pfvf->ethtool_flags &= ~OTX2_PRIV_FLAG_PAM4;
@@ -1248,7 +1294,7 @@ static int otx2_set_phy_mod_type(struct net_device *netdev, bool enable)
return -EAGAIN;
/* ret here if phy does not support this feature */
- if (!fwd->fwdata.phy.can_change_mod_type)
+ if (!fwd->fwdata.phy.misc.can_change_mod_type)
return -EOPNOTSUPP;
otx2_mbox_lock(&pfvf->mbox);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
index 96a4dbba25a3..39f439202235 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
@@ -506,11 +506,14 @@ int otx2_destroy_mcam_flows(struct otx2_nic *pfvf)
return 0;
}
-static int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf)
+int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf)
{
struct npc_install_flow_req *req;
int err;
+ if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC))
+ return -ENOMEM;
+
otx2_mbox_lock(&pfvf->mbox);
req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
if (!req) {
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index d40ca122a4a9..0698e2a76434 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -102,7 +102,6 @@ static void otx2_flr_handler(struct work_struct *work)
struct flr_work *flrwork = container_of(work, struct flr_work, work);
struct otx2_nic *pf = flrwork->pf;
struct msg_req *req;
- struct msg_rsp *rsp;
int vf, reg = 0;
vf = flrwork - pf->flr_wrk;
@@ -122,11 +121,6 @@ static void otx2_flr_handler(struct work_struct *work)
reg = 1;
vf = vf - 64;
}
- rsp = (struct msg_rsp *)
- otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
- otx2_mbox_unlock(&pf->mbox);
- if (rsp->hdr.rc)
- return;
 /* clear transaction pending bit */
otx2_write64(pf, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
@@ -377,9 +371,7 @@ static int otx2_forward_vf_mbox_msgs(struct otx2_nic *pf,
src_mdev = &src_mbox->dev[vf];
mbox_hdr = src_mbox->hwbase +
src_mbox->rx_start + (vf * MBOX_SIZE);
- req_hdr = (struct mbox_hdr *)(src_mdev->mbase +
- src_mbox->rx_start);
- req_hdr->num_msgs = num_msgs;
+
dst_mbox = &pf->mbox;
dst_size = dst_mbox->mbox.tx_size -
ALIGN(sizeof(*mbox_hdr), MBOX_MSG_ALIGN);
@@ -392,7 +384,7 @@ static int otx2_forward_vf_mbox_msgs(struct otx2_nic *pf,
otx2_mbox_lock(&pf->mbox);
dst_mdev->mbase = src_mdev->mbase;
dst_mdev->msg_size = mbox_hdr->msg_size;
- dst_mdev->num_msgs = mbox_hdr->num_msgs;
+ dst_mdev->num_msgs = num_msgs;
err = otx2_sync_mbox_msg(dst_mbox);
if (err) {
dev_warn(pf->dev,
@@ -829,10 +821,6 @@ static void otx2_pfaf_mbox_handler(struct work_struct *work)
}
otx2_mbox_reset(mbox, 0);
-
- /* Clear the IRQ */
- smp_wmb();
- otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));
}
static void otx2_handle_link_event(struct otx2_nic *pf)
@@ -1171,14 +1159,13 @@ EXPORT_SYMBOL(otx2_set_real_num_queues);
static irqreturn_t otx2_q_intr_handler(int irq, void *data)
{
struct otx2_nic *pf = data;
- atomic64_t *ptr;
+ u64 val, *ptr;
u64 qidx = 0;
- u64 val;
/* CQ */
for (qidx = 0; qidx < pf->qset.cq_cnt; qidx++) {
ptr = otx2_get_regaddr(pf, NIX_LF_CQ_OP_INT);
- val = atomic64_fetch_add_relaxed((qidx << 44), ptr);
+ val = otx2_atomic64_add((qidx << 44), ptr);
otx2_write64(pf, NIX_LF_CQ_OP_INT, (qidx << 44) |
(val & NIX_CQERRINT_BITS));
@@ -1203,7 +1190,7 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data)
/* SQ */
for (qidx = 0; qidx < pf->hw.tx_queues; qidx++) {
ptr = otx2_get_regaddr(pf, NIX_LF_SQ_OP_INT);
- val = atomic64_fetch_add_relaxed((qidx << 44), ptr);
+ val = otx2_atomic64_add((qidx << 44), ptr);
otx2_write64(pf, NIX_LF_SQ_OP_INT, (qidx << 44) |
(val & NIX_SQINT_BITS));
@@ -1385,7 +1372,8 @@ err_free_nix_queues:
otx2_free_cq_res(pf);
otx2_ctx_disable(mbox, NIX_AQ_CTYPE_RQ, false);
err_free_txsch:
- otx2_txschq_stop(pf);
+ if (otx2_txschq_stop(pf))
+ dev_err(pf->dev, "%s failed to stop TX schedulers\n", __func__);
err_free_sq_ptrs:
otx2_sq_free_sqbs(pf);
err_free_rq_ptrs:
@@ -1398,13 +1386,16 @@ err_free_nix_lf:
free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
if (free_req) {
free_req->flags = NIX_LF_DISABLE_FLOWS;
- WARN_ON(otx2_sync_mbox_msg(mbox));
+ if (otx2_sync_mbox_msg(mbox))
+ dev_err(pf->dev, "%s failed to free nixlf\n", __func__);
}
err_free_npa_lf:
/* Reset NPA LF */
req = otx2_mbox_alloc_msg_npa_lf_free(mbox);
- if (req)
- WARN_ON(otx2_sync_mbox_msg(mbox));
+ if (req) {
+ if (otx2_sync_mbox_msg(mbox))
+ dev_err(pf->dev, "%s failed to free npalf\n", __func__);
+ }
exit:
otx2_mbox_unlock(mbox);
return err;
@@ -1454,7 +1445,8 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
if (free_req) {
free_req->flags = NIX_LF_DISABLE_FLOWS;
- WARN_ON(otx2_sync_mbox_msg(mbox));
+ if (otx2_sync_mbox_msg(mbox))
+ dev_err(pf->dev, "%s failed to free nixlf\n", __func__);
}
otx2_mbox_unlock(mbox);
@@ -1466,8 +1458,10 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
otx2_mbox_lock(mbox);
/* Reset NPA LF */
req = otx2_mbox_alloc_msg_npa_lf_free(mbox);
- if (req)
- WARN_ON(otx2_sync_mbox_msg(mbox));
+ if (req) {
+ if (otx2_sync_mbox_msg(mbox))
+ dev_err(pf->dev, "%s failed to free npalf\n", __func__);
+ }
otx2_mbox_unlock(mbox);
}
@@ -1498,10 +1492,6 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
if (((sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb)
> sq->sqe_thresh)
netif_tx_wake_queue(txq);
- else
- netdev_warn(netdev,
- "%s: Transmit ring full, stopping SQ%d\n",
- netdev->name, qidx);
return NETDEV_TX_BUSY;
}
@@ -2276,11 +2266,11 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
* So the only way to convert Rx packet's buffer address is to use
* IOMMU's iova_to_phys() handler which translates the address by
* walking through the translation tables.
- *
- * So check if device is binded to IOMMU, otherwise translation is
- * not needed.
*/
pf->iommu_domain = iommu_get_domain_for_dev(dev);
+ if (pf->iommu_domain)
+ pf->iommu_domain_type =
+ ((struct iommu_domain *)pf->iommu_domain)->type;
netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
NETIF_F_IPV6_CSUM | NETIF_F_RXHASH |
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
index e84cf3cc67fd..84271cddd645 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
@@ -145,7 +145,9 @@
#define NIX_AF_TL2X_PARENT(a) (0xE88 | (a) << 16)
#define NIX_AF_TL2X_SCHEDULE(a) (0xE00 | (a) << 16)
#define NIX_AF_TL3X_PARENT(a) (0x1088 | (a) << 16)
+#define NIX_AF_TL3X_SCHEDULE(a) (0x1000 | (a) << 16)
#define NIX_AF_TL4X_PARENT(a) (0x1288 | (a) << 16)
+#define NIX_AF_TL4X_SCHEDULE(a) (0x1200 | (a) << 16)
#define NIX_AF_MDQX_SCHEDULE(a) (0x1400 | (a) << 16)
#define NIX_AF_MDQX_PARENT(a) (0x1480 | (a) << 16)
#define NIX_AF_TL3_TL2X_LINKX_CFG(a, b) (0x1700 | (a) << 16 | (b) << 3)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_smqvf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_smqvf.c
index 9ebe6650a8dd..4f5f73f621d4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_smqvf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_smqvf.c
@@ -42,12 +42,6 @@ static bool is_otx2_smqvf(struct otx2_nic *vf)
return false;
}
-/* Flush SQE written to LMT to SQB */
-static u64 otx2_lmt_flush(uint64_t addr)
-{
- return atomic64_fetch_xor_relaxed(0, (atomic64_t *)addr);
-}
-
static void otx2_sqe_flush(struct otx2_snd_queue *sq, int size)
{
u64 status;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index 9513381d7e5f..6864fa315900 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -34,12 +34,6 @@ static inline struct nix_cqe_hdr_s *otx2_get_next_cqe(struct otx2_cq_queue *cq)
return cqe_hdr;
}
-/* Flush SQE written to LMT to SQB */
-static inline u64 otx2_lmt_flush(uint64_t addr)
-{
- return atomic64_fetch_xor_relaxed(0, (atomic64_t *)addr);
-}
-
static inline unsigned int frag_num(unsigned int i)
{
#ifdef __BIG_ENDIAN
@@ -67,8 +61,8 @@ static dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
offset = frag->page_offset;
*len = skb_frag_size(frag);
}
- return dma_map_page_attrs(pfvf->dev, page, offset, *len,
- DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ return otx2_dma_map_page(pfvf, page, offset, *len,
+ DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
}
static void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg)
@@ -76,9 +70,9 @@ static void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg)
int seg;
for (seg = 0; seg < sg->num_segs; seg++) {
- dma_unmap_page_attrs(pfvf->dev, sg->dma_addr[seg],
- sg->size[seg], DMA_TO_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC);
+ otx2_dma_unmap_page(pfvf, sg->dma_addr[seg],
+ sg->size[seg], DMA_TO_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
}
sg->num_segs = 0;
}
@@ -95,7 +89,7 @@ static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
snd_comp = (struct nix_send_comp_s *)
((void *)cqe_hdr + sizeof(*cqe_hdr));
- if (snd_comp->status) {
+ if (unlikely(snd_comp->status)) {
/* tx packet error handling*/
if (netif_msg_tx_err(pfvf)) {
netdev_info(pfvf->netdev,
@@ -109,7 +103,7 @@ static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
sg = &sq->sg[snd_comp->sqe_id];
skb = (struct sk_buff *)sg->skb;
- if (!skb)
+ if (unlikely(!skb))
return;
if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
@@ -204,7 +198,7 @@ static void otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb,
va = phys_to_virt(otx2_iova_to_phys(pfvf->iommu_domain, iova));
- if (!skb_shinfo(skb)->nr_frags) {
+ if (likely(!skb_shinfo(skb)->nr_frags)) {
/* Check if data starts at some nonzero offset
* from the start of the buffer. For now the
* only possible offset is 8 bytes in the case
@@ -221,8 +215,8 @@ static void otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb,
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
va - page_address(page) + off, len - off, RCV_FRAG_LEN);
- dma_unmap_page_attrs(pfvf->dev, iova - OTX2_HEAD_ROOM, RCV_FRAG_LEN,
- DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ otx2_dma_unmap_page(pfvf, iova - OTX2_HEAD_ROOM, RCV_FRAG_LEN,
+ DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
}
static inline bool otx2_check_rcv_errors(struct otx2_nic *pfvf,
@@ -319,7 +313,7 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
/* CQE_HDR_S for a Rx pkt is always followed by RX_PARSE_S */
parse = (struct nix_rx_parse_s *)((void *)cqe_hdr + sizeof(*cqe_hdr));
- if (parse->errlev || parse->errcode) {
+ if (unlikely(parse->errlev || parse->errcode)) {
if (otx2_check_rcv_errors(pfvf, parse, cq->cq_idx))
return;
}
@@ -328,7 +322,7 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
end = start + ((parse->desc_sizem1 + 1) * 16);
skb = napi_get_frags(napi);
- if (!skb)
+ if (unlikely(!skb))
return;
/* Run through the each NIX_RX_SG_S subdc and frame the skb */
@@ -376,9 +370,9 @@ static inline int otx2_rx_napi_handler(struct otx2_nic *pfvf,
/* Make sure HW writes to CQ are done */
dma_rmb();
- while (processed_cqe < budget) {
+ while (likely(processed_cqe < budget)) {
cqe_hdr = otx2_get_next_cqe(cq);
- if (!cqe_hdr) {
+ if (unlikely(!cqe_hdr)) {
if (!processed_cqe)
return 0;
break;
@@ -393,13 +387,13 @@ static inline int otx2_rx_napi_handler(struct otx2_nic *pfvf,
otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
((u64)cq->cq_idx << 32) | processed_cqe);
- if (!cq->pool_ptrs)
+ if (unlikely(!cq->pool_ptrs))
return 0;
/* Refill pool with new buffers */
while (cq->pool_ptrs) {
bufptr = otx2_alloc_rbuf(pfvf, rbpool, GFP_ATOMIC);
- if (bufptr <= 0) {
+ if (unlikely(bufptr <= 0)) {
struct refill_work *work;
struct delayed_work *dwork;
@@ -434,9 +428,9 @@ static inline int otx2_tx_napi_handler(struct otx2_nic *pfvf,
/* Make sure HW writes to CQ are done */
dma_rmb();
- while (processed_cqe < budget) {
+ while (likely(processed_cqe < budget)) {
cqe_hdr = otx2_get_next_cqe(cq);
- if (!cqe_hdr) {
+ if (unlikely(!cqe_hdr)) {
if (!processed_cqe)
return 0;
break;
@@ -452,7 +446,7 @@ static inline int otx2_tx_napi_handler(struct otx2_nic *pfvf,
otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
((u64)cq->cq_idx << 32) | processed_cqe);
- if (tx_pkts) {
+ if (likely(tx_pkts)) {
txq = netdev_get_tx_queue(pfvf->netdev, cq->cint_idx);
netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
/* Check if queue was stopped earlier due to ring full */
@@ -476,9 +470,9 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
pfvf = (struct otx2_nic *)cq_poll->dev;
qset = &pfvf->qset;
- for (i = 0; i < CQS_PER_CINT; i++) {
+ for (i = CQS_PER_CINT - 1; i >= 0; i--) {
cq_idx = cq_poll->cq_ids[i];
- if (cq_idx == CINT_INVALID_CQ)
+ if (unlikely(cq_idx == CINT_INVALID_CQ))
continue;
cq = &qset->cq[cq_idx];
if (cq->cq_type == CQ_RX) {
@@ -911,14 +905,14 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
if (free_sqe < sq->sqe_thresh ||
free_sqe < otx2_get_sqe_count(pfvf, skb))
- goto fail;
+ return false;
num_segs = skb_shinfo(skb)->nr_frags + 1;
/* If SKB doesn't fit in a single SQE, linearize it.
* TODO: Consider adding JUMP descriptor instead.
*/
- if (num_segs > OTX2_MAX_FRAGS_IN_SQE) {
+ if (unlikely(num_segs > OTX2_MAX_FRAGS_IN_SQE)) {
if (__skb_linearize(skb)) {
dev_kfree_skb_any(skb);
return true;
@@ -961,10 +955,6 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
otx2_sqe_flush(sq, offset);
return true;
-fail:
- netdev_warn(pfvf->netdev, "SQ%d full, SQB count %d Aura count %lld\n",
- qidx, sq->num_sqbs, *sq->aura_fc_addr);
- return false;
}
EXPORT_SYMBOL(otx2_sq_append_skb);
@@ -993,10 +983,10 @@ void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
*iova -= OTX2_HEAD_ROOM;
pa = otx2_iova_to_phys(pfvf->iommu_domain,
*iova);
- dma_unmap_page_attrs(pfvf->dev, *iova,
- RCV_FRAG_LEN,
- DMA_FROM_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC);
+ otx2_dma_unmap_page(pfvf, *iova,
+ RCV_FRAG_LEN,
+ DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
put_page(virt_to_page(phys_to_virt(pa)));
iova++;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 129c693e62aa..218ce0382adb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -132,10 +132,6 @@ static void otx2vf_vfaf_mbox_handler(struct work_struct *work)
}
otx2_mbox_reset(mbox, 0);
-
- /* Clear the IRQ */
- smp_wmb();
- otx2_write64(af_mbox->pfvf, RVU_VF_INT, BIT_ULL(0));
}
static int otx2vf_process_mbox_msg_up(struct otx2_nic *vf,
@@ -535,6 +531,10 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
vf->pdev = pdev;
vf->dev = dev;
vf->iommu_domain = iommu_get_domain_for_dev(dev);
+ if (vf->iommu_domain)
+ vf->iommu_domain_type =
+ ((struct iommu_domain *)vf->iommu_domain)->type;
+
vf->flags |= OTX2_FLAG_INTF_DOWN;
hw = &vf->hw;
hw->pdev = vf->pdev;
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 7e88446ac97a..0b88fb96503f 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -203,7 +203,7 @@ io_error:
static inline u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg)
{
- u16 v;
+ u16 v = 0;
__gm_phy_read(hw, port, reg, &v);
return v;
}
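[Note on the sky2 fix: __gm_phy_read can bail out via the io_error path
without writing the out-parameter, so gm_phy_read would otherwise return
stack garbage. A minimal sketch of the hazard and the fix, with illustrative
names:

#include <errno.h>

/* On failure the out-parameter is never written */
static int demo_phy_read(int link_ok, unsigned short *val)
{
	if (!link_ok)
		return -ETIMEDOUT;
	*val = 0x1234;		/* normal path */
	return 0;
}

static unsigned short demo_phy_read_value(int link_ok)
{
	unsigned short v = 0;	/* defined fallback, as in the fix above */

	demo_phy_read(link_ok, &v);
	return v;		/* 0 instead of indeterminate on failure */
}
]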