Diffstat (limited to 'drivers/net/ethernet/stmicro/stmmac')
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/Makefile            |   2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/common.h            |  59
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c         |   4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c       |  15
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c       |  39
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c      |  13
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c        |  15
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac5.c            | 137
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac5.h            |  51
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c         |  15
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h          |  19
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c     | 111
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c      |  15
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/hwif.c              |  21
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/hwif.h              |  37
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/mmc.h               |  14
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/mmc_core.c          | 117
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac.h            |  14
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_est.c        | 165
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_est.h        |  64
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c    | 195
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c       | 320
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c   |  91
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c        |   4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c         |   8
25 files changed, 1003 insertions, 542 deletions
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 80e598bd4255..26cad4344701 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -6,7 +6,7 @@ stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o dwmac4_descs.o \
dwmac4_dma.o dwmac4_lib.o dwmac4_core.o dwmac5.o hwif.o \
stmmac_tc.o dwxgmac2_core.o dwxgmac2_dma.o dwxgmac2_descs.o \
- stmmac_xdp.o \
+ stmmac_xdp.o stmmac_est.o \
$(stmmac-y)
stmmac-$(CONFIG_STMMAC_SELFTESTS) += stmmac_selftests.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index e3f650e88f82..5ba606a596e7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -59,28 +59,51 @@
#undef FRAME_FILTER_DEBUG
/* #define FRAME_FILTER_DEBUG */
+struct stmmac_q_tx_stats {
+ u64_stats_t tx_bytes;
+ u64_stats_t tx_set_ic_bit;
+ u64_stats_t tx_tso_frames;
+ u64_stats_t tx_tso_nfrags;
+};
+
+struct stmmac_napi_tx_stats {
+ u64_stats_t tx_packets;
+ u64_stats_t tx_pkt_n;
+ u64_stats_t poll;
+ u64_stats_t tx_clean;
+ u64_stats_t tx_set_ic_bit;
+};
+
struct stmmac_txq_stats {
- u64 tx_bytes;
- u64 tx_packets;
- u64 tx_pkt_n;
- u64 tx_normal_irq_n;
- u64 napi_poll;
- u64 tx_clean;
- u64 tx_set_ic_bit;
- u64 tx_tso_frames;
- u64 tx_tso_nfrags;
- struct u64_stats_sync syncp;
+ /* Updates protected by tx queue lock. */
+ struct u64_stats_sync q_syncp;
+ struct stmmac_q_tx_stats q;
+
+ /* Updates protected by NAPI poll logic. */
+ struct u64_stats_sync napi_syncp;
+ struct stmmac_napi_tx_stats napi;
} ____cacheline_aligned_in_smp;
+struct stmmac_napi_rx_stats {
+ u64_stats_t rx_bytes;
+ u64_stats_t rx_packets;
+ u64_stats_t rx_pkt_n;
+ u64_stats_t poll;
+};
+
struct stmmac_rxq_stats {
- u64 rx_bytes;
- u64 rx_packets;
- u64 rx_pkt_n;
- u64 rx_normal_irq_n;
- u64 napi_poll;
- struct u64_stats_sync syncp;
+ /* Updates protected by NAPI poll logic. */
+ struct u64_stats_sync napi_syncp;
+ struct stmmac_napi_rx_stats napi;
} ____cacheline_aligned_in_smp;
+/* Updates on each CPU protected by not allowing nested irqs. */
+struct stmmac_pcpu_stats {
+ struct u64_stats_sync syncp;
+ u64_stats_t rx_normal_irq_n[MTL_MAX_TX_QUEUES];
+ u64_stats_t tx_normal_irq_n[MTL_MAX_RX_QUEUES];
+};
+
/* Extra statistic and debug information exposed by ethtool */
struct stmmac_extra_stats {
/* Transmit errors */
@@ -205,6 +228,7 @@ struct stmmac_extra_stats {
/* per queue statistics */
struct stmmac_txq_stats txq_stats[MTL_MAX_TX_QUEUES];
struct stmmac_rxq_stats rxq_stats[MTL_MAX_RX_QUEUES];
+ struct stmmac_pcpu_stats __percpu *pcpu_stats;
unsigned long rx_dropped;
unsigned long rx_errors;
unsigned long tx_dropped;
@@ -216,6 +240,7 @@ struct stmmac_safety_stats {
unsigned long mac_errors[32];
unsigned long mtl_errors[32];
unsigned long dma_errors[32];
+ unsigned long dma_dpp_errors[32];
};
/* Number of fields in Safety Stats */
@@ -563,6 +588,7 @@ struct mac_device_info {
const struct stmmac_hwtimestamp *ptp;
const struct stmmac_tc_ops *tc;
const struct stmmac_mmc_ops *mmc;
+ const struct stmmac_est_ops *est;
struct dw_xpcs *xpcs;
struct phylink_pcs *lynx_pcs; /* Lynx external PCS */
struct mii_regs mii; /* MII register Addresses */
@@ -580,6 +606,7 @@ struct mac_device_info {
u32 vlan_filter[32];
bool vlan_fail_q_en;
u8 vlan_fail_q;
+ bool hw_vlan_en;
};
struct stmmac_rx_routing {
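[Note: the new stmmac_pcpu_stats block above is only referenced through the __percpu pointer in struct stmmac_extra_stats; the allocation itself lands in stmmac_main.c, which appears in the diffstat but is not part of this excerpt. A minimal probe-time sketch, assuming the usual devm per-CPU netdev helper is used:

	/* Sketch only; the real hunk is in stmmac_main.c (not shown here). */
	priv->xstats.pcpu_stats =
		devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
	if (!priv->xstats.pcpu_stats)
		return -ENOMEM;

devm_netdev_alloc_pcpu_stats() also runs u64_stats_init() on each per-CPU syncp, which is why the interrupt paths below can call u64_stats_update_begin() with no extra setup.]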
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
index 8f730ada71f9..6b65420e11b5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
@@ -353,6 +353,10 @@ static int imx_dwmac_probe(struct platform_device *pdev)
if (data->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
plat_dat->flags |= STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY;
+ /* Default TX Q0 to use TSO and rest TXQ for TBS */
+ for (int i = 1; i < plat_dat->tx_queues_to_use; i++)
+ plat_dat->tx_queues_cfg[i].tbs_en = 1;
+
plat_dat->host_dma_width = dwmac->ops->addr_width;
plat_dat->init = imx_dwmac_init;
plat_dat->exit = imx_dwmac_exit;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 137741b94122..b21d99faa2d0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -441,8 +441,7 @@ static int sun8i_dwmac_dma_interrupt(struct stmmac_priv *priv,
struct stmmac_extra_stats *x, u32 chan,
u32 dir)
{
- struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan];
- struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan];
+ struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats);
int ret = 0;
u32 v;
@@ -455,9 +454,9 @@ static int sun8i_dwmac_dma_interrupt(struct stmmac_priv *priv,
if (v & EMAC_TX_INT) {
ret |= handle_tx;
- u64_stats_update_begin(&txq_stats->syncp);
- txq_stats->tx_normal_irq_n++;
- u64_stats_update_end(&txq_stats->syncp);
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->tx_normal_irq_n[chan]);
+ u64_stats_update_end(&stats->syncp);
}
if (v & EMAC_TX_DMA_STOP_INT)
@@ -479,9 +478,9 @@ static int sun8i_dwmac_dma_interrupt(struct stmmac_priv *priv,
if (v & EMAC_RX_INT) {
ret |= handle_rx;
- u64_stats_update_begin(&rxq_stats->syncp);
- rxq_stats->rx_normal_irq_n++;
- u64_stats_update_end(&rxq_stats->syncp);
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->rx_normal_irq_n[chan]);
+ u64_stats_update_end(&stats->syncp);
}
if (v & EMAC_RX_BUF_UA_INT)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index c6ff1fa0e04d..6b6d0de09619 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -1134,6 +1134,35 @@ static int dwmac4_config_l4_filter(struct mac_device_info *hw, u32 filter_no,
return 0;
}
+static void dwmac4_rx_hw_vlan(struct mac_device_info *hw,
+ struct dma_desc *rx_desc, struct sk_buff *skb)
+{
+ if (hw->desc->get_rx_vlan_valid(rx_desc)) {
+ u16 vid = hw->desc->get_rx_vlan_tci(rx_desc);
+
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
+ }
+}
+
+static void dwmac4_set_hw_vlan_mode(struct mac_device_info *hw)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value = readl(ioaddr + GMAC_VLAN_TAG);
+
+ value &= ~GMAC_VLAN_TAG_CTRL_EVLS_MASK;
+
+ if (hw->hw_vlan_en)
+ /* Always strip VLAN on Receive */
+ value |= GMAC_VLAN_TAG_STRIP_ALL;
+ else
+ /* Do not strip VLAN on Receive */
+ value |= GMAC_VLAN_TAG_STRIP_NONE;
+
+ /* Enable outer VLAN Tag in Rx DMA descriptor */
+ value |= GMAC_VLAN_TAG_CTRL_EVLRXS;
+ writel(value, ioaddr + GMAC_VLAN_TAG);
+}
+
const struct stmmac_ops dwmac4_ops = {
.core_init = dwmac4_core_init,
.phylink_get_caps = dwmac4_phylink_get_caps,
@@ -1175,6 +1204,8 @@ const struct stmmac_ops dwmac4_ops = {
.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
+ .rx_hw_vlan = dwmac4_rx_hw_vlan,
+ .set_hw_vlan_mode = dwmac4_set_hw_vlan_mode,
};
const struct stmmac_ops dwmac410_ops = {
@@ -1216,14 +1247,14 @@ const struct stmmac_ops dwmac410_ops = {
.set_arp_offload = dwmac4_set_arp_offload,
.config_l3_filter = dwmac4_config_l3_filter,
.config_l4_filter = dwmac4_config_l4_filter,
- .est_configure = dwmac5_est_configure,
- .est_irq_status = dwmac5_est_irq_status,
.fpe_configure = dwmac5_fpe_configure,
.fpe_send_mpacket = dwmac5_fpe_send_mpacket,
.fpe_irq_status = dwmac5_fpe_irq_status,
.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
+ .rx_hw_vlan = dwmac4_rx_hw_vlan,
+ .set_hw_vlan_mode = dwmac4_set_hw_vlan_mode,
};
const struct stmmac_ops dwmac510_ops = {
@@ -1269,14 +1300,14 @@ const struct stmmac_ops dwmac510_ops = {
.set_arp_offload = dwmac4_set_arp_offload,
.config_l3_filter = dwmac4_config_l3_filter,
.config_l4_filter = dwmac4_config_l4_filter,
- .est_configure = dwmac5_est_configure,
- .est_irq_status = dwmac5_est_irq_status,
.fpe_configure = dwmac5_fpe_configure,
.fpe_send_mpacket = dwmac5_fpe_send_mpacket,
.fpe_irq_status = dwmac5_fpe_irq_status,
.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
+ .rx_hw_vlan = dwmac4_rx_hw_vlan,
+ .set_hw_vlan_mode = dwmac4_set_hw_vlan_mode,
};
static u32 dwmac4_get_num_vlan(void __iomem *ioaddr)
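[Note: the rx_hw_vlan/set_hw_vlan_mode callbacks added above are only wired into the ops tables here; the consumers live in stmmac_main.c, which is in the diffstat but outside this excerpt. A hedged sketch of how the core is expected to drive them through the hwif macros introduced later in this series:

	/* Sketch only; the consuming hunks are in stmmac_main.c (not shown). */
	/* At init / feature change: let the MAC strip tags when CTAG_RX is set. */
	priv->hw->hw_vlan_en =
		!!(priv->dev->features & NETIF_F_HW_VLAN_CTAG_RX);
	stmmac_set_hw_vlan_mode(priv, priv->hw);

	/* Per received frame, once the last descriptor is known to be valid: */
	if (priv->hw->hw_vlan_en)
		stmmac_rx_hw_vlan(priv, priv->hw, p, skb);

The argument order matches the macros: the leading priv selects priv->hw->mac, and the callback itself receives (hw, rx_desc, skb).]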
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index 89a14084c611..1c5802e0d7f4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -198,6 +198,17 @@ static int dwmac4_get_tx_ls(struct dma_desc *p)
>> TDES3_LAST_DESCRIPTOR_SHIFT;
}
+static u16 dwmac4_wrback_get_rx_vlan_tci(struct dma_desc *p)
+{
+ return (le32_to_cpu(p->des0) & RDES0_VLAN_TAG_MASK);
+}
+
+static bool dwmac4_wrback_get_rx_vlan_valid(struct dma_desc *p)
+{
+ return ((le32_to_cpu(p->des3) & RDES3_LAST_DESCRIPTOR) &&
+ (le32_to_cpu(p->des3) & RDES3_RDES0_VALID));
+}
+
static int dwmac4_wrback_get_rx_frame_len(struct dma_desc *p, int rx_coe)
{
return (le32_to_cpu(p->des3) & RDES3_PACKET_SIZE_MASK);
@@ -551,6 +562,8 @@ const struct stmmac_desc_ops dwmac4_desc_ops = {
.set_tx_owner = dwmac4_set_tx_owner,
.set_rx_owner = dwmac4_set_rx_owner,
.get_tx_ls = dwmac4_get_tx_ls,
+ .get_rx_vlan_tci = dwmac4_wrback_get_rx_vlan_tci,
+ .get_rx_vlan_valid = dwmac4_wrback_get_rx_vlan_valid,
.get_rx_frame_len = dwmac4_wrback_get_rx_frame_len,
.enable_tx_timestamp = dwmac4_rd_enable_tx_timestamp,
.get_tx_timestamp_status = dwmac4_wrback_get_tx_timestamp_status,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
index 9470d3fd2ded..0d185e54eb7e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
@@ -171,8 +171,7 @@ int dwmac4_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(dwmac4_addrs, chan));
u32 intr_en = readl(ioaddr + DMA_CHAN_INTR_ENA(dwmac4_addrs, chan));
- struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan];
- struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan];
+ struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats);
int ret = 0;
if (dir == DMA_DIR_RX)
@@ -201,15 +200,15 @@ int dwmac4_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
}
/* TX/RX NORMAL interrupts */
if (likely(intr_status & DMA_CHAN_STATUS_RI)) {
- u64_stats_update_begin(&rxq_stats->syncp);
- rxq_stats->rx_normal_irq_n++;
- u64_stats_update_end(&rxq_stats->syncp);
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->rx_normal_irq_n[chan]);
+ u64_stats_update_end(&stats->syncp);
ret |= handle_rx;
}
if (likely(intr_status & DMA_CHAN_STATUS_TI)) {
- u64_stats_update_begin(&txq_stats->syncp);
- txq_stats->tx_normal_irq_n++;
- u64_stats_update_end(&txq_stats->syncp);
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->tx_normal_irq_n[chan]);
+ u64_stats_update_end(&stats->syncp);
ret |= handle_tx;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
index 8fd167501fa0..e02cebc3f1b7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
@@ -573,143 +573,6 @@ int dwmac5_flex_pps_config(void __iomem *ioaddr, int index,
return 0;
}
-static int dwmac5_est_write(void __iomem *ioaddr, u32 reg, u32 val, bool gcl)
-{
- u32 ctrl;
-
- writel(val, ioaddr + MTL_EST_GCL_DATA);
-
- ctrl = (reg << ADDR_SHIFT);
- ctrl |= gcl ? 0 : GCRR;
-
- writel(ctrl, ioaddr + MTL_EST_GCL_CONTROL);
-
- ctrl |= SRWO;
- writel(ctrl, ioaddr + MTL_EST_GCL_CONTROL);
-
- return readl_poll_timeout(ioaddr + MTL_EST_GCL_CONTROL,
- ctrl, !(ctrl & SRWO), 100, 5000);
-}
-
-int dwmac5_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg,
- unsigned int ptp_rate)
-{
- int i, ret = 0x0;
- u32 ctrl;
-
- ret |= dwmac5_est_write(ioaddr, BTR_LOW, cfg->btr[0], false);
- ret |= dwmac5_est_write(ioaddr, BTR_HIGH, cfg->btr[1], false);
- ret |= dwmac5_est_write(ioaddr, TER, cfg->ter, false);
- ret |= dwmac5_est_write(ioaddr, LLR, cfg->gcl_size, false);
- ret |= dwmac5_est_write(ioaddr, CTR_LOW, cfg->ctr[0], false);
- ret |= dwmac5_est_write(ioaddr, CTR_HIGH, cfg->ctr[1], false);
- if (ret)
- return ret;
-
- for (i = 0; i < cfg->gcl_size; i++) {
- ret = dwmac5_est_write(ioaddr, i, cfg->gcl[i], true);
- if (ret)
- return ret;
- }
-
- ctrl = readl(ioaddr + MTL_EST_CONTROL);
- ctrl &= ~PTOV;
- ctrl |= ((1000000000 / ptp_rate) * 6) << PTOV_SHIFT;
- if (cfg->enable)
- ctrl |= EEST | SSWL;
- else
- ctrl &= ~EEST;
-
- writel(ctrl, ioaddr + MTL_EST_CONTROL);
-
- /* Configure EST interrupt */
- if (cfg->enable)
- ctrl = (IECGCE | IEHS | IEHF | IEBE | IECC);
- else
- ctrl = 0;
-
- writel(ctrl, ioaddr + MTL_EST_INT_EN);
-
- return 0;
-}
-
-void dwmac5_est_irq_status(void __iomem *ioaddr, struct net_device *dev,
- struct stmmac_extra_stats *x, u32 txqcnt)
-{
- u32 status, value, feqn, hbfq, hbfs, btrl;
- u32 txqcnt_mask = (1 << txqcnt) - 1;
-
- status = readl(ioaddr + MTL_EST_STATUS);
-
- value = (CGCE | HLBS | HLBF | BTRE | SWLC);
-
- /* Return if there is no error */
- if (!(status & value))
- return;
-
- if (status & CGCE) {
- /* Clear Interrupt */
- writel(CGCE, ioaddr + MTL_EST_STATUS);
-
- x->mtl_est_cgce++;
- }
-
- if (status & HLBS) {
- value = readl(ioaddr + MTL_EST_SCH_ERR);
- value &= txqcnt_mask;
-
- x->mtl_est_hlbs++;
-
- /* Clear Interrupt */
- writel(value, ioaddr + MTL_EST_SCH_ERR);
-
- /* Collecting info to shows all the queues that has HLBS
- * issue. The only way to clear this is to clear the
- * statistic
- */
- if (net_ratelimit())
- netdev_err(dev, "EST: HLB(sched) Queue 0x%x\n", value);
- }
-
- if (status & HLBF) {
- value = readl(ioaddr + MTL_EST_FRM_SZ_ERR);
- feqn = value & txqcnt_mask;
-
- value = readl(ioaddr + MTL_EST_FRM_SZ_CAP);
- hbfq = (value & SZ_CAP_HBFQ_MASK(txqcnt)) >> SZ_CAP_HBFQ_SHIFT;
- hbfs = value & SZ_CAP_HBFS_MASK;
-
- x->mtl_est_hlbf++;
-
- /* Clear Interrupt */
- writel(feqn, ioaddr + MTL_EST_FRM_SZ_ERR);
-
- if (net_ratelimit())
- netdev_err(dev, "EST: HLB(size) Queue %u Size %u\n",
- hbfq, hbfs);
- }
-
- if (status & BTRE) {
- if ((status & BTRL) == BTRL_MAX)
- x->mtl_est_btrlm++;
- else
- x->mtl_est_btre++;
-
- btrl = (status & BTRL) >> BTRL_SHIFT;
-
- if (net_ratelimit())
- netdev_info(dev, "EST: BTR Error Loop Count %u\n",
- btrl);
-
- writel(BTRE, ioaddr + MTL_EST_STATUS);
- }
-
- if (status & SWLC) {
- writel(SWLC, ioaddr + MTL_EST_STATUS);
- netdev_info(dev, "EST: SWOL has been switched\n");
- }
-}
-
void dwmac5_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
u32 num_txq, u32 num_rxq,
bool enable)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
index 34e620790eb3..bf33a51d229e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
@@ -39,53 +39,6 @@
#define MAC_PPSx_INTERVAL(x) (0x00000b88 + ((x) * 0x10))
#define MAC_PPSx_WIDTH(x) (0x00000b8c + ((x) * 0x10))
-#define MTL_EST_CONTROL 0x00000c50
-#define PTOV GENMASK(31, 24)
-#define PTOV_SHIFT 24
-#define SSWL BIT(1)
-#define EEST BIT(0)
-
-#define MTL_EST_STATUS 0x00000c58
-#define BTRL GENMASK(11, 8)
-#define BTRL_SHIFT 8
-#define BTRL_MAX (0xF << BTRL_SHIFT)
-#define SWOL BIT(7)
-#define SWOL_SHIFT 7
-#define CGCE BIT(4)
-#define HLBS BIT(3)
-#define HLBF BIT(2)
-#define BTRE BIT(1)
-#define SWLC BIT(0)
-
-#define MTL_EST_SCH_ERR 0x00000c60
-#define MTL_EST_FRM_SZ_ERR 0x00000c64
-#define MTL_EST_FRM_SZ_CAP 0x00000c68
-#define SZ_CAP_HBFS_MASK GENMASK(14, 0)
-#define SZ_CAP_HBFQ_SHIFT 16
-#define SZ_CAP_HBFQ_MASK(_val) ({ typeof(_val) (val) = (_val); \
- ((val) > 4 ? GENMASK(18, 16) : \
- (val) > 2 ? GENMASK(17, 16) : \
- BIT(16)); })
-
-#define MTL_EST_INT_EN 0x00000c70
-#define IECGCE CGCE
-#define IEHS HLBS
-#define IEHF HLBF
-#define IEBE BTRE
-#define IECC SWLC
-
-#define MTL_EST_GCL_CONTROL 0x00000c80
-#define BTR_LOW 0x0
-#define BTR_HIGH 0x1
-#define CTR_LOW 0x2
-#define CTR_HIGH 0x3
-#define TER 0x4
-#define LLR 0x5
-#define ADDR_SHIFT 8
-#define GCRR BIT(2)
-#define SRWO BIT(0)
-#define MTL_EST_GCL_DATA 0x00000c84
-
#define MTL_RXP_CONTROL_STATUS 0x00000ca0
#define RXPI BIT(31)
#define NPE GENMASK(23, 16)
@@ -149,10 +102,6 @@ int dwmac5_rxp_config(void __iomem *ioaddr, struct stmmac_tc_entry *entries,
int dwmac5_flex_pps_config(void __iomem *ioaddr, int index,
struct stmmac_pps_cfg *cfg, bool enable,
u32 sub_second_inc, u32 systime_flags);
-int dwmac5_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg,
- unsigned int ptp_rate);
-void dwmac5_est_irq_status(void __iomem *ioaddr, struct net_device *dev,
- struct stmmac_extra_stats *x, u32 txqcnt);
void dwmac5_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
u32 num_txq, u32 num_rxq,
bool enable);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index 7907d62d3437..85e18f9a22f9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -162,8 +162,7 @@ static void show_rx_process_state(unsigned int status)
int dwmac_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
struct stmmac_extra_stats *x, u32 chan, u32 dir)
{
- struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan];
- struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan];
+ struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats);
int ret = 0;
/* read the status register (CSR5) */
u32 intr_status = readl(ioaddr + DMA_STATUS);
@@ -215,16 +214,16 @@ int dwmac_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 value = readl(ioaddr + DMA_INTR_ENA);
/* to schedule NAPI on real RIE event. */
if (likely(value & DMA_INTR_ENA_RIE)) {
- u64_stats_update_begin(&rxq_stats->syncp);
- rxq_stats->rx_normal_irq_n++;
- u64_stats_update_end(&rxq_stats->syncp);
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->rx_normal_irq_n[chan]);
+ u64_stats_update_end(&stats->syncp);
ret |= handle_rx;
}
}
if (likely(intr_status & DMA_STATUS_TI)) {
- u64_stats_update_begin(&txq_stats->syncp);
- txq_stats->tx_normal_irq_n++;
- u64_stats_update_end(&txq_stats->syncp);
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->tx_normal_irq_n[chan]);
+ u64_stats_update_end(&stats->syncp);
ret |= handle_tx;
}
if (unlikely(intr_status & DMA_STATUS_ERI))
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
index a4e8b498dea9..6a2c7d22df1e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
@@ -284,22 +284,6 @@
#define XGMAC_TC_PRTY_MAP1 0x00001044
#define XGMAC_PSTC(x) GENMASK((x) * 8 + 7, (x) * 8)
#define XGMAC_PSTC_SHIFT(x) ((x) * 8)
-#define XGMAC_MTL_EST_CONTROL 0x00001050
-#define XGMAC_PTOV GENMASK(31, 23)
-#define XGMAC_PTOV_SHIFT 23
-#define XGMAC_SSWL BIT(1)
-#define XGMAC_EEST BIT(0)
-#define XGMAC_MTL_EST_GCL_CONTROL 0x00001080
-#define XGMAC_BTR_LOW 0x0
-#define XGMAC_BTR_HIGH 0x1
-#define XGMAC_CTR_LOW 0x2
-#define XGMAC_CTR_HIGH 0x3
-#define XGMAC_TER 0x4
-#define XGMAC_LLR 0x5
-#define XGMAC_ADDR_SHIFT 8
-#define XGMAC_GCRR BIT(2)
-#define XGMAC_SRWO BIT(0)
-#define XGMAC_MTL_EST_GCL_DATA 0x00001084
#define XGMAC_MTL_RXP_CONTROL_STATUS 0x000010a0
#define XGMAC_RXPI BIT(31)
#define XGMAC_NPE GENMASK(23, 16)
@@ -319,6 +303,8 @@
#define XGMAC_RXCEIE BIT(4)
#define XGMAC_TXCEIE BIT(0)
#define XGMAC_MTL_ECC_INT_STATUS 0x000010cc
+#define XGMAC_MTL_DPP_CONTROL 0x000010e0
+#define XGMAC_DPP_DISABLE BIT(0)
#define XGMAC_MTL_TXQ_OPMODE(x) (0x00001100 + (0x80 * (x)))
#define XGMAC_TQS GENMASK(25, 16)
#define XGMAC_TQS_SHIFT 16
@@ -401,6 +387,7 @@
#define XGMAC_DCEIE BIT(1)
#define XGMAC_TCEIE BIT(0)
#define XGMAC_DMA_ECC_INT_STATUS 0x0000306c
+#define XGMAC_DMA_DPP_INT_STATUS 0x00003074
#define XGMAC_DMA_CH_CONTROL(x) (0x00003100 + (0x80 * (x)))
#define XGMAC_SPH BIT(24)
#define XGMAC_PBLx8 BIT(16)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index a74e71db79f9..1af2f89a0504 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -830,6 +830,44 @@ static const struct dwxgmac3_error_desc dwxgmac3_dma_errors[32]= {
{ false, "UNKNOWN", "Unknown Error" }, /* 31 */
};
+#define DPP_RX_ERR "Read Rx Descriptor Parity checker Error"
+#define DPP_TX_ERR "Read Tx Descriptor Parity checker Error"
+
+static const struct dwxgmac3_error_desc dwxgmac3_dma_dpp_errors[32] = {
+ { true, "TDPES0", DPP_TX_ERR },
+ { true, "TDPES1", DPP_TX_ERR },
+ { true, "TDPES2", DPP_TX_ERR },
+ { true, "TDPES3", DPP_TX_ERR },
+ { true, "TDPES4", DPP_TX_ERR },
+ { true, "TDPES5", DPP_TX_ERR },
+ { true, "TDPES6", DPP_TX_ERR },
+ { true, "TDPES7", DPP_TX_ERR },
+ { true, "TDPES8", DPP_TX_ERR },
+ { true, "TDPES9", DPP_TX_ERR },
+ { true, "TDPES10", DPP_TX_ERR },
+ { true, "TDPES11", DPP_TX_ERR },
+ { true, "TDPES12", DPP_TX_ERR },
+ { true, "TDPES13", DPP_TX_ERR },
+ { true, "TDPES14", DPP_TX_ERR },
+ { true, "TDPES15", DPP_TX_ERR },
+ { true, "RDPES0", DPP_RX_ERR },
+ { true, "RDPES1", DPP_RX_ERR },
+ { true, "RDPES2", DPP_RX_ERR },
+ { true, "RDPES3", DPP_RX_ERR },
+ { true, "RDPES4", DPP_RX_ERR },
+ { true, "RDPES5", DPP_RX_ERR },
+ { true, "RDPES6", DPP_RX_ERR },
+ { true, "RDPES7", DPP_RX_ERR },
+ { true, "RDPES8", DPP_RX_ERR },
+ { true, "RDPES9", DPP_RX_ERR },
+ { true, "RDPES10", DPP_RX_ERR },
+ { true, "RDPES11", DPP_RX_ERR },
+ { true, "RDPES12", DPP_RX_ERR },
+ { true, "RDPES13", DPP_RX_ERR },
+ { true, "RDPES14", DPP_RX_ERR },
+ { true, "RDPES15", DPP_RX_ERR },
+};
+
static void dwxgmac3_handle_dma_err(struct net_device *ndev,
void __iomem *ioaddr, bool correctable,
struct stmmac_safety_stats *stats)
@@ -841,6 +879,13 @@ static void dwxgmac3_handle_dma_err(struct net_device *ndev,
dwxgmac3_log_error(ndev, value, correctable, "DMA",
dwxgmac3_dma_errors, STAT_OFF(dma_errors), stats);
+
+ value = readl(ioaddr + XGMAC_DMA_DPP_INT_STATUS);
+ writel(value, ioaddr + XGMAC_DMA_DPP_INT_STATUS);
+
+ dwxgmac3_log_error(ndev, value, false, "DMA_DPP",
+ dwxgmac3_dma_dpp_errors,
+ STAT_OFF(dma_dpp_errors), stats);
}
static int
@@ -881,6 +926,12 @@ dwxgmac3_safety_feat_config(void __iomem *ioaddr, unsigned int asp,
value |= XGMAC_TMOUTEN; /* FSM Timeout Feature */
writel(value, ioaddr + XGMAC_MAC_FSM_CONTROL);
+ /* 5. Enable Data Path Parity Protection */
+ value = readl(ioaddr + XGMAC_MTL_DPP_CONTROL);
+ /* already enabled by default, explicit enable it again */
+ value &= ~XGMAC_DPP_DISABLE;
+ writel(value, ioaddr + XGMAC_MTL_DPP_CONTROL);
+
return 0;
}
@@ -914,7 +965,11 @@ static int dwxgmac3_safety_feat_irq_status(struct net_device *ndev,
ret |= !corr;
}
- err = dma & (XGMAC_DEUIS | XGMAC_DECIS);
+ /* DMA_DPP_Interrupt_Status is indicated by MCSIS bit in
+ * DMA_Safety_Interrupt_Status, so we handle DMA Data Path
+ * Parity Errors here
+ */
+ err = dma & (XGMAC_DEUIS | XGMAC_DECIS | XGMAC_MCSIS);
corr = dma & XGMAC_DECIS;
if (err) {
dwxgmac3_handle_dma_err(ndev, ioaddr, corr, stats);
@@ -930,6 +985,7 @@ static const struct dwxgmac3_error {
{ dwxgmac3_mac_errors },
{ dwxgmac3_mtl_errors },
{ dwxgmac3_dma_errors },
+ { dwxgmac3_dma_dpp_errors },
};
static int dwxgmac3_safety_feat_dump(struct stmmac_safety_stats *stats,
@@ -1433,57 +1489,6 @@ static void dwxgmac2_set_arp_offload(struct mac_device_info *hw, bool en,
writel(value, ioaddr + XGMAC_RX_CONFIG);
}
-static int dwxgmac3_est_write(void __iomem *ioaddr, u32 reg, u32 val, bool gcl)
-{
- u32 ctrl;
-
- writel(val, ioaddr + XGMAC_MTL_EST_GCL_DATA);
-
- ctrl = (reg << XGMAC_ADDR_SHIFT);
- ctrl |= gcl ? 0 : XGMAC_GCRR;
-
- writel(ctrl, ioaddr + XGMAC_MTL_EST_GCL_CONTROL);
-
- ctrl |= XGMAC_SRWO;
- writel(ctrl, ioaddr + XGMAC_MTL_EST_GCL_CONTROL);
-
- return readl_poll_timeout_atomic(ioaddr + XGMAC_MTL_EST_GCL_CONTROL,
- ctrl, !(ctrl & XGMAC_SRWO), 100, 5000);
-}
-
-static int dwxgmac3_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg,
- unsigned int ptp_rate)
-{
- int i, ret = 0x0;
- u32 ctrl;
-
- ret |= dwxgmac3_est_write(ioaddr, XGMAC_BTR_LOW, cfg->btr[0], false);
- ret |= dwxgmac3_est_write(ioaddr, XGMAC_BTR_HIGH, cfg->btr[1], false);
- ret |= dwxgmac3_est_write(ioaddr, XGMAC_TER, cfg->ter, false);
- ret |= dwxgmac3_est_write(ioaddr, XGMAC_LLR, cfg->gcl_size, false);
- ret |= dwxgmac3_est_write(ioaddr, XGMAC_CTR_LOW, cfg->ctr[0], false);
- ret |= dwxgmac3_est_write(ioaddr, XGMAC_CTR_HIGH, cfg->ctr[1], false);
- if (ret)
- return ret;
-
- for (i = 0; i < cfg->gcl_size; i++) {
- ret = dwxgmac3_est_write(ioaddr, i, cfg->gcl[i], true);
- if (ret)
- return ret;
- }
-
- ctrl = readl(ioaddr + XGMAC_MTL_EST_CONTROL);
- ctrl &= ~XGMAC_PTOV;
- ctrl |= ((1000000000 / ptp_rate) * 9) << XGMAC_PTOV_SHIFT;
- if (cfg->enable)
- ctrl |= XGMAC_EEST | XGMAC_SSWL;
- else
- ctrl &= ~XGMAC_EEST;
-
- writel(ctrl, ioaddr + XGMAC_MTL_EST_CONTROL);
- return 0;
-}
-
static void dwxgmac3_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
u32 num_txq,
u32 num_rxq, bool enable)
@@ -1553,7 +1558,6 @@ const struct stmmac_ops dwxgmac210_ops = {
.config_l3_filter = dwxgmac2_config_l3_filter,
.config_l4_filter = dwxgmac2_config_l4_filter,
.set_arp_offload = dwxgmac2_set_arp_offload,
- .est_configure = dwxgmac3_est_configure,
.fpe_configure = dwxgmac3_fpe_configure,
};
@@ -1615,7 +1619,6 @@ const struct stmmac_ops dwxlgmac2_ops = {
.config_l3_filter = dwxgmac2_config_l3_filter,
.config_l4_filter = dwxgmac2_config_l4_filter,
.set_arp_offload = dwxgmac2_set_arp_offload,
- .est_configure = dwxgmac3_est_configure,
.fpe_configure = dwxgmac3_fpe_configure,
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
index 3cde695fec91..dd2ab6185c40 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
@@ -337,8 +337,7 @@ static int dwxgmac2_dma_interrupt(struct stmmac_priv *priv,
struct stmmac_extra_stats *x, u32 chan,
u32 dir)
{
- struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan];
- struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan];
+ struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats);
u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan));
u32 intr_en = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
int ret = 0;
@@ -367,15 +366,15 @@ static int dwxgmac2_dma_interrupt(struct stmmac_priv *priv,
/* TX/RX NORMAL interrupts */
if (likely(intr_status & XGMAC_NIS)) {
if (likely(intr_status & XGMAC_RI)) {
- u64_stats_update_begin(&rxq_stats->syncp);
- rxq_stats->rx_normal_irq_n++;
- u64_stats_update_end(&rxq_stats->syncp);
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->rx_normal_irq_n[chan]);
+ u64_stats_update_end(&stats->syncp);
ret |= handle_rx;
}
if (likely(intr_status & (XGMAC_TI | XGMAC_TBU))) {
- u64_stats_update_begin(&txq_stats->syncp);
- txq_stats->tx_normal_irq_n++;
- u64_stats_update_end(&txq_stats->syncp);
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->tx_normal_irq_n[chan]);
+ u64_stats_update_end(&stats->syncp);
ret |= handle_tx;
}
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c
index b8ba8f2d8041..29367105df54 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.c
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c
@@ -7,6 +7,7 @@
#include "common.h"
#include "stmmac.h"
#include "stmmac_ptp.h"
+#include "stmmac_est.h"
static u32 stmmac_get_id(struct stmmac_priv *priv, u32 id_reg)
{
@@ -114,6 +115,7 @@ static const struct stmmac_hwif_entry {
const void *mode;
const void *tc;
const void *mmc;
+ const void *est;
int (*setup)(struct stmmac_priv *priv);
int (*quirks)(struct stmmac_priv *priv);
} stmmac_hw[] = {
@@ -162,6 +164,7 @@ static const struct stmmac_hwif_entry {
.regs = {
.ptp_off = PTP_GMAC4_OFFSET,
.mmc_off = MMC_GMAC4_OFFSET,
+ .est_off = EST_GMAC4_OFFSET,
},
.desc = &dwmac4_desc_ops,
.dma = &dwmac4_dma_ops,
@@ -170,6 +173,7 @@ static const struct stmmac_hwif_entry {
.mode = NULL,
.tc = &dwmac510_tc_ops,
.mmc = &dwmac_mmc_ops,
+ .est = &dwmac510_est_ops,
.setup = dwmac4_setup,
.quirks = stmmac_dwmac4_quirks,
}, {
@@ -180,6 +184,7 @@ static const struct stmmac_hwif_entry {
.regs = {
.ptp_off = PTP_GMAC4_OFFSET,
.mmc_off = MMC_GMAC4_OFFSET,
+ .est_off = EST_GMAC4_OFFSET,
},
.desc = &dwmac4_desc_ops,
.dma = &dwmac4_dma_ops,
@@ -188,6 +193,7 @@ static const struct stmmac_hwif_entry {
.mode = &dwmac4_ring_mode_ops,
.tc = &dwmac510_tc_ops,
.mmc = &dwmac_mmc_ops,
+ .est = &dwmac510_est_ops,
.setup = dwmac4_setup,
.quirks = NULL,
}, {
@@ -198,6 +204,7 @@ static const struct stmmac_hwif_entry {
.regs = {
.ptp_off = PTP_GMAC4_OFFSET,
.mmc_off = MMC_GMAC4_OFFSET,
+ .est_off = EST_GMAC4_OFFSET,
},
.desc = &dwmac4_desc_ops,
.dma = &dwmac410_dma_ops,
@@ -206,6 +213,7 @@ static const struct stmmac_hwif_entry {
.mode = &dwmac4_ring_mode_ops,
.tc = &dwmac510_tc_ops,
.mmc = &dwmac_mmc_ops,
+ .est = &dwmac510_est_ops,
.setup = dwmac4_setup,
.quirks = NULL,
}, {
@@ -216,6 +224,7 @@ static const struct stmmac_hwif_entry {
.regs = {
.ptp_off = PTP_GMAC4_OFFSET,
.mmc_off = MMC_GMAC4_OFFSET,
+ .est_off = EST_GMAC4_OFFSET,
},
.desc = &dwmac4_desc_ops,
.dma = &dwmac410_dma_ops,
@@ -224,6 +233,7 @@ static const struct stmmac_hwif_entry {
.mode = &dwmac4_ring_mode_ops,
.tc = &dwmac510_tc_ops,
.mmc = &dwmac_mmc_ops,
+ .est = &dwmac510_est_ops,
.setup = dwmac4_setup,
.quirks = NULL,
}, {
@@ -235,6 +245,7 @@ static const struct stmmac_hwif_entry {
.regs = {
.ptp_off = PTP_XGMAC_OFFSET,
.mmc_off = MMC_XGMAC_OFFSET,
+ .est_off = EST_XGMAC_OFFSET,
},
.desc = &dwxgmac210_desc_ops,
.dma = &dwxgmac210_dma_ops,
@@ -243,6 +254,7 @@ static const struct stmmac_hwif_entry {
.mode = NULL,
.tc = &dwmac510_tc_ops,
.mmc = &dwxgmac_mmc_ops,
+ .est = &dwmac510_est_ops,
.setup = dwxgmac2_setup,
.quirks = NULL,
}, {
@@ -254,6 +266,7 @@ static const struct stmmac_hwif_entry {
.regs = {
.ptp_off = PTP_XGMAC_OFFSET,
.mmc_off = MMC_XGMAC_OFFSET,
+ .est_off = EST_XGMAC_OFFSET,
},
.desc = &dwxgmac210_desc_ops,
.dma = &dwxgmac210_dma_ops,
@@ -262,6 +275,7 @@ static const struct stmmac_hwif_entry {
.mode = NULL,
.tc = &dwmac510_tc_ops,
.mmc = &dwxgmac_mmc_ops,
+ .est = &dwmac510_est_ops,
.setup = dwxlgmac2_setup,
.quirks = stmmac_dwxlgmac_quirks,
},
@@ -296,6 +310,10 @@ int stmmac_hwif_init(struct stmmac_priv *priv)
(needs_gmac4 ? PTP_GMAC4_OFFSET : PTP_GMAC3_X_OFFSET);
priv->mmcaddr = priv->ioaddr +
(needs_gmac4 ? MMC_GMAC4_OFFSET : MMC_GMAC3_X_OFFSET);
+ if (needs_gmac4)
+ priv->estaddr = priv->ioaddr + EST_GMAC4_OFFSET;
+ else if (needs_xgmac)
+ priv->estaddr = priv->ioaddr + EST_XGMAC_OFFSET;
/* Check for HW specific setup first */
if (priv->plat->setup) {
@@ -332,10 +350,13 @@ int stmmac_hwif_init(struct stmmac_priv *priv)
mac->mode = mac->mode ? : entry->mode;
mac->tc = mac->tc ? : entry->tc;
mac->mmc = mac->mmc ? : entry->mmc;
+ mac->est = mac->est ? : entry->est;
priv->hw = mac;
priv->ptpaddr = priv->ioaddr + entry->regs.ptp_off;
priv->mmcaddr = priv->ioaddr + entry->regs.mmc_off;
+ if (entry->est)
+ priv->estaddr = priv->ioaddr + entry->regs.est_off;
/* Entry found */
if (needs_setup) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index 68aa2d5ca6e5..7be04b54738b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -56,6 +56,10 @@ struct stmmac_desc_ops {
void (*set_tx_ic)(struct dma_desc *p);
/* Last tx segment reports the transmit status */
int (*get_tx_ls)(struct dma_desc *p);
+ /* Get the tag of the descriptor */
+ u16 (*get_rx_vlan_tci)(struct dma_desc *p);
+ /* Get the valid status of descriptor */
+ bool (*get_rx_vlan_valid)(struct dma_desc *p);
/* Return the transmit status looking at the TDES1 */
int (*tx_status)(struct stmmac_extra_stats *x,
struct dma_desc *p, void __iomem *ioaddr);
@@ -117,6 +121,10 @@ struct stmmac_desc_ops {
stmmac_do_void_callback(__priv, desc, set_tx_ic, __args)
#define stmmac_get_tx_ls(__priv, __args...) \
stmmac_do_callback(__priv, desc, get_tx_ls, __args)
+#define stmmac_get_rx_vlan_tci(__priv, __args...) \
+ stmmac_do_callback(__priv, desc, get_rx_vlan_tci, __args)
+#define stmmac_get_rx_vlan_valid(__priv, __args...) \
+ stmmac_do_callback(__priv, desc, get_rx_vlan_valid, __args)
#define stmmac_tx_status(__priv, __args...) \
stmmac_do_callback(__priv, desc, tx_status, __args)
#define stmmac_get_tx_len(__priv, __args...) \
@@ -388,6 +396,9 @@ struct stmmac_ops {
void (*update_vlan_hash)(struct mac_device_info *hw, u32 hash,
__le16 perfect_match, bool is_double);
void (*enable_vlan)(struct mac_device_info *hw, u32 type);
+ void (*rx_hw_vlan)(struct mac_device_info *hw, struct dma_desc *rx_desc,
+ struct sk_buff *skb);
+ void (*set_hw_vlan_mode)(struct mac_device_info *hw);
int (*add_hw_vlan_rx_fltr)(struct net_device *dev,
struct mac_device_info *hw,
__be16 proto, u16 vid);
@@ -408,10 +419,6 @@ struct stmmac_ops {
bool en, bool udp, bool sa, bool inv,
u32 match);
void (*set_arp_offload)(struct mac_device_info *hw, bool en, u32 addr);
- int (*est_configure)(void __iomem *ioaddr, struct stmmac_est *cfg,
- unsigned int ptp_rate);
- void (*est_irq_status)(void __iomem *ioaddr, struct net_device *dev,
- struct stmmac_extra_stats *x, u32 txqcnt);
void (*fpe_configure)(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
u32 num_txq, u32 num_rxq,
bool enable);
@@ -499,6 +506,10 @@ struct stmmac_ops {
stmmac_do_void_callback(__priv, mac, update_vlan_hash, __args)
#define stmmac_enable_vlan(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, enable_vlan, __args)
+#define stmmac_rx_hw_vlan(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, rx_hw_vlan, __args)
+#define stmmac_set_hw_vlan_mode(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, set_hw_vlan_mode, __args)
#define stmmac_add_hw_vlan_rx_fltr(__priv, __args...) \
stmmac_do_callback(__priv, mac, add_hw_vlan_rx_fltr, __args)
#define stmmac_del_hw_vlan_rx_fltr(__priv, __args...) \
@@ -515,10 +526,6 @@ struct stmmac_ops {
stmmac_do_callback(__priv, mac, config_l4_filter, __args)
#define stmmac_set_arp_offload(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, set_arp_offload, __args)
-#define stmmac_est_configure(__priv, __args...) \
- stmmac_do_callback(__priv, mac, est_configure, __args)
-#define stmmac_est_irq_status(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, est_irq_status, __args)
#define stmmac_fpe_configure(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, fpe_configure, __args)
#define stmmac_fpe_send_mpacket(__priv, __args...) \
@@ -644,9 +651,22 @@ struct stmmac_mmc_ops {
#define stmmac_mmc_read(__priv, __args...) \
stmmac_do_void_callback(__priv, mmc, read, __args)
+struct stmmac_est_ops {
+ int (*configure)(struct stmmac_priv *priv, struct stmmac_est *cfg,
+ unsigned int ptp_rate);
+ void (*irq_status)(struct stmmac_priv *priv, struct net_device *dev,
+ struct stmmac_extra_stats *x, u32 txqcnt);
+};
+
+#define stmmac_est_configure(__priv, __args...) \
+ stmmac_do_callback(__priv, est, configure, __args)
+#define stmmac_est_irq_status(__priv, __args...) \
+ stmmac_do_void_callback(__priv, est, irq_status, __args)
+
struct stmmac_regs_off {
u32 ptp_off;
u32 mmc_off;
+ u32 est_off;
};
extern const struct stmmac_ops dwmac100_ops;
@@ -665,6 +685,7 @@ extern const struct stmmac_dma_ops dwxgmac210_dma_ops;
extern const struct stmmac_desc_ops dwxgmac210_desc_ops;
extern const struct stmmac_mmc_ops dwmac_mmc_ops;
extern const struct stmmac_mmc_ops dwxgmac_mmc_ops;
+extern const struct stmmac_est_ops dwmac510_est_ops;
#define GMAC_VERSION 0x00000020 /* GMAC CORE Version */
#define GMAC4_VERSION 0x00000110 /* GMAC4+ CORE Version */
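[Note: with est_configure/est_irq_status moved from stmmac_ops into the new stmmac_est_ops, the callbacks now take struct stmmac_priv first so they can pick priv->estaddr internally. A sketch of a call site under the new macros, assumed here since the updated callers sit in stmmac_tc.c/stmmac_main.c, which this excerpt does not include:

	/* Sketch only; real callers are in stmmac_tc.c / stmmac_main.c. */
	ret = stmmac_est_configure(priv, priv, priv->plat->est,
				   priv->plat->clk_ptp_rate);
	if (ret)
		netdev_err(priv->dev, "failed to configure EST\n");

The first priv selects hw->est through the macro; the second is the argument the callback itself receives.]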
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h
index a0c05925883e..14c9d2637dfe 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc.h
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h
@@ -52,6 +52,8 @@ struct stmmac_counters {
unsigned int mmc_tx_excessdef;
unsigned int mmc_tx_pause_frame;
unsigned int mmc_tx_vlan_frame_g;
+ unsigned int mmc_tx_lpi_usec;
+ unsigned int mmc_tx_lpi_tran;
/* MMC RX counter registers */
unsigned int mmc_rx_framecount_gb;
@@ -78,9 +80,16 @@ struct stmmac_counters {
unsigned int mmc_rx_fifo_overflow;
unsigned int mmc_rx_vlan_frames_gb;
unsigned int mmc_rx_watchdog_error;
+ unsigned int mmc_rx_lpi_usec;
+ unsigned int mmc_rx_lpi_tran;
+ unsigned int mmc_rx_discard_frames_gb;
+ unsigned int mmc_rx_discard_octets_gb;
+ unsigned int mmc_rx_align_err_frames;
+
/* IPC */
unsigned int mmc_rx_ipc_intr_mask;
unsigned int mmc_rx_ipc_intr;
+
/* IPv4 */
unsigned int mmc_rx_ipv4_gd;
unsigned int mmc_rx_ipv4_hderr;
@@ -118,9 +127,14 @@ struct stmmac_counters {
unsigned int mmc_rx_icmp_gd_octets;
unsigned int mmc_rx_icmp_err_octets;
+ /* Stream-Gate Filter */
+ unsigned int mmc_sgf_pass_fragment_cntr;
+ unsigned int mmc_sgf_fail_fragment_cntr;
+
/* FPE */
unsigned int mmc_tx_fpe_fragment_cntr;
unsigned int mmc_tx_hold_req_cntr;
+ unsigned int mmc_tx_gate_overrun_cntr;
unsigned int mmc_rx_packet_assembly_err_cntr;
unsigned int mmc_rx_packet_smd_err_cntr;
unsigned int mmc_rx_packet_assembly_ok_cntr;
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index 6a7c1d325c46..8597c6abae8d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -177,9 +177,12 @@
#define MMC_XGMAC_RX_DISCARD_OCT_GB 0x1b4
#define MMC_XGMAC_RX_ALIGN_ERR_PKT 0x1bc
+#define MMC_XGMAC_SGF_PASS_PKT 0x1f0
+#define MMC_XGMAC_SGF_FAIL_PKT 0x1f4
#define MMC_XGMAC_TX_FPE_INTR_MASK 0x204
#define MMC_XGMAC_TX_FPE_FRAG 0x208
#define MMC_XGMAC_TX_HOLD_REQ 0x20c
+#define MMC_XGMAC_TX_GATE_OVERRUN 0x210
#define MMC_XGMAC_RX_FPE_INTR_MASK 0x224
#define MMC_XGMAC_RX_PKT_ASSEMBLY_ERR 0x228
#define MMC_XGMAC_RX_PKT_SMD_ERR 0x22c
@@ -187,6 +190,40 @@
#define MMC_XGMAC_RX_FPE_FRAG 0x234
#define MMC_XGMAC_RX_IPC_INTR_MASK 0x25c
+#define MMC_XGMAC_RX_IPV4_GD 0x264
+#define MMC_XGMAC_RX_IPV4_HDERR 0x26c
+#define MMC_XGMAC_RX_IPV4_NOPAY 0x274
+#define MMC_XGMAC_RX_IPV4_FRAG 0x27c
+#define MMC_XGMAC_RX_IPV4_UDSBL 0x284
+
+#define MMC_XGMAC_RX_IPV6_GD 0x28c
+#define MMC_XGMAC_RX_IPV6_HDERR 0x294
+#define MMC_XGMAC_RX_IPV6_NOPAY 0x29c
+
+#define MMC_XGMAC_RX_UDP_GD 0x2a4
+#define MMC_XGMAC_RX_UDP_ERR 0x2ac
+#define MMC_XGMAC_RX_TCP_GD 0x2b4
+#define MMC_XGMAC_RX_TCP_ERR 0x2bc
+#define MMC_XGMAC_RX_ICMP_GD 0x2c4
+#define MMC_XGMAC_RX_ICMP_ERR 0x2cc
+
+#define MMC_XGMAC_RX_IPV4_GD_OCTETS 0x2d4
+#define MMC_XGMAC_RX_IPV4_HDERR_OCTETS 0x2dc
+#define MMC_XGMAC_RX_IPV4_NOPAY_OCTETS 0x2e4
+#define MMC_XGMAC_RX_IPV4_FRAG_OCTETS 0x2ec
+#define MMC_XGMAC_RX_IPV4_UDSBL_OCTETS 0x2f4
+
+#define MMC_XGMAC_RX_IPV6_GD_OCTETS 0x2fc
+#define MMC_XGMAC_RX_IPV6_HDERR_OCTETS 0x304
+#define MMC_XGMAC_RX_IPV6_NOPAY_OCTETS 0x30c
+
+#define MMC_XGMAC_RX_UDP_GD_OCTETS 0x314
+#define MMC_XGMAC_RX_UDP_ERR_OCTETS 0x31c
+#define MMC_XGMAC_RX_TCP_GD_OCTETS 0x324
+#define MMC_XGMAC_RX_TCP_ERR_OCTETS 0x32c
+#define MMC_XGMAC_RX_ICMP_GD_OCTETS 0x334
+#define MMC_XGMAC_RX_ICMP_ERR_OCTETS 0x33c
+
static void dwmac_mmc_ctrl(void __iomem *mmcaddr, unsigned int mode)
{
u32 value = readl(mmcaddr + MMC_CNTRL);
@@ -414,6 +451,8 @@ static void dwxgmac_mmc_read(void __iomem *mmcaddr, struct stmmac_counters *mmc)
&mmc->mmc_tx_pause_frame);
dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_VLAN_PKT_G,
&mmc->mmc_tx_vlan_frame_g);
+ mmc->mmc_tx_lpi_usec += readl(mmcaddr + MMC_XGMAC_TX_LPI_USEC);
+ mmc->mmc_tx_lpi_tran += readl(mmcaddr + MMC_XGMAC_TX_LPI_TRAN);
/* MMC RX counter registers */
dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_PKT_GB,
@@ -459,9 +498,23 @@ static void dwxgmac_mmc_read(void __iomem *mmcaddr, struct stmmac_counters *mmc)
dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_VLAN_PKT_GB,
&mmc->mmc_rx_vlan_frames_gb);
mmc->mmc_rx_watchdog_error += readl(mmcaddr + MMC_XGMAC_RX_WATCHDOG_ERR);
-
+ mmc->mmc_rx_lpi_usec += readl(mmcaddr + MMC_XGMAC_RX_LPI_USEC);
+ mmc->mmc_rx_lpi_tran += readl(mmcaddr + MMC_XGMAC_RX_LPI_TRAN);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_DISCARD_PKT_GB,
+ &mmc->mmc_rx_discard_frames_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_DISCARD_OCT_GB,
+ &mmc->mmc_rx_discard_octets_gb);
+ mmc->mmc_rx_align_err_frames +=
+ readl(mmcaddr + MMC_XGMAC_RX_ALIGN_ERR_PKT);
+
+ mmc->mmc_sgf_pass_fragment_cntr +=
+ readl(mmcaddr + MMC_XGMAC_SGF_PASS_PKT);
+ mmc->mmc_sgf_fail_fragment_cntr +=
+ readl(mmcaddr + MMC_XGMAC_SGF_FAIL_PKT);
mmc->mmc_tx_fpe_fragment_cntr += readl(mmcaddr + MMC_XGMAC_TX_FPE_FRAG);
mmc->mmc_tx_hold_req_cntr += readl(mmcaddr + MMC_XGMAC_TX_HOLD_REQ);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_GATE_OVERRUN,
+ &mmc->mmc_tx_gate_overrun_cntr);
mmc->mmc_rx_packet_assembly_err_cntr +=
readl(mmcaddr + MMC_XGMAC_RX_PKT_ASSEMBLY_ERR);
mmc->mmc_rx_packet_smd_err_cntr +=
@@ -470,6 +523,68 @@ static void dwxgmac_mmc_read(void __iomem *mmcaddr, struct stmmac_counters *mmc)
readl(mmcaddr + MMC_XGMAC_RX_PKT_ASSEMBLY_OK);
mmc->mmc_rx_fpe_fragment_cntr +=
readl(mmcaddr + MMC_XGMAC_RX_FPE_FRAG);
+
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_IPV4_GD,
+ &mmc->mmc_rx_ipv4_gd);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_IPV4_HDERR,
+ &mmc->mmc_rx_ipv4_hderr);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_IPV4_NOPAY,
+ &mmc->mmc_rx_ipv4_nopay);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_IPV4_FRAG,
+ &mmc->mmc_rx_ipv4_frag);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_IPV4_UDSBL,
+ &mmc->mmc_rx_ipv4_udsbl);
+
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_IPV6_GD,
+ &mmc->mmc_rx_ipv6_gd);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_IPV6_HDERR,
+ &mmc->mmc_rx_ipv6_hderr);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_IPV6_NOPAY,
+ &mmc->mmc_rx_ipv6_nopay);
+
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_UDP_GD,
+ &mmc->mmc_rx_udp_gd);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_UDP_ERR,
+ &mmc->mmc_rx_udp_err);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_TCP_GD,
+ &mmc->mmc_rx_tcp_gd);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_TCP_ERR,
+ &mmc->mmc_rx_tcp_err);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_ICMP_GD,
+ &mmc->mmc_rx_icmp_gd);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_ICMP_ERR,
+ &mmc->mmc_rx_icmp_err);
+
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_IPV4_GD_OCTETS,
+ &mmc->mmc_rx_ipv4_gd_octets);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_IPV4_HDERR_OCTETS,
+ &mmc->mmc_rx_ipv4_hderr_octets);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_IPV4_NOPAY_OCTETS,
+ &mmc->mmc_rx_ipv4_nopay_octets);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_IPV4_FRAG_OCTETS,
+ &mmc->mmc_rx_ipv4_frag_octets);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_IPV4_UDSBL_OCTETS,
+ &mmc->mmc_rx_ipv4_udsbl_octets);
+
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_IPV6_GD_OCTETS,
+ &mmc->mmc_rx_ipv6_gd_octets);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_IPV6_HDERR_OCTETS,
+ &mmc->mmc_rx_ipv6_hderr_octets);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_IPV6_NOPAY_OCTETS,
+ &mmc->mmc_rx_ipv6_nopay_octets);
+
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_UDP_GD_OCTETS,
+ &mmc->mmc_rx_udp_gd_octets);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_UDP_ERR_OCTETS,
+ &mmc->mmc_rx_udp_err_octets);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_TCP_GD_OCTETS,
+ &mmc->mmc_rx_tcp_gd_octets);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_TCP_ERR_OCTETS,
+ &mmc->mmc_rx_tcp_err_octets);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_ICMP_GD_OCTETS,
+ &mmc->mmc_rx_icmp_gd_octets);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_ICMP_ERR_OCTETS,
+ &mmc->mmc_rx_icmp_err_octets);
}
const struct stmmac_mmc_ops dwxgmac_mmc_ops = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index cd7a9768de5f..f155e4841c62 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -51,6 +51,7 @@ struct stmmac_tx_info {
bool last_segment;
bool is_jumbo;
enum stmmac_txbuf_type buf_type;
+ struct xsk_tx_metadata_compl xsk_meta;
};
#define STMMAC_TBS_AVAIL BIT(0)
@@ -100,6 +101,17 @@ struct stmmac_xdp_buff {
struct dma_desc *ndesc;
};
+struct stmmac_metadata_request {
+ struct stmmac_priv *priv;
+ struct dma_desc *tx_desc;
+ bool *set_ic;
+};
+
+struct stmmac_xsk_tx_complete {
+ struct stmmac_priv *priv;
+ struct dma_desc *desc;
+};
+
struct stmmac_rx_queue {
u32 rx_count_frames;
u32 queue_index;
@@ -255,6 +267,7 @@ struct stmmac_priv {
u32 msg_enable;
int wolopts;
int wol_irq;
+ bool wol_irq_disabled;
int clk_csr;
struct timer_list eee_ctrl_timer;
int lpi_irq;
@@ -283,6 +296,7 @@ struct stmmac_priv {
void __iomem *mmcaddr;
void __iomem *ptpaddr;
+ void __iomem *estaddr;
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
int sfty_ce_irq;
int sfty_ue_irq;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c
new file mode 100644
index 000000000000..4da6ccc17c20
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2023, Intel Corporation
+ * stmmac EST(802.3 Qbv) handling
+ */
+#include <linux/iopoll.h>
+#include <linux/types.h>
+#include "stmmac.h"
+#include "stmmac_est.h"
+
+static int est_write(void __iomem *est_addr, u32 reg, u32 val, bool gcl)
+{
+ u32 ctrl;
+
+ writel(val, est_addr + EST_GCL_DATA);
+
+ ctrl = (reg << EST_ADDR_SHIFT);
+ ctrl |= gcl ? 0 : EST_GCRR;
+ writel(ctrl, est_addr + EST_GCL_CONTROL);
+
+ ctrl |= EST_SRWO;
+ writel(ctrl, est_addr + EST_GCL_CONTROL);
+
+ return readl_poll_timeout(est_addr + EST_GCL_CONTROL, ctrl,
+ !(ctrl & EST_SRWO), 100, 5000);
+}
+
+static int est_configure(struct stmmac_priv *priv, struct stmmac_est *cfg,
+ unsigned int ptp_rate)
+{
+ void __iomem *est_addr = priv->estaddr;
+ int i, ret = 0;
+ u32 ctrl;
+
+ ret |= est_write(est_addr, EST_BTR_LOW, cfg->btr[0], false);
+ ret |= est_write(est_addr, EST_BTR_HIGH, cfg->btr[1], false);
+ ret |= est_write(est_addr, EST_TER, cfg->ter, false);
+ ret |= est_write(est_addr, EST_LLR, cfg->gcl_size, false);
+ ret |= est_write(est_addr, EST_CTR_LOW, cfg->ctr[0], false);
+ ret |= est_write(est_addr, EST_CTR_HIGH, cfg->ctr[1], false);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < cfg->gcl_size; i++) {
+ ret = est_write(est_addr, i, cfg->gcl[i], true);
+ if (ret)
+ return ret;
+ }
+
+ ctrl = readl(est_addr + EST_CONTROL);
+ if (priv->plat->has_xgmac) {
+ ctrl &= ~EST_XGMAC_PTOV;
+ ctrl |= ((NSEC_PER_SEC / ptp_rate) * EST_XGMAC_PTOV_MUL) <<
+ EST_XGMAC_PTOV_SHIFT;
+ } else {
+ ctrl &= ~EST_GMAC5_PTOV;
+ ctrl |= ((NSEC_PER_SEC / ptp_rate) * EST_GMAC5_PTOV_MUL) <<
+ EST_GMAC5_PTOV_SHIFT;
+ }
+ if (cfg->enable)
+ ctrl |= EST_EEST | EST_SSWL;
+ else
+ ctrl &= ~EST_EEST;
+
+ writel(ctrl, est_addr + EST_CONTROL);
+
+ /* Configure EST interrupt */
+ if (cfg->enable)
+ ctrl = EST_IECGCE | EST_IEHS | EST_IEHF | EST_IEBE | EST_IECC;
+ else
+ ctrl = 0;
+
+ writel(ctrl, est_addr + EST_INT_EN);
+
+ return 0;
+}
+
+static void est_irq_status(struct stmmac_priv *priv, struct net_device *dev,
+ struct stmmac_extra_stats *x, u32 txqcnt)
+{
+ u32 status, value, feqn, hbfq, hbfs, btrl, btrl_max;
+ void __iomem *est_addr = priv->estaddr;
+ u32 txqcnt_mask = BIT(txqcnt) - 1;
+
+ status = readl(est_addr + EST_STATUS);
+
+ value = EST_CGCE | EST_HLBS | EST_HLBF | EST_BTRE | EST_SWLC;
+
+ /* Return if there is no error */
+ if (!(status & value))
+ return;
+
+ if (status & EST_CGCE) {
+ /* Clear Interrupt */
+ writel(EST_CGCE, est_addr + EST_STATUS);
+
+ x->mtl_est_cgce++;
+ }
+
+ if (status & EST_HLBS) {
+ value = readl(est_addr + EST_SCH_ERR);
+ value &= txqcnt_mask;
+
+ x->mtl_est_hlbs++;
+
+ /* Clear Interrupt */
+ writel(value, est_addr + EST_SCH_ERR);
+
+ /* Collecting info to shows all the queues that has HLBS
+ * issue. The only way to clear this is to clear the
+ * statistic
+ */
+ if (net_ratelimit())
+ netdev_err(dev, "EST: HLB(sched) Queue 0x%x\n", value);
+ }
+
+ if (status & EST_HLBF) {
+ value = readl(est_addr + EST_FRM_SZ_ERR);
+ feqn = value & txqcnt_mask;
+
+ value = readl(est_addr + EST_FRM_SZ_CAP);
+ hbfq = (value & EST_SZ_CAP_HBFQ_MASK(txqcnt)) >>
+ EST_SZ_CAP_HBFQ_SHIFT;
+ hbfs = value & EST_SZ_CAP_HBFS_MASK;
+
+ x->mtl_est_hlbf++;
+
+ /* Clear Interrupt */
+ writel(feqn, est_addr + EST_FRM_SZ_ERR);
+
+ if (net_ratelimit())
+ netdev_err(dev, "EST: HLB(size) Queue %u Size %u\n",
+ hbfq, hbfs);
+ }
+
+ if (status & EST_BTRE) {
+ if (priv->plat->has_xgmac) {
+ btrl = FIELD_GET(EST_XGMAC_BTRL, status);
+ btrl_max = FIELD_MAX(EST_XGMAC_BTRL);
+ } else {
+ btrl = FIELD_GET(EST_GMAC5_BTRL, status);
+ btrl_max = FIELD_MAX(EST_GMAC5_BTRL);
+ }
+ if (btrl == btrl_max)
+ x->mtl_est_btrlm++;
+ else
+ x->mtl_est_btre++;
+
+ if (net_ratelimit())
+ netdev_info(dev, "EST: BTR Error Loop Count %u\n",
+ btrl);
+
+ writel(EST_BTRE, est_addr + EST_STATUS);
+ }
+
+ if (status & EST_SWLC) {
+ writel(EST_SWLC, est_addr + EST_STATUS);
+ netdev_info(dev, "EST: SWOL has been switched\n");
+ }
+}
+
+const struct stmmac_est_ops dwmac510_est_ops = {
+ .configure = est_configure,
+ .irq_status = est_irq_status,
+};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.h
new file mode 100644
index 000000000000..7a858c566e7e
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2023, Intel Corporation
+ * stmmac EST(802.3 Qbv) handling
+ */
+
+#define EST_GMAC4_OFFSET 0x00000c50
+#define EST_XGMAC_OFFSET 0x00001050
+
+#define EST_CONTROL 0x00000000
+#define EST_GMAC5_PTOV GENMASK(31, 24)
+#define EST_GMAC5_PTOV_SHIFT 24
+#define EST_GMAC5_PTOV_MUL 6
+#define EST_XGMAC_PTOV GENMASK(31, 23)
+#define EST_XGMAC_PTOV_SHIFT 23
+#define EST_XGMAC_PTOV_MUL 9
+#define EST_SSWL BIT(1)
+#define EST_EEST BIT(0)
+
+#define EST_STATUS 0x00000008
+#define EST_GMAC5_BTRL GENMASK(11, 8)
+#define EST_XGMAC_BTRL GENMASK(15, 8)
+#define EST_SWOL BIT(7)
+#define EST_SWOL_SHIFT 7
+#define EST_CGCE BIT(4)
+#define EST_HLBS BIT(3)
+#define EST_HLBF BIT(2)
+#define EST_BTRE BIT(1)
+#define EST_SWLC BIT(0)
+
+#define EST_SCH_ERR 0x00000010
+
+#define EST_FRM_SZ_ERR 0x00000014
+
+#define EST_FRM_SZ_CAP 0x00000018
+#define EST_SZ_CAP_HBFS_MASK GENMASK(14, 0)
+#define EST_SZ_CAP_HBFQ_SHIFT 16
+#define EST_SZ_CAP_HBFQ_MASK(val) \
+ ({ \
+ typeof(val) _val = (val); \
+ (_val > 4 ? GENMASK(18, 16) : \
+ _val > 2 ? GENMASK(17, 16) : \
+ BIT(16)); \
+ })
+
+#define EST_INT_EN 0x00000020
+#define EST_IECGCE EST_CGCE
+#define EST_IEHS EST_HLBS
+#define EST_IEHF EST_HLBF
+#define EST_IEBE EST_BTRE
+#define EST_IECC EST_SWLC
+
+#define EST_GCL_CONTROL 0x00000030
+#define EST_BTR_LOW 0x0
+#define EST_BTR_HIGH 0x1
+#define EST_CTR_LOW 0x2
+#define EST_CTR_HIGH 0x3
+#define EST_TER 0x4
+#define EST_LLR 0x5
+#define EST_ADDR_SHIFT 8
+#define EST_GCRR BIT(2)
+#define EST_SRWO BIT(0)
+
+#define EST_GCL_DATA 0x00000034
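[Note: the PTOV value programmed by est_configure() above is just the PTP clock period in nanoseconds scaled by the per-IP multiplier. For example, with a hypothetical 250 MHz ptp_rate, NSEC_PER_SEC / ptp_rate = 4 ns, so the GMAC5 path writes 4 * 6 = 24 and the XGMAC path writes 4 * 9 = 36 into its PTOV field.]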
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index f628411ae4ae..ec44becf0e2d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -212,6 +212,8 @@ static const struct stmmac_stats stmmac_mmc[] = {
STMMAC_MMC_STAT(mmc_tx_excessdef),
STMMAC_MMC_STAT(mmc_tx_pause_frame),
STMMAC_MMC_STAT(mmc_tx_vlan_frame_g),
+ STMMAC_MMC_STAT(mmc_tx_lpi_usec),
+ STMMAC_MMC_STAT(mmc_tx_lpi_tran),
STMMAC_MMC_STAT(mmc_rx_framecount_gb),
STMMAC_MMC_STAT(mmc_rx_octetcount_gb),
STMMAC_MMC_STAT(mmc_rx_octetcount_g),
@@ -236,6 +238,11 @@ static const struct stmmac_stats stmmac_mmc[] = {
STMMAC_MMC_STAT(mmc_rx_fifo_overflow),
STMMAC_MMC_STAT(mmc_rx_vlan_frames_gb),
STMMAC_MMC_STAT(mmc_rx_watchdog_error),
+ STMMAC_MMC_STAT(mmc_rx_lpi_usec),
+ STMMAC_MMC_STAT(mmc_rx_lpi_tran),
+ STMMAC_MMC_STAT(mmc_rx_discard_frames_gb),
+ STMMAC_MMC_STAT(mmc_rx_discard_octets_gb),
+ STMMAC_MMC_STAT(mmc_rx_align_err_frames),
STMMAC_MMC_STAT(mmc_rx_ipc_intr_mask),
STMMAC_MMC_STAT(mmc_rx_ipc_intr),
STMMAC_MMC_STAT(mmc_rx_ipv4_gd),
@@ -266,8 +273,11 @@ static const struct stmmac_stats stmmac_mmc[] = {
STMMAC_MMC_STAT(mmc_rx_tcp_err_octets),
STMMAC_MMC_STAT(mmc_rx_icmp_gd_octets),
STMMAC_MMC_STAT(mmc_rx_icmp_err_octets),
+ STMMAC_MMC_STAT(mmc_sgf_pass_fragment_cntr),
+ STMMAC_MMC_STAT(mmc_sgf_fail_fragment_cntr),
STMMAC_MMC_STAT(mmc_tx_fpe_fragment_cntr),
STMMAC_MMC_STAT(mmc_tx_hold_req_cntr),
+ STMMAC_MMC_STAT(mmc_tx_gate_overrun_cntr),
STMMAC_MMC_STAT(mmc_rx_packet_assembly_err_cntr),
STMMAC_MMC_STAT(mmc_rx_packet_smd_err_cntr),
STMMAC_MMC_STAT(mmc_rx_packet_assembly_ok_cntr),
@@ -311,8 +321,9 @@ static int stmmac_ethtool_get_link_ksettings(struct net_device *dev,
{
struct stmmac_priv *priv = netdev_priv(dev);
- if (priv->hw->pcs & STMMAC_PCS_RGMII ||
- priv->hw->pcs & STMMAC_PCS_SGMII) {
+ if (!(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS) &&
+ (priv->hw->pcs & STMMAC_PCS_RGMII ||
+ priv->hw->pcs & STMMAC_PCS_SGMII)) {
struct rgmii_adv adv;
u32 supported, advertising, lp_advertising;
@@ -397,8 +408,9 @@ stmmac_ethtool_set_link_ksettings(struct net_device *dev,
{
struct stmmac_priv *priv = netdev_priv(dev);
- if (priv->hw->pcs & STMMAC_PCS_RGMII ||
- priv->hw->pcs & STMMAC_PCS_SGMII) {
+ if (!(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS) &&
+ (priv->hw->pcs & STMMAC_PCS_RGMII ||
+ priv->hw->pcs & STMMAC_PCS_SGMII)) {
/* Only support ANE */
if (cmd->base.autoneg != AUTONEG_ENABLE)
return -EINVAL;
@@ -537,49 +549,79 @@ stmmac_set_pauseparam(struct net_device *netdev,
}
}
+static u64 stmmac_get_rx_normal_irq_n(struct stmmac_priv *priv, int q)
+{
+ u64 total;
+ int cpu;
+
+ total = 0;
+ for_each_possible_cpu(cpu) {
+ struct stmmac_pcpu_stats *pcpu;
+ unsigned int start;
+ u64 irq_n;
+
+ pcpu = per_cpu_ptr(priv->xstats.pcpu_stats, cpu);
+ do {
+ start = u64_stats_fetch_begin(&pcpu->syncp);
+ irq_n = u64_stats_read(&pcpu->rx_normal_irq_n[q]);
+ } while (u64_stats_fetch_retry(&pcpu->syncp, start));
+ total += irq_n;
+ }
+ return total;
+}
+
+static u64 stmmac_get_tx_normal_irq_n(struct stmmac_priv *priv, int q)
+{
+ u64 total;
+ int cpu;
+
+ total = 0;
+ for_each_possible_cpu(cpu) {
+ struct stmmac_pcpu_stats *pcpu;
+ unsigned int start;
+ u64 irq_n;
+
+ pcpu = per_cpu_ptr(priv->xstats.pcpu_stats, cpu);
+ do {
+ start = u64_stats_fetch_begin(&pcpu->syncp);
+ irq_n = u64_stats_read(&pcpu->tx_normal_irq_n[q]);
+ } while (u64_stats_fetch_retry(&pcpu->syncp, start));
+ total += irq_n;
+ }
+ return total;
+}
+
static void stmmac_get_per_qstats(struct stmmac_priv *priv, u64 *data)
{
u32 tx_cnt = priv->plat->tx_queues_to_use;
u32 rx_cnt = priv->plat->rx_queues_to_use;
unsigned int start;
- int q, stat;
- u64 *pos;
- char *p;
+ int q;
- pos = data;
for (q = 0; q < tx_cnt; q++) {
struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
- struct stmmac_txq_stats snapshot;
+ u64 pkt_n;
- data = pos;
do {
- start = u64_stats_fetch_begin(&txq_stats->syncp);
- snapshot = *txq_stats;
- } while (u64_stats_fetch_retry(&txq_stats->syncp, start));
+ start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
+ pkt_n = u64_stats_read(&txq_stats->napi.tx_pkt_n);
+ } while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
- p = (char *)&snapshot + offsetof(struct stmmac_txq_stats, tx_pkt_n);
- for (stat = 0; stat < STMMAC_TXQ_STATS; stat++) {
- *data++ += (*(u64 *)p);
- p += sizeof(u64);
- }
+ *data++ = pkt_n;
+ *data++ = stmmac_get_tx_normal_irq_n(priv, q);
}
- pos = data;
for (q = 0; q < rx_cnt; q++) {
struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
- struct stmmac_rxq_stats snapshot;
+ u64 pkt_n;
- data = pos;
do {
- start = u64_stats_fetch_begin(&rxq_stats->syncp);
- snapshot = *rxq_stats;
- } while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
+ start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
+ pkt_n = u64_stats_read(&rxq_stats->napi.rx_pkt_n);
+ } while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
- p = (char *)&snapshot + offsetof(struct stmmac_rxq_stats, rx_pkt_n);
- for (stat = 0; stat < STMMAC_RXQ_STATS; stat++) {
- *data++ += (*(u64 *)p);
- p += sizeof(u64);
- }
+ *data++ = pkt_n;
+ *data++ = stmmac_get_rx_normal_irq_n(priv, q);
}
}
@@ -638,39 +680,49 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
pos = j;
for (i = 0; i < rx_queues_count; i++) {
struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[i];
- struct stmmac_rxq_stats snapshot;
+ struct stmmac_napi_rx_stats snapshot;
+ u64 n_irq;
j = pos;
do {
- start = u64_stats_fetch_begin(&rxq_stats->syncp);
- snapshot = *rxq_stats;
- } while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
-
- data[j++] += snapshot.rx_pkt_n;
- data[j++] += snapshot.rx_normal_irq_n;
- normal_irq_n += snapshot.rx_normal_irq_n;
- napi_poll += snapshot.napi_poll;
+ start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
+ snapshot = rxq_stats->napi;
+ } while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
+
+ data[j++] += u64_stats_read(&snapshot.rx_pkt_n);
+ n_irq = stmmac_get_rx_normal_irq_n(priv, i);
+ data[j++] += n_irq;
+ normal_irq_n += n_irq;
+ napi_poll += u64_stats_read(&snapshot.poll);
}
pos = j;
for (i = 0; i < tx_queues_count; i++) {
struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[i];
- struct stmmac_txq_stats snapshot;
+ struct stmmac_napi_tx_stats napi_snapshot;
+ struct stmmac_q_tx_stats q_snapshot;
+ u64 n_irq;
j = pos;
do {
- start = u64_stats_fetch_begin(&txq_stats->syncp);
- snapshot = *txq_stats;
- } while (u64_stats_fetch_retry(&txq_stats->syncp, start));
-
- data[j++] += snapshot.tx_pkt_n;
- data[j++] += snapshot.tx_normal_irq_n;
- normal_irq_n += snapshot.tx_normal_irq_n;
- data[j++] += snapshot.tx_clean;
- data[j++] += snapshot.tx_set_ic_bit;
- data[j++] += snapshot.tx_tso_frames;
- data[j++] += snapshot.tx_tso_nfrags;
- napi_poll += snapshot.napi_poll;
+ start = u64_stats_fetch_begin(&txq_stats->q_syncp);
+ q_snapshot = txq_stats->q;
+ } while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
+ do {
+ start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
+ napi_snapshot = txq_stats->napi;
+ } while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
+
+ data[j++] += u64_stats_read(&napi_snapshot.tx_pkt_n);
+ n_irq = stmmac_get_tx_normal_irq_n(priv, i);
+ data[j++] += n_irq;
+ normal_irq_n += n_irq;
+ data[j++] += u64_stats_read(&napi_snapshot.tx_clean);
+ data[j++] += u64_stats_read(&q_snapshot.tx_set_ic_bit) +
+ u64_stats_read(&napi_snapshot.tx_set_ic_bit);
+ data[j++] += u64_stats_read(&q_snapshot.tx_tso_frames);
+ data[j++] += u64_stats_read(&q_snapshot.tx_tso_nfrags);
+ napi_poll += u64_stats_read(&napi_snapshot.poll);
}
normal_irq_n += priv->xstats.rx_early_irq;
data[j++] = normal_irq_n;
@@ -825,10 +877,16 @@ static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
if (wol->wolopts) {
pr_info("stmmac: wakeup enable\n");
device_set_wakeup_enable(priv->device, 1);
- enable_irq_wake(priv->wol_irq);
+ /* Avoid unbalanced enable_irq_wake calls */
+ if (priv->wol_irq_disabled)
+ enable_irq_wake(priv->wol_irq);
+ priv->wol_irq_disabled = false;
} else {
device_set_wakeup_enable(priv->device, 0);
- disable_irq_wake(priv->wol_irq);
+ /* Avoid unbalanced disable_irq_wake calls */
+ if (!priv->wol_irq_disabled)
+ disable_irq_wake(priv->wol_irq);
+ priv->wol_irq_disabled = true;
}
mutex_lock(&priv->lock);
@@ -1077,41 +1135,42 @@ static u32 stmmac_get_rxfh_indir_size(struct net_device *dev)
return ARRAY_SIZE(priv->rss.table);
}
-static int stmmac_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int stmmac_get_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh)
{
struct stmmac_priv *priv = netdev_priv(dev);
int i;
- if (indir) {
+ if (rxfh->indir) {
for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
- indir[i] = priv->rss.table[i];
+ rxfh->indir[i] = priv->rss.table[i];
}
- if (key)
- memcpy(key, priv->rss.key, sizeof(priv->rss.key));
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+ if (rxfh->key)
+ memcpy(rxfh->key, priv->rss.key, sizeof(priv->rss.key));
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
return 0;
}
-static int stmmac_set_rxfh(struct net_device *dev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int stmmac_set_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct stmmac_priv *priv = netdev_priv(dev);
int i;
- if ((hfunc != ETH_RSS_HASH_NO_CHANGE) && (hfunc != ETH_RSS_HASH_TOP))
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- if (indir) {
+ if (rxfh->indir) {
for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
- priv->rss.table[i] = indir[i];
+ priv->rss.table[i] = rxfh->indir[i];
}
- if (key)
- memcpy(priv->rss.key, key, sizeof(priv->rss.key));
+ if (rxfh->key)
+ memcpy(priv->rss.key, rxfh->key, sizeof(priv->rss.key));
return stmmac_rss_configure(priv, priv->hw, &priv->rss,
priv->plat->rx_queues_to_use);
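The new stmmac_get_rx_normal_irq_n()/stmmac_get_tx_normal_irq_n() helpers above fold per-CPU counters into the ethtool output. For context, a minimal sketch of the writer side those fetch loops assume; the function name is illustrative and the real increment sites live in the interrupt paths elsewhere in the driver, but the struct and field names follow this patch:

static void example_count_rx_irq(struct stmmac_priv *priv, u32 chan)
{
	/* Per-CPU slot allocated in stmmac_dvr_probe() via
	 * devm_netdev_alloc_pcpu_stats(); no extra locking is needed
	 * because each CPU only writes its own counters.
	 */
	struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats);

	u64_stats_update_begin(&stats->syncp);
	u64_stats_inc(&stats->rx_normal_irq_n[chan]);
	u64_stats_update_end(&stats->syncp);
}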
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 37e64283f910..7c6aef033a45 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2431,6 +2431,46 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
}
}
+static void stmmac_xsk_request_timestamp(void *_priv)
+{
+ struct stmmac_metadata_request *meta_req = _priv;
+
+ stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc);
+ *meta_req->set_ic = true;
+}
+
+static u64 stmmac_xsk_fill_timestamp(void *_priv)
+{
+ struct stmmac_xsk_tx_complete *tx_compl = _priv;
+ struct stmmac_priv *priv = tx_compl->priv;
+ struct dma_desc *desc = tx_compl->desc;
+ bool found = false;
+ u64 ns = 0;
+
+ if (!priv->hwts_tx_en)
+ return 0;
+
+ /* check tx tstamp status */
+ if (stmmac_get_tx_timestamp_status(priv, desc)) {
+ stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
+ found = true;
+ } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
+ found = true;
+ }
+
+ if (found) {
+ ns -= priv->plat->cdc_error_adj;
+ return ns_to_ktime(ns);
+ }
+
+ return 0;
+}
+
+static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
+ .tmo_request_timestamp = stmmac_xsk_request_timestamp,
+ .tmo_fill_timestamp = stmmac_xsk_fill_timestamp,
+};
+
static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
{
struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
@@ -2442,7 +2482,6 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
struct xdp_desc xdp_desc;
bool work_done = true;
u32 tx_set_ic_bit = 0;
- unsigned long flags;
/* Avoids TX time-out as we are sharing with slow path */
txq_trans_cond_update(nq);
@@ -2450,6 +2489,8 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
budget = min(budget, stmmac_tx_avail(priv, queue));
while (budget-- > 0) {
+ struct stmmac_metadata_request meta_req;
+ struct xsk_tx_metadata *meta = NULL;
dma_addr_t dma_addr;
bool set_ic;
@@ -2473,6 +2514,7 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
tx_desc = tx_q->dma_tx + entry;
dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
+ meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
@@ -2500,6 +2542,11 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
else
set_ic = false;
+ meta_req.priv = priv;
+ meta_req.tx_desc = tx_desc;
+ meta_req.set_ic = &set_ic;
+ xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
+ &meta_req);
if (set_ic) {
tx_q->tx_count_frames = 0;
stmmac_set_tx_ic(priv, tx_desc);
@@ -2512,12 +2559,15 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
stmmac_enable_dma_transmission(priv, priv->ioaddr);
+ xsk_tx_metadata_to_compl(meta,
+ &tx_q->tx_skbuff_dma[entry].xsk_meta);
+
tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
entry = tx_q->cur_tx;
}
- flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
- txq_stats->tx_set_ic_bit += tx_set_ic_bit;
- u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
+ u64_stats_update_begin(&txq_stats->napi_syncp);
+ u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
+ u64_stats_update_end(&txq_stats->napi_syncp);
if (tx_desc) {
stmmac_flush_tx_descriptors(priv, queue);
@@ -2565,7 +2615,6 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
unsigned int bytes_compl = 0, pkts_compl = 0;
unsigned int entry, xmits = 0, count = 0;
u32 tx_packets = 0, tx_errors = 0;
- unsigned long flags;
__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
@@ -2621,8 +2670,19 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
} else {
tx_packets++;
}
- if (skb)
+ if (skb) {
stmmac_get_tx_hwtstamp(priv, p, skb);
+ } else if (tx_q->xsk_pool &&
+ xp_tx_metadata_enabled(tx_q->xsk_pool)) {
+ struct stmmac_xsk_tx_complete tx_compl = {
+ .priv = priv,
+ .desc = p,
+ };
+
+ xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta,
+ &stmmac_xsk_tx_metadata_ops,
+ &tx_compl);
+ }
}
if (likely(tx_q->tx_skbuff_dma[entry].buf &&
@@ -2721,11 +2781,11 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
if (tx_q->dirty_tx != tx_q->cur_tx)
*pending_packets = true;
- flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
- txq_stats->tx_packets += tx_packets;
- txq_stats->tx_pkt_n += tx_packets;
- txq_stats->tx_clean++;
- u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
+ u64_stats_update_begin(&txq_stats->napi_syncp);
+ u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
+ u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
+ u64_stats_inc(&txq_stats->napi.tx_clean);
+ u64_stats_update_end(&txq_stats->napi_syncp);
priv->xstats.tx_errors += tx_errors;
@@ -3470,6 +3530,8 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
/* Start the ball rolling... */
stmmac_start_all_dma(priv);
+ stmmac_set_hw_vlan_mode(priv, priv->hw);
+
if (priv->dma_cap.fpesel) {
stmmac_fpe_start_wq(priv);
@@ -3565,6 +3627,7 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
/* Request the Wake IRQ in case of another line
* is used for WoL
*/
+ priv->wol_irq_disabled = true;
if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
int_name = priv->int_name_wol;
sprintf(int_name, "%s:%s", dev->name, "wol");
@@ -3868,6 +3931,9 @@ static int __stmmac_open(struct net_device *dev,
priv->rx_copybreak = STMMAC_RX_COPYBREAK;
buf_sz = dma_conf->dma_buf_sz;
+ for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
+ if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
+ dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
stmmac_reset_queues_param(priv);
@@ -3940,8 +4006,10 @@ static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
{
set_bit(__FPE_REMOVING, &priv->fpe_task_state);
- if (priv->fpe_wq)
+ if (priv->fpe_wq) {
destroy_workqueue(priv->fpe_wq);
+ priv->fpe_wq = NULL;
+ }
netdev_info(priv->dev, "FPE workqueue stop");
}
@@ -4146,7 +4214,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
struct stmmac_tx_queue *tx_q;
bool has_vlan, set_ic;
u8 proto_hdr_len, hdr;
- unsigned long flags;
u32 pay_len, mss;
dma_addr_t des;
int i;
@@ -4311,13 +4378,13 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
}
- flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
- txq_stats->tx_bytes += skb->len;
- txq_stats->tx_tso_frames++;
- txq_stats->tx_tso_nfrags += nfrags;
+ u64_stats_update_begin(&txq_stats->q_syncp);
+ u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
+ u64_stats_inc(&txq_stats->q.tx_tso_frames);
+ u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
if (set_ic)
- txq_stats->tx_set_ic_bit++;
- u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
+ u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
+ u64_stats_update_end(&txq_stats->q_syncp);
if (priv->sarc_type)
stmmac_set_desc_sarc(priv, first, priv->sarc_type);
@@ -4372,6 +4439,28 @@ dma_map_err:
}
/**
+ * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
+ * @skb: socket buffer to check
+ *
+ * Check if a packet has an ethertype that will trigger the IP header checks
+ * and IP/TCP checksum engine of the stmmac core.
+ *
+ * Return: true if the ethertype can trigger the checksum engine, false
+ * otherwise
+ */
+static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
+{
+ int depth = 0;
+ __be16 proto;
+
+ proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
+ &depth);
+
+ return (depth <= ETH_HLEN) &&
+ (proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
+}
+
+/**
* stmmac_xmit - Tx entry point of the driver
* @skb : the socket buffer
* @dev : device pointer
@@ -4394,7 +4483,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
struct stmmac_tx_queue *tx_q;
bool has_vlan, set_ic;
int entry, first_tx;
- unsigned long flags;
dma_addr_t des;
tx_q = &priv->dma_conf.tx_queue[queue];
@@ -4435,9 +4523,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
/* DWMAC IPs can be synthesized to support tx coe only for a few tx
* queues. In that case, checksum offloading for those queues that don't
* support tx coe needs to fallback to software checksum calculation.
+ *
+ * Packets that won't trigger the COE, e.g. most DSA-tagged packets, will
+ * also have to be checksummed in software.
*/
if (csum_insertion &&
- priv->plat->tx_queues_cfg[queue].coe_unsupported) {
+ (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
+ !stmmac_has_ip_ethertype(skb))) {
if (unlikely(skb_checksum_help(skb)))
goto dma_map_err;
csum_insertion = !csum_insertion;
@@ -4560,11 +4652,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
}
- flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
- txq_stats->tx_bytes += skb->len;
+ u64_stats_update_begin(&txq_stats->q_syncp);
+ u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
if (set_ic)
- txq_stats->tx_set_ic_bit++;
- u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
+ u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
+ u64_stats_update_end(&txq_stats->q_syncp);
if (priv->sarc_type)
stmmac_set_desc_sarc(priv, first, priv->sarc_type);
@@ -4828,12 +4920,11 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
set_ic = false;
if (set_ic) {
- unsigned long flags;
tx_q->tx_count_frames = 0;
stmmac_set_tx_ic(priv, tx_desc);
- flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
- txq_stats->tx_set_ic_bit++;
- u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
+ u64_stats_update_begin(&txq_stats->q_syncp);
+ u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
+ u64_stats_update_end(&txq_stats->q_syncp);
}
stmmac_enable_dma_transmission(priv, priv->ioaddr);
@@ -4983,7 +5074,6 @@ static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
unsigned int len = xdp->data_end - xdp->data;
enum pkt_hash_types hash_type;
int coe = priv->hw->rx_csum;
- unsigned long flags;
struct sk_buff *skb;
u32 hash;
@@ -4994,10 +5084,15 @@ static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
}
stmmac_get_rx_hwtstamp(priv, p, np, skb);
- stmmac_rx_vlan(priv->dev, skb);
+ if (priv->hw->hw_vlan_en)
+ /* MAC level stripping. */
+ stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
+ else
+ /* Driver level stripping. */
+ stmmac_rx_vlan(priv->dev, skb);
skb->protocol = eth_type_trans(skb, priv->dev);
- if (unlikely(!coe))
+ if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
skb_checksum_none_assert(skb);
else
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -5008,10 +5103,10 @@ static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
skb_record_rx_queue(skb, queue);
napi_gro_receive(&ch->rxtx_napi, skb);
- flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
- rxq_stats->rx_pkt_n++;
- rxq_stats->rx_bytes += len;
- u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
+ u64_stats_update_begin(&rxq_stats->napi_syncp);
+ u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
+ u64_stats_add(&rxq_stats->napi.rx_bytes, len);
+ u64_stats_update_end(&rxq_stats->napi_syncp);
}
static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
@@ -5093,7 +5188,6 @@ static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
unsigned int desc_size;
struct bpf_prog *prog;
bool failure = false;
- unsigned long flags;
int xdp_status = 0;
int status = 0;
@@ -5248,9 +5342,9 @@ read_again:
stmmac_finalize_xdp_rx(priv, xdp_status);
- flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
- rxq_stats->rx_pkt_n += count;
- u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
+ u64_stats_update_begin(&rxq_stats->napi_syncp);
+ u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
+ u64_stats_update_end(&rxq_stats->napi_syncp);
priv->xstats.rx_dropped += rx_dropped;
priv->xstats.rx_errors += rx_errors;
@@ -5288,7 +5382,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
unsigned int desc_size;
struct sk_buff *skb = NULL;
struct stmmac_xdp_buff ctx;
- unsigned long flags;
int xdp_status = 0;
int buf_sz;
@@ -5510,10 +5603,17 @@ drain_data:
/* Got entire packet into SKB. Finish it. */
stmmac_get_rx_hwtstamp(priv, p, np, skb);
- stmmac_rx_vlan(priv->dev, skb);
+
+ if (priv->hw->hw_vlan_en)
+ /* MAC level stripping. */
+ stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
+ else
+ /* Driver level stripping. */
+ stmmac_rx_vlan(priv->dev, skb);
+
skb->protocol = eth_type_trans(skb, priv->dev);
- if (unlikely(!coe))
+ if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
skb_checksum_none_assert(skb);
else
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -5541,11 +5641,11 @@ drain_data:
stmmac_rx_refill(priv, queue);
- flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
- rxq_stats->rx_packets += rx_packets;
- rxq_stats->rx_bytes += rx_bytes;
- rxq_stats->rx_pkt_n += count;
- u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
+ u64_stats_update_begin(&rxq_stats->napi_syncp);
+ u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
+ u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
+ u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
+ u64_stats_update_end(&rxq_stats->napi_syncp);
priv->xstats.rx_dropped += rx_dropped;
priv->xstats.rx_errors += rx_errors;
@@ -5560,13 +5660,12 @@ static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
struct stmmac_priv *priv = ch->priv_data;
struct stmmac_rxq_stats *rxq_stats;
u32 chan = ch->index;
- unsigned long flags;
int work_done;
rxq_stats = &priv->xstats.rxq_stats[chan];
- flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
- rxq_stats->napi_poll++;
- u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
+ u64_stats_update_begin(&rxq_stats->napi_syncp);
+ u64_stats_inc(&rxq_stats->napi.poll);
+ u64_stats_update_end(&rxq_stats->napi_syncp);
work_done = stmmac_rx(priv, budget, chan);
if (work_done < budget && napi_complete_done(napi, work_done)) {
@@ -5588,13 +5687,12 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
struct stmmac_txq_stats *txq_stats;
bool pending_packets = false;
u32 chan = ch->index;
- unsigned long flags;
int work_done;
txq_stats = &priv->xstats.txq_stats[chan];
- flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
- txq_stats->napi_poll++;
- u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
+ u64_stats_update_begin(&txq_stats->napi_syncp);
+ u64_stats_inc(&txq_stats->napi.poll);
+ u64_stats_update_end(&txq_stats->napi_syncp);
work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
work_done = min(work_done, budget);
@@ -5624,17 +5722,16 @@ static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
struct stmmac_rxq_stats *rxq_stats;
struct stmmac_txq_stats *txq_stats;
u32 chan = ch->index;
- unsigned long flags;
rxq_stats = &priv->xstats.rxq_stats[chan];
- flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
- rxq_stats->napi_poll++;
- u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
+ u64_stats_update_begin(&rxq_stats->napi_syncp);
+ u64_stats_inc(&rxq_stats->napi.poll);
+ u64_stats_update_end(&rxq_stats->napi_syncp);
txq_stats = &priv->xstats.txq_stats[chan];
- flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
- txq_stats->napi_poll++;
- u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
+ u64_stats_update_begin(&txq_stats->napi_syncp);
+ u64_stats_inc(&txq_stats->napi.poll);
+ u64_stats_update_end(&txq_stats->napi_syncp);
tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
tx_done = min(tx_done, budget);
@@ -5819,6 +5916,13 @@ static int stmmac_set_features(struct net_device *netdev,
stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
}
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ priv->hw->hw_vlan_en = true;
+ else
+ priv->hw->hw_vlan_en = false;
+
+ stmmac_set_hw_vlan_mode(priv, priv->hw);
+
return 0;
}
@@ -5880,7 +5984,7 @@ static void stmmac_common_interrupt(struct stmmac_priv *priv)
pm_wakeup_event(priv->device, 0);
if (priv->dma_cap.estsel)
- stmmac_est_irq_status(priv, priv->ioaddr, priv->dev,
+ stmmac_est_irq_status(priv, priv, priv->dev,
&priv->xstats, tx_cnt);
if (priv->dma_cap.fpesel) {
@@ -5958,11 +6062,6 @@ static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
struct net_device *dev = (struct net_device *)dev_id;
struct stmmac_priv *priv = netdev_priv(dev);
- if (unlikely(!dev)) {
- netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
- return IRQ_NONE;
- }
-
/* Check if adapter is up */
if (test_bit(STMMAC_DOWN, &priv->state))
return IRQ_HANDLED;
@@ -5978,11 +6077,6 @@ static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
struct net_device *dev = (struct net_device *)dev_id;
struct stmmac_priv *priv = netdev_priv(dev);
- if (unlikely(!dev)) {
- netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
- return IRQ_NONE;
- }
-
/* Check if adapter is up */
if (test_bit(STMMAC_DOWN, &priv->state))
return IRQ_HANDLED;
@@ -6004,11 +6098,6 @@ static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
- if (unlikely(!data)) {
- netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
- return IRQ_NONE;
- }
-
/* Check if adapter is up */
if (test_bit(STMMAC_DOWN, &priv->state))
return IRQ_HANDLED;
@@ -6035,11 +6124,6 @@ static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
- if (unlikely(!data)) {
- netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
- return IRQ_NONE;
- }
-
/* Check if adapter is up */
if (test_bit(STMMAC_DOWN, &priv->state))
return IRQ_HANDLED;
@@ -6182,30 +6266,23 @@ static struct dentry *stmmac_fs_dir;
static void sysfs_display_ring(void *head, int size, int extend_desc,
struct seq_file *seq, dma_addr_t dma_phy_addr)
{
- int i;
struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
struct dma_desc *p = (struct dma_desc *)head;
+ unsigned int desc_size;
dma_addr_t dma_addr;
+ int i;
+ desc_size = extend_desc ? sizeof(*ep) : sizeof(*p);
for (i = 0; i < size; i++) {
- if (extend_desc) {
- dma_addr = dma_phy_addr + i * sizeof(*ep);
- seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
- i, &dma_addr,
- le32_to_cpu(ep->basic.des0),
- le32_to_cpu(ep->basic.des1),
- le32_to_cpu(ep->basic.des2),
- le32_to_cpu(ep->basic.des3));
- ep++;
- } else {
- dma_addr = dma_phy_addr + i * sizeof(*p);
- seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
- i, &dma_addr,
- le32_to_cpu(p->des0), le32_to_cpu(p->des1),
- le32_to_cpu(p->des2), le32_to_cpu(p->des3));
+ dma_addr = dma_phy_addr + i * desc_size;
+ seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
+ i, &dma_addr,
+ le32_to_cpu(p->des0), le32_to_cpu(p->des1),
+ le32_to_cpu(p->des2), le32_to_cpu(p->des3));
+ if (extend_desc)
+ p = &(++ep)->basic;
+ else
p++;
- }
- seq_printf(seq, "\n");
}
}
@@ -6960,10 +7037,13 @@ static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64
u64 tx_bytes;
do {
- start = u64_stats_fetch_begin(&txq_stats->syncp);
- tx_packets = txq_stats->tx_packets;
- tx_bytes = txq_stats->tx_bytes;
- } while (u64_stats_fetch_retry(&txq_stats->syncp, start));
+ start = u64_stats_fetch_begin(&txq_stats->q_syncp);
+ tx_bytes = u64_stats_read(&txq_stats->q.tx_bytes);
+ } while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
+ do {
+ start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
+ tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
+ } while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
stats->tx_packets += tx_packets;
stats->tx_bytes += tx_bytes;
@@ -6975,10 +7055,10 @@ static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64
u64 rx_bytes;
do {
- start = u64_stats_fetch_begin(&rxq_stats->syncp);
- rx_packets = rxq_stats->rx_packets;
- rx_bytes = rxq_stats->rx_bytes;
- } while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
+ start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
+ rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
+ rx_bytes = u64_stats_read(&rxq_stats->napi.rx_bytes);
+ } while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
stats->rx_packets += rx_packets;
stats->rx_bytes += rx_bytes;
@@ -7372,9 +7452,16 @@ int stmmac_dvr_probe(struct device *device,
priv->dev = ndev;
for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
- u64_stats_init(&priv->xstats.rxq_stats[i].syncp);
- for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
- u64_stats_init(&priv->xstats.txq_stats[i].syncp);
+ u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
+ for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
+ u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
+ u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
+ }
+
+ priv->xstats.pcpu_stats =
+ devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
+ if (!priv->xstats.pcpu_stats)
+ return -ENOMEM;
stmmac_set_ethtool_ops(ndev);
priv->pause = pause;
@@ -7440,6 +7527,9 @@ int stmmac_dvr_probe(struct device *device,
dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
ERR_PTR(ret));
+ /* Wait a bit for the reset to take effect */
+ udelay(10);
+
/* Init MAC and get the capabilities */
ret = stmmac_hw_init(priv);
if (ret)
@@ -7455,6 +7545,7 @@ int stmmac_dvr_probe(struct device *device,
ndev->netdev_ops = &stmmac_netdev_ops;
ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
+ ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops;
ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM;
@@ -7521,6 +7612,9 @@ int stmmac_dvr_probe(struct device *device,
#ifdef STMMAC_VLAN_TAG_USED
/* Both mac100 and gmac support receive VLAN tag detection */
ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
+ ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
+ priv->hw->hw_vlan_en = true;
+
if (priv->dma_cap.vlhash) {
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
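The tmo_request_timestamp()/tmo_fill_timestamp() hooks registered above service AF_XDP TX metadata requests. A rough sketch of the userspace side that would exercise them, assuming the generic AF_XDP TX metadata UAPI from <linux/if_xdp.h> and a UMEM registered with tx_metadata_len == sizeof(struct xsk_tx_metadata); the helper name and setup details are illustrative, not part of this patch:

#include <linux/if_xdp.h>
#include <string.h>

static void request_tx_timestamp(void *umem_area, struct xdp_desc *desc)
{
	/* The TX metadata block sits immediately before the packet data
	 * inside the UMEM frame.
	 */
	struct xsk_tx_metadata *meta =
		(struct xsk_tx_metadata *)((char *)umem_area + desc->addr) - 1;

	memset(meta, 0, sizeof(*meta));
	meta->flags = XDP_TXMD_FLAGS_TIMESTAMP; /* driver calls tmo_request_timestamp() */
	desc->options |= XDP_TX_METADATA;

	/* After the completion is reaped, meta->completion.tx_timestamp
	 * holds the value produced by tmo_fill_timestamp().
	 */
}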
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 1ffde555da47..70eadc83ca68 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -296,62 +296,80 @@ out:
}
/**
- * stmmac_dt_phy - parse device-tree driver parameters to allocate PHY resources
- * @plat: driver data platform structure
- * @np: device tree node
- * @dev: device pointer
- * Description:
- * The mdio bus will be allocated in case of a phy transceiver is on board;
- * it will be NULL if the fixed-link is configured.
- * If there is the "snps,dwmac-mdio" sub-node the mdio will be allocated
- * in any case (for DSA, mdio must be registered even if fixed-link).
- * The table below sums the supported configurations:
- * -------------------------------
- * snps,phy-addr | Y
- * -------------------------------
- * phy-handle | Y
- * -------------------------------
- * fixed-link | N
- * -------------------------------
- * snps,dwmac-mdio |
- * even if | Y
- * fixed-link |
- * -------------------------------
+ * stmmac_of_get_mdio() - Gets the MDIO bus from the devicetree.
+ * @np: devicetree node
*
- * It returns 0 in case of success otherwise -ENODEV.
+ * The MDIO bus will be searched for in the following ways:
+ * 1. The node is compatible with "snps,dwc-qos-ethernet-4.10" and a
+ *    child node named "mdio" exists
+ * 2. A child node with the "snps,dwmac-mdio" compatible is present
+ *
+ * Return: The MDIO node if present otherwise NULL
*/
-static int stmmac_dt_phy(struct plat_stmmacenet_data *plat,
- struct device_node *np, struct device *dev)
+static struct device_node *stmmac_of_get_mdio(struct device_node *np)
{
- bool mdio = !of_phy_is_fixed_link(np);
static const struct of_device_id need_mdio_ids[] = {
{ .compatible = "snps,dwc-qos-ethernet-4.10" },
{},
};
+ struct device_node *mdio_node = NULL;
if (of_match_node(need_mdio_ids, np)) {
- plat->mdio_node = of_get_child_by_name(np, "mdio");
+ mdio_node = of_get_child_by_name(np, "mdio");
} else {
/**
* If snps,dwmac-mdio is passed from DT, always register
* the MDIO
*/
- for_each_child_of_node(np, plat->mdio_node) {
- if (of_device_is_compatible(plat->mdio_node,
+ for_each_child_of_node(np, mdio_node) {
+ if (of_device_is_compatible(mdio_node,
"snps,dwmac-mdio"))
break;
}
}
- if (plat->mdio_node) {
+ return mdio_node;
+}
+
+/**
+ * stmmac_mdio_setup() - Populate platform related MDIO structures.
+ * @plat: driver data platform structure
+ * @np: devicetree node
+ * @dev: device pointer
+ *
+ * This searches for MDIO information from the devicetree.
+ * If an MDIO node is found, it's assigned to plat->mdio_node and
+ * plat->mdio_bus_data is allocated.
+ * If no connection can be determined, just plat->mdio_bus_data is allocated
+ * to indicate a bus should be created and scanned for a phy.
+ * If it's determined there's no MDIO bus needed, both are left NULL.
+ *
+ * This expects that plat->phy_node has already been searched for.
+ *
+ * Return: 0 on success, errno otherwise.
+ */
+static int stmmac_mdio_setup(struct plat_stmmacenet_data *plat,
+ struct device_node *np, struct device *dev)
+{
+ bool legacy_mdio;
+
+ plat->mdio_node = stmmac_of_get_mdio(np);
+ if (plat->mdio_node)
dev_dbg(dev, "Found MDIO subnode\n");
- mdio = true;
- }
- if (mdio) {
- plat->mdio_bus_data =
- devm_kzalloc(dev, sizeof(struct stmmac_mdio_bus_data),
- GFP_KERNEL);
+ /* Legacy devicetrees allowed the MDIO bus description to be omitted and
+ * expected the bus to be scanned for devices. If no phy or fixed-link is
+ * described, assume this is the case since there must be something
+ * connected to the MAC.
+ */
+ legacy_mdio = !of_phy_is_fixed_link(np) && !plat->phy_node;
+ if (legacy_mdio)
+ dev_info(dev, "Deprecated MDIO bus assumption used\n");
+
+ if (plat->mdio_node || legacy_mdio) {
+ plat->mdio_bus_data = devm_kzalloc(dev,
+ sizeof(*plat->mdio_bus_data),
+ GFP_KERNEL);
if (!plat->mdio_bus_data)
return -ENOMEM;
@@ -471,8 +489,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0)
dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n");
- /* To Configure PHY by using all device-tree supported properties */
- rc = stmmac_dt_phy(plat, np, &pdev->dev);
+ rc = stmmac_mdio_setup(plat, np, &pdev->dev);
if (rc)
return ERR_PTR(rc);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index bffa5c017032..e04830a3a1fb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -72,7 +72,7 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
est_rst = true;
mutex_lock(&priv->plat->est->lock);
priv->plat->est->enable = false;
- stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
+ stmmac_est_configure(priv, priv, priv->plat->est,
priv->plat->clk_ptp_rate);
mutex_unlock(&priv->plat->est->lock);
}
@@ -102,7 +102,7 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
priv->plat->est->btr[0] = (u32)time.tv_nsec;
priv->plat->est->btr[1] = (u32)time.tv_sec;
priv->plat->est->enable = true;
- ret = stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
+ ret = stmmac_est_configure(priv, priv, priv->plat->est,
priv->plat->clk_ptp_rate);
mutex_unlock(&priv->plat->est->lock);
if (ret)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 6ad3e0a11936..26fa33e5ec34 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -975,6 +975,8 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
return -EINVAL;
if (!qopt->cycle_time)
return -ERANGE;
+ if (qopt->cycle_time_extension >= BIT(wid + 7))
+ return -ERANGE;
if (!plat->est) {
plat->est = devm_kzalloc(priv->device, sizeof(*plat->est),
@@ -1041,6 +1043,8 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
priv->plat->est->ctr[0] = do_div(ctr, NSEC_PER_SEC);
priv->plat->est->ctr[1] = (u32)ctr;
+ priv->plat->est->ter = qopt->cycle_time_extension;
+
if (fpe && !priv->dma_cap.fpesel) {
mutex_unlock(&priv->plat->est->lock);
return -EOPNOTSUPP;
@@ -1051,7 +1055,7 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
*/
priv->plat->fpe_cfg->enable = fpe;
- ret = stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
+ ret = stmmac_est_configure(priv, priv, priv->plat->est,
priv->plat->clk_ptp_rate);
mutex_unlock(&priv->plat->est->lock);
if (ret) {
@@ -1072,7 +1076,7 @@ disable:
if (priv->plat->est) {
mutex_lock(&priv->plat->est->lock);
priv->plat->est->enable = false;
- stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
+ stmmac_est_configure(priv, priv, priv->plat->est,
priv->plat->clk_ptp_rate);
mutex_unlock(&priv->plat->est->lock);
}
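The new cycle_time_extension check above rejects values that would not fit the hardware's cycle-time-extension field before they are stored in plat->est->ter. As a worked example (assuming wid at that point holds the decoded GCL time-interval width, with 16 and 24 being possible widths): with wid = 24 the bound is BIT(24 + 7) = 2147483648 ns, so extensions up to roughly 2.14 s are accepted, while with wid = 16 the limit is BIT(23) = 8388608 ns, about 8.4 ms; anything larger now fails with -ERANGE up front.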