diff options
Diffstat (limited to 'drivers/net/ethernet/cadence/macb_main.c')
-rw-r--r-- | drivers/net/ethernet/cadence/macb_main.c | 344 |
1 files changed, 259 insertions, 85 deletions
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index aef87ba68aec..18493f4e6257 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -35,6 +35,8 @@ #include <linux/tcp.h> #include <linux/iopoll.h> #include <linux/pm_runtime.h> +#include <linux/crc32.h> +#include <linux/inetdevice.h> #include "macb.h" #define MACB_RX_BUFFER_SIZE 128 @@ -74,13 +76,11 @@ #define GEM_MTU_MIN_SIZE ETH_MIN_MTU #define MACB_NETIF_LSO NETIF_F_TSO -#define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0) -#define MACB_WOL_ENABLED (0x1 << 1) - /* Graceful stop timeouts in us. We should allow up to * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions) */ #define MACB_HALT_TIMEOUT 1230 +#define MACB_PM_TIMEOUT 100 /* ms */ #define MACB_PM_TIMEOUT 100 /* ms */ @@ -275,6 +275,9 @@ static void macb_set_hwaddr(struct macb *bp) top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4))); macb_or_gem_writel(bp, SA1T, top); + gem_writel(bp, RXPTPUNI, bottom); + gem_writel(bp, TXPTPUNI, bottom); + /* Clear unused address register sets */ macb_or_gem_writel(bp, SA2B, 0); macb_or_gem_writel(bp, SA2T, 0); @@ -576,8 +579,8 @@ static int macb_mii_probe(struct net_device *dev) static int macb_mii_init(struct macb *bp) { - struct device_node *np; - int err = -ENXIO; + struct device_node *np, *mdio_np; + int err = -ENXIO, i; /* Enable management port */ macb_writel(bp, NCR, MACB_BIT(MPE)); @@ -594,19 +597,39 @@ static int macb_mii_init(struct macb *bp) snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", bp->pdev->name, bp->pdev->id); bp->mii_bus->priv = bp; - bp->mii_bus->parent = &bp->pdev->dev; + bp->mii_bus->parent = &bp->dev->dev; dev_set_drvdata(&bp->dev->dev, bp->mii_bus); np = bp->pdev->dev.of_node; - if (np && of_phy_is_fixed_link(np)) { - if (of_phy_register_fixed_link(np) < 0) { - dev_err(&bp->pdev->dev, - "broken fixed-link specification %pOF\n", np); + mdio_np = of_get_child_by_name(np, "mdio"); + if 
(mdio_np) { + of_node_put(mdio_np); + err = of_mdiobus_register(bp->mii_bus, mdio_np); + if (err) goto err_out_free_mdiobus; - } + } else if (np) { + /* try dt phy registration */ + err = of_mdiobus_register(bp->mii_bus, np); - err = mdiobus_register(bp->mii_bus); + /* fallback to standard phy registration if no phy were + * found during dt phy registration + */ + if (!err && !phy_find_first(bp->mii_bus)) { + for (i = 0; i < PHY_MAX_ADDR; i++) { + struct phy_device *phydev; + + phydev = mdiobus_scan(bp->mii_bus, i); + if (IS_ERR(phydev) && + PTR_ERR(phydev) != -ENODEV) { + err = PTR_ERR(phydev); + break; + } + } + + if (err) + goto err_out_unregister_bus; + } } else { err = of_mdiobus_register(bp->mii_bus, np); } @@ -920,7 +943,6 @@ static void gem_rx_refill(struct macb_queue *queue) /* Make hw descriptor updates visible to CPU */ rmb(); - queue->rx_prepared_head++; desc = macb_rx_desc(queue, entry); if (!queue->rx_skbuff[entry]) { @@ -959,6 +981,7 @@ static void gem_rx_refill(struct macb_queue *queue) dma_wmb(); desc->addr &= ~MACB_BIT(RX_USED); } + queue->rx_prepared_head++; } /* Make descriptor updates visible to hardware */ @@ -989,6 +1012,15 @@ static void discard_partial_frame(struct macb_queue *queue, unsigned int begin, */ } +static int macb_validate_hw_csum(struct sk_buff *skb) +{ + u32 pkt_csum = *((u32 *)&skb->data[skb->len - ETH_FCS_LEN]); + u32 csum = ~crc32_le(~0, skb_mac_header(skb), + skb->len + ETH_HLEN - ETH_FCS_LEN); + + return (pkt_csum != csum); +} + static int gem_rx(struct macb_queue *queue, int budget) { struct macb *bp = queue->bp; @@ -1049,6 +1081,16 @@ static int gem_rx(struct macb_queue *queue, int budget) bp->rx_buffer_size, DMA_FROM_DEVICE); skb->protocol = eth_type_trans(skb, bp->dev); + + /* Validate MAC fcs if RX checksum offload disabled */ + if (!(bp->dev->features & NETIF_F_RXCSUM)) { + if (macb_validate_hw_csum(skb)) { + netdev_err(bp->dev, "incorrect FCS\n"); + bp->dev->stats.rx_dropped++; + break; + } + } + 
skb_checksum_none_assert(skb); if (bp->dev->features & NETIF_F_RXCSUM && !(bp->dev->flags & IFF_PROMISC) && @@ -1146,6 +1188,19 @@ static int macb_rx_frame(struct macb_queue *queue, unsigned int first_frag, break; } + /* Validate MAC fcs if RX checksum offload disabled */ + if (!(bp->dev->features & NETIF_F_RXCSUM)) { + if (macb_validate_hw_csum(skb)) { + netdev_err(bp->dev, "incorrect FCS\n"); + bp->dev->stats.rx_dropped++; + + /* Make descriptor updates visible to hardware */ + wmb(); + + return 1; + } + } + /* Make descriptor updates visible to hardware */ wmb(); @@ -1371,6 +1426,12 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) spin_lock(&bp->lock); while (status) { + if (status & MACB_BIT(WOL)) { + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) + queue_writel(queue, ISR, MACB_BIT(WOL)); + break; + } + /* close possible race with dev_close */ if (unlikely(!netif_running(dev))) { queue_writel(queue, IDR, -1); @@ -1621,7 +1682,8 @@ static unsigned int macb_tx_map(struct macb *bp, ctrl |= MACB_BF(TX_LSO, lso_ctrl); ctrl |= MACB_BF(TX_TCP_SEQ_SRC, seq_ctrl); if ((bp->dev->features & NETIF_F_HW_CSUM) && - skb->ip_summed != CHECKSUM_PARTIAL && !lso_ctrl) + skb->ip_summed != CHECKSUM_PARTIAL && !lso_ctrl && + (skb->data_len == 0)) ctrl |= MACB_BIT(TX_NOCRC); } else /* Only set MSS/MFS on payload descriptors @@ -1716,9 +1778,11 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev) struct sk_buff *nskb; u32 fcs; + /* Not available for GSO and fragments */ if (!(ndev->features & NETIF_F_HW_CSUM) || !((*skb)->ip_summed != CHECKSUM_PARTIAL) || - skb_shinfo(*skb)->gso_size) /* Not available for GSO */ + skb_shinfo(*skb)->gso_size || + ((*skb)->data_len > 0)) return 0; if (padlen <= 0) { @@ -1772,7 +1836,7 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev) unsigned long flags; unsigned int desc_cnt, nr_frags, frag_size, f; unsigned int hdrlen; - bool is_lso, is_udp = 0; + bool is_lso, is_udp = false; 
netdev_tx_t ret = NETDEV_TX_OK; if (macb_clear_csum(skb)) { @@ -1934,6 +1998,12 @@ static void macb_free_consistent(struct macb *bp) bp->macbgem_ops.mog_free_rx_buffers(bp); + if (bp->rx_ring_tieoff) { + dma_free_coherent(&bp->pdev->dev, macb_dma_desc_get_size(bp), + bp->rx_ring_tieoff, bp->rx_ring_tieoff_dma); + bp->rx_ring_tieoff = NULL; + } + for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { kfree(queue->tx_skb); queue->tx_skb = NULL; @@ -2023,6 +2093,14 @@ static int macb_alloc_consistent(struct macb *bp) if (bp->macbgem_ops.mog_alloc_rx_buffers(bp)) goto out_err; + /* Required for tie off descriptor for PM cases */ + bp->rx_ring_tieoff = dma_alloc_coherent(&bp->pdev->dev, + macb_dma_desc_get_size(bp), + &bp->rx_ring_tieoff_dma, + GFP_KERNEL); + if (!bp->rx_ring_tieoff) + goto out_err; + return 0; out_err: @@ -2030,6 +2108,19 @@ out_err: return -ENOMEM; } +static void macb_init_tieoff(struct macb *bp) +{ + struct macb_dma_desc *d = bp->rx_ring_tieoff; + + if (bp->num_queues > 1) { + /* Setup a wrapping descriptor with no free slots + * (WRAP and USED) to tie off/disable unused RX queues. 
+ */ + macb_set_addr(bp, d, MACB_BIT(RX_WRAP) | MACB_BIT(RX_USED)); + d->ctrl = 0; + } +} + static void gem_init_rings(struct macb *bp) { struct macb_queue *queue; @@ -2052,6 +2143,7 @@ static void gem_init_rings(struct macb *bp) gem_rx_refill(queue); } + macb_init_tieoff(bp); } @@ -2070,6 +2162,8 @@ static void macb_init_rings(struct macb *bp) bp->queues[0].tx_head = 0; bp->queues[0].tx_tail = 0; desc->ctrl |= MACB_BIT(TX_WRAP); + + macb_init_tieoff(bp); } static void macb_reset_hw(struct macb *bp) @@ -2092,6 +2186,10 @@ static void macb_reset_hw(struct macb *bp) macb_writel(bp, TSR, -1); macb_writel(bp, RSR, -1); + /* Disable RX partial store and forward and reset watermark value */ + if (bp->caps & MACB_CAPS_PARTIAL_STORE_FORWARD) + gem_writel(bp, PBUFRXCUT, 0xFFF); + /* Disable all interrupts */ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { queue_writel(queue, IDR, -1); @@ -2231,7 +2329,11 @@ static void macb_init_hw(struct macb *bp) config |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL); config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */ config |= MACB_BIT(PAE); /* PAuse Enable */ - config |= MACB_BIT(DRFCS); /* Discard Rx FCS */ + + /* Do not discard Rx FCS if RX checksum offload disabled */ + if (bp->dev->features & NETIF_F_RXCSUM) + config |= MACB_BIT(DRFCS); /* Discard Rx FCS */ + if (bp->caps & MACB_CAPS_JUMBO) config |= MACB_BIT(JFRAME); /* Enable jumbo frames */ else @@ -2247,13 +2349,24 @@ static void macb_init_hw(struct macb *bp) if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len) gem_writel(bp, JML, bp->jumbo_max_len); bp->speed = SPEED_10; - bp->duplex = DUPLEX_HALF; + if (bp->caps & MACB_CAPS_PARTIAL_STORE_FORWARD) + bp->duplex = DUPLEX_FULL; + else + bp->duplex = DUPLEX_HALF; bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK; if (bp->caps & MACB_CAPS_JUMBO) bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK; macb_configure_dma(bp); + /* Enable RX partial store and forward and set watermark */ + if (bp->caps & 
MACB_CAPS_PARTIAL_STORE_FORWARD) { + gem_writel(bp, PBUFRXCUT, + (gem_readl(bp, PBUFRXCUT) & + GEM_BF(WTRMRK, bp->rx_watermark)) | + GEM_BIT(ENCUTTHRU)); + } + /* Initialize TX and RX buffers */ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma)); @@ -2274,8 +2387,14 @@ static void macb_init_hw(struct macb *bp) MACB_BIT(HRESP)); } + if ((bp->phy_interface == PHY_INTERFACE_MODE_SGMII) && + (bp->caps & MACB_CAPS_PCS)) + gem_writel(bp, PCSCNTRL, + gem_readl(bp, PCSCNTRL) | GEM_BIT(PCSAUTONEG)); + /* Enable TX and RX */ - macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE)); + macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE) | + MACB_BIT(PTPUNI)); } /* The hash address register is 64 bits long and takes up two @@ -2411,6 +2530,10 @@ static int macb_open(struct net_device *dev) if (err < 0) goto pm_exit; + err = pm_runtime_get_sync(&bp->pdev->dev); + if (err < 0) + return err; + /* carrier starts down */ netif_carrier_off(dev); @@ -2479,6 +2602,8 @@ static int macb_close(struct net_device *dev) pm_runtime_put(&bp->pdev->dev); + pm_runtime_put(&bp->pdev->dev); + return 0; } @@ -2692,38 +2817,6 @@ static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs, regs_buff[13] = gem_readl(bp, DMACFG); } -static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) -{ - struct macb *bp = netdev_priv(netdev); - - wol->supported = 0; - wol->wolopts = 0; - - if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) { - wol->supported = WAKE_MAGIC; - - if (bp->wol & MACB_WOL_ENABLED) - wol->wolopts |= WAKE_MAGIC; - } -} - -static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) -{ - struct macb *bp = netdev_priv(netdev); - - if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) || - (wol->wolopts & ~WAKE_MAGIC)) - return -EOPNOTSUPP; - - if (wol->wolopts & WAKE_MAGIC) - bp->wol |= MACB_WOL_ENABLED; - else - bp->wol &= 
~MACB_WOL_ENABLED; - - device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED); - - return 0; -} static void macb_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) @@ -3159,8 +3252,6 @@ static const struct ethtool_ops macb_ethtool_ops = { .get_regs = macb_get_regs, .get_link = ethtool_op_get_link, .get_ts_info = ethtool_op_get_ts_info, - .get_wol = macb_get_wol, - .set_wol = macb_set_wol, .get_link_ksettings = phy_ethtool_get_link_ksettings, .set_link_ksettings = phy_ethtool_set_link_ksettings, .get_ringparam = macb_get_ringparam, @@ -3311,10 +3402,29 @@ static void macb_configure_caps(struct macb *bp, const struct macb_config *dt_conf) { u32 dcfg; + int retval; if (dt_conf) bp->caps = dt_conf->caps; + /* By default we set to partial store and forward mode for zynqmp. + * Disable if not set in devicetree. + */ + if (bp->caps & MACB_CAPS_PARTIAL_STORE_FORWARD) { + retval = of_property_read_u16(bp->pdev->dev.of_node, + "rx-watermark", + &bp->rx_watermark); + + /* Disable partial store and forward in case of error or + * invalid watermark value + */ + if (retval || bp->rx_watermark > 0xFFF) { + dev_info(&bp->pdev->dev, + "Not enabling partial store and forward\n"); + bp->caps &= ~MACB_CAPS_PARTIAL_STORE_FORWARD; + } + } + if (hw_is_gem(bp->regs, bp->native_io)) { bp->caps |= MACB_CAPS_MACB_IS_GEM; @@ -3372,18 +3482,9 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, struct clk **hclk, struct clk **tx_clk, struct clk **rx_clk, struct clk **tsu_clk) { - struct macb_platform_data *pdata; int err; - pdata = dev_get_platdata(&pdev->dev); - if (pdata) { - *pclk = pdata->pclk; - *hclk = pdata->hclk; - } else { - *pclk = devm_clk_get(&pdev->dev, "pclk"); - *hclk = devm_clk_get(&pdev->dev, "hclk"); - } - + *pclk = devm_clk_get(&pdev->dev, "pclk"); if (IS_ERR_OR_NULL(*pclk)) { err = PTR_ERR(*pclk); if (!err) @@ -3393,6 +3494,7 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, return err; } 
+ *hclk = devm_clk_get(&pdev->dev, "hclk"); if (IS_ERR_OR_NULL(*hclk)) { err = PTR_ERR(*hclk); if (!err) @@ -3414,6 +3516,10 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, if (IS_ERR(*tsu_clk)) *tsu_clk = NULL; + *tsu_clk = devm_clk_get(&pdev->dev, "tsu_clk"); + if (IS_ERR(*tsu_clk)) + *tsu_clk = NULL; + err = clk_prepare_enable(*pclk); if (err) { dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err); @@ -3562,6 +3668,8 @@ static int macb_init(struct platform_device *pdev) /* Checksum offload is only available on gem with packet buffer */ if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE)) dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM; + if (bp->caps & MACB_CAPS_PARTIAL_STORE_FORWARD) + dev->hw_features &= ~NETIF_F_RXCSUM; if (bp->caps & MACB_CAPS_SG_DISABLED) dev->hw_features &= ~NETIF_F_SG; dev->features = dev->hw_features; @@ -3613,6 +3721,11 @@ static int macb_init(struct platform_device *pdev) val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL); macb_writel(bp, NCFGR, val); + if ((bp->phy_interface == PHY_INTERFACE_MODE_SGMII) && + (bp->caps & MACB_CAPS_PCS)) + gem_writel(bp, PCSCNTRL, + gem_readl(bp, PCSCNTRL) | GEM_BIT(PCSAUTONEG)); + return 0; } @@ -4017,7 +4130,9 @@ static const struct macb_config np4_config = { static const struct macb_config zynqmp_config = { .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO | - MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH, + MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH | + MACB_CAPS_PCS | + MACB_CAPS_PARTIAL_STORE_FORWARD | MACB_CAPS_WOL, .dma_burst_length = 16, .clk_init = macb_clk_init, .init = macb_init, @@ -4071,6 +4186,7 @@ static int macb_probe(struct platform_device *pdev) struct clk **) = macb_config->clk_init; int (*init)(struct platform_device *) = macb_config->init; struct device_node *np = pdev->dev.of_node; + struct device_node *phy_node; struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL; struct clk *tsu_clk = NULL; unsigned int queue_mask, 
num_queues; @@ -4142,14 +4258,12 @@ static int macb_probe(struct platform_device *pdev) bp->tx_clk = tx_clk; bp->rx_clk = rx_clk; bp->tsu_clk = tsu_clk; + if (tsu_clk) + bp->tsu_rate = clk_get_rate(tsu_clk); + if (macb_config) bp->jumbo_max_len = macb_config->jumbo_max_len; - bp->wol = 0; - if (of_get_property(np, "magic-packet", NULL)) - bp->wol |= MACB_WOL_HAS_MAGIC_PACKET; - device_set_wakeup_capable(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET); - spin_lock_init(&bp->lock); /* setup capabilities */ @@ -4202,6 +4316,18 @@ static int macb_probe(struct platform_device *pdev) macb_get_hwaddr(bp); } + /* Power up the PHY if there is a GPIO reset */ + phy_node = of_parse_phandle(np, "phy-handle", 0); + if (!phy_node && of_phy_is_fixed_link(np)) { + err = of_phy_register_fixed_link(np); + if (err < 0) { + dev_err(&pdev->dev, "broken fixed-link specification"); + goto err_out_free_netdev; + } + phy_node = of_node_get(np); + } + bp->phy_node = phy_node; + err = of_get_phy_mode(np); if (err < 0) /* not found in DT, MII by default */ @@ -4212,25 +4338,28 @@ static int macb_probe(struct platform_device *pdev) /* IP specific init */ err = init(pdev); if (err) - goto err_out_free_netdev; + goto err_out_phy_put; + + err = register_netdev(dev); + if (err) { + dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); + goto err_out_phy_put; + } err = macb_mii_init(bp); if (err) - goto err_out_free_netdev; + goto err_out_unregister_netdev; phydev = dev->phydev; netif_carrier_off(dev); - err = register_netdev(dev); - if (err) { - dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); - goto err_out_unregister_mdio; - } - tasklet_init(&bp->hresp_err_tasklet, macb_hresp_error_task, (unsigned long)bp); + if (bp->caps & MACB_CAPS_WOL) + device_set_wakeup_capable(&bp->dev->dev, 1); + phy_attached_info(phydev); netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n", @@ -4242,13 +4371,13 @@ static int macb_probe(struct platform_device *pdev) return 0; 
-err_out_unregister_mdio: - phy_disconnect(dev->phydev); - mdiobus_unregister(bp->mii_bus); +err_out_unregister_netdev: + unregister_netdev(dev); + +err_out_phy_put: of_node_put(bp->phy_node); if (np && of_phy_is_fixed_link(np)) of_phy_deregister_fixed_link(np); - mdiobus_free(bp->mii_bus); err_out_free_netdev: free_netdev(dev); @@ -4310,16 +4439,50 @@ static int __maybe_unused macb_suspend(struct device *dev) struct macb_queue *queue = bp->queues; unsigned long flags; unsigned int q; + u32 ctrl, arpipmask; if (!netif_running(netdev)) return 0; + if (device_may_wakeup(&bp->dev->dev)) { + spin_lock_irqsave(&bp->lock, flags); + ctrl = macb_readl(bp, NCR); + ctrl &= ~(MACB_BIT(TE) | MACB_BIT(RE)); + macb_writel(bp, NCR, ctrl); + /* Tie off RX queues */ + for (q = 0, queue = bp->queues; q < bp->num_queues; + ++q, ++queue) { + queue_writel(queue, RBQP, + lower_32_bits(bp->rx_ring_tieoff_dma)); + } + ctrl = macb_readl(bp, NCR); + ctrl |= MACB_BIT(RE); + macb_writel(bp, NCR, ctrl); + gem_writel(bp, NCFGR, gem_readl(bp, NCFGR) & ~MACB_BIT(NBC)); + macb_writel(bp, TSR, -1); + macb_writel(bp, RSR, -1); + macb_readl(bp, ISR); + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) + macb_writel(bp, ISR, -1); - if (bp->wol & MACB_WOL_ENABLED) { + /* Enable WOL (Q0 only) and disable all other interrupts */ macb_writel(bp, IER, MACB_BIT(WOL)); - macb_writel(bp, WOL, MACB_BIT(MAG)); + for (q = 1, queue = bp->queues; q < bp->num_queues; + ++q, ++queue) { + queue_writel(queue, IDR, MACB_RX_INT_FLAGS | + MACB_TX_INT_FLAGS | + MACB_BIT(HRESP)); + } + + arpipmask = cpu_to_be32p(&bp->dev->ip_ptr->ifa_list->ifa_local) + & 0xFFFF; + gem_writel(bp, WOL, MACB_BIT(ARP) | arpipmask); + spin_unlock_irqrestore(&bp->lock, flags); enable_irq_wake(bp->queues[0].irq); netif_device_detach(netdev); + for (q = 0, queue = bp->queues; q < bp->num_queues; + ++q, ++queue) + napi_disable(&queue->napi); } else { netif_device_detach(netdev); for (q = 0, queue = bp->queues; q < bp->num_queues; @@ -4352,6 +4515,7 @@ 
static int __maybe_unused macb_resume(struct device *dev) struct net_device *netdev = dev_get_drvdata(dev); struct macb *bp = netdev_priv(netdev); struct macb_queue *queue = bp->queues; + unsigned long flags; unsigned int q; if (!netif_running(netdev)) @@ -4360,10 +4524,20 @@ static int __maybe_unused macb_resume(struct device *dev) if (!device_may_wakeup(dev)) pm_runtime_force_resume(dev); - if (bp->wol & MACB_WOL_ENABLED) { + if (device_may_wakeup(&bp->dev->dev)) { + spin_lock_irqsave(&bp->lock, flags); macb_writel(bp, IDR, MACB_BIT(WOL)); - macb_writel(bp, WOL, 0); + gem_writel(bp, WOL, 0); + /* Clear Q0 ISR as WOL was enabled on Q0 */ + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) + macb_writel(bp, ISR, -1); disable_irq_wake(bp->queues[0].irq); + spin_unlock_irqrestore(&bp->lock, flags); + macb_writel(bp, NCR, MACB_BIT(MPE)); + for (q = 0, queue = bp->queues; q < bp->num_queues; + ++q, ++queue) + napi_enable(&queue->napi); + netif_carrier_on(netdev); } else { macb_writel(bp, NCR, MACB_BIT(MPE)); |