diff --git a/meta-seattle/recipes-kernel/linux/files/319-Update-xgbe-drivers-for-B0-board.patch b/meta-seattle/recipes-kernel/linux/files/319-Update-xgbe-drivers-for-B0-board.patch
new file mode 100644
index 00000000..13344ac8
--- /dev/null
+++ b/meta-seattle/recipes-kernel/linux/files/319-Update-xgbe-drivers-for-B0-board.patch
@@ -0,0 +1,3560 @@
+From 675ffdbcc905bc44a9fef9a7f6569493a3a8efe1 Mon Sep 17 00:00:00 2001
+From: Adrian Calianu <adrian.calianu@enea.com>
+Date: Fri, 28 Aug 2015 17:35:57 +0200
+Subject: [PATCH] Hierofalcon: Update xgbe drivers for B0 board
+
+Port the AMD xgbe ethernet drivers from the 4.1 kernel to 3.19
+in order to have ethernet working on the B0 board
+
+Upstream-Status: Inappropriate
+Support for this board was added starting with kernel version 4.1
+
+Signed-off-by: Adrian Calianu <adrian.calianu@enea.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-common.h | 2 +
+ drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c | 2 +-
+ drivers/net/ethernet/amd/xgbe/xgbe-desc.c | 34 +-
+ drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 147 +++-
+ drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 309 +++----
+ drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c | 29 +-
+ drivers/net/ethernet/amd/xgbe/xgbe-main.c | 207 ++++-
+ drivers/net/ethernet/amd/xgbe/xgbe-mdio.c | 29 +-
+ drivers/net/ethernet/amd/xgbe/xgbe-ptp.c | 21 +-
+ drivers/net/ethernet/amd/xgbe/xgbe.h | 46 +-
+ drivers/net/phy/amd-xgbe-phy.c | 1142 ++++++++++++++++++--------
+ include/linux/clocksource.h | 9 +
+ 12 files changed, 1284 insertions(+), 693 deletions(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+index 29a0927..34c28aa 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+@@ -365,6 +365,8 @@
+ #define MAC_HWF0R_TXCOESEL_WIDTH 1
+ #define MAC_HWF0R_VLHASH_INDEX 4
+ #define MAC_HWF0R_VLHASH_WIDTH 1
++#define MAC_HWF1R_ADDR64_INDEX 14
++#define MAC_HWF1R_ADDR64_WIDTH 2
+ #define MAC_HWF1R_ADVTHWORD_INDEX 13
+ #define MAC_HWF1R_ADVTHWORD_WIDTH 1
+ #define MAC_HWF1R_DBGMEMA_INDEX 19
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
+index 76479d0..2c063b6 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
+@@ -328,7 +328,7 @@ void xgbe_debugfs_init(struct xgbe_prv_data *pdata)
+
+ buf = kasprintf(GFP_KERNEL, "amd-xgbe-%s", pdata->netdev->name);
+ pdata->xgbe_debugfs = debugfs_create_dir(buf, NULL);
+- if (pdata->xgbe_debugfs == NULL) {
++ if (!pdata->xgbe_debugfs) {
+ netdev_err(pdata->netdev, "debugfs_create_dir failed\n");
+ return;
+ }
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+index a50891f..5c92fb7 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+@@ -263,7 +263,7 @@ static int xgbe_alloc_pages(struct xgbe_prv_data *pdata,
+ int ret;
+
+ /* Try to obtain pages, decreasing order if necessary */
+- gfp |= __GFP_COLD | __GFP_COMP;
++ gfp |= __GFP_COLD | __GFP_COMP | __GFP_NOWARN;
+ while (order >= 0) {
+ pages = alloc_pages(gfp, order);
+ if (pages)
+@@ -422,7 +422,6 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
+
+ ring->cur = 0;
+ ring->dirty = 0;
+- memset(&ring->rx, 0, sizeof(ring->rx));
+
+ hw_if->rx_desc_init(channel);
+ }
+@@ -621,35 +620,6 @@ err_out:
+ return 0;
+ }
+
+-static void xgbe_realloc_rx_buffer(struct xgbe_channel *channel)
+-{
+- struct xgbe_prv_data *pdata = channel->pdata;
+- struct xgbe_hw_if *hw_if = &pdata->hw_if;
+- struct xgbe_ring *ring = channel->rx_ring;
+- struct xgbe_ring_data *rdata;
+- int i;
+-
+- DBGPR("-->xgbe_realloc_rx_buffer: rx_ring->rx.realloc_index = %u\n",
+- ring->rx.realloc_index);
+-
+- for (i = 0; i < ring->dirty; i++) {
+- rdata = XGBE_GET_DESC_DATA(ring, ring->rx.realloc_index);
+-
+- /* Reset rdata values */
+- xgbe_unmap_rdata(pdata, rdata);
+-
+- if (xgbe_map_rx_buffer(pdata, ring, rdata))
+- break;
+-
+- hw_if->rx_desc_reset(rdata);
+-
+- ring->rx.realloc_index++;
+- }
+- ring->dirty = 0;
+-
+- DBGPR("<--xgbe_realloc_rx_buffer\n");
+-}
+-
+ void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
+ {
+ DBGPR("-->xgbe_init_function_ptrs_desc\n");
+@@ -657,7 +627,7 @@ void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
+ desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
+ desc_if->free_ring_resources = xgbe_free_ring_resources;
+ desc_if->map_tx_skb = xgbe_map_tx_skb;
+- desc_if->realloc_rx_buffer = xgbe_realloc_rx_buffer;
++ desc_if->map_rx_buffer = xgbe_map_rx_buffer;
+ desc_if->unmap_rdata = xgbe_unmap_rdata;
+ desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
+ desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+index 4c66cd1..21d9497 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+@@ -115,6 +115,7 @@
+ */
+
+ #include <linux/phy.h>
++#include <linux/mdio.h>
+ #include <linux/clk.h>
+ #include <linux/bitrev.h>
+ #include <linux/crc32.h>
+@@ -130,7 +131,7 @@ static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data *pdata,
+
+ DBGPR("-->xgbe_usec_to_riwt\n");
+
+- rate = clk_get_rate(pdata->sysclk);
++ rate = pdata->sysclk_rate;
+
+ /*
+ * Convert the input usec value to the watchdog timer value. Each
+@@ -153,7 +154,7 @@ static unsigned int xgbe_riwt_to_usec(struct xgbe_prv_data *pdata,
+
+ DBGPR("-->xgbe_riwt_to_usec\n");
+
+- rate = clk_get_rate(pdata->sysclk);
++ rate = pdata->sysclk_rate;
+
+ /*
+ * Convert the input watchdog timer value to the usec value. Each
+@@ -673,6 +674,9 @@ static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata)
+
+ static int xgbe_set_gmii_speed(struct xgbe_prv_data *pdata)
+ {
++ if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) == 0x3)
++ return 0;
++
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x3);
+
+ return 0;
+@@ -680,6 +684,9 @@ static int xgbe_set_gmii_speed(struct xgbe_prv_data *pdata)
+
+ static int xgbe_set_gmii_2500_speed(struct xgbe_prv_data *pdata)
+ {
++ if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) == 0x2)
++ return 0;
++
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x2);
+
+ return 0;
+@@ -687,6 +694,9 @@ static int xgbe_set_gmii_2500_speed(struct xgbe_prv_data *pdata)
+
+ static int xgbe_set_xgmii_speed(struct xgbe_prv_data *pdata)
+ {
++ if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) == 0)
++ return 0;
++
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0);
+
+ return 0;
+@@ -843,6 +853,22 @@ static int xgbe_set_mac_address(struct xgbe_prv_data *pdata, u8 *addr)
+ return 0;
+ }
+
++static int xgbe_config_rx_mode(struct xgbe_prv_data *pdata)
++{
++ struct net_device *netdev = pdata->netdev;
++ unsigned int pr_mode, am_mode;
++
++ pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
++ am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);
++
++ xgbe_set_promiscuous_mode(pdata, pr_mode);
++ xgbe_set_all_multicast_mode(pdata, am_mode);
++
++ xgbe_add_mac_addresses(pdata);
++
++ return 0;
++}
++
+ static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
+ int mmd_reg)
+ {
+@@ -881,6 +907,23 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
+ else
+ mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+
++ /* If the PCS is changing modes, match the MAC speed to it */
++ if (((mmd_address >> 16) == MDIO_MMD_PCS) &&
++ ((mmd_address & 0xffff) == MDIO_CTRL2)) {
++ struct phy_device *phydev = pdata->phydev;
++
++ if (mmd_data & MDIO_PCS_CTRL2_TYPE) {
++ /* KX mode */
++ if (phydev->supported & SUPPORTED_1000baseKX_Full)
++ xgbe_set_gmii_speed(pdata);
++ else
++ xgbe_set_gmii_2500_speed(pdata);
++ } else {
++ /* KR mode */
++ xgbe_set_xgmii_speed(pdata);
++ }
++ }
++
+ /* The PCS registers are accessed using mmio. The underlying APB3
+ * management interface uses indirect addressing to access the MMD
+ * register sets. This requires accessing of the PCS register in two
+@@ -1041,7 +1084,7 @@ static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata)
+ rdesc->desc3 = 0;
+
+ /* Make sure ownership is written to the descriptor */
+- wmb();
++ dma_wmb();
+ }
+
+ static void xgbe_tx_desc_init(struct xgbe_channel *channel)
+@@ -1074,9 +1117,24 @@ static void xgbe_tx_desc_init(struct xgbe_channel *channel)
+ DBGPR("<--tx_desc_init\n");
+ }
+
+-static void xgbe_rx_desc_reset(struct xgbe_ring_data *rdata)
++static void xgbe_rx_desc_reset(struct xgbe_prv_data *pdata,
++ struct xgbe_ring_data *rdata, unsigned int index)
+ {
+ struct xgbe_ring_desc *rdesc = rdata->rdesc;
++ unsigned int rx_usecs = pdata->rx_usecs;
++ unsigned int rx_frames = pdata->rx_frames;
++ unsigned int inte;
++
++ if (!rx_usecs && !rx_frames) {
++ /* No coalescing, interrupt for every descriptor */
++ inte = 1;
++ } else {
++ /* Set interrupt based on Rx frame coalescing setting */
++ if (rx_frames && !((index + 1) % rx_frames))
++ inte = 1;
++ else
++ inte = 0;
++ }
+
+ /* Reset the Rx descriptor
+ * Set buffer 1 (lo) address to header dma address (lo)
+@@ -1090,19 +1148,18 @@ static void xgbe_rx_desc_reset(struct xgbe_ring_data *rdata)
+ rdesc->desc2 = cpu_to_le32(lower_32_bits(rdata->rx.buf.dma));
+ rdesc->desc3 = cpu_to_le32(upper_32_bits(rdata->rx.buf.dma));
+
+- XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE,
+- rdata->interrupt ? 1 : 0);
++ XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, inte);
+
+ /* Since the Rx DMA engine is likely running, make sure everything
+ * is written to the descriptor(s) before setting the OWN bit
+ * for the descriptor
+ */
+- wmb();
++ dma_wmb();
+
+ XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);
+
+ /* Make sure ownership is written to the descriptor */
+- wmb();
++ dma_wmb();
+ }
+
+ static void xgbe_rx_desc_init(struct xgbe_channel *channel)
+@@ -1111,26 +1168,16 @@ static void xgbe_rx_desc_init(struct xgbe_channel *channel)
+ struct xgbe_ring *ring = channel->rx_ring;
+ struct xgbe_ring_data *rdata;
+ unsigned int start_index = ring->cur;
+- unsigned int rx_coalesce, rx_frames;
+ unsigned int i;
+
+ DBGPR("-->rx_desc_init\n");
+
+- rx_coalesce = (pdata->rx_riwt || pdata->rx_frames) ? 1 : 0;
+- rx_frames = pdata->rx_frames;
+-
+ /* Initialize all descriptors */
+ for (i = 0; i < ring->rdesc_count; i++) {
+ rdata = XGBE_GET_DESC_DATA(ring, i);
+
+- /* Set interrupt on completion bit as appropriate */
+- if (rx_coalesce && (!rx_frames || ((i + 1) % rx_frames)))
+- rdata->interrupt = 0;
+- else
+- rdata->interrupt = 1;
+-
+ /* Initialize Rx descriptor */
+- xgbe_rx_desc_reset(rdata);
++ xgbe_rx_desc_reset(pdata, rdata, i);
+ }
+
+ /* Update the total number of Rx descriptors */
+@@ -1331,18 +1378,20 @@ static void xgbe_tx_start_xmit(struct xgbe_channel *channel,
+ struct xgbe_prv_data *pdata = channel->pdata;
+ struct xgbe_ring_data *rdata;
+
++ /* Make sure everything is written before the register write */
++ wmb();
++
+ /* Issue a poll command to Tx DMA by writing address
+ * of next immediate free descriptor */
+ rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO,
+ lower_32_bits(rdata->rdesc_dma));
+
+- /* Start the Tx coalescing timer */
++ /* Start the Tx timer */
+ if (pdata->tx_usecs && !channel->tx_timer_active) {
+ channel->tx_timer_active = 1;
+- hrtimer_start(&channel->tx_timer,
+- ktime_set(0, pdata->tx_usecs * NSEC_PER_USEC),
+- HRTIMER_MODE_REL);
++ mod_timer(&channel->tx_timer,
++ jiffies + usecs_to_jiffies(pdata->tx_usecs));
+ }
+
+ ring->tx.xmit_more = 0;
+@@ -1359,6 +1408,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
+ unsigned int tso_context, vlan_context;
+ unsigned int tx_set_ic;
+ int start_index = ring->cur;
++ int cur_index = ring->cur;
+ int i;
+
+ DBGPR("-->xgbe_dev_xmit\n");
+@@ -1401,7 +1451,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
+ else
+ tx_set_ic = 0;
+
+- rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
++ rdata = XGBE_GET_DESC_DATA(ring, cur_index);
+ rdesc = rdata->rdesc;
+
+ /* Create a context descriptor if this is a TSO packet */
+@@ -1444,8 +1494,8 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
+ ring->tx.cur_vlan_ctag = packet->vlan_ctag;
+ }
+
+- ring->cur++;
+- rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
++ cur_index++;
++ rdata = XGBE_GET_DESC_DATA(ring, cur_index);
+ rdesc = rdata->rdesc;
+ }
+
+@@ -1473,7 +1523,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);
+
+ /* Set OWN bit if not the first descriptor */
+- if (ring->cur != start_index)
++ if (cur_index != start_index)
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
+
+ if (tso) {
+@@ -1497,9 +1547,9 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
+ packet->length);
+ }
+
+- for (i = ring->cur - start_index + 1; i < packet->rdesc_count; i++) {
+- ring->cur++;
+- rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
++ for (i = cur_index - start_index + 1; i < packet->rdesc_count; i++) {
++ cur_index++;
++ rdata = XGBE_GET_DESC_DATA(ring, cur_index);
+ rdesc = rdata->rdesc;
+
+ /* Update buffer address */
+@@ -1537,7 +1587,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
+ * is written to the descriptor(s) before setting the OWN bit
+ * for the first descriptor
+ */
+- wmb();
++ dma_wmb();
+
+ /* Set OWN bit for the first descriptor */
+ rdata = XGBE_GET_DESC_DATA(ring, start_index);
+@@ -1549,9 +1599,9 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
+ #endif
+
+ /* Make sure ownership is written to the descriptor */
+- wmb();
++ dma_wmb();
+
+- ring->cur++;
++ ring->cur = cur_index + 1;
+ if (!packet->skb->xmit_more ||
+ netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev,
+ channel->queue_index)))
+@@ -1585,7 +1635,7 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
+ return 1;
+
+ /* Make sure descriptor fields are read after reading the OWN bit */
+- rmb();
++ dma_rmb();
+
+ #ifdef XGMAC_ENABLE_RX_DESC_DUMP
+ xgbe_dump_rx_desc(ring, rdesc, ring->cur);
+@@ -1976,7 +2026,8 @@ static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
+ for (i = 0; i < pdata->tx_q_count; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo_size);
+
+- netdev_notice(pdata->netdev, "%d Tx queues, %d byte fifo per queue\n",
++ netdev_notice(pdata->netdev,
++ "%d Tx hardware queues, %d byte fifo per queue\n",
+ pdata->tx_q_count, ((fifo_size + 1) * 256));
+ }
+
+@@ -1991,7 +2042,8 @@ static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
+ for (i = 0; i < pdata->rx_q_count; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo_size);
+
+- netdev_notice(pdata->netdev, "%d Rx queues, %d byte fifo per queue\n",
++ netdev_notice(pdata->netdev,
++ "%d Rx hardware queues, %d byte fifo per queue\n",
+ pdata->rx_q_count, ((fifo_size + 1) * 256));
+ }
+
+@@ -2107,6 +2159,23 @@ static void xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata)
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
+ }
+
++static void xgbe_config_mac_speed(struct xgbe_prv_data *pdata)
++{
++ switch (pdata->phy_speed) {
++ case SPEED_10000:
++ xgbe_set_xgmii_speed(pdata);
++ break;
++
++ case SPEED_2500:
++ xgbe_set_gmii_2500_speed(pdata);
++ break;
++
++ case SPEED_1000:
++ xgbe_set_gmii_speed(pdata);
++ break;
++ }
++}
++
+ static void xgbe_config_checksum_offload(struct xgbe_prv_data *pdata)
+ {
+ if (pdata->netdev->features & NETIF_F_RXCSUM)
+@@ -2755,8 +2824,10 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
+ * Initialize MAC related features
+ */
+ xgbe_config_mac_address(pdata);
++ xgbe_config_rx_mode(pdata);
+ xgbe_config_jumbo_enable(pdata);
+ xgbe_config_flow_control(pdata);
++ xgbe_config_mac_speed(pdata);
+ xgbe_config_checksum_offload(pdata);
+ xgbe_config_vlan_support(pdata);
+ xgbe_config_mmc(pdata);
+@@ -2773,10 +2844,8 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
+
+ hw_if->tx_complete = xgbe_tx_complete;
+
+- hw_if->set_promiscuous_mode = xgbe_set_promiscuous_mode;
+- hw_if->set_all_multicast_mode = xgbe_set_all_multicast_mode;
+- hw_if->add_mac_addresses = xgbe_add_mac_addresses;
+ hw_if->set_mac_address = xgbe_set_mac_address;
++ hw_if->config_rx_mode = xgbe_config_rx_mode;
+
+ hw_if->enable_rx_csum = xgbe_enable_rx_csum;
+ hw_if->disable_rx_csum = xgbe_disable_rx_csum;
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+index e5ffb2c..343bf6a 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+@@ -129,7 +129,6 @@
+
+ static int xgbe_one_poll(struct napi_struct *, int);
+ static int xgbe_all_poll(struct napi_struct *, int);
+-static void xgbe_set_rx_mode(struct net_device *);
+
+ static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
+ {
+@@ -225,6 +224,11 @@ static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring)
+ return (ring->rdesc_count - (ring->cur - ring->dirty));
+ }
+
++static inline unsigned int xgbe_rx_dirty_desc(struct xgbe_ring *ring)
++{
++ return (ring->cur - ring->dirty);
++}
++
+ static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
+ struct xgbe_ring *ring, unsigned int count)
+ {
+@@ -337,12 +341,13 @@ static irqreturn_t xgbe_isr(int irq, void *data)
+ dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
+ DBGPR(" DMA_CH%u_ISR = %08x\n", i, dma_ch_isr);
+
+- /* If we get a TI or RI interrupt that means per channel DMA
+- * interrupts are not enabled, so we use the private data napi
+- * structure, not the per channel napi structure
++ /* The TI or RI interrupt bits may still be set even if using
++ * per channel DMA interrupts. Check to be sure those are not
++ * enabled before using the private data napi structure.
+ */
+- if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
+- XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI)) {
++ if (!pdata->per_channel_irq &&
++ (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
++ XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI))) {
+ if (napi_schedule_prep(&pdata->napi)) {
+ /* Disable Tx and Rx interrupts */
+ xgbe_disable_rx_tx_ints(pdata);
+@@ -405,26 +410,20 @@ static irqreturn_t xgbe_dma_isr(int irq, void *data)
+ return IRQ_HANDLED;
+ }
+
+-static enum hrtimer_restart xgbe_tx_timer(struct hrtimer *timer)
++static void xgbe_tx_timer(unsigned long data)
+ {
+- struct xgbe_channel *channel = container_of(timer,
+- struct xgbe_channel,
+- tx_timer);
+- struct xgbe_ring *ring = channel->tx_ring;
++ struct xgbe_channel *channel = (struct xgbe_channel *)data;
+ struct xgbe_prv_data *pdata = channel->pdata;
+ struct napi_struct *napi;
+- unsigned long flags;
+
+ DBGPR("-->xgbe_tx_timer\n");
+
+ napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
+
+- spin_lock_irqsave(&ring->lock, flags);
+-
+ if (napi_schedule_prep(napi)) {
+ /* Disable Tx and Rx interrupts */
+ if (pdata->per_channel_irq)
+- disable_irq(channel->dma_irq);
++ disable_irq_nosync(channel->dma_irq);
+ else
+ xgbe_disable_rx_tx_ints(pdata);
+
+@@ -434,11 +433,7 @@ static enum hrtimer_restart xgbe_tx_timer(struct hrtimer *timer)
+
+ channel->tx_timer_active = 0;
+
+- spin_unlock_irqrestore(&ring->lock, flags);
+-
+ DBGPR("<--xgbe_tx_timer\n");
+-
+- return HRTIMER_NORESTART;
+ }
+
+ static void xgbe_init_tx_timers(struct xgbe_prv_data *pdata)
+@@ -454,9 +449,8 @@ static void xgbe_init_tx_timers(struct xgbe_prv_data *pdata)
+ break;
+
+ DBGPR(" %s adding tx timer\n", channel->name);
+- hrtimer_init(&channel->tx_timer, CLOCK_MONOTONIC,
+- HRTIMER_MODE_REL);
+- channel->tx_timer.function = xgbe_tx_timer;
++ setup_timer(&channel->tx_timer, xgbe_tx_timer,
++ (unsigned long)channel);
+ }
+
+ DBGPR("<--xgbe_init_tx_timers\n");
+@@ -475,8 +469,7 @@ static void xgbe_stop_tx_timers(struct xgbe_prv_data *pdata)
+ break;
+
+ DBGPR(" %s deleting tx timer\n", channel->name);
+- channel->tx_timer_active = 0;
+- hrtimer_cancel(&channel->tx_timer);
++ del_timer_sync(&channel->tx_timer);
+ }
+
+ DBGPR("<--xgbe_stop_tx_timers\n");
+@@ -519,6 +512,7 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
+ RXFIFOSIZE);
+ hw_feat->tx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
+ TXFIFOSIZE);
++ hw_feat->dma_width = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
+ hw_feat->dcb = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
+ hw_feat->sph = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
+ hw_feat->tso = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
+@@ -553,6 +547,21 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
+ break;
+ }
+
++ /* Translate the address width setting into actual number */
++ switch (hw_feat->dma_width) {
++ case 0:
++ hw_feat->dma_width = 32;
++ break;
++ case 1:
++ hw_feat->dma_width = 40;
++ break;
++ case 2:
++ hw_feat->dma_width = 48;
++ break;
++ default:
++ hw_feat->dma_width = 32;
++ }
++
+ /* The Queue, Channel and TC counts are zero based so increment them
+ * to get the actual number
+ */
+@@ -609,6 +618,68 @@ static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
+ }
+ }
+
++static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_channel *channel;
++ struct net_device *netdev = pdata->netdev;
++ unsigned int i;
++ int ret;
++
++ ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
++ netdev->name, pdata);
++ if (ret) {
++ netdev_alert(netdev, "error requesting irq %d\n",
++ pdata->dev_irq);
++ return ret;
++ }
++
++ if (!pdata->per_channel_irq)
++ return 0;
++
++ channel = pdata->channel;
++ for (i = 0; i < pdata->channel_count; i++, channel++) {
++ snprintf(channel->dma_irq_name,
++ sizeof(channel->dma_irq_name) - 1,
++ "%s-TxRx-%u", netdev_name(netdev),
++ channel->queue_index);
++
++ ret = devm_request_irq(pdata->dev, channel->dma_irq,
++ xgbe_dma_isr, 0,
++ channel->dma_irq_name, channel);
++ if (ret) {
++ netdev_alert(netdev, "error requesting irq %d\n",
++ channel->dma_irq);
++ goto err_irq;
++ }
++ }
++
++ return 0;
++
++err_irq:
++ /* Using an unsigned int, 'i' will go to UINT_MAX and exit */
++ for (i--, channel--; i < pdata->channel_count; i--, channel--)
++ devm_free_irq(pdata->dev, channel->dma_irq, channel);
++
++ devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
++
++ return ret;
++}
++
++static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_channel *channel;
++ unsigned int i;
++
++ devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
++
++ if (!pdata->per_channel_irq)
++ return;
++
++ channel = pdata->channel;
++ for (i = 0; i < pdata->channel_count; i++, channel++)
++ devm_free_irq(pdata->dev, channel->dma_irq, channel);
++}
++
+ void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
+ {
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+@@ -630,6 +701,7 @@ void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
+ DBGPR("-->xgbe_init_rx_coalesce\n");
+
+ pdata->rx_riwt = hw_if->usec_to_riwt(pdata, XGMAC_INIT_DMA_RX_USECS);
++ pdata->rx_usecs = XGMAC_INIT_DMA_RX_USECS;
+ pdata->rx_frames = XGMAC_INIT_DMA_RX_FRAMES;
+
+ hw_if->config_rx_coalesce(pdata);
+@@ -694,7 +766,7 @@ static void xgbe_adjust_link(struct net_device *netdev)
+ struct phy_device *phydev = pdata->phydev;
+ int new_state = 0;
+
+- if (phydev == NULL)
++ if (!phydev)
+ return;
+
+ if (phydev->link) {
+@@ -810,20 +882,20 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
+ return -EINVAL;
+ }
+
+- phy_stop(pdata->phydev);
+-
+ spin_lock_irqsave(&pdata->lock, flags);
+
+ if (caller == XGMAC_DRIVER_CONTEXT)
+ netif_device_detach(netdev);
+
+ netif_tx_stop_all_queues(netdev);
+- xgbe_napi_disable(pdata, 0);
+
+- /* Powerdown Tx/Rx */
+ hw_if->powerdown_tx(pdata);
+ hw_if->powerdown_rx(pdata);
+
++ xgbe_napi_disable(pdata, 0);
++
++ phy_stop(pdata->phydev);
++
+ pdata->power_down = 1;
+
+ spin_unlock_irqrestore(&pdata->lock, flags);
+@@ -854,14 +926,14 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
+
+ phy_start(pdata->phydev);
+
+- /* Enable Tx/Rx */
++ xgbe_napi_enable(pdata, 0);
++
+ hw_if->powerup_tx(pdata);
+ hw_if->powerup_rx(pdata);
+
+ if (caller == XGMAC_DRIVER_CONTEXT)
+ netif_device_attach(netdev);
+
+- xgbe_napi_enable(pdata, 0);
+ netif_tx_start_all_queues(netdev);
+
+ spin_unlock_irqrestore(&pdata->lock, flags);
+@@ -875,26 +947,39 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
+ {
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct net_device *netdev = pdata->netdev;
++ int ret;
+
+ DBGPR("-->xgbe_start\n");
+
+- xgbe_set_rx_mode(netdev);
+-
+ hw_if->init(pdata);
+
+ phy_start(pdata->phydev);
+
++ xgbe_napi_enable(pdata, 1);
++
++ ret = xgbe_request_irqs(pdata);
++ if (ret)
++ goto err_napi;
++
+ hw_if->enable_tx(pdata);
+ hw_if->enable_rx(pdata);
+
+ xgbe_init_tx_timers(pdata);
+
+- xgbe_napi_enable(pdata, 1);
+ netif_tx_start_all_queues(netdev);
+
+ DBGPR("<--xgbe_start\n");
+
+ return 0;
++
++err_napi:
++ xgbe_napi_disable(pdata, 1);
++
++ phy_stop(pdata->phydev);
++
++ hw_if->exit(pdata);
++
++ return ret;
+ }
+
+ static void xgbe_stop(struct xgbe_prv_data *pdata)
+@@ -907,16 +992,21 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
+
+ DBGPR("-->xgbe_stop\n");
+
+- phy_stop(pdata->phydev);
+-
+ netif_tx_stop_all_queues(netdev);
+- xgbe_napi_disable(pdata, 1);
+
+ xgbe_stop_tx_timers(pdata);
+
+ hw_if->disable_tx(pdata);
+ hw_if->disable_rx(pdata);
+
++ xgbe_free_irqs(pdata);
++
++ xgbe_napi_disable(pdata, 1);
++
++ phy_stop(pdata->phydev);
++
++ hw_if->exit(pdata);
++
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+@@ -929,12 +1019,8 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
+ DBGPR("<--xgbe_stop\n");
+ }
+
+-static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset)
++static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
+ {
+- struct xgbe_channel *channel;
+- struct xgbe_hw_if *hw_if = &pdata->hw_if;
+- unsigned int i;
+-
+ DBGPR("-->xgbe_restart_dev\n");
+
+ /* If not running, "restart" will happen on open */
+@@ -942,20 +1028,10 @@ static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset)
+ return;
+
+ xgbe_stop(pdata);
+- synchronize_irq(pdata->dev_irq);
+- if (pdata->per_channel_irq) {
+- channel = pdata->channel;
+- for (i = 0; i < pdata->channel_count; i++, channel++)
+- synchronize_irq(channel->dma_irq);
+- }
+
+ xgbe_free_tx_data(pdata);
+ xgbe_free_rx_data(pdata);
+
+- /* Issue software reset to device if requested */
+- if (reset)
+- hw_if->exit(pdata);
+-
+ xgbe_start(pdata);
+
+ DBGPR("<--xgbe_restart_dev\n");
+@@ -969,7 +1045,7 @@ static void xgbe_restart(struct work_struct *work)
+
+ rtnl_lock();
+
+- xgbe_restart_dev(pdata, 1);
++ xgbe_restart_dev(pdata);
+
+ rtnl_unlock();
+ }
+@@ -1284,10 +1360,7 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
+ static int xgbe_open(struct net_device *netdev)
+ {
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+- struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_desc_if *desc_if = &pdata->desc_if;
+- struct xgbe_channel *channel = NULL;
+- unsigned int i = 0;
+ int ret;
+
+ DBGPR("-->xgbe_open\n");
+@@ -1330,55 +1403,14 @@ static int xgbe_open(struct net_device *netdev)
+ INIT_WORK(&pdata->restart_work, xgbe_restart);
+ INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
+
+- /* Request interrupts */
+- ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
+- netdev->name, pdata);
+- if (ret) {
+- netdev_alert(netdev, "error requesting irq %d\n",
+- pdata->dev_irq);
+- goto err_rings;
+- }
+-
+- if (pdata->per_channel_irq) {
+- channel = pdata->channel;
+- for (i = 0; i < pdata->channel_count; i++, channel++) {
+- snprintf(channel->dma_irq_name,
+- sizeof(channel->dma_irq_name) - 1,
+- "%s-TxRx-%u", netdev_name(netdev),
+- channel->queue_index);
+-
+- ret = devm_request_irq(pdata->dev, channel->dma_irq,
+- xgbe_dma_isr, 0,
+- channel->dma_irq_name, channel);
+- if (ret) {
+- netdev_alert(netdev,
+- "error requesting irq %d\n",
+- channel->dma_irq);
+- goto err_irq;
+- }
+- }
+- }
+-
+ ret = xgbe_start(pdata);
+ if (ret)
+- goto err_start;
++ goto err_rings;
+
+ DBGPR("<--xgbe_open\n");
+
+ return 0;
+
+-err_start:
+- hw_if->exit(pdata);
+-
+-err_irq:
+- if (pdata->per_channel_irq) {
+- /* Using an unsigned int, 'i' will go to UINT_MAX and exit */
+- for (i--, channel--; i < pdata->channel_count; i--, channel--)
+- devm_free_irq(pdata->dev, channel->dma_irq, channel);
+- }
+-
+- devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+-
+ err_rings:
+ desc_if->free_ring_resources(pdata);
+
+@@ -1400,30 +1432,16 @@ err_phy_init:
+ static int xgbe_close(struct net_device *netdev)
+ {
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+- struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_desc_if *desc_if = &pdata->desc_if;
+- struct xgbe_channel *channel;
+- unsigned int i;
+
+ DBGPR("-->xgbe_close\n");
+
+ /* Stop the device */
+ xgbe_stop(pdata);
+
+- /* Issue software reset to device */
+- hw_if->exit(pdata);
+-
+ /* Free the ring descriptors and buffers */
+ desc_if->free_ring_resources(pdata);
+
+- /* Release the interrupts */
+- devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+- if (pdata->per_channel_irq) {
+- channel = pdata->channel;
+- for (i = 0; i < pdata->channel_count; i++, channel++)
+- devm_free_irq(pdata->dev, channel->dma_irq, channel);
+- }
+-
+ /* Free the channel and ring structures */
+ xgbe_free_channels(pdata);
+
+@@ -1448,7 +1466,6 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
+ struct xgbe_ring *ring;
+ struct xgbe_packet_data *packet;
+ struct netdev_queue *txq;
+- unsigned long flags;
+ int ret;
+
+ DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len);
+@@ -1460,8 +1477,6 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
+
+ ret = NETDEV_TX_OK;
+
+- spin_lock_irqsave(&ring->lock, flags);
+-
+ if (skb->len == 0) {
+ netdev_err(netdev, "empty skb received from stack\n");
+ dev_kfree_skb_any(skb);
+@@ -1508,10 +1523,6 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
+ ret = NETDEV_TX_OK;
+
+ tx_netdev_return:
+- spin_unlock_irqrestore(&ring->lock, flags);
+-
+- DBGPR("<--xgbe_xmit\n");
+-
+ return ret;
+ }
+
+@@ -1519,17 +1530,10 @@ static void xgbe_set_rx_mode(struct net_device *netdev)
+ {
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+- unsigned int pr_mode, am_mode;
+
+ DBGPR("-->xgbe_set_rx_mode\n");
+
+- pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
+- am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);
+-
+- hw_if->set_promiscuous_mode(pdata, pr_mode);
+- hw_if->set_all_multicast_mode(pdata, am_mode);
+-
+- hw_if->add_mac_addresses(pdata);
++ hw_if->config_rx_mode(pdata);
+
+ DBGPR("<--xgbe_set_rx_mode\n");
+ }
+@@ -1589,13 +1593,21 @@ static int xgbe_change_mtu(struct net_device *netdev, int mtu)
+ pdata->rx_buf_size = ret;
+ netdev->mtu = mtu;
+
+- xgbe_restart_dev(pdata, 0);
++ xgbe_restart_dev(pdata);
+
+ DBGPR("<--xgbe_change_mtu\n");
+
+ return 0;
+ }
+
++static void xgbe_tx_timeout(struct net_device *netdev)
++{
++ struct xgbe_prv_data *pdata = netdev_priv(netdev);
++
++ netdev_warn(netdev, "tx timeout, device restarting\n");
++ schedule_work(&pdata->restart_work);
++}
++
+ static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *s)
+ {
+@@ -1760,6 +1772,7 @@ static const struct net_device_ops xgbe_netdev_ops = {
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = xgbe_ioctl,
+ .ndo_change_mtu = xgbe_change_mtu,
++ .ndo_tx_timeout = xgbe_tx_timeout,
+ .ndo_get_stats64 = xgbe_get_stats64,
+ .ndo_vlan_rx_add_vid = xgbe_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = xgbe_vlan_rx_kill_vid,
+@@ -1778,29 +1791,44 @@ struct net_device_ops *xgbe_get_netdev_ops(void)
+ static void xgbe_rx_refresh(struct xgbe_channel *channel)
+ {
+ struct xgbe_prv_data *pdata = channel->pdata;
++ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_desc_if *desc_if = &pdata->desc_if;
+ struct xgbe_ring *ring = channel->rx_ring;
+ struct xgbe_ring_data *rdata;
+
+- desc_if->realloc_rx_buffer(channel);
++ while (ring->dirty != ring->cur) {
++ rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
++
++ /* Reset rdata values */
++ desc_if->unmap_rdata(pdata, rdata);
++
++ if (desc_if->map_rx_buffer(pdata, ring, rdata))
++ break;
++
++ hw_if->rx_desc_reset(pdata, rdata, ring->dirty);
++
++ ring->dirty++;
++ }
++
++ /* Make sure everything is written before the register write */
++ wmb();
+
+ /* Update the Rx Tail Pointer Register with address of
+ * the last cleaned entry */
+- rdata = XGBE_GET_DESC_DATA(ring, ring->rx.realloc_index - 1);
++ rdata = XGBE_GET_DESC_DATA(ring, ring->dirty - 1);
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
+ lower_32_bits(rdata->rdesc_dma));
+ }
+
+-static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
++static struct sk_buff *xgbe_create_skb(struct napi_struct *napi,
+ struct xgbe_ring_data *rdata,
+ unsigned int *len)
+ {
+- struct net_device *netdev = pdata->netdev;
+ struct sk_buff *skb;
+ u8 *packet;
+ unsigned int copy_len;
+
+- skb = netdev_alloc_skb_ip_align(netdev, rdata->rx.hdr.dma_len);
++ skb = napi_alloc_skb(napi, rdata->rx.hdr.dma_len);
+ if (!skb)
+ return NULL;
+
+@@ -1826,7 +1854,6 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
+ struct xgbe_ring_desc *rdesc;
+ struct net_device *netdev = pdata->netdev;
+ struct netdev_queue *txq;
+- unsigned long flags;
+ int processed = 0;
+ unsigned int tx_packets = 0, tx_bytes = 0;
+
+@@ -1838,8 +1865,6 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
+
+ txq = netdev_get_tx_queue(netdev, channel->queue_index);
+
+- spin_lock_irqsave(&ring->lock, flags);
+-
+ while ((processed < XGBE_TX_DESC_MAX_PROC) &&
+ (ring->dirty != ring->cur)) {
+ rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
+@@ -1850,7 +1875,7 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
+
+ /* Make sure descriptor fields are read after reading the OWN
+ * bit */
+- rmb();
++ dma_rmb();
+
+ #ifdef XGMAC_ENABLE_TX_DESC_DUMP
+ xgbe_dump_tx_desc(ring, ring->dirty, 1, 0);
+@@ -1870,7 +1895,7 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
+ }
+
+ if (!processed)
+- goto unlock;
++ return 0;
+
+ netdev_tx_completed_queue(txq, tx_packets, tx_bytes);
+
+@@ -1882,9 +1907,6 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
+
+ DBGPR("<--xgbe_tx_poll: processed=%d\n", processed);
+
+-unlock:
+- spin_unlock_irqrestore(&ring->lock, flags);
+-
+ return processed;
+ }
+
+@@ -1936,7 +1958,7 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
+ read_again:
+ rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
+
+- if (ring->dirty > (XGBE_RX_DESC_CNT >> 3))
++ if (xgbe_rx_dirty_desc(ring) > (XGBE_RX_DESC_CNT >> 3))
+ xgbe_rx_refresh(channel);
+
+ if (hw_if->dev_read(channel))
+@@ -1944,7 +1966,6 @@ read_again:
+
+ received++;
+ ring->cur++;
+- ring->dirty++;
+
+ incomplete = XGMAC_GET_BITS(packet->attributes,
+ RX_PACKET_ATTRIBUTES,
+@@ -1977,7 +1998,7 @@ read_again:
+ rdata->rx.hdr.dma_len,
+ DMA_FROM_DEVICE);
+
+- skb = xgbe_create_skb(pdata, rdata, &put_len);
++ skb = xgbe_create_skb(napi, rdata, &put_len);
+ if (!skb) {
+ error = 1;
+ goto skip_data;
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+index ebf4893..5f149e8 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+@@ -291,7 +291,6 @@ static int xgbe_get_settings(struct net_device *netdev,
+ return -ENODEV;
+
+ ret = phy_ethtool_gset(pdata->phydev, cmd);
+- cmd->transceiver = XCVR_EXTERNAL;
+
+ DBGPR("<--xgbe_get_settings\n");
+
+@@ -378,18 +377,14 @@ static int xgbe_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+ {
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+- struct xgbe_hw_if *hw_if = &pdata->hw_if;
+- unsigned int riwt;
+
+ DBGPR("-->xgbe_get_coalesce\n");
+
+ memset(ec, 0, sizeof(struct ethtool_coalesce));
+
+- riwt = pdata->rx_riwt;
+- ec->rx_coalesce_usecs = hw_if->riwt_to_usec(pdata, riwt);
++ ec->rx_coalesce_usecs = pdata->rx_usecs;
+ ec->rx_max_coalesced_frames = pdata->rx_frames;
+
+- ec->tx_coalesce_usecs = pdata->tx_usecs;
+ ec->tx_max_coalesced_frames = pdata->tx_frames;
+
+ DBGPR("<--xgbe_get_coalesce\n");
+@@ -403,13 +398,14 @@ static int xgbe_set_coalesce(struct net_device *netdev,
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ unsigned int rx_frames, rx_riwt, rx_usecs;
+- unsigned int tx_frames, tx_usecs;
++ unsigned int tx_frames;
+
+ DBGPR("-->xgbe_set_coalesce\n");
+
+ /* Check for not supported parameters */
+ if ((ec->rx_coalesce_usecs_irq) ||
+ (ec->rx_max_coalesced_frames_irq) ||
++ (ec->tx_coalesce_usecs) ||
+ (ec->tx_coalesce_usecs_irq) ||
+ (ec->tx_max_coalesced_frames_irq) ||
+ (ec->stats_block_coalesce_usecs) ||
+@@ -428,28 +424,18 @@ static int xgbe_set_coalesce(struct net_device *netdev,
+ (ec->rate_sample_interval))
+ return -EOPNOTSUPP;
+
+- /* Can only change rx-frames when interface is down (see
+- * rx_descriptor_init in xgbe-dev.c)
+- */
+- rx_frames = pdata->rx_frames;
+- if (rx_frames != ec->rx_max_coalesced_frames && netif_running(netdev)) {
+- netdev_alert(netdev,
+- "interface must be down to change rx-frames\n");
+- return -EINVAL;
+- }
+-
+ rx_riwt = hw_if->usec_to_riwt(pdata, ec->rx_coalesce_usecs);
++ rx_usecs = ec->rx_coalesce_usecs;
+ rx_frames = ec->rx_max_coalesced_frames;
+
+ /* Use smallest possible value if conversion resulted in zero */
+- if (ec->rx_coalesce_usecs && !rx_riwt)
++ if (rx_usecs && !rx_riwt)
+ rx_riwt = 1;
+
+ /* Check the bounds of values for Rx */
+ if (rx_riwt > XGMAC_MAX_DMA_RIWT) {
+- rx_usecs = hw_if->riwt_to_usec(pdata, XGMAC_MAX_DMA_RIWT);
+ netdev_alert(netdev, "rx-usec is limited to %d usecs\n",
+- rx_usecs);
++ hw_if->riwt_to_usec(pdata, XGMAC_MAX_DMA_RIWT));
+ return -EINVAL;
+ }
+ if (rx_frames > pdata->rx_desc_count) {
+@@ -458,7 +444,6 @@ static int xgbe_set_coalesce(struct net_device *netdev,
+ return -EINVAL;
+ }
+
+- tx_usecs = ec->tx_coalesce_usecs;
+ tx_frames = ec->tx_max_coalesced_frames;
+
+ /* Check the bounds of values for Tx */
+@@ -469,10 +454,10 @@ static int xgbe_set_coalesce(struct net_device *netdev,
+ }
+
+ pdata->rx_riwt = rx_riwt;
++ pdata->rx_usecs = rx_usecs;
+ pdata->rx_frames = rx_frames;
+ hw_if->config_rx_coalesce(pdata);
+
+- pdata->tx_usecs = tx_usecs;
+ pdata->tx_frames = tx_frames;
+ hw_if->config_tx_coalesce(pdata);
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+index dbd3850..7149053 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+@@ -123,7 +123,10 @@
+ #include <linux/io.h>
+ #include <linux/of.h>
+ #include <linux/of_net.h>
++#include <linux/of_address.h>
+ #include <linux/clk.h>
++#include <linux/property.h>
++#include <linux/acpi.h>
+
+ #include "xgbe.h"
+ #include "xgbe-common.h"
+@@ -148,6 +151,7 @@ static void xgbe_default_config(struct xgbe_prv_data *pdata)
+ pdata->pause_autoneg = 1;
+ pdata->tx_pause = 1;
+ pdata->rx_pause = 1;
++ pdata->phy_speed = SPEED_UNKNOWN;
+ pdata->power_down = 0;
+ pdata->default_autoneg = AUTONEG_ENABLE;
+ pdata->default_speed = SPEED_10000;
+@@ -161,6 +165,96 @@ static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
+ xgbe_init_function_ptrs_desc(&pdata->desc_if);
+ }
+
++#ifdef CONFIG_ACPI
++static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
++{
++ struct acpi_device *adev = pdata->adev;
++ struct device *dev = pdata->dev;
++ u32 property;
++ acpi_handle handle;
++ acpi_status status;
++ unsigned long long data;
++ int cca;
++ int ret;
++
++ /* Obtain the system clock setting */
++ ret = device_property_read_u32(dev, XGBE_ACPI_DMA_FREQ, &property);
++ if (ret) {
++ dev_err(dev, "unable to obtain %s property\n",
++ XGBE_ACPI_DMA_FREQ);
++ return ret;
++ }
++ pdata->sysclk_rate = property;
++
++ /* Obtain the PTP clock setting */
++ ret = device_property_read_u32(dev, XGBE_ACPI_PTP_FREQ, &property);
++ if (ret) {
++ dev_err(dev, "unable to obtain %s property\n",
++ XGBE_ACPI_PTP_FREQ);
++ return ret;
++ }
++ pdata->ptpclk_rate = property;
++
++ /* Retrieve the device cache coherency value */
++ handle = adev->handle;
++ do {
++ status = acpi_evaluate_integer(handle, "_CCA", NULL, &data);
++ if (!ACPI_FAILURE(status)) {
++ cca = data;
++ break;
++ }
++
++ status = acpi_get_parent(handle, &handle);
++ } while (!ACPI_FAILURE(status));
++
++ if (ACPI_FAILURE(status)) {
++ dev_err(dev, "error obtaining acpi coherency value\n");
++ return -EINVAL;
++ }
++ pdata->coherent = !!cca;
++
++ return 0;
++}
++#else /* CONFIG_ACPI */
++static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
++{
++ return -EINVAL;
++}
++#endif /* CONFIG_ACPI */
++
++#ifdef CONFIG_OF
++static int xgbe_of_support(struct xgbe_prv_data *pdata)
++{
++ struct device *dev = pdata->dev;
++
++ /* Obtain the system clock setting */
++ pdata->sysclk = devm_clk_get(dev, XGBE_DMA_CLOCK);
++ if (IS_ERR(pdata->sysclk)) {
++ dev_err(dev, "dma devm_clk_get failed\n");
++ return PTR_ERR(pdata->sysclk);
++ }
++ pdata->sysclk_rate = clk_get_rate(pdata->sysclk);
++
++ /* Obtain the PTP clock setting */
++ pdata->ptpclk = devm_clk_get(dev, XGBE_PTP_CLOCK);
++ if (IS_ERR(pdata->ptpclk)) {
++ dev_err(dev, "ptp devm_clk_get failed\n");
++ return PTR_ERR(pdata->ptpclk);
++ }
++ pdata->ptpclk_rate = clk_get_rate(pdata->ptpclk);
++
++ /* Retrieve the device cache coherency value */
++ pdata->coherent = of_dma_is_coherent(dev->of_node);
++
++ return 0;
++}
++#else /* CONFIG_OF */
++static int xgbe_of_support(struct xgbe_prv_data *pdata)
++{
++ return -EINVAL;
++}
++#endif /*CONFIG_OF */
++
+ static int xgbe_probe(struct platform_device *pdev)
+ {
+ struct xgbe_prv_data *pdata;
+@@ -169,7 +263,7 @@ static int xgbe_probe(struct platform_device *pdev)
+ struct net_device *netdev;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+- const u8 *mac_addr;
++ const char *phy_mode;
+ unsigned int i;
+ int ret;
+
+@@ -186,6 +280,7 @@ static int xgbe_probe(struct platform_device *pdev)
+ pdata = netdev_priv(netdev);
+ pdata->netdev = netdev;
+ pdata->pdev = pdev;
++ pdata->adev = ACPI_COMPANION(dev);
+ pdata->dev = dev;
+ platform_set_drvdata(pdev, netdev);
+
+@@ -194,6 +289,9 @@ static int xgbe_probe(struct platform_device *pdev)
+ mutex_init(&pdata->rss_mutex);
+ spin_lock_init(&pdata->tstamp_lock);
+
++ /* Check if we should use ACPI or DT */
++ pdata->use_acpi = (!pdata->adev || acpi_disabled) ? 0 : 1;
++
+ /* Set and validate the number of descriptors for a ring */
+ BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_TX_DESC_CNT);
+ pdata->tx_desc_count = XGBE_TX_DESC_CNT;
+@@ -212,22 +310,6 @@ static int xgbe_probe(struct platform_device *pdev)
+ goto err_io;
+ }
+
+- /* Obtain the system clock setting */
+- pdata->sysclk = devm_clk_get(dev, XGBE_DMA_CLOCK);
+- if (IS_ERR(pdata->sysclk)) {
+- dev_err(dev, "dma devm_clk_get failed\n");
+- ret = PTR_ERR(pdata->sysclk);
+- goto err_io;
+- }
+-
+- /* Obtain the PTP clock setting */
+- pdata->ptpclk = devm_clk_get(dev, XGBE_PTP_CLOCK);
+- if (IS_ERR(pdata->ptpclk)) {
+- dev_err(dev, "ptp devm_clk_get failed\n");
+- ret = PTR_ERR(pdata->ptpclk);
+- goto err_io;
+- }
+-
+ /* Obtain the mmio areas for the device */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pdata->xgmac_regs = devm_ioremap_resource(dev, res);
+@@ -247,16 +329,42 @@ static int xgbe_probe(struct platform_device *pdev)
+ }
+ DBGPR(" xpcs_regs = %p\n", pdata->xpcs_regs);
+
+- /* Set the DMA mask */
+- if (!dev->dma_mask)
+- dev->dma_mask = &dev->coherent_dma_mask;
+- ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
+- if (ret) {
+- dev_err(dev, "dma_set_mask_and_coherent failed\n");
++ /* Retrieve the MAC address */
++ ret = device_property_read_u8_array(dev, XGBE_MAC_ADDR_PROPERTY,
++ pdata->mac_addr,
++ sizeof(pdata->mac_addr));
++ if (ret || !is_valid_ether_addr(pdata->mac_addr)) {
++ dev_err(dev, "invalid %s property\n", XGBE_MAC_ADDR_PROPERTY);
++ if (!ret)
++ ret = -EINVAL;
+ goto err_io;
+ }
+
+- if (of_property_read_bool(dev->of_node, "dma-coherent")) {
++ /* Retrieve the PHY mode - it must be "xgmii" */
++ ret = device_property_read_string(dev, XGBE_PHY_MODE_PROPERTY,
++ &phy_mode);
++ if (ret || strcmp(phy_mode, phy_modes(PHY_INTERFACE_MODE_XGMII))) {
++ dev_err(dev, "invalid %s property\n", XGBE_PHY_MODE_PROPERTY);
++ if (!ret)
++ ret = -EINVAL;
++ goto err_io;
++ }
++ pdata->phy_mode = PHY_INTERFACE_MODE_XGMII;
++
++ /* Check for per channel interrupt support */
++ if (device_property_present(dev, XGBE_DMA_IRQS_PROPERTY))
++ pdata->per_channel_irq = 1;
++
++ /* Obtain device settings unique to ACPI/OF */
++ if (pdata->use_acpi)
++ ret = xgbe_acpi_support(pdata);
++ else
++ ret = xgbe_of_support(pdata);
++ if (ret)
++ goto err_io;
++
++ /* Set the DMA coherency values */
++ if (pdata->coherent) {
+ pdata->axdomain = XGBE_DMA_OS_AXDOMAIN;
+ pdata->arcache = XGBE_DMA_OS_ARCACHE;
+ pdata->awcache = XGBE_DMA_OS_AWCACHE;
+@@ -266,10 +374,7 @@ static int xgbe_probe(struct platform_device *pdev)
+ pdata->awcache = XGBE_DMA_SYS_AWCACHE;
+ }
+
+- /* Check for per channel interrupt support */
+- if (of_property_read_bool(dev->of_node, XGBE_DMA_IRQS))
+- pdata->per_channel_irq = 1;
+-
++ /* Get the device interrupt */
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
+ dev_err(dev, "platform_get_irq 0 failed\n");
+@@ -279,6 +384,7 @@ static int xgbe_probe(struct platform_device *pdev)
+
+ netdev->irq = pdata->dev_irq;
+ netdev->base_addr = (unsigned long)pdata->xgmac_regs;
++ memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len);
+
+ /* Set all the function pointers */
+ xgbe_init_all_fptrs(pdata);
+@@ -291,26 +397,19 @@ static int xgbe_probe(struct platform_device *pdev)
+ /* Populate the hardware features */
+ xgbe_get_all_hw_features(pdata);
+
+- /* Retrieve the MAC address */
+- mac_addr = of_get_mac_address(dev->of_node);
+- if (!mac_addr) {
+- dev_err(dev, "invalid mac address for this device\n");
+- ret = -EINVAL;
+- goto err_io;
+- }
+- memcpy(netdev->dev_addr, mac_addr, netdev->addr_len);
++ /* Set default configuration data */
++ xgbe_default_config(pdata);
+
+- /* Retrieve the PHY mode - it must be "xgmii" */
+- pdata->phy_mode = of_get_phy_mode(dev->of_node);
+- if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
+- dev_err(dev, "invalid phy-mode specified for this device\n");
+- ret = -EINVAL;
++ /* Set the DMA mask */
++ if (!dev->dma_mask)
++ dev->dma_mask = &dev->coherent_dma_mask;
++ ret = dma_set_mask_and_coherent(dev,
++ DMA_BIT_MASK(pdata->hw_feat.dma_width));
++ if (ret) {
++ dev_err(dev, "dma_set_mask_and_coherent failed\n");
+ goto err_io;
+ }
+
+- /* Set default configuration data */
+- xgbe_default_config(pdata);
+-
+ /* Calculate the number of Tx and Rx rings to be created
+ * -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
+ * the number of Tx queues to the number of Tx channels
+@@ -392,6 +491,9 @@ static int xgbe_probe(struct platform_device *pdev)
+
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
++ /* Use default watchdog timeout */
++ netdev->watchdog_timeo = 0;
++
+ xgbe_init_rx_coalesce(pdata);
+ xgbe_init_tx_coalesce(pdata);
+
+@@ -491,18 +593,35 @@ static int xgbe_resume(struct device *dev)
+ }
+ #endif /* CONFIG_PM */
+
++#ifdef CONFIG_ACPI
++static const struct acpi_device_id xgbe_acpi_match[] = {
++ { "AMDI8001", 0 },
++ {},
++};
++
++MODULE_DEVICE_TABLE(acpi, xgbe_acpi_match);
++#endif
++
++#ifdef CONFIG_OF
+ static const struct of_device_id xgbe_of_match[] = {
+ { .compatible = "amd,xgbe-seattle-v1a", },
+ {},
+ };
+
+ MODULE_DEVICE_TABLE(of, xgbe_of_match);
++#endif
++
+ static SIMPLE_DEV_PM_OPS(xgbe_pm_ops, xgbe_suspend, xgbe_resume);
+
+ static struct platform_driver xgbe_driver = {
+ .driver = {
+ .name = "amd-xgbe",
++#ifdef CONFIG_ACPI
++ .acpi_match_table = xgbe_acpi_match,
++#endif
++#ifdef CONFIG_OF
+ .of_match_table = xgbe_of_match,
++#endif
+ .pm = &xgbe_pm_ops,
+ },
+ .probe = xgbe_probe,
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+index 363b210..59e267f 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+@@ -205,25 +205,16 @@ void xgbe_dump_phy_registers(struct xgbe_prv_data *pdata)
+
+ int xgbe_mdio_register(struct xgbe_prv_data *pdata)
+ {
+- struct device_node *phy_node;
+ struct mii_bus *mii;
+ struct phy_device *phydev;
+ int ret = 0;
+
+ DBGPR("-->xgbe_mdio_register\n");
+
+- /* Retrieve the phy-handle */
+- phy_node = of_parse_phandle(pdata->dev->of_node, "phy-handle", 0);
+- if (!phy_node) {
+- dev_err(pdata->dev, "unable to parse phy-handle\n");
+- return -EINVAL;
+- }
+-
+ mii = mdiobus_alloc();
+- if (mii == NULL) {
++ if (!mii) {
+ dev_err(pdata->dev, "mdiobus_alloc failed\n");
+- ret = -ENOMEM;
+- goto err_node_get;
++ return -ENOMEM;
+ }
+
+ /* Register on the MDIO bus (don't probe any PHYs) */
+@@ -252,18 +243,19 @@ int xgbe_mdio_register(struct xgbe_prv_data *pdata)
+ request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT,
+ MDIO_ID_ARGS(phydev->c45_ids.device_ids[MDIO_MMD_PCS]));
+
+- of_node_get(phy_node);
+- phydev->dev.of_node = phy_node;
+ ret = phy_device_register(phydev);
+ if (ret) {
+ dev_err(pdata->dev, "phy_device_register failed\n");
+- of_node_put(phy_node);
++ goto err_phy_device;
++ }
++ if (!phydev->dev.driver) {
++ dev_err(pdata->dev, "phy driver probe failed\n");
++ ret = -EIO;
+ goto err_phy_device;
+ }
+
+ /* Add a reference to the PHY driver so it can't be unloaded */
+- pdata->phy_module = phydev->dev.driver ?
+- phydev->dev.driver->owner : NULL;
++ pdata->phy_module = phydev->dev.driver->owner;
+ if (!try_module_get(pdata->phy_module)) {
+ dev_err(pdata->dev, "try_module_get failed\n");
+ ret = -EIO;
+@@ -283,8 +275,6 @@ int xgbe_mdio_register(struct xgbe_prv_data *pdata)
+
+ pdata->phydev = phydev;
+
+- of_node_put(phy_node);
+-
+ DBGPHY_REGS(pdata);
+
+ DBGPR("<--xgbe_mdio_register\n");
+@@ -300,9 +290,6 @@ err_mdiobus_register:
+ err_mdiobus_alloc:
+ mdiobus_free(mii);
+
+-err_node_get:
+- of_node_put(phy_node);
+-
+ return ret;
+ }
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
+index a1bf9d1c..f0d0ac6 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
+@@ -171,21 +171,15 @@ static int xgbe_adjtime(struct ptp_clock_info *info, s64 delta)
+ struct xgbe_prv_data,
+ ptp_clock_info);
+ unsigned long flags;
+- u64 nsec;
+
+ spin_lock_irqsave(&pdata->tstamp_lock, flags);
+-
+- nsec = timecounter_read(&pdata->tstamp_tc);
+-
+- nsec += delta;
+- timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc, nsec);
+-
++ timecounter_adjtime(&pdata->tstamp_tc, delta);
+ spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+
+ return 0;
+ }
+
+-static int xgbe_gettime(struct ptp_clock_info *info, struct timespec *ts)
++static int xgbe_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
+ {
+ struct xgbe_prv_data *pdata = container_of(info,
+ struct xgbe_prv_data,
+@@ -199,12 +193,13 @@ static int xgbe_gettime(struct ptp_clock_info *info, struct timespec *ts)
+
+ spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+
+- *ts = ns_to_timespec(nsec);
++ *ts = ns_to_timespec64(nsec);
+
+ return 0;
+ }
+
+-static int xgbe_settime(struct ptp_clock_info *info, const struct timespec *ts)
++static int xgbe_settime(struct ptp_clock_info *info,
++ const struct timespec64 *ts)
+ {
+ struct xgbe_prv_data *pdata = container_of(info,
+ struct xgbe_prv_data,
+@@ -212,7 +207,7 @@ static int xgbe_settime(struct ptp_clock_info *info, const struct timespec *ts)
+ unsigned long flags;
+ u64 nsec;
+
+- nsec = timespec_to_ns(ts);
++ nsec = timespec64_to_ns(ts);
+
+ spin_lock_irqsave(&pdata->tstamp_lock, flags);
+
+@@ -239,7 +234,7 @@ void xgbe_ptp_register(struct xgbe_prv_data *pdata)
+ snprintf(info->name, sizeof(info->name), "%s",
+ netdev_name(pdata->netdev));
+ info->owner = THIS_MODULE;
+- info->max_adj = clk_get_rate(pdata->ptpclk);
++ info->max_adj = pdata->ptpclk_rate;
+ info->adjfreq = xgbe_adjfreq;
+ info->adjtime = xgbe_adjtime;
+ info->gettime = xgbe_gettime;
+@@ -260,7 +255,7 @@ void xgbe_ptp_register(struct xgbe_prv_data *pdata)
+ */
+ dividend = 50000000;
+ dividend <<= 32;
+- pdata->tstamp_addend = div_u64(dividend, clk_get_rate(pdata->ptpclk));
++ pdata->tstamp_addend = div_u64(dividend, pdata->ptpclk_rate);
+
+ /* Setup the timecounter */
+ cc->read = xgbe_cc_read;
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
+index f9ec762..2ef3ffb 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
+@@ -182,10 +182,18 @@
+ #define XGBE_PHY_NAME "amd_xgbe_phy"
+ #define XGBE_PRTAD 0
+
++/* Common property names */
++#define XGBE_MAC_ADDR_PROPERTY "mac-address"
++#define XGBE_PHY_MODE_PROPERTY "phy-mode"
++#define XGBE_DMA_IRQS_PROPERTY "amd,per-channel-interrupt"
++
+ /* Device-tree clock names */
+ #define XGBE_DMA_CLOCK "dma_clk"
+ #define XGBE_PTP_CLOCK "ptp_clk"
+-#define XGBE_DMA_IRQS "amd,per-channel-interrupt"
++
++/* ACPI property names */
++#define XGBE_ACPI_DMA_FREQ "amd,dma-freq"
++#define XGBE_ACPI_PTP_FREQ "amd,ptp-freq"
+
+ /* Timestamp support - values based on 50MHz PTP clock
+ * 50MHz => 20 nsec
+@@ -214,7 +222,7 @@
+ ((_idx) & ((_ring)->rdesc_count - 1)))
+
+ /* Default coalescing parameters */
+-#define XGMAC_INIT_DMA_TX_USECS 50
++#define XGMAC_INIT_DMA_TX_USECS 1000
+ #define XGMAC_INIT_DMA_TX_FRAMES 25
+
+ #define XGMAC_MAX_DMA_RIWT 0xff
+@@ -317,8 +325,6 @@ struct xgbe_ring_data {
+ struct xgbe_tx_ring_data tx; /* Tx-related data */
+ struct xgbe_rx_ring_data rx; /* Rx-related data */
+
+- unsigned int interrupt; /* Interrupt indicator */
+-
+ unsigned int mapped_as_page;
+
+ /* Incomplete receive save location. If the budget is exhausted
+@@ -361,8 +367,7 @@ struct xgbe_ring {
+ * cur - Tx: index of descriptor to be used for current transfer
+ * Rx: index of descriptor to check for packet availability
+ * dirty - Tx: index of descriptor to check for transfer complete
+- * Rx: count of descriptors in which a packet has been received
+- * (used with skb_realloc_index to refresh the ring)
++ * Rx: index of descriptor to check for buffer reallocation
+ */
+ unsigned int cur;
+ unsigned int dirty;
+@@ -377,11 +382,6 @@ struct xgbe_ring {
+ unsigned short cur_mss;
+ unsigned short cur_vlan_ctag;
+ } tx;
+-
+- struct {
+- unsigned int realloc_index;
+- unsigned int realloc_threshold;
+- } rx;
+ };
+ } ____cacheline_aligned;
+
+@@ -408,7 +408,7 @@ struct xgbe_channel {
+ unsigned int saved_ier;
+
+ unsigned int tx_timer_active;
+- struct hrtimer tx_timer;
++ struct timer_list tx_timer;
+
+ struct xgbe_ring *tx_ring;
+ struct xgbe_ring *rx_ring;
+@@ -495,10 +495,8 @@ struct xgbe_mmc_stats {
+ struct xgbe_hw_if {
+ int (*tx_complete)(struct xgbe_ring_desc *);
+
+- int (*set_promiscuous_mode)(struct xgbe_prv_data *, unsigned int);
+- int (*set_all_multicast_mode)(struct xgbe_prv_data *, unsigned int);
+- int (*add_mac_addresses)(struct xgbe_prv_data *);
+ int (*set_mac_address)(struct xgbe_prv_data *, u8 *addr);
++ int (*config_rx_mode)(struct xgbe_prv_data *);
+
+ int (*enable_rx_csum)(struct xgbe_prv_data *);
+ int (*disable_rx_csum)(struct xgbe_prv_data *);
+@@ -534,8 +532,9 @@ struct xgbe_hw_if {
+ int (*dev_read)(struct xgbe_channel *);
+ void (*tx_desc_init)(struct xgbe_channel *);
+ void (*rx_desc_init)(struct xgbe_channel *);
+- void (*rx_desc_reset)(struct xgbe_ring_data *);
+ void (*tx_desc_reset)(struct xgbe_ring_data *);
++ void (*rx_desc_reset)(struct xgbe_prv_data *, struct xgbe_ring_data *,
++ unsigned int);
+ int (*is_last_desc)(struct xgbe_ring_desc *);
+ int (*is_context_desc)(struct xgbe_ring_desc *);
+ void (*tx_start_xmit)(struct xgbe_channel *, struct xgbe_ring *);
+@@ -596,7 +595,8 @@ struct xgbe_desc_if {
+ int (*alloc_ring_resources)(struct xgbe_prv_data *);
+ void (*free_ring_resources)(struct xgbe_prv_data *);
+ int (*map_tx_skb)(struct xgbe_channel *, struct sk_buff *);
+- void (*realloc_rx_buffer)(struct xgbe_channel *);
++ int (*map_rx_buffer)(struct xgbe_prv_data *, struct xgbe_ring *,
++ struct xgbe_ring_data *);
+ void (*unmap_rdata)(struct xgbe_prv_data *, struct xgbe_ring_data *);
+ void (*wrapper_tx_desc_init)(struct xgbe_prv_data *);
+ void (*wrapper_rx_desc_init)(struct xgbe_prv_data *);
+@@ -617,7 +617,7 @@ struct xgbe_hw_features {
+ unsigned int mgk; /* PMT magic packet */
+ unsigned int mmc; /* RMON module */
+ unsigned int aoe; /* ARP Offload */
+- unsigned int ts; /* IEEE 1588-2008 Adavanced Timestamp */
++ unsigned int ts; /* IEEE 1588-2008 Advanced Timestamp */
+ unsigned int eee; /* Energy Efficient Ethernet */
+ unsigned int tx_coe; /* Tx Checksum Offload */
+ unsigned int rx_coe; /* Rx Checksum Offload */
+@@ -629,6 +629,7 @@ struct xgbe_hw_features {
+ unsigned int rx_fifo_size; /* MTL Receive FIFO Size */
+ unsigned int tx_fifo_size; /* MTL Transmit FIFO Size */
+ unsigned int adv_ts_hi; /* Advance Timestamping High Word */
++ unsigned int dma_width; /* DMA width */
+ unsigned int dcb; /* DCB Feature */
+ unsigned int sph; /* Split Header Feature */
+ unsigned int tso; /* TCP Segmentation Offload */
+@@ -650,8 +651,12 @@ struct xgbe_hw_features {
+ struct xgbe_prv_data {
+ struct net_device *netdev;
+ struct platform_device *pdev;
++ struct acpi_device *adev;
+ struct device *dev;
+
++ /* ACPI or DT flag */
++ unsigned int use_acpi;
++
+ /* XGMAC/XPCS related mmio registers */
+ void __iomem *xgmac_regs; /* XGMAC CSRs */
+ void __iomem *xpcs_regs; /* XPCS MMD registers */
+@@ -672,6 +677,7 @@ struct xgbe_prv_data {
+ struct xgbe_desc_if desc_if;
+
+ /* AXI DMA settings */
++ unsigned int coherent;
+ unsigned int axdomain;
+ unsigned int arcache;
+ unsigned int awcache;
+@@ -707,6 +713,7 @@ struct xgbe_prv_data {
+
+ /* Rx coalescing settings */
+ unsigned int rx_riwt;
++ unsigned int rx_usecs;
+ unsigned int rx_frames;
+
+ /* Current Rx buffer size */
+@@ -739,6 +746,7 @@ struct xgbe_prv_data {
+ unsigned int phy_rx_pause;
+
+ /* Netdev related settings */
++ unsigned char mac_addr[ETH_ALEN];
+ netdev_features_t netdev_features;
+ struct napi_struct napi;
+ struct xgbe_mmc_stats mmc_stats;
+@@ -748,7 +756,9 @@ struct xgbe_prv_data {
+
+ /* Device clocks */
+ struct clk *sysclk;
++ unsigned long sysclk_rate;
+ struct clk *ptpclk;
++ unsigned long ptpclk_rate;
+
+ /* Timestamp support */
+ spinlock_t tstamp_lock;
+diff --git a/drivers/net/phy/amd-xgbe-phy.c b/drivers/net/phy/amd-xgbe-phy.c
+index 903dc3d..34a75cb 100644
+--- a/drivers/net/phy/amd-xgbe-phy.c
++++ b/drivers/net/phy/amd-xgbe-phy.c
+@@ -60,6 +60,7 @@
+ #include <linux/interrupt.h>
+ #include <linux/init.h>
+ #include <linux/delay.h>
++#include <linux/workqueue.h>
+ #include <linux/netdevice.h>
+ #include <linux/etherdevice.h>
+ #include <linux/skbuff.h>
+@@ -74,6 +75,10 @@
+ #include <linux/of_platform.h>
+ #include <linux/of_device.h>
+ #include <linux/uaccess.h>
++#include <linux/bitops.h>
++#include <linux/property.h>
++#include <linux/acpi.h>
++#include <linux/jiffies.h>
+
+ MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
+ MODULE_LICENSE("Dual BSD/GPL");
+@@ -84,22 +89,47 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
+ #define XGBE_PHY_MASK 0xfffffff0
+
+ #define XGBE_PHY_SPEEDSET_PROPERTY "amd,speed-set"
++#define XGBE_PHY_BLWC_PROPERTY "amd,serdes-blwc"
++#define XGBE_PHY_CDR_RATE_PROPERTY "amd,serdes-cdr-rate"
++#define XGBE_PHY_PQ_SKEW_PROPERTY "amd,serdes-pq-skew"
++#define XGBE_PHY_TX_AMP_PROPERTY "amd,serdes-tx-amp"
++#define XGBE_PHY_DFE_CFG_PROPERTY "amd,serdes-dfe-tap-config"
++#define XGBE_PHY_DFE_ENA_PROPERTY "amd,serdes-dfe-tap-enable"
++
++#define XGBE_PHY_SPEEDS 3
++#define XGBE_PHY_SPEED_1000 0
++#define XGBE_PHY_SPEED_2500 1
++#define XGBE_PHY_SPEED_10000 2
++
++#define XGBE_AN_MS_TIMEOUT 500
+
+ #define XGBE_AN_INT_CMPLT 0x01
+ #define XGBE_AN_INC_LINK 0x02
+ #define XGBE_AN_PG_RCV 0x04
++#define XGBE_AN_INT_MASK 0x07
+
+ #define XNP_MCF_NULL_MESSAGE 0x001
+-#define XNP_ACK_PROCESSED (1 << 12)
+-#define XNP_MP_FORMATTED (1 << 13)
+-#define XNP_NP_EXCHANGE (1 << 15)
++#define XNP_ACK_PROCESSED BIT(12)
++#define XNP_MP_FORMATTED BIT(13)
++#define XNP_NP_EXCHANGE BIT(15)
+
+ #define XGBE_PHY_RATECHANGE_COUNT 500
+
++#define XGBE_PHY_KR_TRAINING_START 0x01
++#define XGBE_PHY_KR_TRAINING_ENABLE 0x02
++
++#define XGBE_PHY_FEC_ENABLE 0x01
++#define XGBE_PHY_FEC_FORWARD 0x02
++#define XGBE_PHY_FEC_MASK 0x03
++
+ #ifndef MDIO_PMA_10GBR_PMD_CTRL
+ #define MDIO_PMA_10GBR_PMD_CTRL 0x0096
+ #endif
+
++#ifndef MDIO_PMA_10GBR_FEC_ABILITY
++#define MDIO_PMA_10GBR_FEC_ABILITY 0x00aa
++#endif
++
+ #ifndef MDIO_PMA_10GBR_FEC_CTRL
+ #define MDIO_PMA_10GBR_FEC_CTRL 0x00ab
+ #endif
+@@ -108,6 +138,10 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
+ #define MDIO_AN_XNP 0x0016
+ #endif
+
++#ifndef MDIO_AN_LPX
++#define MDIO_AN_LPX 0x0019
++#endif
++
+ #ifndef MDIO_AN_INTMASK
+ #define MDIO_AN_INTMASK 0x8001
+ #endif
+@@ -116,18 +150,10 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
+ #define MDIO_AN_INT 0x8002
+ #endif
+
+-#ifndef MDIO_AN_KR_CTRL
+-#define MDIO_AN_KR_CTRL 0x8003
+-#endif
+-
+ #ifndef MDIO_CTRL1_SPEED1G
+ #define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100)
+ #endif
+
+-#ifndef MDIO_KR_CTRL_PDETECT
+-#define MDIO_KR_CTRL_PDETECT 0x01
+-#endif
+-
+ /* SerDes integration register offsets */
+ #define SIR0_KR_RT_1 0x002c
+ #define SIR0_STATUS 0x0040
+@@ -140,10 +166,10 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
+ #define SIR0_STATUS_RX_READY_WIDTH 1
+ #define SIR0_STATUS_TX_READY_INDEX 8
+ #define SIR0_STATUS_TX_READY_WIDTH 1
++#define SIR1_SPEED_CDR_RATE_INDEX 12
++#define SIR1_SPEED_CDR_RATE_WIDTH 4
+ #define SIR1_SPEED_DATARATE_INDEX 4
+ #define SIR1_SPEED_DATARATE_WIDTH 2
+-#define SIR1_SPEED_PI_SPD_SEL_INDEX 12
+-#define SIR1_SPEED_PI_SPD_SEL_WIDTH 4
+ #define SIR1_SPEED_PLLSEL_INDEX 3
+ #define SIR1_SPEED_PLLSEL_WIDTH 1
+ #define SIR1_SPEED_RATECHANGE_INDEX 6
+@@ -153,42 +179,52 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
+ #define SIR1_SPEED_WORDMODE_INDEX 0
+ #define SIR1_SPEED_WORDMODE_WIDTH 3
+
++#define SPEED_10000_BLWC 0
+ #define SPEED_10000_CDR 0x7
+ #define SPEED_10000_PLL 0x1
++#define SPEED_10000_PQ 0x12
+ #define SPEED_10000_RATE 0x0
+ #define SPEED_10000_TXAMP 0xa
+ #define SPEED_10000_WORD 0x7
++#define SPEED_10000_DFE_TAP_CONFIG 0x1
++#define SPEED_10000_DFE_TAP_ENABLE 0x7f
+
++#define SPEED_2500_BLWC 1
+ #define SPEED_2500_CDR 0x2
+ #define SPEED_2500_PLL 0x0
++#define SPEED_2500_PQ 0xa
+ #define SPEED_2500_RATE 0x1
+ #define SPEED_2500_TXAMP 0xf
+ #define SPEED_2500_WORD 0x1
++#define SPEED_2500_DFE_TAP_CONFIG 0x3
++#define SPEED_2500_DFE_TAP_ENABLE 0x0
+
++#define SPEED_1000_BLWC 1
+ #define SPEED_1000_CDR 0x2
+ #define SPEED_1000_PLL 0x0
++#define SPEED_1000_PQ 0xa
+ #define SPEED_1000_RATE 0x3
+ #define SPEED_1000_TXAMP 0xf
+ #define SPEED_1000_WORD 0x1
++#define SPEED_1000_DFE_TAP_CONFIG 0x3
++#define SPEED_1000_DFE_TAP_ENABLE 0x0
+
+ /* SerDes RxTx register offsets */
++#define RXTX_REG6 0x0018
+ #define RXTX_REG20 0x0050
++#define RXTX_REG22 0x0058
+ #define RXTX_REG114 0x01c8
++#define RXTX_REG129 0x0204
+
+ /* SerDes RxTx register entry bit positions and sizes */
++#define RXTX_REG6_RESETB_RXD_INDEX 8
++#define RXTX_REG6_RESETB_RXD_WIDTH 1
+ #define RXTX_REG20_BLWC_ENA_INDEX 2
+ #define RXTX_REG20_BLWC_ENA_WIDTH 1
+ #define RXTX_REG114_PQ_REG_INDEX 9
+ #define RXTX_REG114_PQ_REG_WIDTH 7
+-
+-#define RXTX_10000_BLWC 0
+-#define RXTX_10000_PQ 0x1e
+-
+-#define RXTX_2500_BLWC 1
+-#define RXTX_2500_PQ 0xa
+-
+-#define RXTX_1000_BLWC 1
+-#define RXTX_1000_PQ 0xa
++#define RXTX_REG129_RXDFE_CONFIG_INDEX 14
++#define RXTX_REG129_RXDFE_CONFIG_WIDTH 2
+
+ /* Bit setting and getting macros
+ * The get macro will extract the current bit field value from within
+@@ -291,23 +327,56 @@ do { \
+ XRXTX_IOWRITE((_priv), _reg, reg_val); \
+ } while (0)
+
++static const u32 amd_xgbe_phy_serdes_blwc[] = {
++ SPEED_1000_BLWC,
++ SPEED_2500_BLWC,
++ SPEED_10000_BLWC,
++};
++
++static const u32 amd_xgbe_phy_serdes_cdr_rate[] = {
++ SPEED_1000_CDR,
++ SPEED_2500_CDR,
++ SPEED_10000_CDR,
++};
++
++static const u32 amd_xgbe_phy_serdes_pq_skew[] = {
++ SPEED_1000_PQ,
++ SPEED_2500_PQ,
++ SPEED_10000_PQ,
++};
++
++static const u32 amd_xgbe_phy_serdes_tx_amp[] = {
++ SPEED_1000_TXAMP,
++ SPEED_2500_TXAMP,
++ SPEED_10000_TXAMP,
++};
++
++static const u32 amd_xgbe_phy_serdes_dfe_tap_cfg[] = {
++ SPEED_1000_DFE_TAP_CONFIG,
++ SPEED_2500_DFE_TAP_CONFIG,
++ SPEED_10000_DFE_TAP_CONFIG,
++};
++
++static const u32 amd_xgbe_phy_serdes_dfe_tap_ena[] = {
++ SPEED_1000_DFE_TAP_ENABLE,
++ SPEED_2500_DFE_TAP_ENABLE,
++ SPEED_10000_DFE_TAP_ENABLE,
++};
++
+ enum amd_xgbe_phy_an {
+ AMD_XGBE_AN_READY = 0,
+- AMD_XGBE_AN_START,
+- AMD_XGBE_AN_EVENT,
+ AMD_XGBE_AN_PAGE_RECEIVED,
+ AMD_XGBE_AN_INCOMPAT_LINK,
+ AMD_XGBE_AN_COMPLETE,
+ AMD_XGBE_AN_NO_LINK,
+- AMD_XGBE_AN_EXIT,
+ AMD_XGBE_AN_ERROR,
+ };
+
+ enum amd_xgbe_phy_rx {
+- AMD_XGBE_RX_READY = 0,
+- AMD_XGBE_RX_BPA,
++ AMD_XGBE_RX_BPA = 0,
+ AMD_XGBE_RX_XNP,
+ AMD_XGBE_RX_COMPLETE,
++ AMD_XGBE_RX_ERROR,
+ };
+
+ enum amd_xgbe_phy_mode {
+@@ -316,12 +385,13 @@ enum amd_xgbe_phy_mode {
+ };
+
+ enum amd_xgbe_phy_speedset {
+- AMD_XGBE_PHY_SPEEDSET_1000_10000,
++ AMD_XGBE_PHY_SPEEDSET_1000_10000 = 0,
+ AMD_XGBE_PHY_SPEEDSET_2500_10000,
+ };
+
+ struct amd_xgbe_phy_priv {
+ struct platform_device *pdev;
++ struct acpi_device *adev;
+ struct device *dev;
+
+ struct phy_device *phydev;
+@@ -336,10 +406,26 @@ struct amd_xgbe_phy_priv {
+ void __iomem *sir0_regs; /* SerDes integration registers (1/2) */
+ void __iomem *sir1_regs; /* SerDes integration registers (2/2) */
+
+- /* Maintain link status for re-starting auto-negotiation */
+- unsigned int link;
++ int an_irq;
++ char an_irq_name[IFNAMSIZ + 32];
++ struct work_struct an_irq_work;
++ unsigned int an_irq_allocated;
++
+ unsigned int speed_set;
+
++ /* SerDes UEFI configurable settings.
++ * Switching between modes/speeds requires new values for some
++ * SerDes settings. The values can be supplied as device
++ * properties in array format. The first array entry is for
++ * 1GbE, second for 2.5GbE and third for 10GbE
++ */
++ u32 serdes_blwc[XGBE_PHY_SPEEDS];
++ u32 serdes_cdr_rate[XGBE_PHY_SPEEDS];
++ u32 serdes_pq_skew[XGBE_PHY_SPEEDS];
++ u32 serdes_tx_amp[XGBE_PHY_SPEEDS];
++ u32 serdes_dfe_tap_cfg[XGBE_PHY_SPEEDS];
++ u32 serdes_dfe_tap_ena[XGBE_PHY_SPEEDS];
++
+ /* Auto-negotiation state machine support */
+ struct mutex an_mutex;
+ enum amd_xgbe_phy_an an_result;
+@@ -348,7 +434,12 @@ struct amd_xgbe_phy_priv {
+ enum amd_xgbe_phy_rx kx_state;
+ struct work_struct an_work;
+ struct workqueue_struct *an_workqueue;
++ unsigned int an_supported;
+ unsigned int parallel_detect;
++ unsigned int fec_ability;
++ unsigned long an_start;
++
++ unsigned int lpm_ctrl; /* CTRL1 for resume */
+ };
+
+ static int amd_xgbe_an_enable_kr_training(struct phy_device *phydev)
+@@ -359,7 +450,7 @@ static int amd_xgbe_an_enable_kr_training(struct phy_device *phydev)
+ if (ret < 0)
+ return ret;
+
+- ret |= 0x02;
++ ret |= XGBE_PHY_KR_TRAINING_ENABLE;
+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret);
+
+ return 0;
+@@ -373,7 +464,7 @@ static int amd_xgbe_an_disable_kr_training(struct phy_device *phydev)
+ if (ret < 0)
+ return ret;
+
+- ret &= ~0x02;
++ ret &= ~XGBE_PHY_KR_TRAINING_ENABLE;
+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret);
+
+ return 0;
+@@ -423,11 +514,16 @@ static void amd_xgbe_phy_serdes_complete_ratechange(struct phy_device *phydev)
+ status = XSIR0_IOREAD(priv, SIR0_STATUS);
+ if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) &&
+ XSIR_GET_BITS(status, SIR0_STATUS, TX_READY))
+- return;
++ goto rx_reset;
+ }
+
+ netdev_dbg(phydev->attached_dev, "SerDes rx/tx not ready (%#hx)\n",
+ status);
++
++rx_reset:
++ /* Perform Rx reset for the DFE changes */
++ XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RESETB_RXD, 0);
++ XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RESETB_RXD, 1);
+ }
+
+ static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev)
+@@ -466,12 +562,20 @@ static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev)
+
+ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_10000_RATE);
+ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_10000_WORD);
+- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP, SPEED_10000_TXAMP);
+ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_10000_PLL);
+- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PI_SPD_SEL, SPEED_10000_CDR);
+
+- XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA, RXTX_10000_BLWC);
+- XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, RXTX_10000_PQ);
++ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, CDR_RATE,
++ priv->serdes_cdr_rate[XGBE_PHY_SPEED_10000]);
++ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP,
++ priv->serdes_tx_amp[XGBE_PHY_SPEED_10000]);
++ XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA,
++ priv->serdes_blwc[XGBE_PHY_SPEED_10000]);
++ XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
++ priv->serdes_pq_skew[XGBE_PHY_SPEED_10000]);
++ XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
++ priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_10000]);
++ XRXTX_IOWRITE(priv, RXTX_REG22,
++ priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_10000]);
+
+ amd_xgbe_phy_serdes_complete_ratechange(phydev);
+
+@@ -514,12 +618,20 @@ static int amd_xgbe_phy_gmii_2500_mode(struct phy_device *phydev)
+
+ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_2500_RATE);
+ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_2500_WORD);
+- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP, SPEED_2500_TXAMP);
+ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_2500_PLL);
+- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PI_SPD_SEL, SPEED_2500_CDR);
+
+- XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA, RXTX_2500_BLWC);
+- XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, RXTX_2500_PQ);
++ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, CDR_RATE,
++ priv->serdes_cdr_rate[XGBE_PHY_SPEED_2500]);
++ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP,
++ priv->serdes_tx_amp[XGBE_PHY_SPEED_2500]);
++ XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA,
++ priv->serdes_blwc[XGBE_PHY_SPEED_2500]);
++ XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
++ priv->serdes_pq_skew[XGBE_PHY_SPEED_2500]);
++ XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
++ priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_2500]);
++ XRXTX_IOWRITE(priv, RXTX_REG22,
++ priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_2500]);
+
+ amd_xgbe_phy_serdes_complete_ratechange(phydev);
+
+@@ -562,12 +674,20 @@ static int amd_xgbe_phy_gmii_mode(struct phy_device *phydev)
+
+ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_1000_RATE);
+ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_1000_WORD);
+- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP, SPEED_1000_TXAMP);
+ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_1000_PLL);
+- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PI_SPD_SEL, SPEED_1000_CDR);
+
+- XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA, RXTX_1000_BLWC);
+- XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, RXTX_1000_PQ);
++ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, CDR_RATE,
++ priv->serdes_cdr_rate[XGBE_PHY_SPEED_1000]);
++ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP,
++ priv->serdes_tx_amp[XGBE_PHY_SPEED_1000]);
++ XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA,
++ priv->serdes_blwc[XGBE_PHY_SPEED_1000]);
++ XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
++ priv->serdes_pq_skew[XGBE_PHY_SPEED_1000]);
++ XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
++ priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_1000]);
++ XRXTX_IOWRITE(priv, RXTX_REG22,
++ priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_1000]);
+
+ amd_xgbe_phy_serdes_complete_ratechange(phydev);
+
+@@ -635,6 +755,77 @@ static int amd_xgbe_phy_set_mode(struct phy_device *phydev,
+ return ret;
+ }
+
++static bool amd_xgbe_phy_use_xgmii_mode(struct phy_device *phydev)
++{
++ if (phydev->autoneg == AUTONEG_ENABLE) {
++ if (phydev->advertising & ADVERTISED_10000baseKR_Full)
++ return true;
++ } else {
++ if (phydev->speed == SPEED_10000)
++ return true;
++ }
++
++ return false;
++}
++
++static bool amd_xgbe_phy_use_gmii_2500_mode(struct phy_device *phydev)
++{
++ if (phydev->autoneg == AUTONEG_ENABLE) {
++ if (phydev->advertising & ADVERTISED_2500baseX_Full)
++ return true;
++ } else {
++ if (phydev->speed == SPEED_2500)
++ return true;
++ }
++
++ return false;
++}
++
++static bool amd_xgbe_phy_use_gmii_mode(struct phy_device *phydev)
++{
++ if (phydev->autoneg == AUTONEG_ENABLE) {
++ if (phydev->advertising & ADVERTISED_1000baseKX_Full)
++ return true;
++ } else {
++ if (phydev->speed == SPEED_1000)
++ return true;
++ }
++
++ return false;
++}
++
++static int amd_xgbe_phy_set_an(struct phy_device *phydev, bool enable,
++ bool restart)
++{
++ int ret;
++
++ ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
++ if (ret < 0)
++ return ret;
++
++ ret &= ~MDIO_AN_CTRL1_ENABLE;
++
++ if (enable)
++ ret |= MDIO_AN_CTRL1_ENABLE;
++
++ if (restart)
++ ret |= MDIO_AN_CTRL1_RESTART;
++
++ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, ret);
++
++ return 0;
++}
++
++static int amd_xgbe_phy_restart_an(struct phy_device *phydev)
++{
++ return amd_xgbe_phy_set_an(phydev, true, true);
++}
++
++static int amd_xgbe_phy_disable_an(struct phy_device *phydev)
++{
++ return amd_xgbe_phy_set_an(phydev, false, false);
++}
++
+ static enum amd_xgbe_phy_an amd_xgbe_an_tx_training(struct phy_device *phydev,
+ enum amd_xgbe_phy_rx *state)
+ {
+@@ -645,7 +836,7 @@ static enum amd_xgbe_phy_an amd_xgbe_an_tx_training(struct phy_device *phydev,
+
+ /* If we're not in KR mode then we're done */
+ if (!amd_xgbe_phy_in_kr_mode(phydev))
+- return AMD_XGBE_AN_EVENT;
++ return AMD_XGBE_AN_PAGE_RECEIVED;
+
+ /* Enable/Disable FEC */
+ ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
+@@ -660,10 +851,9 @@ static enum amd_xgbe_phy_an amd_xgbe_an_tx_training(struct phy_device *phydev,
+ if (ret < 0)
+ return AMD_XGBE_AN_ERROR;
+
++ ret &= ~XGBE_PHY_FEC_MASK;
+ if ((ad_reg & 0xc000) && (lp_reg & 0xc000))
+- ret |= 0x01;
+- else
+- ret &= ~0x01;
++ ret |= priv->fec_ability;
+
+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_CTRL, ret);
+
+@@ -672,14 +862,17 @@ static enum amd_xgbe_phy_an amd_xgbe_an_tx_training(struct phy_device *phydev,
+ if (ret < 0)
+ return AMD_XGBE_AN_ERROR;
+
+- XSIR0_IOWRITE_BITS(priv, SIR0_KR_RT_1, RESET, 1);
++ if (ret & XGBE_PHY_KR_TRAINING_ENABLE) {
++ XSIR0_IOWRITE_BITS(priv, SIR0_KR_RT_1, RESET, 1);
+
+- ret |= 0x01;
+- phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret);
++ ret |= XGBE_PHY_KR_TRAINING_START;
++ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL,
++ ret);
+
+- XSIR0_IOWRITE_BITS(priv, SIR0_KR_RT_1, RESET, 0);
++ XSIR0_IOWRITE_BITS(priv, SIR0_KR_RT_1, RESET, 0);
++ }
+
+- return AMD_XGBE_AN_EVENT;
++ return AMD_XGBE_AN_PAGE_RECEIVED;
+ }
+
+ static enum amd_xgbe_phy_an amd_xgbe_an_tx_xnp(struct phy_device *phydev,
+@@ -696,7 +889,7 @@ static enum amd_xgbe_phy_an amd_xgbe_an_tx_xnp(struct phy_device *phydev,
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP + 1, 0);
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP, msg);
+
+- return AMD_XGBE_AN_EVENT;
++ return AMD_XGBE_AN_PAGE_RECEIVED;
+ }
+
+ static enum amd_xgbe_phy_an amd_xgbe_an_rx_bpa(struct phy_device *phydev,
+@@ -735,11 +928,11 @@ static enum amd_xgbe_phy_an amd_xgbe_an_rx_xnp(struct phy_device *phydev,
+ int ad_reg, lp_reg;
+
+ /* Check Extended Next Page support */
+- ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
++ ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP);
+ if (ad_reg < 0)
+ return AMD_XGBE_AN_ERROR;
+
+- lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA);
++ lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPX);
+ if (lp_reg < 0)
+ return AMD_XGBE_AN_ERROR;
+
+@@ -748,226 +941,271 @@ static enum amd_xgbe_phy_an amd_xgbe_an_rx_xnp(struct phy_device *phydev,
+ amd_xgbe_an_tx_training(phydev, state);
+ }
+
+-static enum amd_xgbe_phy_an amd_xgbe_an_start(struct phy_device *phydev)
++static enum amd_xgbe_phy_an amd_xgbe_an_page_received(struct phy_device *phydev)
+ {
+ struct amd_xgbe_phy_priv *priv = phydev->priv;
++ enum amd_xgbe_phy_rx *state;
++ unsigned long an_timeout;
+ int ret;
+
+- /* Be sure we aren't looping trying to negotiate */
+- if (amd_xgbe_phy_in_kr_mode(phydev)) {
+- if (priv->kr_state != AMD_XGBE_RX_READY)
+- return AMD_XGBE_AN_NO_LINK;
+- priv->kr_state = AMD_XGBE_RX_BPA;
++ if (!priv->an_start) {
++ priv->an_start = jiffies;
+ } else {
+- if (priv->kx_state != AMD_XGBE_RX_READY)
+- return AMD_XGBE_AN_NO_LINK;
+- priv->kx_state = AMD_XGBE_RX_BPA;
++ an_timeout = priv->an_start +
++ msecs_to_jiffies(XGBE_AN_MS_TIMEOUT);
++ if (time_after(jiffies, an_timeout)) {
++ /* Auto-negotiation timed out, reset state */
++ priv->kr_state = AMD_XGBE_RX_BPA;
++ priv->kx_state = AMD_XGBE_RX_BPA;
++
++ priv->an_start = jiffies;
++ }
+ }
+
+- /* Set up Advertisement register 3 first */
+- ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
+- if (ret < 0)
+- return AMD_XGBE_AN_ERROR;
+-
+- if (phydev->supported & SUPPORTED_10000baseR_FEC)
+- ret |= 0xc000;
+- else
+- ret &= ~0xc000;
+-
+- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2, ret);
++ state = amd_xgbe_phy_in_kr_mode(phydev) ? &priv->kr_state
++ : &priv->kx_state;
+
+- /* Set up Advertisement register 2 next */
+- ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
+- if (ret < 0)
+- return AMD_XGBE_AN_ERROR;
++ switch (*state) {
++ case AMD_XGBE_RX_BPA:
++ ret = amd_xgbe_an_rx_bpa(phydev, state);
++ break;
+
+- if (phydev->supported & SUPPORTED_10000baseKR_Full)
+- ret |= 0x80;
+- else
+- ret &= ~0x80;
++ case AMD_XGBE_RX_XNP:
++ ret = amd_xgbe_an_rx_xnp(phydev, state);
++ break;
+
+- if ((phydev->supported & SUPPORTED_1000baseKX_Full) ||
+- (phydev->supported & SUPPORTED_2500baseX_Full))
+- ret |= 0x20;
+- else
+- ret &= ~0x20;
++ default:
++ ret = AMD_XGBE_AN_ERROR;
++ }
+
+- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1, ret);
++ return ret;
++}
+
+- /* Set up Advertisement register 1 last */
+- ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
+- if (ret < 0)
+- return AMD_XGBE_AN_ERROR;
++static enum amd_xgbe_phy_an amd_xgbe_an_incompat_link(struct phy_device *phydev)
++{
++ struct amd_xgbe_phy_priv *priv = phydev->priv;
++ int ret;
+
+- if (phydev->supported & SUPPORTED_Pause)
+- ret |= 0x400;
+- else
+- ret &= ~0x400;
++ /* Be sure we aren't looping trying to negotiate */
++ if (amd_xgbe_phy_in_kr_mode(phydev)) {
++ priv->kr_state = AMD_XGBE_RX_ERROR;
+
+- if (phydev->supported & SUPPORTED_Asym_Pause)
+- ret |= 0x800;
+- else
+- ret &= ~0x800;
++ if (!(phydev->advertising & SUPPORTED_1000baseKX_Full) &&
++ !(phydev->advertising & SUPPORTED_2500baseX_Full))
++ return AMD_XGBE_AN_NO_LINK;
+
+- /* We don't intend to perform XNP */
+- ret &= ~XNP_NP_EXCHANGE;
++ if (priv->kx_state != AMD_XGBE_RX_BPA)
++ return AMD_XGBE_AN_NO_LINK;
++ } else {
++ priv->kx_state = AMD_XGBE_RX_ERROR;
+
+- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE, ret);
++ if (!(phydev->advertising & SUPPORTED_10000baseKR_Full))
++ return AMD_XGBE_AN_NO_LINK;
+
+- /* Enable and start auto-negotiation */
+- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
++ if (priv->kr_state != AMD_XGBE_RX_BPA)
++ return AMD_XGBE_AN_NO_LINK;
++ }
+
+- ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_KR_CTRL);
+- if (ret < 0)
++ ret = amd_xgbe_phy_disable_an(phydev);
++ if (ret)
+ return AMD_XGBE_AN_ERROR;
+
+- ret |= MDIO_KR_CTRL_PDETECT;
+- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_KR_CTRL, ret);
+-
+- ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
+- if (ret < 0)
++ ret = amd_xgbe_phy_switch_mode(phydev);
++ if (ret)
+ return AMD_XGBE_AN_ERROR;
+
+- ret |= MDIO_AN_CTRL1_ENABLE;
+- ret |= MDIO_AN_CTRL1_RESTART;
+- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, ret);
++ ret = amd_xgbe_phy_restart_an(phydev);
++ if (ret)
++ return AMD_XGBE_AN_ERROR;
+
+- return AMD_XGBE_AN_EVENT;
++ return AMD_XGBE_AN_INCOMPAT_LINK;
+ }
+
+-static enum amd_xgbe_phy_an amd_xgbe_an_event(struct phy_device *phydev)
++static irqreturn_t amd_xgbe_an_isr(int irq, void *data)
+ {
+- enum amd_xgbe_phy_an new_state;
+- int ret;
++ struct amd_xgbe_phy_priv *priv = (struct amd_xgbe_phy_priv *)data;
+
+- ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT);
+- if (ret < 0)
+- return AMD_XGBE_AN_ERROR;
++ /* Interrupt reason must be read and cleared outside of IRQ context */
++ disable_irq_nosync(priv->an_irq);
+
+- new_state = AMD_XGBE_AN_EVENT;
+- if (ret & XGBE_AN_PG_RCV)
+- new_state = AMD_XGBE_AN_PAGE_RECEIVED;
+- else if (ret & XGBE_AN_INC_LINK)
+- new_state = AMD_XGBE_AN_INCOMPAT_LINK;
+- else if (ret & XGBE_AN_INT_CMPLT)
+- new_state = AMD_XGBE_AN_COMPLETE;
++ queue_work(priv->an_workqueue, &priv->an_irq_work);
+
+- if (new_state != AMD_XGBE_AN_EVENT)
+- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
++ return IRQ_HANDLED;
++}
++
++static void amd_xgbe_an_irq_work(struct work_struct *work)
++{
++ struct amd_xgbe_phy_priv *priv = container_of(work,
++ struct amd_xgbe_phy_priv,
++ an_irq_work);
+
+- return new_state;
++ /* Avoid a race between enabling the IRQ and exiting the work by
++ * waiting for the work to finish and then queueing it
++ */
++ flush_work(&priv->an_work);
++ queue_work(priv->an_workqueue, &priv->an_work);
+ }
+
+-static enum amd_xgbe_phy_an amd_xgbe_an_page_received(struct phy_device *phydev)
++static void amd_xgbe_an_state_machine(struct work_struct *work)
+ {
+- struct amd_xgbe_phy_priv *priv = phydev->priv;
+- enum amd_xgbe_phy_rx *state;
+- int ret;
++ struct amd_xgbe_phy_priv *priv = container_of(work,
++ struct amd_xgbe_phy_priv,
++ an_work);
++ struct phy_device *phydev = priv->phydev;
++ enum amd_xgbe_phy_an cur_state = priv->an_state;
++ int int_reg, int_mask;
+
+- state = amd_xgbe_phy_in_kr_mode(phydev) ? &priv->kr_state
+- : &priv->kx_state;
++ mutex_lock(&priv->an_mutex);
+
+- switch (*state) {
+- case AMD_XGBE_RX_BPA:
+- ret = amd_xgbe_an_rx_bpa(phydev, state);
++ /* Read the interrupt */
++ int_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT);
++ if (!int_reg)
++ goto out;
++
++next_int:
++ if (int_reg < 0) {
++ priv->an_state = AMD_XGBE_AN_ERROR;
++ int_mask = XGBE_AN_INT_MASK;
++ } else if (int_reg & XGBE_AN_PG_RCV) {
++ priv->an_state = AMD_XGBE_AN_PAGE_RECEIVED;
++ int_mask = XGBE_AN_PG_RCV;
++ } else if (int_reg & XGBE_AN_INC_LINK) {
++ priv->an_state = AMD_XGBE_AN_INCOMPAT_LINK;
++ int_mask = XGBE_AN_INC_LINK;
++ } else if (int_reg & XGBE_AN_INT_CMPLT) {
++ priv->an_state = AMD_XGBE_AN_COMPLETE;
++ int_mask = XGBE_AN_INT_CMPLT;
++ } else {
++ priv->an_state = AMD_XGBE_AN_ERROR;
++ int_mask = 0;
++ }
++
++ /* Clear the interrupt to be processed */
++ int_reg &= ~int_mask;
++ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, int_reg);
++
++ priv->an_result = priv->an_state;
++
++again:
++ cur_state = priv->an_state;
++
++ switch (priv->an_state) {
++ case AMD_XGBE_AN_READY:
++ priv->an_supported = 0;
+ break;
+
+- case AMD_XGBE_RX_XNP:
+- ret = amd_xgbe_an_rx_xnp(phydev, state);
++ case AMD_XGBE_AN_PAGE_RECEIVED:
++ priv->an_state = amd_xgbe_an_page_received(phydev);
++ priv->an_supported++;
++ break;
++
++ case AMD_XGBE_AN_INCOMPAT_LINK:
++ priv->an_supported = 0;
++ priv->parallel_detect = 0;
++ priv->an_state = amd_xgbe_an_incompat_link(phydev);
++ break;
++
++ case AMD_XGBE_AN_COMPLETE:
++ priv->parallel_detect = priv->an_supported ? 0 : 1;
++ netdev_dbg(phydev->attached_dev, "%s successful\n",
++ priv->an_supported ? "Auto negotiation"
++ : "Parallel detection");
++ break;
++
++ case AMD_XGBE_AN_NO_LINK:
+ break;
+
+ default:
+- ret = AMD_XGBE_AN_ERROR;
++ priv->an_state = AMD_XGBE_AN_ERROR;
+ }
+
+- return ret;
+-}
++ if (priv->an_state == AMD_XGBE_AN_NO_LINK) {
++ int_reg = 0;
++ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
++ } else if (priv->an_state == AMD_XGBE_AN_ERROR) {
++ netdev_err(phydev->attached_dev,
++ "error during auto-negotiation, state=%u\n",
++ cur_state);
+
+-static enum amd_xgbe_phy_an amd_xgbe_an_incompat_link(struct phy_device *phydev)
+-{
+- int ret;
++ int_reg = 0;
++ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
++ }
+
+- ret = amd_xgbe_phy_switch_mode(phydev);
+- if (ret)
+- return AMD_XGBE_AN_ERROR;
++ if (priv->an_state >= AMD_XGBE_AN_COMPLETE) {
++ priv->an_result = priv->an_state;
++ priv->an_state = AMD_XGBE_AN_READY;
++ priv->kr_state = AMD_XGBE_RX_BPA;
++ priv->kx_state = AMD_XGBE_RX_BPA;
++ priv->an_start = 0;
++ }
+
+- return AMD_XGBE_AN_START;
+-}
++ if (cur_state != priv->an_state)
++ goto again;
+
+-static void amd_xgbe_an_state_machine(struct work_struct *work)
+-{
+- struct amd_xgbe_phy_priv *priv = container_of(work,
+- struct amd_xgbe_phy_priv,
+- an_work);
+- struct phy_device *phydev = priv->phydev;
+- enum amd_xgbe_phy_an cur_state;
+- int sleep;
+- unsigned int an_supported = 0;
++ if (int_reg)
++ goto next_int;
+
+- /* Start in KX mode */
+- if (amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KX))
+- priv->an_state = AMD_XGBE_AN_ERROR;
++out:
++ enable_irq(priv->an_irq);
+
+- while (1) {
+- mutex_lock(&priv->an_mutex);
++ mutex_unlock(&priv->an_mutex);
++}
+
+- cur_state = priv->an_state;
++static int amd_xgbe_an_init(struct phy_device *phydev)
++{
++ int ret;
+
+- switch (priv->an_state) {
+- case AMD_XGBE_AN_START:
+- an_supported = 0;
+- priv->parallel_detect = 0;
+- priv->an_state = amd_xgbe_an_start(phydev);
+- break;
++ /* Set up Advertisement register 3 first */
++ ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
++ if (ret < 0)
++ return ret;
+
+- case AMD_XGBE_AN_EVENT:
+- priv->an_state = amd_xgbe_an_event(phydev);
+- break;
++ if (phydev->advertising & SUPPORTED_10000baseR_FEC)
++ ret |= 0xc000;
++ else
++ ret &= ~0xc000;
+
+- case AMD_XGBE_AN_PAGE_RECEIVED:
+- priv->an_state = amd_xgbe_an_page_received(phydev);
+- an_supported++;
+- break;
++ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2, ret);
+
+- case AMD_XGBE_AN_INCOMPAT_LINK:
+- priv->an_state = amd_xgbe_an_incompat_link(phydev);
+- break;
++ /* Set up Advertisement register 2 next */
++ ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
++ if (ret < 0)
++ return ret;
+
+- case AMD_XGBE_AN_COMPLETE:
+- priv->parallel_detect = an_supported ? 0 : 1;
+- netdev_info(phydev->attached_dev, "%s successful\n",
+- an_supported ? "Auto negotiation"
+- : "Parallel detection");
+- /* fall through */
++ if (phydev->advertising & SUPPORTED_10000baseKR_Full)
++ ret |= 0x80;
++ else
++ ret &= ~0x80;
+
+- case AMD_XGBE_AN_NO_LINK:
+- case AMD_XGBE_AN_EXIT:
+- goto exit_unlock;
++ if ((phydev->advertising & SUPPORTED_1000baseKX_Full) ||
++ (phydev->advertising & SUPPORTED_2500baseX_Full))
++ ret |= 0x20;
++ else
++ ret &= ~0x20;
+
+- default:
+- priv->an_state = AMD_XGBE_AN_ERROR;
+- }
++ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1, ret);
+
+- if (priv->an_state == AMD_XGBE_AN_ERROR) {
+- netdev_err(phydev->attached_dev,
+- "error during auto-negotiation, state=%u\n",
+- cur_state);
+- goto exit_unlock;
+- }
++ /* Set up Advertisement register 1 last */
++ ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
++ if (ret < 0)
++ return ret;
+
+- sleep = (priv->an_state == AMD_XGBE_AN_EVENT) ? 1 : 0;
++ if (phydev->advertising & SUPPORTED_Pause)
++ ret |= 0x400;
++ else
++ ret &= ~0x400;
+
+- mutex_unlock(&priv->an_mutex);
++ if (phydev->advertising & SUPPORTED_Asym_Pause)
++ ret |= 0x800;
++ else
++ ret &= ~0x800;
+
+- if (sleep)
+- usleep_range(20, 50);
+- }
++ /* We don't intend to perform XNP */
++ ret &= ~XNP_NP_EXCHANGE;
+
+-exit_unlock:
+- priv->an_result = priv->an_state;
+- priv->an_state = AMD_XGBE_AN_READY;
++ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE, ret);
+
+- mutex_unlock(&priv->an_mutex);
++ return 0;
+ }
+
+ static int amd_xgbe_phy_soft_reset(struct phy_device *phydev)
+@@ -992,33 +1230,68 @@ static int amd_xgbe_phy_soft_reset(struct phy_device *phydev)
+ if (ret & MDIO_CTRL1_RESET)
+ return -ETIMEDOUT;
+
+- /* Make sure the XPCS and SerDes are in compatible states */
+- return amd_xgbe_phy_xgmii_mode(phydev);
++ /* Disable auto-negotiation for now */
++ ret = amd_xgbe_phy_disable_an(phydev);
++ if (ret < 0)
++ return ret;
++
++ /* Clear auto-negotiation interrupts */
++ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
++
++ return 0;
+ }
+
+ static int amd_xgbe_phy_config_init(struct phy_device *phydev)
+ {
+ struct amd_xgbe_phy_priv *priv = phydev->priv;
++ struct net_device *netdev = phydev->attached_dev;
++ int ret;
+
+- /* Initialize supported features */
+- phydev->supported = SUPPORTED_Autoneg;
+- phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+- phydev->supported |= SUPPORTED_Backplane;
+- phydev->supported |= SUPPORTED_10000baseKR_Full |
+- SUPPORTED_10000baseR_FEC;
+- switch (priv->speed_set) {
+- case AMD_XGBE_PHY_SPEEDSET_1000_10000:
+- phydev->supported |= SUPPORTED_1000baseKX_Full;
+- break;
+- case AMD_XGBE_PHY_SPEEDSET_2500_10000:
+- phydev->supported |= SUPPORTED_2500baseX_Full;
+- break;
++ if (!priv->an_irq_allocated) {
++ /* Allocate the auto-negotiation workqueue and interrupt */
++ snprintf(priv->an_irq_name, sizeof(priv->an_irq_name) - 1,
++ "%s-pcs", netdev_name(netdev));
++
++ priv->an_workqueue =
++ create_singlethread_workqueue(priv->an_irq_name);
++ if (!priv->an_workqueue) {
++ netdev_err(netdev, "phy workqueue creation failed\n");
++ return -ENOMEM;
++ }
++
++ ret = devm_request_irq(priv->dev, priv->an_irq,
++ amd_xgbe_an_isr, 0, priv->an_irq_name,
++ priv);
++ if (ret) {
++ netdev_err(netdev, "phy irq request failed\n");
++ destroy_workqueue(priv->an_workqueue);
++ return ret;
++ }
++
++ priv->an_irq_allocated = 1;
+ }
+- phydev->advertising = phydev->supported;
+
+- /* Turn off and clear interrupts */
+- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INTMASK, 0);
+- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
++ /* Set initial mode - call the mode setting routines
++ * directly to ensure we are properly configured
++ */
++ if (amd_xgbe_phy_use_xgmii_mode(phydev))
++ ret = amd_xgbe_phy_xgmii_mode(phydev);
++ else if (amd_xgbe_phy_use_gmii_mode(phydev))
++ ret = amd_xgbe_phy_gmii_mode(phydev);
++ else if (amd_xgbe_phy_use_gmii_2500_mode(phydev))
++ ret = amd_xgbe_phy_gmii_2500_mode(phydev);
++ else
++ ret = -EINVAL;
++ if (ret < 0)
++ return ret;
++
++ /* Set up advertisement registers based on current settings */
++ ret = amd_xgbe_an_init(phydev);
++ if (ret)
++ return ret;
++
++ /* Enable auto-negotiation interrupts */
++ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INTMASK, 0x07);
+
+ return 0;
+ }
+@@ -1028,25 +1301,19 @@ static int amd_xgbe_phy_setup_forced(struct phy_device *phydev)
+ int ret;
+
+ /* Disable auto-negotiation */
+- ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
++ ret = amd_xgbe_phy_disable_an(phydev);
+ if (ret < 0)
+ return ret;
+
+- ret &= ~MDIO_AN_CTRL1_ENABLE;
+- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, ret);
+-
+ /* Validate/Set specified speed */
+ switch (phydev->speed) {
+ case SPEED_10000:
+- ret = amd_xgbe_phy_xgmii_mode(phydev);
++ ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KR);
+ break;
+
+ case SPEED_2500:
+- ret = amd_xgbe_phy_gmii_2500_mode(phydev);
+- break;
+-
+ case SPEED_1000:
+- ret = amd_xgbe_phy_gmii_mode(phydev);
++ ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KX);
+ break;
+
+ default:
+@@ -1066,10 +1333,11 @@ static int amd_xgbe_phy_setup_forced(struct phy_device *phydev)
+ return 0;
+ }
+
+-static int amd_xgbe_phy_config_aneg(struct phy_device *phydev)
++static int __amd_xgbe_phy_config_aneg(struct phy_device *phydev)
+ {
+ struct amd_xgbe_phy_priv *priv = phydev->priv;
+ u32 mmd_mask = phydev->c45_ids.devices_in_package;
++ int ret;
+
+ if (phydev->autoneg != AUTONEG_ENABLE)
+ return amd_xgbe_phy_setup_forced(phydev);
+@@ -1078,56 +1346,79 @@ static int amd_xgbe_phy_config_aneg(struct phy_device *phydev)
+ if (!(mmd_mask & MDIO_DEVS_AN))
+ return -EINVAL;
+
+- /* Start/Restart the auto-negotiation state machine */
+- mutex_lock(&priv->an_mutex);
++ /* Disable auto-negotiation interrupt */
++ disable_irq(priv->an_irq);
++
++ /* Start auto-negotiation in a supported mode */
++ if (phydev->advertising & SUPPORTED_10000baseKR_Full)
++ ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KR);
++ else if ((phydev->advertising & SUPPORTED_1000baseKX_Full) ||
++ (phydev->advertising & SUPPORTED_2500baseX_Full))
++ ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KX);
++ else
++ ret = -EINVAL;
++ if (ret < 0) {
++ enable_irq(priv->an_irq);
++ return ret;
++ }
++
++ /* Disable and stop any in-progress auto-negotiation */
++ ret = amd_xgbe_phy_disable_an(phydev);
++ if (ret < 0)
++ return ret;
++
++ /* Clear any auto-negotiation interrupts */
++ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
++
+ priv->an_result = AMD_XGBE_AN_READY;
+- priv->an_state = AMD_XGBE_AN_START;
+- priv->kr_state = AMD_XGBE_RX_READY;
+- priv->kx_state = AMD_XGBE_RX_READY;
+- mutex_unlock(&priv->an_mutex);
++ priv->an_state = AMD_XGBE_AN_READY;
++ priv->kr_state = AMD_XGBE_RX_BPA;
++ priv->kx_state = AMD_XGBE_RX_BPA;
+
+- queue_work(priv->an_workqueue, &priv->an_work);
++ /* Re-enable auto-negotiation interrupt */
++ enable_irq(priv->an_irq);
+
+- return 0;
++ /* Set up advertisement registers based on current settings */
++ ret = amd_xgbe_an_init(phydev);
++ if (ret)
++ return ret;
++
++ /* Enable and start auto-negotiation */
++ return amd_xgbe_phy_restart_an(phydev);
+ }
+
+-static int amd_xgbe_phy_aneg_done(struct phy_device *phydev)
++static int amd_xgbe_phy_config_aneg(struct phy_device *phydev)
+ {
+ struct amd_xgbe_phy_priv *priv = phydev->priv;
+- enum amd_xgbe_phy_an state;
++ int ret;
+
+ mutex_lock(&priv->an_mutex);
+- state = priv->an_result;
++
++ ret = __amd_xgbe_phy_config_aneg(phydev);
++
+ mutex_unlock(&priv->an_mutex);
+
+- return (state == AMD_XGBE_AN_COMPLETE);
++ return ret;
++}
++
++static int amd_xgbe_phy_aneg_done(struct phy_device *phydev)
++{
++ struct amd_xgbe_phy_priv *priv = phydev->priv;
++
++ return (priv->an_result == AMD_XGBE_AN_COMPLETE);
+ }
+
+ static int amd_xgbe_phy_update_link(struct phy_device *phydev)
+ {
+ struct amd_xgbe_phy_priv *priv = phydev->priv;
+- enum amd_xgbe_phy_an state;
+- unsigned int check_again, autoneg;
+ int ret;
+
+ /* If we're doing auto-negotiation don't report link down */
+- mutex_lock(&priv->an_mutex);
+- state = priv->an_state;
+- mutex_unlock(&priv->an_mutex);
+-
+- if (state != AMD_XGBE_AN_READY) {
++ if (priv->an_state != AMD_XGBE_AN_READY) {
+ phydev->link = 1;
+ return 0;
+ }
+
+- /* Since the device can be in the wrong mode when a link is
+- * (re-)established (cable connected after the interface is
+- * up, etc.), the link status may report no link. If there
+- * is no link, try switching modes and checking the status
+- * again if auto negotiation is enabled.
+- */
+- check_again = (phydev->autoneg == AUTONEG_ENABLE) ? 1 : 0;
+-again:
+ /* Link status is latched low, so read once to clear
+ * and then read again to get current state
+ */
+@@ -1141,25 +1432,6 @@ again:
+
+ phydev->link = (ret & MDIO_STAT1_LSTATUS) ? 1 : 0;
+
+- if (!phydev->link) {
+- if (check_again) {
+- ret = amd_xgbe_phy_switch_mode(phydev);
+- if (ret < 0)
+- return ret;
+- check_again = 0;
+- goto again;
+- }
+- }
+-
+- autoneg = (phydev->link && !priv->link) ? 1 : 0;
+- priv->link = phydev->link;
+- if (autoneg) {
+- /* Link is (back) up, re-start auto-negotiation */
+- ret = amd_xgbe_phy_config_aneg(phydev);
+- if (ret < 0)
+- return ret;
+- }
+-
+ return 0;
+ }
+
+@@ -1249,6 +1521,7 @@ static int amd_xgbe_phy_read_status(struct phy_device *phydev)
+
+ static int amd_xgbe_phy_suspend(struct phy_device *phydev)
+ {
++ struct amd_xgbe_phy_priv *priv = phydev->priv;
+ int ret;
+
+ mutex_lock(&phydev->lock);
+@@ -1257,6 +1530,8 @@ static int amd_xgbe_phy_suspend(struct phy_device *phydev)
+ if (ret < 0)
+ goto unlock;
+
++ priv->lpm_ctrl = ret;
++
+ ret |= MDIO_CTRL1_LPOWER;
+ phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
+
+@@ -1270,69 +1545,106 @@ unlock:
+
+ static int amd_xgbe_phy_resume(struct phy_device *phydev)
+ {
+- int ret;
++ struct amd_xgbe_phy_priv *priv = phydev->priv;
+
+ mutex_lock(&phydev->lock);
+
+- ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
+- if (ret < 0)
+- goto unlock;
++ priv->lpm_ctrl &= ~MDIO_CTRL1_LPOWER;
++ phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, priv->lpm_ctrl);
+
+- ret &= ~MDIO_CTRL1_LPOWER;
+- phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
++ mutex_unlock(&phydev->lock);
+
+- ret = 0;
++ return 0;
++}
+
+-unlock:
+- mutex_unlock(&phydev->lock);
++static unsigned int amd_xgbe_phy_resource_count(struct platform_device *pdev,
++ unsigned int type)
++{
++ unsigned int count;
++ int i;
+
+- return ret;
++ for (i = 0, count = 0; i < pdev->num_resources; i++) {
++ struct resource *r = &pdev->resource[i];
++
++ if (type == resource_type(r))
++ count++;
++ }
++
++ return count;
+ }
+
+ static int amd_xgbe_phy_probe(struct phy_device *phydev)
+ {
+ struct amd_xgbe_phy_priv *priv;
+- struct platform_device *pdev;
+- struct device *dev;
+- char *wq_name;
+- const __be32 *property;
+- unsigned int speed_set;
++ struct platform_device *phy_pdev;
++ struct device *dev, *phy_dev;
++ unsigned int phy_resnum, phy_irqnum;
+ int ret;
+
+- if (!phydev->dev.of_node)
++ if (!phydev->bus || !phydev->bus->parent)
+ return -EINVAL;
+
+- pdev = of_find_device_by_node(phydev->dev.of_node);
+- if (!pdev)
+- return -EINVAL;
+- dev = &pdev->dev;
+-
+- wq_name = kasprintf(GFP_KERNEL, "%s-amd-xgbe-phy", phydev->bus->name);
+- if (!wq_name) {
+- ret = -ENOMEM;
+- goto err_pdev;
+- }
++ dev = phydev->bus->parent;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+- if (!priv) {
+- ret = -ENOMEM;
+- goto err_name;
+- }
++ if (!priv)
++ return -ENOMEM;
+
+- priv->pdev = pdev;
++ priv->pdev = to_platform_device(dev);
++ priv->adev = ACPI_COMPANION(dev);
+ priv->dev = dev;
+ priv->phydev = phydev;
++ mutex_init(&priv->an_mutex);
++ INIT_WORK(&priv->an_irq_work, amd_xgbe_an_irq_work);
++ INIT_WORK(&priv->an_work, amd_xgbe_an_state_machine);
++
++ if (!priv->adev || acpi_disabled) {
++ struct device_node *bus_node;
++ struct device_node *phy_node;
++
++ bus_node = priv->dev->of_node;
++ phy_node = of_parse_phandle(bus_node, "phy-handle", 0);
++ if (!phy_node) {
++ dev_err(dev, "unable to parse phy-handle\n");
++ ret = -EINVAL;
++ goto err_priv;
++ }
++
++ phy_pdev = of_find_device_by_node(phy_node);
++ of_node_put(phy_node);
++
++ if (!phy_pdev) {
++ dev_err(dev, "unable to obtain phy device\n");
++ ret = -EINVAL;
++ goto err_priv;
++ }
++
++ phy_resnum = 0;
++ phy_irqnum = 0;
++ } else {
++ /* In ACPI, the XGBE and PHY resources are grouped
++ * together with the PHY resources at the end
++ */
++ phy_pdev = priv->pdev;
++ phy_resnum = amd_xgbe_phy_resource_count(phy_pdev,
++ IORESOURCE_MEM) - 3;
++ phy_irqnum = amd_xgbe_phy_resource_count(phy_pdev,
++ IORESOURCE_IRQ) - 1;
++ }
++ phy_dev = &phy_pdev->dev;
+
+ /* Get the device mmio areas */
+- priv->rxtx_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ priv->rxtx_res = platform_get_resource(phy_pdev, IORESOURCE_MEM,
++ phy_resnum++);
+ priv->rxtx_regs = devm_ioremap_resource(dev, priv->rxtx_res);
+ if (IS_ERR(priv->rxtx_regs)) {
+ dev_err(dev, "rxtx ioremap failed\n");
+ ret = PTR_ERR(priv->rxtx_regs);
+- goto err_priv;
++ goto err_put;
+ }
+
+- priv->sir0_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
++ priv->sir0_res = platform_get_resource(phy_pdev, IORESOURCE_MEM,
++ phy_resnum++);
+ priv->sir0_regs = devm_ioremap_resource(dev, priv->sir0_res);
+ if (IS_ERR(priv->sir0_regs)) {
+ dev_err(dev, "sir0 ioremap failed\n");
+@@ -1340,7 +1652,8 @@ static int amd_xgbe_phy_probe(struct phy_device *phydev)
+ goto err_rxtx;
+ }
+
+- priv->sir1_res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
++ priv->sir1_res = platform_get_resource(phy_pdev, IORESOURCE_MEM,
++ phy_resnum++);
+ priv->sir1_regs = devm_ioremap_resource(dev, priv->sir1_res);
+ if (IS_ERR(priv->sir1_regs)) {
+ dev_err(dev, "sir1 ioremap failed\n");
+@@ -1348,40 +1661,153 @@ static int amd_xgbe_phy_probe(struct phy_device *phydev)
+ goto err_sir0;
+ }
+
++ /* Get the auto-negotiation interrupt */
++ ret = platform_get_irq(phy_pdev, phy_irqnum);
++ if (ret < 0) {
++ dev_err(dev, "platform_get_irq failed\n");
++ goto err_sir1;
++ }
++ priv->an_irq = ret;
++
+ /* Get the device speed set property */
+- speed_set = 0;
+- property = of_get_property(dev->of_node, XGBE_PHY_SPEEDSET_PROPERTY,
+- NULL);
+- if (property)
+- speed_set = be32_to_cpu(*property);
+-
+- switch (speed_set) {
+- case 0:
+- priv->speed_set = AMD_XGBE_PHY_SPEEDSET_1000_10000;
+- break;
+- case 1:
+- priv->speed_set = AMD_XGBE_PHY_SPEEDSET_2500_10000;
++ ret = device_property_read_u32(phy_dev, XGBE_PHY_SPEEDSET_PROPERTY,
++ &priv->speed_set);
++ if (ret) {
++ dev_err(dev, "invalid %s property\n",
++ XGBE_PHY_SPEEDSET_PROPERTY);
++ goto err_sir1;
++ }
++
++ switch (priv->speed_set) {
++ case AMD_XGBE_PHY_SPEEDSET_1000_10000:
++ case AMD_XGBE_PHY_SPEEDSET_2500_10000:
+ break;
+ default:
+- dev_err(dev, "invalid amd,speed-set property\n");
++ dev_err(dev, "invalid %s property\n",
++ XGBE_PHY_SPEEDSET_PROPERTY);
+ ret = -EINVAL;
+ goto err_sir1;
+ }
+
+- priv->link = 1;
++ if (device_property_present(phy_dev, XGBE_PHY_BLWC_PROPERTY)) {
++ ret = device_property_read_u32_array(phy_dev,
++ XGBE_PHY_BLWC_PROPERTY,
++ priv->serdes_blwc,
++ XGBE_PHY_SPEEDS);
++ if (ret) {
++ dev_err(dev, "invalid %s property\n",
++ XGBE_PHY_BLWC_PROPERTY);
++ goto err_sir1;
++ }
++ } else {
++ memcpy(priv->serdes_blwc, amd_xgbe_phy_serdes_blwc,
++ sizeof(priv->serdes_blwc));
++ }
+
+- mutex_init(&priv->an_mutex);
+- INIT_WORK(&priv->an_work, amd_xgbe_an_state_machine);
+- priv->an_workqueue = create_singlethread_workqueue(wq_name);
+- if (!priv->an_workqueue) {
+- ret = -ENOMEM;
+- goto err_sir1;
++ if (device_property_present(phy_dev, XGBE_PHY_CDR_RATE_PROPERTY)) {
++ ret = device_property_read_u32_array(phy_dev,
++ XGBE_PHY_CDR_RATE_PROPERTY,
++ priv->serdes_cdr_rate,
++ XGBE_PHY_SPEEDS);
++ if (ret) {
++ dev_err(dev, "invalid %s property\n",
++ XGBE_PHY_CDR_RATE_PROPERTY);
++ goto err_sir1;
++ }
++ } else {
++ memcpy(priv->serdes_cdr_rate, amd_xgbe_phy_serdes_cdr_rate,
++ sizeof(priv->serdes_cdr_rate));
++ }
++
++ if (device_property_present(phy_dev, XGBE_PHY_PQ_SKEW_PROPERTY)) {
++ ret = device_property_read_u32_array(phy_dev,
++ XGBE_PHY_PQ_SKEW_PROPERTY,
++ priv->serdes_pq_skew,
++ XGBE_PHY_SPEEDS);
++ if (ret) {
++ dev_err(dev, "invalid %s property\n",
++ XGBE_PHY_PQ_SKEW_PROPERTY);
++ goto err_sir1;
++ }
++ } else {
++ memcpy(priv->serdes_pq_skew, amd_xgbe_phy_serdes_pq_skew,
++ sizeof(priv->serdes_pq_skew));
++ }
++
++ if (device_property_present(phy_dev, XGBE_PHY_TX_AMP_PROPERTY)) {
++ ret = device_property_read_u32_array(phy_dev,
++ XGBE_PHY_TX_AMP_PROPERTY,
++ priv->serdes_tx_amp,
++ XGBE_PHY_SPEEDS);
++ if (ret) {
++ dev_err(dev, "invalid %s property\n",
++ XGBE_PHY_TX_AMP_PROPERTY);
++ goto err_sir1;
++ }
++ } else {
++ memcpy(priv->serdes_tx_amp, amd_xgbe_phy_serdes_tx_amp,
++ sizeof(priv->serdes_tx_amp));
++ }
++
++ if (device_property_present(phy_dev, XGBE_PHY_DFE_CFG_PROPERTY)) {
++ ret = device_property_read_u32_array(phy_dev,
++ XGBE_PHY_DFE_CFG_PROPERTY,
++ priv->serdes_dfe_tap_cfg,
++ XGBE_PHY_SPEEDS);
++ if (ret) {
++ dev_err(dev, "invalid %s property\n",
++ XGBE_PHY_DFE_CFG_PROPERTY);
++ goto err_sir1;
++ }
++ } else {
++ memcpy(priv->serdes_dfe_tap_cfg,
++ amd_xgbe_phy_serdes_dfe_tap_cfg,
++ sizeof(priv->serdes_dfe_tap_cfg));
+ }
+
++ if (device_property_present(phy_dev, XGBE_PHY_DFE_ENA_PROPERTY)) {
++ ret = device_property_read_u32_array(phy_dev,
++ XGBE_PHY_DFE_ENA_PROPERTY,
++ priv->serdes_dfe_tap_ena,
++ XGBE_PHY_SPEEDS);
++ if (ret) {
++ dev_err(dev, "invalid %s property\n",
++ XGBE_PHY_DFE_ENA_PROPERTY);
++ goto err_sir1;
++ }
++ } else {
++ memcpy(priv->serdes_dfe_tap_ena,
++ amd_xgbe_phy_serdes_dfe_tap_ena,
++ sizeof(priv->serdes_dfe_tap_ena));
++ }
++
++ /* Initialize supported features */
++ phydev->supported = SUPPORTED_Autoneg;
++ phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
++ phydev->supported |= SUPPORTED_Backplane;
++ phydev->supported |= SUPPORTED_10000baseKR_Full;
++ switch (priv->speed_set) {
++ case AMD_XGBE_PHY_SPEEDSET_1000_10000:
++ phydev->supported |= SUPPORTED_1000baseKX_Full;
++ break;
++ case AMD_XGBE_PHY_SPEEDSET_2500_10000:
++ phydev->supported |= SUPPORTED_2500baseX_Full;
++ break;
++ }
++
++ ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_ABILITY);
++ if (ret < 0)
++ return ret;
++ priv->fec_ability = ret & XGBE_PHY_FEC_MASK;
++ if (priv->fec_ability & XGBE_PHY_FEC_ENABLE)
++ phydev->supported |= SUPPORTED_10000baseR_FEC;
++
++ phydev->advertising = phydev->supported;
++
+ phydev->priv = priv;
+
+- kfree(wq_name);
+- of_dev_put(pdev);
++ if (!priv->adev || acpi_disabled)
++ platform_device_put(phy_pdev);
+
+ return 0;
+
+@@ -1400,15 +1826,13 @@ err_rxtx:
+ devm_release_mem_region(dev, priv->rxtx_res->start,
+ resource_size(priv->rxtx_res));
+
++err_put:
++ if (!priv->adev || acpi_disabled)
++ platform_device_put(phy_pdev);
++
+ err_priv:
+ devm_kfree(dev, priv);
+
+-err_name:
+- kfree(wq_name);
+-
+-err_pdev:
+- of_dev_put(pdev);
+-
+ return ret;
+ }
+
+@@ -1417,13 +1841,12 @@ static void amd_xgbe_phy_remove(struct phy_device *phydev)
+ struct amd_xgbe_phy_priv *priv = phydev->priv;
+ struct device *dev = priv->dev;
+
+- /* Stop any in process auto-negotiation */
+- mutex_lock(&priv->an_mutex);
+- priv->an_state = AMD_XGBE_AN_EXIT;
+- mutex_unlock(&priv->an_mutex);
++ if (priv->an_irq_allocated) {
++ devm_free_irq(dev, priv->an_irq, priv);
+
+- flush_workqueue(priv->an_workqueue);
+- destroy_workqueue(priv->an_workqueue);
++ flush_workqueue(priv->an_workqueue);
++ destroy_workqueue(priv->an_workqueue);
++ }
+
+ /* Release resources */
+ devm_iounmap(dev, priv->sir1_regs);
+@@ -1452,6 +1875,7 @@ static struct phy_driver amd_xgbe_phy_driver[] = {
+ .phy_id_mask = XGBE_PHY_MASK,
+ .name = "AMD XGBE PHY",
+ .features = 0,
++ .flags = PHY_IS_INTERNAL,
+ .probe = amd_xgbe_phy_probe,
+ .remove = amd_xgbe_phy_remove,
+ .soft_reset = amd_xgbe_phy_soft_reset,
+diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
+index abcafaa..6bdf476 100644
+--- a/include/linux/clocksource.h
++++ b/include/linux/clocksource.h
+@@ -87,6 +87,15 @@ static inline u64 cyclecounter_cyc2ns(const struct cyclecounter *cc,
+ }
+
+ /**
++ * timecounter_adjtime - Shifts the time of the clock.
++ * @delta: Desired change in nanoseconds.
++ */
++static inline void timecounter_adjtime(struct timecounter *tc, s64 delta)
++{
++ tc->nsec += delta;
++}
++
++/**
+ * timecounter_init - initialize a time counter
+ * @tc: Pointer to time counter which is to be initialized/reset
+ * @cc: A cycle counter, ready to be used.
+--
+1.9.1
+