Diffstat (limited to 'drivers/net/ethernet/intel')
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_ethtool.c    |   7
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_main.c       |   4
-rw-r--r--  drivers/net/ethernet/intel/e1000e/e1000.h           |   1
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c          | 103
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h              |   2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_common.c       |   4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c         |  50
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c  |  22
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_xsk.c          |   4
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf.h              |   2
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_main.c         |  21
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_virtchnl.c     |   3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c         |  25
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_controlq.c       |  51
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_controlq.h       |   5
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool.c        |   7
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_hw_autogen.h     |   6
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c           |  21
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sched.c          |   2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c    |  15
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.c        |   8
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c        |   5
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c           |  12
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.c     |   2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c        |  12
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c       |  57
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c   |   5
27 files changed, 287 insertions(+), 169 deletions(-)
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index a41008523c98..2e07ffa87e34 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -607,6 +607,7 @@ static int e1000_set_ringparam(struct net_device *netdev,
for (i = 0; i < adapter->num_rx_queues; i++)
rxdr[i].count = rxdr->count;
+ err = 0;
if (netif_running(adapter->netdev)) {
/* Try to get new resources before deleting old */
err = e1000_setup_all_rx_resources(adapter);
@@ -627,14 +628,13 @@ static int e1000_set_ringparam(struct net_device *netdev,
adapter->rx_ring = rxdr;
adapter->tx_ring = txdr;
err = e1000_up(adapter);
- if (err)
- goto err_setup;
}
kfree(tx_old);
kfree(rx_old);
clear_bit(__E1000_RESETTING, &adapter->flags);
- return 0;
+ return err;
+
err_setup_tx:
e1000_free_all_rx_resources(adapter);
err_setup_rx:
@@ -646,7 +646,6 @@ err_alloc_rx:
err_alloc_tx:
if (netif_running(adapter->netdev))
e1000_up(adapter);
-err_setup:
clear_bit(__E1000_RESETTING, &adapter->flags);
return err;
}
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 551de8c2fef2..5e6722f286aa 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -3141,8 +3141,9 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
if (skb->data_len && hdr_len == len) {
switch (hw->mac_type) {
+ case e1000_82544: {
unsigned int pull_size;
- case e1000_82544:
+
/* Make sure we have room to chop off 4 bytes,
* and that the end alignment will work out to
* this hardware's requirements
@@ -3163,6 +3164,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
}
len = skb_headlen(skb);
break;
+ }
default:
/* do nothing */
break;
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index be13227f1697..b91d315b123f 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -577,7 +577,6 @@ static inline u32 __er32(struct e1000_hw *hw, unsigned long reg)
#define er32(reg) __er32(hw, E1000_##reg)
-s32 __ew32_prepare(struct e1000_hw *hw);
void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val);
#define ew32(reg, val) __ew32(hw, E1000_##reg, (val))
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index b081a1ef6859..4ac19ce5e251 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -119,14 +119,12 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = {
* has bit 24 set while ME is accessing MAC CSR registers, wait if it is set
* and try again a number of times.
**/
-s32 __ew32_prepare(struct e1000_hw *hw)
+static void __ew32_prepare(struct e1000_hw *hw)
{
s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT;
while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i)
udelay(50);
-
- return i;
}
void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val)
@@ -607,11 +605,11 @@ static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i)
{
struct e1000_adapter *adapter = rx_ring->adapter;
struct e1000_hw *hw = &adapter->hw;
- s32 ret_val = __ew32_prepare(hw);
+ __ew32_prepare(hw);
writel(i, rx_ring->tail);
- if (unlikely(!ret_val && (i != readl(rx_ring->tail)))) {
+ if (unlikely(i != readl(rx_ring->tail))) {
u32 rctl = er32(RCTL);
ew32(RCTL, rctl & ~E1000_RCTL_EN);
@@ -624,11 +622,11 @@ static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i)
{
struct e1000_adapter *adapter = tx_ring->adapter;
struct e1000_hw *hw = &adapter->hw;
- s32 ret_val = __ew32_prepare(hw);
+ __ew32_prepare(hw);
writel(i, tx_ring->tail);
- if (unlikely(!ret_val && (i != readl(tx_ring->tail)))) {
+ if (unlikely(i != readl(tx_ring->tail))) {
u32 tctl = er32(TCTL);
ew32(TCTL, tctl & ~E1000_TCTL_EN);
@@ -4713,12 +4711,12 @@ int e1000e_close(struct net_device *netdev)
pm_runtime_get_sync(&pdev->dev);
- if (!test_bit(__E1000_DOWN, &adapter->state)) {
+ if (netif_device_present(netdev)) {
e1000e_down(adapter, true);
e1000_free_irq(adapter);
/* Link status message must follow this format */
- pr_info("%s NIC Link is Down\n", adapter->netdev->name);
+ pr_info("%s NIC Link is Down\n", netdev->name);
}
napi_disable(&adapter->napi);
@@ -5273,6 +5271,10 @@ static void e1000_watchdog_task(struct work_struct *work)
/* oops */
break;
}
+ if (hw->mac.type == e1000_pch_spt) {
+ netdev->features &= ~NETIF_F_TSO;
+ netdev->features &= ~NETIF_F_TSO6;
+ }
}
/* enable transmits in the hardware, need to do this
@@ -6292,12 +6294,16 @@ fl_out:
static int e1000e_pm_freeze(struct device *dev)
{
- struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
+ struct net_device *netdev = dev_get_drvdata(dev);
struct e1000_adapter *adapter = netdev_priv(netdev);
+ bool present;
+ rtnl_lock();
+
+ present = netif_device_present(netdev);
netif_device_detach(netdev);
- if (netif_running(netdev)) {
+ if (present && netif_running(netdev)) {
int count = E1000_CHECK_RESET_COUNT;
while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
@@ -6309,6 +6315,8 @@ static int e1000e_pm_freeze(struct device *dev)
e1000e_down(adapter, false);
e1000_free_irq(adapter);
}
+ rtnl_unlock();
+
e1000e_reset_interrupt_capability(adapter);
/* Allow time for pending master requests to run */
@@ -6322,11 +6330,17 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- u32 ctrl, ctrl_ext, rctl, status;
- /* Runtime suspend should only enable wakeup for link changes */
- u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
+ u32 ctrl, ctrl_ext, rctl, status, wufc;
int retval = 0;
+ /* Runtime suspend should only enable wakeup for link changes */
+ if (runtime)
+ wufc = E1000_WUFC_LNKC;
+ else if (device_may_wakeup(&pdev->dev))
+ wufc = adapter->wol;
+ else
+ wufc = 0;
+
status = er32(STATUS);
if (status & E1000_STATUS_LU)
wufc &= ~E1000_WUFC_LNKC;
@@ -6383,7 +6397,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
if (adapter->hw.phy.type == e1000_phy_igp_3) {
e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
} else if (hw->mac.type >= e1000_pch_lpt) {
- if (!(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC)))
+ if (wufc && !(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC)))
/* ULP does not support wake from unicast, multicast
* or broadcast.
*/
@@ -6556,6 +6570,30 @@ static void e1000e_disable_aspm_locked(struct pci_dev *pdev, u16 state)
__e1000e_disable_aspm(pdev, state, 1);
}
+static int e1000e_pm_thaw(struct device *dev)
+{
+ struct net_device *netdev = dev_get_drvdata(dev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ int rc = 0;
+
+ e1000e_set_interrupt_capability(adapter);
+
+ rtnl_lock();
+ if (netif_running(netdev)) {
+ rc = e1000_request_irq(adapter);
+ if (rc)
+ goto err_irq;
+
+ e1000e_up(adapter);
+ }
+
+ netif_device_attach(netdev);
+err_irq:
+ rtnl_unlock();
+
+ return rc;
+}
+
#ifdef CONFIG_PM
static int __e1000_resume(struct pci_dev *pdev)
{
@@ -6623,26 +6661,6 @@ static int __e1000_resume(struct pci_dev *pdev)
}
#ifdef CONFIG_PM_SLEEP
-static int e1000e_pm_thaw(struct device *dev)
-{
- struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
- struct e1000_adapter *adapter = netdev_priv(netdev);
-
- e1000e_set_interrupt_capability(adapter);
- if (netif_running(netdev)) {
- u32 err = e1000_request_irq(adapter);
-
- if (err)
- return err;
-
- e1000e_up(adapter);
- }
-
- netif_device_attach(netdev);
-
- return 0;
-}
-
static int e1000e_pm_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
@@ -6674,8 +6692,7 @@ static int e1000e_pm_resume(struct device *dev)
static int e1000e_pm_runtime_idle(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct net_device *netdev = pci_get_drvdata(pdev);
+ struct net_device *netdev = dev_get_drvdata(dev);
struct e1000_adapter *adapter = netdev_priv(netdev);
u16 eee_lp;
@@ -6815,16 +6832,11 @@ static void e1000_netpoll(struct net_device *netdev)
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct e1000_adapter *adapter = netdev_priv(netdev);
-
- netif_device_detach(netdev);
+ e1000e_pm_freeze(&pdev->dev);
if (state == pci_channel_io_perm_failure)
return PCI_ERS_RESULT_DISCONNECT;
- if (netif_running(netdev))
- e1000e_down(adapter, true);
pci_disable_device(pdev);
/* Request a slot reset. */
@@ -6890,10 +6902,7 @@ static void e1000_io_resume(struct pci_dev *pdev)
e1000_init_manageability_pt(adapter);
- if (netif_running(netdev))
- e1000e_up(adapter);
-
- netif_device_attach(netdev);
+ e1000e_pm_thaw(&pdev->dev);
/* If the controller has AMT, do not set DRV_LOAD until the interface
* is up. For all other cases, let the f/w know that the h/w is now
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 7ce42040b851..9ef2d3f86aff 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -1125,7 +1125,7 @@ void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags);
static inline bool i40e_enabled_xdp_vsi(struct i40e_vsi *vsi)
{
- return !!vsi->xdp_prog;
+ return !!READ_ONCE(vsi->xdp_prog);
}
int i40e_create_queue_channel(struct i40e_vsi *vsi, struct i40e_channel *ch);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index ecb1adaa54ec..017ef48219e8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -1859,8 +1859,8 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE)
hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;
- if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
- hw->aq.api_min_ver >= 7) {
+ if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE &&
+ hw->mac.type != I40E_MAC_X722) {
__le32 tmp;
memcpy(&tmp, resp->link_type, sizeof(tmp));
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index f2906479f5a6..10173a7987c5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -457,11 +457,15 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev,
i40e_get_netdev_stats_struct_tx(ring, stats);
if (i40e_enabled_xdp_vsi(vsi)) {
- ring++;
+ ring = READ_ONCE(vsi->xdp_rings[i]);
+ if (!ring)
+ continue;
i40e_get_netdev_stats_struct_tx(ring, stats);
}
- ring++;
+ ring = READ_ONCE(vsi->rx_rings[i]);
+ if (!ring)
+ continue;
do {
start = u64_stats_fetch_begin_irq(&ring->syncp);
packets = ring->stats.packets;
@@ -804,6 +808,8 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
for (q = 0; q < vsi->num_queue_pairs; q++) {
/* locate Tx ring */
p = READ_ONCE(vsi->tx_rings[q]);
+ if (!p)
+ continue;
do {
start = u64_stats_fetch_begin_irq(&p->syncp);
@@ -817,8 +823,11 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
tx_linearize += p->tx_stats.tx_linearize;
tx_force_wb += p->tx_stats.tx_force_wb;
- /* Rx queue is part of the same block as Tx queue */
- p = &p[1];
+ /* locate Rx ring */
+ p = READ_ONCE(vsi->rx_rings[q]);
+ if (!p)
+ continue;
+
do {
start = u64_stats_fetch_begin_irq(&p->syncp);
packets = p->stats.packets;
@@ -3528,14 +3537,14 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
q_vector->rx.target_itr =
ITR_TO_REG(vsi->rx_rings[i]->itr_setting);
wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
- q_vector->rx.target_itr);
+ q_vector->rx.target_itr >> 1);
q_vector->rx.current_itr = q_vector->rx.target_itr;
q_vector->tx.next_update = jiffies + 1;
q_vector->tx.target_itr =
ITR_TO_REG(vsi->tx_rings[i]->itr_setting);
wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
- q_vector->tx.target_itr);
+ q_vector->tx.target_itr >> 1);
q_vector->tx.current_itr = q_vector->tx.target_itr;
wr32(hw, I40E_PFINT_RATEN(vector - 1),
@@ -3640,11 +3649,11 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
/* set the ITR configuration */
q_vector->rx.next_update = jiffies + 1;
q_vector->rx.target_itr = ITR_TO_REG(vsi->rx_rings[0]->itr_setting);
- wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.target_itr);
+ wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.target_itr >> 1);
q_vector->rx.current_itr = q_vector->rx.target_itr;
q_vector->tx.next_update = jiffies + 1;
q_vector->tx.target_itr = ITR_TO_REG(vsi->tx_rings[0]->itr_setting);
- wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.target_itr);
+ wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.target_itr >> 1);
q_vector->tx.current_itr = q_vector->tx.target_itr;
i40e_enable_misc_int_causes(pf);
@@ -6272,6 +6281,7 @@ static int i40e_configure_queue_channels(struct i40e_vsi *vsi)
dev_err(&vsi->back->pdev->dev,
"Failed creating queue channel with TC%d: queues %d\n",
i, ch->num_queue_pairs);
+ kfree(ch);
goto err_free;
}
vsi->tc_seid_map[i] = ch->seid;
@@ -6789,8 +6799,8 @@ void i40e_down(struct i40e_vsi *vsi)
for (i = 0; i < vsi->num_queue_pairs; i++) {
i40e_clean_tx_ring(vsi->tx_rings[i]);
if (i40e_enabled_xdp_vsi(vsi)) {
- /* Make sure that in-progress ndo_xdp_xmit
- * calls are completed.
+ /* Make sure that in-progress ndo_xdp_xmit and
+ * ndo_xsk_wakeup calls are completed.
*/
synchronize_rcu();
i40e_clean_tx_ring(vsi->xdp_rings[i]);
@@ -10307,10 +10317,10 @@ static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
if (vsi->tx_rings && vsi->tx_rings[0]) {
for (i = 0; i < vsi->alloc_queue_pairs; i++) {
kfree_rcu(vsi->tx_rings[i], rcu);
- vsi->tx_rings[i] = NULL;
- vsi->rx_rings[i] = NULL;
+ WRITE_ONCE(vsi->tx_rings[i], NULL);
+ WRITE_ONCE(vsi->rx_rings[i], NULL);
if (vsi->xdp_rings)
- vsi->xdp_rings[i] = NULL;
+ WRITE_ONCE(vsi->xdp_rings[i], NULL);
}
}
}
@@ -10344,7 +10354,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
ring->itr_setting = pf->tx_itr_default;
- vsi->tx_rings[i] = ring++;
+ WRITE_ONCE(vsi->tx_rings[i], ring++);
if (!i40e_enabled_xdp_vsi(vsi))
goto setup_rx;
@@ -10362,7 +10372,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
set_ring_xdp(ring);
ring->itr_setting = pf->tx_itr_default;
- vsi->xdp_rings[i] = ring++;
+ WRITE_ONCE(vsi->xdp_rings[i], ring++);
setup_rx:
ring->queue_index = i;
@@ -10375,7 +10385,7 @@ setup_rx:
ring->size = 0;
ring->dcb_tc = 0;
ring->itr_setting = pf->rx_itr_default;
- vsi->rx_rings[i] = ring;
+ WRITE_ONCE(vsi->rx_rings[i], ring);
}
return 0;
@@ -10888,7 +10898,7 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)
/* associate no queues to the misc vector */
wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
- wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);
+ wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K >> 1);
i40e_flush(hw);
@@ -12015,8 +12025,12 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi,
old_prog = xchg(&vsi->xdp_prog, prog);
- if (need_reset)
+ if (need_reset) {
+ if (!prog)
+ /* Wait until ndo_xsk_wakeup completes. */
+ synchronize_rcu();
i40e_reset_and_rebuild(pf, true, true);
+ }
for (i = 0; i < vsi->num_queue_pairs; i++)
WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 479bc60c8f71..1b1e60b86f31 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -2307,6 +2307,22 @@ static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map,
}
/**
+ * i40e_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
+ * @vqs: virtchnl_queue_select structure containing bitmaps to validate
+ *
+ * Returns true if validation was successful, else false.
+ */
+static bool i40e_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
+{
+ if ((!vqs->rx_queues && !vqs->tx_queues) ||
+ vqs->rx_queues >= BIT(I40E_MAX_VF_QUEUES) ||
+ vqs->tx_queues >= BIT(I40E_MAX_VF_QUEUES))
+ return false;
+
+ return true;
+}
+
+/**
* i40e_vc_enable_queues_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -2332,7 +2348,7 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
goto error_param;
}
- if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
+ if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -2389,9 +2405,7 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
goto error_param;
}
- if ((vqs->rx_queues == 0 && vqs->tx_queues == 0) ||
- vqs->rx_queues > I40E_MAX_VF_QUEUES ||
- vqs->tx_queues > I40E_MAX_VF_QUEUES) {
+ if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 1b17486543ac..71ad5c93048d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -772,8 +772,12 @@ int i40e_xsk_async_xmit(struct net_device *dev, u32 queue_id)
{
struct i40e_netdev_priv *np = netdev_priv(dev);
struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
struct i40e_ring *ring;
+ if (test_bit(__I40E_CONFIG_BUSY, pf->state))
+ return -EAGAIN;
+
if (test_bit(__I40E_VSI_DOWN, vsi->state))
return -ENETDOWN;
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 272d76b733aa..4df14be14ce3 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -415,4 +415,6 @@ void iavf_enable_channels(struct iavf_adapter *adapter);
void iavf_disable_channels(struct iavf_adapter *adapter);
void iavf_add_cloud_filter(struct iavf_adapter *adapter);
void iavf_del_cloud_filter(struct iavf_adapter *adapter);
+struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
+ const u8 *macaddr);
#endif /* _IAVF_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 4569d69a2b55..cd89cf39f984 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -331,7 +331,7 @@ iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx)
q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
q_vector->ring_mask |= BIT(r_idx);
wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx),
- q_vector->rx.current_itr);
+ q_vector->rx.current_itr >> 1);
q_vector->rx.current_itr = q_vector->rx.target_itr;
}
@@ -357,7 +357,7 @@ iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx)
q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
q_vector->num_ringpairs++;
wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx),
- q_vector->tx.target_itr);
+ q_vector->tx.target_itr >> 1);
q_vector->tx.current_itr = q_vector->tx.target_itr;
}
@@ -761,9 +761,8 @@ iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter,
*
* Returns ptr to the filter object or NULL when no memory available.
**/
-static struct
-iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
- const u8 *macaddr)
+struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
+ const u8 *macaddr)
{
struct iavf_mac_filter *f;
@@ -1825,9 +1824,9 @@ static void iavf_reset_task(struct work_struct *work)
struct virtchnl_vf_resource *vfres = adapter->vf_res;
struct net_device *netdev = adapter->netdev;
struct iavf_hw *hw = &adapter->hw;
+ struct iavf_mac_filter *f, *ftmp;
struct iavf_vlan_filter *vlf;
struct iavf_cloud_filter *cf;
- struct iavf_mac_filter *f;
u32 reg_val;
int i = 0, err;
bool running;
@@ -1941,6 +1940,16 @@ continue_reset:
spin_lock_bh(&adapter->mac_vlan_list_lock);
+ /* Delete filter for the current MAC address, it could have
+ * been changed by the PF via administratively set MAC.
+ * Will be re-added via VIRTCHNL_OP_GET_VF_RESOURCES.
+ */
+ list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
+ if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr)) {
+ list_del(&f->list);
+ kfree(f);
+ }
+ }
/* re-add all MAC filters */
list_for_each_entry(f, &adapter->mac_filter_list, list) {
f->add = true;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index e64751da0921..418890a9b551 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -1357,6 +1357,9 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
ether_addr_copy(netdev->perm_addr,
adapter->hw.mac.addr);
}
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+ iavf_add_filter(adapter, adapter->hw.mac.addr);
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
iavf_process_config(adapter);
}
break;
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index da7878529929..28def013dab5 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -423,6 +423,7 @@ static void ice_init_flex_flds(struct ice_hw *hw, enum ice_rxdid prof_id)
static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
struct ice_switch_info *sw;
+ enum ice_status status;
hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
sizeof(*hw->switch_info), GFP_KERNEL);
@@ -433,7 +434,12 @@ static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
INIT_LIST_HEAD(&sw->vsi_list_map_head);
- return ice_init_def_sw_recp(hw);
+ status = ice_init_def_sw_recp(hw);
+ if (status) {
+ devm_kfree(ice_hw_to_dev(hw), hw->switch_info);
+ return status;
+ }
+ return 0;
}
/**
@@ -842,7 +848,7 @@ void ice_deinit_hw(struct ice_hw *hw)
*/
enum ice_status ice_check_reset(struct ice_hw *hw)
{
- u32 cnt, reg = 0, grst_delay;
+ u32 cnt, reg = 0, grst_delay, uld_mask;
/* Poll for Device Active state in case a recent CORER, GLOBR,
* or EMPR has occurred. The grst delay value is in 100ms units.
@@ -864,13 +870,20 @@ enum ice_status ice_check_reset(struct ice_hw *hw)
return ICE_ERR_RESET_FAILED;
}
-#define ICE_RESET_DONE_MASK (GLNVM_ULD_CORER_DONE_M | \
- GLNVM_ULD_GLOBR_DONE_M)
+#define ICE_RESET_DONE_MASK (GLNVM_ULD_PCIER_DONE_M |\
+ GLNVM_ULD_PCIER_DONE_1_M |\
+ GLNVM_ULD_CORER_DONE_M |\
+ GLNVM_ULD_GLOBR_DONE_M |\
+ GLNVM_ULD_POR_DONE_M |\
+ GLNVM_ULD_POR_DONE_1_M |\
+ GLNVM_ULD_PCIER_DONE_2_M)
+
+ uld_mask = ICE_RESET_DONE_MASK;
/* Device is Active; check Global Reset processes are done */
for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
- reg = rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK;
- if (reg == ICE_RESET_DONE_MASK) {
+ reg = rd32(hw, GLNVM_ULD) & uld_mask;
+ if (reg == uld_mask) {
ice_debug(hw, ICE_DBG_INIT,
"Global reset processes done. %d\n", cnt);
break;
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index cc8cb5fdcdc1..b33e24e02aab 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -199,7 +199,9 @@ unwind_alloc_rq_bufs:
cq->rq.r.rq_bi[i].pa = 0;
cq->rq.r.rq_bi[i].size = 0;
}
+ cq->rq.r.rq_bi = NULL;
devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head);
+ cq->rq.dma_head = NULL;
return ICE_ERR_NO_MEMORY;
}
@@ -245,7 +247,9 @@ unwind_alloc_sq_bufs:
cq->sq.r.sq_bi[i].pa = 0;
cq->sq.r.sq_bi[i].size = 0;
}
+ cq->sq.r.sq_bi = NULL;
devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head);
+ cq->sq.dma_head = NULL;
return ICE_ERR_NO_MEMORY;
}
@@ -304,6 +308,28 @@ ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
return 0;
}
+#define ICE_FREE_CQ_BUFS(hw, qi, ring) \
+do { \
+ int i; \
+ /* free descriptors */ \
+ if ((qi)->ring.r.ring##_bi) \
+ for (i = 0; i < (qi)->num_##ring##_entries; i++) \
+ if ((qi)->ring.r.ring##_bi[i].pa) { \
+ dmam_free_coherent(ice_hw_to_dev(hw), \
+ (qi)->ring.r.ring##_bi[i].size, \
+ (qi)->ring.r.ring##_bi[i].va, \
+ (qi)->ring.r.ring##_bi[i].pa); \
+ (qi)->ring.r.ring##_bi[i].va = NULL;\
+ (qi)->ring.r.ring##_bi[i].pa = 0;\
+ (qi)->ring.r.ring##_bi[i].size = 0;\
+ } \
+ /* free the buffer info list */ \
+ if ((qi)->ring.cmd_buf) \
+ devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf); \
+ /* free DMA head */ \
+ devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head); \
+} while (0)
+
/**
* ice_init_sq - main initialization routine for Control ATQ
* @hw: pointer to the hardware structure
@@ -357,6 +383,7 @@ static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
goto init_ctrlq_exit;
init_ctrlq_free_rings:
+ ICE_FREE_CQ_BUFS(hw, cq, sq);
ice_free_cq_ring(hw, &cq->sq);
init_ctrlq_exit:
@@ -416,33 +443,13 @@ static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
goto init_ctrlq_exit;
init_ctrlq_free_rings:
+ ICE_FREE_CQ_BUFS(hw, cq, rq);
ice_free_cq_ring(hw, &cq->rq);
init_ctrlq_exit:
return ret_code;
}
-#define ICE_FREE_CQ_BUFS(hw, qi, ring) \
-do { \
- int i; \
- /* free descriptors */ \
- for (i = 0; i < (qi)->num_##ring##_entries; i++) \
- if ((qi)->ring.r.ring##_bi[i].pa) { \
- dmam_free_coherent(ice_hw_to_dev(hw), \
- (qi)->ring.r.ring##_bi[i].size,\
- (qi)->ring.r.ring##_bi[i].va,\
- (qi)->ring.r.ring##_bi[i].pa);\
- (qi)->ring.r.ring##_bi[i].va = NULL; \
- (qi)->ring.r.ring##_bi[i].pa = 0; \
- (qi)->ring.r.ring##_bi[i].size = 0; \
- } \
- /* free the buffer info list */ \
- if ((qi)->ring.cmd_buf) \
- devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf); \
- /* free dma head */ \
- devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head); \
-} while (0)
-
/**
* ice_shutdown_sq - shutdown the Control ATQ
* @hw: pointer to the hardware structure
@@ -888,7 +895,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
if (ice_sq_done(hw, cq))
break;
- mdelay(1);
+ udelay(ICE_CTL_Q_SQ_CMD_USEC);
total_delay++;
} while (total_delay < cq->sq_cmd_timeout);
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h
index e0585394d984..413c5858f105 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.h
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.h
@@ -31,8 +31,9 @@ enum ice_ctl_q {
ICE_CTL_Q_MAILBOX,
};
-/* Control Queue default settings */
-#define ICE_CTL_Q_SQ_CMD_TIMEOUT 250 /* msecs */
+/* Control Queue timeout settings - max delay 250ms */
+#define ICE_CTL_Q_SQ_CMD_TIMEOUT 2500 /* Count 2500 times */
+#define ICE_CTL_Q_SQ_CMD_USEC 100 /* Check every 100usec */
struct ice_ctl_q_ring {
void *dma_head; /* Virtual address to dma head */
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 1341fde8d53f..4c81d77cf5cc 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -2032,13 +2032,6 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
else
return -EINVAL;
- /* Tell the OS link is going down, the link will go back up when fw
- * says it is ready asynchronously
- */
- ice_print_link_msg(vsi, false);
- netif_carrier_off(netdev);
- netif_tx_stop_all_queues(netdev);
-
/* Set the FC mode and only restart AN if link is up */
status = ice_set_fc(pi, &aq_failures, link_up);
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index ec25f26069b0..63b74424e402 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -264,8 +264,14 @@
#define GLNVM_GENS_SR_SIZE_S 5
#define GLNVM_GENS_SR_SIZE_M ICE_M(0x7, 5)
#define GLNVM_ULD 0x000B6008
+#define GLNVM_ULD_PCIER_DONE_M BIT(0)
+#define GLNVM_ULD_PCIER_DONE_1_M BIT(1)
#define GLNVM_ULD_CORER_DONE_M BIT(3)
#define GLNVM_ULD_GLOBR_DONE_M BIT(4)
+#define GLNVM_ULD_POR_DONE_M BIT(5)
+#define GLNVM_ULD_POR_DONE_1_M BIT(8)
+#define GLNVM_ULD_PCIER_DONE_2_M BIT(9)
+#define GLNVM_ULD_PE_DONE_M BIT(10)
#define GLPCI_CNF2 0x000BE004
#define GLPCI_CNF2_CACHELINE_SIZE_M BIT(1)
#define PF_FUNC_RID 0x0009E880
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 1c803106e301..07fcda55f6df 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -2329,7 +2329,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
if (err) {
dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
err = -EIO;
- goto err_init_interrupt_unroll;
+ goto err_init_vsi_unroll;
}
/* Driver is mostly up */
@@ -2394,6 +2394,7 @@ err_msix_misc_unroll:
ice_free_irq_msix_misc(pf);
err_init_interrupt_unroll:
ice_clear_interrupt_scheme(pf);
+err_init_vsi_unroll:
devm_kfree(dev, pf->vsi);
err_init_pf_unroll:
ice_deinit_pf(pf);
@@ -3466,8 +3467,13 @@ static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
}
ice_for_each_txq(vsi, i) {
- vsi->tx_rings[i]->netdev = vsi->netdev;
- err = ice_setup_tx_ring(vsi->tx_rings[i]);
+ struct ice_ring *ring = vsi->tx_rings[i];
+
+ if (!ring)
+ return -EINVAL;
+
+ ring->netdev = vsi->netdev;
+ err = ice_setup_tx_ring(ring);
if (err)
break;
}
@@ -3492,8 +3498,13 @@ static int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
}
ice_for_each_rxq(vsi, i) {
- vsi->rx_rings[i]->netdev = vsi->netdev;
- err = ice_setup_rx_ring(vsi->rx_rings[i]);
+ struct ice_ring *ring = vsi->rx_rings[i];
+
+ if (!ring)
+ return -EINVAL;
+
+ ring->netdev = vsi->netdev;
+ err = ice_setup_rx_ring(ring);
if (err)
break;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index 8d49f83be7a5..88aeb8f6913a 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -1052,7 +1052,7 @@ enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
struct ice_aqc_query_txsched_res_resp *buf;
enum ice_status status = 0;
__le16 max_sibl;
- u8 i;
+ u16 i;
if (hw->layer_info)
return status;
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index 81ea77978355..f4e9c6ba568f 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -275,8 +275,9 @@ void ice_free_vfs(struct ice_pf *pf)
pf->num_alloc_vfs = 0;
for (i = 0; i < tmp; i++) {
if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) {
- /* disable VF qp mappings */
+ /* disable VF qp mappings and set VF disable state */
ice_dis_vf_mappings(&pf->vf[i]);
+ set_bit(ICE_VF_STATE_DIS, pf->vf[i].vf_states);
/* Set this state so that assigned VF vectors can be
* reclaimed by PF for reuse in ice_vsi_release(). No
@@ -1095,9 +1096,12 @@ static void ice_vc_notify_vf_reset(struct ice_vf *vf)
if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
return;
- /* verify if the VF is in either init or active before proceeding */
- if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
- !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+ /* Bail out if VF is in disabled state, neither initialized, nor active
+ * state - otherwise proceed with notifications
+ */
+ if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
+ !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) ||
+ test_bit(ICE_VF_STATE_DIS, vf->vf_states))
return;
pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
@@ -1658,8 +1662,8 @@ static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_queue_select *vqs =
(struct virtchnl_queue_select *)msg;
+ struct ice_eth_stats stats = { 0 };
struct ice_pf *pf = vf->pf;
- struct ice_eth_stats stats;
struct ice_vsi *vsi;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
@@ -1678,7 +1682,6 @@ static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
goto error_param;
}
- memset(&stats, 0, sizeof(struct ice_eth_stats));
ice_update_eth_stats(vsi);
stats = vsi->eth_stats;
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index bafdcf70a353..fdab974b245b 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -530,7 +530,7 @@ static s32 igb_set_sfp_media_type_82575(struct e1000_hw *hw)
dev_spec->module_plugged = true;
if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) {
hw->phy.media_type = e1000_media_type_internal_serdes;
- } else if (eth_flags->e100_base_fx) {
+ } else if (eth_flags->e100_base_fx || eth_flags->e100_base_lx) {
dev_spec->sgmii_active = true;
hw->phy.media_type = e1000_media_type_internal_serdes;
} else if (eth_flags->e1000_base_t) {
@@ -657,14 +657,10 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
break;
}
- /* do not change link mode for 100BaseFX */
- if (dev_spec->eth_flags.e100_base_fx)
- break;
-
/* change current link mode setting */
ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK;
- if (hw->phy.media_type == e1000_media_type_copper)
+ if (dev_spec->sgmii_active)
ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII;
else
ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index c645d9e648e0..fcfbc0407dd9 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -143,7 +143,8 @@ static int igb_get_link_ksettings(struct net_device *netdev,
u32 speed;
u32 supported, advertising;
- status = rd32(E1000_STATUS);
+ status = pm_runtime_suspended(&adapter->pdev->dev) ?
+ 0 : rd32(E1000_STATUS);
if (hw->phy.media_type == e1000_media_type_copper) {
supported = (SUPPORTED_10baseT_Half |
@@ -181,7 +182,7 @@ static int igb_get_link_ksettings(struct net_device *netdev,
advertising &= ~ADVERTISED_1000baseKX_Full;
}
}
- if (eth_flags->e100_base_fx) {
+ if (eth_flags->e100_base_fx || eth_flags->e100_base_lx) {
supported |= SUPPORTED_100baseT_Full;
advertising |= ADVERTISED_100baseT_Full;
}
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 005c1693efc8..879ade3adefc 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -2063,7 +2063,8 @@ static void igb_check_swap_media(struct igb_adapter *adapter)
if ((hw->phy.media_type == e1000_media_type_copper) &&
(!(connsw & E1000_CONNSW_AUTOSENSE_EN))) {
swap_now = true;
- } else if (!(connsw & E1000_CONNSW_SERDESD)) {
+ } else if ((hw->phy.media_type != e1000_media_type_copper) &&
+ !(connsw & E1000_CONNSW_SERDESD)) {
/* copper signal takes time to appear */
if (adapter->copper_tries < 4) {
adapter->copper_tries++;
@@ -6204,9 +6205,18 @@ static void igb_reset_task(struct work_struct *work)
struct igb_adapter *adapter;
adapter = container_of(work, struct igb_adapter, reset_task);
+ rtnl_lock();
+ /* If we're already down or resetting, just bail */
+ if (test_bit(__IGB_DOWN, &adapter->state) ||
+ test_bit(__IGB_RESETTING, &adapter->state)) {
+ rtnl_unlock();
+ return;
+ }
+
igb_dump(adapter);
netdev_err(adapter->netdev, "Reset adapter\n");
igb_reinit_locked(adapter);
+ rtnl_unlock();
}
/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 0bd1294ba517..39c5e6fdb72c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -2243,7 +2243,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
}
/* Configure pause time (2 TCs per register) */
- reg = hw->fc.pause_time * 0x00010001;
+ reg = hw->fc.pause_time * 0x00010001U;
for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index cc3196ae5aea..636e6e840afa 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -923,7 +923,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
ring->queue_index = txr_idx;
/* assign ring to adapter */
- adapter->tx_ring[txr_idx] = ring;
+ WRITE_ONCE(adapter->tx_ring[txr_idx], ring);
/* update count and index */
txr_count--;
@@ -950,7 +950,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
set_ring_xdp(ring);
/* assign ring to adapter */
- adapter->xdp_ring[xdp_idx] = ring;
+ WRITE_ONCE(adapter->xdp_ring[xdp_idx], ring);
/* update count and index */
xdp_count--;
@@ -993,7 +993,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
ring->queue_index = rxr_idx;
/* assign ring to adapter */
- adapter->rx_ring[rxr_idx] = ring;
+ WRITE_ONCE(adapter->rx_ring[rxr_idx], ring);
/* update count and index */
rxr_count--;
@@ -1022,13 +1022,13 @@ static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
ixgbe_for_each_ring(ring, q_vector->tx) {
if (ring_is_xdp(ring))
- adapter->xdp_ring[ring->queue_index] = NULL;
+ WRITE_ONCE(adapter->xdp_ring[ring->queue_index], NULL);
else
- adapter->tx_ring[ring->queue_index] = NULL;
+ WRITE_ONCE(adapter->tx_ring[ring->queue_index], NULL);
}
ixgbe_for_each_ring(ring, q_vector->rx)
- adapter->rx_ring[ring->queue_index] = NULL;
+ WRITE_ONCE(adapter->rx_ring[ring->queue_index], NULL);
adapter->q_vector[v_idx] = NULL;
napi_hash_del(&q_vector->napi);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index f9f473ae4abe..93857de8261d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -2252,7 +2252,8 @@ static void ixgbe_rx_buffer_flip(struct ixgbe_ring *rx_ring,
rx_buffer->page_offset ^= truesize;
#else
unsigned int truesize = ring_uses_build_skb(rx_ring) ?
- SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) :
+ SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
SKB_DATA_ALIGN(size);
rx_buffer->page_offset += truesize;
@@ -5238,7 +5239,7 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
struct hlist_node *node2;
struct ixgbe_fdir_filter *filter;
- u64 action;
+ u8 queue;
spin_lock(&adapter->fdir_perfect_lock);
@@ -5247,17 +5248,34 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
hlist_for_each_entry_safe(filter, node2,
&adapter->fdir_filter_list, fdir_node) {
- action = filter->action;
- if (action != IXGBE_FDIR_DROP_QUEUE && action != 0)
- action =
- (action >> ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF) - 1;
+ if (filter->action == IXGBE_FDIR_DROP_QUEUE) {
+ queue = IXGBE_FDIR_DROP_QUEUE;
+ } else {
+ u32 ring = ethtool_get_flow_spec_ring(filter->action);
+ u8 vf = ethtool_get_flow_spec_ring_vf(filter->action);
+
+ if (!vf && (ring >= adapter->num_rx_queues)) {
+ e_err(drv, "FDIR restore failed without VF, ring: %u\n",
+ ring);
+ continue;
+ } else if (vf &&
+ ((vf > adapter->num_vfs) ||
+ ring >= adapter->num_rx_queues_per_pool)) {
+ e_err(drv, "FDIR restore failed with VF, vf: %hhu, ring: %u\n",
+ vf, ring);
+ continue;
+ }
+
+ /* Map the ring onto the absolute queue index */
+ if (!vf)
+ queue = adapter->rx_ring[ring]->reg_idx;
+ else
+ queue = ((vf - 1) *
+ adapter->num_rx_queues_per_pool) + ring;
+ }
ixgbe_fdir_write_perfect_filter_82599(hw,
- &filter->filter,
- filter->sw_idx,
- (action == IXGBE_FDIR_DROP_QUEUE) ?
- IXGBE_FDIR_DROP_QUEUE :
- adapter->rx_ring[action]->reg_idx);
+ &filter->filter, filter->sw_idx, queue);
}
spin_unlock(&adapter->fdir_perfect_lock);
@@ -7041,7 +7059,10 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
}
for (i = 0; i < adapter->num_rx_queues; i++) {
- struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
+ struct ixgbe_ring *rx_ring = READ_ONCE(adapter->rx_ring[i]);
+
+ if (!rx_ring)
+ continue;
non_eop_descs += rx_ring->rx_stats.non_eop_descs;
alloc_rx_page += rx_ring->rx_stats.alloc_rx_page;
alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
@@ -7062,15 +7083,20 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
packets = 0;
/* gather some stats to the adapter struct that are per queue */
for (i = 0; i < adapter->num_tx_queues; i++) {
- struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
+ struct ixgbe_ring *tx_ring = READ_ONCE(adapter->tx_ring[i]);
+
+ if (!tx_ring)
+ continue;
restart_queue += tx_ring->tx_stats.restart_queue;
tx_busy += tx_ring->tx_stats.tx_busy;
bytes += tx_ring->stats.bytes;
packets += tx_ring->stats.packets;
}
for (i = 0; i < adapter->num_xdp_queues; i++) {
- struct ixgbe_ring *xdp_ring = adapter->xdp_ring[i];
+ struct ixgbe_ring *xdp_ring = READ_ONCE(adapter->xdp_ring[i]);
+ if (!xdp_ring)
+ continue;
restart_queue += xdp_ring->tx_stats.restart_queue;
tx_busy += xdp_ring->tx_stats.tx_busy;
bytes += xdp_ring->stats.bytes;
@@ -8633,7 +8659,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
adapter->ptp_clock) {
- if (!test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
+ if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON &&
+ !test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
&adapter->state)) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
tx_flags |= IXGBE_TX_FLAGS_TSTAMP;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index ac6c18821958..e35195a38122 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -2078,11 +2078,6 @@ static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
struct ixgbe_hw *hw = &adapter->hw;
int count = 0;
- if ((netdev_uc_count(netdev)) > 10) {
- pr_err("Too many unicast filters - No Space\n");
- return -ENOSPC;
- }
-
if (!netdev_uc_empty(netdev)) {
struct netdev_hw_addr *ha;