Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_main.c')
-rw-r--r-- | drivers/net/ethernet/intel/ice/ice_main.c | 2624
1 file changed, 2120 insertions(+), 504 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 3eea68f3a526..a5530f7bbc26 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -6,12 +6,16 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include "ice.h" +#include "ice_base.h" #include "ice_lib.h" +#include "ice_fltr.h" #include "ice_dcb_lib.h" +#include "ice_dcb_nl.h" +#include "ice_devlink.h" #define DRV_VERSION_MAJOR 0 #define DRV_VERSION_MINOR 8 -#define DRV_VERSION_BUILD 1 +#define DRV_VERSION_BUILD 2 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ @@ -42,6 +46,7 @@ MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)"); static struct workqueue_struct *ice_wq; static const struct net_device_ops ice_netdev_safe_mode_ops; static const struct net_device_ops ice_netdev_ops; +static int ice_vsi_open(struct ice_vsi *vsi); static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type); @@ -129,39 +134,24 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf) static int ice_init_mac_fltr(struct ice_pf *pf) { enum ice_status status; - u8 broadcast[ETH_ALEN]; struct ice_vsi *vsi; + u8 *perm_addr; vsi = ice_get_main_vsi(pf); if (!vsi) return -EINVAL; - /* To add a MAC filter, first add the MAC to a list and then - * pass the list to ice_add_mac. - */ - - /* Add a unicast MAC filter so the VSI can get its packets */ - status = ice_vsi_cfg_mac_fltr(vsi, vsi->port_info->mac.perm_addr, true); - if (status) - goto unregister; - - /* VSI needs to receive broadcast traffic, so add the broadcast - * MAC address to the list as well. - */ - eth_broadcast_addr(broadcast); - status = ice_vsi_cfg_mac_fltr(vsi, broadcast, true); - if (status) - goto unregister; + perm_addr = vsi->port_info->mac.perm_addr; + status = ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI); + if (!status) + return 0; - return 0; -unregister: /* We aren't useful with no MAC filters, so unregister if we * had an error */ - if (status && vsi->netdev->reg_state == NETREG_REGISTERED) { - dev_err(&pf->pdev->dev, - "Could not add MAC filters error %d. Unregistering device\n", - status); + if (vsi->netdev->reg_state == NETREG_REGISTERED) { + dev_err(ice_pf_to_dev(pf), "Could not add MAC filters error %s. 
Unregistering device\n", + ice_stat_str(status)); unregister_netdev(vsi->netdev); free_netdev(vsi->netdev); vsi->netdev = NULL; @@ -185,7 +175,8 @@ static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr) struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; - if (ice_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr)) + if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr, + ICE_FWD_TO_VSI)) return -EINVAL; return 0; @@ -206,7 +197,8 @@ static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr) struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; - if (ice_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr)) + if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr, + ICE_FWD_TO_VSI)) return -EINVAL; return 0; @@ -266,7 +258,7 @@ static int ice_cfg_promisc(struct ice_vsi *vsi, u8 promisc_m, bool set_promisc) */ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) { - struct device *dev = &vsi->back->pdev->dev; + struct device *dev = ice_pf_to_dev(vsi->back); struct net_device *netdev = vsi->netdev; bool promisc_forced_on = false; struct ice_pf *pf = vsi->back; @@ -304,8 +296,8 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) } /* Remove MAC addresses in the unsync list */ - status = ice_remove_mac(hw, &vsi->tmp_unsync_list); - ice_free_fltr_list(dev, &vsi->tmp_unsync_list); + status = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list); + ice_fltr_free_list(dev, &vsi->tmp_unsync_list); if (status) { netdev_err(netdev, "Failed to delete MAC filters\n"); /* if we failed because of alloc failures, just bail */ @@ -316,8 +308,8 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) } /* Add MAC addresses in the sync list */ - status = ice_add_mac(hw, &vsi->tmp_sync_list); - ice_free_fltr_list(dev, &vsi->tmp_sync_list); + status = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list); + ice_fltr_free_list(dev, &vsi->tmp_sync_list); /* If filter is added successfully or already exists, do not go into * 'if' condition and report it as error. Instead continue processing * rest of the function. 
@@ -332,8 +324,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) !test_and_set_bit(__ICE_FLTR_OVERFLOW_PROMISC, vsi->state)) { promisc_forced_on = true; - netdev_warn(netdev, - "Reached MAC filter limit, forcing promisc mode on VSI %d\n", + netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n", vsi->vsi_num); } else { err = -EIO; @@ -355,7 +346,8 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) vsi->current_netdev_flags &= ~IFF_ALLMULTI; goto out_promisc; } - } else if (!(vsi->current_netdev_flags & IFF_ALLMULTI)) { + } else { + /* !(vsi->current_netdev_flags & IFF_ALLMULTI) */ if (vsi->vlan_ena) promisc_m = ICE_MCAST_VLAN_PROMISC_BITS; else @@ -376,25 +368,30 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) clear_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags); if (vsi->current_netdev_flags & IFF_PROMISC) { /* Apply Rx filter rule to get traffic from wire */ - status = ice_cfg_dflt_vsi(hw, vsi->idx, true, - ICE_FLTR_RX); - if (status) { - netdev_err(netdev, "Error setting default VSI %i Rx rule\n", - vsi->vsi_num); - vsi->current_netdev_flags &= ~IFF_PROMISC; - err = -EIO; - goto out_promisc; + if (!ice_is_dflt_vsi_in_use(pf->first_sw)) { + err = ice_set_dflt_vsi(pf->first_sw, vsi); + if (err && err != -EEXIST) { + netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n", + err, vsi->vsi_num); + vsi->current_netdev_flags &= + ~IFF_PROMISC; + goto out_promisc; + } + ice_cfg_vlan_pruning(vsi, false, false); } } else { /* Clear Rx filter to remove traffic from wire */ - status = ice_cfg_dflt_vsi(hw, vsi->idx, false, - ICE_FLTR_RX); - if (status) { - netdev_err(netdev, "Error clearing default VSI %i Rx rule\n", - vsi->vsi_num); - vsi->current_netdev_flags |= IFF_PROMISC; - err = -EIO; - goto out_promisc; + if (ice_is_vsi_dflt_vsi(pf->first_sw, vsi)) { + err = ice_clear_dflt_vsi(pf->first_sw); + if (err) { + netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n", + err, vsi->vsi_num); + vsi->current_netdev_flags |= + IFF_PROMISC; + goto out_promisc; + } + if (vsi->num_vlan > 1) + ice_cfg_vlan_pruning(vsi, true, false); } } } @@ -435,42 +432,11 @@ static void ice_sync_fltr_subtask(struct ice_pf *pf) } /** - * ice_dis_vsi - pause a VSI - * @vsi: the VSI being paused - * @locked: is the rtnl_lock already held - */ -static void ice_dis_vsi(struct ice_vsi *vsi, bool locked) -{ - if (test_bit(__ICE_DOWN, vsi->state)) - return; - - set_bit(__ICE_NEEDS_RESTART, vsi->state); - - if (vsi->type == ICE_VSI_PF && vsi->netdev) { - if (netif_running(vsi->netdev)) { - if (!locked) - rtnl_lock(); - - ice_stop(vsi->netdev); - - if (!locked) - rtnl_unlock(); - } else { - ice_vsi_close(vsi); - } - } -} - -/** * ice_pf_dis_all_vsi - Pause all VSIs on a PF * @pf: the PF * @locked: is the rtnl_lock already held */ -#ifdef CONFIG_DCB -void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked) -#else static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked) -#endif /* CONFIG_DCB */ { int v; @@ -489,7 +455,7 @@ static void ice_prepare_for_reset(struct ice_pf *pf) { struct ice_hw *hw = &pf->hw; - int i; + unsigned int i; /* already prepared for reset */ if (test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) @@ -500,7 +466,7 @@ ice_prepare_for_reset(struct ice_pf *pf) ice_vc_notify_reset(pf); /* Disable VFs until reset is completed */ - for (i = 0; i < pf->num_alloc_vfs; i++) + ice_for_each_vf(pf, i) ice_set_vf_state_qs_dis(&pf->vf[i]); /* clear SW filtering DB */ @@ -524,7 +490,7 @@ ice_prepare_for_reset(struct ice_pf *pf) */ static void ice_do_reset(struct ice_pf *pf, 
enum ice_reset_req reset_type) { - struct device *dev = &pf->pdev->dev; + struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; dev_dbg(dev, "reset_type 0x%x requested\n", reset_type); @@ -636,8 +602,14 @@ static void ice_print_topo_conflict(struct ice_vsi *vsi) switch (vsi->port_info->phy.link_info.topo_media_conflict) { case ICE_AQ_LINK_TOPO_CONFLICT: case ICE_AQ_LINK_MEDIA_CONFLICT: + case ICE_AQ_LINK_TOPO_UNREACH_PRT: + case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT: + case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA: netdev_info(vsi->netdev, "Possible mis-configuration of the Ethernet port detected, please use the Intel(R) Ethernet Port Configuration Tool application to address the issue.\n"); break; + case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA: + netdev_info(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n"); + break; default: break; } @@ -728,7 +700,6 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup) /* Get FEC mode based on negotiated link info */ switch (vsi->port_info->phy.link_info.fec_info) { case ICE_AQ_LINK_25G_RS_528_FEC_EN: - /* fall through */ case ICE_AQ_LINK_25G_RS_544_FEC_EN: fec = "RS-FEC"; break; @@ -747,7 +718,7 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup) an = "False"; /* Get FEC mode requested based on PHY caps last SW configuration */ - caps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*caps), GFP_KERNEL); + caps = kzalloc(sizeof(*caps), GFP_KERNEL); if (!caps) { fec_req = "Unknown"; goto done; @@ -767,10 +738,10 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup) else fec_req = "NONE"; - devm_kfree(&vsi->back->pdev->dev, caps); + kfree(caps); done: - netdev_info(vsi->netdev, "NIC Link is up %sbps, Requested FEC: %s, FEC: %s, Autoneg: %s, Flow Control: %s\n", + netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n", speed, fec_req, fec, an, fc); ice_print_topo_conflict(vsi); } @@ -815,6 +786,7 @@ static int ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up, u16 link_speed) { + struct device *dev = ice_pf_to_dev(pf); struct ice_phy_info *phy_info; struct ice_vsi *vsi; u16 old_link_speed; @@ -832,13 +804,14 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up, */ result = ice_update_link_info(pi); if (result) - dev_dbg(&pf->pdev->dev, - "Failed to update link status and re-enable link events for port %d\n", + dev_dbg(dev, "Failed to update link status and re-enable link events for port %d\n", pi->lport); - /* if the old link up/down and speed is the same as the new */ - if (link_up == old_link && link_speed == old_link_speed) - return result; + /* Check if the link state is up after updating link info, and treat + * this event as an UP event since the link is actually UP now. 
+ */ + if (phy_info->link_info.link_info & ICE_AQ_LINK_UP) + link_up = true; vsi = ice_get_main_vsi(pf); if (!vsi || !vsi->port_info) @@ -851,18 +824,21 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up, result = ice_aq_set_link_restart_an(pi, false, NULL); if (result) { - dev_dbg(&pf->pdev->dev, - "Failed to set link down, VSI %d error %d\n", + dev_dbg(dev, "Failed to set link down, VSI %d error %d\n", vsi->vsi_num, result); return result; } } + /* if the old link up/down and speed is the same as the new */ + if (link_up == old_link && link_speed == old_link_speed) + return result; + + ice_dcb_rebuild(pf); ice_vsi_link_event(vsi, link_up); ice_print_link_msg(vsi, link_up); - if (pf->num_alloc_vfs) - ice_vc_notify_link_state(pf); + ice_vc_notify_link_state(pf); return result; } @@ -910,15 +886,13 @@ static int ice_init_link_events(struct ice_port_info *pi) ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL)); if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) { - dev_dbg(ice_hw_to_dev(pi->hw), - "Failed to set link event mask for port %d\n", + dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n", pi->lport); return -EIO; } if (ice_aq_get_link_info(pi, true, NULL, NULL)) { - dev_dbg(ice_hw_to_dev(pi->hw), - "Failed to enable link events for port %d\n", + dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n", pi->lport); return -EIO; } @@ -947,8 +921,8 @@ ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event) !!(link_data->link_info & ICE_AQ_LINK_UP), le16_to_cpu(link_data->link_speed)); if (status) - dev_dbg(&pf->pdev->dev, - "Could not process link event, error %d\n", status); + dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n", + status); return status; } @@ -960,6 +934,7 @@ ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event) */ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) { + struct device *dev = ice_pf_to_dev(pf); struct ice_rq_event_info event; struct ice_hw *hw = &pf->hw; struct ice_ctl_q_info *cq; @@ -981,8 +956,7 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) qtype = "Mailbox"; break; default: - dev_warn(&pf->pdev->dev, "Unknown control queue type 0x%x\n", - q_type); + dev_warn(dev, "Unknown control queue type 0x%x\n", q_type); return 0; } @@ -994,16 +968,14 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) PF_FW_ARQLEN_ARQCRIT_M)) { oldval = val; if (val & PF_FW_ARQLEN_ARQVFE_M) - dev_dbg(&pf->pdev->dev, - "%s Receive Queue VF Error detected\n", qtype); + dev_dbg(dev, "%s Receive Queue VF Error detected\n", + qtype); if (val & PF_FW_ARQLEN_ARQOVFL_M) { - dev_dbg(&pf->pdev->dev, - "%s Receive Queue Overflow Error detected\n", + dev_dbg(dev, "%s Receive Queue Overflow Error detected\n", qtype); } if (val & PF_FW_ARQLEN_ARQCRIT_M) - dev_dbg(&pf->pdev->dev, - "%s Receive Queue Critical Error detected\n", + dev_dbg(dev, "%s Receive Queue Critical Error detected\n", qtype); val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M | PF_FW_ARQLEN_ARQCRIT_M); @@ -1016,16 +988,14 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) PF_FW_ATQLEN_ATQCRIT_M)) { oldval = val; if (val & PF_FW_ATQLEN_ATQVFE_M) - dev_dbg(&pf->pdev->dev, - "%s Send Queue VF Error detected\n", qtype); + dev_dbg(dev, "%s Send Queue VF Error detected\n", + qtype); if (val & PF_FW_ATQLEN_ATQOVFL_M) { - dev_dbg(&pf->pdev->dev, - "%s Send Queue Overflow Error detected\n", + dev_dbg(dev, "%s Send Queue 
Overflow Error detected\n", qtype); } if (val & PF_FW_ATQLEN_ATQCRIT_M) - dev_dbg(&pf->pdev->dev, - "%s Send Queue Critical Error detected\n", + dev_dbg(dev, "%s Send Queue Critical Error detected\n", qtype); val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M | PF_FW_ATQLEN_ATQCRIT_M); @@ -1034,8 +1004,7 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) } event.buf_len = cq->rq_buf_size; - event.msg_buf = devm_kzalloc(&pf->pdev->dev, event.buf_len, - GFP_KERNEL); + event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); if (!event.msg_buf) return 0; @@ -1047,9 +1016,8 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) if (ret == ICE_ERR_AQ_NO_WORK) break; if (ret) { - dev_err(&pf->pdev->dev, - "%s Receive Queue event error %d\n", qtype, - ret); + dev_err(dev, "%s Receive Queue event error %s\n", qtype, + ice_stat_str(ret)); break; } @@ -1058,8 +1026,10 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) switch (opcode) { case ice_aqc_opc_get_link_status: if (ice_handle_link_event(pf, &event)) - dev_err(&pf->pdev->dev, - "Could not handle link event\n"); + dev_err(dev, "Could not handle link event\n"); + break; + case ice_aqc_opc_event_lan_overflow: + ice_vf_lan_overflow_event(pf, &event); break; case ice_mbx_opc_send_msg_to_pf: ice_vc_process_vf_msg(pf, &event); @@ -1071,14 +1041,13 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) ice_dcb_process_lldp_set_mib_change(pf, &event); break; default: - dev_dbg(&pf->pdev->dev, - "%s Receive Queue unknown event 0x%04x ignored\n", + dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n", qtype, opcode); break; } } while (pending && (i++ < ICE_DFLT_IRQ_WORK)); - devm_kfree(&pf->pdev->dev, event.msg_buf); + kfree(event.msg_buf); return pending && (i == ICE_DFLT_IRQ_WORK); } @@ -1153,7 +1122,7 @@ static void ice_clean_mailboxq_subtask(struct ice_pf *pf) * * If not already scheduled, this puts the task into the work queue. */ -static void ice_service_task_schedule(struct ice_pf *pf) +void ice_service_task_schedule(struct ice_pf *pf) { if (!test_bit(__ICE_SERVICE_DIS, pf->state) && !test_and_set_bit(__ICE_SERVICE_SCHED, pf->state) && @@ -1177,10 +1146,15 @@ static void ice_service_task_complete(struct ice_pf *pf) /** * ice_service_task_stop - stop service task and cancel works * @pf: board private structure + * + * Return 0 if the __ICE_SERVICE_DIS bit was not already set, + * 1 otherwise. */ -static void ice_service_task_stop(struct ice_pf *pf) +static int ice_service_task_stop(struct ice_pf *pf) { - set_bit(__ICE_SERVICE_DIS, pf->state); + int ret; + + ret = test_and_set_bit(__ICE_SERVICE_DIS, pf->state); if (pf->serv_tmr.function) del_timer_sync(&pf->serv_tmr); @@ -1188,6 +1162,7 @@ static void ice_service_task_stop(struct ice_pf *pf) cancel_work_sync(&pf->serv_task); clear_bit(__ICE_SERVICE_SCHED, pf->state); + return ret; } /** @@ -1218,19 +1193,28 @@ static void ice_service_timer(struct timer_list *t) * ice_handle_mdd_event - handle malicious driver detect event * @pf: pointer to the PF structure * - * Called from service task. OICR interrupt handler indicates MDD event + * Called from service task. OICR interrupt handler indicates MDD event. + * VF MDD logging is guarded by net_ratelimit. Additional PF and VF log + * messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events + * disable the queue, the PF can be configured to reset the VF using ethtool + * private flag mdd-auto-reset-vf. 
*/ static void ice_handle_mdd_event(struct ice_pf *pf) { + struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; - bool mdd_detected = false; + unsigned int i; u32 reg; - int i; - if (!test_and_clear_bit(__ICE_MDD_EVENT_PENDING, pf->state)) + if (!test_and_clear_bit(__ICE_MDD_EVENT_PENDING, pf->state)) { + /* Since the VF MDD event logging is rate limited, check if + * there are pending MDD events. + */ + ice_print_vfs_mdd_events(pf); return; + } - /* find what triggered the MDD event */ + /* find what triggered an MDD event */ reg = rd32(hw, GL_MDET_TX_PQM); if (reg & GL_MDET_TX_PQM_VALID_M) { u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >> @@ -1243,10 +1227,9 @@ static void ice_handle_mdd_event(struct ice_pf *pf) GL_MDET_TX_PQM_QNUM_S); if (netif_msg_tx_err(pf)) - dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n", + dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n", event, queue, pf_num, vf_num); wr32(hw, GL_MDET_TX_PQM, 0xffffffff); - mdd_detected = true; } reg = rd32(hw, GL_MDET_TX_TCLAN); @@ -1260,11 +1243,10 @@ static void ice_handle_mdd_event(struct ice_pf *pf) u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >> GL_MDET_TX_TCLAN_QNUM_S); - if (netif_msg_rx_err(pf)) - dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n", + if (netif_msg_tx_err(pf)) + dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n", event, queue, pf_num, vf_num); wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff); - mdd_detected = true; } reg = rd32(hw, GL_MDET_RX); @@ -1279,89 +1261,93 @@ static void ice_handle_mdd_event(struct ice_pf *pf) GL_MDET_RX_QNUM_S); if (netif_msg_rx_err(pf)) - dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n", + dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n", event, queue, pf_num, vf_num); wr32(hw, GL_MDET_RX, 0xffffffff); - mdd_detected = true; } - if (mdd_detected) { - bool pf_mdd_detected = false; - - reg = rd32(hw, PF_MDET_TX_PQM); - if (reg & PF_MDET_TX_PQM_VALID_M) { - wr32(hw, PF_MDET_TX_PQM, 0xFFFF); - dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n"); - pf_mdd_detected = true; - } + /* check to see if this PF caused an MDD event */ + reg = rd32(hw, PF_MDET_TX_PQM); + if (reg & PF_MDET_TX_PQM_VALID_M) { + wr32(hw, PF_MDET_TX_PQM, 0xFFFF); + if (netif_msg_tx_err(pf)) + dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n"); + } - reg = rd32(hw, PF_MDET_TX_TCLAN); - if (reg & PF_MDET_TX_TCLAN_VALID_M) { - wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF); - dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n"); - pf_mdd_detected = true; - } + reg = rd32(hw, PF_MDET_TX_TCLAN); + if (reg & PF_MDET_TX_TCLAN_VALID_M) { + wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF); + if (netif_msg_tx_err(pf)) + dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n"); + } - reg = rd32(hw, PF_MDET_RX); - if (reg & PF_MDET_RX_VALID_M) { - wr32(hw, PF_MDET_RX, 0xFFFF); - dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n"); - pf_mdd_detected = true; - } - /* Queue belongs to the PF initiate a reset */ - if (pf_mdd_detected) { - set_bit(__ICE_NEEDS_RESTART, pf->state); - ice_service_task_schedule(pf); - } + reg = rd32(hw, PF_MDET_RX); + if (reg & PF_MDET_RX_VALID_M) { + wr32(hw, PF_MDET_RX, 0xFFFF); + if (netif_msg_rx_err(pf)) + dev_info(dev, "Malicious Driver Detection event RX detected on PF\n"); } - /* check 
to see if one of the VFs caused the MDD */ - for (i = 0; i < pf->num_alloc_vfs; i++) { + /* Check to see if one of the VFs caused an MDD event, and then + * increment counters and set print pending + */ + ice_for_each_vf(pf, i) { struct ice_vf *vf = &pf->vf[i]; - bool vf_mdd_detected = false; - reg = rd32(hw, VP_MDET_TX_PQM(i)); if (reg & VP_MDET_TX_PQM_VALID_M) { wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF); - vf_mdd_detected = true; - dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n", - i); + vf->mdd_tx_events.count++; + set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state); + if (netif_msg_tx_err(pf)) + dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n", + i); } reg = rd32(hw, VP_MDET_TX_TCLAN(i)); if (reg & VP_MDET_TX_TCLAN_VALID_M) { wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF); - vf_mdd_detected = true; - dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n", - i); + vf->mdd_tx_events.count++; + set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state); + if (netif_msg_tx_err(pf)) + dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n", + i); } reg = rd32(hw, VP_MDET_TX_TDPU(i)); if (reg & VP_MDET_TX_TDPU_VALID_M) { wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF); - vf_mdd_detected = true; - dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n", - i); + vf->mdd_tx_events.count++; + set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state); + if (netif_msg_tx_err(pf)) + dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n", + i); } reg = rd32(hw, VP_MDET_RX(i)); if (reg & VP_MDET_RX_VALID_M) { wr32(hw, VP_MDET_RX(i), 0xFFFF); - vf_mdd_detected = true; - dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n", - i); - } - - if (vf_mdd_detected) { - vf->num_mdd_events++; - if (vf->num_mdd_events && - vf->num_mdd_events <= ICE_MDD_EVENTS_THRESHOLD) - dev_info(&pf->pdev->dev, - "VF %d has had %llu MDD events since last boot, Admin might need to reload AVF driver with this number of events\n", - i, vf->num_mdd_events); + vf->mdd_rx_events.count++; + set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state); + if (netif_msg_rx_err(pf)) + dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n", + i); + + /* Since the queue is disabled on VF Rx MDD events, the + * PF can be configured to reset the VF through ethtool + * private flag mdd-auto-reset-vf. + */ + if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) { + /* VF MDD event counters will be cleared by + * reset, so print the event prior to reset. + */ + ice_print_vf_rx_mdd_event(vf); + ice_reset_vf(&pf->vf[i], false); + } } } + + ice_print_vfs_mdd_events(pf); } /** @@ -1389,19 +1375,18 @@ static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up) if (vsi->type != ICE_VSI_PF) return 0; - dev = &vsi->back->pdev->dev; + dev = ice_pf_to_dev(vsi->back); pi = vsi->port_info; - pcaps = devm_kzalloc(dev, sizeof(*pcaps), GFP_KERNEL); + pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); if (!pcaps) return -ENOMEM; retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps, NULL); if (retcode) { - dev_err(dev, - "Failed to get phy capabilities, VSI %d error %d\n", + dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n", vsi->vsi_num, retcode); retcode = -EIO; goto out; @@ -1412,40 +1397,249 @@ static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up) link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP)) goto out; - cfg = devm_kzalloc(dev, sizeof(*cfg), GFP_KERNEL); + /* Use the current user PHY configuration. 
The current user PHY + * configuration is initialized during probe from PHY capabilities + * software mode, and updated on set PHY configuration. + */ + cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL); if (!cfg) { retcode = -ENOMEM; goto out; } - cfg->phy_type_low = pcaps->phy_type_low; - cfg->phy_type_high = pcaps->phy_type_high; - cfg->caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; - cfg->low_power_ctrl = pcaps->low_power_ctrl; - cfg->eee_cap = pcaps->eee_cap; - cfg->eeer_value = pcaps->eeer_value; - cfg->link_fec_opt = pcaps->link_fec_options; + cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; if (link_up) cfg->caps |= ICE_AQ_PHY_ENA_LINK; else cfg->caps &= ~ICE_AQ_PHY_ENA_LINK; - retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi->lport, cfg, NULL); + retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL); if (retcode) { dev_err(dev, "Failed to set phy config, VSI %d error %d\n", vsi->vsi_num, retcode); retcode = -EIO; } - devm_kfree(dev, cfg); + kfree(cfg); out: - devm_kfree(dev, pcaps); + kfree(pcaps); return retcode; } /** - * ice_check_media_subtask - Check for media; bring link up if detected. + * ice_init_nvm_phy_type - Initialize the NVM PHY type + * @pi: port info structure + * + * Initialize nvm_phy_type_[low|high] + */ +static int ice_init_nvm_phy_type(struct ice_port_info *pi) +{ + struct ice_aqc_get_phy_caps_data *pcaps; + struct ice_pf *pf = pi->hw->back; + enum ice_status status; + int err = 0; + + pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); + if (!pcaps) + return -ENOMEM; + + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_NVM_CAP, pcaps, + NULL); + + if (status) { + dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n"); + err = -EIO; + goto out; + } + + pf->nvm_phy_type_hi = pcaps->phy_type_high; + pf->nvm_phy_type_lo = pcaps->phy_type_low; + +out: + kfree(pcaps); + return err; +} + +/** + * ice_init_phy_user_cfg - Initialize the PHY user configuration + * @pi: port info structure + * + * Initialize the current user PHY configuration, speed, FEC, and FC requested + * mode to default. The PHY defaults are from get PHY capabilities topology + * with media so call when media is first available. An error is returned if + * called when media is not available. The PHY initialization completed state is + * set here. + * + * These configurations are used when setting PHY + * configuration. The user PHY configuration is updated on set PHY + * configuration. 
Returns 0 on success, negative on failure + */ +static int ice_init_phy_user_cfg(struct ice_port_info *pi) +{ + struct ice_aqc_get_phy_caps_data *pcaps; + struct ice_phy_info *phy = &pi->phy; + struct ice_pf *pf = pi->hw->back; + enum ice_status status; + struct ice_vsi *vsi; + int err = 0; + + if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) + return -EIO; + + vsi = ice_get_main_vsi(pf); + if (!vsi) + return -EINVAL; + + pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); + if (!pcaps) + return -ENOMEM; + + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps, + NULL); + if (status) { + dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n"); + err = -EIO; + goto err_out; + } + + ice_copy_phy_caps_to_cfg(pcaps, &pi->phy.curr_user_phy_cfg); + /* initialize PHY using topology with media */ + phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps, + pcaps->link_fec_options); + phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps); + + phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M; + set_bit(__ICE_PHY_INIT_COMPLETE, pf->state); +err_out: + kfree(pcaps); + return err; +} + +/** + * ice_configure_phy - configure PHY + * @vsi: VSI of PHY + * + * Set the PHY configuration. If the current PHY configuration is the same as + * the curr_user_phy_cfg, then do nothing to avoid link flap. Otherwise + * configure the based get PHY capabilities for topology with media. + */ +static int ice_configure_phy(struct ice_vsi *vsi) +{ + struct device *dev = ice_pf_to_dev(vsi->back); + struct ice_aqc_get_phy_caps_data *pcaps; + struct ice_aqc_set_phy_cfg_data *cfg; + u64 phy_low = 0, phy_high = 0; + struct ice_port_info *pi; + enum ice_status status; + int err = 0; + + pi = vsi->port_info; + if (!pi) + return -EINVAL; + + /* Ensure we have media as we cannot configure a medialess port */ + if (!(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) + return -EPERM; + + ice_print_topo_conflict(vsi); + + if (vsi->port_info->phy.link_info.topo_media_conflict == + ICE_AQ_LINK_TOPO_UNSUPP_MEDIA) + return -EPERM; + + if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) + return ice_force_phys_link_state(vsi, true); + + pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); + if (!pcaps) + return -ENOMEM; + + /* Get current PHY config */ + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps, + NULL); + if (status) { + dev_err(dev, "Failed to get PHY configuration, VSI %d error %s\n", + vsi->vsi_num, ice_stat_str(status)); + err = -EIO; + goto done; + } + + /* If PHY enable link is configured and configuration has not changed, + * there's nothing to do + */ + if (pcaps->caps & ICE_AQC_PHY_EN_LINK && + ice_phy_caps_equals_cfg(pcaps, &pi->phy.curr_user_phy_cfg)) + goto done; + + /* Use PHY topology as baseline for configuration */ + memset(pcaps, 0, sizeof(*pcaps)); + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps, + NULL); + if (status) { + dev_err(dev, "Failed to get PHY topology, VSI %d error %s\n", + vsi->vsi_num, ice_stat_str(status)); + err = -EIO; + goto done; + } + + cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); + if (!cfg) { + err = -ENOMEM; + goto done; + } + + ice_copy_phy_caps_to_cfg(pcaps, cfg); + + /* Speed - If default override pending, use curr_user_phy_cfg set in + * ice_init_phy_user_cfg_ldo. 
+ */ + ice_update_phy_type(&phy_low, &phy_high, pi->phy.curr_user_speed_req); + cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low); + cfg->phy_type_high = pcaps->phy_type_high & cpu_to_le64(phy_high); + + /* Can't provide what was requested; use PHY capabilities */ + if (!cfg->phy_type_low && !cfg->phy_type_high) { + cfg->phy_type_low = pcaps->phy_type_low; + cfg->phy_type_high = pcaps->phy_type_high; + } + + /* FEC */ + ice_cfg_phy_fec(pi, cfg, pi->phy.curr_user_fec_req); + + /* Can't provide what was requested; use PHY capabilities */ + if (cfg->link_fec_opt != + (cfg->link_fec_opt & pcaps->link_fec_options)) { + cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC; + cfg->link_fec_opt = pcaps->link_fec_options; + } + + /* Flow Control - always supported; no need to check against + * capabilities + */ + ice_cfg_phy_fc(pi, cfg, pi->phy.curr_user_fc_req); + + /* Enable link and link update */ + cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK; + + status = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL); + if (status) { + dev_err(dev, "Failed to set phy config, VSI %d error %s\n", + vsi->vsi_num, ice_stat_str(status)); + err = -EIO; + } + + kfree(cfg); +done: + kfree(pcaps); + return err; +} + +/** + * ice_check_media_subtask - Check for media * @pf: pointer to PF struct + * + * If media is available, then initialize PHY user configuration if it is not + * been, and configure the PHY if the interface is up. */ static void ice_check_media_subtask(struct ice_pf *pf) { @@ -1453,15 +1647,12 @@ static void ice_check_media_subtask(struct ice_pf *pf) struct ice_vsi *vsi; int err; - vsi = ice_get_main_vsi(pf); - if (!vsi) + /* No need to check for media if it's already present */ + if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags)) return; - /* No need to check for media if it's already present or the interface - * is down - */ - if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) || - test_bit(__ICE_DOWN, vsi->state)) + vsi = ice_get_main_vsi(pf); + if (!vsi) return; /* Refresh link info and check if media is present */ @@ -1471,10 +1662,19 @@ static void ice_check_media_subtask(struct ice_pf *pf) return; if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { - err = ice_force_phys_link_state(vsi, true); - if (err) + if (!test_bit(__ICE_PHY_INIT_COMPLETE, pf->state)) + ice_init_phy_user_cfg(pi); + + /* PHY settings are reset on media insertion, reconfigure + * PHY to preserve settings. 
+ */ + if (test_bit(__ICE_DOWN, vsi->state) && + test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) return; - clear_bit(ICE_FLAG_NO_MEDIA, pf->flags); + + err = ice_configure_phy(vsi); + if (!err) + clear_bit(ICE_FLAG_NO_MEDIA, pf->flags); /* A Link Status Event will be generated; the event handler * will complete bringing the interface up @@ -1518,7 +1718,7 @@ static void ice_service_task(struct work_struct *work) ice_process_vflr_event(pf); ice_clean_mailboxq_subtask(pf); - + ice_sync_arfs_fltrs(pf); /* Clear __ICE_SERVICE_SCHED flag to allow scheduling next event */ ice_service_task_complete(pf); @@ -1544,13 +1744,51 @@ static void ice_set_ctrlq_len(struct ice_hw *hw) hw->adminq.num_sq_entries = ICE_AQ_LEN; hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN; hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN; - hw->mailboxq.num_rq_entries = ICE_MBXRQ_LEN; + hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M; hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN; hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN; hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN; } /** + * ice_schedule_reset - schedule a reset + * @pf: board private structure + * @reset: reset being requested + */ +int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset) +{ + struct device *dev = ice_pf_to_dev(pf); + + /* bail out if earlier reset has failed */ + if (test_bit(__ICE_RESET_FAILED, pf->state)) { + dev_dbg(dev, "earlier reset has failed\n"); + return -EIO; + } + /* bail if reset/recovery already in progress */ + if (ice_is_reset_in_progress(pf->state)) { + dev_dbg(dev, "Reset already in progress\n"); + return -EBUSY; + } + + switch (reset) { + case ICE_RESET_PFR: + set_bit(__ICE_PFR_REQ, pf->state); + break; + case ICE_RESET_CORER: + set_bit(__ICE_CORER_REQ, pf->state); + break; + case ICE_RESET_GLOBR: + set_bit(__ICE_GLOBR_REQ, pf->state); + break; + default: + return -EINVAL; + } + + ice_service_task_schedule(pf); + return 0; +} + +/** * ice_irq_affinity_notify - Callback for affinity changes * @notify: context as to what irq was changed * @mask: the new affinity mask @@ -1604,11 +1842,13 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename) int q_vectors = vsi->num_q_vectors; struct ice_pf *pf = vsi->back; int base = vsi->base_vector; + struct device *dev; int rx_int_idx = 0; int tx_int_idx = 0; int vector, err; int irq_num; + dev = ice_pf_to_dev(pf); for (vector = 0; vector < q_vectors; vector++) { struct ice_q_vector *q_vector = vsi->q_vectors[vector]; @@ -1628,19 +1868,23 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename) /* skip this unused q_vector */ continue; } - err = devm_request_irq(&pf->pdev->dev, irq_num, - vsi->irq_handler, 0, + err = devm_request_irq(dev, irq_num, vsi->irq_handler, 0, q_vector->name, q_vector); if (err) { - netdev_err(vsi->netdev, - "MSIX request_irq failed, error: %d\n", err); + netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n", + err); goto free_q_irqs; } /* register for affinity change notifications */ - q_vector->affinity_notify.notify = ice_irq_affinity_notify; - q_vector->affinity_notify.release = ice_irq_affinity_release; - irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify); + if (!IS_ENABLED(CONFIG_RFS_ACCEL)) { + struct irq_affinity_notify *affinity_notify; + + affinity_notify = &q_vector->affinity_notify; + affinity_notify->notify = ice_irq_affinity_notify; + affinity_notify->release = ice_irq_affinity_release; + irq_set_affinity_notifier(irq_num, affinity_notify); + } /* assign the mask for this irq */ 
irq_set_affinity_hint(irq_num, &q_vector->affinity_mask); @@ -1652,15 +1896,334 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename) free_q_irqs: while (vector) { vector--; - irq_num = pf->msix_entries[base + vector].vector, - irq_set_affinity_notifier(irq_num, NULL); + irq_num = pf->msix_entries[base + vector].vector; + if (!IS_ENABLED(CONFIG_RFS_ACCEL)) + irq_set_affinity_notifier(irq_num, NULL); irq_set_affinity_hint(irq_num, NULL); - devm_free_irq(&pf->pdev->dev, irq_num, &vsi->q_vectors[vector]); + devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]); } return err; } /** + * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP + * @vsi: VSI to setup Tx rings used by XDP + * + * Return 0 on success and negative value on error + */ +static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi) +{ + struct device *dev = ice_pf_to_dev(vsi->back); + int i; + + for (i = 0; i < vsi->num_xdp_txq; i++) { + u16 xdp_q_idx = vsi->alloc_txq + i; + struct ice_ring *xdp_ring; + + xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL); + + if (!xdp_ring) + goto free_xdp_rings; + + xdp_ring->q_index = xdp_q_idx; + xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx]; + xdp_ring->ring_active = false; + xdp_ring->vsi = vsi; + xdp_ring->netdev = NULL; + xdp_ring->dev = dev; + xdp_ring->count = vsi->num_tx_desc; + WRITE_ONCE(vsi->xdp_rings[i], xdp_ring); + if (ice_setup_tx_ring(xdp_ring)) + goto free_xdp_rings; + ice_set_ring_xdp(xdp_ring); + xdp_ring->xsk_umem = ice_xsk_umem(xdp_ring); + } + + return 0; + +free_xdp_rings: + for (; i >= 0; i--) + if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) + ice_free_tx_ring(vsi->xdp_rings[i]); + return -ENOMEM; +} + +/** + * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI + * @vsi: VSI to set the bpf prog on + * @prog: the bpf prog pointer + */ +static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog) +{ + struct bpf_prog *old_prog; + int i; + + old_prog = xchg(&vsi->xdp_prog, prog); + if (old_prog) + bpf_prog_put(old_prog); + + ice_for_each_rxq(vsi, i) + WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog); +} + +/** + * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP + * @vsi: VSI to bring up Tx rings used by XDP + * @prog: bpf program that will be assigned to VSI + * + * Return 0 on success and negative value on error + */ +int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog) +{ + u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; + int xdp_rings_rem = vsi->num_xdp_txq; + struct ice_pf *pf = vsi->back; + struct ice_qs_cfg xdp_qs_cfg = { + .qs_mutex = &pf->avail_q_mutex, + .pf_map = pf->avail_txqs, + .pf_map_size = pf->max_pf_txqs, + .q_count = vsi->num_xdp_txq, + .scatter_count = ICE_MAX_SCATTER_TXQS, + .vsi_map = vsi->txq_map, + .vsi_map_offset = vsi->alloc_txq, + .mapping_mode = ICE_VSI_MAP_CONTIG + }; + enum ice_status status; + struct device *dev; + int i, v_idx; + + dev = ice_pf_to_dev(pf); + vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq, + sizeof(*vsi->xdp_rings), GFP_KERNEL); + if (!vsi->xdp_rings) + return -ENOMEM; + + vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode; + if (__ice_vsi_get_qs(&xdp_qs_cfg)) + goto err_map_xdp; + + if (ice_xdp_alloc_setup_rings(vsi)) + goto clear_xdp_rings; + + /* follow the logic from ice_vsi_map_rings_to_vectors */ + ice_for_each_q_vector(vsi, v_idx) { + struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; + int xdp_rings_per_v, q_id, q_base; + + xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem, + vsi->num_q_vectors - v_idx); + q_base = 
vsi->num_xdp_txq - xdp_rings_rem; + + for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) { + struct ice_ring *xdp_ring = vsi->xdp_rings[q_id]; + + xdp_ring->q_vector = q_vector; + xdp_ring->next = q_vector->tx.ring; + q_vector->tx.ring = xdp_ring; + } + xdp_rings_rem -= xdp_rings_per_v; + } + + /* omit the scheduler update if in reset path; XDP queues will be + * taken into account at the end of ice_vsi_rebuild, where + * ice_cfg_vsi_lan is being called + */ + if (ice_is_reset_in_progress(pf->state)) + return 0; + + /* tell the Tx scheduler that right now we have + * additional queues + */ + for (i = 0; i < vsi->tc_cfg.numtc; i++) + max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq; + + status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, + max_txqs); + if (status) { + dev_err(dev, "Failed VSI LAN queue config for XDP, error: %s\n", + ice_stat_str(status)); + goto clear_xdp_rings; + } + ice_vsi_assign_bpf_prog(vsi, prog); + + return 0; +clear_xdp_rings: + for (i = 0; i < vsi->num_xdp_txq; i++) + if (vsi->xdp_rings[i]) { + kfree_rcu(vsi->xdp_rings[i], rcu); + vsi->xdp_rings[i] = NULL; + } + +err_map_xdp: + mutex_lock(&pf->avail_q_mutex); + for (i = 0; i < vsi->num_xdp_txq; i++) { + clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs); + vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX; + } + mutex_unlock(&pf->avail_q_mutex); + + devm_kfree(dev, vsi->xdp_rings); + return -ENOMEM; +} + +/** + * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings + * @vsi: VSI to remove XDP rings + * + * Detach XDP rings from irq vectors, clean up the PF bitmap and free + * resources + */ +int ice_destroy_xdp_rings(struct ice_vsi *vsi) +{ + u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; + struct ice_pf *pf = vsi->back; + int i, v_idx; + + /* q_vectors are freed in reset path so there's no point in detaching + * rings; in case of rebuild being triggered not from reset reset bits + * in pf->state won't be set, so additionally check first q_vector + * against NULL + */ + if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0]) + goto free_qmap; + + ice_for_each_q_vector(vsi, v_idx) { + struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; + struct ice_ring *ring; + + ice_for_each_ring(ring, q_vector->tx) + if (!ring->tx_buf || !ice_ring_is_xdp(ring)) + break; + + /* restore the value of last node prior to XDP setup */ + q_vector->tx.ring = ring; + } + +free_qmap: + mutex_lock(&pf->avail_q_mutex); + for (i = 0; i < vsi->num_xdp_txq; i++) { + clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs); + vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX; + } + mutex_unlock(&pf->avail_q_mutex); + + for (i = 0; i < vsi->num_xdp_txq; i++) + if (vsi->xdp_rings[i]) { + if (vsi->xdp_rings[i]->desc) + ice_free_tx_ring(vsi->xdp_rings[i]); + kfree_rcu(vsi->xdp_rings[i], rcu); + vsi->xdp_rings[i] = NULL; + } + + devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings); + vsi->xdp_rings = NULL; + + if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0]) + return 0; + + ice_vsi_assign_bpf_prog(vsi, NULL); + + /* notify Tx scheduler that we destroyed XDP queues and bring + * back the old number of child nodes + */ + for (i = 0; i < vsi->tc_cfg.numtc; i++) + max_txqs[i] = vsi->num_txq; + + /* change number of XDP Tx queues to 0 */ + vsi->num_xdp_txq = 0; + + return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, + max_txqs); +} + +/** + * ice_xdp_setup_prog - Add or remove XDP eBPF program + * @vsi: VSI to setup XDP for + * @prog: XDP program + * 
@extack: netlink extended ack + */ +static int +ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog, + struct netlink_ext_ack *extack) +{ + int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD; + bool if_running = netif_running(vsi->netdev); + int ret = 0, xdp_ring_err = 0; + + if (frame_size > vsi->rx_buf_len) { + NL_SET_ERR_MSG_MOD(extack, "MTU too large for loading XDP"); + return -EOPNOTSUPP; + } + + /* need to stop netdev while setting up the program for Rx rings */ + if (if_running && !test_and_set_bit(__ICE_DOWN, vsi->state)) { + ret = ice_down(vsi); + if (ret) { + NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed"); + return ret; + } + } + + if (!ice_is_xdp_ena_vsi(vsi) && prog) { + vsi->num_xdp_txq = vsi->alloc_rxq; + xdp_ring_err = ice_prepare_xdp_rings(vsi, prog); + if (xdp_ring_err) + NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed"); + } else if (ice_is_xdp_ena_vsi(vsi) && !prog) { + xdp_ring_err = ice_destroy_xdp_rings(vsi); + if (xdp_ring_err) + NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed"); + } else { + ice_vsi_assign_bpf_prog(vsi, prog); + } + + if (if_running) + ret = ice_up(vsi); + + if (!ret && prog && vsi->xsk_umems) { + int i; + + ice_for_each_rxq(vsi, i) { + struct ice_ring *rx_ring = vsi->rx_rings[i]; + + if (rx_ring->xsk_umem) + napi_schedule(&rx_ring->q_vector->napi); + } + } + + return (ret || xdp_ring_err) ? -ENOMEM : 0; +} + +/** + * ice_xdp - implements XDP handler + * @dev: netdevice + * @xdp: XDP command + */ +static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp) +{ + struct ice_netdev_priv *np = netdev_priv(dev); + struct ice_vsi *vsi = np->vsi; + + if (vsi->type != ICE_VSI_PF) { + NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI"); + return -EINVAL; + } + + switch (xdp->command) { + case XDP_SETUP_PROG: + return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack); + case XDP_QUERY_PROG: + xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0; + return 0; + case XDP_SETUP_XSK_UMEM: + return ice_xsk_umem_setup(vsi, xdp->xsk.umem, + xdp->xsk.queue_id); + default: + return -EINVAL; + } +} + +/** * ice_ena_misc_vector - enable the non-queue interrupts * @pf: board private structure */ @@ -1669,6 +2232,14 @@ static void ice_ena_misc_vector(struct ice_pf *pf) struct ice_hw *hw = &pf->hw; u32 val; + /* Disable anti-spoof detection interrupt to prevent spurious event + * interrupts during a function reset. Anti-spoof functionally is + * still supported. 
+ */ + val = rd32(hw, GL_MDCK_TX_TDPU); + val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M; + wr32(hw, GL_MDCK_TX_TDPU, val); + /* clear things first */ wr32(hw, PFINT_OICR_ENA, 0); /* disable all */ rd32(hw, PFINT_OICR); /* read to clear */ @@ -1698,8 +2269,10 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) struct ice_pf *pf = (struct ice_pf *)data; struct ice_hw *hw = &pf->hw; irqreturn_t ret = IRQ_NONE; + struct device *dev; u32 oicr, ena_mask; + dev = ice_pf_to_dev(pf); set_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state); set_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state); @@ -1716,8 +2289,16 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) set_bit(__ICE_MDD_EVENT_PENDING, pf->state); } if (oicr & PFINT_OICR_VFLR_M) { - ena_mask &= ~PFINT_OICR_VFLR_M; - set_bit(__ICE_VFLR_EVENT_PENDING, pf->state); + /* disable any further VFLR event notifications */ + if (test_bit(__ICE_VF_RESETS_DISABLED, pf->state)) { + u32 reg = rd32(hw, PFINT_OICR_ENA); + + reg &= ~PFINT_OICR_VFLR_M; + wr32(hw, PFINT_OICR_ENA, reg); + } else { + ena_mask &= ~PFINT_OICR_VFLR_M; + set_bit(__ICE_VFLR_EVENT_PENDING, pf->state); + } } if (oicr & PFINT_OICR_GRST_M) { @@ -1735,8 +2316,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) else if (reset == ICE_RESET_EMPR) pf->empr_count++; else - dev_dbg(&pf->pdev->dev, "Invalid reset type %d\n", - reset); + dev_dbg(dev, "Invalid reset type %d\n", reset); /* If a reset cycle isn't already in progress, we set a bit in * pf->state so that the service task can start a reset/rebuild. @@ -1770,8 +2350,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) if (oicr & PFINT_OICR_HMC_ERR_M) { ena_mask &= ~PFINT_OICR_HMC_ERR_M; - dev_dbg(&pf->pdev->dev, - "HMC Error interrupt - info 0x%x, data 0x%x\n", + dev_dbg(dev, "HMC Error interrupt - info 0x%x, data 0x%x\n", rd32(hw, PFHMC_ERRORINFO), rd32(hw, PFHMC_ERRORDATA)); } @@ -1779,8 +2358,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) /* Report any remaining unexpected interrupts */ oicr &= ena_mask; if (oicr) { - dev_dbg(&pf->pdev->dev, "unhandled interrupt oicr=0x%08x\n", - oicr); + dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr); /* If a critical error is pending there is no choice but to * reset the device. */ @@ -1793,10 +2371,8 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) } ret = IRQ_HANDLED; - if (!test_bit(__ICE_DOWN, pf->state)) { - ice_service_task_schedule(pf); - ice_irq_dynamic_ena(hw, NULL, NULL); - } + ice_service_task_schedule(pf); + ice_irq_dynamic_ena(hw, NULL, NULL); return ret; } @@ -1838,7 +2414,7 @@ static void ice_free_irq_msix_misc(struct ice_pf *pf) if (pf->msix_entries) { synchronize_irq(pf->msix_entries[pf->oicr_idx].vector); - devm_free_irq(&pf->pdev->dev, + devm_free_irq(ice_pf_to_dev(pf), pf->msix_entries[pf->oicr_idx].vector, pf); } @@ -1882,13 +2458,13 @@ static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx) */ static int ice_req_irq_msix_misc(struct ice_pf *pf) { + struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; int oicr_idx, err = 0; if (!pf->int_name[0]) snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc", - dev_driver_string(&pf->pdev->dev), - dev_name(&pf->pdev->dev)); + dev_driver_string(dev), dev_name(dev)); /* Do not request IRQ but do enable OICR interrupt since settings are * lost during reset. 
Note that this function is called only during @@ -1903,14 +2479,12 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf) return oicr_idx; pf->num_avail_sw_msix -= 1; - pf->oicr_idx = oicr_idx; + pf->oicr_idx = (u16)oicr_idx; - err = devm_request_irq(&pf->pdev->dev, - pf->msix_entries[pf->oicr_idx].vector, + err = devm_request_irq(dev, pf->msix_entries[pf->oicr_idx].vector, ice_misc_intr, 0, pf->int_name, pf); if (err) { - dev_err(&pf->pdev->dev, - "devm_request_irq for %s failed: %d\n", + dev_err(dev, "devm_request_irq for %s failed: %d\n", pf->int_name, err); ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID); pf->num_avail_sw_msix += 1; @@ -1989,6 +2563,7 @@ static void ice_set_netdev_features(struct net_device *netdev) dflt_features = NETIF_F_SG | NETIF_F_HIGHDMA | + NETIF_F_NTUPLE | NETIF_F_RXHASH; csumo_features = NETIF_F_RXCSUM | @@ -2000,12 +2575,27 @@ static void ice_set_netdev_features(struct net_device *netdev) NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; - tso_features = NETIF_F_TSO; - + tso_features = NETIF_F_TSO | + NETIF_F_TSO_ECN | + NETIF_F_TSO6 | + NETIF_F_GSO_GRE | + NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_GRE_CSUM | + NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_GSO_PARTIAL | + NETIF_F_GSO_IPXIP4 | + NETIF_F_GSO_IPXIP6 | + NETIF_F_GSO_UDP_L4; + + netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_GSO_GRE_CSUM; /* set features that user can change */ netdev->hw_features = dflt_features | csumo_features | vlano_features | tso_features; + /* add support for HW_CSUM on packets with MPLS header */ + netdev->mpls_features = NETIF_F_HW_CSUM; + /* enable features */ netdev->features |= netdev->hw_features; /* encap and VLAN devices inherit default, csumo and tso features */ @@ -2029,10 +2619,16 @@ static int ice_cfg_netdev(struct ice_vsi *vsi) u8 mac_addr[ETH_ALEN]; int err; + err = ice_devlink_create_port(pf); + if (err) + return err; + netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq, vsi->alloc_rxq); - if (!netdev) - return -ENOMEM; + if (!netdev) { + err = -ENOMEM; + goto err_destroy_devlink_port; + } vsi->netdev = netdev; np = netdev_priv(netdev); @@ -2043,7 +2639,7 @@ static int ice_cfg_netdev(struct ice_vsi *vsi) ice_set_ops(netdev); if (vsi->type == ICE_VSI_PF) { - SET_NETDEV_DEV(netdev, &pf->pdev->dev); + SET_NETDEV_DEV(netdev, ice_pf_to_dev(pf)); ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr); ether_addr_copy(netdev->dev_addr, mac_addr); ether_addr_copy(netdev->perm_addr, mac_addr); @@ -2062,7 +2658,9 @@ static int ice_cfg_netdev(struct ice_vsi *vsi) err = register_netdev(vsi->netdev); if (err) - return err; + goto err_free_netdev; + + devlink_port_type_eth_set(&pf->devlink_port, vsi->netdev); netif_carrier_off(vsi->netdev); @@ -2070,6 +2668,13 @@ static int ice_cfg_netdev(struct ice_vsi *vsi) netif_tx_stop_all_queues(vsi->netdev); return 0; + +err_free_netdev: + free_netdev(vsi->netdev); + vsi->netdev = NULL; +err_destroy_devlink_port: + ice_devlink_destroy_port(pf); + return err; } /** @@ -2101,6 +2706,20 @@ ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) } /** + * ice_ctrl_vsi_setup - Set up a control VSI + * @pf: board private structure + * @pi: pointer to the port_info instance + * + * Returns pointer to the successfully allocated VSI software struct + * on success, otherwise returns NULL on failure. 
+ */ +static struct ice_vsi * +ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) +{ + return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, ICE_INVAL_VFID); +} + +/** * ice_lb_vsi_setup - Set up a loopback VSI * @pf: board private structure * @pi: pointer to the port_info instance @@ -2139,18 +2758,21 @@ ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto, if (vsi->info.pvid) return -EINVAL; - /* Enable VLAN pruning when VLAN 0 is added */ - if (unlikely(!vid)) { + /* VLAN 0 is added by default during load/reset */ + if (!vid) + return 0; + + /* Enable VLAN pruning when a VLAN other than 0 is added */ + if (!ice_vsi_is_vlan_pruning_ena(vsi)) { ret = ice_cfg_vlan_pruning(vsi, true, false); if (ret) return ret; } - /* Add all VLAN IDs including 0 to the switch filter. VLAN ID 0 is - * needed to continue allowing all untagged packets since VLAN prune - * list is applied to all packets by the switch + /* Add a switch rule for this VLAN ID so its corresponding VLAN tagged + * packets aren't pruned by the device's internal switch on Rx */ - ret = ice_vsi_add_vlan(vsi, vid); + ret = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI); if (!ret) { vsi->vlan_ena = true; set_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags); @@ -2178,6 +2800,10 @@ ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto, if (vsi->info.pvid) return -EINVAL; + /* don't allow removal of VLAN 0 */ + if (!vid) + return 0; + /* Make sure ice_vsi_kill_vlan is successful before updating VLAN * information */ @@ -2185,8 +2811,8 @@ ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto, if (ret) return ret; - /* Disable VLAN pruning when VLAN 0 is removed */ - if (unlikely(!vid)) + /* Disable pruning when VLAN 0 is the only VLAN rule */ + if (vsi->num_vlan == 1 && ice_vsi_is_vlan_pruning_ena(vsi)) ret = ice_cfg_vlan_pruning(vsi, false, false); vsi->vlan_ena = false; @@ -2219,6 +2845,11 @@ static int ice_setup_pf_sw(struct ice_pf *pf) status = -ENODEV; goto unroll_vsi_setup; } + /* netdev has to be configured before setting frame size */ + ice_vsi_cfg_frame_size(vsi); + + /* Setup DCB netlink interface */ + ice_dcbnl_setup(vsi); /* registering the NAPI handler requires both the queues and * netdev to be created, which are done in ice_pf_vsi_setup() @@ -2226,12 +2857,22 @@ static int ice_setup_pf_sw(struct ice_pf *pf) */ ice_napi_add(vsi); + status = ice_set_cpu_rx_rmap(vsi); + if (status) { + dev_err(ice_pf_to_dev(pf), "Failed to set CPU Rx map VSI %d error %d\n", + vsi->vsi_num, status); + status = -EINVAL; + goto unroll_napi_add; + } status = ice_init_mac_fltr(pf); if (status) - goto unroll_napi_add; + goto free_cpu_rx_map; return status; +free_cpu_rx_map: + ice_free_cpu_rx_rmap(vsi); + unroll_napi_add: if (vsi) { ice_napi_del(vsi); @@ -2262,7 +2903,8 @@ unroll_vsi_setup: static u16 ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size) { - u16 count = 0, bit; + unsigned long bit; + u16 count = 0; mutex_lock(lock); for_each_clear_bit(bit, pf_qmap, size) @@ -2300,6 +2942,7 @@ static void ice_deinit_pf(struct ice_pf *pf) { ice_service_task_stop(pf); mutex_destroy(&pf->sw_mutex); + mutex_destroy(&pf->tc_mutex); mutex_destroy(&pf->avail_q_mutex); if (pf->avail_txqs) { @@ -2324,18 +2967,33 @@ static void ice_set_pf_caps(struct ice_pf *pf) clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); if (func_caps->common_cap.dcb) set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); -#ifdef CONFIG_PCI_IOV clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); if 
(func_caps->common_cap.sr_iov_1_1) { set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); pf->num_vfs_supported = min_t(int, func_caps->num_allocd_vfs, ICE_MAX_VF_COUNT); } -#endif /* CONFIG_PCI_IOV */ clear_bit(ICE_FLAG_RSS_ENA, pf->flags); if (func_caps->common_cap.rss_table_size) set_bit(ICE_FLAG_RSS_ENA, pf->flags); + clear_bit(ICE_FLAG_FD_ENA, pf->flags); + if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) { + u16 unused; + + /* ctrl_vsi_idx will be set to a valid value when flow director + * is setup by ice_init_fdir + */ + pf->ctrl_vsi_idx = ICE_NO_VSI; + set_bit(ICE_FLAG_FD_ENA, pf->flags); + /* force guaranteed filter pool for PF */ + ice_alloc_fd_guar_item(&pf->hw, &unused, + func_caps->fd_fltr_guar); + /* force shared filter pool for PF */ + ice_alloc_fd_shrd_item(&pf->hw, &unused, + func_caps->fd_fltr_best_effort); + } + pf->max_pf_txqs = func_caps->common_cap.num_txq; pf->max_pf_rxqs = func_caps->common_cap.num_rxq; } @@ -2349,6 +3007,7 @@ static int ice_init_pf(struct ice_pf *pf) ice_set_pf_caps(pf); mutex_init(&pf->sw_mutex); + mutex_init(&pf->tc_mutex); /* setup service timer and periodic service task */ timer_setup(&pf->serv_tmr, ice_service_timer, 0); @@ -2363,7 +3022,7 @@ static int ice_init_pf(struct ice_pf *pf) pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL); if (!pf->avail_rxqs) { - devm_kfree(&pf->pdev->dev, pf->avail_txqs); + devm_kfree(ice_pf_to_dev(pf), pf->avail_txqs); pf->avail_txqs = NULL; return -ENOMEM; } @@ -2380,6 +3039,7 @@ static int ice_init_pf(struct ice_pf *pf) */ static int ice_ena_msix_range(struct ice_pf *pf) { + struct device *dev = ice_pf_to_dev(pf); int v_left, v_actual, v_budget = 0; int needed, err, i; @@ -2400,7 +3060,16 @@ static int ice_ena_msix_range(struct ice_pf *pf) v_budget += needed; v_left -= needed; - pf->msix_entries = devm_kcalloc(&pf->pdev->dev, v_budget, + /* reserve one vector for flow director */ + if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { + needed = ICE_FDIR_MSIX; + if (v_left < needed) + goto no_hw_vecs_left_err; + v_budget += needed; + v_left -= needed; + } + + pf->msix_entries = devm_kcalloc(dev, v_budget, sizeof(*pf->msix_entries), GFP_KERNEL); if (!pf->msix_entries) { @@ -2416,17 +3085,18 @@ static int ice_ena_msix_range(struct ice_pf *pf) ICE_MIN_MSIX, v_budget); if (v_actual < 0) { - dev_err(&pf->pdev->dev, "unable to reserve MSI-X vectors\n"); + dev_err(dev, "unable to reserve MSI-X vectors\n"); err = v_actual; goto msix_err; } if (v_actual < v_budget) { - dev_warn(&pf->pdev->dev, - "not enough OS MSI-X vectors. requested = %d, obtained = %d\n", + dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n", v_budget, v_actual); -/* 2 vectors for LAN (traffic + OICR) */ +/* 2 vectors each for LAN and RDMA (traffic + OICR), one for flow director */ #define ICE_MIN_LAN_VECS 2 +#define ICE_MIN_RDMA_VECS 2 +#define ICE_MIN_VECS (ICE_MIN_LAN_VECS + ICE_MIN_RDMA_VECS + 1) if (v_actual < ICE_MIN_LAN_VECS) { /* error if we can't get minimum vectors */ @@ -2441,12 +3111,11 @@ static int ice_ena_msix_range(struct ice_pf *pf) return v_actual; msix_err: - devm_kfree(&pf->pdev->dev, pf->msix_entries); + devm_kfree(dev, pf->msix_entries); goto exit_err; no_hw_vecs_left_err: - dev_err(&pf->pdev->dev, - "not enough device MSI-X vectors. requested = %d, available = %d\n", + dev_err(dev, "not enough device MSI-X vectors. 
requested = %d, available = %d\n", needed, v_left); err = -ERANGE; exit_err: @@ -2461,7 +3130,7 @@ exit_err: static void ice_dis_msix(struct ice_pf *pf) { pci_disable_msix(pf->pdev); - devm_kfree(&pf->pdev->dev, pf->msix_entries); + devm_kfree(ice_pf_to_dev(pf), pf->msix_entries); pf->msix_entries = NULL; } @@ -2474,7 +3143,7 @@ static void ice_clear_interrupt_scheme(struct ice_pf *pf) ice_dis_msix(pf); if (pf->irq_tracker) { - devm_kfree(&pf->pdev->dev, pf->irq_tracker); + devm_kfree(ice_pf_to_dev(pf), pf->irq_tracker); pf->irq_tracker = NULL; } } @@ -2494,7 +3163,7 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf) /* set up vector assignment tracking */ pf->irq_tracker = - devm_kzalloc(&pf->pdev->dev, sizeof(*pf->irq_tracker) + + devm_kzalloc(ice_pf_to_dev(pf), sizeof(*pf->irq_tracker) + (sizeof(u16) * vectors), GFP_KERNEL); if (!pf->irq_tracker) { ice_dis_msix(pf); @@ -2502,14 +3171,135 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf) } /* populate SW interrupts pool with number of OS granted IRQs. */ - pf->num_avail_sw_msix = vectors; - pf->irq_tracker->num_entries = vectors; + pf->num_avail_sw_msix = (u16)vectors; + pf->irq_tracker->num_entries = (u16)vectors; pf->irq_tracker->end = pf->irq_tracker->num_entries; return 0; } /** + * ice_is_wol_supported - get NVM state of WoL + * @pf: board private structure + * + * Check if WoL is supported based on the HW configuration. + * Returns true if NVM supports and enables WoL for this port, false otherwise + */ +bool ice_is_wol_supported(struct ice_pf *pf) +{ + struct ice_hw *hw = &pf->hw; + u16 wol_ctrl; + + /* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control + * word) indicates WoL is not supported on the corresponding PF ID. + */ + if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl)) + return false; + + return !(BIT(hw->pf_id) & wol_ctrl); +} + +/** + * ice_vsi_recfg_qs - Change the number of queues on a VSI + * @vsi: VSI being changed + * @new_rx: new number of Rx queues + * @new_tx: new number of Tx queues + * + * Only change the number of queues if new_tx, or new_rx is non-0. + * + * Returns 0 on success. + */ +int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx) +{ + struct ice_pf *pf = vsi->back; + int err = 0, timeout = 50; + + if (!new_rx && !new_tx) + return -EINVAL; + + while (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) { + timeout--; + if (!timeout) + return -EBUSY; + usleep_range(1000, 2000); + } + + if (new_tx) + vsi->req_txq = (u16)new_tx; + if (new_rx) + vsi->req_rxq = (u16)new_rx; + + /* set for the next time the netdev is started */ + if (!netif_running(vsi->netdev)) { + ice_vsi_rebuild(vsi, false); + dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n"); + goto done; + } + + ice_vsi_close(vsi); + ice_vsi_rebuild(vsi, false); + ice_pf_dcb_recfg(pf); + ice_vsi_open(vsi); +done: + clear_bit(__ICE_CFG_BUSY, pf->state); + return err; +} + +/** + * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode + * @pf: PF to configure + * + * No VLAN offloads/filtering are advertised in safe mode so make sure the PF + * VSI can still Tx/Rx VLAN tagged packets. 
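+ *
+ * Concretely, this clears VLAN anti-spoof and Rx VLAN pruning in the VSI
+ * context and programs ICE_AQ_VSI_VLAN_MODE_ALL together with
+ * ICE_AQ_VSI_VLAN_EMOD_NOTHING, so every VLAN is allowed on Tx and tags
+ * are left in place on Rx.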
+ */ +static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf) +{ + struct ice_vsi *vsi = ice_get_main_vsi(pf); + struct ice_vsi_ctx *ctxt; + enum ice_status status; + struct ice_hw *hw; + + if (!vsi) + return; + + ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); + if (!ctxt) + return; + + hw = &pf->hw; + ctxt->info = vsi->info; + + ctxt->info.valid_sections = + cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID | + ICE_AQ_VSI_PROP_SECURITY_VALID | + ICE_AQ_VSI_PROP_SW_VALID); + + /* disable VLAN anti-spoof */ + ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA << + ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S); + + /* disable VLAN pruning and keep all other settings */ + ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; + + /* allow all VLANs on Tx and don't strip on Rx */ + ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL | + ICE_AQ_VSI_VLAN_EMOD_NOTHING; + + status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); + if (status) { + dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %s aq_err %s\n", + ice_stat_str(status), + ice_aq_str(hw->adminq.sq_last_status)); + } else { + vsi->info.sec_flags = ctxt->info.sec_flags; + vsi->info.sw_flags2 = ctxt->info.sw_flags2; + vsi->info.vlan_flags = ctxt->info.vlan_flags; + } + + kfree(ctxt); +} + +/** * ice_log_pkg_init - log result of DDP package load * @hw: pointer to hardware info * @status: status of package load @@ -2518,7 +3308,7 @@ static void ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status) { struct ice_pf *pf = (struct ice_pf *)hw->back; - struct device *dev = &pf->pdev->dev; + struct device *dev = ice_pf_to_dev(pf); switch (*status) { case ICE_SUCCESS: @@ -2533,16 +3323,14 @@ ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status) !memcmp(hw->pkg_name, hw->active_pkg_name, sizeof(hw->pkg_name))) { if (hw->pkg_dwnld_status == ICE_AQ_RC_EEXIST) - dev_info(dev, - "DDP package already present on device: %s version %d.%d.%d.%d\n", + dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n", hw->active_pkg_name, hw->active_pkg_ver.major, hw->active_pkg_ver.minor, hw->active_pkg_ver.update, hw->active_pkg_ver.draft); else - dev_info(dev, - "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n", + dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n", hw->active_pkg_name, hw->active_pkg_ver.major, hw->active_pkg_ver.minor, @@ -2550,8 +3338,7 @@ ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status) hw->active_pkg_ver.draft); } else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ || hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) { - dev_err(dev, - "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n", + dev_err(dev, "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n", hw->active_pkg_name, hw->active_pkg_ver.major, hw->active_pkg_ver.minor, @@ -2559,8 +3346,7 @@ ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status) *status = ICE_ERR_NOT_SUPPORTED; } else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ && hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) { - dev_info(dev, - "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. 
The package file found by the driver: '%s' version %d.%d.%d.%d.\n", + dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d.\n", hw->active_pkg_name, hw->active_pkg_ver.major, hw->active_pkg_ver.minor, @@ -2572,54 +3358,51 @@ ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status) hw->pkg_ver.update, hw->pkg_ver.draft); } else { - dev_err(dev, - "An unknown error occurred when loading the DDP package, please reboot the system. If the problem persists, update the NVM. Entering Safe Mode.\n"); + dev_err(dev, "An unknown error occurred when loading the DDP package, please reboot the system. If the problem persists, update the NVM. Entering Safe Mode.\n"); *status = ICE_ERR_NOT_SUPPORTED; } break; + case ICE_ERR_FW_DDP_MISMATCH: + dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package. Please update the device's NVM. Entering safe mode.\n"); + break; case ICE_ERR_BUF_TOO_SHORT: - /* fall-through */ case ICE_ERR_CFG: - dev_err(dev, - "The DDP package file is invalid. Entering Safe Mode.\n"); + dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n"); break; case ICE_ERR_NOT_SUPPORTED: /* Package File version not supported */ if (hw->pkg_ver.major > ICE_PKG_SUPP_VER_MAJ || (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ && hw->pkg_ver.minor > ICE_PKG_SUPP_VER_MNR)) - dev_err(dev, - "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n"); + dev_err(dev, "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n"); else if (hw->pkg_ver.major < ICE_PKG_SUPP_VER_MAJ || (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ && hw->pkg_ver.minor < ICE_PKG_SUPP_VER_MNR)) - dev_err(dev, - "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n", + dev_err(dev, "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n", ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); break; case ICE_ERR_AQ_ERROR: - switch (hw->adminq.sq_last_status) { + switch (hw->pkg_dwnld_status) { case ICE_AQ_RC_ENOSEC: case ICE_AQ_RC_EBADSIG: - dev_err(dev, - "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n"); + dev_err(dev, "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n"); return; case ICE_AQ_RC_ESVN: - dev_err(dev, - "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n"); + dev_err(dev, "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n"); return; case ICE_AQ_RC_EBADMAN: case ICE_AQ_RC_EBADBUF: - dev_err(dev, - "An error occurred on the device while loading the DDP package. The device will be reset.\n"); + dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n"); + /* poll for reset to complete */ + if (ice_check_reset(hw)) + dev_err(dev, "Error resetting device. 
Please reload the driver\n"); return; default: break; } - /* fall-through */ + fallthrough; default: - dev_err(dev, - "An unknown error (%d) occurred when loading the DDP package. Entering Safe Mode.\n", + dev_err(dev, "An unknown error (%d) occurred when loading the DDP package. Entering Safe Mode.\n", *status); break; } @@ -2637,7 +3420,7 @@ static void ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf) { enum ice_status status = ICE_ERR_PARAM; - struct device *dev = &pf->pdev->dev; + struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; /* Load DDP Package */ @@ -2650,8 +3433,7 @@ ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf) status = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size); ice_log_pkg_init(hw, &status); } else { - dev_err(dev, - "The DDP package file failed to load. Entering Safe Mode.\n"); + dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n"); } if (status) { @@ -2677,8 +3459,7 @@ ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf) static void ice_verify_cacheline_size(struct ice_pf *pf) { if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M) - dev_warn(&pf->pdev->dev, - "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n", + dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n", ICE_CACHE_LINE_BYTES); } @@ -2702,6 +3483,53 @@ static enum ice_status ice_send_version(struct ice_pf *pf) } /** + * ice_init_fdir - Initialize flow director VSI and configuration + * @pf: pointer to the PF instance + * + * returns 0 on success, negative on error + */ +static int ice_init_fdir(struct ice_pf *pf) +{ + struct device *dev = ice_pf_to_dev(pf); + struct ice_vsi *ctrl_vsi; + int err; + + /* Side Band Flow Director needs to have a control VSI. + * Allocate it and store it in the PF. + */ + ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info); + if (!ctrl_vsi) { + dev_dbg(dev, "could not create control VSI\n"); + return -ENOMEM; + } + + err = ice_vsi_open_ctrl(ctrl_vsi); + if (err) { + dev_dbg(dev, "could not open control VSI\n"); + goto err_vsi_open; + } + + mutex_init(&pf->hw.fdir_fltr_lock); + + err = ice_fdir_create_dflt_rules(pf); + if (err) + goto err_fdir_rule; + + return 0; + +err_fdir_rule: + ice_fdir_release_flows(&pf->hw); + ice_vsi_close(ctrl_vsi); +err_vsi_open: + ice_vsi_release(ctrl_vsi); + if (pf->ctrl_vsi_idx != ICE_NO_VSI) { + pf->vsi[pf->ctrl_vsi_idx] = NULL; + pf->ctrl_vsi_idx = ICE_NO_VSI; + } + return err; +} + +/** * ice_get_opt_fw_name - return optional firmware file name or NULL * @pf: pointer to the PF instance */ @@ -2747,7 +3575,7 @@ static void ice_request_fw(struct ice_pf *pf) { char *opt_fw_filename = ice_get_opt_fw_name(pf); const struct firmware *firmware = NULL; - struct device *dev = &pf->pdev->dev; + struct device *dev = ice_pf_to_dev(pf); int err = 0; /* optional device-specific DDP (if present) overrides the default DDP @@ -2771,8 +3599,7 @@ static void ice_request_fw(struct ice_pf *pf) dflt_pkg_load: err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev); if (err) { - dev_err(dev, - "The DDP package file was not found or could not be read. Entering Safe Mode\n"); + dev_err(dev, "The DDP package file was not found or could not be read. 
Entering Safe Mode\n"); return; } @@ -2782,6 +3609,33 @@ dflt_pkg_load: } /** + * ice_print_wake_reason - show the wake up cause in the log + * @pf: pointer to the PF struct + */ +static void ice_print_wake_reason(struct ice_pf *pf) +{ + u32 wus = pf->wakeup_reason; + const char *wake_str; + + /* if no wake event, nothing to print */ + if (!wus) + return; + + if (wus & PFPM_WUS_LNKC_M) + wake_str = "Link\n"; + else if (wus & PFPM_WUS_MAG_M) + wake_str = "Magic Packet\n"; + else if (wus & PFPM_WUS_MNG_M) + wake_str = "Management\n"; + else if (wus & PFPM_WUS_FW_RST_WK_M) + wake_str = "Firmware Reset\n"; + else + wake_str = "Unknown\n"; + + dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str); +} + +/** * ice_probe - Device initialization routine * @pdev: PCI device information struct * @ent: entry in ice_pci_tbl @@ -2796,7 +3650,9 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) struct ice_hw *hw; int err; - /* this driver uses devres, see Documentation/driver-api/driver-model/devres.rst */ + /* this driver uses devres, see + * Documentation/driver-api/driver-model/devres.rst + */ err = pcim_enable_device(pdev); if (err) return err; @@ -2807,7 +3663,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) return err; } - pf = devm_kzalloc(dev, sizeof(*pf), GFP_KERNEL); + pf = ice_allocate_pf(dev); if (!pf) return -ENOMEM; @@ -2831,6 +3687,8 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) hw = &pf->hw; hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0]; + pci_save_state(pdev); + hw->back = pf; hw->vendor_id = pdev->vendor; hw->device_id = pdev->device; @@ -2843,6 +3701,12 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M); + err = ice_devlink_register(pf); + if (err) { + dev_err(dev, "ice_devlink_register failed: %d\n", err); + goto err_exit_unroll; + } + #ifndef CONFIG_DYNAMIC_DEBUG if (debug < -1) hw->debug_mask = debug; @@ -2855,11 +3719,6 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) goto err_exit_unroll; } - dev_info(dev, "firmware %d.%d.%d api %d.%d.%d nvm %s build 0x%08x\n", - hw->fw_maj_ver, hw->fw_min_ver, hw->fw_patch, - hw->api_maj_ver, hw->api_min_ver, hw->api_patch, - ice_nvm_version_str(hw), hw->fw_build); - ice_request_fw(pf); /* if ice_request_fw fails, ICE_FLAG_ADV_FEATURES bit won't be @@ -2867,8 +3726,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) * true */ if (ice_is_safe_mode(pf)) { - dev_err(dev, - "Package download failed. Advanced features disabled - Device now in Safe Mode\n"); + dev_err(dev, "Package download failed. Advanced features disabled - Device now in Safe Mode\n"); /* we already got function/device capabilities but these don't * reflect what the driver needs to do in safe mode. Instead of * adding conditional logic everywhere to ignore these @@ -2903,9 +3761,6 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) goto err_init_vsi_unroll; } - /* Driver is mostly up */ - clear_bit(__ICE_DOWN, pf->state); - /* In case of MSIX we are going to setup the misc vector right here * to handle admin queue events etc. 
In case of legacy and MSI * the misc functionality and queue processing is combined in @@ -2936,7 +3791,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) err = ice_setup_pf_sw(pf); if (err) { - dev_err(dev, "probe failed due to setup PF switch:%d\n", err); + dev_err(dev, "probe failed due to setup PF switch: %d\n", err); goto err_alloc_sw_unroll; } @@ -2945,8 +3800,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) /* tell the firmware we are up */ err = ice_send_version(pf); if (err) { - dev_err(dev, - "probe failed sending driver version %s. error: %d\n", + dev_err(dev, "probe failed sending driver version %s. error: %d\n", ice_drv_ver, err); goto err_alloc_sw_unroll; } @@ -2960,14 +3814,62 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) goto err_alloc_sw_unroll; } + err = ice_init_nvm_phy_type(pf->hw.port_info); + if (err) { + dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err); + goto err_alloc_sw_unroll; + } + + err = ice_update_link_info(pf->hw.port_info); + if (err) { + dev_err(dev, "ice_update_link_info failed: %d\n", err); + goto err_alloc_sw_unroll; + } + + /* if media available, initialize PHY settings */ + if (pf->hw.port_info->phy.link_info.link_info & + ICE_AQ_MEDIA_AVAILABLE) { + err = ice_init_phy_user_cfg(pf->hw.port_info); + if (err) { + dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err); + goto err_alloc_sw_unroll; + } + + if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) { + struct ice_vsi *vsi = ice_get_main_vsi(pf); + + if (vsi) + ice_configure_phy(vsi); + } + } else { + set_bit(ICE_FLAG_NO_MEDIA, pf->flags); + } + ice_verify_cacheline_size(pf); - /* If no DDP driven features have to be setup, return here */ - if (ice_is_safe_mode(pf)) - return 0; + /* Save wakeup reason register for later use */ + pf->wakeup_reason = rd32(hw, PFPM_WUS); + + /* check for a power management event */ + ice_print_wake_reason(pf); + + /* clear wake status, all bits */ + wr32(hw, PFPM_WUS, U32_MAX); + + /* Disable WoL at init, wait for user to enable */ + device_set_wakeup_enable(dev, false); + + if (ice_is_safe_mode(pf)) { + ice_set_safe_mode_vlan_cfg(pf); + goto probe_done; + } /* initialize DDP driven features */ + /* Note: Flow director init failure is non-fatal to load */ + if (ice_init_fdir(pf)) + dev_err(dev, "could not initialize flow director\n"); + /* Note: DCB init failure is non-fatal to load */ if (ice_init_pf_dcb(pf, false)) { clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); @@ -2976,12 +3878,19 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) ice_cfg_lldp_mib_change(&pf->hw, true); } + /* print PCI link speed and width */ + pcie_print_link_status(pf->pdev); + +probe_done: + /* ready to go, so clear down state bit */ + clear_bit(__ICE_DOWN, pf->state); return 0; err_alloc_sw_unroll: + ice_devlink_destroy_port(pf); set_bit(__ICE_SERVICE_DIS, pf->state); set_bit(__ICE_DOWN, pf->state); - devm_kfree(&pf->pdev->dev, pf->first_sw); + devm_kfree(dev, pf->first_sw); err_msix_misc_unroll: ice_free_irq_msix_misc(pf); err_init_interrupt_unroll: @@ -2992,11 +3901,75 @@ err_init_pf_unroll: ice_deinit_pf(pf); ice_deinit_hw(hw); err_exit_unroll: + ice_devlink_unregister(pf); pci_disable_pcie_error_reporting(pdev); + pci_disable_device(pdev); return err; } /** + * ice_set_wake - enable or disable Wake on LAN + * @pf: pointer to the PF struct + * + * Simple helper for WoL control + */ +static void ice_set_wake(struct ice_pf *pf) +{ + 
struct ice_hw *hw = &pf->hw; + bool wol = pf->wol_ena; + + /* clear wake state, otherwise new wake events won't fire */ + wr32(hw, PFPM_WUS, U32_MAX); + + /* enable / disable APM wake up, no RMW needed */ + wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0); + + /* set magic packet filter enabled */ + wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0); +} + +/** + * ice_setup_magic_mc_wake - setup device to wake on multicast magic packet + * @pf: pointer to the PF struct + * + * Issue firmware command to enable multicast magic wake, making + * sure that any locally administered address (LAA) is used for + * wake, and that PF reset doesn't undo the LAA. + */ +static void ice_setup_mc_magic_wake(struct ice_pf *pf) +{ + struct device *dev = ice_pf_to_dev(pf); + struct ice_hw *hw = &pf->hw; + enum ice_status status; + u8 mac_addr[ETH_ALEN]; + struct ice_vsi *vsi; + u8 flags; + + if (!pf->wol_ena) + return; + + vsi = ice_get_main_vsi(pf); + if (!vsi) + return; + + /* Get current MAC address in case it's an LAA */ + if (vsi->netdev) + ether_addr_copy(mac_addr, vsi->netdev->dev_addr); + else + ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr); + + flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN | + ICE_AQC_MAN_MAC_UPDATE_LAA_WOL | + ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP; + + status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL); + if (status) + dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %s aq_err %s\n", + ice_stat_str(status), + ice_aq_str(hw->adminq.sq_last_status)); +} + +/** * ice_remove - Device removal routine * @pdev: PCI device information struct */ @@ -3011,12 +3984,21 @@ static void ice_remove(struct pci_dev *pdev) msleep(100); } + if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) { + set_bit(__ICE_VF_RESETS_DISABLED, pf->state); + ice_free_vfs(pf); + } + set_bit(__ICE_DOWN, pf->state); ice_service_task_stop(pf); - if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) - ice_free_vfs(pf); + mutex_destroy(&(&pf->hw)->fdir_fltr_lock); + if (!ice_is_safe_mode(pf)) + ice_remove_arfs(pf); + ice_setup_mc_magic_wake(pf); + ice_devlink_destroy_port(pf); ice_vsi_release_all(pf); + ice_set_wake(pf); ice_free_irq_msix_misc(pf); ice_for_each_vsi(pf, i) { if (!pf->vsi[i]) @@ -3025,16 +4007,242 @@ static void ice_remove(struct pci_dev *pdev) } ice_deinit_pf(pf); ice_deinit_hw(&pf->hw); - ice_clear_interrupt_scheme(pf); + ice_devlink_unregister(pf); + /* Issue a PFR as part of the prescribed driver unload flow. Do not * do it via ice_schedule_reset() since there is no need to rebuild * and the service task is already stopped. 
*/ ice_reset(&pf->hw, ICE_RESET_PFR); + pci_wait_for_pending_transaction(pdev); + ice_clear_interrupt_scheme(pf); pci_disable_pcie_error_reporting(pdev); + pci_disable_device(pdev); } /** + * ice_shutdown - PCI callback for shutting down device + * @pdev: PCI device information struct + */ +static void ice_shutdown(struct pci_dev *pdev) +{ + struct ice_pf *pf = pci_get_drvdata(pdev); + + ice_remove(pdev); + + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, pf->wol_ena); + pci_set_power_state(pdev, PCI_D3hot); + } +} + +#ifdef CONFIG_PM +/** + * ice_prepare_for_shutdown - prep for PCI shutdown + * @pf: board private structure + * + * Inform or close all dependent features in prep for PCI device shutdown + */ +static void ice_prepare_for_shutdown(struct ice_pf *pf) +{ + struct ice_hw *hw = &pf->hw; + u32 v; + + /* Notify VFs of impending reset */ + if (ice_check_sq_alive(hw, &hw->mailboxq)) + ice_vc_notify_reset(pf); + + dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n"); + + /* disable the VSIs and their queues that are not already DOWN */ + ice_pf_dis_all_vsi(pf, false); + + ice_for_each_vsi(pf, v) + if (pf->vsi[v]) + pf->vsi[v]->vsi_num = 0; + + ice_shutdown_all_ctrlq(hw); +} + +/** + * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme + * @pf: board private structure to reinitialize + * + * This routine reinitialize interrupt scheme that was cleared during + * power management suspend callback. + * + * This should be called during resume routine to re-allocate the q_vectors + * and reacquire interrupts. + */ +static int ice_reinit_interrupt_scheme(struct ice_pf *pf) +{ + struct device *dev = ice_pf_to_dev(pf); + int ret, v; + + /* Since we clear MSIX flag during suspend, we need to + * set it back during resume... + */ + + ret = ice_init_interrupt_scheme(pf); + if (ret) { + dev_err(dev, "Failed to re-initialize interrupt %d\n", ret); + return ret; + } + + /* Remap vectors and rings, after successful re-init interrupts */ + ice_for_each_vsi(pf, v) { + if (!pf->vsi[v]) + continue; + + ret = ice_vsi_alloc_q_vectors(pf->vsi[v]); + if (ret) + goto err_reinit; + ice_vsi_map_rings_to_vectors(pf->vsi[v]); + } + + ret = ice_req_irq_msix_misc(pf); + if (ret) { + dev_err(dev, "Setting up misc vector failed after device suspend %d\n", + ret); + goto err_reinit; + } + + return 0; + +err_reinit: + while (v--) + if (pf->vsi[v]) + ice_vsi_free_q_vectors(pf->vsi[v]); + + return ret; +} + +/** + * ice_suspend + * @dev: generic device information structure + * + * Power Management callback to quiesce the device and prepare + * for D3 transition. + */ +static int __maybe_unused ice_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct ice_pf *pf; + int disabled, v; + + pf = pci_get_drvdata(pdev); + + if (!ice_pf_state_is_nominal(pf)) { + dev_err(dev, "Device is not ready, no need to suspend it\n"); + return -EBUSY; + } + + /* Stop watchdog tasks until resume completion. + * Even though it is most likely that the service task is + * disabled if the device is suspended or down, the service task's + * state is controlled by a different state bit, and we should + * store and honor whatever state that bit is in at this point. 
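+ *
+ * The value returned by ice_service_task_stop() is kept in 'disabled' so
+ * that the early-return paths below only restart the task if it was
+ * actually running when suspend began.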
+ */ + disabled = ice_service_task_stop(pf); + + /* Already suspended?, then there is nothing to do */ + if (test_and_set_bit(__ICE_SUSPENDED, pf->state)) { + if (!disabled) + ice_service_task_restart(pf); + return 0; + } + + if (test_bit(__ICE_DOWN, pf->state) || + ice_is_reset_in_progress(pf->state)) { + dev_err(dev, "can't suspend device in reset or already down\n"); + if (!disabled) + ice_service_task_restart(pf); + return 0; + } + + ice_setup_mc_magic_wake(pf); + + ice_prepare_for_shutdown(pf); + + ice_set_wake(pf); + + /* Free vectors, clear the interrupt scheme and release IRQs + * for proper hibernation, especially with large number of CPUs. + * Otherwise hibernation might fail when mapping all the vectors back + * to CPU0. + */ + ice_free_irq_msix_misc(pf); + ice_for_each_vsi(pf, v) { + if (!pf->vsi[v]) + continue; + ice_vsi_free_q_vectors(pf->vsi[v]); + } + ice_clear_interrupt_scheme(pf); + + pci_save_state(pdev); + pci_wake_from_d3(pdev, pf->wol_ena); + pci_set_power_state(pdev, PCI_D3hot); + return 0; +} + +/** + * ice_resume - PM callback for waking up from D3 + * @dev: generic device information structure + */ +static int __maybe_unused ice_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + enum ice_reset_req reset_type; + struct ice_pf *pf; + struct ice_hw *hw; + int ret; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + pci_save_state(pdev); + + if (!pci_device_is_present(pdev)) + return -ENODEV; + + ret = pci_enable_device_mem(pdev); + if (ret) { + dev_err(dev, "Cannot enable device after suspend\n"); + return ret; + } + + pf = pci_get_drvdata(pdev); + hw = &pf->hw; + + pf->wakeup_reason = rd32(hw, PFPM_WUS); + ice_print_wake_reason(pf); + + /* We cleared the interrupt scheme when we suspended, so we need to + * restore it now to resume device functionality. 
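+ *
+ * Only the interrupt scheme and MSI-X vectors are restored here; the VSIs
+ * themselves are brought back by the PF reset scheduled just below.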
+ */ + ret = ice_reinit_interrupt_scheme(pf); + if (ret) + dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret); + + clear_bit(__ICE_DOWN, pf->state); + /* Now perform PF reset and rebuild */ + reset_type = ICE_RESET_PFR; + /* re-enable service task for reset, but allow reset to schedule it */ + clear_bit(__ICE_SERVICE_DIS, pf->state); + + if (ice_schedule_reset(pf, reset_type)) + dev_err(dev, "Reset during resume failed.\n"); + + clear_bit(__ICE_SUSPENDED, pf->state); + ice_service_task_restart(pf); + + /* Restart the service task */ + mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); + + return 0; +} +#endif /* CONFIG_PM */ + +/** * ice_pci_err_detected - warning that PCI error has been detected * @pdev: PCI device information struct * @err: the type of PCI error @@ -3081,8 +4289,7 @@ static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev) err = pci_enable_device_mem(pdev); if (err) { - dev_err(&pdev->dev, - "Cannot re-enable PCI device after reset, error %d\n", + dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n", err); result = PCI_ERS_RESULT_DISCONNECT; } else { @@ -3101,8 +4308,7 @@ static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev) err = pci_cleanup_aer_uncorrect_error_status(pdev); if (err) - dev_dbg(&pdev->dev, - "pci_cleanup_aer_uncorrect_error_status failed, error %d\n", + dev_dbg(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status failed, error %d\n", err); /* non-fatal, continue */ @@ -3121,8 +4327,8 @@ static void ice_pci_err_resume(struct pci_dev *pdev) struct ice_pf *pf = pci_get_drvdata(pdev); if (!pf) { - dev_err(&pdev->dev, - "%s failed, device is unrecoverable\n", __func__); + dev_err(&pdev->dev, "%s failed, device is unrecoverable\n", + __func__); return; } @@ -3132,6 +4338,8 @@ static void ice_pci_err_resume(struct pci_dev *pdev) return; } + ice_restore_all_vfs_msi_state(pdev); + ice_do_reset(pf, ICE_RESET_PFR); ice_service_task_restart(pf); mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); @@ -3176,11 +4384,33 @@ static const struct pci_device_id ice_pci_tbl[] = { { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 }, { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 }, { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP), 0 }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T), 0 }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII), 0 }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE), 0 }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP), 0 }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP), 0 }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T), 0 }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII), 0 }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE), 0 }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP), 0 }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T), 0 }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII), 0 }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE), 0 }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP), 0 }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP), 0 }, /* required last entry */ { 0, } }; MODULE_DEVICE_TABLE(pci, ice_pci_tbl); +static __maybe_unused SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume); + static const struct 
pci_error_handlers ice_pci_err_handler = { .error_detected = ice_pci_err_detected, .slot_reset = ice_pci_err_slot_reset, @@ -3194,6 +4424,10 @@ static struct pci_driver ice_driver = { .id_table = ice_pci_tbl, .probe = ice_probe, .remove = ice_remove, +#ifdef CONFIG_PM + .driver.pm = &ice_pm_ops, +#endif /* CONFIG_PM */ + .shutdown = ice_shutdown, .sriov_configure = ice_sriov_configure, .err_handler = &ice_pci_err_handler }; @@ -3211,7 +4445,7 @@ static int __init ice_module_init(void) pr_info("%s - version %s\n", ice_driver_string, ice_drv_ver); pr_info("%s\n", ice_copyright); - ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME); + ice_wq = alloc_workqueue("%s", 0, 0, KBUILD_MODNAME); if (!ice_wq) { pr_err("Failed to create workqueue\n"); return -ENOMEM; @@ -3277,25 +4511,24 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi) return -EBUSY; } - /* When we change the MAC address we also have to change the MAC address - * based filter rules that were created previously for the old MAC - * address. So first, we remove the old filter rule using ice_remove_mac - * and then create a new filter rule using ice_add_mac via - * ice_vsi_cfg_mac_fltr function call for both add and/or remove - * filters. - */ - status = ice_vsi_cfg_mac_fltr(vsi, netdev->dev_addr, false); - if (status) { + /* Clean up old MAC filter. Not an error if old filter doesn't exist */ + status = ice_fltr_remove_mac(vsi, netdev->dev_addr, ICE_FWD_TO_VSI); + if (status && status != ICE_ERR_DOES_NOT_EXIST) { err = -EADDRNOTAVAIL; goto err_update_filters; } - status = ice_vsi_cfg_mac_fltr(vsi, mac, true); - if (status) { - err = -EADDRNOTAVAIL; - goto err_update_filters; + /* Add filter for new MAC. If filter exists, just return success */ + status = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI); + if (status == ICE_ERR_ALREADY_EXISTS) { + netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac); + return 0; } + /* error if the new filter addition failed */ + if (status) + err = -EADDRNOTAVAIL; + err_update_filters: if (err) { netdev_err(netdev, "can't set MAC %pM. filter update failed\n", @@ -3312,8 +4545,8 @@ err_update_filters: flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL; status = ice_aq_manage_mac_write(hw, mac, flags, NULL); if (status) { - netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %d\n", - mac, status); + netdev_err(netdev, "can't set MAC %pM. 
write to firmware failed error %s\n", + mac, ice_stat_str(status)); } return 0; } @@ -3345,6 +4578,47 @@ static void ice_set_rx_mode(struct net_device *netdev) } /** + * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate + * @netdev: network interface device structure + * @queue_index: Queue ID + * @maxrate: maximum bandwidth in Mbps + */ +static int +ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_vsi *vsi = np->vsi; + enum ice_status status; + u16 q_handle; + u8 tc; + + /* Validate maxrate requested is within permitted range */ + if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) { + netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n", + maxrate, queue_index); + return -EINVAL; + } + + q_handle = vsi->tx_rings[queue_index]->q_handle; + tc = ice_dcb_get_tc(vsi, queue_index); + + /* Set BW back to default, when user set maxrate to 0 */ + if (!maxrate) + status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc, + q_handle, ICE_MAX_BW); + else + status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc, + q_handle, ICE_MAX_BW, maxrate * 1000); + if (status) { + netdev_err(netdev, "Unable to set Tx max rate, error %s\n", + ice_stat_str(status)); + return -EIO; + } + + return 0; +} + +/** * ice_fdb_add - add an entry to the hardware database * @ndm: the input from the stack * @tb: pointer to array of nladdr (unused) @@ -3424,15 +4698,21 @@ ice_set_features(struct net_device *netdev, netdev_features_t features) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; + struct ice_pf *pf = vsi->back; int ret = 0; /* Don't set any netdev advanced features with device in Safe Mode */ if (ice_is_safe_mode(vsi->back)) { - dev_err(&vsi->back->pdev->dev, - "Device is in Safe Mode - not enabling advanced netdev features\n"); + dev_err(ice_pf_to_dev(vsi->back), "Device is in Safe Mode - not enabling advanced netdev features\n"); return ret; } + /* Do not change setting during reset */ + if (ice_is_reset_in_progress(pf->state)) { + dev_err(ice_pf_to_dev(vsi->back), "Device is resetting, changing advanced netdev features temporarily unavailable.\n"); + return -EBUSY; + } + /* Multiple features can be changed in one call so keep features in * separate if/else statements to guarantee each feature is checked */ @@ -3463,6 +4743,16 @@ ice_set_features(struct net_device *netdev, netdev_features_t features) (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) ret = ice_cfg_vlan_pruning(vsi, false, false); + if ((features & NETIF_F_NTUPLE) && + !(netdev->features & NETIF_F_NTUPLE)) { + ice_vsi_manage_fdir(vsi, true); + ice_init_arfs(vsi); + } else if (!(features & NETIF_F_NTUPLE) && + (netdev->features & NETIF_F_NTUPLE)) { + ice_vsi_manage_fdir(vsi, false); + ice_clear_arfs(vsi); + } + return ret; } @@ -3492,17 +4782,18 @@ int ice_vsi_cfg(struct ice_vsi *vsi) { int err; - if (vsi->netdev) { + if (vsi->netdev && vsi->type == ICE_VSI_PF) { ice_set_rx_mode(vsi->netdev); err = ice_vsi_vlan_setup(vsi); - if (err) return err; } ice_vsi_cfg_dcb_rings(vsi); err = ice_vsi_cfg_lan_txqs(vsi); + if (!err && ice_is_xdp_ena_vsi(vsi)) + err = ice_vsi_cfg_xdp_txqs(vsi); if (!err) err = ice_vsi_cfg_rxqs(vsi); @@ -3545,7 +4836,7 @@ static int ice_up_complete(struct ice_vsi *vsi) * Tx queue group list was configured and the context bits were * programmed using ice_vsi_cfg_txqs */ - err = ice_vsi_start_rx_rings(vsi); + err = ice_vsi_start_all_rx_rings(vsi); if (err) return 
err; @@ -3555,16 +4846,19 @@ static int ice_up_complete(struct ice_vsi *vsi) if (vsi->port_info && (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) && - vsi->netdev) { + vsi->netdev && vsi->type == ICE_VSI_PF) { ice_print_link_msg(vsi, true); netif_tx_start_all_queues(vsi->netdev); netif_carrier_on(vsi->netdev); } - /* clear this now, and the first stats read will be used as baseline */ - vsi->stat_offsets_loaded = false; + /* Perform an initial read of the statistics registers now to + * set the baseline so counters are ready when interface is up + */ + ice_update_eth_stats(vsi); - ice_service_task_schedule(pf); + if (vsi->type == ICE_VSI_PF) + ice_service_task_schedule(pf); return 0; } @@ -3610,6 +4904,33 @@ ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, u64 *bytes) } /** + * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters + * @vsi: the VSI to be updated + * @rings: rings to work on + * @count: number of rings + */ +static void +ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_ring **rings, + u16 count) +{ + struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats; + u16 i; + + for (i = 0; i < count; i++) { + struct ice_ring *ring; + u64 pkts, bytes; + + ring = READ_ONCE(rings[i]); + ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes); + vsi_stats->tx_packets += pkts; + vsi_stats->tx_bytes += bytes; + vsi->tx_restart += ring->tx_stats.restart_q; + vsi->tx_busy += ring->tx_stats.tx_busy; + vsi->tx_linearize += ring->tx_stats.tx_linearize; + } +} + +/** * ice_update_vsi_ring_stats - Update VSI stats counters * @vsi: the VSI to be updated */ @@ -3632,19 +4953,12 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi) vsi->tx_linearize = 0; vsi->rx_buf_failed = 0; vsi->rx_page_failed = 0; + vsi->rx_gro_dropped = 0; rcu_read_lock(); /* update Tx rings counters */ - ice_for_each_txq(vsi, i) { - ring = READ_ONCE(vsi->tx_rings[i]); - ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes); - vsi_stats->tx_packets += pkts; - vsi_stats->tx_bytes += bytes; - vsi->tx_restart += ring->tx_stats.restart_q; - vsi->tx_busy += ring->tx_stats.tx_busy; - vsi->tx_linearize += ring->tx_stats.tx_linearize; - } + ice_update_vsi_tx_ring_stats(vsi, vsi->tx_rings, vsi->num_txq); /* update Rx rings counters */ ice_for_each_rxq(vsi, i) { @@ -3654,8 +4968,14 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi) vsi_stats->rx_bytes += bytes; vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed; vsi->rx_page_failed += ring->rx_stats.alloc_page_failed; + vsi->rx_gro_dropped += ring->rx_stats.gro_dropped; } + /* update XDP Tx rings counters */ + if (ice_is_xdp_ena_vsi(vsi)) + ice_update_vsi_tx_ring_stats(vsi, vsi->xdp_rings, + vsi->num_xdp_txq); + rcu_read_unlock(); } @@ -3680,7 +5000,7 @@ void ice_update_vsi_stats(struct ice_vsi *vsi) ice_update_eth_stats(vsi); cur_ns->tx_errors = cur_es->tx_errors; - cur_ns->rx_dropped = cur_es->rx_discards; + cur_ns->rx_dropped = cur_es->rx_discards + vsi->rx_gro_dropped; cur_ns->tx_dropped = cur_es->tx_discards; cur_ns->multicast = cur_es->rx_multicast; @@ -3688,7 +5008,13 @@ void ice_update_vsi_stats(struct ice_vsi *vsi) if (vsi->type == ICE_VSI_PF) { cur_ns->rx_crc_errors = pf->stats.crc_errors; cur_ns->rx_errors = pf->stats.crc_errors + - pf->stats.illegal_bytes; + pf->stats.illegal_bytes + + pf->stats.rx_len_errors + + pf->stats.rx_undersize + + pf->hw_csum_rx_error + + pf->stats.rx_jabber + + pf->stats.rx_fragments + + pf->stats.rx_oversize; cur_ns->rx_length_errors = pf->stats.rx_len_errors; /* record drops from the 
port level */ cur_ns->rx_missed_errors = pf->stats.eth.rx_discards; @@ -3703,6 +5029,7 @@ void ice_update_pf_stats(struct ice_pf *pf) { struct ice_hw_port_stats *prev_ps, *cur_ps; struct ice_hw *hw = &pf->hw; + u16 fd_ctr_base; u8 port; port = hw->port_info->lport; @@ -3791,6 +5118,12 @@ void ice_update_pf_stats(struct ice_pf *pf) ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded, &prev_ps->tx_size_big, &cur_ps->tx_size_big); + fd_ctr_base = hw->fd_ctr_base; + + ice_stat_update40(hw, + GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)), + pf->stat_prev_loaded, &prev_ps->fd_sb_match, + &cur_ps->fd_sb_match); ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded, &prev_ps->link_xon_rx, &cur_ps->link_xon_rx); @@ -3834,6 +5167,8 @@ void ice_update_pf_stats(struct ice_pf *pf) ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded, &prev_ps->rx_jabber, &cur_ps->rx_jabber); + cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0; + pf->stat_prev_loaded = true; } @@ -3918,14 +5253,18 @@ int ice_down(struct ice_vsi *vsi) tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0); if (tx_err) - netdev_err(vsi->netdev, - "Failed stop Tx rings, VSI %d error %d\n", + netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n", vsi->vsi_num, tx_err); + if (!tx_err && ice_is_xdp_ena_vsi(vsi)) { + tx_err = ice_vsi_stop_xdp_tx_rings(vsi); + if (tx_err) + netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n", + vsi->vsi_num, tx_err); + } - rx_err = ice_vsi_stop_rx_rings(vsi); + rx_err = ice_vsi_stop_all_rx_rings(vsi); if (rx_err) - netdev_err(vsi->netdev, - "Failed stop Rx rings, VSI %d error %d\n", + netdev_err(vsi->netdev, "Failed stop Rx rings, VSI %d error %d\n", vsi->vsi_num, rx_err); ice_napi_disable_all(vsi); @@ -3933,8 +5272,7 @@ int ice_down(struct ice_vsi *vsi) if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) { link_err = ice_force_phys_link_state(vsi, false); if (link_err) - netdev_err(vsi->netdev, - "Failed to set physical link down, VSI %d error %d\n", + netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n", vsi->vsi_num, link_err); } @@ -3945,8 +5283,7 @@ int ice_down(struct ice_vsi *vsi) ice_clean_rx_ring(vsi->rx_rings[i]); if (tx_err || rx_err || link_err) { - netdev_err(vsi->netdev, - "Failed to close VSI 0x%04X on switch 0x%04X\n", + netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n", vsi->vsi_num, vsi->vsw->sw_id); return -EIO; } @@ -3965,7 +5302,7 @@ int ice_vsi_setup_tx_rings(struct ice_vsi *vsi) int i, err = 0; if (!vsi->num_txq) { - dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Tx queues\n", + dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n", vsi->vsi_num); return -EINVAL; } @@ -3996,7 +5333,7 @@ int ice_vsi_setup_rx_rings(struct ice_vsi *vsi) int i, err = 0; if (!vsi->num_rxq) { - dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Rx queues\n", + dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n", vsi->vsi_num); return -EINVAL; } @@ -4017,6 +5354,62 @@ int ice_vsi_setup_rx_rings(struct ice_vsi *vsi) } /** + * ice_vsi_open_ctrl - open control VSI for use + * @vsi: the VSI to open + * + * Initialization of the Control VSI + * + * Returns 0 on success, negative value on error + */ +int ice_vsi_open_ctrl(struct ice_vsi *vsi) +{ + char int_name[ICE_INT_NAME_STR_LEN]; + struct ice_pf *pf = vsi->back; + struct device *dev; + int err; + + dev = ice_pf_to_dev(pf); + /* allocate descriptors */ + err = ice_vsi_setup_tx_rings(vsi); + if (err) + goto err_setup_tx; 
+ + err = ice_vsi_setup_rx_rings(vsi); + if (err) + goto err_setup_rx; + + err = ice_vsi_cfg(vsi); + if (err) + goto err_setup_rx; + + snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl", + dev_driver_string(dev), dev_name(dev)); + err = ice_vsi_req_irq_msix(vsi, int_name); + if (err) + goto err_setup_rx; + + ice_vsi_cfg_msix(vsi); + + err = ice_vsi_start_all_rx_rings(vsi); + if (err) + goto err_up_complete; + + clear_bit(__ICE_DOWN, vsi->state); + ice_vsi_ena_irq(vsi); + + return 0; + +err_up_complete: + ice_down(vsi); +err_setup_rx: + ice_vsi_free_rx_rings(vsi); +err_setup_tx: + ice_vsi_free_tx_rings(vsi); + + return err; +} + +/** * ice_vsi_open - Called when a network interface is made active * @vsi: the VSI to open * @@ -4044,7 +5437,7 @@ static int ice_vsi_open(struct ice_vsi *vsi) goto err_setup_rx; snprintf(int_name, sizeof(int_name) - 1, "%s-%s", - dev_driver_string(&pf->pdev->dev), vsi->netdev->name); + dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name); err = ice_vsi_req_irq_msix(vsi, int_name); if (err) goto err_setup_rx; @@ -4093,61 +5486,12 @@ static void ice_vsi_release_all(struct ice_pf *pf) err = ice_vsi_release(pf->vsi[i]); if (err) - dev_dbg(&pf->pdev->dev, - "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n", + dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n", i, err, pf->vsi[i]->vsi_num); } } /** - * ice_ena_vsi - resume a VSI - * @vsi: the VSI being resume - * @locked: is the rtnl_lock already held - */ -static int ice_ena_vsi(struct ice_vsi *vsi, bool locked) -{ - int err = 0; - - if (!test_bit(__ICE_NEEDS_RESTART, vsi->state)) - return 0; - - clear_bit(__ICE_NEEDS_RESTART, vsi->state); - - if (vsi->netdev && vsi->type == ICE_VSI_PF) { - if (netif_running(vsi->netdev)) { - if (!locked) - rtnl_lock(); - - err = ice_open(vsi->netdev); - - if (!locked) - rtnl_unlock(); - } - } - - return err; -} - -/** - * ice_pf_ena_all_vsi - Resume all VSIs on a PF - * @pf: the PF - * @locked: is the rtnl_lock already held - */ -#ifdef CONFIG_DCB -int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked) -{ - int v; - - ice_for_each_vsi(pf, v) - if (pf->vsi[v]) - if (ice_ena_vsi(pf->vsi[v], locked)) - return -EIO; - - return 0; -} -#endif /* CONFIG_DCB */ - -/** * ice_vsi_rebuild_by_type - Rebuild VSI of a given type * @pf: pointer to the PF instance * @type: VSI type to rebuild @@ -4156,6 +5500,7 @@ int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked) */ static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type) { + struct device *dev = ice_pf_to_dev(pf); enum ice_status status; int i, err; @@ -4166,20 +5511,19 @@ static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type) continue; /* rebuild the VSI */ - err = ice_vsi_rebuild(vsi); + err = ice_vsi_rebuild(vsi, true); if (err) { - dev_err(&pf->pdev->dev, - "rebuild VSI failed, err %d, VSI index %d, type %d\n", - err, vsi->idx, type); + dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n", + err, vsi->idx, ice_vsi_type_str(type)); return err; } /* replay filters for the VSI */ status = ice_replay_vsi(&pf->hw, vsi->idx); if (status) { - dev_err(&pf->pdev->dev, - "replay VSI failed, status %d, VSI index %d, type %d\n", - status, vsi->idx, type); + dev_err(dev, "replay VSI failed, status %s, VSI index %d, type %s\n", + ice_stat_str(status), vsi->idx, + ice_vsi_type_str(type)); return -EIO; } @@ -4191,14 +5535,13 @@ static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type) /* enable the VSI */ err = ice_ena_vsi(vsi, 
false); if (err) { - dev_err(&pf->pdev->dev, - "enable VSI failed, err %d, VSI index %d, type %d\n", - err, vsi->idx, type); + dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n", + err, vsi->idx, ice_vsi_type_str(type)); return err; } - dev_info(&pf->pdev->dev, "VSI rebuilt. VSI index %d, type %d\n", - vsi->idx, type); + dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx, + ice_vsi_type_str(type)); } return 0; @@ -4234,10 +5577,15 @@ static void ice_update_pf_netdev_link(struct ice_pf *pf) * ice_rebuild - rebuild after reset * @pf: PF to rebuild * @reset_type: type of reset + * + * Do not rebuild VF VSI in this flow because that is already handled via + * ice_reset_all_vfs(). This is because requirements for resetting a VF after a + * PFR/CORER/GLOBER/etc. are different than the normal flow. Also, we don't want + * to reset/rebuild all the VF VSI twice. */ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) { - struct device *dev = &pf->pdev->dev; + struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; enum ice_status ret; int err; @@ -4249,7 +5597,8 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) ret = ice_init_all_ctrlq(hw); if (ret) { - dev_err(dev, "control queues init failed %d\n", ret); + dev_err(dev, "control queues init failed %s\n", + ice_stat_str(ret)); goto err_init_ctrlq; } @@ -4265,15 +5614,28 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) ret = ice_clear_pf_cfg(hw); if (ret) { - dev_err(dev, "clear PF configuration failed %d\n", ret); + dev_err(dev, "clear PF configuration failed %s\n", + ice_stat_str(ret)); goto err_init_ctrlq; } + if (pf->first_sw->dflt_vsi_ena) + dev_info(dev, "Clearing default VSI, re-enable after reset completes\n"); + /* clear the default VSI configuration if it exists */ + pf->first_sw->dflt_vsi = NULL; + pf->first_sw->dflt_vsi_ena = false; + ice_clear_pxe_mode(hw); ret = ice_get_caps(hw); if (ret) { - dev_err(dev, "ice_get_caps failed %d\n", ret); + dev_err(dev, "ice_get_caps failed %s\n", ice_stat_str(ret)); + goto err_init_ctrlq; + } + + ret = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL); + if (ret) { + dev_err(dev, "set_mac_cfg failed %s\n", ice_stat_str(ret)); goto err_init_ctrlq; } @@ -4281,10 +5643,6 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) if (err) goto err_sched_init_port; - err = ice_update_link_info(hw->port_info); - if (err) - dev_err(&pf->pdev->dev, "Get link status error %d\n", err); - /* start misc vector */ err = ice_req_irq_msix_misc(pf); if (err) { @@ -4292,6 +5650,21 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) goto err_sched_init_port; } + if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { + wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M); + if (!rd32(hw, PFQF_FD_SIZE)) { + u16 unused, guar, b_effort; + + guar = hw->func_caps.fd_fltr_guar; + b_effort = hw->func_caps.fd_fltr_best_effort; + + /* force guaranteed filter pool for PF */ + ice_alloc_fd_guar_item(hw, &unused, guar); + /* force shared filter pool for PF */ + ice_alloc_fd_shrd_item(hw, &unused, b_effort); + } + } + if (test_bit(ICE_FLAG_DCB_ENA, pf->flags)) ice_dcb_rebuild(pf); @@ -4302,12 +5675,22 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) goto err_vsi_rebuild; } - if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) { - err = ice_vsi_rebuild_by_type(pf, ICE_VSI_VF); + /* If Flow Director is active */ + if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { + err = 
ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL); if (err) { - dev_err(dev, "VF VSI rebuild failed: %d\n", err); + dev_err(dev, "control VSI rebuild failed: %d\n", err); goto err_vsi_rebuild; } + + /* replay HW Flow Director recipes */ + if (hw->fdir_prof) + ice_fdir_replay_flows(hw); + + /* replay Flow Director filters */ + ice_fdir_replay_fltrs(pf); + + ice_rebuild_arfs(pf); } ice_update_pf_netdev_link(pf); @@ -4315,9 +5698,8 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) /* tell the firmware we are up */ ret = ice_send_version(pf); if (ret) { - dev_err(dev, - "Rebuild failed due to error sending driver version: %d\n", - ret); + dev_err(dev, "Rebuild failed due to error sending driver version: %s\n", + ice_stat_str(ret)); goto err_vsi_rebuild; } @@ -4340,6 +5722,18 @@ clear_recovery: } /** + * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP + * @vsi: Pointer to VSI structure + */ +static int ice_max_xdp_frame_size(struct ice_vsi *vsi) +{ + if (PAGE_SIZE >= 8192 || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) + return ICE_RXBUF_2048 - XDP_PACKET_HEADROOM; + else + return ICE_RXBUF_3072; +} + +/** * ice_change_mtu - NDO callback to change the MTU * @netdev: network interface device structure * @new_mtu: new value for maximum frame size @@ -4353,16 +5747,26 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu) struct ice_pf *pf = vsi->back; u8 count = 0; - if (new_mtu == netdev->mtu) { + if (new_mtu == (int)netdev->mtu) { netdev_warn(netdev, "MTU is already %u\n", netdev->mtu); return 0; } - if (new_mtu < netdev->min_mtu) { + if (ice_is_xdp_ena_vsi(vsi)) { + int frame_size = ice_max_xdp_frame_size(vsi); + + if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) { + netdev_err(netdev, "max MTU for XDP usage is %d\n", + frame_size - ICE_ETH_PKT_HDR_PAD); + return -EINVAL; + } + } + + if (new_mtu < (int)netdev->min_mtu) { netdev_err(netdev, "new MTU invalid. min_mtu is %d\n", netdev->min_mtu); return -EINVAL; - } else if (new_mtu > netdev->max_mtu) { + } else if (new_mtu > (int)netdev->max_mtu) { netdev_err(netdev, "new MTU invalid. 
max_mtu is %d\n", netdev->min_mtu); return -EINVAL; @@ -4383,7 +5787,7 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu) return -EBUSY; } - netdev->mtu = new_mtu; + netdev->mtu = (unsigned int)new_mtu; /* if VSI is up, bring it down and then back up */ if (!test_and_set_bit(__ICE_DOWN, vsi->state)) { @@ -4402,11 +5806,123 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu) } } - netdev_info(netdev, "changed MTU to %d\n", new_mtu); + netdev_dbg(netdev, "changed MTU to %d\n", new_mtu); return 0; } /** + * ice_aq_str - convert AQ err code to a string + * @aq_err: the AQ error code to convert + */ +const char *ice_aq_str(enum ice_aq_err aq_err) +{ + switch (aq_err) { + case ICE_AQ_RC_OK: + return "OK"; + case ICE_AQ_RC_EPERM: + return "ICE_AQ_RC_EPERM"; + case ICE_AQ_RC_ENOENT: + return "ICE_AQ_RC_ENOENT"; + case ICE_AQ_RC_ENOMEM: + return "ICE_AQ_RC_ENOMEM"; + case ICE_AQ_RC_EBUSY: + return "ICE_AQ_RC_EBUSY"; + case ICE_AQ_RC_EEXIST: + return "ICE_AQ_RC_EEXIST"; + case ICE_AQ_RC_EINVAL: + return "ICE_AQ_RC_EINVAL"; + case ICE_AQ_RC_ENOSPC: + return "ICE_AQ_RC_ENOSPC"; + case ICE_AQ_RC_ENOSYS: + return "ICE_AQ_RC_ENOSYS"; + case ICE_AQ_RC_EMODE: + return "ICE_AQ_RC_EMODE"; + case ICE_AQ_RC_ENOSEC: + return "ICE_AQ_RC_ENOSEC"; + case ICE_AQ_RC_EBADSIG: + return "ICE_AQ_RC_EBADSIG"; + case ICE_AQ_RC_ESVN: + return "ICE_AQ_RC_ESVN"; + case ICE_AQ_RC_EBADMAN: + return "ICE_AQ_RC_EBADMAN"; + case ICE_AQ_RC_EBADBUF: + return "ICE_AQ_RC_EBADBUF"; + } + + return "ICE_AQ_RC_UNKNOWN"; +} + +/** + * ice_stat_str - convert status err code to a string + * @stat_err: the status error code to convert + */ +const char *ice_stat_str(enum ice_status stat_err) +{ + switch (stat_err) { + case ICE_SUCCESS: + return "OK"; + case ICE_ERR_PARAM: + return "ICE_ERR_PARAM"; + case ICE_ERR_NOT_IMPL: + return "ICE_ERR_NOT_IMPL"; + case ICE_ERR_NOT_READY: + return "ICE_ERR_NOT_READY"; + case ICE_ERR_NOT_SUPPORTED: + return "ICE_ERR_NOT_SUPPORTED"; + case ICE_ERR_BAD_PTR: + return "ICE_ERR_BAD_PTR"; + case ICE_ERR_INVAL_SIZE: + return "ICE_ERR_INVAL_SIZE"; + case ICE_ERR_DEVICE_NOT_SUPPORTED: + return "ICE_ERR_DEVICE_NOT_SUPPORTED"; + case ICE_ERR_RESET_FAILED: + return "ICE_ERR_RESET_FAILED"; + case ICE_ERR_FW_API_VER: + return "ICE_ERR_FW_API_VER"; + case ICE_ERR_NO_MEMORY: + return "ICE_ERR_NO_MEMORY"; + case ICE_ERR_CFG: + return "ICE_ERR_CFG"; + case ICE_ERR_OUT_OF_RANGE: + return "ICE_ERR_OUT_OF_RANGE"; + case ICE_ERR_ALREADY_EXISTS: + return "ICE_ERR_ALREADY_EXISTS"; + case ICE_ERR_NVM_CHECKSUM: + return "ICE_ERR_NVM_CHECKSUM"; + case ICE_ERR_BUF_TOO_SHORT: + return "ICE_ERR_BUF_TOO_SHORT"; + case ICE_ERR_NVM_BLANK_MODE: + return "ICE_ERR_NVM_BLANK_MODE"; + case ICE_ERR_IN_USE: + return "ICE_ERR_IN_USE"; + case ICE_ERR_MAX_LIMIT: + return "ICE_ERR_MAX_LIMIT"; + case ICE_ERR_RESET_ONGOING: + return "ICE_ERR_RESET_ONGOING"; + case ICE_ERR_HW_TABLE: + return "ICE_ERR_HW_TABLE"; + case ICE_ERR_DOES_NOT_EXIST: + return "ICE_ERR_DOES_NOT_EXIST"; + case ICE_ERR_FW_DDP_MISMATCH: + return "ICE_ERR_FW_DDP_MISMATCH"; + case ICE_ERR_AQ_ERROR: + return "ICE_ERR_AQ_ERROR"; + case ICE_ERR_AQ_TIMEOUT: + return "ICE_ERR_AQ_TIMEOUT"; + case ICE_ERR_AQ_FULL: + return "ICE_ERR_AQ_FULL"; + case ICE_ERR_AQ_NO_WORK: + return "ICE_ERR_AQ_NO_WORK"; + case ICE_ERR_AQ_EMPTY: + return "ICE_ERR_AQ_EMPTY"; + case ICE_ERR_AQ_FW_CRITICAL: + return "ICE_ERR_AQ_FW_CRITICAL"; + } + + return "ICE_ERR_UNKNOWN"; +} + +/** * ice_set_rss - Set RSS keys and lut * @vsi: Pointer to VSI structure * @seed: RSS hash seed @@ 
-4420,7 +5936,9 @@ int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; enum ice_status status; + struct device *dev; + dev = ice_pf_to_dev(pf); if (seed) { struct ice_aqc_get_set_rss_keys *buf = (struct ice_aqc_get_set_rss_keys *)seed; @@ -4428,9 +5946,9 @@ int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) status = ice_aq_set_rss_key(hw, vsi->idx, buf); if (status) { - dev_err(&pf->pdev->dev, - "Cannot set RSS key, err %d aq_err %d\n", - status, hw->adminq.rq_last_status); + dev_err(dev, "Cannot set RSS key, err %s aq_err %s\n", + ice_stat_str(status), + ice_aq_str(hw->adminq.sq_last_status)); return -EIO; } } @@ -4439,9 +5957,9 @@ int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) status = ice_aq_set_rss_lut(hw, vsi->idx, vsi->rss_lut_type, lut, lut_size); if (status) { - dev_err(&pf->pdev->dev, - "Cannot set RSS lut, err %d aq_err %d\n", - status, hw->adminq.rq_last_status); + dev_err(dev, "Cannot set RSS lut, err %s aq_err %s\n", + ice_stat_str(status), + ice_aq_str(hw->adminq.sq_last_status)); return -EIO; } } @@ -4463,16 +5981,18 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; enum ice_status status; + struct device *dev; + dev = ice_pf_to_dev(pf); if (seed) { struct ice_aqc_get_set_rss_keys *buf = (struct ice_aqc_get_set_rss_keys *)seed; status = ice_aq_get_rss_key(hw, vsi->idx, buf); if (status) { - dev_err(&pf->pdev->dev, - "Cannot get RSS key, err %d aq_err %d\n", - status, hw->adminq.rq_last_status); + dev_err(dev, "Cannot get RSS key, err %s aq_err %s\n", + ice_stat_str(status), + ice_aq_str(hw->adminq.sq_last_status)); return -EIO; } } @@ -4481,9 +6001,9 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) status = ice_aq_get_rss_lut(hw, vsi->idx, vsi->rss_lut_type, lut, lut_size); if (status) { - dev_err(&pf->pdev->dev, - "Cannot get RSS lut, err %d aq_err %d\n", - status, hw->adminq.rq_last_status); + dev_err(dev, "Cannot get RSS lut, err %s aq_err %s\n", + ice_stat_str(status), + ice_aq_str(hw->adminq.sq_last_status)); return -EIO; } } @@ -4526,7 +6046,6 @@ ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, */ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode) { - struct device *dev = &vsi->back->pdev->dev; struct ice_aqc_vsi_props *vsi_props; struct ice_hw *hw = &vsi->back->hw; struct ice_vsi_ctx *ctxt; @@ -4535,7 +6054,7 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode) vsi_props = &vsi->info; - ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL); + ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); if (!ctxt) return -ENOMEM; @@ -4551,8 +6070,9 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode) status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); if (status) { - dev_err(dev, "update VSI for bridge mode failed, bmode = %d err %d aq_err %d\n", - bmode, status, hw->adminq.sq_last_status); + dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %s aq_err %s\n", + bmode, ice_stat_str(status), + ice_aq_str(hw->adminq.sq_last_status)); ret = -EIO; goto out; } @@ -4560,7 +6080,7 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode) vsi_props->sw_flags = ctxt->info.sw_flags; out: - devm_kfree(dev, ctxt); + kfree(ctxt); return ret; } @@ -4592,6 +6112,8 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, pf_sw = pf->first_sw; /* find the 
attribute in the netlink message */ br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); + if (!br_spec) + return -EINVAL; nla_for_each_nested(attr, br_spec, rem) { __u16 mode; @@ -4621,8 +6143,9 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, */ status = ice_update_sw_rule_bridge_mode(hw); if (status) { - netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %d\n", - mode, status, hw->adminq.sq_last_status); + netdev_err(dev, "switch rule update failed, mode = %d err %s aq_err %s\n", + mode, ice_stat_str(status), + ice_aq_str(hw->adminq.sq_last_status)); /* revert hw->evb_veb */ hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB); return -EIO; @@ -4725,6 +6248,70 @@ static void ice_tx_timeout(struct net_device *netdev) } /** + * ice_udp_tunnel_add - Get notifications about UDP tunnel ports that come up + * @netdev: This physical port's netdev + * @ti: Tunnel endpoint information + */ +static void +ice_udp_tunnel_add(struct net_device *netdev, struct udp_tunnel_info *ti) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_vsi *vsi = np->vsi; + struct ice_pf *pf = vsi->back; + enum ice_tunnel_type tnl_type; + u16 port = ntohs(ti->port); + enum ice_status status; + + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + tnl_type = TNL_VXLAN; + break; + case UDP_TUNNEL_TYPE_GENEVE: + tnl_type = TNL_GENEVE; + break; + default: + netdev_err(netdev, "Unknown tunnel type\n"); + return; + } + + status = ice_create_tunnel(&pf->hw, tnl_type, port); + if (status == ICE_ERR_OUT_OF_RANGE) + netdev_info(netdev, "Max tunneled UDP ports reached, port %d not added\n", + port); + else if (status) + netdev_err(netdev, "Error adding UDP tunnel - %s\n", + ice_stat_str(status)); +} + +/** + * ice_udp_tunnel_del - Get notifications about UDP tunnel ports that go away + * @netdev: This physical port's netdev + * @ti: Tunnel endpoint information + */ +static void +ice_udp_tunnel_del(struct net_device *netdev, struct udp_tunnel_info *ti) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_vsi *vsi = np->vsi; + struct ice_pf *pf = vsi->back; + u16 port = ntohs(ti->port); + enum ice_status status; + bool retval; + + retval = ice_tunnel_port_in_use(&pf->hw, port, NULL); + if (!retval) { + netdev_info(netdev, "port %d not found in UDP tunnels list\n", + port); + return; + } + + status = ice_destroy_tunnel(&pf->hw, port, false); + if (status) + netdev_err(netdev, "error deleting port %d from UDP tunnels list\n", + port); +} + +/** * ice_open - Called when a network interface becomes active * @netdev: network interface device structure * @@ -4740,14 +6327,20 @@ int ice_open(struct net_device *netdev) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; + struct ice_pf *pf = vsi->back; struct ice_port_info *pi; int err; - if (test_bit(__ICE_NEEDS_RESTART, vsi->back->state)) { + if (test_bit(__ICE_NEEDS_RESTART, pf->state)) { netdev_err(netdev, "driver needs to be unloaded and reloaded\n"); return -EIO; } + if (test_bit(__ICE_DOWN, pf->state)) { + netdev_err(netdev, "device is not ready yet\n"); + return -EBUSY; + } + netif_carrier_off(netdev); pi = vsi->port_info; @@ -4760,27 +6353,40 @@ int ice_open(struct net_device *netdev) /* Set PHY if there is media, otherwise, turn off PHY */ if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { - err = ice_force_phys_link_state(vsi, true); + clear_bit(ICE_FLAG_NO_MEDIA, pf->flags); + if (!test_bit(__ICE_PHY_INIT_COMPLETE, pf->state)) { + err = 
ice_init_phy_user_cfg(pi); + if (err) { + netdev_err(netdev, "Failed to initialize PHY settings, error %d\n", + err); + return err; + } + } + + err = ice_configure_phy(vsi); if (err) { - netdev_err(netdev, - "Failed to set physical link up, error %d\n", + netdev_err(netdev, "Failed to set physical link up, error %d\n", err); return err; } } else { + set_bit(ICE_FLAG_NO_MEDIA, pf->flags); err = ice_aq_set_link_restart_an(pi, false, NULL); if (err) { netdev_err(netdev, "Failed to set PHY state, VSI %d error %d\n", vsi->vsi_num, err); return err; } - set_bit(ICE_FLAG_NO_MEDIA, vsi->back->flags); } err = ice_vsi_open(vsi); if (err) netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n", vsi->vsi_num, vsi->vsw->sw_id); + + /* Update existing tunnels information */ + udp_tunnel_get_rx_info(netdev); + return err; } @@ -4831,21 +6437,21 @@ ice_features_check(struct sk_buff *skb, features &= ~NETIF_F_GSO_MASK; len = skb_network_header(skb) - skb->data; - if (len & ~(ICE_TXD_MACLEN_MAX)) + if (len > ICE_TXD_MACLEN_MAX || len & 0x1) goto out_rm_features; len = skb_transport_header(skb) - skb_network_header(skb); - if (len & ~(ICE_TXD_IPLEN_MAX)) + if (len > ICE_TXD_IPLEN_MAX || len & 0x1) goto out_rm_features; if (skb->encapsulation) { len = skb_inner_network_header(skb) - skb_transport_header(skb); - if (len & ~(ICE_TXD_L4LEN_MAX)) + if (len > ICE_TXD_L4LEN_MAX || len & 0x1) goto out_rm_features; len = skb_inner_transport_header(skb) - skb_inner_network_header(skb); - if (len & ~(ICE_TXD_IPLEN_MAX)) + if (len > ICE_TXD_IPLEN_MAX || len & 0x1) goto out_rm_features; } @@ -4875,12 +6481,14 @@ static const struct net_device_ops ice_netdev_ops = { .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = ice_change_mtu, .ndo_get_stats64 = ice_get_stats64, + .ndo_set_tx_maxrate = ice_set_tx_maxrate, .ndo_set_vf_spoofchk = ice_set_vf_spoofchk, .ndo_set_vf_mac = ice_set_vf_mac, .ndo_get_vf_config = ice_get_vf_cfg, .ndo_set_vf_trust = ice_set_vf_trust, .ndo_set_vf_vlan = ice_set_vf_port_vlan, .ndo_set_vf_link_state = ice_set_vf_link_state, + .ndo_get_vf_stats = ice_get_vf_stats, .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid, .ndo_set_features = ice_set_features, @@ -4888,5 +6496,13 @@ static const struct net_device_ops ice_netdev_ops = { .ndo_bridge_setlink = ice_bridge_setlink, .ndo_fdb_add = ice_fdb_add, .ndo_fdb_del = ice_fdb_del, +#ifdef CONFIG_RFS_ACCEL + .ndo_rx_flow_steer = ice_rx_flow_steer, +#endif .ndo_tx_timeout = ice_tx_timeout, + .ndo_bpf = ice_xdp, + .ndo_xdp_xmit = ice_xdp_xmit, + .ndo_xsk_wakeup = ice_xsk_wakeup, + .ndo_udp_tunnel_add = ice_udp_tunnel_add, + .ndo_udp_tunnel_del = ice_udp_tunnel_del, }; |
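
The new XDP MTU clamp shown above (ice_max_xdp_frame_size() plus the added check in ice_change_mtu()) reduces to simple arithmetic on the Rx buffer size. The standalone sketch below mirrors that logic outside the driver; the constant values are assumptions for illustration only (XDP_PACKET_HEADROOM = 256, ICE_RXBUF_2048/ICE_RXBUF_3072 = 2048/3072 bytes, and ICE_ETH_PKT_HDR_PAD covering the Ethernet header, FCS and two VLAN tags) and may not match the exact driver headers.

    /* Standalone sketch of the XDP MTU bound from ice_change_mtu().
     * Constants below are assumed values, not taken from the driver headers.
     */
    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_SIZE            4096            /* assumed host page size */
    #define XDP_PACKET_HEADROOM  256             /* assumed */
    #define ICE_RXBUF_2048       2048            /* assumed */
    #define ICE_RXBUF_3072       3072            /* assumed */
    #define ICE_ETH_PKT_HDR_PAD  (14 + 4 + 2 * 4) /* ETH_HLEN + FCS + 2 VLAN tags, assumed */

    /* Mirrors ice_max_xdp_frame_size(): large pages or legacy Rx must leave
     * XDP headroom inside a 2K buffer; otherwise a 3K buffer is usable.
     */
    static int max_xdp_frame_size(bool legacy_rx)
    {
            if (PAGE_SIZE >= 8192 || legacy_rx)
                    return ICE_RXBUF_2048 - XDP_PACKET_HEADROOM;
            return ICE_RXBUF_3072;
    }

    int main(void)
    {
            int frame_size = max_xdp_frame_size(false);
            int new_mtu = 3000;

            /* Same rejection test as ice_change_mtu(): the MTU plus L2
             * overhead must fit in the XDP frame buffer.
             */
            if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size)
                    printf("max MTU for XDP usage is %d\n",
                           frame_size - ICE_ETH_PKT_HDR_PAD);
            else
                    printf("MTU %d fits within the XDP frame size %d\n",
                           new_mtu, frame_size);
            return 0;
    }

Under these assumed constants, a 4K-page system with legacy Rx disabled allows an XDP MTU of up to 3072 - 26 = 3046 bytes, while 8K-page systems or legacy Rx drop the bound to 2048 - 256 - 26 = 1766 bytes.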